query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | negatives (listlengths 19-20) | metadata (dict) |
---|---|---|---|
Get data from a URL as a python dictionary
|
def get_data_as_dict(url):
    print(url)
result = requests.get(url)
data = json.loads(result.text)
return data
|
[
"def fetch_data(url):\r\n response = requests.get(url)\r\n return response.json()",
"def get_data(url):\n \n request_string = url \n r=requests.get(request_string)\n data = r.json() \n #print data.keys()\n meta, results = data[\"meta\"], data[\"results\"]\n \n return [meta, results]",
"def get_page_data(url):\n req = requests.get(url)\n soup = BeautifulSoup(req.content, 'html.parser')\n return {\n \"url\": url,\n \"title\": get_title(soup),\n \"h1\": get_h1(soup),\n \"h2\": get_h2(soup),\n }",
"def parse(url):\n with request.urlopen(url) as films: \n source = films.read()\n data = json.loads(source)\n return data",
"def _get_data(self, url, auth=None):\n req = urllib2.Request(url, None, headers(self.agent_config))\n if auth:\n add_basic_auth(req, *auth)\n request = urllib2.urlopen(req)\n response = request.read()\n return json.loads(response)",
"def readDataFromURL():\n return",
"def pull_data(url):\n # get the metadata of the table \n table_meta_data = get_url(url)\n print table_meta_data\n # build a json query to pool all the data\n json_query = built_post_query(table_meta_data) \n print json_query\n # send a POST request \n req = urllib2.Request(url, json_query)\n # get the response \n response = urllib2.urlopen(req)\n data = response.read()\n # returns dictionary\n return ast.literal_eval(data)",
"def pubchem_parsing(url):\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req).read()\n fin = json.loads(res.decode())\n return fin",
"def extract_metadata(url: str) -> dict:\n\n r = requests.get(url)\n base_url = get_base_url(r.text, r.url)\n return extruct.extract(r.text, base_url=base_url)",
"def _1get_json(self, url):\n #with self._1sessionLock:\n # return loads(self._1session.get(url, headers=self._1headers).text)\n return loads(RequestsGet(url, headers=self._1headers).text)",
"def fetch_url(self, url):\n url_data = {\n \"url\": url,\n \"content\": None,\n \"size\": 0\n }\n corp_file_name = self.corpus.get_file_name(url) #Using Corpus method to get file_name associated with URL\n content = b'' #To initialize binary content\n for data in open(corp_file_name, mode = 'rb'):\n content += data #To iterate through the data by opening the file\n if corp_file_name != None: #Updating the dictionary with newly obtained content and size of file\n url_data[\"content\"] = content \n url_data[\"size\"] = os.path.getsize(corp_file_name) \n return url_data",
"def _get_data(item_or_url):\n if '://' in item_or_url:\n url = item_or_url\n else:\n url = '%s/%s' % (_base_url, item_or_url)\n req = Request(url, headers={'User-Agent': 'flexx/%s' % flexx.__version__})\n return urlopen(req).read()",
"def request_weather(url):\n response = requests.get(url)\n response_dict = response.json()\n return response_dict",
"def get_json_data(url: str) -> {\"json text\"}:\r\n response = None\r\n try:\r\n response = urllib.request.urlopen(url)\r\n json_data = response.read().decode(encoding=\"utf-8\")\r\n return json.loads(json_data) #parsed json data\r\n finally:\r\n if response != None:\r\n response.close()",
"def readurl(url):\n with contextlib.closing(urlopen(url)) as conn:\n return decode(conn.read())",
"def fetch_data(url):\n\n headers = {'Authorization': f'Bearer {config.spotify_auth}'}\n resp = requests.get(url, headers=headers)\n if resp.status_code != 200:\n raise Exception('erreur: {}'.format(resp.status_code))\n else:\n result = resp.json()\n \n return result",
"def retrieve_article(url):\n article = Article(url)\n article.download()\n article.parse()\n # print(dict(article))\n article_dict = {\n \"title\": article.title,\n \"text\": article.text,\n \"date\": str(article.publish_date)\n }\n # print(article_dict)\n return article_dict",
"def data_request(self, url):\n logging.info(f\"URL | {url}\")\n api_key = os.environ.get('MYSPORTSFEEDS_API_KEY')\n password = os.environ.get('MYSPORTSFEEDS_PASSWORD')\n byte_string = base64.b64encode('{}:{}'.format(api_key, password).encode('utf-8'))\n headers = {\n \"Authorization\": f\"Basic {byte_string.decode('ascii')}\"\n }\n session = requests.session()\n request = session.get(url, headers=headers, verify=False)\n logging.info(request.status_code)\n if request.status_code != 200:\n logging.error(request.status_code)\n logging.error(request.content)\n raise NFLRequestException('Error with Mysportsfeeds API request')\n data = request.json()\n return data",
"def httpObjGet(self, relative_url, subst=\"\"):\n xmldata = self.httpXmlGet(relative_url)\n return self.mapToDict(xmldata, subst)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draws the maze walls, the goal, and the ball at its starting location.
|
def drawMaze(self):
self.mySquare = self.wallCanvas.create_rectangle(40, 40, 76, 76, fill="blue")
self.goal = self.wallCanvas.create_rectangle(230, 250, 280, 300, fill="green", outline="green")
text = self.wallCanvas.create_text(255, 275, text="GOAL", fill="white")
wallBounds = [[0, 0, 10, 410], [0, 0, 410, 10], [0, 395, 410, 410], [395, 0, 405, 410],
[0, 130, 70, 140], [60, 200, 135, 210], [0, 260, 35, 270], [75, 320, 135, 330],
[125, 0, 135, 330], [200, 310, 350, 320], [200, 160, 340, 170], [200, 160, 210, 320],
[330, 85, 340, 160], [225, 0, 235, 95]]
self.wallIDList = []
for (ulx, uly, lrx, lry) in wallBounds:
wall = self.wallCanvas.create_rectangle(ulx, uly, lrx, lry, fill="red", outline="red")
self.wallIDList.append(wall)
print(self.wallIDList)
|
[
"def draw_maze(self):\n\n # plot the walls",
"def draw_maze(self):\n self._display.fill(COLOURS['white'])\n \n for i in range(0, self.width + 1, 20):\n pdraw.line(self._display, COLOURS['black'], (i, 0), (i, self.height))\n for i in range(0, self.height + 1, 20):\n pdraw.line(self._display, COLOURS['black'], (0, i), (self.width, i))\n \n pdraw.rect(self._display, COLOURS['mustard yellow'], (self.start_node[0] + 1, self.start_node[1] + 1, 19, 19)) # start node\n pdraw.rect(self._display, COLOURS['dark blue'], (self.goal_node[0] + 1, self.goal_node[1] + 1, 19, 19)) # goal node",
"def drawWalls(maze):\n \n for wall in maze.walls:\n coord_0 = (wall.start.x, wall.start.y)\n coord_1 = (wall.end.x, wall.end.y)\n pygame.draw.line(screen, color_BONES, coord_0, coord_1, THICKNESS)",
"def draw(self, extras=False):\n im = self.image\n for y in range(6):\n for x in range(6):\n #draw the dots\n cv2.rectangle(im, tuple(np.array(maze_to_image_coords(x, y))-1),\n tuple(np.array(maze_to_image_coords(x, y))+1), (100,100,100), -1)\n\n #draw any walls present\n if self.hedge_mask[y][x]:\n hline(im, x, y)\n if self.vedge_mask[y][x]:\n vline(im, x, y)\n if extras:\n #draw the start / target\n x, y = self.target\n cv2.circle(im, tuple(maze_to_image_coords(x, y)), 8, (0, 0, 255), -1)\n x, y = self.start\n cv2.circle(im, tuple(maze_to_image_coords(x, y)), 5, (50, 50, 220), -1)",
"def draw_walls(self):\n\t\twall_keys = list(self.gridworld.wall_map.keys())\n\t\tfor i in range(0, len(wall_keys)):\n\t\t\twall_loc = eval(wall_keys[i])\n\t\t\t#top left triangle\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, wall_loc[1]*self.cell_height) #top left of cell\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom left of cell\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, wall_loc[1]*self.cell_height) #top right of cell\n\t\t\t#bottom right triangle\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom right of cell\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom left of cell\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, wall_loc[1]*self.cell_height) #top right of cell",
"def _draw_walls(self, ax=None):\r\n if ax is None:\r\n ax = self.ax\r\n\r\n for x in range(self.x_max_range):\r\n for y in range(self.y_max_range):\r\n # North Wall\r\n if self.adjacent[x,y,0] == 0:\r\n ax.plot([x, x+1], [y, y], \"b-\", linewidth=4)\r\n\r\n # East Wall\r\n if self.adjacent[x,y,1] == 0:\r\n ax.plot([x+1, x+1], [y, y+1], \"b-\", linewidth=4)\r\n\r\n # South Wall\r\n if self.adjacent[x,y,2] == 0:\r\n ax.plot([x, x+1], [y+1, y+1], \"b-\", linewidth=4)\r\n\r\n # West Wall\r\n if self.adjacent[x,y,3] == 0:\r\n ax.plot([x, x], [y, y+1], \"b-\", linewidth=4)",
"def _draw_balls(self):\n self.blue_ball.draw(self.screen, BLUE)\n self.red_ball.draw(self.screen, RED)",
"def draw(self,):\n\n # \n # Clear the screen.\n #\n sys.stdout.write(\"\\x1b[2J\")\n\n if not self.animate:\n # \n # Move to upper left and defer to superclass.\n #\n sys.stdout.write(\"\\x1b[H\")\n Maze.draw(self) \n \n else:\n # \n # If we are animating, clear the screen and start carving:\n #\n Kruskal.carve_passages(self)",
"def draw_maze(self, values=None, title=\"\"):\r\n if values is None:\r\n values = self.rewards\r\n\r\n # If Figure has not been made yet, make the figure\r\n if self.figure is None:\r\n self.figure, self.ax, self.current_box, self.target_box, self.value_labels = self._create_plot(title)\r\n plt.show(block=False)\r\n\r\n # Draw current position\r\n self._update_plot(values)\r\n # Render new position on screen\r\n plt.pause(0.005)",
"def main():\n bext.clear()\n\n # Generate some balls.\n balls = []\n for i in range(NUMBER_OF_BALLS):\n balls.append({COLOR: random.choice(COLORS),\n X: random.randint(1, WIDTH - 2),\n Y: random.randint(1, HEIGHT - 2),\n DIR: random.choice(DIRECTIONS)})\n\n while True: # Main program loop.\n oldBallPositions = []\n\n for ball in balls:\n # Draw our balls:\n bext.goto(ball[X], ball[Y])\n bext.fg(ball[COLOR])\n print(BALL_CHAR, end='')\n\n oldBallPositions.append((ball[X], ball[Y]))\n sys.stdout.flush() # (Required for bext-using programs.)\n time.sleep(0.1)\n\n for ball in balls:\n # Move our balls:\n if ball[DIR] == UP_RIGHT:\n ball[X] += 1\n ball[Y] -= 1\n elif ball[DIR] == UP_LEFT:\n ball[X] -= 1\n ball[Y] -= 1\n elif ball[DIR] == DOWN_RIGHT:\n ball[X] += 1\n ball[Y] += 1\n elif ball[DIR] == DOWN_LEFT:\n ball[X] -= 1\n ball[Y] += 1\n\n # See if our balls bounce off the corners:\n if ball[X] == 0 and ball[Y] == 0:\n ball[DIR] = DOWN_RIGHT\n elif ball[X] == 0 and ball[Y] == HEIGHT - 1:\n ball[DIR] = UP_RIGHT\n elif ball[X] == WIDTH - 1 and ball[Y] == 0:\n ball[DIR] = DOWN_LEFT\n elif ball[X] == WIDTH - 1 and ball[Y] == HEIGHT - 1:\n ball[DIR] = UP_LEFT\n\n # See if our balls bounce off the walls:\n elif ball[X] == 0 and ball[DIR] == UP_LEFT:\n ball[DIR] = UP_RIGHT\n elif ball[X] == 0 and ball[DIR] == DOWN_LEFT:\n ball[DIR] = DOWN_RIGHT\n\n elif ball[X] == WIDTH - 1 and ball[DIR] == UP_RIGHT:\n ball[DIR] = UP_LEFT\n elif ball[X] == WIDTH - 1 and ball[DIR] == DOWN_RIGHT:\n ball[DIR] = DOWN_LEFT\n\n elif ball[Y] == 0 and ball[DIR] == UP_LEFT:\n ball[DIR] = DOWN_LEFT\n elif ball[Y] == 0 and ball[DIR] == UP_RIGHT:\n ball[DIR] = DOWN_RIGHT\n\n elif ball[Y] == HEIGHT - 1 and ball[DIR] == DOWN_LEFT:\n ball[DIR] = UP_LEFT\n elif ball[Y] == HEIGHT - 1 and ball[DIR] == DOWN_RIGHT:\n ball[DIR] = UP_RIGHT\n\n for position in oldBallPositions:\n # Erase all of the balls.\n bext.goto(position[0], position[1])\n print(' ', end='')\n # At this point, go back to the start of the main program loop.",
"def draw_house_walls(x, y, width, height):\n print('Drawing house walls', x, y, width, height)\n pass",
"def drawMazeNodes(frame):\n # in this case, frame is a Frame instance\n # rather than a graph\n \n # render the main body\n for node in frame.maze.nodes:\n for neighbor in node.neighbors:\n drawChannel(node.current, neighbor)\n \n # render the construction cell\n X, Y = frame.head.x, frame.head.y\n head_inner_rect = pygame.Rect(X-OFFSET, Y-OFFSET, SIZESQ, SIZESQ)\n pygame.draw.rect(screen, color_FOOD, head_inner_rect)",
"def draw(self):\r\n\r\n global launch_start,power,max_power\r\n\r\n draw.circle(screen,(0,0,255),self.home_planet[0:2],self.home_planet[2])\r\n message = density_font.render(str(round(self.home_planet[4],2)),1,(255,255,255))\r\n w,h = density_font.size(str(round(self.home_planet[4],2)))\r\n screen.blit(message,(self.home_planet[0]-w/2,self.home_planet[1]-h/2))\r\n\r\n draw.circle(screen,(0,255,0),self.goal_planet[0:2],self.goal_planet[2])\r\n message = density_font.render(str(round(self.goal_planet[4],2)),1,(255,255,255))\r\n w,h = density_font.size(str(round(self.goal_planet[4],2)))\r\n screen.blit(message,(self.goal_planet[0]-w/2,self.goal_planet[1]-h/2))\r\n\r\n for i in self.planets:\r\n draw.circle(screen,(255,0,0),i[0:2],i[2])\r\n message = density_font.render(str(round(i[4],2)),1,(255,255,255))\r\n w,h = density_font.size(str(round(i[4],2)))\r\n screen.blit(message,(i[0]-w/2,i[1]-h/2))\r\n if self.player != []:\r\n x2 = self.player[0]-self.player[2]/6\r\n y2 = self.player[1]-self.player[3]/6\r\n draw.line(screen,(125,0,125),(self.player[0],self.player[1]),(x2,y2),2)\r\n\r\n draw.rect(screen,(0,0,0),(740,10,50,100),5)\r\n draw.rect(screen,(0,255,0),(740,10+100-power*(100/max_power),50,power*(100/max_power)))",
"def printMaze(self):\n\t\tfor i in range(self.height + 1):\n\t\t\trow = ''\n\t\t\t# i_columns collects all tuples from added coordinates that have i for their y coordinate\n\t\t\ti_columns = [tup for tup in self.special_coords if tup[1] == i]\n\t\t\t# sorted_columns orders i_columns according to their x coordinates in reverse order (for use as a stack)\n\t\t\tsorted_columns = sorted(i_columns, key=lambda tup: tup[0], reverse=True)\n\t\t\tfor x in range(self.width + 1):\n\t\t\t\t# creates a wall at this coordinate if no special treatment specified by addCoordinate()\n\t\t\t\tif x not in [tup[0] for tup in sorted_columns]:\n\t\t\t\t\trow += '*'\n\t\t\t\t\tself.maze_coords.append((x,i,1))\n\t\t\t\t# checks stack if blocktype of special coordinate should be an open area (uses stack ADT to preserve order in x coordinates for row y)\n\t\t\t\telif sorted_columns.pop()[2] == 0: # if x is not one of the special coord's x then that x must be the referring \n\t\t\t\t\trow += ' ' # to the first popped tuple in the reverse ordered list of special coords\n\t\t\t\t\tself.maze_coords.append((x,i,0))\n\t\t\t\telse:\n\t\t\t\t\trow += '*'\n\t\t\t\t\tself.maze_coords.append((x,i,1))\n\t\t\t# once row i is processed, print row (this preserves ordering of y coordinates)\n\t\t\tprint(row)\n\t\t# updates class' maze_walls and maze_open_areas with updated coordinates for use in other functions\n\t\tself.maze_walls = [(coord[0],coord[1]) for coord in self.maze_coords if coord[2]==1]\n\t\tself.maze_open_areas = [(coord[0],coord[1]) for coord in self.maze_coords if coord[2]==0]\n\t\tpass",
"def draw():\n\n # Make the background white\n screen.clear()\n screen.fill((255, 255, 255))\n\n # Draw the actors\n apple.draw()\n orange.draw()\n pineapple.draw()\n bomb.draw()\n\n # TODO: Show instructions, message and points",
"def __draw_objects(self, img):\n if self.ball:\n (x, y), radius = self.ball\n cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 0), 2)\n cv2.putText(img, \"BALL\", (int(x)+15, int(y)-15), cv2.FONT_ITALIC, 0.6, (0, 0, 255, 255), 2)\n for goal in [self.goal_y, self.goal_b]:\n if goal:\n (x, y), (w, h) = goal\n p1 = (int(x - w/2), int(y - h/2))\n p2 = (int(x + w/2), int(y + h/2))\n cv2.rectangle(img, p1, p2, (0, 255, 0), 2)\n cv2.putText(img, \"GOAL\", (p1[0], p1[1]-10), cv2.FONT_ITALIC, 0.6, (0, 0, 255, 255), 2)",
"def draw_walls(pygame, screen, walls, wall_thickness, wall_color):\r\n for left_wall in walls[\"left\"]:\r\n pygame.draw.line(surface=screen, color=wall_color, width=wall_thickness,\r\n start_pos=left_wall[0],\r\n end_pos=left_wall[1])\r\n\r\n for top_wall in walls[\"top\"]:\r\n pygame.draw.line(surface=screen, color=wall_color, width=wall_thickness,\r\n start_pos=top_wall[0],\r\n end_pos=top_wall[1])\r\n\r\n for right_wall in walls[\"right\"]:\r\n pygame.draw.line(surface=screen, color=wall_color, width=wall_thickness,\r\n start_pos=right_wall[0],\r\n end_pos=right_wall[1])\r\n\r\n for bottom_wall in walls[\"bottom\"]:\r\n pygame.draw.line(surface=screen, color=wall_color, width=wall_thickness,\r\n start_pos=bottom_wall[0],\r\n end_pos=bottom_wall[1])",
"def OnDraw(self):\n self.SetCurrent()\n \n glClear(GL_COLOR_BUFFER_BIT)\n \n if self.arena != None:\n glBegin(GL_LINE_LOOP)\n [red, green, blue] = self.arena.GetColor()\n glColor3f(red, green, blue)\n for lines in self.arena.GetLines():\n [point1x, point1y] = lines.GetPosition(0)\n [point2x, point2y] = lines.GetPosition(1)\n glVertex2f(point1x, point1y)\n glVertex2f(point2x, point2y)\n \n \n glEnd()\n \n \n for pillar in self.pillar:\n glBegin(GL_LINE_LOOP)\n [red, green, blue] = pillar.GetColor()\n glColor3f(red, green, blue)\n for lines in pillar.GetLines():\n [point1x, point1y] = lines.GetPosition(0)\n [point2x, point2y] = lines.GetPosition(1)\n glVertex2f(point1x, point1y)\n glVertex2f(point2x, point2y)\n glEnd()\n\n\n#\t if self.temppoint != []:\n#\t \t glBegin(GL_POINTS)\n#\t \t glVertex2f(self.temppoint[0][0], self.temppoint[0][1])\n# glEnd()\n\t\n #Currentray is the ray where we have to worry about animation and changes.\n if self.currentray is not None: \n glBegin(GL_LINES)\n [red, green, blue] = self.currentray.GetColor()\n glColor3f(red, green, blue)\n\t\n [x, y] = [self.currentray.GetPoint().GetPosition(0), self.currentray.GetPoint().GetPosition(1)]\n glVertex2f(x, y)\n \n \n [x, y] = self.currentray.GetEndPoint(self.t)\n \n glVertex2f(x, y)\n\t\n glEnd()\n \n #These rays are static, since they have come to a stop at their points of collision.\n for i in self.ray:\n glBegin(GL_LINES)\n [red, green, blue] = i.GetColor()\n glColor3f(red, green, blue)\n \n [x, y] = [i.GetPoint().GetPosition(0), i.GetPoint().GetPosition(1)]\n glVertex(x, y)\n \n [x, y] = i.GetEndPoint(i.finaltime)\n glVertex2f(x, y)\n glEnd()\n\t\t\t\n \n self.SwapBuffers()\n \n return",
"def plot_obstacles(self, labels=False):\n MCR.plot_shapes(self.obstacles, labels)\n MCR.plot_points([self.start, self.goal])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks to see if the player's square is either touching a wall or touching the goal, and if so it marks the game as over and displays a gameover message. Note that find_overlapping always includes the square itself!
|
def checkForEndOfGame(self):
# Find list of items on canvas that overlap with region of square
(x1, y1, x2, y2) = self.wallCanvas.coords(self.mySquare)
onItems = self.wallCanvas.find_overlapping(x1, y1, x2, y2)
# If more than one overlaps, then the square is touching a wall or the goal
if len(onItems) > 1:
for item in onItems:
if item in self.wallIDList:
                self.gameOver = "loss"
break
elif item == self.goal:
self.gameOver = "win"
break
# Display win/loss message if game is over
if self.gameOver == 'win':
self.wallCanvas.create_oval(50, 50, 350, 350, fill="yellow")
self.wallCanvas.create_text(200, 200, text="You've won!")
elif self.gameOver == 'loss':
self.wallCanvas.create_oval(50, 50, 350, 350, fill="saddle brown")
self.wallCanvas.create_text(200, 200, text="You've lost!")
|
[
"def gameOverCheck(self, snake):\n gameOver = False\n headX = snake.snakeBody[-1][0]\n headY = snake.snakeBody[-1][1]\n\n if headX < 0:\n gameOver = True\n print(\"Collides with left Wall\")\n elif headY < 0:\n gameOver = True\n print(\"Collides with lower wall\")\n elif headX >= SCREEN_WIDTH:\n gameOver = True\n print(\"Collides with right wall\")\n elif headY >= SCREEN_HEIGHT:\n gameOver = True\n print(\"Collides with upper wall\")\n elif snake.snakeBody[-1] in snake.snakeBody[:-1]:\n gameOver = True\n print(\"Collides with body\")\n\n return gameOver",
"def detect_collision(self):\n dino_bounding_box = [(18, self.dino.jumping.value), (18 + 10, self.dino.jumping.value + 11)]\n dino_pixels = []\n for y in range(dino_bounding_box[0][1], dino_bounding_box[1][1] + 1):\n for x in range(dino_bounding_box[0][0], dino_bounding_box[1][0] + 1):\n dino_pixels.append((x, y))\n for obstacle in self.obstacles:\n obstacle_top_left = (obstacle.position, 27 - obstacle.obstacle_type.value[1])\n obstacle_bitmap = obstacle.obstacle_type.value[2]\n obstacle_pixels = []\n # Make list of pixels (coordinates) that are obstacles (so only the cactus, not the bounding box)\n for y in range(len(obstacle_bitmap)):\n for x in range(len(obstacle_bitmap[0])):\n if obstacle_bitmap[y][x] == 1:\n obstacle_pixels.append(((obstacle_top_left[0] + x), (obstacle_top_left[1] + y)))\n if len(set(dino_pixels).intersection(obstacle_pixels)) > 0:\n # If there is at least one set of coordinates overlapping the dino and the obstacle,\n # game over!\n print(\"game over! single click the button to restart, hold down to exit\")\n self.duration = time.time_ns() / 1000000 - self.start\n # Draw a black filled box to clear the image.\n self.draw.rectangle((0, 0, self.width, self.height), outline=0, fill=0)\n # Draw the final score.\n font = ImageFont.truetype(\"Mario-Kart-DS.ttf\", size=32)\n self.draw.text((42,0), f'{self.score}', font=font, fill=255)\n self.update_display()\n # Stop the game.\n self.stop()\n button_pressed = float(\"inf\")\n start = time.time_ns() / 1000000000\n while True:\n button_state = not self.button.value # We are inverting the button's state.\n # Because of the way the button is wired, not pressed = True and pressed = False.\n if button_state and button_pressed == float(\"inf\"):\n button_pressed = time.time_ns() / 1000000000\n if (time.time_ns() / 1000000000) - start >= 10: # timeout, so end\n break\n if not button_state and button_pressed != float(\"inf\"):\n t = (time.time_ns() / 1000000000)\n button_time_pressed = t - button_pressed\n if button_time_pressed > 1:\n break\n else:\n self.stop()\n self.restart()\n return False\n if SCREEN_RECORD:\n screen_recording[0].save('screen_recording.gif', save_all=True, optimize=False, duration=self.duration / len(screen_recording), loop=0, append_images=screen_recording[1:])\n return True\n return False",
"def game_over(self):\n\n if self.board[1] == self.board[4] == self.board[7] != \" \":\n print(f\"{self.board[1]} WINS!\")\n return True\n elif self.board[2] == self.board[5] == self.board[8] != \" \":\n print(f\"{self.board[2]} WINS!\")\n return True\n elif self.board[3] == self.board[6] == self.board[9] != \" \":\n print(f\"{self.board[3]} WINS!\")\n return True\n elif self.board[1] == self.board[2] == self.board[3] != \" \":\n print(f\"{self.board[1]} WINS!\")\n return True\n elif self.board[4] == self.board[5] == self.board[6] != \" \":\n print(f\"{self.board[4]} WINS!\")\n return True\n elif self.board[7] == self.board[8] == self.board[9] != \" \":\n print(f\"{self.board[7]} WINS!\")\n return True\n elif self.board[1] == self.board[5] == self.board[9] != \" \":\n print(f\"{self.board[1]} WINS!\")\n return True\n elif self.board[3] == self.board[5] == self.board[7] != \" \":\n print(f\"{self.board[3]} WINS!\")\n return True\n elif all(value != \" \" for value in self.board.values()):\n print(\"It's a tie!\")\n return True\n return False",
"def see_if_game_is_over():\n\n see_if_winner()\n see_if_draw()",
"def game_over(self):\n if [4, 1] in self._occupied or [5, 1] in self._occupied:\n self._is_game_over = True",
"def getSquaresHit(grid, laserRow, laserCol, laserVert):\r\n\t# do down / right first\r\n\tif laserVert:\r\n\t\tchangeVert = 1\r\n\t\tchangeHoriz = 0\r\n\telse:\r\n\t\tchangeVert = 0\r\n\t\tchangeHoriz = 1\r\n\tsquaresHit = set()\r\n\tpos = (laserRow + changeVert, laserCol + changeHoriz)\r\n\twhile True:\r\n\t\tif pos[0] < 0 or pos[1] < 0 or pos[0] >= len(grid) or pos[1] >= len(grid[0]):\r\n\t\t\t# went off the grid --> done\r\n\t\t\tbreak\r\n\t\telif grid[pos[0]][pos[1]] == \"-\" or grid[pos[0]][pos[1]] == \"|\":\r\n\t\t\t# hit a laser --> bad\r\n\t\t\treturn \"bad\"\r\n\t\telif grid[pos[0]][pos[1]] == \"#\":\r\n\t\t\t# hit a wall --> done\r\n\t\t\tbreak\r\n\t\telif grid[pos[0]][pos[1]] == \"/\":\r\n\t\t\t# mirror --> change direction\r\n\t\t\tchangeVert, changeHoriz = -changeHoriz, -changeVert\r\n\t\telif grid[pos[0]][pos[1]] == \"\\\\\":\r\n\t\t\tchangeVert, changeHoriz = changeHoriz, changeVert\r\n\t\telse:\r\n\t\t\t# empty square\r\n\t\t\tsquaresHit.add(pos)\r\n\t\tpos = (pos[0] + changeVert, pos[1] + changeHoriz)\r\n\r\n\t# do up / left next\r\n\tif laserVert:\r\n\t\tchangeVert = -1\r\n\t\tchangeHoriz = 0\r\n\telse:\r\n\t\tchangeVert = 0\r\n\t\tchangeHoriz = -1\r\n\tpos = (laserRow + changeVert, laserCol + changeHoriz)\r\n\twhile True:\r\n\t\tif pos[0] < 0 or pos[1] < 0 or pos[0] >= len(grid) or pos[1] >= len(grid[0]):\r\n\t\t\t# went off the grid --> done\r\n\t\t\tbreak\r\n\t\telif grid[pos[0]][pos[1]] == \"-\" or grid[pos[0]][pos[1]] == \"|\":\r\n\t\t\t# hit a laser --> bad\r\n\t\t\treturn \"bad\"\r\n\t\telif grid[pos[0]][pos[1]] == \"#\":\r\n\t\t\t# hit a wall --> done\r\n\t\t\tbreak\r\n\t\telif grid[pos[0]][pos[1]] == \"/\":\r\n\t\t\t# mirror --> change direction\r\n\t\t\tchangeVert, changeHoriz = -changeHoriz, -changeVert\r\n\t\telif grid[pos[0]][pos[1]] == \"\\\\\":\r\n\t\t\tchangeVert, changeHoriz = changeHoriz, changeVert\r\n\t\telse:\r\n\t\t\t# empty square\r\n\t\t\tsquaresHit.add(pos)\r\n\t\tpos = (pos[0] + changeVert, pos[1] + changeHoriz)\r\n\r\n\treturn squaresHit",
"def check_collision(self): \n snake = self.snake.get_locations()\n stones = self.stones.get_locations()\n apples = self.apples.get_locations()\n\n snake_location = snake[1:]\n dead_area = set(snake_location + stones + self.wall)\n if snake[0] in dead_area:\n self.ctx['res_holder']['music'].HIT.play()\n self._set_state('over') \n\n # Check if we ate the apple\n if snake[0] in apples:\n idx = apples.index(snake[0])\n self.snake.update(grow=True)\n self.ctx['res_holder']['music'].POINT.play()\n self.points += 10 # TODO different for other entities\n self.apples.destroy(idx)\n self.apples.create(self)",
"def under_attack(self, square, color):\n\n def non_sliding_piece_check(position, move_range, piece_type):\n for i in move_range:\n potential_capturing_piece_pos = position + i\n if (type(self[potential_capturing_piece_pos]) == piece_type and\n self[potential_capturing_piece_pos].color != color):\n return True\n return False\n\n def sliding_piece_check(position, move_range, piece_type):\n for i in move_range:\n a = position + i\n while (self[a] != 0):\n if not self.is_empty(a):\n if self[a].color != color:\n if type(self[a]) == piece_type or type(self[a]) == Queen:\n return True\n else:\n break\n else:\n break\n a += i\n return False\n\n pawn_range = (9, 11) if color == WHITE_COLOR else (-9, -11)\n\n for move_range, piece_type in (\n ((1, -1, 10, -10, 9, 11, -9, -11), King),\n ((-12, -21, -19, -8, 8, 19, 21, 12), Knight),\n (pawn_range, Pawn)\n ):\n if non_sliding_piece_check(square, move_range, piece_type):\n return True\n\n for move_range, piece_type in (\n ((1, 10, -1, -10), Rook),\n ((9, 11, -11, -9), Bishop),\n ):\n if sliding_piece_check(square, move_range, piece_type):\n return True\n\n return False",
"def is_game_over(self, player, item, item_x, item_y):\n return self.cols_have_same_values(item, item_x, item_y) or \\\n self.rows_have_same_values(item, item_x, item_y) or \\\n self.element_diagonal_has_same_value(item, item_x, item_y)",
"def detect_collision(self):\n\n has_collided, obstacle = check_collision(\n self.model.ship, self.model.current_obstacles)\n\n if has_collided:\n self.model.ship.lives -= 1\n self.model.current_obstacles.remove(obstacle)\n\n if not self.model.ship.lives:\n self.model.current_screen = \"Game Over\"",
"def on_board(self, square):\n x, y = square\n return x >= 0 and y >= 0 and x < self.x_dim and y < self.y_dim and self.rows[y][x] != 'O'",
"def checkCollision(self):\n for x in range(len(self.body)):\n #check for wall collision\n if( (self.body[x].pos[0] + self.size > size[0]) or (self.body[x].pos[0] < 0) ):\n self.gameOver = True\n elif((self.body[x].pos[1] < 0) or (self.body[x].pos[1] + self.size > size[1]) ):\n self.gameOver = True\n #check for apple collision\n if(self.body[x].pos[0] == apple.pos[0] and self.body[x].pos[1] == apple.pos[1]):\n apple.new_pos()\n for x in range(0,len(self.body) ):\n if x == 0:\n pass\n else:\n if self.body[0].pos == self.body[x].pos:\n self.gameOver = True",
"def is_suicide_for_win_better_then_defend(game):\n \n # need rework!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n my_castle = game.get_my_castle()\n enemy_castle = game.get_enemy_castle()\n enemy_most_dangrous_elf = get_closest_enemy_elf(game, my_castle)\n my_most_dangrous_elf = get_closest_my_elf(game, enenemy_castle)\n \n if enemy_most_dangrous_elf.distance(my_castle) > my_most_dangrous_elf.distance(enemy_castle) and mmy_most_dangrous_elf.current_health > game.elf_max_health > 3 :\n if len(game.get_my_mana_fountains()) > len(game.get_enemy_mana_fountains()) or game.get_my_mana() > game.get_enemy_mana():\n return True\n if count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) < count_obstacles_in_enemy_elf_way_to_castle(game, enemy_most_dangrous_elf) and \\\n enemy_most_dangrous_elf.distance(my_castle) - my_most_dangrous_elf.distance(enemy_castle) < count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) * game.elf_max_speed / game.speed_up_multiplier:\n if len(game.get_my_mana_fountains()) > len(game.get_enemy_mana_fountains()) or game.get_my_mana() > game.get_enemy_mana():\n return True\n if enemy_most_dangrous_elf.distance(my_castle) > my_most_dangrous_elf.distance(enemy_castle) and mmy_most_dangrous_elf.current_health > game.elf_max_health > 3:\n if count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) < count_obstacles_in_enemy_elf_way_to_castle(game, enemy_most_dangrous_elf) and \\\n enemy_most_dangrous_elf.distance(my_castle) - my_most_dangrous_elf.distance(enemy_castle) < count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) * game.elf_max_speed / game.speed_up_multiplier:\n return True\n \n return False",
"def hit_a_wall(self) -> bool:\n\n snake = self.snake\n if snake.moving_direction == \"left\" and snake.head.x <= self.boundaries[\"left\"]:\n return True\n if snake.moving_direction == \"right\" and snake.head.x >= self.boundaries[\"right\"]:\n return True\n if snake.moving_direction == \"up\" and snake.head.y <= self.boundaries[\"up\"]:\n return True\n if snake.moving_direction == \"down\" and snake.head.y >= self.boundaries[\"down\"]:\n return True\n\n return False",
"def gameover(self):\n for ship in self.ships_list:\n for coordinate in ship.ship_coordinates():\n if coordinate not in self.hits_lists:\n return False\n return True",
"def collision_checker(self, player):\n # TODO fix small bug when moving up and left into walls, collision detected, but not dealt with...\n wall_collisions = collision_detection(player, self.walls)\n if wall_collisions[1]:\n for wall in wall_collisions[0]:\n if player.vel.x > 0: # Player is moving to the right\n player.pos.x = wall.rect.left - player.size - 1\n player.vel.x = 0\n elif player.vel.x < 0: # Player is moving to the left\n player.pos.x = wall.rect.right + 1\n player.vel.x = 0\n if player.vel.y > 0: # Player is moving to the bottom\n player.pos.y = wall.rect.top - player.size - 1\n player.vel.y = 0\n elif player.vel.y < 0: # Player is moving to the top\n player.vel.y = 0\n player.pos.y = wall.rect.bottom + 1\n pygame.time.wait(1)\n\n else:\n player.update()",
"def check_win(self):\n check_pos = self.get_occupied_positions()\n winner = 0\n if not check_pos:\n # empty board, nobody wins\n return winner\n for pos in check_pos:\n winner = self.check_win_at_pos(pos)\n if winner > 0:\n return winner\n return winner",
"def isWall(mapObj, x, y):\n if x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return False # x and y aren't actually on the map.\n elif mapObj[x][y] in ('#', 'x'):\n return True # wall is blocking\n return False",
"def check_shot(self,row,column):\n if self.board[row][column] == \"H\" or self.board[row][column] == \"M\":\n return \"You've already fired there\"\n\n elif self.board[row][column] == \"S\":\n self.place_hit(row,column)\n return \"Hit!\"\n \n elif self.board[row][column] == \"-\":\n self.place_miss(row,column)\n return \"Miss!\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Callback function for the Quit button, closes the main window and ends the event loop.
|
def doQuit(self):
self.mainWin2.destroy()
|
[
"def quit(self):\r\n self.root.quit()\r\n self.root.destroy()",
"def shutdown_gui(self):\n Gtk.main_quit()",
"def quit(self, event):\n self.Destroy()",
"def close_window(_):\n root.destroy()",
"def closeWindowCallback(self, event):\n\t\tself.EndModal(self.status)",
"def exit_program():\n # exit cleanly by closing socket and destroying the tkinter window\n notification_socket.close()\n notification_window.destroy()",
"def quit_app(self):\n ans = askokcancel('Verify exit', 'Really quit?')\n if ans:\n datautils.save_data(self.datalist)\n self.parent.quit()",
"def GameQuit(self):\n print(\"Goodbye!\")\n sys.exit(0)",
"def exit():\n SysTrayIcon().exit()",
"def OnCloseWindow(self):\n pass",
"def close_window(self):\r\n Window.close()",
"def exit_game(self):\n pygame.quit()\n sys.exit()",
"def auto_exit(self):\n # self.window.quit()\n self.window.destroy()\n self.plot_states()",
"def close_window(window):\r\n window.destroy()",
"def quit_program():\n\n print(\"The program will now exit.\")\n sys.exit()",
"def terminate(self):\n pygame.quit()\n sys.exit()",
"def ev_windowclose(self, event: WindowEvent) -> None:",
"def quit(self):\n self.player.stop()\n exit()",
"def close(self):\r\n pygame.quit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates an Amazon Redshift cluster on AWS
|
def create_redshift_cluster(config, iam_role_arn, cluster_sg_id):
try:
response = redshift_client.describe_clusters(ClusterIdentifier=config.get('CLUSTER', 'CLUSTERIDENTIFIER'))
print('Redshift Cluster already exists: ' + response['Clusters'][0]['ClusterIdentifier'])
return None
except:
response = None
if response is None:
try:
response = redshift_client.create_cluster(
ClusterIdentifier=config.get('CLUSTER', 'CLUSTERIDENTIFIER')
,ClusterType=config.get('CLUSTER', 'CLUSTERTYPE')
,NumberOfNodes=config.getint('CLUSTER', 'NUMBEROFNODES')
,NodeType=config.get('CLUSTER', 'NODETYPE')
,PubliclyAccessible=True
,DBName=config.get('CLUSTER', 'DB_NAME')
,MasterUsername=config.get('CLUSTER', 'DB_USER')
,MasterUserPassword=config.get('CLUSTER', 'DB_PASSWORD')
,Port=config.getint('CLUSTER', 'DB_PORT')
,IamRoles=[iam_role_arn]
,VpcSecurityGroupIds=[cluster_sg_id]
)
return response['Cluster']
except ClientError as e:
print(f'ERROR: {e}')
return None
|
[
"def start_cluster(redshift, roleArn):\n global DWH_CLUSTER_TYPE, DWH_NODE_TYPE, DWH_NUM_NODES, \\\n DWH_DB, DWH_CLUSTER_IDENTIFIER, DWH_DB_USER, DWH_DB_PASSWORD\n print('Starting the cluster...')\n try:\n response = redshift.create_cluster( \n #HW\n ClusterType=DWH_CLUSTER_TYPE,\n NodeType=DWH_NODE_TYPE,\n NumberOfNodes=int(DWH_NUM_NODES),\n\n #Identifiers & Credentials\n DBName=DWH_DB,\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n MasterUsername=DWH_DB_USER,\n MasterUserPassword=DWH_DB_PASSWORD,\n \n #Roles (for s3 access)\n IamRoles=[roleArn] \n )\n print('Redshift HTTP response status code: ')\n print(response['ResponseMetadata']['HTTPStatusCode'])\n return response['ResponseMetadata']['HTTPStatusCode'] == 200\n except Exception as e:\n print(e)\n return False",
"def create_cluster(ctx, name, region, verbosity,\n cp_role, subnets, tags, vpc_cidr, zones, kubeconf, username, heptio_auth, cp_only, node_name,\n node_role, node_sg_ingress, node_min, node_max, node_subnets, node_type, keyname, ssh_public_key,\n ami, no_user_data, yes):\n if node_subnets and not subnets:\n print('If node subnets are specified, the cluster subnets must appear!')\n exit(1)\n elif node_subnets and subnets:\n s = [ns for ns in node_subnets if ns not in subnets]\n if s:\n print('[{}] not one of the cluster subnets.'.format(','.join(s)))\n exit(1)\n\n if not kubeconf:\n files = os.environ.get('KUBECONFIG', '~/.kube/config')\n kubeconf = os.path.expanduser(files.split(':')[0])\n if not yes:\n if not click.confirm('Are you sure to create the EKS cluster in '\n 'region[{}] with kubeconfig[{}]'.format(region, kubeconf)):\n exit(0)\n\n cp = ControlPlane(name, subnets=subnets, role=cp_role, region=region, tags=tags,\n vpc_cidr=vpc_cidr, zones=zones)\n cluster_info = cp.create()\n kc = KubeConfig(cluster_info, kubeconf, user=username, heptio_auth=heptio_auth)\n kc.create()\n\n if cp_only:\n LOG.info('To create EKS cluster control plane only.')\n return\n\n ng = NodeGroup(node_name, cluster_info=cluster_info, keypair=keyname, region=region, ami=ami, subnets=node_subnets,\n kubeconf=kubeconf, role=node_role, sg_ingresses=node_sg_ingress, min_nodes=node_min,\n max_nodes=node_max, instance_type=node_type, ssh_public_key=ssh_public_key,\n no_user_data=no_user_data)\n ng.create()",
"def build_cluster(self):\n self.redshift_client_create()\n self.iam_client_create()\n self.ec2_client_create()\n self.create_iam_role()\n # self.update_iam_config()\n self.create_redshift_cluster()\n # uses created redshift cluster's vpc_id\n self.open_tcp_port()",
"def create_cluster(self,\n cluster_name: str,\n logging_s3_path: str,\n emr_release: str,\n subnet_id: str,\n emr_ec2_role: str,\n emr_role: str,\n instance_type_master: str,\n instance_type_core: str,\n instance_type_task: str,\n instance_ebs_size_master: int,\n instance_ebs_size_core: int,\n instance_ebs_size_task: int,\n instance_num_on_demand_master: int,\n instance_num_on_demand_core: int,\n instance_num_on_demand_task: int,\n instance_num_spot_master: int,\n instance_num_spot_core: int,\n instance_num_spot_task: int,\n spot_bid_percentage_of_on_demand_master: int,\n spot_bid_percentage_of_on_demand_core: int,\n spot_bid_percentage_of_on_demand_task: int,\n spot_provisioning_timeout_master: int,\n spot_provisioning_timeout_core: int,\n spot_provisioning_timeout_task: int,\n spot_timeout_to_on_demand_master: bool = True,\n spot_timeout_to_on_demand_core: bool = True,\n spot_timeout_to_on_demand_task: bool = True,\n python3: bool = True,\n spark_glue_catalog: bool = True,\n hive_glue_catalog: bool = True,\n presto_glue_catalog: bool = True,\n consistent_view: bool = False,\n consistent_view_retry_seconds: int = 10,\n consistent_view_retry_count: int = 5,\n consistent_view_table_name: str = \"EmrFSMetadata\",\n bootstraps_paths: Optional[List[str]] = None,\n debugging: bool = True,\n applications: Optional[List[str]] = None,\n visible_to_all_users: bool = True,\n key_pair_name: Optional[str] = None,\n security_group_master: Optional[str] = None,\n security_groups_master_additional: Optional[List[str]] = None,\n security_group_slave: Optional[str] = None,\n security_groups_slave_additional: Optional[List[str]] = None,\n security_group_service_access: Optional[str] = None,\n spark_log_level: str = \"WARN\",\n spark_jars_path: Optional[List[str]] = None,\n spark_defaults: Optional[Dict[str, str]] = None,\n spark_pyarrow: bool = False,\n maximize_resource_allocation: bool = False,\n steps: Optional[List[Dict[str, Collection[str]]]] = None,\n keep_cluster_alive_when_no_steps: bool = True,\n termination_protected: bool = False,\n tags: Optional[Dict[str, str]] = None) -> str:\n args = EMR._build_cluster_args(**locals())\n response = self._client_emr.run_job_flow(**args)\n logger.info(f\"response: \\n{json.dumps(response, default=str, indent=4)}\")\n return response[\"JobFlowId\"]",
"def create_cluster(cluster_config: str):\n with open(cluster_config) as f:\n config = yaml.safe_load(f)\n\n p = subprocess.run(\n [\n \"cortex\",\n \"cluster\",\n \"up\",\n cluster_config,\n \"-y\",\n \"--configure-env\",\n config[\"cluster_name\"],\n ],\n stdout=sys.stdout,\n stderr=sys.stderr,\n )\n\n if p.returncode != 0:\n raise ClusterCreationException(f\"failed to create cluster with config: {cluster_config}\")",
"def cli_cosmosdb_managed_cassandra_cluster_create(client,\n resource_group_name,\n cluster_name,\n location,\n delegated_management_subnet_id,\n tags=None,\n identity_type='None',\n cluster_name_override=None,\n initial_cassandra_admin_password=None,\n client_certificates=None,\n external_gossip_certificates=None,\n external_seed_nodes=None,\n restore_from_backup_id=None,\n cassandra_version=None,\n authentication_method=None,\n hours_between_backups=None,\n repair_enabled=None):\n\n if authentication_method != 'None' and initial_cassandra_admin_password is None and external_gossip_certificates is None:\n raise CLIError('At least one out of the Initial Cassandra Admin Password or External Gossip Certificates is required.')\n\n if initial_cassandra_admin_password is not None and external_gossip_certificates is not None:\n raise CLIError('Only one out of the Initial Cassandra Admin Password or External Gossip Certificates has to be specified.')\n\n cluster_properties = ClusterResourceProperties(\n delegated_management_subnet_id=delegated_management_subnet_id,\n cluster_name_override=cluster_name_override,\n initial_cassandra_admin_password=initial_cassandra_admin_password,\n client_certificates=client_certificates,\n external_gossip_certificates=external_gossip_certificates,\n external_seed_nodes=external_seed_nodes,\n restore_from_backup_id=restore_from_backup_id,\n cassandra_version=cassandra_version,\n authentication_method=authentication_method,\n hours_between_backups=hours_between_backups,\n repair_enabled=repair_enabled)\n\n managed_service_identity_parameter = ManagedCassandraManagedServiceIdentity(\n type=identity_type\n )\n\n cluster_resource_create_update_parameters = ClusterResource(\n location=location,\n tags=tags,\n identity=managed_service_identity_parameter,\n properties=cluster_properties)\n\n return client.begin_create_update(resource_group_name, cluster_name, cluster_resource_create_update_parameters)",
"def cli_cosmosdb_managed_cassandra_cluster_create(client,\r\n resource_group_name,\r\n cluster_name,\r\n location,\r\n delegated_management_subnet_id,\r\n tags=None,\r\n identity_type='None',\r\n cluster_name_override=None,\r\n initial_cassandra_admin_password=None,\r\n client_certificates=None,\r\n external_gossip_certificates=None,\r\n external_seed_nodes=None,\r\n restore_from_backup_id=None,\r\n cassandra_version=None,\r\n authentication_method=None,\r\n hours_between_backups=None,\r\n repair_enabled=None):\r\n\r\n if initial_cassandra_admin_password is None and external_gossip_certificates is None:\r\n raise CLIError('At least one out of the Initial Cassandra Admin Password or External Gossip Certificates is required.')\r\n\r\n if initial_cassandra_admin_password is not None and external_gossip_certificates is not None:\r\n raise CLIError('Only one out of the Initial Cassandra Admin Password or External Gossip Certificates has to be specified.')\r\n\r\n cluster_properties = ClusterResourceProperties(\r\n delegated_management_subnet_id=delegated_management_subnet_id,\r\n cluster_name_override=cluster_name_override,\r\n initial_cassandra_admin_password=initial_cassandra_admin_password,\r\n client_certificates=client_certificates,\r\n external_gossip_certificates=external_gossip_certificates,\r\n external_seed_nodes=external_seed_nodes,\r\n restore_from_backup_id=restore_from_backup_id,\r\n cassandra_version=cassandra_version,\r\n authentication_method=authentication_method,\r\n hours_between_backups=hours_between_backups,\r\n repair_enabled=repair_enabled)\r\n\r\n managed_service_identity_parameter = ManagedCassandraManagedServiceIdentity(\r\n type=identity_type\r\n )\r\n\r\n cluster_resource_create_update_parameters = ClusterResource(\r\n location=location,\r\n tags=tags,\r\n identity=managed_service_identity_parameter,\r\n properties=cluster_properties)\r\n\r\n return client.begin_create_update(resource_group_name, cluster_name, cluster_resource_create_update_parameters)",
"def create_SQL_cluster(self, environment_id, session_id, domain_name=\"\"):\n AG = self.config.murano.agListnerIP\n clIP = self.config.murano.clusterIP\n post_body = {\"domain\": domain_name, \"domainAdminPassword\": \"P@ssw0rd\",\n \"externalAD\": False,\n \"sqlServiceUserName\": \"Administrator\",\n \"sqlServicePassword\": \"P@ssw0rd\",\n \"osImage\": {\"type\": \"ws-2012-std\", \"name\": self.windows,\n \"title\": \"Windows Server 2012 Standard\"},\n \"agListenerName\": \"SomeSQL_AGListner\",\n \"flavor\": \"m1.medium\",\n \"agGroupName\": \"SomeSQL_AG\",\n \"domainAdminUserName\": \"Administrator\",\n \"agListenerIP\": AG,\n \"clusterIP\": clIP,\n \"type\": \"msSqlClusterServer\", \"availabilityZone\": \"nova\",\n \"adminPassword\": \"P@ssw0rd\",\n \"clusterName\": \"SomeSQL\", \"mixedModeAuth\": True,\n \"unitNamingPattern\": \"\", \"units\": [{\"isMaster\": True,\n \"name\": \"node1\", \"isSync\": True}, {\"isMaster\": False,\n \"name\": \"node2\", \"isSync\": True}],\n \"name\": \"Sqlname\", \"saPassword\": \"P@ssw0rd\",\n \"databases\": ['NewDB']}\n post_body = json.dumps(post_body)\n self.client.headers.update({'X-Configuration-Session': session_id})\n resp, body = self.client.post('environments/' + str(environment_id) +\n '/services', post_body,\n self.client.headers)\n return resp, json.loads(body)",
"def create_keyspace():\n\n cluster = Cluster(['127.0.0.1'])\n session = cluster.connect()\n\n session.execute(\"\"\"CREATE KEYSPACE IF NOT EXISTS sparkifydb\n WITH REPLICATION =\n { 'class': 'SimpleStrategy', 'replication_factor' : 1}\"\"\")\n\n session.set_keyspace('sparkifydb')\n\n return session, cluster",
"def create_cluster(cluster_tag_key, cluster_tag_value, cluster_subnet_id, cluster_az, cluster_hsm_count):\n\n cluster_id = None\n client = boto3.client('cloudhsmv2')\n\n # iterate over clusters list to detect if required cluster exists\n print('* Checking clusters list for cluster tag name: {} and value: {}.'.\n format(cluster_tag_key, cluster_tag_value))\n response = client.describe_clusters()\n\n for cluster in response['Clusters']:\n response = client.list_tags(ResourceId=cluster['ClusterId'])\n\n for tag in response['TagList']:\n if tag['Key'] == cluster_tag_key and tag['Value'] == cluster_tag_value:\n cluster_id = cluster['ClusterId']\n break\n\n # if the cluster_id is None at this stage, a cluster with provided tags value\n # was found and there is no need to continue searching\n if cluster_id is not None:\n print('* Found cluster: {}.'.format(cluster_id))\n break\n\n # create required cluster if it wasn't found\n if cluster_id is None:\n print('** Required cluster not found, creating new one.')\n init_cluster(cluster_subnet_id)\n\n # tag new cluster with required name and value so that resource would be artificially idemnpotent\n set_cluster_tags(cluster_id, cluster_tag_key, cluster_tag_value)\n\n # check number of hsm's and create or delete as needed\n set_cluster_hsm_count(cluster_id, cluster_hsm_count)",
"def create_cluster(self, username, options, config):\n cluster_name = uuid.uuid4().hex\n token = uuid.uuid4().hex\n tls_cert, tls_key = new_keypair(cluster_name)\n # Encode the tls credentials for storing in the database\n tls_credentials = self.encode_tls_credentials(tls_cert, tls_key)\n enc_token = self.encode_token(token)\n\n common = {\n \"name\": cluster_name,\n \"username\": username,\n \"options\": options,\n \"status\": JobStatus.CREATED,\n \"target\": JobStatus.RUNNING,\n \"count\": 0,\n \"state\": {},\n \"scheduler_address\": \"\",\n \"dashboard_address\": \"\",\n \"api_address\": \"\",\n \"start_time\": timestamp(),\n }\n\n with self.db.begin() as conn:\n res = conn.execute(\n clusters.insert().values(\n tls_credentials=tls_credentials,\n token=enc_token,\n config=config,\n **common,\n )\n )\n cluster = Cluster(\n id=res.inserted_primary_key[0],\n token=token,\n tls_cert=tls_cert,\n tls_key=tls_key,\n config=FrozenAttrDict(config),\n **common,\n )\n self.id_to_cluster[cluster.id] = cluster\n self.name_to_cluster[cluster_name] = cluster\n self.username_to_clusters[username][cluster_name] = cluster\n\n return cluster",
"def create_ecs_cluster(aws_conn_id: str, cluster_name: str) -> None:\n hook = AwsBaseHook(\n aws_conn_id=aws_conn_id,\n client_type=\"ecs\",\n )\n hook.conn.create_cluster(\n clusterName=cluster_name,\n capacityProviders=[\n \"FARGATE_SPOT\",\n \"FARGATE\",\n ],\n defaultCapacityProviderStrategy=[\n {\n \"capacityProvider\": \"FARGATE_SPOT\",\n \"weight\": 1,\n \"base\": 0,\n },\n {\n \"capacityProvider\": \"FARGATE\",\n \"weight\": 1,\n \"base\": 0,\n },\n ],\n )",
"def make_cluster(CID, mach_type, nof_machs, ZID):\n\n cmd = \"gcloud container clusters create {0} --machine-type {1} --zone {3} --num-nodes {2}\".format(CID, mach_type, nof_machs, ZID)\n\n rc = subprocess.call(cmd, shell=True)\n return rc",
"def create_redshift_iam_role(iam_role_name, access_key, secret_key):\n \n # In the creation of the IAM client we must specify the Access_key and Secret_Key of the `dwhadmin` user. \n\n iam = boto3.client('iam', aws_access_key_id=access_key, aws_secret_access_key=secret_key)\n \n \n # We fix the assume the Redshift Policy document before creating the role. \n AssumePolicyDocumentRedshift = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"redshift.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n\n # Let's create the role with the name set in the .cfg file and the previous policy.\n try:\n dwhRole = iam.create_role(RoleName=iam_role_name, \n AssumeRolePolicyDocument=json.dumps(AssumePolicyDocumentRedshift),\n Path='/',\n Description='Allows Redshift clusters to call AWS services')\n \n except Exception as e:\n print(f\"Exception produced: {e}\")\n \n # Once the role is created, we can attach a predefined policy to give this role read permissions on S3 buckets.\n iam.attach_role_policy(RoleName=iam_role_name, \n PolicyArn='arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess')",
"def test_create_clusters(self):\n cluster_values = {\n \"project_id\": UUID1,\n \"name\": \"Rabbit Cluster\",\n \"network_id\": UUID2,\n \"flavor\": \"medium\",\n \"size\": 5,\n \"volume_size\": 250,\n }\n\n db_cluster = self.dbapi.create_cluster(self.context, cluster_values)\n self.assertEqual(cluster_values[\"name\"], db_cluster.name,\n \"invalid name value\")\n self.assertEqual(cluster_values[\"network_id\"], db_cluster.network_id,\n \"invalid network_id value\")\n self.assertEqual(cluster_values[\"flavor\"], db_cluster.flavor,\n \"invalid flavor value\")\n self.assertEqual(cluster_values[\"size\"], db_cluster.size,\n \"invalid size value\")\n self.assertEqual(cluster_values[\"volume_size\"], db_cluster.volume_size,\n \"invalid volume_size value\")\n self.assertFalse(db_cluster.deleted, \"invalid deleted value\")",
"def create(self, path_config_output):\n logger.info(\"Creating IamRole - to enable DWH access from S3..\")\n role_arn = self.create_iam_role_with_s3_access()\n\n logger.info(\"Creating Redshift Cluster - to host DWH..\")\n self.create_cluster(role_arn)\n _wait_cluster_switching(\n self.redshift,\n self.cluster_identifier,\n initial_status=\"creating\"\n )\n\n logger.info(\"Extracting cluster properties..\")\n cluster_descriptor = self.redshift.describe_clusters(\n ClusterIdentifier=self.cluster_identifier\n )['Clusters'][0]\n dwh_endpoint = cluster_descriptor['Endpoint']['Address']\n dwh_role_arn = cluster_descriptor['IamRoles'][0]['IamRoleArn']\n dwh_vpc_id = cluster_descriptor['VpcId']\n\n logger.info(\"Enabling communication s3 <-> DWH..\")\n self.enable_communication_s3_with_dwh(dwh_vpc_id)\n\n \"Setup completed, Host:\\npostgresql://{}:{}@{}:{}/{}\".format(\n self.db_user,\n self.db_password,\n dwh_endpoint,\n self.db_port,\n self.db_name\n )\n\n logger.info(\"Exporting current machine configuration in {}\".format(path_config_output))\n self.export_dwh_current_config(path_config_output, dwh_vpc_id, dwh_role_arn, dwh_endpoint)",
"def create_cluster(worker_count=0):\n global nodes, stash, seeds\n nodes = []\n seeds = []\n stash = []\n #create the seed node\n seeds.append(Node(cluster_name, node_type=\"seed\", number=0, create=True, IPv4=True))\n #create the rest of the nodes\n for i in range(worker_count):\n stash.append(Node(cluster_name, node_type=\"node\", number=\"%02d\" % (i+1), create=True, IPv4=True))\n\n #save the cluster to file\n save_cluster()\n #wait until everybody is ready\n Cluster.wait_nodes(seeds+nodes)\n find_orchestrator()\n inject_hosts_files()\n log.info('Every node is ready for SSH')",
"def create_clusters(provider, context, **kwargs):\n conn = get_session(provider.region).client('ecs')\n\n try:\n clusters = kwargs[\"clusters\"]\n except KeyError:\n logger.error(\"setup_clusters hook missing \\\"clusters\\\" argument\")\n return False\n\n if isinstance(clusters, basestring):\n clusters = [clusters]\n\n cluster_info = {}\n for cluster in clusters:\n logger.debug(\"Creating ECS cluster: %s\", cluster)\n r = conn.create_cluster(clusterName=cluster)\n cluster_info[r[\"cluster\"][\"clusterName\"]] = r\n return {\"clusters\": cluster_info}",
"def main():\n\n # Load config\n config = configparser.ConfigParser()\n config.read(\"etl.cfg\")\n\n aws_key = config.get(\"aws\", \"key\")\n aws_secret = config.get(\"aws\", \"secret\")\n\n db_cluster_id = config.get(\"redshift\", \"cluster_identifier\")\n db_name = config.get(\"redshift\", \"db_name\")\n db_user = config.get(\"redshift\", \"db_user\")\n db_password = config.get(\"redshift\", \"db_password\")\n db_port = config.get(\"redshift\", \"db_port\")\n\n redshift = boto3.client(\n \"redshift\",\n region_name=\"us-west-2\",\n aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret,\n )\n\n # Make sure the Redshift cluster exists\n try:\n cluster_props = redshift.describe_clusters(ClusterIdentifier=db_cluster_id)[\"Clusters\"][0]\n except redshift.exceptions.ClusterNotFoundFault:\n print(\"Error: Cluster does not exist.\")\n return\n\n if cluster_props[\"ClusterStatus\"] != \"available\":\n print(f\"Error: Cluster is not available. Current status is: {cluster_props['ClusterStatus']}\")\n return\n\n # Dynamically retrieve the Redshift cluster host\n db_host = cluster_props[\"Endpoint\"][\"Address\"]\n\n # Connect to Redshift cluster\n conn = psycopg2.connect(\n f\"host={db_host} dbname={db_name} user={db_user} password={db_password} port={db_port}\"\n )\n\n # Data checks to run\n data_checks = [\n has_no_empty_tables,\n has_valid_temperature,\n has_valid_ratings,\n has_valid_barcode,\n has_valid_checkout_year,\n ]\n\n with conn.cursor() as cursor:\n for data_check in data_checks:\n print(f\"Running data check: {data_check.__name__}...\", end=\" \")\n data_check(cursor)\n print(\"OK\")\n\n conn.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
user submits an expense
|
def submit_expense(request):
# TODO validation
this_token = request.POST['token']
this_user = User.objects.filter(token__token=this_token).get()
    if 'date' not in request.POST:
        now = datetime.now()
    else:
        # use the submitted date when one is provided
        now = request.POST['date']
Expense.objects.create(
user=this_user, amount=request.POST['amount'], text=request.POST['text'],
date=now)
print("I'm in submit expense")
print(request.POST)
return JsonResponse(
{'status': 'ok',
}, encoder=json.JSONEncoder
)
|
[
"def add_expense(user_name, expense_date, expense_amount, expense_description):\n\n return \"Expense added successfully!\"",
"def test_expenses_post(self):\n pass",
"def SUBMIT_EXPENSE_CATEGORY_ENTRY():\n print(\"DOING Expense CATEGORIES\")\n ASSET_INPUT = ExpenseCategory_ENTRY()\n ASSET_INPUT.set_expense_category_name(entryExpense_Category_name.get())\n ASSET_INPUT.SUBMIT_EXPENSE_CATEGORY()",
"def Expense(request, expense_id):\n if request.META['REQUEST_METHOD'] == 'GET':\n expense = models.Expense.get_by_id(int(expense_id))\n params = {}\n if expense is None:\n params['error'] = 'Expense ID %s not found!' % expense_id\n else:\n params.update(\n {'expense': expense})\n return common.Respond(request, 'expense.html', params)\n elif request.META['REQUEST_METHOD'] == 'POST':\n logging.info(request.POST)",
"def add_expense(self):\n new_expense = self._create_expense()\n self.expense_tracker.add_expense(new_expense)\n threadSignals.update_data.emit()\n self._reset_fields()",
"def test_expenses_with_payment_post(self):\n pass",
"async def expedition(self, ctx):\r\n #Make sure they're not on an adventure or traveling\r\n adventure = await AssetCreation.getAdventure(self.client.pg_con, ctx.author.id)\r\n if adventure['adventure'] is not None:\r\n ctx.command.reset_cooldown(ctx)\r\n return await ctx.reply('You are currently traveling. Please wait until you arrive at your destination before traveling again.')\r\n\r\n #Tell database they're going on an expedition\r\n await AssetCreation.setAdventure(self.client.pg_con, int(time.time()), \"EXPEDITION\", ctx.author.id)\r\n\r\n #Send message\r\n await ctx.reply('You have went on an expedition! You can return at any time with the `arrive` command.\\nYou will gain increased rewards the longer you are on expedition, but only for 1 week.')",
"def _create_expense(self):\n name = self.le_name.text()\n amount = float(self.le_amount.text())\n category = self.le_category.text()\n date = self.de_date.text()\n\n return Expense(name, amount, category, date)",
"def insert_expenses(expense_type, cost):\n insert_expense_command = \"\"\"insert into {} (Expense_Type, Expense_Amount) values (?,?)\"\"\".format(current_month)\n insert_expense_name = expense_type\n insert_expense_amt = cost\n multi_expense_insert = insert_expense_name, insert_expense_amt\n conn.execute(insert_expense_command, multi_expense_insert)\n conn.execute(\"commit;\")",
"def approve_expense_reimbursement(request):\n if request.user.is_authenticated and check_user_group(request.user, \"Manager\"):\n # If the user is making a post request, updates the database.\n if request.method == \"POST\":\n expense_id = request.POST['expense_id']\n expense_request = Expenses.objects.get(expense_id=expense_id)\n if \"approve\" in request.POST:\n expense_request.status = \"Approved\"\n if \"reject\" in request.POST:\n expense_request.status = \"Denied\"\n expense_request.save()\n\n # Default behavior: Load all pending time sheets.\n pending_expense_requests = Expenses.objects.filter(status=\"Pending\")\n processed_expense_requests = Expenses.objects.exclude(status=\"Pending\")\n\n # Load all approved time sheets.\n context = {\n 'pending_expense_requests': pending_expense_requests,\n 'processed_expense_requests': processed_expense_requests\n }\n return render(request, 'approvalsexpenses.html', context)\n else:\n return redirect(login_user)",
"def expense_reimbursement(request):\n if request.user.is_authenticated:\n if request.POST:\n form = ExpenseRequestForm(request.POST, request.FILES)\n else:\n form = ExpenseRequestForm()\n layout = get_layout_based_on_user_group(request.user)\n # Retrieving existing requests from the database\n this_username = request.user\n user = User.objects.get(username=this_username)\n expense_requests = Expenses.objects.filter(user_id__username=this_username)\n print(\"FOUND EXPENSE REQUESTS: \")\n print(expense_requests)\n # Display the page normally.\n context = {\n \"layout\": layout,\n \"form\": form,\n \"expense_requests\": expense_requests,\n }\n # If the user is posting, saving the form data and saving to the database.\n if form.is_valid():\n print(\"CREATING EXPENSE REIMBURSEMENT\")\n form = ExpenseRequestForm(request.POST, request.FILES)\n new_expense_request = form.save(commit=False)\n new_expense_request.user_id = user\n new_expense_request.status = 'Pending'\n print(new_expense_request.file.url)\n new_expense_request.save()\n # Redirect is done instead of rendering because refreshing will cause form resubmission.\n return HttpResponseRedirect('expense-requests/')\n else:\n return render(request, 'expense-requests.html', context)\n else:\n # User is not logged in. Show them the way.\n return redirect(login_user)",
"def create_exp():\n # References functions in utils.py file\n if is_logged_in():\n # Post method inserts new experience into database\n if request.method == \"POST\": \n experience = {\n \"experience_name\": request.form.get(\"experience_name\"), \n \"category_name\": request.form.get(\"category_name\"),\n \"img_address\": request.form.get(\"img_address\"),\n \"description\": request.form.get(\"description\"),\n \"added_by\": session[\"user\"]\n }\n mongo.db.experiences.insert_one(experience)\n flash(\"Experience Successfully Added!\")\n return redirect(url_for(\"user.profile\", username=session['user']))\n # Get method to retrieve category choices for dropdown\n else:\n categories = mongo.db.categories.find().sort(\"category_name\", 1)\n return render_template(\n \"create_experience.html\",\n categories=categories)\n # Redirects user to log in screen if they are not logged in \n else:\n flash(\"You need to log in to perform this operation\")\n return redirect(url_for('user.log_in'))",
"def test_expenses_id_payment_post(self):\n pass",
"def assessment_submit():\n\n if _check_login() is False:\n return redirect(url_for('index'))\n\n answer = int(request.form['test-question'])\n\n if answer == 1:\n score = 10.0\n else:\n score = 0.0\n\n save_assessment_event_submitted_grade_event(session['username'], score)\n\n return render_template('assessment.html', answer=answer)",
"def expenses_view():\n expense = None\n expenses_tab = expenses_table()\n form = ExpenseViewForm()\n form.category.choices = category_choice()\n if form.validate_on_submit():\n if form.category.data != \"\" and form.expense_type.data != \"\": # when user select category and type\n expense = Expenses.query.filter_by(budget_id=selected_budget()). \\\n filter_by(category=form.category.data). \\\n filter_by(expense_type=form.expense_type.data).all()\n elif form.category.data != \"\": # when user select category only\n expense = Expenses.query.filter_by(budget_id=selected_budget()). \\\n filter_by(category=form.category.data).all()\n elif form.expense_type.data != \"\": # when user select type only\n expense = Expenses.query.filter_by(budget_id=selected_budget()). \\\n filter_by(expense_type=form.expense_type.data).all()\n expenses_tab = expenses_table(expense)\n return render_template('expenses_view.html', form=form, expenses_tab=Markup(expenses_tab))\n\n return render_template('expenses_view.html', form=form, expenses_tab=Markup(expenses_tab))",
"def save_expenses():\n # checking whether entered information in 'price' entry are digits or not\n price_result = digit_checker(price_entry)\n\n if price_entry.get() == \"\":\n messagebox.showerror(\"Empty\", \"Price box is empty. Please , fill it.\")\n\n elif price_result is False:\n messagebox.showerror(\"Invalid\", \"Please , insert digits only in it.\")\n\n else:\n\n # -------- as a whole maintaining balance after adding expenses budget so, subtracting now --------\n read_my_files()\n\n global balance\n # now checking whether a file is existing or not. if yes it is True, if not False.\n\n #if os.path.isfile(str(password_entry_signup.get() + \"_income.txt\")):\n balance = float(fetched_balance) # to read previous\n\n # now , balance stored in new variable because just to show warning if balance is low\n current_balance = balance\n\n balance -= float(price_entry.get())\n #print(f\"Your balance after spending : {balance}\")\n # checking balance is lesser than 0 , if yes show warning , if no do calcaulation\n if balance >= 0:\n global spend, expense_fetch_first\n\n if expense_fetch_first == 0:\n\n spend = float(price_entry.get()) + float(fetched_expenses)\n expense_fetch_first += 1\n\n else:\n spend += float(price_entry.get())\n #print(\"after spending = \", spend)\n with open(str(password_entry_signup.get() + \"_expenses.txt\"), \"w+\") as expenses_file:\n expenses_file.write(str(spend))\n\n with open(str(password_entry_signup.get() + \"_balance.txt\"), \"r+\") as balance_file:\n balance_file.write(str(balance))\n messagebox.showinfo(\"Expenses\", \"Your recent expenditure is $\" + price_entry.get() +\n \", total expenditure is $\" + str(spend) +\n \" and your new balance is $\" + str(balance) + \" Thank you !\")\n else:\n # inserted price is added with existed balance because to make balance after subtracting\n balance += float(price_entry.get())\n messagebox.showwarning(\"No balance\", \"Sorry, no enough money. Your current balance is $\"\n + str(current_balance))\n\n price_entry.delete(0, END)",
"def add_expenditure():\n\n from models import Budget, Expenditure, User, Category\n from utils import expenditure_total_amount_and_avg, budget_totals, get_dates_for_budget, get_progress, get_budget_per_category, connect_to_db \n\n # Set the value of the user id of the user in the session\n id = session.get('id')\n\n # Get values from the form\n category_id = int(request.form.get(\"category\"))\n price = request.form.get(\"price\")\n date_of_expenditure = request.form.get(\"date\")\n where_bought = request.form.get(\"wherebought\")\n description = request.form.get(\"description\")\n\n start_date, end_date = get_dates_for_budget(category_id, id)\n\n # Create a new expenditure object to insert into the expenditures table\n new_expenditure = Expenditure(\n category_id = category_id,\n price = price,\n date_of_expenditure = date_of_expenditure,\n where_bought = where_bought,\n description = description,\n expenditure_userid = id\n )\n\n # Insert the new expenditure into the expenditures table and commit the insert\n db.session.add(new_expenditure)\n db.session.commit()\n\n # Unpacking the function call\n total_cat_price, avg_cat_expenditures = expenditure_total_amount_and_avg(category_id, id, start_date, end_date)\n budget_minus_expenses = budget_totals(category_id, id, total_cat_price)\n cat_budget = get_budget_per_category(category_id, id)\n category_progress = get_progress(budget_minus_expenses, cat_budget)\n\n expenditure_info = {\n 'total_cat_price': total_cat_price,\n 'avg_cat_expenditures': avg_cat_expenditures,\n 'category_id': category_id,\n 'expenditure_id': new_expenditure.id,\n 'date_of_expenditure': new_expenditure.date_of_expenditure.strftime('%Y-%m-%d'),\n 'where_bought': new_expenditure.where_bought,\n 'description': new_expenditure.description,\n 'price': str(new_expenditure.price),\n 'category': new_expenditure.category.category,\n 'cat_budget_minus_expenses': budget_minus_expenses,\n 'category_progress': category_progress\n }\n\n return jsonify(expenditure_info)",
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth = ExpontialGrowthRate*Population\n #Expontial growth is calculated by timesing the eexpontial growth rate by the starting population.\n print(\"Exponential Growth\",ExponentialGrowth)\n return",
"def commit_request():\n name = request.form['name']\n justification = request.form['justification']\n pledges = request.form['pledges'].split(',')\n suggested_value = request.form['suggestedValue']\n now = datetime.date.today()\n\n for pledge_id in pledges:\n record_id = Record.add_record(name, now, suggested_value, justification, pledge_id)\n return 'Request Successfully Submited'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Encodes a URL to a shortened URL.
|
def encode(self, longUrl: str) -> str:
        short = str(hash(longUrl))
        self.shorttolong[short] = longUrl
        return "http://tinyurl.com/" + short
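
The `encode` above only fills `self.shorttolong`; here is a minimal sketch of the full codec with the matching `decode`, assuming the same in-memory dict. The class name and the prefix-stripping in `decode` are illustrative, not taken from the original.

class Codec:
    def __init__(self):
        # Same in-memory mapping the original encode() writes to.
        self.shorttolong = {}

    def encode(self, longUrl: str) -> str:
        short = str(hash(longUrl))
        self.shorttolong[short] = longUrl
        return "http://tinyurl.com/" + short

    def decode(self, shortUrl: str) -> str:
        # Strip the fixed prefix to recover the hash key, then look it up.
        key = shortUrl.rsplit("/", 1)[-1]
        return self.shorttolong[key]

A round trip such as codec = Codec(); codec.decode(codec.encode("https://example.com/page")) returns the original URL.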
|
[
"def encode(self, longUrl):\n global ID\n encoded = hex(ID).lstrip('0xX')\n url_map[ID] = longUrl\n ID += 1\n return 'http://shorturl.com/' + encoded",
"def encode(self, longUrl: str) -> str:\n path = self.service.insert(longUrl)\n return 'https://a.com/' + path",
"def shorten_url():\n\n\t# Let's not accept invalid url values\n\tif 'url' not in request.form or not request.form['url']:\n\t\tresponse = app.make_response(json.dumps({'error': 'Invalid url.'}))\n\t\tresponse.status = '403 Invalid url'\n\t\tresponse.mimetype = 'text/json'\n\n\t\treturn response\n\n\turl = request.form['url']\n\n\t# Correct invalid URLs (very simple)\n\tif not url.startswith('http'):\n\t\turl = 'http://%s' % url\n\n\t# Keep only the first 5 characters of the sha value\n\tshortened_url = sha.sha(url).hexdigest()[:5]\n\n\t# Record the mapping in our DB\n\t_record_url(shortened_url, url)\n\n\tresponse = app.make_response(json.dumps({'url': url_for('get_url', shortened_url=shortened_url)}))\n\tresponse.mimetype = 'text/json'\n\n\treturn response",
"def ShortenUrl(url):\n tinyurl_api = \"http://tinyurl.com/api-create.php?url=\"\n try:\n url = urllib2.urlopen(tinyurl_api + url).read()\n except urllib2.URLError:\n info(\"failed to call out to tinyurl.com\")\n return url",
"def Shorten(self, url):\n \n service = SETTINGS[\"urlshorter\"] or \"is.gd\"\n log.logger.info(\"Shortening URL %s with %s\", url, service)\n if self.IsShort(url): return url\n try:\n s = urlshorter.PROTOCOLS[service].URLShorter()\n return s.short(url)\n except: return url",
"def shorten_url(url):\n if _is_valid_short(url): # dont short our short urls\n code = url[len(url_for('main.index', _external=True)):]\n long_url = lengthen_url(code)\n if long_url:\n return code, long_url\n else:\n return None, None\n url = _standardize_url(url)\n if not url:\n return None, None\n\n #get the id for this url (whether new or otherwise)\n link = Url.query.filter_by(url=url).first()\n if not link: #url not yet inserted into database\n link = Url(url=url)\n db.session.add(link) #insert and get its id\n db.session.commit()\n code = _convert_to_code(link.id)\n return code, url",
"def shortURL(self, url, target=50):\n base = self.database.defaultURLPrefix\n return shorturls.shortURL(url, target, base)",
"def shorten_url(url, user_id):\n try:\n return requests.get(SHORTENER_URL, params={\n 'token': config.get_str('botan_token'),\n 'url': url,\n 'user_ids': str(user_id),\n }).text\n except Exception as e:\n logger.exception(e)\n return url",
"def get_shortened_url():\n url = request.args.get(\"url\")\n if not is_valid_url(url):\n return make_response(\"The url was not valid! Make sure to start the url with http:// or https://\", 404)\n key = url_key_dict.get(url)\n if key:\n if not expired(key):\n return make_response(prefix + key, 200)\n key_url_dict.pop(key, None)\n url_key_dict.pop(url, None)\n key = create_new_key_url_pair(url)\n return make_response(prefix + key, 200)\n key = create_new_key_url_pair(url)\n return make_response(prefix + key, 200)",
"def bitlify(url):\n\treturn Bittle.objects.bitlify(url).shortUrl",
"def shorten_url(db):\n long_url = str(request.body.getvalue(), encoding=\"UTF-8\")\n print(long_url)\n found = db.execute('''SELECT base62 FROM ShortUrl WHERE url = ?''', (long_url,))\n row = found.fetchone()\n if row is not None:\n return row[0]\n else:\n count = __count__.increment()\n short_url = convert_to_base62_string(count)\n db.execute('''INSERT INTO ShortUrl VALUES (?, ?)''', (long_url, short_url))\n return short_url",
"def add_url():\n url = request.form['url']\n\n if not tlds.has_valid_tld(url):\n flash(\"Sorry, but {0} isn't a valid URL. \".format(url))\n return redirect(url_for('show_mainpage'))\n\n try:\n # assume http if no protocol/scheme given\n if urlparse(url).scheme == '':\n url = 'http://' + url\n\n cr = g.db.cursor()\n cr.execute('INSERT INTO Link (longurl) VALUES (?);',\n [url])\n res = cr.execute('SELECT id FROM Link WHERE longurl = (?);',\n [url])\n\n short_url = res.fetchone()[0]\n g.db.commit()\n cr.close()\n except Exception as e:\n # TODO log error msg and send to me.\n flash(\"We're sorry, but an error occurred.\")\n else:\n flash('Short url is {0}/{1}'.format(app.config['HOSTNAME'], \n short_url))\n \n return redirect(url_for('show_mainpage'))",
"def _format_short(self, url):\n slug = url.split('=')[-1]\n return 'https://youtu.be/' + slug",
"def _record_url(shortened_url, url):\n\tdb.set(PREFIX + shortened_url, url)",
"def shorten():\n url = request.args.get('url', None)\n if not url:\n return jsonify({'status': 'err'})\n code, long_url = shorten_url(url)\n if not code:\n return jsonify({'status': 'err'})\n return jsonify({'status': 'ok',\n 'code': code,\n 's_url': url_for('main.expand', code=code, _external=True),\n })",
"def url_shortner_page(request):\n if request.method != 'POST':\n return HttpResponseNotAllowed(['POST'])\n if request.POST.get('url', None) is None:\n return HttpResponseBadRequest(MESSAGE_INVALID_DATA_PARAMS)\n\n url_to_shorten = request.POST.get('url')\n\n short_url, created = ShortURL.objects.get_or_create(url=url_to_shorten)\n\n return JsonResponse({\n 'url': short_url.url,\n 'tiny_url': short_url.abs_tiny_url,\n }, status=201 if created else 200)",
"def generate_shortlink(url):\r\n # parameters from rebrandly\r\n short_url_domain = 'go.teatexts.me'\r\n api_key = 'YOUR_API_KEY_HERE'\r\n\r\n # payloads for the API call\r\n linkRequest = {\r\n 'destination': url,\r\n 'domain': {\r\n 'fullName': short_url_domain\r\n }\r\n # 'slashtag': 'TEXT_HERE'\r\n }\r\n\r\n # request headers for the api call\r\n requestHeaders = {\r\n 'Content-type': 'application/json',\r\n 'apikey': api_key\r\n }\r\n \r\n # make the api call to generate the link\r\n r = requests.post('https://api.rebrandly.com/v1/links',\r\n data = json.dumps(linkRequest),\r\n headers = requestHeaders)\r\n \r\n # return the shortlink, plus error handling\r\n if (r.status_code == requests.codes.ok):\r\n link = r.json()\r\n return link[\"shortUrl\"]\r\n else:\r\n return \"Error\"",
"async def tinyurl(self, ctx, *, link: str):\n url = link.strip(\"<>\")\n url = 'http://tinyurl.com/api-create.php?url=' + url\n async with aiohttp.ClientSession() as cs:\n async with cs.get(url) as resp:\n new = await resp.text()\n embed = discord.Embed(title='TinyURL Link Shortener', color=self.colour)\n embed.add_field(name='Original Link', value=link, inline=False)\n embed.add_field(name='Shortened Link', value=new, inline=False)\n await ctx.send(embed=embed)\n try:\n await ctx.message.delete()\n except discord.errors.Forbidden:\n pass",
"def create_url(self, URL):\r\n return '{0}{1}'.format(self.url, URL)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Torrents should be a list. If passed a string, it will be downloaded. file_filter will filter the files we want to download; this will be a list of strings that should match the file names.
|
def add_torrents(self,torrents, download_dir=None, file_filter=None):
if not isinstance(torrents,(tuple,list)):
assert isinstance(torrents,basestring)
torrents = [torrents]
if download_dir is None: download_dir = self.download_dir
for torrent in torrents:
self.logger.debug(str(torrent))
if torrent.startswith("magnet"): # Handling magnet links
            running_torrent = self.transmission.add_torrent("1", filename=torrent, download_dir=download_dir)
        else:
            running_torrent = self.transmission.add_torrent(torrent, download_dir=download_dir)
#Currently disabling the filter file, it seems the magnet links are taking time
#I'll probably filter in post processing on completed torrents. More space used, but still functional
#self.filter_torrent(running_torrent,file_filter)
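
The string-to-list coercion at the top of `add_torrents` is the part callers most often trip over; here is a standalone sketch of the same pattern written for Python 3, with `str` in place of the Python 2 `basestring`. The helper name is illustrative, not from the original.

def normalize_torrents(torrents):
    # Accept a single torrent (path, URL or magnet link) or an iterable of them,
    # always returning a list, mirroring the check add_torrents performs.
    if not isinstance(torrents, (tuple, list)):
        if not isinstance(torrents, str):
            raise TypeError("torrents must be a string, tuple or list")
        torrents = [torrents]
    return list(torrents)

For example, normalize_torrents("magnet:?xt=urn:btih:abc") yields a one-element list, while a list or tuple passes through unchanged.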
|
[
"def filter_torrent(self,torrent,file_filter):\r\n try:\r\n files_dict = self.wait_for_files(torrent,timeout=5*60)\r\n files = []\r\n for file_key in files_dict.keys():\r\n for file_name in file_filter:\r\n if re.match(file_name,files_dict[file_key]['name'],re.IGNORECASE) is not None:\r\n files.append(file_key)\r\n if not files: raise NoFilesException\r\n self.transmission.change(torrent.fields['id'], files_wanted = files)\r\n self.transmission.start(torrent['id'])\r\n except NoFilesException:\r\n self.transmission.remove(torrent['id'], delete_data=True)",
"def torrent_files(server_url, torrent_hash):\n response = requests.get(server_url + '/torrents/' + torrent_hash)\n torrent = response.json()\n return torrent['files']",
"def filter_urls(urls):\n filtered_urls = []\n for url in urls:\n if url.endswith(\"txt\") or url.endswith(\"rec\"):\n filtered_urls.extend([url])\n return filtered_urls",
"def rest_broker_file_list(args):\n rtb = _rest_target_broker_find_by_id_url(args.target)\n for name, hexdigest in rtb.rest_tb_file_list().iteritems():\n print name, hexdigest",
"def download_filenames(self) -> List[str]:\n if self.config.archive_filenames:\n return _list_of_strings(self.config.archive_filenames)\n return [os.path.basename(urlparse(url).path) for url in self.download_urls]",
"def list_wheel(wheel_file):\n return [f.filename for f in zipfile.ZipFile(str(wheel_file)).filelist if f.filename.startswith(\"spam/\")]",
"def get_torrents(self, search, filter_=None, ordering_keys=None):\n if six.PY2:\n search = search.encode(\"utf-8\")\n self.grab_torrents(search)\n torrent_list = self._torrentItems\n if filter_:\n torrent_list = self.filter(filter_)\n if ordering_keys:\n torrent_list = sorted(torrent_list, key=attrgetter(*ordering_keys))\n return torrent_list",
"def filter(self, filter_):\n valid_torrent_items = []\n results = []\n for torrent_item in self._torrentItems:\n filter_result = filter_.test(torrent_item)\n if filter_.test(torrent_item) == filter_.TEST_OK:\n valid_torrent_items.append(torrent_item)\n results.append((torrent_item, filter_result))\n if not valid_torrent_items:\n self.logger.debug(\"No valid torrents Found, test results [%d]:\", len(results))\n for result in results:\n torrent, flag = result\n if flag & TorrentFilter.TEST_FAILED_AUTHOR_NO_MATCH:\n self.logger.debug(\"%s: no matches in author regex (%s) => (%s)\", torrent.title, torrent.author,\n filter_.author_filter)\n elif flag & TorrentFilter.TEST_FAILED_NAME_NO_MATCH:\n self.logger.debug(\"%s: no matches in title regexs (%s) => (%s)\", torrent.title, torrent.title,\n \", \".join(filter_.name_filters))\n elif flag & TorrentFilter.TEST_FAILED_SIZE_TOO_BIG:\n self.logger.debug(\"%s: size too big (%d bytes) => (%d)\", torrent.title, torrent.size,\n filter_.size_filter[\"lt\"])\n elif flag & TorrentFilter.TEST_FAILED_SIZE_TOO_SMALL:\n self.logger.debug(\"%s: size too small (%d bytes) => (%d)\", torrent.title, torrent.size,\n filter_.size_filter[\"gt\"])\n else:\n self.logger.debug(\"%s: OK\", torrent.title)\n return valid_torrent_items",
"def torrent_files( self, torrents ):\n \n return dict( ( key, [\n pathlib.Path( f[ \"name\" ] ) for f in val[ \"files\" ]\n ] ) for key, val in self.mapped_rpc(\n torrents,\n ( \"files\", )\n ).items() )",
"def getFiles(fileNames):\n listBeatboxers = list()\n for fileName in fileNames:\n with open(fileName) as f:\n listBeatboxers.extend(f.readlines())\n return listBeatboxers",
"def download_list(self):\n # override file with unique tracks\n log.info(\"Overriding {} with unique tracks\".format(self.tracks_file))\n self._override_file()\n\n # Remove tracks to skip from tracks list\n if self.skip_file is not None:\n self.tracks = self._filter_tracks_against_skip_file()\n\n log.info(u\"Preparing to download {} songs\".format(len(self.tracks)))\n return self._download_list()",
"def filter_files(files, filter):\r\n filtered_files = []\r\n for file in files:\r\n if filter.lower() in file.lower():\r\n filtered_files.append(file)\r\n return filtered_files",
"def _get_file_list(file_filter, dat_path=DAT_PATH):\n file_list = []\n for (root, dirs, files,) in os.walk(dat_path, followlinks=True):\n for file_ in files:\n if file_.endswith('.{}'.format(file_filter)):\n file_list.append(os.path.join(root, file_))\n return file_list",
"def get_mirror_download_filenames(self, mirror: DatasetFallbackMirror):\n if self.config.archive_filenames:\n return _list_of_strings(self.config.archive_filenames)\n return [os.path.basename(path) for path in mirror.download_paths]",
"def _normalize_torrent_files(user_files):\n if not user_files:\n return None, None\n\n prefix = \"torrent__\"\n # if it's string-like and not a list|set|tuple, then make it a list\n # checking for 'read' attr since a single file handle is iterable but also needs to be in a list\n is_string_like = isinstance(user_files, (bytes, six.text_type))\n is_file_like = hasattr(user_files, \"read\")\n if is_string_like or is_file_like or not isinstance(user_files, Iterable):\n user_files = [user_files]\n\n # up convert to a dictionary to add fabricated torrent names\n norm_files = (\n user_files\n if isinstance(user_files, Mapping)\n else {prefix + str(i): f for i, f in enumerate(user_files)}\n )\n\n files = {}\n files_to_close = []\n for name, torrent_file in norm_files.items():\n try:\n fh = None\n if isinstance(torrent_file, bytes):\n # since strings are bytes on python 2, simple filepaths will end up here\n # just check if it's a file first in that case...\n # this does prevent providing more useful IO errors on python 2....but it's dead anyway...\n try:\n filepath = path.abspath(\n path.realpath(path.expanduser(torrent_file.decode()))\n )\n if path.exists(filepath): # pragma: no branch\n fh = open(filepath, \"rb\")\n files_to_close.append(fh)\n name = path.basename(filepath)\n except Exception:\n fh = None\n # if bytes, assume it's a raw torrent file that was downloaded or read from disk\n if not fh:\n fh = torrent_file\n elif hasattr(torrent_file, \"read\") and callable(torrent_file.read):\n # if hasattr('read'), assume this is a file handle from open() or similar...\n # there isn't a reliable way to detect a file-like object on both python 2 & 3\n fh = torrent_file\n else:\n # otherwise, coerce to a string and try to open it as a file\n filepath = path.abspath(\n path.realpath(path.expanduser(str(torrent_file)))\n )\n fh = open(filepath, \"rb\")\n files_to_close.append(fh)\n name = path.basename(filepath)\n\n # if using default name, let Requests try to figure out the filename to send\n # Requests will fall back to \"name\" as the dict key if fh doesn't provide a file name\n files[name] = fh if name.startswith(prefix) else (name, fh)\n except IOError as io_err:\n if io_err.errno == errno.ENOENT:\n raise TorrentFileNotFoundError(\n errno.ENOENT, os_strerror(errno.ENOENT), torrent_file\n )\n if io_err.errno == errno.EACCES:\n raise TorrentFilePermissionError(\n errno.ENOENT, os_strerror(errno.EACCES), torrent_file\n )\n raise TorrentFileError(io_err)\n return files, files_to_close",
"def get_files(self, filelist, dest, progtrack, version, header=None, pub=None):\n\n raise NotImplementedError",
"def filter_filenames(filenames: Iterable[str], pattern: str) -> List[str]:\n filtered_filenames = []\n for filename in filenames:\n if re.search(pattern, filename):\n filtered_filenames.append(filename)\n return filtered_filenames",
"def add_torrents( self, torrents, trash, dry_run = False ):\n \n for torrent in torrents:\n ( print if dry_run else self.log.verbose )(\n \"adding torrent to {!r} from {}\".format(\n torrent[ \"location\" ].as_posix(),\n torrent[ \"source\" ]\n )\n )\n \n if not dry_run:\n self.rpc(\n \"torrent-add\",\n {\n \"filename\" : torrent[ \"source\" ],\n \"download-dir\" : torrent[ \"location\" ].as_posix(),\n }\n )",
"def append_external_torrents(self, *a):\n self.external_torrents.extend(a)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function will take a torrent and select only the relevant files on it; if there's nothing to use in it, it will remove it from the queue.
|
def filter_torrent(self,torrent,file_filter):
try:
files_dict = self.wait_for_files(torrent,timeout=5*60)
files = []
for file_key in files_dict.keys():
for file_name in file_filter:
if re.match(file_name,files_dict[file_key]['name'],re.IGNORECASE) is not None:
files.append(file_key)
if not files: raise NoFilesException
self.transmission.change(torrent.fields['id'], files_wanted = files)
self.transmission.start(torrent['id'])
except NoFilesException:
self.transmission.remove(torrent['id'], delete_data=True)
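
The nested matching loop is the heart of `filter_torrent`; below is a standalone sketch of just that step, assuming the same shape of `files_dict` (keys mapping to dicts with a `'name'` entry). The helper name is not from the original.

import re


def select_wanted_files(files_dict, file_filter):
    # Return the keys of the files whose names match any of the patterns,
    # mirroring the loop inside filter_torrent.
    wanted = []
    for file_key, info in files_dict.items():
        for pattern in file_filter:
            if re.match(pattern, info['name'], re.IGNORECASE):
                wanted.append(file_key)
                break  # one matching pattern is enough for this file
    return wanted

For example, select_wanted_files({0: {'name': 'show.mkv'}, 1: {'name': 'notes.txt'}}, [r'.*\.mkv']) returns [0].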
|
[
"def cleanup_completed_torrents(self):\r\n torrent_ids = self.transmission.list().keys()\r\n torrent_ids = filter(lambda my_id: self.check_torrent_name(self.transmission.get_torrent(my_id)._fields['name'].value),torrent_ids)\r\n # Now we have only our interesting torrents\r\n for my_id in torrent_ids:\r\n self.logger.debug(\"ID : {0}\".format(my_id))\r\n if self.transmission.get_torrent(my_id).status in (\"seeding\",\"stopped\"):\r\n torrent_name = self.transmission.get_torrent(my_id)._fields['name'].value\r\n self.transmission.remove(my_id, delete_data=False)\r\n torrent_directory = self.conf['download_dir']\r\n #finding the torrent directory\r\n self.logger.debug(\"Checking {0}\".format(torrent_name))\r\n for folder in os.listdir(torrent_directory):\r\n if re.match(torrent_name,folder,re.IGNORECASE) is not None:\r\n torrent_directory = torrent_directory + \"/\" + folder\r\n self.logger.info(\"Found {0}\".format(torrent_name))\r\n break\r\n #going over the files in the torrent and taking only what we want\r\n self.organize_files(torrent_directory)",
"def add_torrents(self,torrents, download_dir=None, file_filter=None):\r\n if not isinstance(torrents,(tuple,list)):\r\n assert isinstance(torrents,basestring)\r\n torrents = [torrents]\r\n if download_dir is None: download_dir = self.download_dir\r\n for torrent in torrents:\r\n self.logger.debug(str(torrent))\r\n if torrent.startswith(\"magnet\"): # Handling magnet links\r\n running_torrent=self.transmission.add_torrent(\"1\",filename=torrent,download_dir = self.download_dir)\r\n else:\r\n running_torrent=self.transmission.add_torrent(torrent,download_dir = self.download_dir)\r\n #Currently disabling the filter file, it seems the magnet links are taking time\r\n #I'll probably filter in post processing on completed torrents. More space used, but still functional\r\n #self.filter_torrent(running_torrent,file_filter)\r",
"def remove_dir(option, opt_str, value, parser):\n\n current_dir = os.listdir(value)\n current_dir.sort()\n num_of_files = 0\n for each in current_dir:\n if each.endswith(\".torrent\"):\n file_path = value\n file_path += each\n each_file = open(file_path, \"r\")\n newfile = nuke(each_file)\n each_file.close()\n f = open(file_path, \"w\")\n for eachline in newfile:\n f.write(eachline)\n f.close()\n num_of_files += 1\n print each, \" ... completed!\"\n print \"Processed %d torrent files successfully.\" % (num_of_files)",
"def get_tsfiles(self):\n self.thread_pool = []\n for i in range(self.threads):\n t = Thread(target=self._keep_download, args=(\n self.restore_obj['downloaded_ts'], ))\n self.thread_pool.append(t)\n t.daemon = True\n t.start()\n for i in range(self.threads):\n self.thread_pool[i].join()\n pass",
"def torrent_removed(self, infohash):\n raise NotImplementedError('BasicApp.torrent_removed() removing missing torrents not implemented')",
"def update_downloaded_queue(self):\n if len(self.queue) >= 2:\n song_file = youtube.download_mp3(self.queue[1][1], \"audio_files/\")\n self.downloaded_queue.append(song_file)",
"def stop_torrents(options):\n torrent_hashes = requests.get(options['url'], headers=options['headers']).json()\n for t in torrent_hashes:\n requests.delete(options['url'] + '/{0}'.format(t['infoHash']), headers=options['headers'])\n return True",
"def process_files(self):\n while self.search_set:\n current_file = self.add_file(self.search_set.pop())\n self.save_file(current_file)",
"def _fill_input_queue(self):\n self.input_queue = Queue()\n\n for f in self.path.iterdir():\n if f.match(self.pattern):\n print(\"putting on queue: \", f)\n self.input_queue.put(f)",
"def torrent_files(server_url, torrent_hash):\n response = requests.get(server_url + '/torrents/' + torrent_hash)\n torrent = response.json()\n return torrent['files']",
"def queue_callback(self):\n selected_song, index = self._player.selected_song()\n response = requests.get('http://localhost:5000/song/' + selected_song)\n song_object = response.json()\n media_file = song_object['pathname']\n media_file = media_file.replace('/', '\\\\')\n song_name = song_object['title']\n self.queue_path.append(media_file)\n self.queue_name.append(song_name)\n self._player.list_songs_queue(self.queue_name)",
"def remove_files(self):\n flag = False\n _, _, files = next(os.walk(self.dest_path), (self.dest_path, [], []))\n for each in files:\n file_path = os.path.join(self.dest_path, each)\n try:\n os.remove(file_path)\n except PermissionError:\n flag = True\n print(\"Zipper: File: {} is being used by another process\".format(each))\n # Will not create and store the zip in todecode folder until all files are removed.\n if flag:\n self.remove_files()",
"def request_executor(credential, m_queue):\n local_ftp = TalkToFTP(credential)\n\n alive = True\n while alive:\n if m_queue.empty():\n \"\"\"\n this part is a ping every 5 mins to be sure that the server is still alive\n \"\"\"\n if (int(time.time()*1000)) % 300000 == 0:\n try:\n local_ftp.connect()\n local_ftp.get_folder_content(\"default\")\n local_ftp.disconnect()\n print(\"update\")\n except local_ftp.all_errors as e:\n print(e)\n continue\n continue\n else:\n tmp = m_queue.get()[1]\n # thread kill\n if tmp[0] == \"end\":\n alive = False\n continue\n\n if tmp[0] == \"create_file\":\n try:\n local_ftp.connect()\n local_ftp.file_transfer(tmp[1], tmp[2], tmp[3])\n local_ftp.disconnect()\n except local_ftp.all_errors as e:\n print(tmp)\n print(e)\n continue\n continue\n\n if tmp[0] == \"create_dir\":\n try:\n local_ftp.connect()\n local_ftp.create_folder(tmp[1])\n local_ftp.disconnect()\n except local_ftp.all_errors as e:\n print(tmp)\n print(e)\n continue\n continue\n if tmp[0] == \"remove_dir\":\n print(tmp)\n try:\n local_ftp.connect()\n local_ftp.remove_folder(tmp[1])\n local_ftp.disconnect()\n except local_ftp.all_errors as e:\n print(tmp)\n print(e)\n continue\n continue\n if tmp[0] == \"remove_file\":\n try:\n local_ftp.connect()\n local_ftp.remove_file(tmp[1])\n local_ftp.disconnect()\n except local_ftp.all_errors as e:\n print(tmp)\n print(e)\n continue\n continue",
"def cleanup(streams):\n\n current_date = time.time()\n nearest = min(streams, key=lambda f: abs(current_date - get_date_from_file(f)))\n\n for stream in streams:\n if stream == nearest:\n continue\n try:\n os.remove(os.path.join(STREAM_DIR, stream))\n except:\n pass\n return [os.path.join(STREAM_DIR, nearest)]",
"def remove_torrents( self, torrents, trash, dry_run = False ):\n \n torrents = tuple( torrents )\n \n for hash in torrents:\n ( print if dry_run else self.log.verbose )(\n \"removing torrent {}\".format( hash )\n )\n \n if not dry_run and torrents:\n locations = self.rpc(\n \"torrent-get\",\n {\n \"ids\" : torrents,\n \"fields\" : ( \"downloadDir\", \"name\", ),\n }\n )[ \"torrents\" ]\n for location in locations:\n anime_manager.filesystem.trash_item(\n pathlib.Path( location[ \"downloadDir\" ] ) / location[ \"name\" ],\n trash\n )\n self.rpc(\n \"torrent-remove\",\n {\n \"ids\" : torrents,\n \"delete-local-data\" : False,\n }\n )",
"def clean_sontek_files(self):\r\n\r\n file_list = copy.deepcopy(self.files)\r\n for filename in file_list:\r\n if os.path.basename(filename).startswith('Smba'):\r\n self.files.remove(filename)\r\n elif os.path.basename(filename).startswith('Loop'):\r\n self.files.remove(filename)",
"def queue_archive_files(ap, fq):\n if is_zipfile(ap):\n with ZipFile(ap, 'r') as archive:\n for archive_file in archive.namelist():\n if not archive_file.endswith('/') and ZipFile.getinfo(archive, archive_file).file_size > 0:\n fq.put(archive_file)\n elif tarfile.is_tarfile(ap):\n with tarfile.open(ap) as archive:\n for archive_file in archive:\n if archive_file.isreg() and archive_file.size > 0:\n fq.put(archive_file)\n else:\n raise IOError('could not match {0} to zip or tar format'.format(ap))",
"def run(self):\n start_time = time.time()\n file_info = self.remote_files[0]\n remote_file = ZapFile()\n remote_file.filename = file_info.filename\n remote_file.number_of_blocks = file_info.number_of_blocks\n remote_file.mark_as_remote()\n self.local_files.add(remote_file)\n child_threads = []\n\n for f in self.remote_files:\n remote_location = {}\n remote_location['ip'] = f.ip\n remote_location['port'] = f.port\n remote_location['name'] = f.name\n child_thread = self.remote_file_downloader(remote_file, f)\n child_thread.start()\n child_threads.append(child_thread)\n\n # How do we wait for them to finish?\n # TODO: what if I can't download the whole file?\n while not remote_file.is_downloaded():\n time.sleep(4)\n\n # Now all child threads are gone, I hope.\n remote_file.save_to_disk()\n zap_debug_print(\"remote file digest is \", remote_file.digest, \"file_info.digest is \", file_info.digest)\n if remote_file.digest != file_info.digest:\n # Our file does not match. Quit this thread and return an error\n zap_debug_print(\"Digest does not match! I should delete downloaded file!\")\n self.local_files.remove(remote_file)\n os.remove(remote_file.path)\n return False\n else:\n stop_time = time.time()\n log_string = \"file %s %s %s\" % (remote_file.filename, os.path.getsize(remote_file.path),\n stop_time - start_time)\n zap_log(log_string)\n print(\"Finished downloading %s. Find it at %s.\" % (remote_file.filename, remote_file.path))\n return True",
"def _on_merge_files(self, focus):\n #get parent of focus\n self.qr.put(('LOCKGUI', None))\n e_child = self.trout.find(\".//\" + focus)\n #if e_child is not collection/project give up\n if e_child.attrib['Type'] not in ['project', 'collection']:\n self.qr.put(('MESSAGEBOXSHOWWARNING2', \\\n (\"Not a collection\", \"Please select a collection not a file.\")))\n else:\n #list mp3 files which are immediate children of focus\n children = [c for c in e_child if c.attrib['Type'] is 'file']\n if len(children) > 1:\n second_of_silence = AudioSegment.silent(duration=1000) # in milliseconds second_of_silence = \n sound = AudioSegement.from_mp3(children[0].attrib['Location'])\n for c in children[1:]:\n sound += second_of_silence + AudioSegement.from_mp3(c.attrib['Location'])\n # now save new file in temp workspace?\n #create temp workspace\n #walk up tree creating list of ancestors, stop at project\n ancestors = list()\n this_child = e_child\n while this_child.attrib['Type'] is not 'project':\n e_parent = this_child.getparent()\n ancestors.insert(0, e_parent.tag)\n this_child = e_parent\n workspace = os.path.normpath('{}/Temp'.format(self.Pub2SD))\n for ancestor in ancestors:\n workspace = os.path.normpath('{}/{}'.format(workspace, ancestor.tag))\n os.makedirs(workspace, mode=0o777, exist_ok=True)\n filename = '{}/{}.mp3'.format(workspace,e_child.tag)\n sound.export(filename, 'mp3')\n e_parent = e_child.getparent()\n somevalues = self._read_mp3_tags(echild.attrib['Location'])\n self._add_a_file(afile, e_parent, somevalues)\n else:\n self.qr.put(('MESSAGEBOXSHOWWARNING2', \\\n (e_child.text, \"There are no immediate descendants which are mp3 files.\")))\n# (\"No mp3 files\", \"There are no immediate descendants which are mp3 files.\")))\n \n if etree.iselement(e_child):\n e_parent = e_child.getparent()\n# self.qr.put(('PRINT', [[kid.tag, e_parent.index(kid)] for kid in e_parent.getchildren()]))\n child_index = e_parent.index(e_child)\n if child_index > 0:\n child_index -= 1\n e_parent.remove(e_child)\n e_parent.insert(child_index, e_child)\n# self.qr.put(('PRINT', [[kid.tag, e_parent.index(kid)] for kid in e_parent.getchildren()]))\n self._on_reload_tree()\n self.qr.put(('SEEFOCUS', focus))\n self.qr.put(('UNLOCKGUI', None))\n #list children of focus which are mp3 files\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
At this point we'll query the Transmission database and get all appropriate files and filter them. We'll also stop the torrent.
|
def cleanup_completed_torrents(self):
torrent_ids = self.transmission.list().keys()
torrent_ids = filter(lambda my_id: self.check_torrent_name(self.transmission.get_torrent(my_id)._fields['name'].value),torrent_ids)
# Now we have only our interesting torrents
for my_id in torrent_ids:
self.logger.debug("ID : {0}".format(my_id))
if self.transmission.get_torrent(my_id).status in ("seeding","stopped"):
torrent_name = self.transmission.get_torrent(my_id)._fields['name'].value
self.transmission.remove(my_id, delete_data=False)
torrent_directory = self.conf['download_dir']
#finding the torrent directory
self.logger.debug("Checking {0}".format(torrent_name))
for folder in os.listdir(torrent_directory):
if re.match(torrent_name,folder,re.IGNORECASE) is not None:
torrent_directory = torrent_directory + "/" + folder
self.logger.info("Found {0}".format(torrent_name))
break
#going over the files in the torrent and taking only what we want
self.organize_files(torrent_directory)
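
The directory lookup inside the loop (matching the torrent name against the folders under the download directory) is the easiest part to get subtly wrong; here is a standalone sketch of that step using `os.path.join` instead of string concatenation. The helper name is illustrative, not from the original.

import os
import re


def find_torrent_directory(download_dir, torrent_name):
    # Mirror the lookup in cleanup_completed_torrents: return the folder whose
    # name matches the torrent name, or the download directory itself if none does.
    for folder in os.listdir(download_dir):
        if re.match(torrent_name, folder, re.IGNORECASE):
            return os.path.join(download_dir, folder)
    return download_dir

One caveat carried over from the original: the torrent name is used as a regular expression, so names containing dots or brackets match more loosely than intended; wrapping it in re.escape(torrent_name) would make the comparison literal.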
|
[
"def filter_torrent(self,torrent,file_filter):\r\n try:\r\n files_dict = self.wait_for_files(torrent,timeout=5*60)\r\n files = []\r\n for file_key in files_dict.keys():\r\n for file_name in file_filter:\r\n if re.match(file_name,files_dict[file_key]['name'],re.IGNORECASE) is not None:\r\n files.append(file_key)\r\n if not files: raise NoFilesException\r\n self.transmission.change(torrent.fields['id'], files_wanted = files)\r\n self.transmission.start(torrent['id'])\r\n except NoFilesException:\r\n self.transmission.remove(torrent['id'], delete_data=True)",
"def burn():\n file_list = Item.select().iterator()\n if not file_list:\n print 'Database empty, skipping.'\n return\n for f in file_list:\n if os.path.exists(f.filename):\n print 'Removing %s' % f.filename\n os.remove(f.filename)\n print 'Resetting database.'\n reset_database()",
"def do_maint (self):\n self.log.trace ('cleanup')\n now = time.time()\n zombies = []\n for k, fd in self.files.iteritems():\n if fd.queue and fd.waddr:\n self.log.trace (\"passing %r to %s\", fd.ident, fd.wname)\n fd.send_to (self.router_stream)\n if (fd.count == 0) and (now - fd.atime > 2 * CLOSE_DELAY): # you'd better use msg for this\n self.log.debug (\"forgetting %r\", fd.ident)\n zombies.append(k)\n for k in zombies:\n self.files.pop(k)",
"def on_pause_all(self):\n torrents = self.__torrent_client.get_torrents()\n\n for torrent in torrents:\n if torrent.status == 'downloading' or torrent.status == 'seeding':\n torrent.stop()",
"def get_tsfiles(self):\n self.thread_pool = []\n for i in range(self.threads):\n t = Thread(target=self._keep_download, args=(\n self.restore_obj['downloaded_ts'], ))\n self.thread_pool.append(t)\n t.daemon = True\n t.start()\n for i in range(self.threads):\n self.thread_pool[i].join()\n pass",
"def algorithm(self, parameters):\n logging.debug(\"Running subscription / fileset matching algorithm\")\n myThread = threading.currentThread()\n try:\n myThread.transaction.begin()\n self.databaseWork()\n myThread.transaction.commit()\n except:\n myThread.transaction.rollback()\n raise",
"def run(self):\n try:\n wakeup = None\n while True:\n now = Timestamp.now()\n wokeup = wakeup\n (pending, wakeup) = self.process_fetchers(now)\n if wakeup == -1:\n print \"Nothing to process.\"\n break\n remaining = wakeup - now\n if pending:\n print \"Waiting for \" + str(remaining)\n asyncore.loop(timeout = remaining, count = 1)\n else:\n ts = int(now)\n result = self.analyzer.analyze(ts)\n self.reporter.show(result, ts)\n if not self.continuous:\n break\n print \"sleeping for \" + str(remaining)\n time.sleep(remaining)\n print \"Wokeup ... \"\n except KeyboardInterrupt:\n sys.exit(0)\n finally:\n for f in self.fetchers:\n f.cleanup()",
"def on_resume_all(self):\n torrents = self.__torrent_client.get_torrents()\n\n for torrent in torrents:\n if torrent.status == 'stopped':\n torrent.start()",
"def _open_external_torrents(self):\n while self.external_torrents:\n arg = self.external_torrents.pop(0)\n df = self.open_torrent_arg(arg)\n yield df\n\n try:\n metainfo = df.getResult()\n except GetTorrent.GetTorrentException:\n self.logger.exception(\"Failed to get torrent\")\n continue\n\n if metainfo is not None:\n # metainfo may be none if IE passes us a path to a\n # file in its cache that has already been deleted\n # because it came from a website which set\n # Pragma:No-Cache on it.\n # See GetTorrent.get_quietly().\n\n df = self.multitorrent.torrent_known(metainfo.infohash)\n yield df\n known = df.getResult()\n if known:\n self.torrent_already_open(metainfo)\n else:\n df = self.open_torrent_metainfo(metainfo)\n if df is not None:\n yield df\n try:\n df.getResult()\n except TorrentAlreadyInQueue:\n pass\n except TorrentAlreadyRunning:\n pass\n self.open_external_torrents_deferred = None",
"def main(ticker_id, filter_text=''):\n # Build URL to make request to Edgar\n url = build_url(ticker_id)\n print(\"\\nGetting request for Edgar URL: \", url, \"with the filter of: \", filter_text, \"\\n\")\n page_data = make_request(url)\n # Parse page data for documents\n print(\"Getting soup and parsing docs..\\n\")\n # Make a list of links that contains the documents we want to extract from\n docs_contents = get_soup_contents(page_data, filter_text)\n xml_results = {} # Results from parsing XML contents\n results_files = [] # List of filenames with TSV data saved from docs\n for each in docs_contents:\n doc_title = each[0]\n doc_link = each[1]\n doc_filing_date = each[2]\n text_data = extract_data_from_text_files(doc_link) # Get text data from ticker docs\n if (text_data is not None):\n xml_data = extract_xml_content_from_text(text_data) # Extracted XML content to be converted\n # into TSV files.\n if xml_data is not None:\n print(\"Parsed XML contents for: \" + doc_title, \"from\", doc_filing_date, \"from\", ticker_id)\n # print(xml_data)\n xml_results[doc_title + '_' + ticker_id] = xml_data\n filename = ticker_id + '_' + doc_title + '_' + doc_filing_date + '_results'\n filename = filename.replace('/', '-') # Sometimes there are slash names within the doc title,\n # replace this as it makes an invalid filename.\n convert_to_tsv(filename, xml_results[doc_title + '_' + ticker_id])\n print(\"Converted XML results and saved as TSV in\", filename+'.tsv')\n results_files.append(filename+'.tsv')\n print(\"\\nResults of\" , ticker_id, \"for filter\", filter_text, \"saved to\", results_files)",
"def run(self):\n start_time = time.time()\n file_info = self.remote_files[0]\n remote_file = ZapFile()\n remote_file.filename = file_info.filename\n remote_file.number_of_blocks = file_info.number_of_blocks\n remote_file.mark_as_remote()\n self.local_files.add(remote_file)\n child_threads = []\n\n for f in self.remote_files:\n remote_location = {}\n remote_location['ip'] = f.ip\n remote_location['port'] = f.port\n remote_location['name'] = f.name\n child_thread = self.remote_file_downloader(remote_file, f)\n child_thread.start()\n child_threads.append(child_thread)\n\n # How do we wait for them to finish?\n # TODO: what if I can't download the whole file?\n while not remote_file.is_downloaded():\n time.sleep(4)\n\n # Now all child threads are gone, I hope.\n remote_file.save_to_disk()\n zap_debug_print(\"remote file digest is \", remote_file.digest, \"file_info.digest is \", file_info.digest)\n if remote_file.digest != file_info.digest:\n # Our file does not match. Quit this thread and return an error\n zap_debug_print(\"Digest does not match! I should delete downloaded file!\")\n self.local_files.remove(remote_file)\n os.remove(remote_file.path)\n return False\n else:\n stop_time = time.time()\n log_string = \"file %s %s %s\" % (remote_file.filename, os.path.getsize(remote_file.path),\n stop_time - start_time)\n zap_log(log_string)\n print(\"Finished downloading %s. Find it at %s.\" % (remote_file.filename, remote_file.path))\n return True",
"def processAllSRBFiles( self ):\n \n EDVerbose.DEBUG( strftime(\"%Y-%m-%d %H:%M:%S\") + \" *** EDPluginControlDLSArchiverv10.processAllSRBFiles() : processing all '.xml.clean' files if any.\" ) \n \n searchDir = self.getDataInput().getDropZonePath().getPath().getValue()\n fileList = []\n \n # process files by order of modification date (oldest first) \n allfileList = self.sortFiles( searchDir , 'clean' ) #self.sortFiles( searchDir , 'xml' ) \n \n # process only 100 files per round\n nbProcessableFiles = 100\n if len(allfileList) >= nbProcessableFiles:\n fileList = allfileList[0:nbProcessableFiles]\n else:\n fileList.extend(allfileList) \n \n \n for name in fileList : \n \n # get the full pathname of the xml file\n fullname = os.path.join( self.getDataInput().getDropZonePath().getPath().getValue(), name ) \n #edExtension = fullname.split( '.' )[-1]\n \n # # #\n # keep a copy of clean files before being registered with srb\n # # #\n #self.keepFileCopy(fullname, searchDir, 'srbinput') \n # # # \n \n # get the size of the current file\n edFileStats = os.stat( fullname )\n edFileSize = edFileStats [stat.ST_SIZE]\n \n # now call the SRB register on the xml file\n if ( edFileSize > 0 ) :# ignore zero size files\n \n \n print '' \n EDVerbose.DEBUG( strftime(\"%Y-%m-%d %H:%M:%S\") + \" *** EDPluginControlDLSArchiverv10 '%s' is being registered with the SRB\" % fullname ) \n print ''\n \n edSRBPlugin = self.loadPlugin( 'EDPluginExecSRBRegisterv10' )\n \n from XSDataExecSRBRegisterv10 import XSDataInputPluginExecSRBRegister\n from XSDataExecSRBRegisterv10 import XSDataResultPluginExecSRBRegister\n \n # build the plugin input\n xsDataPluginExecSRBRegister = XSDataInputPluginExecSRBRegister()\n \n # create the dropfile name\n edDropfileName = XSDataFile( XSDataString( os.path.join( self.getDataInput().getSrbDropFilePath().getPath().getValue(), '%s.drop' % name ) ) )\n # edDropfileName = XSDataFile(XSDataString(self.getDataInput().getSrbDropFilePath().getPath().getValue() + name + '.drop'))\n \n xsDataPluginExecSRBRegister.setSrbDropFileName( edDropfileName )\n \n #xsDataPluginExecSRBRegister.setSrbURIPattern( self.getDataInput().getSrbURIPattern() )\n \n #xsDataPluginExecSRBRegister.setSrbURIReplacement( self.getDataInput().getSrbURIReplacement() )\n \n xsDataPluginExecSRBRegister.setXmlIngestFileName( XSDataFile( XSDataString( fullname ) ) ) \n \n xsDataPluginExecSRBRegister.setIgnoreList( self.getDataInput().getIgnoreList() )\n \n xsDataPluginExecSRBRegister.setIgnoreSubdirList( self.getDataInput().getIgnoreSubdirList() )\n \n \n edSRBPlugin.setDataInput( xsDataPluginExecSRBRegister )\n \n # now run the plugin\n edSRBPlugin.connectSUCCESS( self.doSuccessSRB )\n edSRBPlugin.connectFAILURE( self.doFailureSRB )\n edSRBPlugin.executeSynchronous()\n \n else: # rename zero files so they are not processed a second time\n # this is currently already done before this step\n EDVerbose.DEBUG( strftime(\"%Y-%m-%d %H:%M:%S\") + \" *** EDPluginControlDLSArchiverv10 '%s' is a zero file. Renamed and ignored. \" % fullname )\n #os.rename( fullname, fullname + \".zero\" )\n shutil.move( fullname, fullname + \".zero\" )",
"def scan(self):\n\n def signal_handler(sig, frame):\n \"\"\"\n Handles Ctrl+C being pressed (SIGINT)\n :param sig: Unused\n :param frame: Unused\n :return: void\n \"\"\"\n self.cleanup(interrupted=True)\n\n signal.signal(signal.SIGINT, signal_handler)\n\n \"\"\"\n Start walking the directories...\n \"\"\"\n for root, sub_folders, files in os.walk(self.source):\n for filename in files:\n full_path = os.path.join(root, filename)\n if not self.filter.is_ignored_file(filename) and not self.filter.is_ignored_file(root):\n\n \"\"\"\n Skip files beginning with a period.\n If there is no file extension, use file name.\n \"\"\"\n if re.match(r\"^\\.\", filename):\n continue\n\n try:\n garbage, extension = os.path.splitext(full_path)\n try:\n extension = extension.split('.')[1]\n except IndexError:\n pass\n\n try:\n pattern = self.filter.patterns_by_filetype[extension]\n except KeyError:\n \"\"\" Key not found in lookup table in filter.py \"\"\"\n continue\n\n if pattern:\n if not self.quiet:\n if self.verbose:\n sys.stdout.write(\"\\nScanning {0}\".format(full_path))\n sys.stdout.flush()\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n\n line_number = 0\n random.seed(time.time())\n\n filep = open(full_path, 'r')\n\n if filep.read(3) == '/*!':\n \"\"\"\n Ignore vendor JavaScript files\n which commonly begin with '/*!' to tell YUI compressor\n not to remove their header comment.\n \"\"\"\n continue\n\n for line in filep:\n rnum = random.randint(1, 1000000)\n line_number += 1\n if self.filter.is_ignored_pattern(line):\n continue\n\n def search_in_line(_pattern, _line):\n match = _pattern.search(_line)\n if match:\n _line = _line.strip()\n\n if re.match('<|>', _line, re.MULTILINE):\n _line = re.sub('<', '<', _line, re.MULTILINE)\n _line = re.sub('>', '>', _line, re.MULTILINE)\n\n try:\n password = match.group(2).strip()\n except IndexError:\n password = match.group(1).strip()\n if not password:\n password = match.group(0).strip()\n\n if password:\n if not self.quiet:\n if self.color:\n print \"\\n{0}:{1}: {2}\".format(\n self.color.light_gray(full_path),\n self.color.light_blue(str(line_number)),\n _line.replace(password, self.color.red(password)\n ))\n else:\n print \"\\n{0}:{1}: {2}\".format(full_path, str(line_number), _line)\n\n \"\"\" Output to HTML file \"\"\"\n highlight = _line.replace(password,\n '<span class=\"highlight\">{0}</span>'.format(password))\n self.html = self.html.replace(\n '###OUTPUT###',\n '<tr>'\n '<td>{0}:<span class=\"line-number\">{1}</span></td><td><b>{2}</b>'\n '<span class=\"expand\" id=\"expand-{3}\">[+]</span>'\n '<div class=\"hidden\" id=\"hidden-{4}\"><code>{5}</code></div></td>'\n '</tr>###OUTPUT###'.format(\n full_path,\n str(line_number),\n password,\n str(rnum),\n str(rnum),\n highlight\n ))\n\n if type(pattern) is list:\n for p in pattern:\n search_in_line(p, line)\n else:\n search_in_line(pattern, line)\n filep.close()\n else:\n \"\"\" File doesn't match filter criteria \"\"\"\n continue\n except Exception, e:\n print full_path\n print '{0}: {1}'.format(str(e.__class__), str(e))\n raise\n self.cleanup()",
"def __filtering(self):\n\n\t\ttry:\n\t\t\tstart = 0\n\t\t\tlogs = self.__ssh()\n\t\t\tsession = self.db_conn.get_last_session()\n\n\t\t\tfor log in logs:\n\t\t\t\tif session == log.split() and session != 'none':\n\t\t\t\t\tlogger.info(\"Raw Log and Sesion Log Match, Start Filtering New Log From Last Session\")\n\t\t\t\t\tstart = 1\n\t\t\t\t\tcontinue\n\n\t\t\t\tif session == 'none' or start == 1:\n\t\t\t\t\tfor pattern in self.patterns:\n\t\t\t\t\t\tif '-' in pattern.split():\n\t\t\t\t\t\t\tif ' '.join(pattern.split()[1:]) in log[:]:\n\t\t\t\t\t\t\t\tlogger.info(\"Got - (%s) Pattern\", ' '.join(pattern.split()[1:]))\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif '+' in pattern.split():\n\t\t\t\t\t\t\tif ' '.join(pattern.split()[1:]) in log[:]:\n\t\t\t\t\t\t\t\tself.filtered_log.append(log.split())\n\t\t\t\t\t\t\t\tlogger.info(\"Got + (%s) Pattern\", ' '.join(pattern.split()[1:]))\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogger.warning(\"There is some unknown symbol on pattern - !! (%s)\", pattern)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t\tif len(self.filtered_log) != 0:\n\n\t\t\t\tlogger.info(\"Got (%s) New Record\", len(self.filtered_log))\n\n\t\t\t\tself.db_conn.insert_filtered_log(self.filtered_log)\n\n\t\t\telse:\n\t\t\t\tlogger.info(\"No New Log Detected\")\n\n\t\t\tsess_buff = logs[-1].split()\n\t\t\tlogger.info(\"Last Record On (%s)\", ' '.join(sess_buff))\n\t\t\tself.db_conn.insert_latest_session(sess_buff)\n\n\t\texcept Exception:\n\t\t\tlogger.exception(\"FILTERING LOG ERROR\")\n\t\t\tsys.exit(1)",
"def scan_for_new_files(self):\r\n\r\n self.db_manager.scan_for_new_files(TOP_LEVEL)",
"def algorithm(self, parameters = None):\n try:\n query = {}\n since = self.db.loadView('AsyncTransfer', 'lastFilesCleaningTime', query)['rows'][0]['key']\n end_time = self.monitoring_db.loadView('UserMonitoring', 'LastSummariesUpdate', query)['rows'][0]['key']\n except IndexError:\n self.logger.debug('No records to determine end time, waiting for next iteration')\n return\n except KeyError:\n self.logger.debug('Could not get results from CouchDB, waiting for next iteration')\n return\n except Exception, e:\n self.logger.exception('A problem occured when contacting couchDB: %s' % e)\n return\n updateUri = \"/\" + self.db.name + \"/_design/AsyncTransfer/_update/lastCleaningTime/LAST_CLEANING_TIME\"\n updateUri += \"?last_cleaning_time=%d\" % (end_time + 1)\n self.db.makeRequest(uri = updateUri, type = \"PUT\", decode = False)\n query = { 'startkey': since, 'endkey': end_time + 1 }\n all_LFNs = self.db.loadView('AsyncTransfer', 'LFNSiteByLastUpdate', query)['rows']\n self.logger.debug('LFNs to remove: %s' %all_LFNs)\n for lfnDetails in all_LFNs:\n lfn = lfnDetails['value']['lfn']\n location = lfnDetails['value']['location']\n pfn = self.apply_tfc_to_lfn( '%s:%s' %( location, lfn ) )\n logfile = open('%s/%s_%s.lcg-del.log' % ( self.log_dir, location, str(time.time()) ), 'w')\n command = 'export X509_USER_PROXY=%s ; source %s ; lcg-del -lv %s' % \\\n (self.opProxy, self.uiSetupScript, pfn)\n self.logger.debug(\"Running remove command %s\" % command)\n self.logger.debug(\"log file: %s\" % logfile.name)\n proc = subprocess.Popen(\n [\"/bin/bash\"], shell=True, cwd=os.environ['PWD'],\n stdout=logfile,\n stderr=logfile,\n stdin=subprocess.PIPE,\n )\n proc.stdin.write(command)\n stdout, stderr = proc.communicate()\n rc = proc.returncode\n logfile.close()\n\n return",
"def process_files(self):\n while self.search_set:\n current_file = self.add_file(self.search_set.pop())\n self.save_file(current_file)",
"def run():\n dtx = get_start_date()\n # get time of last sync\n limit = get_last_sync()\n # download only until the day before the last sync\n while dtx < limit.date():\n # get sleep data\n skey = check(dtx, 'sleep')\n if skey:\n add_entry(skey, fb.sleep(dtx))\n # get activity data\n skey = check(dtx, 'activities')\n if skey:\n add_entry(skey, fb.activities(dtx))\n # add one day\n dtx += datetime.timedelta(days=1)",
"def prepare_import_tellingen():\n assert os.getenv(\"TELLUS_OBJECTSTORE_PASSWORD\")\n importer = get_importer()\n\n log.debug(\"Delete all telling objects: \")\n Telling.objects.all().delete()\n log.debug(\"Delete telling objects done\")\n\n file_names = fetch_tellus_data_file_names()\n\n file_paths = [importer.download_tellus_data(file_name) for file_name in file_names]\n return file_paths"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Imports a photo from a photo import schema
|
def import_photo(photo: PhotoImportSchema):
pass
|
[
"def import_image(imgfn):\n r = rio.open(imgfn)\n metadata = r.meta.copy()\n img = r.read()\n \n return img, metadata",
"def import_image(filepath, landmark_resolver=same_name, normalise=True):\n kwargs = {'normalise': normalise}\n return _import(filepath, image_types,\n landmark_ext_map=image_landmark_types,\n landmark_resolver=landmark_resolver,\n landmark_attach_func=_import_object_attach_landmarks,\n importer_kwargs=kwargs)",
"def test_import_album():\n cwd = os.getcwd()\n test_image_1 = os.path.join(cwd, TEST_IMAGE_1)\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\"--verbose\", \"--album\", \"My New Album\", test_image_1],\n terminal_width=TERMINAL_WIDTH,\n )\n\n assert result.exit_code == 0\n\n import_data = parse_import_output(result.output)\n file_1 = pathlib.Path(test_image_1).name\n uuid_1 = import_data[file_1]\n photo_1 = Photo(uuid_1)\n\n assert photo_1.filename == file_1\n albums = photo_1.albums\n assert len(albums) == 1\n assert albums[0].title == \"My New Album\"",
"def photoscript_photo(photo: osxphotos.PhotoInfo) -> photoscript.Photo:\n return photoscript.Photo(photo.uuid)",
"def test_import_clear_metadata():\n cwd = os.getcwd()\n test_image_1 = os.path.join(cwd, TEST_IMAGE_1)\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\n \"--verbose\",\n \"--clear-metadata\",\n test_image_1,\n ],\n terminal_width=TERMINAL_WIDTH,\n )\n\n assert result.exit_code == 0\n\n import_data = parse_import_output(result.output)\n file_1 = pathlib.Path(test_image_1).name\n uuid_1 = import_data[file_1]\n photo_1 = Photo(uuid_1)\n\n assert photo_1.filename == file_1\n assert not photo_1.title\n assert not photo_1.description\n assert not photo_1.keywords",
"def importShape(importType, fileName):\n\n #Check to see what type of file we're working with\n if importType == ImportTypes.STEP:\n return importStep(fileName)",
"def import_instance(DryRun=None, Description=None, LaunchSpecification=None, DiskImages=None, Platform=None):\n pass",
"def import_image(self): \r\n \r\n self.img = mpimg.imread('C:/Users/adeq/Desktop/{}.png'.format(import_name))\r\n self.image = self.imp\r\n return self.image",
"def import_photos_from_devices(self, import_photos_from_devices):\n\n self._import_photos_from_devices = import_photos_from_devices",
"def get_upload(self, flickr_photo_id):\n log.info('Fetching photo by flickr_photo_id: %s' % flickr_photo_id)\n schema_obj = getattr(schema, 'Upload')\n query_obj = self.io.session.query(schema_obj).filter(\n schema_obj.flickr_photo_id == flickr_photo_id)\n res = self.alchemy_fetch_validate(query_obj)\n if len(res) > 0:\n return res[0]\n else:\n return None",
"def test_import_function_template():\n cwd = os.getcwd()\n test_image_1 = os.path.join(cwd, TEST_IMAGE_1)\n function = os.path.join(cwd, \"examples/template_function_import.py\")\n with TemporaryDirectory() as tempdir:\n test_image = shutil.copy(\n test_image_1, os.path.join(tempdir, \"MyAlbum_IMG_0001.jpg\")\n )\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\n \"--verbose\",\n \"--album\",\n \"{function:\" + function + \"::example}\",\n test_image,\n ],\n terminal_width=TERMINAL_WIDTH,\n )\n\n assert result.exit_code == 0\n\n import_data = parse_import_output(result.output)\n file_1 = pathlib.Path(test_image).name\n uuid_1 = import_data[file_1]\n photo_1 = Photo(uuid_1)\n\n assert photo_1.filename == file_1\n albums = [a.title for a in photo_1.albums]\n assert albums == [\"MyAlbum\"]",
"def import_image(DryRun=None, Description=None, DiskContainers=None, LicenseType=None, Hypervisor=None, Architecture=None, Platform=None, ClientData=None, ClientToken=None, RoleName=None):\n pass",
"def test_import_exiftool():\n cwd = os.getcwd()\n test_image_1 = os.path.join(cwd, TEST_IMAGE_1)\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\n \"--verbose\",\n \"--clear-metadata\",\n \"--exiftool\",\n test_image_1,\n ],\n terminal_width=TERMINAL_WIDTH,\n )\n\n assert result.exit_code == 0\n\n import_data = parse_import_output(result.output)\n file_1 = pathlib.Path(test_image_1).name\n uuid_1 = import_data[file_1]\n photo_1 = Photo(uuid_1)\n\n assert photo_1.filename == file_1\n assert photo_1.title == TEST_DATA[TEST_IMAGE_1][\"title\"]\n assert photo_1.description == TEST_DATA[TEST_IMAGE_1][\"description\"]\n assert photo_1.keywords == TEST_DATA[TEST_IMAGE_1][\"keywords\"]\n lat, lon = photo_1.location\n assert lat == approx(TEST_DATA[TEST_IMAGE_1][\"lat\"])\n assert lon == approx(TEST_DATA[TEST_IMAGE_1][\"lon\"])",
"def test_document_image(self):\n importer = self.assertImportZip(\n 'test_import_image.zip',\n ['/root/folder/document',\n '/root/folder/pictures/chocobo',\n '/root/folder/pictures',\n '/root/folder'])\n self.assertEqual(importer.getProblems(), [])\n self.assertItemsEqual(\n self.root.folder.objectIds(),\n ['document', 'pictures'])\n self.assertItemsEqual(\n self.root.folder.pictures.objectIds(),\n ['chocobo'])\n\n document = self.root.folder.document\n image = self.root.folder.pictures.chocobo\n self.assertTrue(IDocument.providedBy(document))\n\n version = document.get_viewable()\n self.assertFalse(version is None)\n self.assertTrue(IDocumentVersion.providedBy(version))\n self.assertEqual(document.get_editable(), None)\n self.assertEqual(version.get_title(), u'New picture shoots')\n\n service = getUtility(IReferenceService)\n # Hopefully there is only one image in the document so this\n # should match the only link we have\n reference = service.get_reference(version, LINK_REFERENCE_TAG)\n self.assertFalse(reference is None)\n self.assertItemsEqual(\n list(service.get_references_from(version)), [reference])\n self.assertEqual(reference.target, image)\n\n self.assertDocumentEqual(\n version, 'test_imported_image.docxml',\n image_reference=reference.tags[1])",
"def test_import_dup_check():\n say(\"Please click Import when prompted by Photos to import duplicate photo.\")\n\n cwd = os.getcwd()\n test_image_1 = os.path.join(cwd, TEST_IMAGE_1)\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\"--verbose\", \"--dup-check\", test_image_1],\n terminal_width=TERMINAL_WIDTH,\n )\n\n assert result.exit_code == 0\n\n import_data = parse_import_output(result.output)\n file_1 = pathlib.Path(test_image_1).name\n uuid_1 = import_data[file_1]\n photo_1 = Photo(uuid_1)\n\n assert photo_1.filename == file_1",
"def load_images():\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n ImageChoice.query.delete()\n\n # Read u.user file and insert data\n for row in open(\"static/seed_data/images2\"):\n row = row.rstrip()\n image_id , image_path = row.split(\"|\")\n image = ImageChoice(image_id=image_id, url=image_path)\n # We need to add to the session or it won't ever be stored\n db.session.add(image)\n print(image)\n\n # Once we're done, we should commit our work\n db.session.commit()",
"def test_get_image_schema(self):\n schema = \"image\"\n body = self.schemas_client.show_schema(schema)\n self.assertEqual(\"image\", body['name'])",
"def test_get_images_schema(self):\n schema = \"images\"\n body = self.schemas_client.show_schema(schema)\n self.assertEqual(\"images\", body['name'])",
"def test_import_parse_date(tmp_path: pathlib.Path):\n\n # set up test images\n os.environ[\"TZ\"] = \"US/Pacific\"\n cwd = os.getcwd()\n test_image_source = os.path.join(cwd, TEST_IMAGE_NO_EXIF)\n\n default_date = datetime(1999, 1, 1, 0, 0, 0)\n test_data = [\n [\"img_1234_2020_11_22_12_34_56.jpg\", datetime(2020, 11, 22, 12, 34, 56)],\n [\"img_1234_20211122.jpg\", datetime(2021, 11, 22, 0, 0, 0)],\n [\"19991231_20221122.jpg\", datetime(2022, 11, 22, 0, 0, 0)],\n [\"test_parse_date.jpg\", default_date],\n ]\n images = []\n for img in [x[0] for x in test_data]:\n test_file = tmp_path / img\n shutil.copy(test_image_source, test_file)\n images.append(test_file)\n\n # set file time to default date\n os.utime(test_file, (default_date.timestamp(), default_date.timestamp()))\n\n runner = CliRunner()\n result = runner.invoke(\n import_cli,\n [\n \"--verbose\",\n \"--parse-date\",\n \"img_*_%Y_%m_%d_%H_%M_%S|img_{4}_%Y%m%d|_%Y%m%d.\",\n *[str(x) for x in images],\n ],\n terminal_width=TERMINAL_WIDTH,\n )\n assert result.exit_code == 0\n\n # verify that the date was parsed correctly\n photosdb = PhotosDB()\n for test_case in test_data:\n photo = photosdb.query(QueryOptions(name=[test_case[0]]))[0]\n assert datetime_remove_tz(photo.date) == test_case[1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Save objects. >>> Storage.save_objects(objects=[0,1,2],save_path='__cases/obj.pk') True
|
def save_objects(objects, save_path):
if os.path.exists(save_path):
os.remove(save_path)
with open(save_path, 'wb') as file:
pickle.dump(objects, file, pickle.HIGHEST_PROTOCOL)
return True
|
[
"def save_all(cls, objects):\n db.session.bulk_save_objects(objects)\n db.session.commit()",
"def save_objects(path, frame, objects):\n full_path = path + str(frame) + '/'\n create_folder(full_path)\n cnt = 0\n for obj in objects:\n img = Image.fromarray(obj.cutout)\n img.save(full_path + 'obj-' + str(cnt) + '.png')\n cnt += 1",
"def savePrefObjects():\n pass",
"def request_save_objects(self):\n if len(self.objects) >= self.batch_size:\n self.save_objects()",
"def save_object(obj, file_name, mode=0600, safe=False):\n data = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)\n save(data, file_name, mode, safe)",
"def save_object(obj, path):\r\n _, ext = os.path.splitext(path)\r\n try:\r\n if ext.lower() == '.pkl':\r\n with bz2.BZ2File(path, 'wb') as fid:\r\n #with open(path, 'wb') as fid:\r\n pickle.dump(obj, fid, protocol=pickle.HIGHEST_PROTOCOL)\r\n elif ext.lower() == '.npy':\r\n np.save(path, obj)\r\n else:\r\n raise ValueError(f\"Invalid extension: {ext}, use '.pkl' or '.npy'\")\r\n except:\r\n traceback.print_exc()",
"def save_pickled(self, obj, filename):\n path = os.path.join(pickle_dir, filename)\n with open(path, 'wb+') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)",
"def testSaveWriteObjects(self):\n\t\tf = IECore.FileIndexedIO( os.path.join( \".\", \"test\", \"FileIndexedIO.fio\" ), [], IECore.IndexedIO.OpenMode.Write)\n\t\tself.assertEqual( f.path() , [] )\n\t\tself.assertEqual( f.currentEntryId() , \"/\" )\n\t\ttxt = IECore.StringData(\"test1\")\n\t\ttxt.save( f, \"obj1\" )\n\t\ttxt.save( f, \"obj2\" )\n\t\tdel f\n\n\t\tf2 = IECore.FileIndexedIO( os.path.join( \".\", \"test\", \"FileIndexedIO.fio\" ), [], IECore.IndexedIO.OpenMode.Read)\n\t\tself.assertEqual( txt, IECore.Object.load( f2, \"obj1\" ) )\n\t\tself.assertEqual( txt, IECore.Object.load( f2, \"obj2\" ) )",
"def save(self):\n json_dict = {}\n for key_id in self.__objects.keys():\n json_dict[key_id] = self.__objects[key_id].to_dict()\n with open(self.__file_path, \"w\") as f:\n f.write(json.dumps(json_dict))",
"def save(self):\n my_dict = {}\n for obj in self.__objects:\n my_dict[obj] = self.__objects[obj].to_dict()\n\n with open(self.__file_path, mode='w') as f:\n json.dump(my_dict, f)",
"def save(self, segids, filename=None, file_format='obj'):\n if type(segids) != list:\n segids = [ segids ]\n\n meshdata = self.get(segids)\n\n if file_format != 'obj':\n raise NotImplementedError('Only .obj is currently supported.')\n\n if not filename:\n filename = str(segids[0])\n if len(segids) > 1:\n filename = \"{}_{}\".format(segids[0], segids[-1])\n\n with open('./{}.obj'.format(filename), 'wb') as f:\n objdata = mesh_to_obj(meshdata, progress=self.vol.progress)\n objdata = '\\n'.join(objdata) + '\\n'\n f.write(objdata.encode('utf8'))",
"def save_object_npy(alfpath, dico, object):\n alfpath = Path(alfpath)\n status = check_dimensions(dico)\n if status != 0:\n raise ValueError('Dimensions are not consistent to save all arrays in ALF format: ' +\n str([(k, v.shape) for k, v in dico.items()]))\n\n for k, v in dico.items():\n np.save(alfpath / (object + '.' + k + '.npy'), v)",
"def test_save(self):\n storage = FileStorage()\n new_dict = {}\n for key, value in classes.items():\n instance = value()\n instance_key = instance.__class__.__name__ + \".\" + instance.id\n new_dict[instance_key] = instance\n save = FileStorage._FileStorage__objects\n FileStorage._FileStorage__objects = new_dict\n storage.save()\n FileStorage._FileStorage__objects = save\n for key, value in new_dict.items():\n new_dict[key] = value.to_dict()\n string = json.dumps(new_dict)\n with open(\"file.json\", \"r\") as f:\n js = f.read()\n self.assertEqual(json.loads(string), json.loads(js))",
"def save(self, key, obj):\n # Pickle and save to disk\n full_path = self.get_full_path(key)\n mkpath(full_path)\n pickle.dump(obj, open(full_path, 'wb'))\n if self.verbose > 1:\n print \"[{}: log] Saved file: {}\".format(self.pp, full_path)",
"def store_pickle(obj, full_path):\n with open(full_path, 'wb') as f:\n pickle.dump(obj, f)",
"def store(self, objs, keys, complete_sets=[]):\r\n pass",
"def pickle_obj(obj: Any, filename: str) -> None:\n with open(filename, 'wb') as f:\n pickle.dump(obj, f)",
"def save_object(object, filename, protocol = cPickle.HIGHEST_PROTOCOL):\r\n gc.disable()\r\n if filename.endswith('.zip'):\r\n f = gzip.GzipFile(filename, 'wb')\r\n else:\r\n f = open(filename, 'wb')\r\n# cPickle.dump(object, f, protocol)\r\n dill.dump(object, f)\r\n f.close()\r\n gc.enable()",
"def concurrent_save(self,\n objs: tp.Iterable[tp.Any],\n keys: tp.Iterable[str],\n upload_params=None, progress=True, **kwargs) -> tp.List[str]:\n\n objs = list(objs)\n keys = list(keys)\n\n if len(objs) != len(keys):\n raise SaverError(f'Number of objects ({len(objs)}) != number of keys ({len(keys)})')\n\n futures = []\n for key, obj in zip(keys, objs):\n future = self.async_save(obj=obj, key=key, upload_params=upload_params, progress=False, **kwargs)\n futures.append(future)\n\n # TODO add retries handling\n for future in tqdm.tqdm(as_completed(futures),\n total=len(futures), desc=f\"Concurrent save by {self.saver.__class__.__name__} \"\n f\"over {self.saver.remote.name}\"):\n if future.exception() is not None:\n raise future.exception()\n\n return [x.result() for x in futures]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load objects. >>> Storage.load_objects(saved_path='__cases/obj.pk') [0, 1, 2]
|
def load_objects(saved_path):
objects = None
if os.path.exists(saved_path):
with open(saved_path, 'rb') as file:
objects = pickle.load(file)
return objects
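A minimal round-trip sketch using save_objects and load_objects as defined above; the '__cases' directory and file name are illustrative, and the directory is created first so the save succeeds.

import os

os.makedirs('__cases', exist_ok=True)
assert save_objects(objects=[0, 1, 2], save_path='__cases/obj.pk') is True
assert load_objects(saved_path='__cases/obj.pk') == [0, 1, 2]
# A missing path simply yields None rather than raising.
assert load_objects(saved_path='__cases/missing.pk') is None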
|
[
"def load_obj(load_dir):\r\n return pickle.load(open(load_dir, 'rb'))",
"def loadPrefObjects():\n pass",
"def load_obj(path):\n with open(path, \"rb\") as f:\n return pickle.load(f)",
"def load_object(self, obj):\n pass",
"def load_pickled_objects(name, bucket=CAMD_S3_BUCKET):\n client = boto3.client(\"s3\")\n prefix = \"{}/{}\".format(META_AGENT_PREFIX, name)\n all_objs = []\n for obj_name in [\"agent_pool\", \"experiment\", \"analyzer\"]:\n try:\n raw_obj = client.get_object(\n Bucket=bucket, Key=\"{}/{}.pickle\".format(prefix, obj_name),\n )[\"Body\"]\n except botocore.exceptions.ClientError:\n raise ValueError(\n \"{} does not exist in s3, cannot update agent pool\".format(name)\n )\n obj = pickle.loads(raw_obj.read())\n all_objs.append(obj)\n return all_objs",
"def load_saves() -> List['Save']:\n\n # Check if the save folder exists; If not, create it\n if not os.path.exists(Save.SAVE_FOLDER):\n os.makedirs(Save.SAVE_FOLDER)\n\n files = []\n for entry in os.listdir(Save.SAVE_FOLDER):\n if os.path.isdir(f\"{Save.SAVE_FOLDER}/{entry}\"):\n username = entry\n files.append(Save(username))\n saves = []\n for i in range(len(files)):\n try:\n saves.append(files[i])\n saves[i].load()\n except FileNotFoundError:\n print(f\"issue loading {files[i].get_username()}\")\n return saves",
"def load_obj(name):\r\n with open(name + '.pkl', 'rb') as f:\r\n return pickle.load(f)",
"def load_batch(filepath, batch_size):\n # First we need to find how many pickled objects there are in the file\n # I don't know any more efficient way to do this than to just load and discard every object in the file\n _file = open(filepath, mode='rb')\n obj_count = 0\n while True:\n try:\n pickle.load(_file)\n obj_count += 1\n except:\n break\n\n _file.close()\n\n if batch_size > obj_count:\n raise RuntimeError(f'Batch size request, {batch_size} exceeds number of objects in file, {obj_count}')\n\n batch_indicies = random.choices(range(obj_count), k=batch_size)\n print(batch_indicies)\n\n return_list = []\n\n _file = open(filepath, mode='rb')\n i = 0\n while i <= max(batch_indicies):\n obj_current = pickle.load(_file)\n if i in batch_indicies:\n return_list.append(obj_current)\n\n i += 1\n\n _file.close()\n\n return return_list",
"def load_data(self, data):\n self.objects = []\n if not self.nested_list and not self.nested_dict:\n for d in data:\n obj = self.build_obj(**d)\n self.objects.append(obj)\n elif self.nested_list:\n for key, obj_set in data.iteritems():\n for obj_data in obj_set:\n obj = self.model(\n key,\n **obj_data\n )\n self.objects.append(obj)\n elif self.nested_dict:\n for key, obj_set in data.iteritems():\n for obj_key, obj_data in obj_set.iteritems():\n obj = self.model(\n key,\n obj_key,\n obj_data\n )\n self.objects.append(obj)",
"def load_results(pickle_path):\n\n # make sure the the files actually exist\n if not results_exist(pickle_path):\n return None\n\n # if only one file path is given\n if isinstance(pickle_path, str):\n with open(pickle_path, 'rb') as f:\n return pickle.load(f)\n \n # if a list of paths is provided, return a list of objects\n results = []\n for p in pickle_path:\n with open(p, 'rb') as f:\n results.append(pickle.load(f))\n\n return results",
"def recover_objects(worker):\n if db_instance():\n raw_objs = db_instance().hgetall(worker.id)\n objects = {\n int(key.decode(\"utf-8\")): deserialize(value)\n for key, value in raw_objs.items()\n }\n worker._objects = objects\n return worker",
"def load_models(self, load_path=None):\n\n # Load the saved file as a dictionary\n if load_path is not None:\n checkpoint = torch.load(load_path)\n\n # Store the saved models\n self.ac_nn.load_state_dict(checkpoint['actor_critic_model'])\n self.ac_optimizer.load_state_dict(checkpoint['ac_optimizer'])\n\n # Evaluate the neural network to ensure the weights were properly loaded\n self.ac_nn.eval()\n\n return",
"def _load_model_object(self):\n # load models for objects\n path = xml_path_completion(furniture_xmls[self._furniture_id])\n logger.debug(\"load furniture %s\" % path)\n resize_factor = None\n if self._manual_resize is not None:\n resize_factor = 1 + self._manual_resize\n elif self._config.furn_size_rand != 0:\n rand = self._init_random(1, \"resize\")[0]\n resize_factor = 1 + rand\n self._objects = MujocoXMLObject(path, debug=self._debug, resize=resize_factor)\n self._objects.hide_visualization()\n part_names = self._objects.get_children_names()\n\n # furniture pieces\n lst = []\n for part_name in part_names:\n lst.append((part_name, self._objects))\n\n self.mujoco_objects = OrderedDict(lst)\n self.n_objects = len(self.mujoco_objects)\n self.mujoco_equality = self._objects.equality",
"def test_pickle_load():\n for entry in pickle_result:\n pickle.loads(entry)",
"def _load_objects_from_settings(self, settings_path, obj_type, sub_condition=None):\r\n try:\r\n settings = self._host_api.settings(settings_path)\r\n except KeyError:\r\n settings = None\r\n\r\n objects = []\r\n if settings is not None:\r\n if isinstance(obj_type, str):\r\n obj_type = [obj_type]\r\n\r\n if sub_condition is None:\r\n sub_condition = BaseUtils.do_nothing\r\n\r\n for obj in settings.ls():\r\n if obj.type in obj_type:\r\n if sub_condition(obj):\r\n objects.append(TrObject(obj))\r\n return objects",
"def reload(self):\n\n if os.access(self.__file_path, os.F_OK):\n with open(self.__file_path, mode='r') as f:\n j_file = json.load(f)\n for key, value in j_file.items():\n obj = eval(value[\"__class__\"])(**value)\n self.__objects[key] = obj",
"def load_cv_folds(filepath):\n folds = pickle.load(open(filepath, \"rb\"))\n\n if not isinstance(folds, list):\n raise RuntimeError(\"Loaded a non-list item as CV folds.\")\n\n if not isinstance(folds[0], tuple) or not len(folds[0]) == 3:\n print(type(folds[0]))\n print(len(folds))\n raise RuntimeError(\"CV-folds object is malformed\")\n\n return folds",
"def load_object_from(source_path):\n abs_source_path = os.path.abspath(source_path)\n f = open(abs_source_path, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj",
"def loadPickledMDP(load_from_file):\n mdp_file = os.path.join(mdp_obj_path, load_from_file)\n print \"Loading file {}.\".format(mdp_file)\n with open(mdp_file) as _file:\n list_to_unpack = pickle.load(_file)\n if not isinstance(list_to_unpack, list):\n list_to_unpack = [list_to_unpack]\n list_to_unpack.append(mdp_file)\n return list_to_unpack"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract inputs from features dictionary.
|
def _extract_input(self, feat_dict):
sa_xyz = feat_dict['sa_xyz']
sa_features = feat_dict['sa_features']
assert len(sa_xyz) == len(sa_features)
return sa_xyz, sa_features
|
[
"def get_input_features(self):\n input_features = self.get_input_example().columns\n\n return list(input_features)",
"def input_features(self, x):\n x = self.add_on_layers(x)\n return x",
"def _extract_features(self, preprocessed_inputs): \n preprocessed_inputs = shape_utils.check_min_image_dim(33, preprocessed_inputs)\n image_features = self.net(ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple))\n layouts = {self._used_nodes[i]: image_features[i] for i, x in enumerate(self._used_nodes) if x}\n feature_maps = self._feature_map_generator(layouts)\n if self._additional_layer_depth:\n final_feature_map = []\n for idx, feature in enumerate(feature_maps.values()):\n feature = tf.keras.layers.Conv2D(filters=self._additional_layer_depth,\n kernel_size=1,\n strides=[1, 1],\n use_bias=True,\n data_format=self._data_format,\n name='conv1x1_'+str(idx))(feature)\n feature = tf.keras.layers.BatchNormalization()(feature, training=self._is_training)\n feature = tf.keras.layers.ReLU(max_value=6)(feature)\n final_feature_map.append(feature)\n return final_feature_map\n else:\n return feature_maps.values() \n \n # with tf.variable_scope(\"EfficientNetFeatureExtractor\", reuse=tf.AUTO_REUSE):\n # # architecture \n # _, endpoints = build_model_base(preprocessed_inputs, self._network_name, training=self._is_training)\n # arch_feature_nodes = [x for x in self._feature_map_layout[\"from_layer\"] if x]\n # arch_features = {x: endpoints[x] for x in arch_feature_nodes}\n # feature_maps = self._feature_map_generator(arch_features)\n # if self._additional_layer_depth:\n # final_feature_map = []\n # for idx, feature in enumerate(feature_maps.values()):\n # feature = tf.keras.layers.Conv2D(filters=self._additional_layer_depth,\n # kernel_size=1,\n # strides=[1, 1],\n # use_bias=True,\n # data_format=self._data_format,\n # name='conv1x1_'+str(idx))(feature)\n # feature = tf.keras.layers.BatchNormalization()(feature, training=self._is_training)\n # feature = tf.keras.layers.ReLU(max_value=6)(feature)\n # final_feature_map.append(feature)\n # return final_feature_map\n # else:\n # return feature_maps ",
"def _build_lm_inputs(self, features):\n targets = features[\"targets\"]\n target_tags = features[\"target_tags\"]\n\n if self._hparams.mode == tf_estimator.ModeKeys.PREDICT:\n target_tags = tf.tile(target_tags, [self._hparams.beam_width, 1, 1, 1])\n\n # Construct LM inputs.\n inputs = common_layers.shift_right(targets, pad_value=target_tags)\n inputs_length = common_layers.length_from_embedding(targets) + 1\n inputs = common_layers.flatten4d3d(inputs)\n\n return inputs, inputs_length",
"def _get_features_and_labels_from_input_fn(self, input_fn, mode):\n result = self._call_input_fn(input_fn, mode)\n input_hooks = []\n if isinstance(result, tf.data.Dataset):\n iterator = result.make_initializable_iterator()\n #input_hooks.append(_DatasetInitializerHook(iterator))\n if mode == tf.estimator.ModeKeys.TRAIN:\n input_hooks.append(_DatasetInitializerHook(iterator, self.train_input_hook.feed_fn))\n else: # mode == tf.estimator.ModeKeys.EVAL\n input_hooks.append(_DatasetInitializerHook(iterator, self.eval_input_hook.feed_fn))\n result = iterator.get_next()\n if isinstance(result, (list, tuple)):\n if len(result) != 2:\n raise ValueError(\n 'input_fn should return (feautures, labels) as a len 2 tuple.')\n return result[0], result[1], input_hooks\n return result, None, input_hooks",
"def _copy_features(sg_features: tf.train.Feature,\n ex_features_dict: Dict[str, tf.train.Feature]):\n for feature_name, ex_feature in ex_features_dict.items():\n sg_feature = sg_features.feature.get(feature_name, None)\n if sg_feature is None:\n # Feature is empty for that node. Fail for now, ragged tensors are not\n # supported by this conversion routine.\n raise ValueError(\"Feature '{}' is missing from input: {}\".format(\n feature_name, sg_features))\n ex_feature.MergeFrom(sg_feature)",
"def input_fn():\n features_placeholders = {}\n for name, t in features.items():\n shape_list = t.get_shape().as_list()\n shape_list[0] = default_batch_size\n shape = tensor_shape.TensorShape(shape_list)\n\n features_placeholders[name] = array_ops.placeholder(\n dtype=t.dtype, shape=shape, name=t.op.name)\n labels = None # these are not known in serving!\n return InputFnOps(features_placeholders, labels, features_placeholders)",
"def get_features(label, features):\n\tfeature_responses = {}\n\tfor feature in features[label]:\n\t\tresponse = input(feature + ' ')\n\t\tif response:\n\t\t\tfeature_responses[feature] = response\n\t\telse:\n\t\t\tfeature_responses[feature] = \"N/A\"\n\treturn feature_responses",
"def get_features(input_dir):\n kps, des = None, None\n sift = cv2.xfeatures2d.SIFT_create()\n\n for filename in os.listdir(input_dir):\n if not filename.endswith(\".png\"):\n continue\n img = io.imread(input_dir + filename)\n kp, d = sift.detectAndCompute(img, None)\n\n if des is None:\n kps = kp\n des = d\n else:\n kps = np.concatenate([kps, kp], axis=0)\n des = np.concatenate([des, d], axis=0)\n return kps, des",
"def computeFeatures(img, features=...) -> features:\n ...",
"def extract_inputs(self, transform_inputs: dict[str, Input]) -> list[Input]:\n job_inputs = {i.id: i for i in self.job_in.inputs}\n missing_ids = job_inputs.keys() - transform_inputs.keys()\n if missing_ids:\n raise HTTPException(\n 400,\n f\"The job input(s) {', '.join(repr(i) for i in sorted(missing_ids))} \"\n f'are not specified in the transform.',\n )\n\n out: list[Input] = []\n for transform_input in transform_inputs.values():\n if not transform_input.modifiable:\n continue\n job_input = job_inputs.get(transform_input.id)\n if job_input:\n transform_input = transform_input.copy()\n transform_input.value = job_input.value\n out.append(transform_input)\n return out",
"def train(self, features):",
"def contextual_feature_map(self, features):\n return features",
"def feature_extraction(self, sample):\n image, filename = sample\n\n if self.feature_model.training:\n print(\"Run feature model in inference mode!\")\n exit(0)\n\n if self.feature_model:\n feature = np.squeeze(self.feature_model(image[None, ...].to(self.device)).data.cpu().numpy())\n return feature",
"def _extract_decoder_features(self, preprocessed_inputs, scope,\n concat_levels, residual_depth):\n pass",
"def extractedFeatures(self):\n return self.all_features.keys()",
"def input_fn():\n serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,\n shape=[default_batch_size],\n name='input_example_tensor')\n inputs = {'examples': serialized_tf_example}\n features = parsing_ops.parse_example(serialized_tf_example, feature_spec)\n labels = None # these are not known in serving!\n return InputFnOps(features, labels, inputs)",
"def extract_libffm_features(input_lines, has_label=True):\n labels = []\n features = []\n impression_ids = []\n\n start_index = 1 if has_label else 0\n\n for _ in input_lines:\n line = _.strip()\n if not line:\n continue\n tmp = line.strip().split('%')\n if len(tmp) == 2:\n impression_ids.append(tmp[1].strip())\n else:\n impression_ids.append('none')\n\n line = tmp[0]\n cols = line.strip().split(' ')\n label = float(cols[0].strip()) if has_label else 0\n #if label > 0:\n # label = 1\n #else:\n # label = 0\n cur_feature_list = []\n\n for word in cols[start_index:]:\n if not word.strip():\n continue\n tokens = word.strip().split(':')\n cur_feature_list.append( \\\n [int(tokens[0]) -1, \\\n int(tokens[1]) -1, \\\n float(tokens[2])])\n features.append(cur_feature_list)\n labels.append(label)\n\n result = {}\n result['labels'] = labels\n result['features'] = features\n result['impression_ids'] = impression_ids\n return result",
"def get_ts_features_to_preprocess(self):",
"def inputs(self):\n return tf.placeholder(tf.float32, shape=[None, self.n_features], name=\"x\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks whether a given substring occurs within a string.
|
def is_substring(string, substring):
return substring in string
|
[
"def isSubstring(str1, str2):",
"def findSubstring(self, s):\n\t\treturn self.traverse(s) is not None",
"def checkSubstring(str1, str2):\n\tlen1 = len(str1)\n\tlen2 = len(str2)\n\tfor i in range(len2-len1+1):\n\t\tif str1 == str2[i:len1 + i]:\n\t\t\treturn True\n\treturn False",
"def is_substring(string1, string2):\n difference = len(string2) - len(string1)\n if difference < 0:\n return False\n for i in range(0, difference + 1, 1):\n substring = string2[i:i+len(string1)]\n if string1 == substring:\n return True\n return False",
"def substring_check(self, str1, str2):\n return self.sanitize(str1) in self.sanitize(str2) or self.sanitize(str2) in self.sanitize(str1)",
"def has_sub_string(check_string: str, sub_strings: Union[str, list]) -> bool:\n if type(sub_strings) is list:\n for sub_string in sub_strings:\n if sub_string in check_string:\n return True\n elif type(sub_strings) is str:\n if sub_strings in check_string:\n return True\n return False",
"def check_sub_string(string, sub_string):\n\tif sub_string == \"\":\n\t\treturn 1\n\telse:\n\t\tfor i in range(len(string)):\n\t\t\tif string[0] == sub_string[0]:\n\t\t\t\treturn check_sub_string(string[1:], sub_string[1:])\n\t\t\telse:\n\t\t\t\treturn 0",
"def _check(self, substring: str) -> bool:\n word_len = len(self.words[0])\n sub_words_count = Counter([\n substring[i*word_len:(i+1)*word_len] for i in range(len(self.words))\n ])\n return sub_words_count == self.words_count",
"def is_substring(substring, cluster):\n is_found = True\n for data in cluster:\n LOG.debug(\"is_substring: Searching %s for substring %s...\", data, substring)\n is_found = is_found and substring in data\n LOG.debug(\"is_substring: is_found = %s\\n\", str(is_found))\n return is_found",
"def has_a_forbidden_substring(string):\n return any(forbidden_string in string for forbidden_string in [\"ab\", \"cd\", \"pq\", \"xy\"])",
"def response_has(self, substring: str) -> bool:\n return substring in self.data",
"def failIfSubstring(self, substring, astring, msg=None):\n if astring.find(substring) != -1:\n raise self.failureException(msg or \"%r found in %r\"\n % (substring, astring))\n return substring",
"def failUnlessSubstring(self, substring, astring, msg=None):\n if astring.find(substring) == -1:\n raise self.failureException(msg or \"%r not found in %r\"\n % (substring, astring))\n return substring",
"def containsSubstring(searchString, searchItem):\n return searchItem.lower() in searchString.lower()",
"def ends_with(string, suffix, start=0, end=-1):\n # String_val is the python slice specified by the user.\n string_val = string[start][end]\n # Test if the string_val is in the suffix. Using in allows the suffix to be\n # a string or a tuple. If string_val in suffix then return True.\n if string_val in suffix:\n return True\n # Else return false. The suffix is not in the string.\n else:\n return False",
"def contains(self, string):\n node, length = self._find_node(string)\n return length == len(string) and node.is_terminal()",
"def _is_subword(self, token):\n token = self._tokenizer.convert_tokens_to_string(token)\n return True",
"def is_string(s):\n return type(s) == str",
"def substring(s, start, end):\n startless = start is None\n endless = end is None\n if startless and endless:\n return s\n if endless:\n return s[start:]\n if startless:\n return s[:end]\n return s[start:end]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks if str2 is a rotation of str1 using only one call to is_substring.
|
def string_rotation(str1, str2):
if len(str1) == len(str2):
return is_substring(str1+str1, str2)
return False
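A quick worked check of the rotation trick above, reusing the is_substring helper from the previous entry; the example strings are illustrative.

assert string_rotation('waterbottle', 'erbottlewat')       # rotation found inside 'waterbottle' * 2
assert not string_rotation('waterbottle', 'bottlewatre')   # same letters, but not a rotation
assert not string_rotation('waterbottle', 'watterbottle')  # length mismatch short-circuits to False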
|
[
"def rotateString(self, A: str, B: str) -> bool:\n return len(A) == len(B) and B in A + A",
"def isSubstring(str1, str2):",
"def checkSubstring(str1, str2):\n\tlen1 = len(str1)\n\tlen2 = len(str2)\n\tfor i in range(len2-len1+1):\n\t\tif str1 == str2[i:len1 + i]:\n\t\t\treturn True\n\treturn False",
"def canConvert(self, str1: str, str2: str) -> bool:\n if str1 == str2:\n return True\n m = {}\n for i in range(len(str1)):\n if str1[i] not in m:\n m[str1[i]] = str2[i]\n elif m[str1[i]] != str2[i]:\n return False\n return len(set(str2)) < 26",
"def is_substring(string1, string2):\n difference = len(string2) - len(string1)\n if difference < 0:\n return False\n for i in range(0, difference + 1, 1):\n substring = string2[i:i+len(string1)]\n if string1 == substring:\n return True\n return False",
"def rotateString(self, A: str, B: str) -> bool:\n # Concatenate A with itself and then search for B\n return len(A) == len(B) and B in (A + A)",
"def substring_check(self, str1, str2):\n return self.sanitize(str1) in self.sanitize(str2) or self.sanitize(str2) in self.sanitize(str1)",
"def is_permutation_v2(string1, string2):\n\tstring1_dict = str_count_dict(string1)\n\tstring2_dict = str_count_dict(string2)\n\n\tif string1_dict == string2_dict:\n\t\treturn True\n\treturn False",
"def test_string_rotration_false():\n from string_rotation import string_rotation\n assert string_rotation('hello', 'nothello') is False",
"def twoStrings(s1, s2):\n str1 = set(s1)\n str2 = set(s2)\n\n sub_string = str1.intersection(str2)\n\n if len(sub_string):\n return 'YES'\n\n return 'NO'",
"def IsRotation(a,b):\n\trotation = 0\n\trotate_max = len(a)\n\n\twhile rotation < rotate_max:\n\t\trotation += 1\n\n\t\tif a == b:\n\t\t\treturn True\n\n\t\ta = a[-1] + a[:-1]\n\n\treturn False",
"def test_string_rotation_true():\n from string_rotation import string_rotation\n assert string_rotation('hello', 'elloh') is True",
"def strandJudge(seq1, seq2):\n strandness1 = seq1.getOrientation()\n strandness2 = seq2.getOrientation()\n return strandness1 != strandness2",
"def are_isometric(first_string: str, second_string: str) -> bool:\n check_type(str, first_string)\n check_type(str, second_string)\n\n if len(first_string) != len(second_string):\n return False\n\n letters = [letter for letter, _ in set(zip(first_string, second_string))]\n return len(letters) == len(set(letters))",
"def is_permutation_v3(string1, string2):\n\n\tstring1_dict = str_count_dict(string1)\n\n\tfor c in string2:\n\t\tif c in string1_dict:\n\t\t\tstring1_dict[c] -= 1\n\t\telse:\n\t\t\treturn False\n\n\tfor char, count in string1_dict.iteritems():\n\t\tif count != 0:\n\t\t\treturn False\n\n\treturn True",
"def is_subsequence(l1: List[StringLike], l2: List[StringLike]) -> bool:\n i = 0\n for x in l2:\n if l1[i] == x:\n i += 1\n if i == len(l1):\n return True\n return False",
"def twoStrings(s1, s2):\n #brute force solution O(len(s1) * len(s2))\n # for c1 in s1:\n # for c2 in s2:\n # if c1 == c2:\n # return 'YES'\n # return 'NO'\n\n # set solution O(len(s1)) since 'in' keyword is O(1) time\n all_chars = dict.fromkeys(set(s2), 1)\n for c in s1:\n if c in all_chars.keys():\n return 'YES'\n return 'NO'",
"def is_rotated(array_1, array_2):\n if len(array_1) != len(array_2):\n return False\n if array_1 == array_2:\n return True\n if set(array_1) != set(array_2):\n return False\n index = array_1.index(array_2[0])\n return (array_2 == (array_1[index:] + array_1[:index]))",
"def repeats(first_str, second_str):\n first_occurance = second_str.find(first_str)\n second_occurance = second_str.find(first_str, first_occurance + len(first_str))\n if first_occurance + len(first_str) == second_occurance:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
should 404 if no events for requested semester
|
def test_no_events(self, db, client):
response = client.get(reverse("events:by-semester", args=["spring", 2099]))
assert response.status_code == 404
|
[
"def events(request):\n try:\n if request.method == 'GET':\n events_list = Events.retrieve_all()\n if events_list is not []: # not empty list\n node_id = request.GET.get('node_id', '')\n user_id = request.GET.get('user_id', '')\n status = request.GET.get('status', '')\n\n if status is not '' and status not in data_checker.VALID_EVENT_STATUS_LIST:\n raise ValueError('Status ' + status + ' is not valid')\n\n node_search = node_id is not ''\n user_search = user_id is not ''\n status_search = status is not ''\n\n events_search_list = []\n\n if node_search or user_search or status_search: # has parameters to search\n if node_search and user_search and status_search: # search by node, user and status\n for event in events_list:\n if event['node_id'] == node_id and event['user_id'] == user_id and event['status'] == status:\n events_search_list.append(event)\n\n elif node_search and user_search: # search by node and user\n for event in events_list:\n if event['node_id'] == node_id and event['user_id'] == user_id:\n events_search_list.append(event)\n\n elif user_search and status_search: # search by user and status\n for event in events_list:\n if event['user_id'] == user_id and event['status'] == status:\n events_search_list.append(event)\n\n elif node_search and status_search: # search by node and status\n for event in events_list:\n if event['node_id'] == node_id and event['status'] == status:\n events_search_list.append(event)\n\n elif user_search: # search only by user\n for event in events_list:\n if event['user_id'] == user_id:\n events_search_list.append(event)\n\n elif node_search: # search only by node\n for event in events_list:\n if event['node_id'] == node_id:\n events_search_list.append(event)\n\n elif status_search: # search only by status\n for event in events_list:\n if event['status'] == status:\n events_search_list.append(event)\n\n resp = {\n 'success': 'true',\n 'data': events_search_list\n }\n\n else: # all without parameters\n resp = {\n 'success': 'true',\n 'data': events_list\n }\n\n else:\n resp = {\n 'success': 'true',\n 'data': events_list\n }\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n data_checker.check_event(data, request.method)\n\n created_event_key = Events.create(data)\n\n if created_event_key:\n print 'create event successful'\n if 'user_id' not in data:\n resp = {\n 'success': 'true',\n 'data': created_event_key\n }\n else:\n all_events_list = Events.retrieve_all()\n resp_events = []\n for event in all_events_list:\n if event['user_id'] == data['user_id'] and event['status'] == data_checker.EVENT_UNREAD:\n resp_events.append(event)\n\n resp = {\n 'success': 'true',\n 'data': resp_events\n }\n else:\n raise RuntimeError('Orchestrate service temporarily unavailable')\n else:\n raise NotImplementedError('Only GET, POST methods are allowed')\n\n return JSONResponse(resp)\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n\n return JSONResponse(err)",
"def test_09_api_event_not_found(self):\n sample_event_id = '-'\n response = self.app.get('/api/events/%s' % sample_event_id, headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data['error'], 'Not found')",
"def test_no_events_query_success(self):\n endpoint_url = get_all_events_endpoint_url()\n response = client.get(endpoint_url)\n assert check_get_all_events_response_valid(response, 0)",
"def test_ajax_day_view(self):\n response = self.client.get(\n reverse(\n 'calendar:day_list',\n kwargs={'year': '2015', 'month': '2', 'day': '2'}\n ),\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n self.assertEqual(response['Content-Type'], 'application/json')\n\n data = loads(response.content.decode('utf8'))\n self.assertEqual([], data['events'])",
"def test_schedule_retrieve_schedule_none(self):\n # request\n response = self.client.get(reverse(self.view_name, args=[1]))\n # test response\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def testViewEventNotLoggedIn(self):\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);",
"def events_view(request, course_id):\n calendar_id = get_calendar_id_by_course_id(course_id)\n try:\n response = gcal_service.events().list(calendarId=calendar_id,\n pageToken=None).execute()\n events = [{\n \"id\": api_event[\"id\"],\n \"text\": api_event[\"summary\"],\n \"start_date\": from_google_datetime(api_event[\"start\"][\"dateTime\"]),\n \"end_date\": from_google_datetime(api_event[\"end\"][\"dateTime\"]),\n \"readonly\": not has_permission(request.user, api_event)\n } for api_event in response['items']]\n except Exception as e:\n log.exception(e)\n return JsonResponse(data={'errors': e}, status=500, safe=False)\n else:\n return JsonResponse(data=events, status=200, safe=False)",
"def test_03_api_get_events_unauthorized(self):\n response = self.app.get('/api/events')\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 401)\n self.assertEqual(data['error'], 'Unauthorized access')",
"def get_events(data):\n query_params = data.GET.dict()\n if not query_params:\n\n # If no payload is passed to the request, simply fetch future approved events\n start_date = datetime.now(timezone(TIMEZONE))\n\n # TODO: When the user first visits the homepage, all events occurring\n # in the week are fetched. Should this be changed instead to display\n # only events for the current day?\n end_date = datetime.now(timezone(TIMEZONE)) + timedelta(days=7)\n\n events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,\n start_datetime__range=(start_date, end_date))\\\n .order_by('start_datetime')\n return HttpResponse(json.dumps(make_events_data_response(events)))\n\n if 'isMonthly' in query_params and query_params['isMonthly'] == 'true':\n # Fetch events for the whole month\n\n month = int(query_params['month'])\n\n # TODO: Ensure that timezone differences are properly accounted for\n # when using the `__month` filter\n events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,\n start_datetime__month=month)\\\n .order_by('start_datetime')\n return HttpResponse(json.dumps(make_events_data_response(events)))\n\n else:\n # Fetch events for a selected date\n day = query_params['day']\n month = query_params['month']\n year = query_params['year']\n start_date = datetime.strptime(f\"{year}-{month}-{day} 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\n end_date = datetime.strptime(f\"{year}-{month}-{day} 23:59:59\", \"%Y-%m-%d %H:%M:%S\")\n\n current_timezone = timezone(TIMEZONE)\n events = models.Event.objects.filter(approval_status=constants.EventApprovalStatus.APPROVED.name,\n start_datetime__range=(current_timezone.localize(start_date),\n current_timezone.localize(end_date))) \\\n .order_by('start_datetime')\n return HttpResponse(json.dumps(make_events_data_response(events)))",
"def test_no_events_not_loggedin(self):\n response = self.client.get(self.top_events_url)\n\n self.assertEquals(response.status_code, 403)",
"def test_no_events_not_loggedin(self):\n response = self.client.get(self.top_url)\n\n self.assertEquals(response.status_code, 403)",
"def search():\n #get the name given\n name = request.args.get('q')\n #get the given page and number of events or set them to default\n page = request.args.get(\"page\", default=1, type=int)\n per_page = request.args.get(\"limit\", default=15, type=int)\n if name:\n found_events = Events.get_events_by_name(name, page, per_page)\n if found_events.items:\n event_list = make_event_list(found_events.items)\n return jsonify(event_list), 200\n return jsonify({\"message\" : \"there are no more events matching the given name\"}), 404\n return jsonify({\"message\" : \"can not search events, provide event name\"}), 400",
"def event(request, index_id, event_id):\n context = {\"index_id\": index_id, \"event_id\": event_id}\n return render(request, 'event.html', context)",
"def event_by_id(request, event_id):\n if request.method == 'GET':\n print 'get event by id'\n try:\n if event_id == '':\n raise ValueError('No ID is given while trying to get event by ID')\n\n event_get = Events.retrieve_by_id(event_id)\n if 'code' in event_get and event_get['code'] == 'items_not_found':\n raise ValueError('No event found with given id=' + event_id)\n\n event_response = {\n 'success': 'true',\n 'data': event_get\n }\n return JSONResponse(event_response)\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n return JSONResponse(err)\n\n elif request.method == 'PUT':\n print 'put update by event id'\n try:\n data = JSONParser().parse(request)\n data_checker.check_event(data, request.method)\n\n update_event_key = Events.update(event_id, data)\n\n if update_event_key:\n print 'create event successful'\n if 'user_id' not in data:\n resp = {\n 'success': 'true',\n 'data': update_event_key\n }\n else:\n all_events_list = Events.retrieve_all()\n resp_events = []\n for event in all_events_list:\n if event['user_id'] == data['user_id'] and event['status'] == data_checker.EVENT_UNREAD:\n resp_events.append(event)\n\n resp = {\n 'success': 'true',\n 'data': resp_events\n }\n else:\n raise RuntimeError('Orchestrate service temporarily unavailable')\n\n except Exception, e:\n err = {\n 'success': 'false',\n 'data': {},\n 'err_message': str(e)\n }\n return JSONResponse(err)\n\n return JSONResponse(resp)\n else:\n err = {\n \"success\": \"false\",\n \"err_message\": \"Only GET and PUT method is allowed\",\n \"data\": {}\n }\n return JSONResponse(err)",
"def test_10_api_can_get_all_free_events(self):\n response = self.app.get('/api/events/free', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 0)",
"def test_feedback_get_api_with_wrong_event(self):\n response = self.client.get(\n self.end_point + '?event_id={}'.format(100),\n HTTP_AUTHORIZATION=\"Bearer {}\".format(self.token),\n content_type=\"application/json\"\n )\n self.assertEquals(response.status_code, 400)",
"def test_sections_in_semester() -> None:\n\n assert a2_courses.sections_in_semester(SCHEDULE_4, 'F') == {CSC110_LEC0101, CON123_LEC0123}",
"def test_02_api_get_empty(self):\n response = self.app.get('/api/events', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 0)",
"def test_daily_schedule_page(self):\n self.make_request(\n \"/daily-schedule/%s/\" % self.fx.DailyScheduleData.schedule_provincial.id,\n follow_redirects=True,\n )\n self.assertIn(\"Daily Schedules\", self.html)\n self.assertIn(self.fx.DailyScheduleData.schedule_provincial.title, self.html)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Overload the div operator for HerosStats objects to perform a NumPy array division on the data portion of the attributes.
|
def __div__(self, other):
res = np.array(zip(*self.hero_and_value)[1]) / np.array(zip(*other.hero_and_value)[1], dtype=float)
hero_and_value = zip(zip(*self.hero_and_value)[0], res)
hs = HerosStats(self.stat_name, hero_and_value)
return hs
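A minimal sketch (in Python 3, with illustrative per-hero numbers) of the element-wise division this operator performs on the value portion of hero_and_value:

import numpy as np

heroes = ['axe', 'lina', 'pudge']
wins = np.array([30, 45, 12])
games = np.array([60, 90, 48], dtype=float)

win_rate = wins / games                        # element-wise division, hero order preserved
hero_and_value = list(zip(heroes, win_rate))   # e.g. [('axe', 0.5), ('lina', 0.5), ('pudge', 0.25)]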
|
[
"def _div_scalar(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def _scatter_elemwise_div(lhs=None, rhs=None, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def _DivScalar(data=None, scalar=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)",
"def divData(data, numRow, numColumns):\n totalArrays = numRow * numColumns\n total = np.zeros((0, totalArrays), dtype=np.float32)\n for i in range(48):\n dataValue = data[i]\n array1D = []\n # size = dataValue.shape\n # numRows = int(size[0] / 2)\n # numColumns = int(size[1] / 2)\n # dataSecc1 = dataValue[0:numRows, 0:numColumns]\n # dataSecc2 = dataValue[numRows:, 0:numColumns]\n # dataSecc3 = dataValue[0:numRows, numColumns:]\n # dataSecc4 = dataValue[numRows:, numColumns:]\n # prom1 = dataSecc1.sum() / (numColumns * numRows)\n # prom2 = dataSecc2.sum() / (numColumns * numRows)\n # prom3 = dataSecc3.sum() / (numColumns * numRows)\n # prom4 = dataSecc4.sum() / (numColumns * numRows)\n # array1D.append(prom1)\n # array1D.append(prom2)\n # array1D.append(prom3)\n # array1D.append(prom4)\n rows = []\n div = np.vsplit(dataValue, numRow)\n for xs in div:\n divSplit = np.array_split(xs, numColumns)\n for ls in divSplit:\n rows.append(ls)\n for ys in rows:\n meanVal = np.mean(ys)\n array1D.append(meanVal)\n total = np.insert(total, i, array1D, axis=0)\n return total",
"def list_element_wise_division(a, b):\n return numpy.divide(a, b, out=numpy.zeros_like(a), where=b != 0.)",
"def divisions():\n pass",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorSI___div__(self, *args)",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorD___div__(self, *args)",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorUC___div__(self, *args)",
"def div_expr(lh_op, rh_op):\r\n return lo.LinOp(lo.DIV, lh_op.size, [lh_op], rh_op)",
"def divide(base, array):\n return [base / item for item in array]",
"def div(a, b):\n\n def divide(a, b):\n \"\"\"Division\"\"\"\n return a / b\n\n return op_with_scalar_cast(a, b, divide)",
"def divValues(a1,a2):\n #user picks which array is divisor\n divisor = input(\"Specify divisor array (array1 or array2): \")\n #set the program to ignore divide by zero errors (replaces with inf)\n np.seterr(divide = 'ignore')\n #plugs in array args as desired by user\n if divisor == \"array1\":\n x = np.divide(a2,a1) \n elif divisor == \"array2\":\n x = np.divide(a1,a2) \n #changes type inf to none as instructed\n x[x == np.inf] = None\n print(x)\n return x",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorUS___div__(self, *args)",
"def divide(self, delta):\n if uwsgi_loaded:\n uwsgi.metric_div(self._metric_name, delta)",
"def _prf_divide(numerator, denominator):\n result = numerator / denominator\n mask = denominator == 0.0\n if not mask.any():\n return result\n\n # remove nan\n result[mask] = 0.0\n return result",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorF___div__(self, *args)",
"def vm_impl_div(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n return Tensor(x / y)\n\n return vm_impl",
"def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorUI___div__(self, *args)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the heros attribute of this object.
|
def set_heros(self):
heros = []
for pair in self.hero_and_value:
heros.append(pair[0])
self.heros = heros
|
[
"def set_historico(self):\n\n pass",
"def set_children(self, children) :\n self.__children = children",
"def set(self, chore):\n\n # Just set using the node and dumped data\n\n self.redis.set(f\"/chore/{chore['id']}\", json.dumps(chore))",
"def set_halo(self, halo_dict=None):\n self.c0 = halo_dict[\"c0\"]/(1.0 + self._redshift)\n self.beta = halo_dict[\"beta\"]\n self.alpha = -1.0\n\n self.mass = mass_function.MassFunction(\n self._redshift, self.cosmo_dict, halo_dict)\n\n self.local_hod.set_halo(halo_dict)\n self.set_hod(self.local_hod)",
"def childs(self, childs):\n\n self._childs = childs",
"def set_species(self, h):\n if self.project_instance is not None and not self.selected_column == -1:\n # Update relevant graphics:\n self.project_instance.graph.set_species(self.selected_column, h)\n self.gs_overlay_composition.re_draw_vertex(self.selected_column)\n self.gs_atomic_graph.redraw_neighbourhood(self.selected_column)\n self.gs_zeta_graph.redraw_neighbourhood(self.selected_column)\n # Update control window info:\n self.control_window.select_column()",
"def set_species(self, value):\n if value is None:\n return\n value = list(value)\n self._species_to_index_dict = {el: i for i, el in enumerate(value)}\n self._species = value[:]\n self._store_elements = {el.Abbreviation: el for el in value}",
"def __set__(self, instance, value):\n hstore_dictionary = getattr(instance, self.hstore_field_name)\n if hstore_dictionary is None:\n # init empty HStoreDict\n setattr(instance, self.hstore_field_name, HStoreDict())\n # reassign\n hstore_dictionary = getattr(instance, self.hstore_field_name)\n hstore_dictionary[self.name] = value",
"def set_child(self, i, child):\r\n child.parent = self\r\n self.children[i].parent = None\r\n self.children[i] = child\r\n self.changed()",
"def hero(self, hero):\n\n if hero not in self.heroes:\n raise ValueError('Hero {cls_id} not owned by {steamid}.'.format(\n cls_id=hero.cls_id, steamid=self.steamid\n ))\n save_hero_data(database_path, self.steamid, self.hero)\n self._hero = hero",
"def set_children_heap(self, children_heap):\n\n self.children_heap = children_heap\n self.children_heap.sort(reverse=True)\n for child in children_heap:\n child.set_parent_heap(self)",
"def makeset(self, v):\n self.parent[v] = v\n self.rank[v] = 0",
"def __setattr__(self, name, value):\n if name != \"parent\":\n for child in flatten(value):\n if isinstance(child, CtreeNode):\n child.parent = self\n super(CtreeNode, self).__setattr__(name, value)",
"def __set__(self, instance, value):\n instance.__dict__[self.name] = value",
"def set_edges(self, edges):\n self.edges = edges",
"def setDisks(self, partitioner, lunset):\n self.__disks = partitioner.disks\n self.__lunset = lunset",
"def set_elos(self, elos_per_player):\n self.elos_per_player = elos_per_player",
"def set_hip(self):\n for residue in self.residues:\n if isinstance(residue, aa.HIS):\n self.apply_patch(\"HIP\", residue)",
"def set_branches(self, branches):\n self.branches = branches"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Method to return the list of hero-to-value mappings for a HerosStats instance.
|
def get_data(self):
return map(lambda pair: pair[1], self.hero_and_value)
|
[
"def observed_stat(heroes):\n\n return ...",
"def set_heros(self):\n heros = []\n for pair in self.hero_and_value:\n heros.append(pair[0])\n self.heros = heros",
"def stats(self):\n for hero in self.heroes:\n if hero.deaths == 0:\n hero.deaths = 1\n kd = hero.kills / hero.deaths\n print(f\"{hero.name} kill/deaths: \", end=\"\")\n print(\"{:.2f}\".format(kd))",
"def view_all_heroes(self):\n print(self.heroes)",
"def __div__(self, other):\n res = np.array(zip(*self.hero_and_value)[1]) / np.array(zip(*other.hero_and_value)[1], dtype=float)\n hero_and_value = zip(zip(*self.hero_and_value)[0], res)\n hs = HerosStats(self.stat_name, hero_and_value)\n return hs",
"def poes_hazard_maps(self):\n return self.job_ctxt.extract_values_from_config(\n general.POES_PARAM_NAME,\n check_value=lambda v: v >= 0.0 and v <= 1.0)",
"def get_heroes(self):\n\n url = self.__build_url(weburl.GET_HEROES)\n print(\"requesting for: \" + url)\n response = urlopen(url)\n json_result = json.loads(response.read().decode('utf-8'))\n heroes = json_result['result']['heroes']\n return heroes",
"def voi_stats(label_map, scalar_map, image_coordinate_system, phantom_definition, clamp=None):\n voi_stats = {}\n pixel_volume = image_coordinate_system.pixel_spacing_cm * image_coordinate_system.pixel_spacing_cm * image_coordinate_system.spacing_between_slices_cm\n\n voi_labels = np.unique(label_map)[1:]\n for i in voi_labels:\n # get the points for this VOI\n voi_points_z, voi_points_x, voi_points_y = np.nonzero(label_map == i)\n # get scalar values for this VOI\n scalar_values = scalar_map[voi_points_z, voi_points_x, voi_points_y]\n voi_num_pixels = len(scalar_values)\n if clamp is not None:\n scalar_values = scalar_values[scalar_values > clamp[0]]\n scalar_values = scalar_values[scalar_values < clamp[1]]\n # return stats on this VOI\n voi_def = phantom_definition[\"vois\"][i]\n mean = np.mean(scalar_values)\n median = np.mean(scalar_values)\n expected_value = voi_def[\"expected_value\"]\n\n voi_stats[i] = { \"median\": median,\n \"median_percent_diff\": (median - expected_value)/expected_value * 100 if expected_value is not None else None,\n \"mean\": np.mean(scalar_values),\n \"mean_percent_diff\": (mean - expected_value)/expected_value * 100 if expected_value is not None else None, \n \"max\": np.amax(scalar_values),\n \"std_dev\": np.std(scalar_values),\n \"clamped_pixels\": voi_num_pixels - len(scalar_values),\n \"label\": i,\n \"content_type\": voi_def[\"content_type\"],\n \"expected_value\": expected_value, \n \"min\": np.amin(scalar_values),\n \"count\": voi_num_pixels,\n \"volume\": voi_num_pixels * pixel_volume\n }\n return voi_stats",
"def compute_stats(values):\n \n return {\n 'min': min(values),\n 'max': max(values),\n 'mean': np.mean(values),\n 'stdev': np.std(values),\n }",
"def get_monster_stats(self, level, numberOfPlayers):\n\n index = 0\n for key in list(self.baseStats.keys())[::-1]:\n if level >= key:\n index = key\n\n multiplier = 1 + self.baseStats[index][\"Monster\"][\"Scaling\"] * numberOfPlayers\n health = int(self.baseStats[index][\"Monster\"][\"HP\"] * level * multiplier)\n attack = int(self.baseStats[index][\"Monster\"][\"Attack\"] * level * multiplier)\n defence = int(self.baseStats[index][\"Monster\"][\"Defence\"] * level * multiplier)\n return health, attack, defence",
"def heroes():\n \n heroes = [\"\"] * 115\n with open(\"../ref/heroes.json\",\"r\") as file:\n heroes_raw = json.load(file)[\"heroes\"]\n file.close()\n for hero in heroes_raw:\n heroes[hero[\"id\"]-1] = hero[\"localized_name\"].strip().lower().replace(\" \",\"\").replace(\"-\",\"\")\n with open(\"data/heroes.json\",\"w\") as file:\n json.dump(heroes, file)\n file.close()",
"def health_monitor_list(self, **kwargs):\n return self._clients.octavia().health_monitor_list(**kwargs)",
"def get_species_for_histogram():\n return ['P1', 'P2']",
"def get_stats(self, ship_name):\n result = {}\n for stat in self.stats:\n result[stat] = self.base_stats[ship_name][stat] if stat in self.base_stats[ship_name] else 0\n ship_parts = self.get_ship_parts(ship_name)\n for sp in ship_parts:\n result['initiative'] += sp.initiative\n result['movement'] += sp.movement\n result['computer'] += sp.computer\n result['shield'] += sp.shield\n result['hull'] += sp.hull \n if sp.missile:\n result['missile2'] += sp.n_dice\n elif sp.hits > 0:\n result['cannon' + str(sp.hits)] += sp.n_dice \n result['energy'] += sp.energy\n \n return result",
"def _wanted_statistics(sensor_states: list[State]) -> dict[str, set[str]]:\n return {\n state.entity_id: DEFAULT_STATISTICS[state.attributes[ATTR_STATE_CLASS]]\n for state in sensor_states\n }",
"def get_stats_npc(healer=False, trader=False):\n npc = NonPersonCharacter.query.filter_by(healer=healer, trader=trader).first().to_dict()\n return npc['texts'][random.randint(0, len(npc['texts']) - 1)], npc['img_path'], npc['name']",
"def stats_from_category_hours(dict, labels):\n \n medians = []\n means = []\n for key in labels:\n vals = dict[key]\n medians.append(np.median(vals))\n means.append(vals.mean())\n return (medians, means)",
"def humans(self):\n players = list()\n\n for player in self._players:\n if player.human == 1:\n players.append(player)\n\n return players",
"def vh_histograms(map):\n return np.sum(map, axis=1), np.sum(map, axis=0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
You can implement configurable callable behaviour by implementing the call(...) method. This remains backward compatible with a legacy __call__ override.
|
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
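A minimal sketch of the pattern (class names here are illustrative, not from the original source): the base class forwards __call__ to call(...), and subclasses only override call:

class ConfigurableCallable(object):
    def __call__(self, *args, **kwargs):
        return self.call(*args, **kwargs)

    def call(self, *args, **kwargs):
        raise NotImplementedError("subclasses implement call(...)")

class Adder(ConfigurableCallable):
    def __init__(self, offset):
        self.offset = offset

    def call(self, value):
        # the configurable behaviour lives here instead of in __call__
        return value + self.offset

add_ten = Adder(10)
print(add_ten(5))  # 15, dispatched through __call__ -> call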
|
[
"def call(fn, args=(), kwargs={}):\r\n return fn(*args, **kwargs)",
"def __call__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def _do_call(call):\n try:\n return _CALL_CACHE[call]\n except KeyError:\n if callable(call[0]):\n result = call[0](*call[1:])\n else:\n result = _run(call)\n _CALL_CACHE[call] = result\n return result",
"def __call__(self, *args, **kwargs):\n\t\treturn ComposableFn(self.fn, *args, name=self.name)",
"def call_factory(package):\r\n return functools.partial(call, package)",
"def call(self, call):\n\n self._call = call",
"def __call__(self, *args, **kwargs):\n func = self.decorated\n if func is None:\n func = args[0]\n if args[1:] or kwargs:\n raise ValueError('Cannot decorate and setup simultaneously '\n 'with __call__(). Use __init__() or '\n 'setup() for setup. Use __call__() or '\n 'decorate() to decorate.')\n self.decorate(func)\n return self\n else:\n return self.run(func, *args, **kwargs)",
"def wrap (cls, callable_object, arguments = (), keywords = None):\n\n if arguments or keywords:\n return cls (callable_object, arguments, keywords)\n else:\n if not is_callable (callable_object):\n raise TypeError (\"'callable_object' must be callable\")\n\n return callable_object",
"def __call__(self, *params):\n return Function(name=self.name, params=params)",
"def call(method, *args, check_error=True, **kwargs):\n\n try:\n if method and isinstance(method, collections.Callable):\n try:\n inspect.getcallargs(method, *args, **kwargs)\n except TypeError as err:\n raise InvalidArgumentError(\n \"Attempt to invoke %s resulted in a TypeError %s\" % (method, err),\n method=method,\n args=args,\n kwargs=kwargs.copy()\n )\n else:\n return method(*args, **kwargs)\n else:\n raise InvalidMethodError(method=method)\n except (InvalidMethodError, InvalidArgumentError):\n if check_error:\n raise",
"def call(self, func, *args, **kwargs):\n guessed_kwargs = self._guess_kwargs(func)\n for key, val in guessed_kwargs.items():\n kwargs.setdefault(key, val)\n try:\n return func(*args, **kwargs)\n except TypeError as exc:\n msg = (\n \"tried calling function %r but failed, probably \"\n \"because it takes arguments that cannot be resolved\"\n ) % func\n raise DiayException(msg) from exc",
"def __call__(self, func: Callable[..., T], args_gen: ArgsGen = None) -> None:\n assert (\n not self.spawned\n ), \"Hook has already spawned a function, cannot run another\"\n self.prepare()\n if args_gen is None:\n args_gen = make_args_gen(func)\n # TODO: should we check the return types here?\n supplied = list(self._kwargs)\n demanded = args_gen(supplied)\n assert set(supplied).issuperset(\n demanded\n ), f\"Hook demanded arguments {demanded}, but only supplied {supplied}\"\n kwargs = {k: self._kwargs[k] for k in demanded}\n assert self._spawn, \"No spawned function\"\n self.spawned = self._spawn(self._run, func, kwargs)",
"def call(self, call, args=(), kargs=None, delay=0.0, priority=0, id_=\"\", include_id=False, timeout=0.0, default=None):\n assert isinstance(timeout, float)\n assert 0.0 <= timeout\n assert self._thread_ident\n def callback(result):\n container[0] = result\n event.set()\n\n if self._thread_ident == get_ident():\n if kargs:\n return call(*args, **kargs)\n else:\n return call(*args)\n\n else:\n # result container\n container = [default]\n event = Event()\n\n # register the call\n self.register(call, args, kargs, delay, priority, id_, callback, (), None, include_id)\n\n # wait for call to finish\n event.wait(None if timeout == 0.0 else timeout)\n\n if isinstance(container[0], Exception):\n raise container[0]\n else:\n return container[0]",
"def __call__(self, fn):\n # We record the function name for that command\n self.fn = fn.__name__\n # And we decorate the function\n def decorated(cls, player, arg):\n m = self.regex.match(arg)\n if m:\n # if arguments match, we execute the command\n return fn(cls, player, m)\n else:\n # orelse we print a short usage\n if self.onfail is not None:\n return getattr(cls, self.onfail)(player, arg)\n else:\n return info(player,\n \"<b>Usage:</b> <code>{} <i>{}</i></code>.\"\n .format(self.name, self.usage)\n )\n return decorated",
"def call_with_injection(\n self, callable: Callable[..., T], self_: Any = None, args: Any = (), kwargs: Any = {}\n ) -> T:\n\n bindings = get_bindings(callable)\n signature = inspect.signature(callable)\n full_args = args\n if self_ is not None:\n full_args = (self_,) + full_args\n bound_arguments = signature.bind_partial(*full_args)\n\n needed = dict(\n (k, v) for (k, v) in bindings.items() if k not in kwargs and k not in bound_arguments.arguments\n )\n\n dependencies = self.args_to_inject(\n function=callable,\n bindings=needed,\n owner_key=self_.__class__ if self_ is not None else callable.__module__,\n )\n\n dependencies.update(kwargs)\n\n try:\n return callable(*full_args, **dependencies)\n except TypeError as e:\n reraise(e, CallError(self_, callable, args, dependencies, e, self._stack))\n # Needed because of a mypy-related issue (https://github.com/python/mypy/issues/8129).\n assert False, \"unreachable\" # pragma: no cover",
"def call_on(recipient, method_name, *args, check_error=True, **kwargs):\n\n method = getattr(recipient, method_name, None)\n call(method, *args, check_error=check_error, **kwargs)",
"def is_callable(obj):\n # __call__\n return hasattr(obj, '__call__')",
"def T_call(*args):\n return _seb.T_call(*args)",
"def __init__(self, func: Callable[[Any], None]):\n\n if not callable(func):\n raise ValueError('func must be callable')\n\n self._callable = func"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function to fix fields in the vcf header
|
def fix_vcf_header( vcf_reader ):
    #dbNSFP_clinvar_clnsig has an Integer type but sometimes it is a String, e.g. 2|2
vcf_reader.infos['dbNSFP_clinvar_clnsig'] = pyvcf.parser._Info("dbNSFP_clinvar_clnsig",1,"String","Field 'clinvar_clnsig' from dbNSFP", None, None)
return( vcf_reader )
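A hedged usage sketch with PyVCF (file names are placeholders; it assumes the module is imported as pyvcf, as the snippet does):

import vcf as pyvcf

vcf_reader = pyvcf.Reader(filename="input.vcf")   # placeholder path
vcf_reader = fix_vcf_header(vcf_reader)           # patch the INFO definition before writing
vcf_writer = pyvcf.Writer(open("output.vcf", "w"), vcf_reader)
for record in vcf_reader:
    vcf_writer.write_record(record)
vcf_writer.close()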
|
[
"def set_info_header(vcf):\n vcf.infos = {\n 'IMPRECISE': py_vcf.parser._Info(\"IMPRECISE\", 0, \"Flag\", \"Imprecise structural variant\", \"NanoSV\", __version__),\n 'SVTYPE': py_vcf.parser._Info(\"SVTYPE\", 1, \"String\", \"Type of structural variant\", \"NanoSV\", __version__),\n 'SVMETHOD': py_vcf.parser._Info(\"SVMETHOD\", 1, \"String\", \"Type of approach used to detect SV\", \"NanoSV\", __version__),\n 'END': py_vcf.parser._Info(\"END\", 1, \"Integer\", \"End position of structural variant\", \"NanoSV\", __version__),\n 'CIPOS': py_vcf.parser._Info(\"CIPOS\", 2, \"Integer\", \"Confidence interval around POS\", \"NanoSV\", __version__),\n 'CIEND': py_vcf.parser._Info(\"CIEND\", 2, \"Integer\", \"Confidence interval around END\", \"NanoSV\", __version__),\n 'SVLEN': py_vcf.parser._Info(\"SVLEN\", None, \"Integer\", \"Distance between the two genomic positions\", \"NanoSV\", __version__),\n 'RT': py_vcf.parser._Info(\"RT\", 3, \"Integer\", \"Number of the different read types (2d, template, complement)\",\"NanoSV\", __version__),\n 'GAP': py_vcf.parser._Info(\"GAP\", 1, \"Integer\",\"Median number of bases between the two segments of the SV, in case of an insertion this is the size of the insertion\",\"NanoSV\", __version__),\n 'MAPQ': py_vcf.parser._Info(\"MAPQ\", 2, \"Integer\",\"Median mapping quality of the two segments of the structural variant\", \"NanoSV\",__version__),\n 'PID': py_vcf.parser._Info(\"PID\", 2, \"Float\",\"Median percentage identity to the reference of the two segments of the structural variant\",\"NanoSV\", __version__),\n 'PLENGTH': py_vcf.parser._Info(\"PLENGTH\", 2, \"Float\",\"Median segment length percentage of the two segments of the structural variant\",\"NanoSV\", __version__),\n 'RLENGTH': py_vcf.parser._Info(\"RLENGTH\", 1, \"Integer\", \"Median length of the total reads\", \"NanoSV\", __version__),\n 'MATEID': py_vcf.parser._Info('MATEID', None, 'String', 'ID of mate breakend', 'NanoSV', __version__),\n 'PURITY_SCORE': py_vcf.parser._Info('PURITY_SCORE', None, \"Integer\", \"Purity of clusters after phasing\", \"NanoSV\", __version__),\n 'PHASING_SCORE': py_vcf.parser._Info('PHASING_SCORE', None, \"Integer\", \"Percentage of reads in two largest clusters after phasing\", \"NanoSV\", __version__),\n 'SNPS_USED': py_vcf.parser._Info('SNPS_USED', None, \"Integer\", \"SNPs used during phasing\", \"NanoSV\", __version__),\n 'ALT_READ_IDS': py_vcf.parser._Info('ALT_READ_IDS', None, \"String\", \"Read ids of the supporting alt reads\", \"NanoSV\", __version__),\n 'REF_READ_IDS_1': py_vcf.parser._Info('REF_READ_IDS_1', None, \"String\", \"Read ids of the supporting reference reads for bp1\", \"NanoSV\", __version__),\n 'REF_READ_IDS_2': py_vcf.parser._Info('REF_READ_IDS_2', None, \"String\", \"Read ids of the supporting reference reads for bp2\", \"NanoSV\", __version__)\n }\n\n if NanoSV.opts_depth_support:\n vcf.infos['DEPTHPVAL'] = py_vcf.parser._Info(\"DEPTHPVAL\", 1, \"Float\", \"In case of a duplication or deletion the P-value of the significance test is shown here\", \"NanoSV\", __version__)",
"def test_vcf_header(self):\n hd = vcf_header(\n source='23andme',\n reference='http://example.com',\n format_info=['<ID=GT,Number=1,Type=String,Description=\"GT\">'])\n self.assertEqual(len(hd), 6)\n expected_header_fields = [\"##fileformat\",\n \"##fileDate\",\n '##source',\n '##reference',\n '##FORMAT',\n '#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER' +\n '\\tINFO\\tFORMAT\\t23ANDME_DATA']\n self.assertEqual([i.split(\"=\")[0] for i in hd], expected_header_fields)",
"def set_format_header(vcf):\n vcf.formats = {\n 'GT': py_vcf.parser._Format('GT', 1, 'String', 'Genotype'),\n 'DR': py_vcf.parser._Format('DR', 2, 'Integer', 'Number of reference reads'),\n 'DV': py_vcf.parser._Format('DV', 2, 'Integer', 'Number of variant reads'),\n 'GQ': py_vcf.parser._Format('GQ', 1, 'Integer', 'Genotype quality'),\n 'HR': py_vcf.parser._Format('HR', 2, 'Integer', 'Number of hanging variant reads'),\n 'PL': py_vcf.parser._Format('PL', 'G', 'Integer','Normalized, Phred-scaled likelihoods for genotypes as defined in the VCF specification')\n }",
"def test_bcf_record_construct_minimal():\n header = bcf.BCFHeader()\n assert header.to_vcf_header() == '##fileformat=VCFv4.2\\n'",
"def __normalize_fields(self, check_fields: bool = False) -> None:\n field_dict = OrderedDict()\n for k, v in self.fields.items():\n # field names to lower case\n k = k.strip().lower()\n # assert k in BIB_FIELDS, f\"{k} is not a valid field name\"\n # remove redundant curly braces and commas\n v = str(v).strip(\" ,\") # DO NOT strip \"{}\"\n self.__double_braces_flags[k] = False\n braces_count = 0\n while all([v.startswith(\"{\"), v.endswith(\"}\")]) or all(\n [v.startswith('\"'), v.endswith('\"')]\n ):\n v = v[1:-1]\n braces_count += 1\n if braces_count >= 2:\n self.__double_braces_flags[k] = True\n # convert month to number if applicable\n if k.lower().strip() == \"month\" and v.capitalize() in calendar.month_abbr:\n v = strptime(v, \"%b\").tm_mon\n field_dict[k] = v\n self.__fields = field_dict\n if check_fields:\n self.check_required_fields()\n for k, v in self.fields.items():\n self.__setattr__(k, v)",
"def flatten_header(header,delete=False):\n\n if not isinstance(header,fits.Header):\n raise Exception(\"flatten_header requires a fits.Header instance\")\n\n newheader = header.copy()\n\n for key in newheader.keys():\n try:\n if delete and int(key[-1]) >= 3 and key[:2] in ['CD','CR','CT','CU','NA']:\n newheader.pop(key)\n elif (int(key[-1]) >= 3 or int(key[2])>=3) and key[:2] in ['CD','CR','CT','CU','NA','PC']:\n newheader.rename_keyword(key,'A'+key,force=True)\n if delete and (int(key[4]) >= 3 or int(key[7]) >= 3) and key[:2]=='PC' and key in newheader:\n newheader.pop(key)\n except ValueError:\n # if key[-1] is not an int\n pass\n except IndexError:\n # if len(key) < 2\n pass\n newheader['NAXIS'] = 2\n if header.get('WCSAXES'):\n newheader['WCSAXES'] = 2\n\n return newheader",
"def manipulate_header(header, dim):\n proj_variable = header.fields[dim-1]\n removed_variable = header.fields[2-dim] # assuming 2d FES\n value_field = header.fields[2]\n header.fields = [proj_variable, \"proj.\" + value_field]\n remove_const = [const for const in header.constants if removed_variable in const]\n for const in remove_const: # remove constants related to projected out variable\n del header.constants[const]",
"def clean_fieldnames(fieldnames):\n num_generated_headers = 0\n new_fieldnames = []\n for fieldname in fieldnames:\n new_fieldname = unidecode(fieldname)\n if fieldname == '':\n num_generated_headers += 1\n new_fieldname = f'{SEED_GENERATED_HEADER_PREFIX} {num_generated_headers}'\n\n new_fieldnames.append(new_fieldname)\n return new_fieldnames, num_generated_headers > 0",
"def set_filter_header(vcf):\n vcf.filters = {\n 'SVcluster': py_vcf.parser._Filter(\"SVcluster\",\"There are more than \" + str(NanoSV.opts_svcluster) + \" SVs in a window of \" + str(NanoSV.opts_window_size) + \" on both sides\"),\n 'Gap': py_vcf.parser._Filter(\"Gap\", \"The median gap size is larger than \" + str(NanoSV.opts_gap_flag) + \" for non insertions\"),\n 'MapQual': py_vcf.parser._Filter(\"MapQual\", \"The median mapping quality is less than \" + str(NanoSV.opts_mapq_flag)),\n 'PID': py_vcf.parser._Filter(\"PID\", \"The median PID of one of the two sides is less than \" + str(NanoSV.opts_pid_flag)),\n 'CIPOS': py_vcf.parser._Filter(\"CIPOS\", \"The CIPOS is greater than \" + str(NanoSV.opts_ci_flag)),\n 'CIEND': py_vcf.parser._Filter(\"CIEND\", \"The CIEND is greater than \" + str(NanoSV.opts_ci_flag)) ,\n 'LowQual': py_vcf.parser._Filter('LowQual','QUAL score is less than 20')\n }",
"def set_alt_header(vcf):\n vcf.alts = {\n 'DEL': py_vcf.parser._Alt(\"DEL\", \"Deletion\"),\n 'DUP': py_vcf.parser._Alt(\"DUP\", \"Duplication\"),\n 'BND': py_vcf.parser._Alt(\"BND\", \"Breakend\"),\n 'INS': py_vcf.parser._Alt(\"INS\", \"Insertion\")\n }",
"def __dbfHeader(self):\r\n f = self.__getFileObj(self.dbf)\r\n f.seek(0)\r\n version = 3\r\n year, month, day = time.localtime()[:3]\r\n year -= 1900\r\n # Remove deletion flag placeholder from fields\r\n for field in self.fields:\r\n if field[0].startswith(\"Deletion\"):\r\n self.fields.remove(field)\r\n numRecs = len(self.records)\r\n numFields = len(self.fields)\r\n headerLength = numFields * 32 + 33\r\n recordLength = sum([int(field[2]) for field in self.fields]) + 1\r\n header = pack('<BBBBLHH20x', version, year, month, day, numRecs,\r\n headerLength, recordLength)\r\n f.write(header)\r\n # Field descriptors\r\n for field in self.fields:\r\n name, fieldType, size, decimal = field\r\n name = name.replace(' ', '_')\r\n name = name.ljust(11).replace(' ', '\\x00')\r\n size = int(size)\r\n fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)\r\n f.write(fld)\r\n # Terminator\r\n f.write('\\r')",
"def FixRequestHeaderAndTrailer(self, cmd):\n req_params_len = len(cmd.request.fields)\n if cmd.tag == \"TPM_TAG_RQU_AUTH2_COMMAND\":\n req_params_len -= 10\n cmd.has_auth1 = True\n cmd.has_auth2 = True\n elif cmd.tag == \"TPM_TAG_RQU_AUTH1_COMMAND\":\n req_params_len -= 5\n cmd.has_auth1 = True\n # remove first three fields\n cmd.request.fields = self.FixFields(cmd.request.fields, req_params_len)\n cmd.has_ins = len(cmd.request.fields) > 0",
"def _normalize_csp_header(header):\n return {p.strip() for p in (header or \"\").split(\";\")}",
"def preprocess_df_headers(dataframe: pd.DataFrame):\n dataframe.columns = dataframe.columns.str.strip()\n dataframe.columns = dataframe.columns.str.replace(\" \", \"_\")\n dataframe.columns = dataframe.columns.str.replace(\"#\", \"NUM\")\n dataframe.columns = dataframe.columns.str.replace(\"/\", \"_\")\n dataframe.replace('', np.nan, inplace=True)",
"def _removeBeautifyHeader(self, requestBytes):\n requestInfo = self._helpers.analyzeRequest(requestBytes)\n headers = requestInfo.getHeaders()\n header = None\n for h in headers:\n if h.startswith('X-dotNet-Beautifier'):\n header = h\n break\n if header:\n headers.remove(header)\n return self._helpers.buildHttpMessage(headers, requestBytes[requestInfo.getBodyOffset():])",
"def convert_header(header):\n return pf.Header().fromstring(header, sep='\\n') if header else None",
"def set_original_header(nc_file: Dataset, ds: xr.Dataset): # dfile, datatype):\n # emulates the libcchdo behavior with having # and an extra end line\n comments = ds.attrs.get(\"comments\", \"\").splitlines()\n nc_file.ORIGINAL_HEADER = \"\\n\".join(\n [comments[0], *[f\"#{line}\" for line in comments[1:]], \"\"]\n )",
"def cull_fields(ps):\n purchases = remove_field(ps,'@Code')\n purchases = remove_field(purchases,'@ArticleID')\n purchases = remove_field(purchases,'@ArticleName')\n purchases = remove_field(purchases,'@CurrencyCode')\n purchases = remove_field(purchases,'@VAT')\n # Other fields that could conceivably be removed:\n # @ExternalID, @PurchaseStateName, some fields in PurchasePayUnit, maybe others\n\n # Filtering out a lot more fields to try to slim down the amount of data:\n #purchases = remove_field(purchases,'@PurchaseGuid')\n #purchases = remove_field(purchases,'@TerminalGuid')\n #purchases = remove_field(purchases,'@PurchaseDateUtc')#\n #purchases = remove_field(purchases,'@PayIntervalStartLocal')#\n #purchases = remove_field(purchases,'@PayIntervalStartUtc')#\n #purchases = remove_field(purchases,'@PayIntervalEndLocal')#\n #purchases = remove_field(purchases,'@PayIntervalEndUtc')#\n #purchases = remove_field(purchases,'@EndDateLocal')\n #purchases = remove_field(purchases,'@EndDateUtc')#\n #purchases = remove_field(purchases,'@PaymentServiceType')\n #purchases = remove_field(purchases,'@TicketNumber') # Commented out 2019-01-28\n #purchases = remove_field(purchases,'@TariffPackageID') # Commented out 2019-01-28\n #purchases = remove_field(purchases,'@ExternalID') # Commented out 2019-01-28\n #purchases = remove_field(purchases,'@PurchaseStateName')\n purchases = remove_field(purchases,'@PurchaseTriggerTypeName')\n #purchases = remove_field(purchases,'@PurchaseTypeName')#\n purchases = remove_field(purchases,'@MaskedPAN','PurchasePayUnit')\n purchases = remove_field(purchases,'@BankAuthorizationReference','PurchasePayUnit')\n purchases = remove_field(purchases,'@CardFeeAmount','PurchasePayUnit')\n purchases = remove_field(purchases,'@PayUnitID','PurchasePayUnit')\n #purchases = remove_field(purchases,'@TransactionReference','PurchasePayUnit')\n purchases = remove_field(purchases,'@CardIssuer','PurchasePayUnit')\n\n return purchases",
"def _validate_and_get_extra_fields(self, header_fields):\n if \"email\" not in header_fields:\n raise KeyError\n if \"name\" not in header_fields:\n raise KeyError\n\n extra_fields = header_fields\n extra_fields.remove(\"email\")\n extra_fields.remove(\"name\")\n return extra_fields"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function to get the sample name from the bam file
|
def get_sample_name( bamfile ):
header = bamfile.header
if 'RG' in header:
if type(header['RG']) is list:
return(header['RG'][0]['SM'])
else:
return(header['RG']['SM'])
return( False )
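A hedged usage sketch with pysam (the path is a placeholder; older pysam versions expose the header as a plain dict, which is what the function assumes — on newer pysam you may need bamfile.header.to_dict() instead):

import pysam

bamfile = pysam.AlignmentFile("sample.bam", "rb")  # placeholder path
sample_name = get_sample_name(bamfile)
if sample_name:
    print("Sample name from @RG: {}".format(sample_name))
else:
    print("No sample name found in the BAM header")
bamfile.close()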
|
[
"def get_sample_name_from_bam(bam):\n\n sample_name = str(subprocess.check_output(\"%s view -H %s | egrep '^@RG'\"%(samtools, bam), shell=True)).split(\"ID:\")[1].split(\"\\\\t\")[0]\n \n return sample_name",
"def strainName(self):\n basename = os.path.basename(self.bamfile)\n if basename.lower().endswith(\".bam\"):\n basename = basename[:-4]\n return basename",
"def get_sample_name(history_name):\n match = re.match('(?P<tag>\\S+)\\s+(?P<name>TNP-TMA\\S+)\\s',\n history_name, flags=re.I)\n name = match.group('name')\n tag = match.group('tag')\n\n rval = name + '__' + tag\n\n log.info(f\"Generate sample name `{rval}`.\")\n return rval",
"def bam_path(sample_id):\n return sample_annot.loc[sample_id, \"clean_bam_file_capture\"]",
"def get_sampleName_from_perSVade_outdir(perSVade_outdir, get_automatic_sampleName=True):\n\n # automatic\n if get_automatic_sampleName is True: return \"SAMPLE\"\n \n # based on the input\n if perSVade_outdir.endswith(\"/\"): name_sample = \"sampleX\"\n else: name_sample = get_file(perSVade_outdir)[0:10]\n if len(name_sample)==0: raise ValueError(\"name_sample should not be empty\")\n\n return name_sample",
"def get_name(self, i):\n over_sample_index = i // self.over_sample\n return self.files[over_sample_index]",
"def clip_bedfile_name(bedfile,filetype):\n if filetype == \"\":\n toolused = bedfile.split(\"/\")[-3]\n sample = bedfile.split(\"/\")[-2]\n else:\n toolused = filetype\n sample = bedfile.split(\"/\")[-1].split(\".\")[0].strip(\"_peaks\").strip(\"_broadpeaks\")\n return( toolused, sample )",
"def extract_sample_names(vcf_file):\n if vcf_file.lower().endswith(\".gz\"):\n opener = gzip.open\n else:\n opener = open\n sample_names = []\n with opener(vcf_file, \"rt\") as vcf:\n for line in vcf:\n line = line.strip(\"\\n\")\n if line.startswith(\"#CHROM\"):\n record = line.split(\"\\t\")\n sample_names = [record[i].replace(\"./\", \"\") for i in range(9, len(record))]\n break\n return sample_names",
"def test_extract_fastq_sample_name(self):\n filenames = [\n \"NA12345 - 4KC_S7_L001_R1_001.fastq.gz\",\n \"NA12345 - 4KC_S7_L001_R2_001.fastq.gz\",\n \"NA12345 - 4KC_S7_L002_R1_001.fastq.gz\",\n \"NA12345 - 4KC_S7_L002_R2_001.fastq.gz\",\n \"L2000552_S1_R1_001.fastq.gz\",\n \"L2000552_S1_R2_001.fastq.gz\",\n \"L1000555_S3_R1_001.fastq.gz\",\n \"L1000555_S3_R2_001.fastq.gz\",\n \"L1000555_S3_R3_001.fastq.gz\",\n \"L3000666_S7_R1_001.fastq.gz\",\n \"L4000888_S99_R1_001.fastq.gz\",\n \"L4000888_S3K_S99_R2_001.fastq.gz\",\n \"L4000888_SK_S99_I1_001.fastq.gz\",\n \"L400S888_S99_I2_001.fastq.gz\",\n \"L400S888_S5-9_S99_I2_001.fastq.gz\",\n \"PTC_TsqN999999_L9900001_S101_I2_001.fastq.gz\",\n \"PRJ111119_L1900000_S102_I2_001.fastq.gz\",\n \"MDX199999_L1999999_topup_S201_I2_001.fastq.gz\",\n ]\n\n for name in filenames:\n sample_name = fastq.extract_fastq_sample_name(name)\n logger.info((sample_name, name))\n self.assertTrue(\"_R\" not in sample_name)\n\n self.assertIsNone(fastq.extract_fastq_sample_name(\"L1999999_topup_R1_001.fastq.gz\"))",
"def fixture_sample_tag_name() -> str:\n return \"sample\"",
"def getID(bamFile,IDpath,sampleName):\n start = time.time()\n command = \"%s/samtools view -@ 12 %s \" %(samtoolsPath,bamFile)+\\\n \"| awk {'print $1'} > %s/%s.id.dat\" %(IDpath,sampleName)\n print sampleName+': '+command\n os.system(command)\n end = time.time() - start \n sys.stderr.write(sampleName+': Get IDs from mapped bam Used time %.3f min\\n' %(end/60))\n return 0",
"def get_generated_title(file_path, preset):\n return \"{0} {1} {2}\".format(get_file_shortname(file_path),\n preset[presets.IN_RANGE],\n preset[presets.OUT_RANGE])",
"def get_bam(sample, chr_num, outdir):\n \n # Creating ftp path to file\n path = '/vol1/ftp/phase3/data/{}/alignment/'.format(sample)\n fname = (('{}.chrom{}.ILLUMINA.bwa')\n .format(sample, str(chr_num)))\n \n # Downloading file\n download_data(path, 'bam', outdir, fname)\n \n return",
"def getBaseName(self):\n a = self._attr.split(\".\")\n b = a[1].split(\"[\")\n return b[0]",
"def readwavename(sfilename):\n f=open(sfilename)\n for line in f:\n if line[79]=='6':\n if 'wavename' in locals():\n wavename+=line[1:79].strip()\n else:\n wavename=line[1:79].strip()\n f.close()\n return wavename",
"def get_kaggle_sample_names(directory):\n \n # Retrieve all of the files in the directory.\n files = set(os.listdir(directory))\n \n # Each sample should have 1) a .bytes file and 2) a .asm file. Both files\n # should have the same name minus the extension. This name should be the md5\n # hash that is used to uniquely identify the file. In order to retrieve all\n # of the potential hashes in the directory, we can remove all of the file\n # extensions and filter out duplicate names.\n names = set(os.path.splitext(file)[0] for file in files)\n \n # Ensure each unique name has both an associated .bytes and associated\n # .asm file.\n names = list(name for name in names if ('%s.bytes' % name) in files \n and ('%s.asm' % name) in files)\n \n # Sort the names alphabetically.\n names.sort()\n \n return names",
"def name(self):\n return self.transcript_name",
"def sample_names(self):\n return self._sample_names",
"def test_get_title_prefix_genome(self):\n self.assertEqual(get_title_prefix(3, 'Blood'), 'genome sequencing of homo sapiens: whole blood: Sample ')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function to check a pileup read. Returns True if the read needs to be kept and returns False if the read can be skipped.
|
def check_pileupread( pileupread ):
if pileupread.alignment.is_duplicate:
return( False )
if pileupread.is_del:
return( False )
if pileupread.is_refskip:
return( False )
    # query_position can legitimately be 0, so test for None explicitly
    if pileupread.query_position is None:
        return( False )
if pileupread.alignment.mapq < args.mapq:
return( False )
if pileupread.alignment.query_qualities[pileupread.query_position] < args.base_phred_quality:
return( False )
return( True )
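A hedged usage sketch (path, region and thresholds are placeholders; args stands in for the module-level argparse namespace the function reads, so this only works as written if it runs in the same module as check_pileupread):

import argparse
import pysam

args = argparse.Namespace(mapq=20, base_phred_quality=13)    # stand-in for parsed options

bamfile = pysam.AlignmentFile("sample.bam", "rb")            # placeholder path
for pileupcolumn in bamfile.pileup("chr1", 100000, 100100):  # placeholder region
    kept = [pr for pr in pileupcolumn.pileups if check_pileupread(pr)]
    print(pileupcolumn.reference_pos, len(kept))
bamfile.close()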
|
[
"def read_ok(read):\n if any([ord(c)-33 < _BASE_QUAL_CUTOFF for c in list(read.qual)]):\n return False\n else:\n return True",
"def keep(self, read):\n\t\tif self.discard_trimmed and read.trimmed:\n\t\t\treturn False\n\t\tif self.discard_untrimmed and not read.trimmed:\n\t\t\treturn False\n\t\tif len(read.sequence) < self.minimum_length:\n\t\t\tself.too_short += 1\n\t\t\tif self.too_short_outfile is not None:\n\t\t\t\tread.write(self.too_short_outfile)\n\t\t\treturn False\n\t\telif len(read.sequence) > self.maximum_length:\n\t\t\tself.too_long += 1\n\t\t\tif self.too_long_outfile is not None:\n\t\t\t\tread.write(self.too_long_outfile)\n\t\t\treturn False\n\t\treturn True",
"def _run_successful(sample):\n # TODO Implement more thoroughly than just checking if file is empty\n return os.stat(sample.mature_readcount).st_size >= 0 and os.stat(sample.hairpin_readcount).st_size >= 0",
"def is_move_complete(self) -> bool:\n info = yield from self._get_stepper_info()\n if info is None:\n # read did fail\n return False\n return info['position'] == self._position",
"def can_read(self):\n if (hasattr(self._ser, \"in_waiting\")):\n return (self._ser.in_waiting >= 6)\n else:\n return (self._ser.inWaiting() >= 6)",
"def is_critical_aux(cardname, progress, discardpile):\n if progress[cardname[1]] >= int(cardname[0]):\n return False\n return discardpile.count(cardname) + 1 == SUIT_CONTENTS.count(cardname[0])",
"def check_skip(previous, percentile, slack):\n prev_9s = get_9s(previous)\n # print(f\"prev: {previous}, perc: {percentile}, prev_9s: {prev_9s}\")\n return percentile > previous + slack * (1 / (10 ** (prev_9s + 1)))",
"def is_pileup_line_edited(pileup_line: Pileup_line, read_thresh_hold=1, editing_min_percent_threshold=0.,\n editing_max_percent_threshold=100, noise_percent_threshold=100, const_tag='', editing_read_thresh=0):\n clean_str = pileup_line.clean_base_string\n sense_string = clean_str.replace(',', '.')\n # TODO before use assumed letters are only upper case. I fix this assumption here\n sense_string = sense_string.upper()\n\n # Finds the candidate nucleotide reads\n nucl_A, nucl_C, nucl_T, nucl_G = 0, 0, 0, 0\n nucl_changes = {\"A\": 0, \"C\": 0, \"G\": 0, \"T\": 0}\n\n for nucl in list(sense_string):\n if (nucl == 'A' or nucl == 'C' or nucl == 'G' or nucl == 'T'):\n nucl_changes[nucl] = nucl_changes[nucl] + 1\n else:\n continue\n # get the maximal nucleous change, key is the value of the dict\n (candidate_nucl, candidate_nucl_reads) = max(nucl_changes.items(), key=lambda x: x[1])\n\n if candidate_nucl_reads == 0:\n editing_percent = 0.0\n noise_percent = 0.0\n else:\n editing_percent = round(float(100 * (candidate_nucl_reads / pileup_line.base_count)), 3)\n # Find the noise reads\n # all ACTGs that are not candidate_nucl\n noise_reads = 0\n\n for nucl in list(pileup_line.reads_string.upper()):\n if (nucl != candidate_nucl) and (nucl in ['A', 'C', 'T', 'G']):\n noise_reads += 1\n noise_percent = round(float(100 * (noise_reads / pileup_line.base_count)), 3)\n if is_reads_threshold_match(pileup_line, read_thresh_hold) and (editing_percent >= editing_min_percent_threshold \n and editing_percent <= editing_max_percent_threshold) and (noise_percent <= noise_percent_threshold) and (\n editing_percent is not 0.0) and (candidate_nucl_reads >= editing_read_thresh):\n is_editing_site = True\n else:\n is_editing_site = False\n '''\n if (pileup_line._gene_pos == \"10517276\"):\n print(\"pileupline\",pileup_line, \"read_thresh_hold\",read_thresh_hold, \"editing_percent\",editing_percent, \n \"noise_percent\", noise_percent, \"candidate_nucl_reads\", candidate_nucl_reads)\n print(\"editing_min_percent_threshold\",editing_min_percent_threshold, \"editing_max_percent_threshold\",\n editing_max_percent_threshold, \"noise_percent_threshold\",noise_percent_threshold, \"editing_read_thresh\", editing_read_thresh)\n '''\n # change pileup line\n pileup_line.tags[\"editing_percent\"] = str(editing_percent)\n pileup_line.tags[\"noise_percent\"] = str(noise_percent)\n if (const_tag != '') and is_editing_site:\n pileup_line.tags[\"const_tag\"] = const_tag\n\n # calculate editing type\n if is_editing_site:\n #TODO patch change to make sure we dont get lowercase reference or nucl change\n editing_type = (pileup_line.reference.upper(), candidate_nucl.upper())\n #editing_type = (pileup_line.reference, candidate_nucl)\n else:\n editing_type = 'unchanged'\n\n return editing_type, pileup_line",
"def _is_junction_read(self, read):\n try:\n if len(read.cigartuples) < 3: # has >=1 intron\n return False\n if read.is_secondary:\n return False\n return True\n except:\n return False",
"def can_read(user, i):\n print(\"In can_read checking {} vs {}\".format(user, i))\n # Under our rules, any user who can write a file can also read it;\n # this check is done first because it doesn't need to hit the network.\n if can_write(user, i):\n return True\n\n # Some files are world-readable. We need the inode to know that.\n n = secfs.fs.get_inode(i)\n # Ex3-note: encryptfor is None if the file is world-readble.\n if not n.encryptfor:\n return True\n # Ex3-note: return false if our user is not listed in the readkey.\n # TODO: consider whether we really want to do this.\n if n.readkey and user in n.readkey:\n return True\n return False",
"def _loud_enough(self):\n return self.num_loud_chunks > self.min_loud_chunks",
"def check_bricks(main_pile, discard):\n\n if main_pile == 0:\n piles_tuple[0] = shuffle_bricks(discard)\n piles_tuple[1].append(piles_tuple[0][0])",
"def chunk_exists(self):\n name = \"%s%s%s\" % (self.filename,\n self.chunk_suffix,\n self.kwargs.get('resumableChunkNumber').zfill(4))\n\n #101706_8503e389gw1ejtv88yh6xj20c90up0wb.jpg_part_0001\n if not self.storage.exists(name):\n return False\n chunk_size = int(self.kwargs.get('resumableCurrentChunkSize'))\n return self.storage.size(name) == chunk_size",
"def close_shortReads_table_is_correct(close_shortReads_table):\n\n # if it is empty return false\n if file_is_empty(close_shortReads_table): return False\n\n # load as df\n close_shortReads_table_df = pd.read_csv(close_shortReads_table, sep=\"\\t\")\n\n # check that all the reads exist\n reads_files = set(close_shortReads_table_df[\"short_reads1\"]).union(close_shortReads_table_df[\"short_reads2\"])\n\n if any([file_is_empty(f) for f in reads_files]): return False\n else: return True",
"def check_file_validation(self):\r\n if self.snap is None:\r\n # file existence\r\n print('file for stock %s at date %s is not existed' % (self.code, self.date))\r\n return False\r\n elif self.snap.iloc[-1]['iTurnover'] == 0:\r\n # stock is traded or not\r\n print('stock %s has no trade record at date %s' % (self.code, self.date))\r\n return False\r\n else:\r\n return True",
"def check_piece_range(torr_handle, start_piece, end_piece):\n result = True\n for piece in xrange(start_piece, end_piece + 1):\n if not torr_handle.have_piece(piece):\n result = False\n break\n return result",
"def can_read(uri):\n\tif uri is None:\n\t\traise ValueError(\"Provided URI is None.\")\n\ttry:\n\t\tparsed = urllib.parse.urlparse(uri)\n\texcept ValueError: #Badly-formed IPv6 address.\n\t\treturn False #We don't care. We can only read locally anyway.\n\n\tif parsed.scheme != \"file\": #Can only read from file names.\n\t\treturn False\n\tif not parsed.path or parsed.path[-1] == \"/\": #Must have a file name, not a directory.\n\t\treturn False\n\treturn True",
"def filter_pileup_by_categories(pileup_filename, output, reads_threshold=None, any_change=None, editing_min_percent=None,\n editing_max_percent=None, noise_percent=None, editing_read_thresh=0):\n\n with open(pileup_filename) as pileup, open(output, \"w\") as out:\n for line in class_generator(Pileup_line, file=pileup):\n\n if reads_threshold is not None and is_reads_threshold_match(line,reads_threshold):\n out.write(str(line) + \"\\n\")\n\n if any_change:\n editing_type, pileup_line = is_pileup_line_edited(line)\n if editing_type != 'unchanged':\n out.write(str(line) + \"\\n\")\n\n if editing_min_percent is not None:\n if all([editing_max_percent, noise_percent]):\n editing_type, pileup_line = is_pileup_line_edited(line, editing_min_percent_threshold=editing_min_percent,\n editing_max_percent_threshold=editing_max_percent,\n noise_percent_threshold=noise_percent,\n editing_read_thresh=editing_read_thresh)\n elif editing_max_percent is not None:\n editing_type, pileup_line = is_pileup_line_edited(line, editing_min_percent_threshold=editing_min_percent, editing_max_percent_threshold=editing_max_percent, editing_read_thresh=editing_read_thresh)\n elif noise_percent_threshold is not None:\n editing_type, pileup_line = is_pileup_line_edited(line, editing_min_percent_threshold=editing_min_percent, noise_percent_threshold=noise_percent, editing_read_thresh=editing_read_thresh)\n else:\n editing_type, pileup_line = is_pileup_line_edited(line, editing_min_percent_threshold=editing_min_percent, editing_read_thresh=editing_read_thresh)\n\n if editing_type != 'unchanged':\n out.write(str(line) + \"\\n\")",
"def is_read(self):\n return self.read_at is not None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Function to check a record. Returns True if the record needs to be kept and returns False if the record can be skipped.
|
def check_record( record ):
if record.QUAL < args.QUAL:
return( False )
if record.FILTER:
return( False )
return( True )
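A hedged usage sketch with PyVCF (file name and threshold are placeholders; args stands in for the module-level argparse namespace read by check_record, so it must live in the same module):

import argparse
import vcf as pyvcf

args = argparse.Namespace(QUAL=30)               # stand-in for parsed options

vcf_reader = pyvcf.Reader(filename="input.vcf")  # placeholder path
kept_records = [record for record in vcf_reader if check_record(record)]
print("{} records kept".format(len(kept_records)))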
|
[
"def _check_record_ok(line):\n if 3 != len(line):\n logger.debug(\"Broken record: %s\", line)\n return False\n\n if line[1] is None or len(line[1]) == 0:\n logger.debug(\"Broken url: %s\", line)\n return False\n\n return True",
"def has_record(self) -> bool:\n # since accessing the 'record' property may be expensive, we try\n # to minimize the cost of this function by checking the\n # 'record_pid_id' first, which does not require joins\n if self.record_pid_id is None or self.record_pid is None:\n return False\n elif self.record is None:\n return False\n else:\n return True",
"def should_add_record(record):\n return not apps.get_model(record[\"model\"]).objects.filter(pk=record[\"pk\"]).exists()",
"def valid(records):\r\n return all(record.valid() for record in records)",
"def indel(record):\r\n\tfor i in record.ALT:\r\n\t\tif i:\r\n\t\t\tif len(i) > 1:\r\n\t\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\tif len(record.REF) >1:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def isRecordExist(self):\n self.createConn()\n sql = \"SELECT * FROM Story WHERE book1='{b1}' AND book2='{b2}' AND title ='{t}'\".format(b1=self.book1, b2=self.book2, t=self.title)\n self.c.execute(sql)\n data = self.c.fetchall()\n self.conn.close()\n if len(data) > 0:\n print('Record exist already, skip.')\n return True\n return False",
"def shouldFlush(self, record):\n return (len(self.buffer) >= self.capacity) or \\\n (record.levelno >= self.flushLevel) or \\\n (time.time() - self.last_flush > self.flush_period)",
"def account_id_valid(record, domain_obj):\n account_id = record['account_id']\n account_table = domain_obj.retrieve_records('accounts')\n details_in_database = False\n for account in account_table:\n if account['account_id'] == account_id and \\\n account['account_type'] in ['Client', 'Firm']:\n details_in_database = True\n break\n assert details_in_database",
"def isRecordExistSummary(self):\n self.createConn()\n sql = \"SELECT * FROM Summary WHERE book1='{b1}' AND book2='{b2}' \".format(b1=self.book1, b2=self.book2)\n self.c.execute(sql)\n data = self.c.fetchall()\n self.conn.close()\n if len(data) > 0:\n print('Record exist already, skip.')\n return True\n return False",
"def validate_record(data: dict):\n _LOGGER.debug(\"Validating DNS Record for %s\", data['domain_url'])\n new_ip_address = None\n try:\n current_ip_address = socket.gethostbyname(data['domain_url'])\n except socket.gaierror as e:\n _LOGGER.error(\"Error resolving hostname: %s\", e)\n else:\n new_ip_address = check_external_ip(current_ip_address)\n if new_ip_address is not None:\n return change_record(data, new_ip_address, current_ip_address)\n _LOGGER.debug(\"DNS Record is up-to-date\")\n return (True,)",
"def has_stockrecord(self):\n try:\n self.stockrecord\n return True\n except ObjectDoesNotExist:\n return False",
"def checker(self, account_num):\n try:\n self.db.loc[account_num]\n return True\n except KeyError:\n return False",
"def is_this_record_exist(table, id_):\n if id_[0] not in [record[0] for record in table]:\n\n ui.print_error_message(\"Record with this ID not found\")\n return False\n return True",
"def test_record_is_present_in_result(self):\n\n given = {\"A\": None}\n\n expected = False\n actual = self.dns_lookup.is_record_present_in_result(\"A\", given)\n\n self.assertEqual(expected, actual)\n\n actual = self.dns_lookup.is_record_present_in_result([\"A\", \"CNAME\"], given)\n\n self.assertEqual(expected, actual)\n\n expected = True\n actual = self.dns_lookup.is_record_present_in_result(\n \"A\", given, allow_empty=True\n )\n\n actual = self.dns_lookup.is_record_present_in_result(\n [\"A\", \"CNAME\"], given, allow_empty=True\n )\n\n self.assertEqual(expected, actual)\n\n given[\"A\"] = []\n\n actual = self.dns_lookup.is_record_present_in_result(\"A\", given)\n expected = False\n\n self.assertEqual(expected, actual)\n\n expected = True\n actual = self.dns_lookup.is_record_present_in_result(\n \"A\", given, allow_empty=True\n )\n\n self.assertEqual(expected, actual)",
"def validate(self):\n return self._record.validate(self._data)",
"def test_records_are_recorded(self):\n mutation_record = MutationRecord(\n 'foo', 'foo.py', 'operator',\n {'description': 'desc',\n 'line_number': 3},\n None)\n test_result = TestResult(Outcome.KILLED, 'ok')\n\n self._logger.handle_result(mutation_record, test_result)\n self._stop_logger()\n self._file_like.flush()\n self._file_like.seek(0)\n\n self.assertGreater(len(self._file_like.read()), 0)",
"def check(records):\n return [check_record(record) for record in records]",
"def check_file_validation(self):\r\n if self.snap is None:\r\n # file existence\r\n print('file for stock %s at date %s is not existed' % (self.code, self.date))\r\n return False\r\n elif self.snap.iloc[-1]['iTurnover'] == 0:\r\n # stock is traded or not\r\n print('stock %s has no trade record at date %s' % (self.code, self.date))\r\n return False\r\n else:\r\n return True",
"def filter(self, record: logging.LogRecord) -> bool:\n return record.levelno > self.level"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructor of the class. This method initializes the attributes needed to run the algebraic version of the PageRank algorithm. It uses `tfgraph.TransitionMatrix` as the transition matrix.
|
def __init__(self, sess: tf.Session, name: str, graph: Graph,
beta: float, writer: tf.summary.FileWriter = None,
is_sparse: bool = False) -> None:
name = name + "_alg"
T = TransitionMatrix(sess, name, graph)
PageRank.__init__(self, sess, name, beta, T, writer, is_sparse)
|
[
"def __init__(self, alphabet, states, metric_function, reduced=False):\n self.states = states\n self.alphabet = alphabet\n self.previous_states = []\n self.next_states = []\n self.metric_function = metric_function\n self.setup_trellis(reduced)",
"def __init__(self, prior, transition):\n self.prior = prior\n self.transition = transition\n self.new_w_vec = np.array([])\n self.new_theta_vec = np.array([])\n self.new_num = 0",
"def __init__(self, matrix, neighbor_function, weight_function):\n self.lattice = matrix\n self.row_dim = len(self.lattice)\n self.col_dim = len(self.lattice[0])\n self.neighbor_function = neighbor_function\n self.weight_function = weight_function\n self.consistency_check()\n self.build_adjacency_list()",
"def initialize(self):\n # setting the seed\n #pdb.set_trace()\n\n # create tf session\n self.sess = tf.Session()\n\n # tensorboard stuff\n self.add_summary()\n # initiliaze all variables\n init = tf.global_variables_initializer()\n self.sess.run(init)\n\n if self.config.use_baseline:\n self.baseline_network.set_session(self.sess)",
"def __init__(self, total_memory, ua, page_size, ptable, ftable, memmap):\n self.pg_size = page_size\n self.ua = ua\n self.total_memory = total_memory\n self.free_memory = total_memory\n self.used_memory = 0\n # ('L' or 'P', position, quantity)\n self.memmap = memmap\n self.pages_table = ptable\n self.frames_table = ftable\n self.id = 0",
"def __init__(self, nodes, probability):\n self.nodes = nodes\n self.probability = probability\n super().__init__()",
"def __init__(self, imatrix):\n self.imatrix = imatrix\n p, n = imatrix.shape\n num = imatrix.max()\n theta0 = np.random.rand(num)\n self._pid = PIdentity(p, n)\n TemplateStrategy.__init__(self, p+n, n, theta0)\n # should call set_params",
"def init_input(self):\n m1 = np.diagflat([-1] * (self.n - 1), -1)\n m2 = np.diagflat([-1] * (self.n - 1), 1)\n m3 = np.diagflat([self.gamma] * self.n)\n self.A = np.matrix((m1 + m2 + m3).astype(np.double))\n\n self.b = np.matrix(\n np.full((self.n, 1), self.gamma - 2).astype(np.double)\n )\n self.b[0] = self.gamma - 1\n self.b[self.n - 1] = self.gamma - 1\n\n self.x0 = np.matrix(\n np.full((self.n, 1), 0).astype(np.double)\n )",
"def __init__(self):\n\n # Initialize the parent class\n super(LearningSwitch, self).__init__()\n\n # initialize the forwarding table to empty.\n # This may need to be updated if a different topology is used.\n self.fwd_table = {}\n self.fwd_table['1'] = {}\n self.fwd_table['2'] = {}\n self.fwd_table['3'] = {}\n self.fwd_table['4'] = {}\n self.fwd_table['5'] = {}\n\n # only use one flood instance - this is the default policy\n self.flood = flood()\n\n # get the first packet from each new MAC address on a switch\n new_pkts = packets(1, ['srcmac', 'switch'])\n new_pkts.register_callback(self.learn_route)\n self.query = new_pkts\n\n # Initialize the policy\n self.push_rules()",
"def __init__(self, **kwargs):\n super().__init__()\n self.activation=torch.nn.Tanh()\n self.layers=torch.nn.ModuleList()\n\n self.set_hyperparams(**kwargs)\n num_units=self.hypers['num_units']\n num_layers=self.hypers['num_layers']\n in_dim=self.hypers['in_dim']\n out_dim=self.hypers['out_dim']\n self.set_reynolds_stress_fn()\n\n activation=self.hypers['activation']\n if activation == 'swish':\n self.activation=utils.Swish()\n\n # build architecture\n self.layers.append(torch.nn.Linear(in_dim, num_units)) # input layer\n for i in range(num_layers):\n self.layers.append(torch.nn.Linear(num_units, num_units)) # hidden layer\n self.layers.append(torch.nn.Linear(num_units, out_dim)) # output layer",
"def __init__(self, **kwargs):\n if not hasattr(self, \"graph\"):\n self.graph = -np.ones(self.keypoints_shape[0])\n if not hasattr(self, \"swap_index\"):\n self.swap_index = -np.ones(self.keypoints_shape[0])\n return",
"def __init__(self, original_nlp, primals_ordering):\n super(ProjectedNLP, self).__init__(original_nlp)\n self._primals_ordering = list(primals_ordering)\n self._original_idxs = None\n self._projected_idxs = None\n self._generate_maps()\n self._projected_primals = self.init_primals()\n self._jacobian_nz_mask = None\n self._hessian_nz_mask = None\n self._nnz_jacobian = None\n self._nnz_hessian_lag = None",
"def __init__(self, initial_states, rule_code):\n self._cells = [Cell(s) for s in initial_states]\n self.transition_function = self.wolfram_rule_code(rule_code)",
"def __init__(self, DFA):\n self.transtable, self.START_STATE, self.STOP_STATES = DFA",
"def __init__(self):\n self.G = nx.Graph()\n self.node_attr_dfs = dict()\n self.unique_relations = set()\n self.node_types = dict()\n self.normalized_node_id_map = dict()\n self.train_edges = list()\n self.valid_edges = list()\n self.test_edges = list()\n self.relation_to_id = dict()\n self.id_to_relation = dict()\n self.nodeid2rowid = dict()\n self.rowid2nodeid = dict()\n self.rowid2vocabid = dict()",
"def init(self):\n super().init()\n self.relative_poses = []\n self._iter = 0\n\n # Load the parameters of the model from the config\n state_dict = torch.load(self.checkpoint_path)\n self.prediction_module.load_state_dict(state_dict[\"prediction_module\"])",
"def __init__(self, atom: str, alphabet: list, states: list, initial_state: str, final_states: list, transitions: dict):\n self.atom = atom\n self.alphabet = alphabet\n self.states = states\n self.initial_state = initial_state\n self.final_states = final_states\n self.transitions = transitions",
"def __init__(self):\n self.action_space = [(0, 0)] + list(permutations([i for i in range(m)], 2))\n self.action_space = [list(i) for i in self.action_space]\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()",
"def __init__(self, adjacency_matrix=None, distance_matrix=None, input_type='dense', node_labels=None):\n # Type of input and name for nodes\n self.input_type = input_type\n self.node_labels = node_labels\n\n # Graph matrices storing data\n self.adjacency_matrix = adjacency_matrix\n self.distance_matrix = distance_matrix\n\n # Attributes\n self.nnodes = 0\n self.nedges = 0\n self.ncommunities = 0\n self.node_dict = {}\n self.nodeID_list = []\n self.modularity = 0\n self.community_dict = {}\n # Total weight of link in the network\n self.m = 0\n\n # If input is dense\n if self.input_type == 'dense':\n self.build_graph_from_dense(self.adjacency_matrix, self.distance_matrix)\n elif self.input_type == 'sparse_connectivities':\n # Check dim to see if sparse\n if self.adjacency_matrix.shape[0] == self.adjacency_matrix.shape[1]:\n self.build_graph_from_sparse(self.adjacency_matrix)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The admin URL set has to be updated, or all newly registered models will be shown as disabled in the admin area.
|
def update_admin_urls():
# Delete the old admin URLs
old_pattern = None
admin_regex = r'^admin/'
project_urls = import_module(settings.ROOT_URLCONF)
for url_item in project_urls.urlpatterns:
try:
if url_item.app_name == 'admin':
old_pattern = url_item
admin_regex = url_item.regex.pattern
project_urls.urlpatterns.remove(url_item)
break
except AttributeError:
# Bypass the non-admin URLconf
logger.error('Error when finding and removing old admin URLconf.')
# Reload updated admin URLs
try:
admin.autodiscover()
project_urls.urlpatterns.append(
url(admin_regex, include(admin.site.urls))
)
    except Exception:
logger.error('Error when updating new admin URLconfs.')
if old_pattern:
project_urls.urlpatterns.append(old_pattern)
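A hedged usage sketch (model and module names are hypothetical): after registering a model at runtime, calling update_admin_urls() rebuilds the admin URLconf so the new model is no longer shown as disabled:

from django.contrib import admin

from myapp.models import Article  # hypothetical model registered at runtime

admin.site.register(Article)
update_admin_urls()  # refresh the admin URL patterns so Article shows up enabled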
|
[
"def get_admin_url(self, obj):\n info = (self.opts.app_label, self.opts.model_name)\n return reverse('admin:%s_%s_changelist' % info)",
"def getAdminView(self):\n return include(admin.site.urls)",
"def get_admin_urls_for_registration(self):\n urls = ()\n for instance in self.modeladmin_instances:\n urls += instance.get_admin_urls_for_registration()\n return urls",
"def get_urls(self):\n urls = super(TreeAdmin, self).get_urls()\n new_urls = patterns('',\n url('^move/$',\n self.admin_site.admin_view(self.move_node),),\n )\n return new_urls + urls",
"def get_urls(self):\n\n urls = super().get_urls()\n\n def wrap(view):\n def wrapper(*args, **kwargs):\n return self.admin_site.admin_view(view)(*args, **kwargs)\n return update_wrapper(wrapper, view)\n\n return patterns('',\n url(r'^(.+)/upvote/$', wrap(self.upvote_view), name=\"{}_{}_upvote\".format(self.model._meta.app_label, self.model._meta.model_name)),\n url(r'^(.+)/downvote/$', wrap(self.downvote_view), name=\"{}_{}_downvote\".format(self.model._meta.app_label, self.model._meta.model_name)),\n ) + urls",
"def getAdminUrl(self):\n if hasattr(settings,\"ADMIN_URL\"):\n return settings.ADMIN_URL",
"def get_urls(self):\n urls = super(EventAdmin, self).get_urls()\n extra_urls = patterns(\"\",\n url(\"^(?P<event_id>\\d+)/codes/$\",\n self.admin_site.admin_view(self.create_codes),\n name=\"create_codes\"),\n )\n return extra_urls + urls",
"def admin_change_url(obj) -> str:\n app_label = obj._meta.app_label\n model_name = obj._meta.model.__name__.lower()\n return reverse(f'admin:{app_label}_{model_name}_change', args=(obj.pk,))",
"def handle_admin_classes(self, cms_config):\n replace_admin_for_models(\n [versionable.content_model for versionable in cms_config.versioning],\n )",
"def get_admin_url(self):\n\n content_type = ContentType.objects.get_for_model(self)\n edit_url = urlresolvers.reverse(\n \"admin:fs_doc_\" + content_type.model + \"_change\",\n args=(self.id,))\n return edit_url",
"def _get_urls(func):\n def inner(self, *args, **kwargs):\n url_list = func(self, *args, **kwargs)\n info = self.model._meta.app_label, self.model._meta.model_name\n url_list.insert(0, url(\n r'^(.+)/unlock/$',\n self.admin_site.admin_view(self._unlock_view),\n name='{}_{}_unlock'.format(*info),\n ))\n return url_list\n return inner",
"def admin_import(self, request, queryset):\n pass",
"def get_urls(self):\n urls = super(ExportableAdmin, self).get_urls()\n app, mod = self.model._meta.app_label, self.model._meta.module_name\n # make a URL pattern for each export format\n new_urls = [\n url(\n r'^export/%s$' % format_name.lower(),\n self.admin_site.admin_view(self.changelist_view),\n name=\"%s_%s_export_%s\" % (app, mod, format_name.lower()),\n kwargs={'extra_context':{'export_delimiter':delimiter}},\n )\n for format_name, delimiter in self.export_formats\n ]\n for export_type in self.export_types:\n new_urls.append(url(\n r'^export/%s$' % export_type.lower(),\n self.admin_site.admin_view(self.changelist_view),\n name=\"%s_%s_export_%s\" % (app, mod, export_type.lower()),\n kwargs={'extra_context':{'export_type':export_type.lower()}},\n ))\n my_urls = patterns('', *new_urls)\n return my_urls + urls",
"def church_admin(self):",
"def admin(request):\n context_dict = {}\n context_dict['modules'] = []\n modules = Module.objects.all()\n\n for m in modules:\n context_dict['modules'].append(m)\n\n return render(request, 'lfs/modify/admin.html', context_dict)",
"def test_otoroshi_controllers_adminapi_users_controller_update_admin(self):\n pass",
"def urls(cls):\n cradmin_instance_registry.add(cls)\n urls = cls._get_app_urls()\n if not cls.__no_role_and_flatten_rolefrontpage_url():\n urls.append(re_path('^$', cls.get_instance_frontpage_view(),\n name='{}-frontpage'.format(cls.id)))\n\n return urls",
"def admin_view(self, *args, **kwargs):\n wrapped = super(AdminSite, self).admin_view(*args, **kwargs)\n return xact.xact(wrapped)",
"def test_admin_urls_are_configured():\n assert reverse(\"admin:index\") == \"/admin/\", \"admin urls are not configured\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test case for humangenes_get
|
def test_humangenes_get(self):
pass
|
[
"def test_humangenes_id_get(self):\n pass",
"def test_get_all_histories_using_get(self):\n pass",
"def test_uniformity_values(self, known_HU_dict):\n self.cbct.analyze()\n for key, roi in self.cbct.UN.ROIs.items():\n exp_val = known_HU_dict[key]\n meas_val = roi.pixel_value\n self.assertAlmostEqual(exp_val, meas_val, delta=5)",
"def test_musicals_get(self):\n pass",
"def test_mousegenes_get(self):\n pass",
"def test_heavyhitters_init_ce(self):\n hh1 = HeavyHitters(num_hitters=1000, confidence=0.96875, error_rate=0.002)\n self.assertEqual(hh1.width, 1000)\n self.assertEqual(hh1.depth, 5)\n self.assertEqual(hh1.confidence, 0.96875)\n self.assertEqual(hh1.error_rate, 0.002)\n self.assertEqual(hh1.elements_added, 0)\n self.assertEqual(hh1.heavy_hitters, dict())\n self.assertEqual(hh1.number_heavy_hitters, 1000)",
"def hetero_euchro_measures(regionmask: np.ndarray, intensity: np.ndarray, alpha: float = 1.0):\n high, low = np.percentile(intensity[regionmask], q=(80, 20))\n hc = np.mean(intensity[regionmask]) + (alpha * np.std(intensity[regionmask]))\n\n feat = {\n \"i80_i20\": high / low,\n \"nhigh_nlow\": np.sum(intensity[regionmask] >= high)/ np.sum(intensity[regionmask] <= low),\n \"hc_area_ec_area\": np.sum(intensity[regionmask] >= hc) / np.sum(intensity[regionmask] < hc),\n \"hc_area_nuc_area\": np.sum(intensity[regionmask] >= hc) / np.sum(intensity[regionmask] > 0),\n \"hc_content_ec_content\": np.sum(np.where(intensity[regionmask] >= hc, intensity[regionmask], 0))\n / np.sum(np.where(intensity[regionmask] < hc, intensity[regionmask], 0)),\n \"hc_content_dna_content\": np.sum(np.where(intensity[regionmask] >= hc, intensity[regionmask], 0))\n / np.sum(np.where(intensity[regionmask] > 0, intensity[regionmask], 0))\n\n }\n return feat",
"def observed_stat(heroes):\n\n return ...",
"def test_greenalgas_get(self):\n pass",
"def test_user_get_heatmap_data(self):\n pass",
"def test_hp(pakuri: Pakuri):\n EXPECTED = get_hp(name=pakuri.name, species=pakuri.species, level=pakuri.level)\n print(pakuri.hp)\n print(EXPECTED)\n assert pakuri.hp == EXPECTED",
"def test_device_readings_get_humidity(self):\n\n #If we make a valid request\n request = self.client().get('/devices/{}/readings/'.format(self.device_uuid), data=\n json.dumps({\n 'type': 'humidity',\n }))\n\n #Then we should receive a 200\n self.assertEqual(request.status_code, 200)\n\n #And receive 2 values\n self.assertEqual(len(json.loads(request.data)), 2)",
"def test_measure_intensity(self):\n result = {}\n result[\"result_bit\"] = True\n result[\"result_details\"] = []\n\n current_phase = self.bl_hwobj.diffractometer_hwobj.current_phase\n\n # 1. close guillotine and fast shutter -------------------------------\n self.bl_hwobj.collect_hwobj.close_guillotine(wait=True)\n self.bl_hwobj.fast_shutter_hwobj.closeShutter(wait=True)\n gevent.sleep(0.1)\n\n #2. move back light in, check beamstop position ----------------------\n self.bl_hwobj.back_light_hwobj.move_in()\n\n beamstop_position = self.bl_hwobj.beamstop_hwobj.get_position()\n if beamstop_position == \"BEAM\":\n self.bl_hwobj.beamstop_hwobj.set_position(\"OFF\")\n self.bl_hwobj.diffractometer_hwobj.wait_device_ready(30)\n\n #3. check scintillator position --------------------------------------\n scintillator_position = self.bl_hwobj.\\\n diffractometer_hwobj.get_scintillator_position()\n if scintillator_position == \"SCINTILLATOR\":\n #TODO add state change when scintillator position changed\n self.bl_hwobj.diffractometer_hwobj.\\\n set_scintillator_position(\"PHOTODIODE\")\n gevent.sleep(1)\n self.bl_hwobj.diffractometer_hwobj.\\\n wait_device_ready(30)\n\n #5. open the fast shutter --------------------------------------------\n self.bl_hwobj.fast_shutter_hwobj.openShutter(wait=True)\n gevent.sleep(0.3)\n\n #6. measure mean intensity\n self.ampl_chan_index = 0\n\n if True:\n intens_value = self.chan_intens_mean.getValue()\n intens_range_now = self.chan_intens_range.getValue()\n for intens_range in self.intensity_ranges:\n if intens_range['index'] is intens_range_now:\n self.intensity_value = intens_value[self.ampl_chan_index] - \\\n intens_range['offset']\n break\n\n #7. close the fast shutter -------------------------------------------\n self.bl_hwobj.fast_shutter_hwobj.closeShutter(wait=True)\n\n # 7/7 set back original phase ----------------------------------------\n self.bl_hwobj.diffractometer_hwobj.set_phase(current_phase)\n\n #8. 
Calculate --------------------------------------------------------\n energy = self.bl_hwobj._get_energy()\n detector_distance = self.bl_hwobj.detector_hwobj.get_distance()\n beam_size = self.bl_hwobj.collect_hwobj.get_beam_size()\n transmission = self.bl_hwobj.transmission_hwobj.getAttFactor()\n\n result[\"result_details\"].append(\"Energy: %.4f keV<br>\" % energy)\n result[\"result_details\"].append(\"Detector distance: %.2f mm<br>\" % \\\n detector_distance)\n result[\"result_details\"].append(\"Beam size %.2f x %.2f mm<br>\" % \\\n (beam_size[0], beam_size[1]))\n result[\"result_details\"].append(\"Transmission %.2f%%<br><br>\" % \\\n transmission)\n\n meas_item = [datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n \"%.4f\" % energy,\n \"%.2f\" % detector_distance,\n \"%.2f x %.2f\" % (beam_size[0], beam_size[1]),\n \"%.2f\" % transmission]\n\n air_trsm = numpy.exp(-self.air_absorption_coeff_per_meter(energy) * \\\n detector_distance / 1000.0)\n carb_trsm = self.carbon_window_transmission(energy)\n flux = 0.624151 * 1e16 * self.intensity_value / \\\n self.diode_calibration_amp_per_watt(energy) / \\\n energy / air_trsm / carb_trsm\n\n #GB correcting diode misscalibration!!!\n flux = flux * 1.8\n\n dose_rate = 1e-3 * 1e-14 * self.dose_rate_per_10to14_ph_per_mmsq(energy) * \\\n flux / beam_size[0] / beam_size[1]\n\n self.bl_hwobj.collect_hwobj.machine_info_hwobj.\\\n set_flux(flux, self.bl_hwobj.beam_info_hwobj.get_beam_info())\n\n msg = \"Intensity = %1.1e A\" % self.intensity_value\n result[\"result_details\"].append(msg + \"<br>\")\n logging.getLogger(\"user_level_log\").info(msg)\n result[\"result_short\"] = msg\n meas_item.append(\"%1.1e\" % self.intensity_value)\n\n msg = \"Flux = %1.1e photon/s\" % flux\n result[\"result_details\"].append(msg + \"<br>\")\n logging.getLogger(\"user_level_log\").info(msg)\n result[\"result_short\"] = msg\n meas_item.append(\"%1.1e\" % flux)\n\n msg = \"Dose rate = %1.1e KGy/s\" % dose_rate\n result[\"result_details\"].append(msg + \"<br>\")\n logging.getLogger(\"user_level_log\").info(msg)\n meas_item.append(\"%1.1e\" % dose_rate)\n\n msg = \"Time to reach 20 MGy = %d s = %d frames \" % \\\n (20000. / dose_rate, int(25 * 20000. / dose_rate))\n result[\"result_details\"].append(msg + \"<br><br>\")\n logging.getLogger(\"user_level_log\").info(msg)\n meas_item.append(\"%d, %d frames\" % \\\n (20000. / dose_rate, int(25 * 20000. / dose_rate)))\n\n self.intensity_measurements.insert(0, meas_item)\n result[\"result_details\"].extend(SimpleHTML.create_table(\\\n [\"Time\", \"Energy (keV)\", \"Detector distance (mm)\",\n \"Beam size (mm)\", \"Transmission (%%)\", \"Intensity (A)\",\n \"Flux (photons/s)\", \"Dose rate (KGy/s)\",\n \"Time to reach 20 MGy (sec, frames)\"],\n self.intensity_measurements))\n\n self.ready_event.set()\n\n return result",
"def get_anomaly_detection():",
"def test_inst_hr():\n from inst_hr import inst_hr\n peaks = test_find_max_peaks()\n\n hr = inst_hr(peaks, update_time=3) # in bpm\n assert hr == [100]",
"def test_get_recovery(self):\n u = np.random.rand(12, 17, 73, 144)\n up, uinfo = prep_data(u, 'tzyx')\n ur1 = recover_data(up, uinfo)\n recover = get_recovery(uinfo)\n ur2 = recover(up)\n err = error(ur1, ur2)\n assert_almost_equal(err, 0.)",
"def test_hist_val_check():\n hist = histogram(\"macuziywlbapodgevujnskptruz\")\n assert hist_val_check(hist,1,3) == 1",
"def test_heavyhitters_init_wd(self):\n hh1 = HeavyHitters(num_hitters=1000, width=1000, depth=5)\n self.assertEqual(hh1.width, 1000)\n self.assertEqual(hh1.depth, 5)\n self.assertEqual(hh1.confidence, 0.96875)\n self.assertEqual(hh1.error_rate, 0.002)\n self.assertEqual(hh1.elements_added, 0)\n self.assertEqual(hh1.heavy_hitters, dict())\n self.assertEqual(hh1.number_heavy_hitters, 1000)",
"def get_temp_hum_status(self):\n\n readings = []\n self.dht11.measure()\n time.sleep(0.15)\n readings.append(self.dht11.temperature())\n time.sleep(0.01)\n readings.append(self.dht11.humidity())\n gc.collect()\n return readings"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test case for humangenes_id_get
|
def test_humangenes_id_get(self):
pass
|
[
"def test_greenalgas_id_get(self):\n pass",
"def test_musicals_id_get(self):\n pass",
"def test_mousegenes_id_get(self):\n pass",
"def test_vicars_id_get(self):\n pass",
"def test_v1_supervision_identities_id_get(self):\n pass",
"def test_murderers_id_get(self):\n pass",
"def test_wineregions_id_get(self):\n pass",
"def test_chores_chore_id_get(self):\n pass",
"def test_cyclingcompetitions_id_get(self):\n pass",
"def test_administrativeregions_id_get(self):\n pass",
"def test_cultivatedvarietys_id_get(self):\n pass",
"def test_bridges_id_get(self):\n pass",
"def test_penaltyshootouts_id_get(self):\n pass",
"def test_windmills_id_get(self):\n pass",
"def test_bullfighters_id_get(self):\n pass",
"def test_gardens_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/gardens/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_humangenes_get(self):\n pass",
"def test_ethnicgroups_id_get(self):\n pass",
"def test_germansettlements_id_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get XML File from ZIP.
|
def getXmlFileFromZip(file_path: str, zipfile_path: str):
    # zf is assumed to be the zipfile module (e.g. `import zipfile as zf`) and lxml.etree to be imported.
    zipfile = zf.ZipFile(zipfile_path)
    file_string = zipfile.read(file_path)
    file_xml = lxml.etree.fromstring(file_string)
    return file_xml
|
[
"def extract_from_zip(self, url):\n try:\n r = requests.get(url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n for name in z.namelist():\n log.info(f\"XML extracted from zip file is {name}\")\n self.extracted_xml_name = name\n ex_file = z.open(name) # this is a file like object\n content = ex_file.read()\n return etree.fromstring(content)\n except Exception:\n log.error(tb.format_exc())\n raise ZipFileError",
"def read_contents(self, filename):\n self.zipfile = zipfile.ZipFile(filename)\n xml_content = self.zipfile.read('word/document.xml')\n return etree.fromstring(xml_content)",
"def zipfile(self):\n ...",
"def unzip_document(self, filename):\n mode = \"r\"\n\n tmp_dir = self.get_tmp_dir()\n\n if tmp_dir:\n full_filename = tmp_dir + os.sep + filename\n else:\n full_filename = filename\n\n z = zipfile.ZipFile(full_filename)\n\n new_filename = None\n new_document = None\n\n for f in z.namelist():\n z.extract(f, tmp_dir)\n new_filename = f\n\n # Handle single or multiple files as zip contents\n if len(z.namelist()) == 1:\n # A single file inside\n new_document = new_filename\n elif len(z.namelist()) > 1:\n # Multiple files inside\n if new_document is None:\n new_document = []\n new_document.append(new_filename)\n\n z.close()\n\n return new_document",
"def get_xml_from_link(xml_link):\n first_xml = urlopen(xml_link)\n # Unzip XML file\n zipfile = ZipFile(BytesIO(first_xml.read()))\n # Get XML contents\n xml_string = zipfile.read(zipfile.namelist()[len(zipfile.namelist())-1]).decode(\"utf_8\")\n return xml_string",
"def getFileFromZip(file_path: str, zipfile_path: str) -> bytes:\n zipfile = zf.ZipFile(zipfile_path)\n file = zipfile.read(file_path)\n return file",
"def fetch_zip( filename, local_dir='.'):\n\n url = posixpath.join( URL_FILE_STORE, filename)\n path = os.path.join( local_dir, filename)\n if not os.path.exists( local_dir):\n os.makedirs( local_dir)\n\n if not os.path.exists(path):\n urllib.urlretrieve(url, path)\n\n Zip = zipfile.ZipFile(path)\n Zip.extractall(local_dir)\n return",
"def extractMetadata(filename):\n zf = ZipFile(filename)\n metadataFile = filter(lambda x: x.endswith('metadata.txt'), zf.namelist())[0]\n metadata = zf.open(metadataFile)\n\n config = ConfigParser.ConfigParser()\n config.readfp(metadata)\n\n root = etree.Element('pyqgis_plugin',\n version = config.get('general', 'version'),\n name = config.get('general', 'name'))\n\n \n values = [ ('description', 'description'),\n ('version', 'version'),\n ('qgisMinimumVersion', 'qgis_minimum_version'),\n ('qgisMaximumVersion', 'qgis_maximum_version'),\n ('author', 'author_name'),\n ('homepage', 'homepage')]\n\n for (mtd, xml) in values:\n attribute = etree.SubElement(root, xml)\n if config.has_option('general', mtd):\n attribute.text = config.get('general', mtd).decode('utf-8')\n\n download = etree.SubElement(root, 'download_url')\n download.text = os.path.join(repoURL, 'plugins', os.path.basename(filename))\n \n md5_sum = etree.SubElement(root, 'md5_sum')\n md5_sum.text = md5(filename)\n\n file_name = etree.SubElement(root, 'file_name')\n file_name.text = os.path.basename(filename)\n\n return root",
"def read_xml_file(self):\r\n\r\n #Find the root of xml tree.\r\n xml_tree = ET.parse(self.xml_file_path + \"pic{}.xml\".format(self.file_index))\r\n root = xml_tree.getroot()\r\n\r\n return root",
"def open_xml_file(\n self, filename_xml: str\n ) -> Union[gzip.GzipFile, BinaryIO]:\n\n msg_fmt = \"Opening XML file '{0}'\".format(filename_xml)\n self.logger.info(msg=msg_fmt)\n\n if filename_xml.endswith(\".gz\"):\n file_xml = gzip.GzipFile(filename=filename_xml, mode=\"rb\")\n else:\n file_xml = open(filename_xml, \"rb\")\n\n return file_xml",
"def get_tree_from_folder(folder_path):\n with zipfile.ZipFile(folder_path, \"r\") as zip_file:\n #access zipped file\n geogebra_xml = zip_file.extract(\"geogebra.xml\")\n return ET.parse(geogebra_xml)",
"def getArchive( self ):\n self._archive.close()\n return self._archive_stream.getvalue()",
"def process_demo_package(self):\n # TODO: Move to zip file field?\n\n # Derive a directory name from the zip filename, clean up any existing\n # directory before unpacking.\n new_root_dir = self.demo_package.path.replace('.zip','')\n if isdir(new_root_dir):\n rmtree(new_root_dir)\n\n # Load up the zip file and extract the valid entries\n zf = zipfile.ZipFile(self.demo_package.file)\n valid_entries = Submission.get_valid_demo_zipfile_entries(zf) \n\n for zi in valid_entries:\n if type(zi.filename) is unicode:\n zi_filename = zi.filename\n else:\n zi_filename = zi.filename.decode('utf-8', 'ignore')\n\n # HACK: Normalize demo.html to index.html\n if zi_filename == u'demo.html':\n zi_filename = u'index.html'\n\n # Relocate all files from detected root dir to a directory named\n # for the zip file in storage\n out_fn = u'%s/%s' % (new_root_dir, zi_filename)\n out_dir = dirname(out_fn)\n\n # Create parent directories where necessary.\n if not isdir(out_dir):\n makedirs(out_dir.encode('utf-8'), 0775)\n\n # Extract the file from the zip into the desired location.\n fout = open(out_fn.encode('utf-8'), 'wb')\n copyfileobj(zf.open(zi), fout)",
"def get_zip_file(self, link_of_anchor_tag):\r\n\r\n # Initiating Downloading of zip file\r\n try:\r\n zip_file_data = url2.urlopen(link_of_anchor_tag)\r\n zip_file = zip_file_data.read()\r\n\r\n # setting zip file name and writing the zip file\r\n zip_name = zip_file_data.url.split('/')[-1]\r\n print('attempting to download file %s' % zip_name)\r\n\r\n with open(zip_name, 'wb') as data:\r\n data.write(zip_file)\r\n print('File %s Download complete' % zip_name)\r\n except Exception as e:\r\n zip_name = ''\r\n print(e)\r\n return self.extract_csv(zip_name)",
"def test_resulting_file_is_zipfile(self):\n with storelet.ZipBackup(\"test\") as b:\n b.include_directory(self.get_data(\"simple\"))\n zipfile.is_zipfile(b._path)",
"def download_extract_zip(url, fname):\n response = requests.get(url)\n with zipfile.ZipFile(io.BytesIO(response.content)) as thezip:\n return thezip.open(fname)",
"def toZip(directory, zipFile):\n\n pass",
"def unzip_and_find_data(self, in_zipfile):\n self.unzip(in_zipfile, self.download_workspace)\n\n shp_list = [x for x in os.listdir(self.download_workspace) if os.path.splitext(x)[1] == '.shp']\n tif_list = [x for x in os.listdir(self.download_workspace) if os.path.splitext(x)[1] == '.tif']\n\n if len(shp_list) == 1:\n source_file = os.path.join(self.download_workspace, shp_list[0])\n\n elif len(tif_list) == 1:\n source_file = os.path.join(self.download_workspace, tif_list[0])\n\n else:\n logging.error('Unknown output from zip file, {0} shps, {1} tifs.\\nMay need to define a custom function to '\n 'unpack this data source. Exiting now.'.format(len(shp_list), len(tif_list)))\n sys.exit(1)\n\n return source_file",
"def open_xml(self, file_name):\r\n tree = ET.parse(file_name)\r\n root = tree.getroot()\r\n return root"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get File From ZIP.
|
def getFileFromZip(file_path: str, zipfile_path: str) -> bytes:
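    # Open the archive (zf is assumed to be the zipfile module) and return the raw bytes of the requested member.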
zipfile = zf.ZipFile(zipfile_path)
file = zipfile.read(file_path)
return file
|
[
"def fetch_zip( filename, local_dir='.'):\n\n url = posixpath.join( URL_FILE_STORE, filename)\n path = os.path.join( local_dir, filename)\n if not os.path.exists( local_dir):\n os.makedirs( local_dir)\n\n if not os.path.exists(path):\n urllib.urlretrieve(url, path)\n\n Zip = zipfile.ZipFile(path)\n Zip.extractall(local_dir)\n return",
"def getXmlFileFromZip(file_path: str, zipfiile_path: str):\n zipfile = zf.ZipFile(zipfiile_path)\n file_string = zipfile.read(file_path)\n file_xml = lxml.etree.fromstring(file_string)\n return file_xml",
"def zipfile(self):\n ...",
"def get_buff(self, name):\n try:\n return self.zip_buff.read(name)\n except KeyError:\n raise FileNotFound(name)",
"def read_zip_file(file_url, fdate, date):\n\n filename = os.path.join(ZIP_DIR, \"EQ\" + str(fdate) + \".zip\")\n try:\n f = urllib.request.urlretrieve(file_url, filename)\n except urllib.error.HTTPError:\n raise Exception(\"No data exists for Date : {}\".format(\n str(datetime.strftime(date, \"%d-%m-%Y\"))\n ))\n file = ZipFile(filename, \"r\")\n file.extractall(CSV_DIR)\n file.close()\n print(\"Zip file extracted successfully.\")\n return CSV_DIR + \"/EQ\" + fdate + \".CSV\"",
"def get_zip_file_name(self):\n return self.zip_file_name",
"def _get_file(self, name):\n return self.dir.get_file(name)",
"def extract_from_zip(self, url):\n try:\n r = requests.get(url)\n z = zipfile.ZipFile(io.BytesIO(r.content))\n for name in z.namelist():\n log.info(f\"XML extracted from zip file is {name}\")\n self.extracted_xml_name = name\n ex_file = z.open(name) # this is a file like object\n content = ex_file.read()\n return etree.fromstring(content)\n except Exception:\n log.error(tb.format_exc())\n raise ZipFileError",
"def download_extract_zip(url, fname):\n response = requests.get(url)\n with zipfile.ZipFile(io.BytesIO(response.content)) as thezip:\n return thezip.open(fname)",
"def get_zip_file(self, link_of_anchor_tag):\r\n\r\n # Initiating Downloading of zip file\r\n try:\r\n zip_file_data = url2.urlopen(link_of_anchor_tag)\r\n zip_file = zip_file_data.read()\r\n\r\n # setting zip file name and writing the zip file\r\n zip_name = zip_file_data.url.split('/')[-1]\r\n print('attempting to download file %s' % zip_name)\r\n\r\n with open(zip_name, 'wb') as data:\r\n data.write(zip_file)\r\n print('File %s Download complete' % zip_name)\r\n except Exception as e:\r\n zip_name = ''\r\n print(e)\r\n return self.extract_csv(zip_name)",
"def extract_zipped_index_file(src_file):\n dst_file, ext = splitext(src_file)\n if ext == \".zip\":\n logger.debug(\"Extracting zip-file %s ...\", src_file)\n # NOTE: Avoid possible filesystem-to-filesystem copy.\n # ans store the file directly in the destination folder.\n tmp_file = dst_file + \".tmp\"\n timer = Timer()\n try:\n with closing(zipfile.ZipFile(src_file)) as archive:\n fin = archive.open(basename(dst_file))\n with open(tmp_file, \"wb\") as fout:\n shutil.copyfileobj(fin, fout, CHUNK_SIZE)\n size = fout.tell()\n os.rename(tmp_file, dst_file)\n except Exception as exc:\n logger.error(\"Extraction of %s failed: %s\", src_file, exc)\n raise\n else:\n os.remove(src_file)\n finally:\n if isfile(tmp_file):\n os.remove(tmp_file)\n logger.info(\n \"'%s' -> '%s' %dB %.3fs\", src_file, dst_file, size, timer.stop()\n )\n return dst_file\n else:\n return src_file",
"def unzip_and_find_data(self, in_zipfile):\n self.unzip(in_zipfile, self.download_workspace)\n\n shp_list = [x for x in os.listdir(self.download_workspace) if os.path.splitext(x)[1] == '.shp']\n tif_list = [x for x in os.listdir(self.download_workspace) if os.path.splitext(x)[1] == '.tif']\n\n if len(shp_list) == 1:\n source_file = os.path.join(self.download_workspace, shp_list[0])\n\n elif len(tif_list) == 1:\n source_file = os.path.join(self.download_workspace, tif_list[0])\n\n else:\n logging.error('Unknown output from zip file, {0} shps, {1} tifs.\\nMay need to define a custom function to '\n 'unpack this data source. Exiting now.'.format(len(shp_list), len(tif_list)))\n sys.exit(1)\n\n return source_file",
"def load_file_in_same_dir(ref_file, filename):\n path = split_path(ref_file)[:-1] + [filename]\n\n for i, p in enumerate(path):\n if p.endswith('.zip'):\n zfilename = os.path.join(*path[:i + 1])\n zfile = zipfile.ZipFile(zfilename)\n return u(zfile.read('/'.join(path[i + 1:])))\n\n return u(io.open(os.path.join(*path), encoding='utf-8').read())",
"def get_zip(url):\n logger.debug('Downloading %s' % url)\n r = yield from aiohttp.request('get', url)\n if r.status == 200:\n zip_bytes = yield from r.read_and_close()\n logger.debug('%s downloaded' % url)\n return zip_bytes\n logger.error('Fetching url %s failed beacuse %s'\n % (url, r.reason))\n return None",
"def __check_zip_for_single_sub(self, zip_file, rename_subtitle):\n # opens the zip file\n res = zip_file\n \"\"\"\n TODO: implement\n try:\n zf = zipfile.ZipFile(zip_file)\n self.__debug(\"ZIP file found.\")\n for member in zf.infolist():\n print member\n zf.close()\n except zipfile.BadZipfile:\n # rar file found\n self.__debug(\"Invalid zip file found\")\n \"\"\"\n return res",
"def unzip_document(self, filename):\n mode = \"r\"\n\n tmp_dir = self.get_tmp_dir()\n\n if tmp_dir:\n full_filename = tmp_dir + os.sep + filename\n else:\n full_filename = filename\n\n z = zipfile.ZipFile(full_filename)\n\n new_filename = None\n new_document = None\n\n for f in z.namelist():\n z.extract(f, tmp_dir)\n new_filename = f\n\n # Handle single or multiple files as zip contents\n if len(z.namelist()) == 1:\n # A single file inside\n new_document = new_filename\n elif len(z.namelist()) > 1:\n # Multiple files inside\n if new_document is None:\n new_document = []\n new_document.append(new_filename)\n\n z.close()\n\n return new_document",
"def get_file(self, commit, path, tag=None, URL=None):\n res = self._req(\n Service.PFS,\n \"GetFile\",\n file=pfs_proto.File(commit=commit_from(commit), path=path, tag=tag),\n URL=URL,\n )\n return PFSFile(io.BytesIO(next(res).value))",
"def unzip_file(self, src_zip, destination):\n zf = zipfile.ZipFile(src_zip)\n shp_file_name = None\n for name in zf.namelist():\n if os.path.splitext(name)[1] == \".shp\":\n shp_file_name = name\n outfile = open(os.path.join(destination, name), 'wb')\n outfile.write(zf.read(name))\n outfile.close()\n\n return shp_file_name",
"def get_link(self):\r\n\r\n header = {'user-agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\r\n 'Chrome/63.0.3239.108 Safari/537.36'}\r\n req = url2.Request(self.page_url, headers=header)\r\n resp = url2.urlopen(req)\r\n resp_data = resp.read()\r\n parsed_html = BeautifulSoup(resp_data, 'html.parser')\r\n zip_file_link = parsed_html.find('a', attrs={'id': 'btnhylZip'})['href'] # link of the zip file\r\n\r\n return self.get_zip_file(zip_file_link)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the directory string from a path string.
|
def getDirectoryFromPath(path: str) -> str:
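    # rpartition splits at the last "/"; joining parts [0] and [1] keeps the directory prefix with its trailing slash.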
path_temp = path.rpartition("/")
new_path = path_temp[0] + path_temp[1]
return new_path
|
[
"def parse_directory(path):\n p = pathlib.Path(path)\n\n if p.parts[0] == \"gs:\":\n return str(pathlib.Path(* p.parts[2:-1], p.stem)), str(p.parts[1])\n else:\n return str(p), None",
"def first_dir(path_string):\n parts = path_string.split(os.path.sep)\n return parts[0]",
"def _dir_from_url(value):\n\n mediaurl_re = re.compile(r'^(%s)' % (MEDIA_URL))\n value = mediaurl_re.sub('', value)\n directory_re = re.compile(r'^(%s)' % (DIRECTORY))\n value = directory_re.sub('', value)\n return os.path.split(value)[0]",
"def _get_directory(group: ParseResult) -> str:\n files = group[0]\n if len(files) == 1:\n return os.path.dirname(files[0])\n else:\n return os.path.commonpath(files)",
"def get_dir_without_last_slash(path):\n return \"/\".join(path.split(\"/\")[:-1])",
"def dir_name(path):\n return os.path.dirname(path)",
"def getDirName(self, path, lastOnly=False, levelsUp=None):\n # self.log('Get directory name of path: %s' % path,9)\n if path is None:\n raise TypeError('Path is not passed in system.fs.getDirName')\n dname = os.path.dirname(path)\n dname = dname.replace(\"/\", os.sep)\n dname = dname.replace(\"//\", os.sep)\n dname = dname.replace(\"\\\\\", os.sep)\n if lastOnly:\n dname = dname.split(os.sep)[-1]\n return dname\n if levelsUp is not None:\n parts = dname.split(os.sep)\n if len(parts) - levelsUp > 0:\n return parts[len(parts) - levelsUp - 1]\n else:\n raise RuntimeError(\"Cannot find part of dir %s levels up, path %s is not long enough\" %\n (levelsUp, path))\n return dname + os.sep",
"def get_path(path_string):\n err = 1\n p = '.'\n f = ''\n split_head_tail = os.path.split(path_string)\n if split_head_tail != ('', ''):\n err = 0\n p = split_head_tail[0]\n f = split_head_tail[1]\n return err, p, f",
"def path_to_sub_directory(path, name):\n result = os.path.join(path, name)\n if os.path.isdir(result):\n return result",
"def getDirName(self, path,lastOnly=False,levelsUp=None):\n pylabs.q.logger.log('Get directory name of path: %s' % path,9)\n if path is None:\n raise TypeError('Path is not passed in system.fs.getDirName')\n #try:\n dname=os.path.dirname(path)\n dname=dname.replace(\"/\",os.sep)\n dname=dname.replace(\"//\",os.sep)\n dname=dname.replace(\"\\\\\",os.sep)\n if lastOnly:\n dname=dname.split(os.sep)[-1]\n return dname\n if levelsUp<>None:\n parts=dname.split(os.sep)\n if len(parts)-levelsUp>0:\n return parts[len(parts)-levelsUp-1]\n else:\n raise RuntimeError (\"Cannot find part of dir %s levels up, path %s is not long enough\" % (levelsUp,path))\n return dname+os.sep\n #except:\n #raise RuntimeError('Failed to get directory name of the given path: %s'% path)",
"def _get_directory(self, identifier: str) -> typing.Optional[str]:\n if not re.match(\"[_,\\w]+\", identifier):\n return None\n directory = identifier[1:3]\n return os.path.join(self.root, directory, identifier)",
"def get_directory(path_in_folder):\n current_directory = os.getcwd()\n\n parent_directory = os.path.split(os.path.split(current_directory)[0])[0] + \"\\\\\"\n file_path = os.path.join(parent_directory,path_in_folder)\n return file_path",
"def path2ParentDirectory(cls, path):\n return '/'.join(path.split('/')[:-1])",
"def get_path(root, path):\n\n return join(dirname(root), path)",
"def get_directory_for_filename(filename):\n return os.path.dirname(os.path.abspath(filename))",
"def convert_path (pathname):\r\n if os.sep == '/':\r\n return pathname\r\n if not pathname:\r\n return pathname\r\n if pathname[0] == '/':\r\n raise ValueError, \"path '%s' cannot be absolute\" % pathname\r\n if pathname[-1] == '/':\r\n raise ValueError, \"path '%s' cannot end with '/'\" % pathname\r\n\r\n paths = string.split(pathname, '/')\r\n while '.' in paths:\r\n paths.remove('.')\r\n if not paths:\r\n return os.curdir\r\n return apply(os.path.join, paths)",
"def folder_name(path):\r\n # Assume that there IS a valid path in the argument.\r\n all_folders = path.split('\\\\')\r\n i = len(all_folders) - 1\r\n if all_folders[i] == '':\r\n return all_folders[i-1]\r\n else:\r\n return all_folders[i]",
"def get_the_only_directory_under(dirpath):\n dirs = [name for name in os.listdir(dirpath) if isdir(join(dirpath, name))]\n if len(dirs) != 1:\n raise ValueError(\"In 'get_the_only_directory_under' call, \"\n \"found more than 1 directory under: %s\" % dirpath)\n return dirs[0]",
"def find_dir(path, curr_path):\n # type: (str, Optional[Union[str, Path]]) -> Optional[Path]\n path = Path(path)\n if path.is_absolute():\n return path\n\n curr_path = Path(curr_path).parent if curr_path else Path('.')\n\n ret = curr_path / path\n return ret if ret.is_dir() else None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
replaces the number in the path with the given number.
|
def changeFileNoInFilePath(path: str, fileNo: int) -> str:
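    # Split the path at the first "<digits>." occurrence and rebuild it with fileNo in that position.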
separator = r"[0-9]+\."
splitted_path = re.split(separator, path, 1)
new_path = splitted_path[0] + str(fileNo) + "." + splitted_path[1]
return new_path
|
[
"def pat_number(self, number):\n self._pat_number(number)",
"def parse_num(path):\n bn = path.basename\n if bn.startswith(prefix):\n try:\n return int(bn[len(prefix):])\n except ValueError:\n pass",
"def replace_digits_with_paths(self, user_input):\n verbose = False\n verbose and print(user_input)\n _user_input = user_input[:]\n for before, num, after in re.findall(r'(\\/|\\b)([\\d.]+)(\\/|\\b)', _user_input):\n verbose and print(\"before: {}, num: {}, after: {}\".format(before, num, after))\n if not before and not after and num.isdigit():\n # If found a clean match replace with full path name of file\n try:\n file_path = self.list_current_directory().index(int(num)).full_path\n verbose and print(file_path)\n except IndexError:\n verbose and print(\"Got index error, wonder why?\")\n else:\n verbose and print(\"what is it doing?\")\n _user_input = _user_input.replace(num, '\"{0}\"'.format(self.join(self.current_directory,\n file_path)))\n return _user_input",
"def renum_images(dir_path, img_num=1):\n fnames = sort_files(dir_path)\n\n for f in fnames:\n\n # Add padding zero to image number if applicable\n new_img_num = str(img_num).zfill(3)\n\n # Construct the original absolute path for the image file\n dir_old = os.path.join(dir_path, f)\n\n # Replace original image number with new padded image number\n new_fname = 'image-{}.gif'.format(new_img_num)\n # new_fname = f.replace(img_num, new_img_num)\n\n # Construct the new abs path\n dir_new = os.path.join(dir_path, new_fname)\n\n # Rename the image file\n shutil.move(dir_old, dir_new)\n\n print('{} renamed as {}'.format(f.ljust(17), new_fname))\n\n img_num += 1",
"def replaceInPath(self, search, replace):\n\t\timport revitron\n\n\t\tfor _id in self.refs:\n\n\t\t\trefId = revitron.DB.ElementId(_id)\n\t\t\tref = self.refs[_id]\n\t\t\tnewPath = ref.path.replace(search, replace)\n\t\t\tself.data.SetDesiredReferenceData(\n\t\t\t refId,\n\t\t\t revitron.DB.FilePath(newPath),\n\t\t\t revitron.DB.PathType.Absolute,\n\t\t\t True\n\t\t\t)\n\n\t\tself.write()",
"def findreplace(name, size):\n # Find digits in name:\n digits = filter(str.isdigit, name)\n # Zero-pad update:\n zerodigs = \"{:0{}d}\".format(int(digits), size)\n # Replace:\n return name.replace(digits, zerodigs)",
"def rename_images_from_number(directory, start):\n\n\timport shutil\n\n\t# move the files in order\n\tfiles = glob.glob(os.path.join(directory, '*.jpg'))\n\tnumber = start\n\tfor filename in sorted(files):\n\t\t(basename, oldnumber, padding, extension) = split_number(filename)\n\t\tnewfilename = '{}{}.jpg'.format(\n\t\t\tbasename,\n\t\t\tstr(number).zfill(padding)\n\t\t)\n\n\t\t#shutil.move(filename, newfilename)\n\t\tprint 'Moving {} to {}'.format(filename, newfilename)\n\t\tnumber = number + 1",
"def setNumber(name, value):\n api_value_, api_value_n_ = _ivectordouble(value)\n ierr = c_int()\n lib.gmshOnelabSetNumber(\n c_char_p(name.encode()),\n api_value_, api_value_n_,\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOnelabSetNumber returned non-zero error code: \",\n ierr.value)",
"def set_node_set_variable_number(self, number):\n self.__ex_put_variable_param('EX_NODE_SET', number)\n return True",
"def snapshot_iterno(path):\n # Hack, not the most robust way of parsing the iter num\n last_part = basename(path).split('_')[-1]\n match = re.search(r'(?P<n>[0-9]+)', last_part)\n gd = match.groupdict()\n return int(gd['n'])",
"def setNumber(name, value):\n ierr = c_int()\n lib.gmshOptionSetNumber(\n c_char_p(name.encode()),\n c_double(value),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshOptionSetNumber returned non-zero error code: \",\n ierr.value)",
"def changevalue():\n global number\n number = 1",
"def change_stub_number(self, new_number: int):\n self.cfg.stub_number = new_number",
"def set_depth_path(new_path):\n file_output_node = bpy.context.scene.node_tree.nodes[2]\n file_output_node.base_path = new_path",
"def set_line_num(self, line, num):\n self._set_line_num(line, num)",
"def set_nmap_path(path):\n\n return add_to_path(path)",
"def update_review_url_for_page_no(url : str, \n page_no : int) -> str:\n old_page_no = get_review_page_number_from_url(url)\n for identifier in REVIEW_PAGE_NO_URL_IDENTIFIER:\n url = url.replace(f\"{identifier}{old_page_no}\", \n f\"{identifier}{page_no}\")\n return url",
"def _update_info_string_number(vcf_reader, key, new_number):\n orig = vcf_reader.infos[key]\n vcf_reader.infos[key] = vcf.parser._Info(\n orig.id, -1, orig.type, orig.desc)",
"def bump_patch_number(version_number: str) -> str:\n major, minor, patch = version_number.split(\".\")\n return f\"{major}.{minor}.{int(patch) + 1}\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Moses BLEU is 0 when there are no matching 4-grams.
|
def test_less_than_four_bleu():
hypotheses = ['a b c']
references = [['a b c']]
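    # 'a b c' has only three tokens, so no matching 4-grams exist and the score is 0.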
bleu = bleu_score(hypotheses, references)
assert(bleu == 0.0)
|
[
"def unmatchedCount(sequence1, sequence2):\n # TODO: Write me\n if len(sequence1) == 0 or len(sequence2) == 0:\n return 0\n else:\n if matchingBase(sequence1[0]) == \"T\" and matchingBase(sequence2[0]) == \"A\":\n return 0 + unmatchedCount(sequence1[1:], sequence2[1:])\n elif sequence1[0] == \"T\" and sequence2[0] == \"A\":\n return 0 + unmatchedCount(sequence1[1:], sequence2[1:])\n elif sequence1[0] == \"C\" and sequence2[0] == \"G\":\n return 0 + unmatchedCount(sequence1[1:], sequence2[1:])\n elif sequence1[0] == \"G\" and sequence2[0] == \"C\":\n return 0 + unmatchedCount(sequence1[1:], sequence2[1:])\n else:\n return 1 + unmatchedCount(sequence1[1:], sequence2[1:])",
"def score(string):\n string = string.upper()\n o = 0\n for n in range(len(string) - 3):\n snip = string[n:n + 4]\n try:\n o += math.log10(int(VC.quadgrams[snip]))\n except KeyError:\n pass\n return o",
"def get_corpus_bleu_score(self) -> float:\n # Calculate corpus-level brevity penalty.\n bp = self.get_brevity_penalty()\n\n # Returns 0 if there's no matching 1-gram\n if self.no_of_correct_predicted[1] == 0:\n return 0\n\n n_gram_precision = self.get_smoothened_modified_precision()\n\n geometric_average_precision = math.exp(\n math.fsum((w_i * math.log(p_i) for w_i, p_i in zip(self.weights, n_gram_precision) if p_i > 0)))\n bleu_score = bp * geometric_average_precision\n\n return bleu_score",
"def get_modified_unigram_precision(reference, candidate):\n return sentence_bleu(reference, candidate, weights=(1, 0, 0, 0))",
"def score(motifs):\n columns = [''.join(seq) for seq in zip(*motifs)]\n max_count = sum([max([c.count(nucleotide) for nucleotide in 'ACGT']) for c in columns])\n\n return len(motifs[0])*len(motifs) - max_count",
"def __len__(self):\n return(len(self.nucleotides))",
"def _get_unilem_count(self, lemma):\n c = self.db_conn.cursor()\n c.execute(\"SELECT frequency FROM unigram_count WHERE lemma = ?\",\n (lemma,))\n result = c.fetchone()\n return float(result[0]) if result else 0",
"def count(qgram, genome):\r\n rev = reverse_complement(qgram)\r\n rev = rev.replace('N', '.')\r\n qgram = qgram.replace('N', '.')\r\n \r\n return len([m.start() for m in re.finditer(r'(?=(%s))' %qgram, genome)] + [m.start() for m in re.finditer(r'(?=(%s))' %rev, genome)])",
"def checkMatchStatistic(self):\n numOfNan = self.matches[self.matches['w_ace'].isnull() | self.matches['w_df'].isnull() |\n self.matches['w_svpt'].isnull() | self.matches['w_1stIn'].isnull() |\n self.matches['w_1stWon'].isnull() | self.matches['w_2ndWon'].isnull() |\n self.matches['w_SvGms'].isnull() | self.matches['w_bpSaved'].isnull() |\n self.matches['w_bpFaced'].isnull()].shape[0]\n\n numOfNan += self.matches[self.matches['l_ace'].isnull() | self.matches['l_df'].isnull() |\n self.matches['l_svpt'].isnull() | self.matches['l_1stIn'].isnull() |\n self.matches['l_1stWon'].isnull() | self.matches['l_2ndWon'].isnull() |\n self.matches['l_SvGms'].isnull() | self.matches['l_bpSaved'].isnull() |\n self.matches['l_bpFaced'].isnull()].shape[0]\n\n print(\"Sanity checking match statistic: \" + str(numOfNan))\n\n self.matches.dropna(\n subset=['w_ace', 'w_df', 'w_svpt', 'w_1stIn', 'w_1stWon', 'w_2ndWon', 'w_SvGms', 'w_bpSaved', 'w_bpFaced'],\n inplace=True)\n\n self.matches.dropna(\n subset=['l_ace', 'l_df', 'l_svpt', 'l_1stIn', 'l_1stWon', 'l_2ndWon', 'l_SvGms', 'l_bpSaved', 'l_bpFaced'],\n inplace=True)",
"def process_gain(self):\n return 1",
"def test_cochleagram_zero(self):\n cochleagram_loss = metrics.CochleagramLoss(fs=self.fs,\n stride=int(self.fs * 0.005))\n self.assertEqual(0, cochleagram_loss(self.x, self.x))",
"def complete_resp(): \n \n num_responding = np.zeros(6)\n for elem in responses.values():\n i = 0\n if elem[0] != -1:\n i += 1\n if elem[1] != -1:\n i += 1\n if elem[2] != -1:\n i += 1\n if elem[3] != -1:\n i += 1\n if elem[4] != -1:\n i += 1\n \n for x in range(0,i):\n num_responding[x] += 1\n \n \n return num_responding/float(len(responses))",
"def get_total_psram() -> int:",
"def english_freq_match_score(message: str) -> int:\n freq_order = get_frequency_order(message)\n match_score = 0\n for common_letter in ETAOIN[:6]:\n if common_letter in freq_order[:6]:\n match_score += 1\n\n for uncommon_letter in ETAOIN[-6:]:\n if uncommon_letter in freq_order[-6:]:\n match_score += 1\n\n return match_score",
"def MIN_BUBBLES():\n return 0",
"def test_naive_exact(self):\r\n\r\n p = \"word\"\r\n t = \"There would have been a time for such a word\"\r\n\r\n occurrences, matches, mismatches = rau.naive_exact(p, t)\r\n\r\n self.assertEqual(len(occurrences), 1)\r\n self.assertEqual(occurrences[0], 40)\r\n self.assertEqual(matches, 6)\r\n self.assertEqual(mismatches, 40)\r\n\r\n p = \"AAA\"\r\n t = \"AAATAA\"\r\n\r\n occurrences, matches, mismatches = rau.naive_exact(p, t)\r\n\r\n self.assertEqual(len(occurrences), 1)\r\n self.assertEqual(occurrences[0], 0)\r\n self.assertEqual(matches, 6)\r\n self.assertEqual(mismatches, 3)\r\n\r\n ''' take a long time, un-comment out for full testing\r\n # ----- Test artificial random reads ----- #\r\n\r\n genome = fau.read_genome(full_file_name)\r\n\r\n random_reads = dnau.get_random_reads(genome, 100, 100)\r\n\r\n self.assertEqual(len(random_reads), 100)\r\n\r\n num_matched = 0\r\n\r\n for random_read in random_reads:\r\n self.assertEqual(len(random_read), 100)\r\n\r\n occurrences, matches, mismatches = rau.naive_exact(random_read, genome)\r\n\r\n # since we KNOW there is an alignment there has to be one or more\r\n self.assertGreaterEqual(len(occurrences), 1)\r\n\r\n num_matched += 1\r\n\r\n self.assertEqual(num_matched, 100)\r\n\r\n # ----- Test real-world random reads ----- #\r\n\r\n data_dir2 = \"../fastq/data/\"\r\n test_file_name2 = \"ERR266411_1.first1000.fastq\"\r\n full_file_name2 = data_dir2 + test_file_name2\r\n\r\n phix_reads, _ = fqu.read_fastq(full_file_name2)\r\n\r\n num_matched = 0\r\n num_read = 0\r\n for read in phix_reads:\r\n occurrences, _, _ = rau.naive_exact(read, genome)\r\n num_read += 1\r\n\r\n if len(occurrences) > 0:\r\n num_matched += 1\r\n\r\n # only 7 reads matched exactly out of 1000 in the single side\r\n self.assertEqual(num_matched, 7)\r\n self.assertEqual(num_read, 1000)\r\n\r\n print ('%d / %d reads matched the genome', num_matched, num_read)\r\n\r\n # ----- Test real-world random reads first 30 bases only ----- #\r\n\r\n data_dir2 = \"../fastq/data/\"\r\n test_file_name2 = \"ERR266411_1.first1000.fastq\"\r\n full_file_name2 = data_dir2 + test_file_name2\r\n\r\n phix_reads, _ = fqu.read_fastq(full_file_name2)\r\n\r\n num_matched = 0\r\n num_read = 0\r\n for read in phix_reads:\r\n occurrences, _, _ = rau.naive_exact(read[:30], genome)\r\n num_read += 1\r\n\r\n if len(occurrences) > 0:\r\n num_matched += 1\r\n\r\n # only 439 reads matched exactly out of 1000 in the single side\r\n self.assertEqual(num_matched, 459)\r\n self.assertEqual(num_read, 1000)\r\n\r\n print ('%d / %d reads matched the genome', num_matched, num_read)\r\n\r\n # ----- Test real-world random reads on both sides of the DNA ----- #\r\n\r\n data_dir2 = \"../fastq/data/\"\r\n test_file_name2 = \"ERR266411_1.first1000.fastq\"\r\n full_file_name2 = data_dir2 + test_file_name2\r\n\r\n phix_reads, _ = fqu.read_fastq(full_file_name2)\r\n\r\n num_matched = 0\r\n num_read = 0\r\n for read in phix_reads:\r\n r = read[:30]\r\n occurrences, _, _ = rau.naive_exact(r, genome)\r\n\r\n occurrences_reverse_compliment, _, _ = rau.naive_exact(dnau.reverse_complement(r), genome)\r\n\r\n if len(occurrences_reverse_compliment) < 0:\r\n derp = 27\r\n\r\n occurrences.extend(occurrences_reverse_compliment)\r\n\r\n num_read += 1\r\n\r\n if len(occurrences) > 0:\r\n num_matched += 1\r\n\r\n # only 439 reads matched exactly out of 1000 in the single side\r\n self.assertEqual(num_matched, 932)\r\n self.assertEqual(num_read, 1000)\r\n\r\n print ('%d / %d reads matched the genome', num_matched, num_read)\r\n '''",
"def _calc_matching_prob(self):\n if not self.professional:\n return 1",
"def _count_true_pos(self, match_iou):\n\n n_true_positives0 = K.zeros((self.batch_size,))\n w_valid0 = K.ones_like(match_iou)\n i0 = K.constant(0, dtype=K.tf.int32)\n\n def add_if_iou_match(idx, w_valid, n_true_positives):\n tp_iou_remaining = match_iou[:, idx, :] * w_valid[:, idx, :]\n max_iou = K.max(tp_iou_remaining, -1, keepdims=True)\n increment = K.cast(K.greater(max_iou, self.iou_thresh), K.floatx())\n n_true_positives = n_true_positives + K.reshape(increment, (self.batch_size,))\n\n # We build a matrix that contains 0 for the column that is considered as match 1 otherwise\n # Multiplying it with w_valid zeros the matched box and avoids double counting of the same box\n max_idx_mask = K.equal(max_iou, tp_iou_remaining)\n min_iou_mask = K.greater_equal(tp_iou_remaining, self.iou_thresh)\n\n w_update = K.switch(K.tf.logical_and(max_idx_mask, min_iou_mask),\n K.zeros_like(tp_iou_remaining),\n K.ones_like(tp_iou_remaining))\n\n w_update = K.tile(K.expand_dims(w_update, 1), (1, K.shape(match_iou)[1], 1))\n\n w_valid = w_update * w_valid\n return idx + 1, w_valid, n_true_positives\n\n def remaining_true_boxes(idx, w_valid, n_true_positives):\n return K.any(K.greater(K.sum(match_iou[:, idx, :] * w_valid[:, idx, :], -1), 0))\n\n i, w, tp_total = K.tf.while_loop(remaining_true_boxes, add_if_iou_match,\n [i0, w_valid0, n_true_positives0], parallel_iterations=1)\n return tp_total",
"def __len__(self):\n return len(self.flowgram)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
ordinary points are just pairs (x, y) where x and y are both between 0 and n-1
|
def ordinary_points(n):
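    # all pairs (x, y) with 0 <= x, y <= n-1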
return [(x, y) for x in range(n) for y in range(n)]
|
[
"def GenAdjacentPoints(origin):\n for i in [1, 0, -1]:\n for j in [-1, 0, 1]:\n if i == 0 and j == 0:\n continue\n yield Point(origin.x + j, origin.y + i)",
"def get_adjacent(x, y):\n return [(x + 1, y), (x + 1, y + 1), (x + 1, y - 1),\n (x, y - 1), (x, y + 1), (x - 1, y),\n (x - 1, y + 1), (x - 1, y - 1)]",
"def neighbours(self, point):\n\n point_x, point_y = point[0], point[1]\n\n if point_x == 0 and point_y == 0:\n return (0, 1), (1, 1), (1, 0)\n if point_x == self.rows-1 and point_y == \\\n self.cols-1:\n return (point_x-1, point_y), \\\n (point_x-1, point_y-1), \\\n (point_x, point_y-1)\n if point_x == self.rows-1 and point_y == 0:\n return (point_x-1, 0), (point_x-1, 1), \\\n (point_x, 1)\n if point_x == 0 and point_y == self.cols-1:\n return (0, point_y-1), (1, point_y-1), \\\n (1, point_y)\n if point_x == 0:\n return (0, point_y - 1), (1, point_y-1), \\\n (1, point_y), (1, point_y+1), (0, point_y+1)\n if point_y == 0:\n return (point_x-1, 0), (point_x-1, 1), \\\n (point_x, 1), (point_x+1, 1), (point_x+1, 0)\n if point_x == self.rows-1:\n return (point_x-1, point_y), (point_x-1, point_y-1), \\\n (point_x, point_y-1), (point_x-1, point_y+1), \\\n (point_x, point_y+1)\n if point_y == self.cols-1:\n return (point_x, point_y-1), (point_x-1, point_y-1), \\\n (point_x-1, point_y), (point_x+1, point_y-1), \\\n (point_x+1, point_y)\n\n return (point_x-1, point_y-1), (point_x-1, point_y), \\\n (point_x-1, point_y+1), (point_x, point_y+1), \\\n (point_x+1, point_y+1), (point_x+1, point_y), \\\n (point_x+1, point_y-1), (point_x, point_y-1)",
"def antipodal_points(p1, p2, p3):\n # First we produce normal vecors to the planes going through the\n # perpedicular bisectors of p1-p2 and p1-p3.\n normal_p12 = p2 - p1\n normal_p13 = p3 - p1\n antipodal_vector = cross(normal_p12, normal_p13)\n res_p = antipodal_vector.normalize()\n return res_p, -res_p",
"def special_right_triangles(n):\n return [\n (x, y, z)\n for x in range(1, n)\n for y in range(x, n)\n for z in range(y, n)\n if x**2 + y**2 == z**2\n ]",
"def find_all_points(a,b,p):\n value_xy=[] # contains values of y when positive, where (x,y) £ E(p)\n x = 0\n while x< p:\n y2 = (((x**3) %p) + ((a*x) %p) + b )%p\n y = 0\n while y < p and y !=-1 :\n if( (y**2)%p == y2):\n # print(\"(%d,%d) (%d,%d)\"% (x,y,x,(-y)%p))\n if y != (-y)%p:\n value_xy.append((x,y))\n value_xy.append((x,(-y)%p))\n else:\n value_xy.append((x,y))\n y = -2\n y+=1\n x+=1\n return value_xy",
"def triangulate_points(x,y):\n \n centers,edges,tri,neighbors = md.delaunay(x,y)\n return tri",
"def pick_points_on_shape(self):\r\n a = self.a \r\n N = 81 # number of vertices\r\n t = np.linspace(-4,4,N)\r\n verts = np.zeros((N,2))\r\n verts[:,0] = a*(np.abs(t))**3 - 1.0\r\n verts[:,1] = t\r\n return t, verts",
"def points2D(self) -> tuple[Point2D, ...]:\n return tuple(map(Point2D, self._xs, self._ys))",
"def _neighbors(self,point):\n #row,col = self._point_to_coord(point)\n #if 0 <= row <= self.size+1 and 0 <= col <= self.size+1:\n return [point-1, point+1, point-self.NS, point+self.NS]\n #else:\n # raise ValueError(\"This point is out of range!\")",
"def generate_initial_points(x, y, num_points, link_length):\n x_all = [x]\n y_all = [y]\n for _ in range(num_points-1):\n phi = np.random.uniform(-np.pi/10, np.pi/10)\n x1, y1 = x + link_length * np.cos(phi), y + link_length * np.sin(phi)\n x_all.append(x1)\n y_all.append(y1)\n x, y = x1, y1\n \n return x_all, y_all",
"def int_pair(self):\n return tuple([int(self.x), int(self.y)])",
"def generate_data(no_points):\n\tX = np.zeros(shape=(no_points, 2))\n\tY = np.zeros(shape=no_points)\n\n\tfor i in range(no_points):\n\t\tX[i][0] = random.randint(-9,9)+0.5\n\t\tX[i][1] = random.randint(-9,9)+0.5\n\t\tY[i] = 1 if X[i][0]+X[i][1] >= 2 else 0\n\n\treturn X, Y",
"def _hemPoints(v, n):\n v = g.normalize(v)\n north, east = g.northEast(v)\n points = []\n for i in xrange(n):\n z = -1.0\n while z < 0.0:\n x = random.uniform(-1.0 + 1.0e-7, 1.0 - 1.0e-7)\n y = random.uniform(-1.0 + 1.0e-7, 1.0 - 1.0e-7)\n z = 1.0 - x * x - y * y\n z = math.sqrt(z)\n p = (z * v[0] + x * north[0] + y * east[0],\n z * v[1] + x * north[1] + y * east[1],\n z * v[2] + x * north[2] + y * east[2])\n points.append(g.normalize(p))\n return points",
"def points(self):\n return [self.point1, self.point2]",
"def pointvectors(self):\n return np.stack([self.x, self.y], axis=-1)",
"def _coords(self, x, y):\n return y, x * 2",
"def neighbours(self):# по отиз начин работи по - бързо от колкото с up.left, left... etc\n\t\tres = []\n\t\tfor x in xrange(self.x - 1, self.x + 2):\n\t\t\tres.append( Point( x, self.y+1 ) )\n\t\t\tres.append( Point( x, self.y - 1 ) )\n\t\tres.append( Point(self.x -1, self.y) )\n\t\tres.append( Point(self.x+1, self.y) )\n\t\treturn res",
"def from_homog(point):\n return [point[0]/point[2], point[1]/point[2]]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns the vertical line with the specified x-coordinate in the finite projective plane of degree n; includes the point at infinity ('∞')
|
def vertical_line(x, n):
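    # n affine points with this x-coordinate, plus the single point at infinity "∞" shared by all vertical lines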
return [(x, y) for y in range(n)] + [u"∞"]
|
[
"def line_at_infinity(n):\n return points_at_infinity(n)",
"def render_visible(V):\n\n # make V into list sorted by slope: O(nlogn)\n V = sorted(V, key=lambda l: l.m)\n X = visible_intersections(V)\n\n # add point beyond left end point to have a support point for the line\n # with smallest slope\n X = [X[0]-5] + X\n\n # Calculate the corresponding Y values:\n Y = [ l.y(x) for l,x in zip(V,X)]\n\n # and now a support point for the lines with greatest slope:\n X.append( X[-1]+5 )\n Y.append( V[-1].y(X[-1]+5) )\n return X,Y",
"def get_vanishing_line(vanishing_pts: np.ndarray):\n assert vanishing_pts.shape == (2, 3)\n vanishing_line = np.empty(shape=3, dtype=vanishing_pts.dtype)\n \"\"\" YOUR CODE HERE \"\"\"\n vanishing_line = np.cross(vanishing_pts[0, :], vanishing_pts[-1, :])\n\n \"\"\" END YOUR CODE HERE \"\"\"\n assert len(vanishing_line) == 3\n return vanishing_line",
"def getNullcline(d, x, y):\r\n h = plt.figure() # a dummy figure to draw the contours on\r\n cs = plt.contour(x, y, d, levels=[0]) # creates a contour plot with a line at 0\r\n paths = cs.collections[0].get_paths() # get the path of the null-cline\r\n xy = paths[0].vertices # get the verticies of the path\r\n plt.close(h) # close the dummy figure\r\n return xy[:,0], xy[:,1]",
"def asInfiniteLine(self) -> \"adsk::core::Ptr< adsk::core::InfiniteLine3D >\" :\n return _core.Line3D_asInfiniteLine(self)",
"def voronoi_vertex(vy, vx, vertex_y, vertex_x):\n return numpy.argmin(numpy.hypot(vy - vertex_y, vx - vertex_x))",
"def find_x_given_y(line, y):\n dx = line[0][2] - line[0][0]\n dy = line[0][3] - line[0][1]\n \n return np.round(np.array([line[0][0] + (y - line[0][1])*dx/dy, y]))#.astype(np.uint16)",
"def findVanishingPoint(self, point_lines):\n lns = []\n\n for i in range(0, len(point_lines), 2):\n lns.append(self.__computeLineNormal(point_lines[i][None, :], point_lines[i + 1][None, :]))\n\n # ln1 = self.__computeLineNormal(point_line1[0, None], point_line1[1, None])\n # ln2 = self.__computeLineNormal(point_line2[0, None], point_line2[1, None])\n # ln3 = self.__computeLineNormal(point_line3[0, None], point_line3[1, None])\n\n # ln1 = ln1 / ln1[:, -1]\n # ln2 = ln2 / ln2[:, -1]\n # ln3 = ln3 / ln3[:, -1]\n\n lns = np.vstack(lns)\n # lns = lns / lns[:, -1]\n\n # A = np.vstack((ln1, ln2, ln3))\n A = lns\n vp = la.solve(np.dot(A[:, 0:2].T, A[:, 0:2]), np.dot(A[:, 0:2].T, -A[:, -1]))\n\n return np.vstack((vp[:, None], 1)).T",
"def find_xray_line(z, edge):\n intensity = 0\n line = ''\n for key, value in xray_lines(z).items() :\n if value.initial_level == edge.upper():\n if value.intensity > intensity:\n intensity = value.intensity\n line = key\n return xray_line(z, line[:-1])",
"def cutpoint(plane, line):\n oplane, nplane = plane\n oline, vline = (line[0], line[1] - line[0])\n f = dot(vline, nplane)\n if f != 0:\n d = dot((oplane - oline), nplane) / f\n p = d * vline + oline\n n = linalg.norm(line[0] - line[1])\n if allclose(n, linalg.norm(p - line[0]) + linalg.norm(p - line[1])):\n return p\n return None",
"def get_linear(self):\n return self._v_lin.copy()",
"def pure_vertical():\n beta = 0.9\n omega = sqrt(beta/(1-beta))\n P = 2*pi/omega\n N = 6\n num_periods = N/omega\n n = 600\n time_steps_per_period = omega*n\n epsilon = 0.1\n\n def y_exact(t):\n \"\"\"\n Returns the exact solution for the position of y,\n with a minus sign due to a positive epsilon stretching\n the pendulum in the negative y direction.\n \"\"\"\n return -epsilon*cos(omega*t)\n\n x, y, theta, t = simulate(Theta=0, epsilon=epsilon, num_periods=num_periods, time_steps_per_period=time_steps_per_period, plot=True)\n tol = 0.0001\n y_e = y_exact(t)\n assert abs(x.max()) < tol\n err = abs(y_e - y).max()\n assert err < tol",
"def plane_project(x,n):\n\treturn x-np.dot(x,n)/np.linalg.norm(n)*vecnorm(n)",
"def axvline(self, x=0, ymin=0, ymax=1, **kwargs):\n\n trans = mtrans.blend_xy_sep_transform( self.transData, self.transAxes )\n l, = self.plot([x,x], [ymin,ymax] , transform=trans, scaley=False, **kwargs)\n return l",
"def get_partition(self,x,y):\n N = int(np.sqrt(len(self.partitions)))\n\n if x>=self.xmax:\n # check upper limit\n xind = N-1\n else:\n # in case we are < xmin, don't let the index go negative\n xind = np.max([0,np.argmax(x<np.linspace(self.xmin,self.xmax,N+1))-1])\n\n if y>=self.ymax:\n # check upper limit\n yind = N-1\n else:\n # in case we are < xmin, don't let the index go negative\n yind = np.max([0,np.argmax(y<np.linspace(self.ymin,self.ymax,N+1))-1])\n\n ## print xind\n ## print yind\n\n #linear index\n return yind*N+ xind",
"def vline(self, x, y, h, color):\n # Confirm coordinates in boundary\n if x < 0 or y < 0 or (y + h - 1) > self.height:\n print(\"vline({}, {}) params out of screen\".format(x, y))\n return\n line = color.to_bytes(2, 'big') * h\n self._writeblock(x, y, x, y + h - 1, line)",
"def line_endpoints(line): \n start = shapely.get_point(line,0)\n end = shapely.get_point(line,-1)\n return start, end",
"def get_lane(self, x):\n # print(\"line\", self.map.left_line)\n # print(\"dist from left line\", self.dist_from_line(self.map.left_line, x, y))\n return int(np.floor((x / self.map.width) * self.lane_num))",
"def ipfline(center=[0,0],csym='cubic'):\n xc = []; yc = []\n if csym!='cubic': print(\"Only Cubic!\"); raise IOError\n xc.append( center[0])\n yc.append( center[1])\n\n for i in np.linspace(0.,1/math.sqrt(3.)):\n yaux = i\n xaux = math.sqrt((1. - yaux**2)/2.)\n zaux = xaux\n t1 = math.sqrt(1. - zaux) / math.sqrt(xaux**2 + yaux**2)\n t2 = t1/math.sqrt(1. + zaux)\n ## equal area\n # xc.append(xaux*t1)\n # yc.append(yaux*t1)\n ## stereo\n xc.append(xaux*t2)\n yc.append(yaux*t2)\n\n xc.append(center[0])\n yc.append(center[1])\n return np.array([xc,yc])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
the line at infinity just contains the points at infinity
|
def line_at_infinity(n):
return points_at_infinity(n)
|
[
"def testPlotCurveInfinite(self):\n tests = {\n 'y all not finite': ([0, 1, 2], [numpy.inf, numpy.nan, -numpy.inf]),\n 'x all not finite': ([numpy.inf, numpy.nan, -numpy.inf], [0, 1, 2]),\n 'x some inf': ([0, numpy.inf, 2], [0, 1, 2]),\n 'y some inf': ([0, 1, 2], [0, numpy.inf, 2])\n }\n for name, args in tests.items():\n with self.subTest(name):\n self.plot.addCurve(*args)\n self.plot.resetZoom()\n self.qapp.processEvents()\n self.plot.clear()",
"def is_lin(self):\n return np.all([d.is_lin for d in self])",
"def asInfiniteLine(self) -> \"adsk::core::Ptr< adsk::core::InfiniteLine3D >\" :\n return _core.Line3D_asInfiniteLine(self)",
"def line_endpoints(line): \n start = shapely.get_point(line,0)\n end = shapely.get_point(line,-1)\n return start, end",
"def test_nones(self):\n none_indexes = [1, 5, 8]\n\n # Try Nones in y_data first\n x_data, y_data = self.gen_data()\n expected_len = len(y_data) - len(none_indexes)\n values_to_be_cut = [x_data[i] for i in none_indexes]\n for i in none_indexes:\n y_data[i] = None\n line = functions.FittedLine(x_data, y_data)\n self.assertTrue(len(line.x_data) == expected_len)\n self.assertTrue(len(line.y_data) == expected_len)\n for data in values_to_be_cut:\n self.assertTrue(data not in line.x_data)\n self.assertTrue(None not in line.y_data)\n\n # Try Nones in x_data next\n x_data, y_data = self.gen_data()\n expected_len = len(y_data) - len(none_indexes)\n values_to_be_cut = [y_data[i] for i in none_indexes]\n for i in none_indexes:\n x_data[i] = None\n line = functions.FittedLine(x_data, y_data)\n self.assertTrue(len(line.x_data) == expected_len)\n self.assertTrue(len(line.y_data) == expected_len)\n for data in values_to_be_cut:\n self.assertTrue(data not in line.y_data)\n self.assertTrue(None not in line.x_data)\n\n # Try Nones in both\n x_data, y_data = self.gen_data()\n x_none_indexes = [1, 2]\n y_none_indexes = [3, 4]\n none_indexes = set(x_none_indexes).union(y_none_indexes)\n expected_len = len(x_data) - len(none_indexes)\n x_cut_values = []\n for i in x_none_indexes:\n x_cut_values.append(x_data[i])\n x_data[i] = None\n y_cut_values = []\n for i in y_none_indexes:\n y_cut_values.append(y_data[i])\n y_data[i] = None\n line = functions.FittedLine(x_data, y_data)\n self.assertTrue(len(line.x_data) == expected_len)\n self.assertTrue(None not in line.x_data)\n self.assertTrue(len(line.y_data) == expected_len)\n self.assertTrue(None not in line.y_data)\n for v in x_cut_values:\n self.assertTrue(v not in line.x_data)\n for v in y_cut_values:\n self.assertTrue(v not in line.y_data)",
"def findVanishingPoint(self, point_lines):\n lns = []\n\n for i in range(0, len(point_lines), 2):\n lns.append(self.__computeLineNormal(point_lines[i][None, :], point_lines[i + 1][None, :]))\n\n # ln1 = self.__computeLineNormal(point_line1[0, None], point_line1[1, None])\n # ln2 = self.__computeLineNormal(point_line2[0, None], point_line2[1, None])\n # ln3 = self.__computeLineNormal(point_line3[0, None], point_line3[1, None])\n\n # ln1 = ln1 / ln1[:, -1]\n # ln2 = ln2 / ln2[:, -1]\n # ln3 = ln3 / ln3[:, -1]\n\n lns = np.vstack(lns)\n # lns = lns / lns[:, -1]\n\n # A = np.vstack((ln1, ln2, ln3))\n A = lns\n vp = la.solve(np.dot(A[:, 0:2].T, A[:, 0:2]), np.dot(A[:, 0:2].T, -A[:, -1]))\n\n return np.vstack((vp[:, None], 1)).T",
"def iterative_end_point_fit(self, list_of_points_for_lines, breakpoints, start_of_region, end_of_region):\n minimum_distance_to_be_a_corner = 0.06 # meter, the value set is a guess and may need adjusting\n N_min = 3 # this probably should be turned into a variable part of self\n if (end_of_region - start_of_region + 1) <= N_min:\n return None\n max_distance = 0\n farthest_point = -1\n # number_of_potential_corners = 0 # an attempt to ignore single points that disrupt clearly straight lines\n for potential_corner in range(start_of_region + 1, end_of_region):\n distance_to_line = self.distance_line_to_point(breakpoints[start_of_region][0], breakpoints[end_of_region][0], breakpoints[potential_corner][0])\n if distance_to_line > minimum_distance_to_be_a_corner:\n # number_of_potential_corners += 1\n if distance_to_line > max_distance:\n max_distance = distance_to_line\n farthest_point = potential_corner\n\n if farthest_point == -1: # or number_of_potential_corners < 2:\n list_of_points_for_lines.append(self.create_wall(breakpoints[start_of_region], breakpoints[end_of_region]))\n else:\n self.iterative_end_point_fit(list_of_points_for_lines, breakpoints, start_of_region, farthest_point)\n self.iterative_end_point_fit(list_of_points_for_lines, breakpoints, farthest_point, end_of_region)",
"def interiorpoint(self):",
"def _finishOutline(self, normList=None, finishedShape=None):\n if normList is None:\n normList = np.array([point.normalVector for point in self.iterPoints()], dtype=np.float)\n elif len(normList[(normList < np.inf)]) == 0:\n return\n if finishedShape is None:\n finishedShape = []\n\n \"\"\" Find the first index in normList that is not infinity. \"\"\"\n firstLineIndex = np.where(normList[:,0] < np.inf)[0][0]//2\n \n \"\"\" firstLine is needed to know if the last line closes the shape. \"\"\"\n firstLine = self[firstLineIndex]\n normList[firstLineIndex*2:firstLineIndex*2+2] = np.inf\n\n if not self.isInside(firstLine.getOffsetLine(c.EPSILON*2, c.INSIDE).getMidPoint()):\n \"\"\" Test if the inside (left) of the line is inside the part. If\n not flip the line. \"\"\"\n firstLine = firstLine.fliped()\n \n testPoint = firstLine.end\n finishedShape.append(firstLine)\n while len(normList[(normList < np.inf)]) > 0:\n\n distances = np.linalg.norm(normList-testPoint.normalVector, None, 1)\n index = np.argmin(distances)\n nearestLine = self[index//2]\n \n if distances[index] > c.EPSILON:\n raise Exception('Shape has a gap of ' + str(distances[index]) +\n ' at point ' + str(testPoint) + ', ' + \n str(p.Point(normList[index])))\n if index%2:\n \"\"\" If index is odd we are at the end of a line so the line needs to be flipped. \"\"\"\n nearestLine = nearestLine.fliped()\n \n testPoint = nearestLine.end\n finishedShape.append(nearestLine)\n \n index //= 2\n \"\"\" Instead of deleting elements from the NumPy array we set the used\n vectors to infinity so they will not appear in the min. \"\"\"\n normList[[index*2,index*2+1]] = np.inf\n \n if testPoint == firstLine.start:\n self._finishOutline(normList, finishedShape)\n return finishedShape\n dist = firstLine.start - finishedShape[-1].end\n if dist < c.EPSILON:\n return finishedShape\n raise Exception('Shape not closed. There is a gap of {:0.5f} at point {}'.format(dist, testPoint))",
"def get_vanishing_line(vanishing_pts: np.ndarray):\n assert vanishing_pts.shape == (2, 3)\n vanishing_line = np.empty(shape=3, dtype=vanishing_pts.dtype)\n \"\"\" YOUR CODE HERE \"\"\"\n vanishing_line = np.cross(vanishing_pts[0, :], vanishing_pts[-1, :])\n\n \"\"\" END YOUR CODE HERE \"\"\"\n assert len(vanishing_line) == 3\n return vanishing_line",
"def _update_trajectory(self, xm, ym, broken_streamlines=True):\n if self._current_xy != (xm, ym):\n if self[ym, xm] == 0:\n self._traj.append((ym, xm))\n self._mask[ym, xm] = 1\n self._current_xy = (xm, ym)\n else:\n if broken_streamlines:\n raise InvalidIndexError\n else:\n pass",
"def clear_points(self):\n self._points = []\n # clear points from the number line\n return",
"def test6(self):\n assert self.obj.doesLinesIntersect([1,1], [1,1]) == False, \"Co-Ordinates with dot is not a line\"",
"def infront(self, line):\n for wall in self.walls:\n wall_v = self.wall_vector(wall)\n point = self.intersection(line, wall_v)\n yield wall",
"def render_visible(V):\n\n # make V into list sorted by slope: O(nlogn)\n V = sorted(V, key=lambda l: l.m)\n X = visible_intersections(V)\n\n # add point beyond left end point to have a support point for the line\n # with smallest slope\n X = [X[0]-5] + X\n\n # Calculate the corresponding Y values:\n Y = [ l.y(x) for l,x in zip(V,X)]\n\n # and now a support point for the lines with greatest slope:\n X.append( X[-1]+5 )\n Y.append( V[-1].y(X[-1]+5) )\n return X,Y",
"def constraint_line(self,eps,color='r'):\n extremes = np.array([[(1+eps)/2, (1-eps)/2, 0],\n [eps,0,1-eps]])\n cart = self.bary2cart(extremes)\n plt.plot(cart[:,0],cart[:,1],color=color)",
"def nearest_point_on_line(point, line): \n return line.interpolate(line.project(point))",
"def init_draw_points(self):\n self.draw_points = np.vstack((self.points, self.points[0]))\n self.draw_points = self.draw_points.T\n\n # thick line for alignment purposes\n self.thick_line = self.draw_points[:, :2]\n for i in range(1, len(self.draw_points[0]) - 1):\n if math.dist(self.draw_points[:, i].flatten(), self.draw_points[:, i + 1].flatten()) > \\\n math.dist(self.thick_line[:, 0].flatten(), self.thick_line[:, 1].flatten()):\n self.thick_line = self.draw_points[:, i:i + 2]",
"def getNullcline(d, x, y):\r\n h = plt.figure() # a dummy figure to draw the contours on\r\n cs = plt.contour(x, y, d, levels=[0]) # creates a contour plot with a line at 0\r\n paths = cs.collections[0].get_paths() # get the path of the null-cline\r\n xy = paths[0].vertices # get the verticies of the path\r\n plt.close(h) # close the dummy figure\r\n return xy[:,0], xy[:,1]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialization code for autonomous mode may go here. Users may override this method for initialization code which will be called each time the robot enters autonomous mode, regardless of the selected autonomous mode. This can be useful for code that must be run at the beginning of a match.
|
def autonomousInit(self) -> None:
pass
|
[
"def autonomousInit(self) -> None:\n ...",
"def autonomous(self) -> None:\n\n self.__nt_put_mode(\"auto\")\n self.__nt_put_is_ds_attached(self.__is_ds_attached())\n\n self._on_mode_enable_components()\n\n try:\n self.autonomousInit()\n except:\n self.onException(forceReport=True)\n\n auto_functions: Tuple[Callable[[], None], ...] = (self._enabled_periodic,)\n\n if self.use_teleop_in_autonomous:\n auto_functions = (self.teleopPeriodic,) + auto_functions\n\n self._automodes.run(\n self.control_loop_wait_time,\n auto_functions,\n self.onException,\n watchdog=self.watchdog,\n )\n\n self._on_mode_disable_components()",
"def robotInit(self) -> None:\n ...",
"def initialize(self):\n self.initialized = False\n self.initialize_cameras()\n self.initialize_electronics()\n self.initialized = True",
"def initialize(self):\n\n \"*** YOUR CODE HERE\"\n #agent가 생성될때마다 agentNum을 하나씩 증가시킨다.\n MyAgent.agentNum = MyAgent.agentNum+1",
"def initialise(self):\n\n self.__initialise_chase_mode()\n self.__initialise_frightened_mode()\n Character.initialise(self)",
"def Initialize():\r\n pass",
"def initialize(self):\n self._change_state(\"initialize\")",
"def onGameModeReady(self, *args, **kwargs):\n self.loadArena()",
"def post_init_hook(self):\n # this Interaction may be activated by Receptor (actually it is binary intent classifier here)\n self.global_trigger_receptor = PhrasesMatcher(phrases=[\"ОПЕРАТОР\", \"Переключите меня на оператора\"],\n daemon_if_matched=self.start)\n self.ic.user_message_signal.connect(self.global_trigger_receptor, weak=False)",
"def setup_after_space_initialization(self):",
"def initialize(self):\n if self.dummy:\n self.logger.info('Dummy device initialized')\n else:\n self.rsc = serial.Serial(port=self._port,\n baudrate=self.DEFAULTS['baudrate'],\n timeout=self.DEFAULTS['read_timeout'],\n write_timeout=self.DEFAULTS['write_timeout']\n )\n\n self.logger.info('Initialized device AOTF at port {}.'.format(self._port))\n self._is_initialized = True",
"def initialize(self):\n print 'initializing FPGA...',\n self.openSerial()\n for i in range(14):\n self.setMode(i, 1)\n for i in range(8):\n self.setDelay(i, 20, 0)\n self.writeDelays()\n print 'done'",
"def InitDevice(self):\n # No need to init the device.\n pass",
"def _init(self, cmd, doHome=True):\n cmd.inform('text=\"setting motor config ...\"')\n self._setConfig(cmd)\n self.checkConfig(cmd)\n\n if doHome:\n self.moving(cmd, position='low')\n\n cmd.inform('text=\"setting origin at 0...\"')\n self._setHome(cmd=cmd)",
"def startCompetition(self) -> None:\n self.robotInit()\n hal.observeUserProgramStarting()\n\n # Loop forever, calling the appropriate mode-dependent function\n self._loop.run_until_complete(self._run_robot())",
"def _pre_init(self, **kwargs) -> None:\n raise NotImplementedError",
"def initial_setup():\n LOGGER.debug('initializing first mission')\n mission = get_running_mission()\n if isinstance(mission, MissionPath):\n LOGGER.info('building METAR for initial mission: %s', mission.orig_name)\n weather = elib_wx.Weather(str(mission.path))\n core.Status.metar = weather\n esst.atis.create.generate_atis(weather)\n else:\n LOGGER.error('no initial mission found')",
"def pre_mainloop(self):\n self.init_pygame()\n self.init_graphics()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Periodic code for test mode should go here.
|
def testPeriodic(self) -> None:
pass
|
[
"def testPeriodic(self) -> None:\n ...",
"def run_post_test(self):\n pass",
"def run_pre_test(self):\n pass",
"def run(self,test_mode):\n\n self._start_profile_prompt()\n self.checker._check_directories()\n \n\n if test_mode == 'check':\n data_path = './tmp_instance/data'\n self._make_PGServer(data_path)\n else:\n data_path = config.data_path\n self._check_DB_ready()\n\n if self.profile.use_schedule():\n schedule = self.profile.schedule()\n batch = schedule.next_batch()\n\n while batch:\n if batch.len() > 1:\n self._start_batch(batch)\n else:\n self._start_test(batch.tests()[0])\n batch = schedule.next_batch()\n else:\n\n case = self.profile.next_case()\n while case:\n self._start_test(case)\n case = self.profile.next_case()\n\n logger.debug(\"cases run out!\")\n \n if test_mode == 'check':\n self._clear_PGServer(data_path)\n\n self._end_profile_prompt()\n logger.info(\"calculate the report data\")\n self.checker._reportdata_gen(self._start_time,self._end_time)",
"def tests(self):\n pass",
"def setUp(self):\n #cbrandom.toggleDebugMode(True)",
"def pre_create_trial(self):",
"def test_mode(self):\n self.logger.debug('Starting unit_test on mode mode')\n modes = ['Modulation','Voltage1','Voltage2']\n for m in modes:\n self.inst.mode = m\n assert m == self.inst.mode\n self.logger.info('Mode assertion passed for mode: {}'.format(m))\n\n self.logger.info('Mode unit_test passed')",
"def _experiment(self):\n pass",
"def on_test(self):\r\n # Run the client's on_test() function if it exists\r\n self.call_handler('on_test')",
"def test_stop_run(self):\n pass",
"def main(cfg, mode):\n experiment = ExperimentLoop(cfg, mode)\n if mode == TRAIN:\n experiment.train()\n elif mode == VAL:\n experiment.validation()",
"def autonomousPeriodic(self) -> None:\n ...",
"def after_simulation(self):\n pass",
"def cond_test(self):\n self.vert_cond.home()\n self.horz_cond.home()\n # 4000 is the right step for cond_probe horizontal move to analyse\n self.horz_cond.move_to(4000)\n self.vert_cond.move_to(40000)\n print('conductivity analysing')\n time.sleep(10)\n self.vert_cond.home()\n time.sleep(10)\n # 34000 is the right step for cond_probe horizontal move to clean\n self.horz_cond.move_to(40000)\n self.vert_cond.move_to(40000)\n print('conductivity probe is cleaning')\n time.sleep(10)\n self.vert_cond.home()",
"def test_notify_run_status(self):\n pass",
"def test_restart_run(self):\n pass",
"def before_simulation_step(self):\n pass",
"def test_post_feature_flag(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Periodic code for all modes should go here. Users must override this method to utilize it, but it is not required. This function gets called last in each mode. You may use it for any code you need to run during all modes of the robot (e.g. NetworkTables updates). The default implementation will update SmartDashboard, LiveWindow and Shuffleboard.
|
def robotPeriodic(self) -> None:
watchdog = self.watchdog
self.__sd_update()
watchdog.addEpoch("SmartDashboard")
self.__lv_update()
watchdog.addEpoch("LiveWindow")
# self.__sf_update()
# watchdog.addEpoch("Shuffleboard")
|
[
"def autonomous(self) -> None:\n\n self.__nt_put_mode(\"auto\")\n self.__nt_put_is_ds_attached(self.__is_ds_attached())\n\n self._on_mode_enable_components()\n\n try:\n self.autonomousInit()\n except:\n self.onException(forceReport=True)\n\n auto_functions: Tuple[Callable[[], None], ...] = (self._enabled_periodic,)\n\n if self.use_teleop_in_autonomous:\n auto_functions = (self.teleopPeriodic,) + auto_functions\n\n self._automodes.run(\n self.control_loop_wait_time,\n auto_functions,\n self.onException,\n watchdog=self.watchdog,\n )\n\n self._on_mode_disable_components()",
"def refresh_power(self):",
"def robotPeriodic(self) -> None:\n ...",
"def refresh_sensors():\n # We refresh the sensors display.\n display_sensors()",
"def autonomousPeriodic(self) -> None:\n ...",
"def _automatic_refresh(self) -> None:\n self.refresh()",
"def _enabled_periodic(self) -> None:\n watchdog = self.watchdog\n\n for name, component in self._components:\n try:\n component.execute()\n except:\n self.onException()\n watchdog.addEpoch(name)\n\n self._do_periodics()\n\n for reset_dict, component in self._reset_components:\n component.__dict__.update(reset_dict)",
"def __change_mode(self, dt):\n\n self.chasing = not self.chasing\n self.current_direction = self.current_direction.get_opposite()\n self.mode_change_start = Clock.get_time()\n\n if self.chasing:\n self.mode_change_timer = self.chase_length\n Clock.schedule_once(self.__change_mode, self.mode_change_timer)\n else:\n self.mode_change_timer = self.scatter_length\n Clock.schedule_once(self.__change_mode, self.mode_change_timer)",
"def update(self):\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n\n if self.active_flag:\n self.sense_and_act()",
"def refresh(self):\n self.action = None\n self.check_clocks()\n if self.action is not None:\n self.step(self.action)",
"def start_mode_change_timer(self):\n\n Clock.unschedule(self.__change_mode)\n self.mode_change_timer = self.scatter_length\n Clock.schedule_once(self.__change_mode, self.mode_change_timer)",
"def __resume_mode_change(self):\n\n Clock.schedule_once(self.__change_mode, self.mode_time_remaining)",
"def _triggersensorupdate(self):\r\n\r\n\r\n self._sendPacket(self._activation_packet)\r\n self._sendPacket(b'\\x52\\x02\\x13\\x05\\x9a')\r\n\r\n # Sending OFF signal\r\n for dev_id, device in self.devices.items():\r\n self._hass.add_job(\r\n self.async_see(dev_id, STATE_OFF)\r\n )",
"def on_start(self):\n Clock.schedule_interval(self.update, 0.1)",
"def choose_mode( self, ):\r\n # =========== add your modes as desired starting here ========\r\n # ---------->> call modes here; I comment out ones I am not using. Makes it really easy to switch modes\r\n # these are modes I use, pretty much one for each micro-controller\r\n # project that I do. You can look at them as examples or delete the subroutines\r\n # pick one by un-commenting it. These are typically synced up with an Arduino app\r\n\r\n\r\n pass # if everything else is commented out\r\n #self.quick_start_mode()\r\n #self.tutorial_example_mode() # simple setup for documentation and basic terminal\r\n #self.accel_demo_mode() #\r\n #self.controlino_mode() #\r\n\r\n #self.ddclock_mode()\r\n #self.ddclock_david()\r\n #self.ddclock_test_mode()\r\n #self.ddclock_demo_1()\r\n #self.ddclock_demo_2()\r\n\r\n\r\n\r\n self.deer_me_dev()\r\n #self.deer_me_pi_deploy()\r\n\r\n #self.infra_red_mode() # not working, requires special modules from irtools\r\n #self.green_house_mode()\r\n #self.motor_driver_mode()\r\n #self.root_cellar_mode()\r\n #self.stepper_tester_mode()\r\n #self.serial_cmd_test() # for messing with master SerialCmd and SerialCmdMaster\r\n #self.terminal_mode()\r\n\r\n #self.two_axis_mode()\r\n #self.well_monitor_mode()\r\n\r\n\r\n # ---- additional stuff only for testing in addition to another mode\r\n #self.mode_plus_tests() # used only for testing change freely\r",
"def update_(self):\n #start = time.time()\n for function in self.functions:\n try:\n function()\n except:\n l.error(\"Could not update framework \" + function)\n #end = time.time()\n #print end - start",
"def update(self):\n # ic()\n # self.update_scans()\n self.update_data()",
"def data_updater():\n # This is a daemon thread so no need to explicitly\n # poll for any shutdown events.\n sleep_time = 0\n while True:\n interval = wallet['update_info']['interval']\n if time.time() > sleep_time + interval or \\\n wallet['update_info']['in_need']:\n do_update()\n sleep_time = time.time()\n time.sleep(1)",
"def setMode(self, c, mode):\n if mode not in self.collectionTime.keys(): raise Exception(\"Incorrect mode\")\n self.collectionMode = mode\n countRate = self.collectionTime[mode]\n yield self.inCommunication.acquire()\n if mode == 'Normal':\n #set the mode on the device and set update time for normal mode\n yield deferToThread(self.api.setModeNormal)\n yield deferToThread(self.api.setPMTCountRate, countRate)\n elif mode == 'Differential':\n yield deferToThread(self.api.setModeDifferential)\n self.clear_next_pmt_counts = 3 #assign to clear next two counts\n self.inCommunication.release()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
MagicRobot will do The Right Thing and automatically load all autonomous mode routines defined in the autonomous folder.
|
def autonomous(self) -> None:
self.__nt_put_mode("auto")
self.__nt_put_is_ds_attached(self.__is_ds_attached())
self._on_mode_enable_components()
try:
self.autonomousInit()
except:
self.onException(forceReport=True)
auto_functions: Tuple[Callable[[], None], ...] = (self._enabled_periodic,)
if self.use_teleop_in_autonomous:
auto_functions = (self.teleopPeriodic,) + auto_functions
self._automodes.run(
self.control_loop_wait_time,
auto_functions,
self.onException,
watchdog=self.watchdog,
)
self._on_mode_disable_components()
|
[
"def autonomousInit(self) -> None:\n ...",
"def RegisterAutonomous(self):\n\t\t# These run the whole time to spin the wheel and make sure the tilt\n\t\t# statys at the right position\n\t\tscheduler.RegisterAutonomousTask(\"ShooterContinuous\", shooter.ShooterContinuous, scheduler.PARALLEL_TASK)\n\t\tscheduler.RegisterAutonomousTask(\"TiltContinuous\", tilt.TiltContinuous, scheduler.PARALLEL_TASK)\n\n\t\t# Spin up wheel, tilt and wait 3 seconds for it to reach speed\n\t\tscheduler.RegisterAutonomousTask(\"Set Shooter Speed 0.5\", shooter.SetSpeed, scheduler.PARALLEL_TASK, shooter.SHOOTER_INFIELD)\n\t\tscheduler.RegisterAutonomousTask(\"Tilt\", tilt.TiltToValue, scheduler.PARALLEL_TASK, tilt.TILT_PYRIMID_SIDE_PRACTICE)\n\n\t\t# The wait.Wait() is just an enpty function. The scheduler just waits\n\t\t# for the registered empty task to timeout\n\t\tscheduler.RegisterAutonomousTimedTask(\"Wait 3 Seconds\", wait.Wait, 3.0)\n\n\t\t# Add 3 sequential shots\n\t\tscheduler.RegisterAutonomousTask(\"Shoot And Load1\", shooter.ShootAndLoad)\n\t\tscheduler.RegisterAutonomousTask(\"Shoot And Load2\", shooter.ShootAndLoad)\n\t\tscheduler.RegisterAutonomousTask(\"Shoot And Load3\", shooter.ShootAndLoad)\n\n\t\t# Tilt back down and put wheel in powersaver mode\n\t\tscheduler.RegisterAutonomousTask(\"SHOOTER POWER SAVING MODE\", shooter.SetSpeed, scheduler.PARALLEL_TASK, shooter.SHOOTER_POWER_SAVING_MODE)\n\t\tscheduler.RegisterAutonomousTask(\"Tilt\", tilt.TiltToValue, scheduler.PARALLEL_TASK, tilt.TILT_PYRIMID_SIDE_PRACTICE)",
"def autonomousInit(self) -> None:\n pass",
"def run_autoruns():\n extract_item('Autoruns', filter='autoruns*', silent=True)\n # Update AutoRuns settings before running\n for path, settings in AUTORUNS_SETTINGS.items():\n winreg.CreateKey(HKCU, path)\n with winreg.OpenKey(HKCU, path, access=winreg.KEY_WRITE) as key:\n for name, value in settings.items():\n winreg.SetValueEx(key, name, 0, winreg.REG_DWORD, value)\n popen_program(global_vars['Tools']['AutoRuns'], minimized=True)",
"def setup(bot: commands.Bot):\n bot.add_cog(AntiRaid(bot))\n print(\"Loaded Cog: AntiRaid\")",
"def robotInit(self) -> None:\n ...",
"def setAutoMethods(self):\n for m in IMultiEngine:\n IM = IMultiEngine[m]\n #first setup non-All methods\n if callable(IM) and m[-3:] != 'All'\\\n and getattr(self, m, None) is None:\n #only work on methods, not attributes, and only on methods\n #not already defined\n eSig = IEngineComplete[m].getSignatureString()\n defs = \"\"\"\ndef autoMethod(self, %s:\n '''%s'''\n log.msg('%s on %%s' %%targets)\n engines = self.engineList(targets)\n l = []\n for e in engines:\n l.append(e.%s%s)\n return gatherBoth(l)\n\"\"\"%(IM.getSignatureString()[1:], IM.getDoc(), IM.getName(), m, eSig)\n try:\n exec(defs)\n setattr(self, m, instancemethod(autoMethod, self, self.__class__))\n #del autoMethod\n except:\n log.msg(\"failed autogen method %s\" %m)\n raise\n addAllMethods(self)",
"def autogenerate_all(self):\n\n self.initialize_dir()\n self.generate_install()\n self.generate_uninstall()\n self.generate_readme()",
"def enable_autoreload(self):\n\n try:\n from watchdog.events import FileSystemEventHandler\n from watchdog.observers import Observer\n except ImportError:\n Logger.warn(\n \"{}: Autoreloader is missing watchdog\".format(self.appname)\n )\n return\n Logger.info(\"{}: Autoreloader activated\".format(self.appname))\n rootpath = self.get_root_path()\n self.w_handler = handler = FileSystemEventHandler()\n handler.dispatch = self._reload_from_watchdog\n self._observer = observer = Observer()\n for path in self.AUTORELOADER_PATHS:\n options = {\"recursive\": True}\n if isinstance(path, (tuple, list)):\n path, options = path\n observer.schedule(handler, join(rootpath, path), **options)\n observer.start()",
"def arm_drone(self):\n\n self.take_control()\n time.sleep(1) # To make sure we are in guided mode before arming\n self.arm()\n\n # Set the current global position as the home position\n self.set_home_as_current_position()\n\n if self.armed:\n self.state = States.ARMED\n else:\n print(\"Drone can not arm right now. \"\n \"Releasing control...\")\n #self.release_control()",
"def initialize_personality():\n download_personality_tarball()\n get_system_image_from_tarball()\n override_options_for_personality()",
"def run(self):\n rospy.init_node(\"dynamics\")\n self._setup()\n rospy.spin()",
"def __automodeThread(self):\n print(color.OKBLUE+\"starting autoMode\"+color.ENDC)\n if not self.autoMode.isSet():\n self.autoMode.set()\n while self.autoMode.isSet() and self.keepAlive.isSet():\n lat, lon = self.lastLatLon\n distance = self.gps.calculateDist(lat,lon)\n if distance != None:\n if distance>=self.distPhoto:\n lastLatLon=self.gps.getDegCoord()\n if not None in lastLatLon:\n self.takePic()\n self.lastLatLon = lastLatLon\n\n print(color.OKBLUE+\"stoping autoMode\"+color.ENDC)\n else:\n print(color.WARNING+\"Error : autoMode already set\"+color.ENDC)",
"def mainSetup():\n setupGlobals()\n setupCallbacks()",
"def start(self):\n \n if not exists(\"/sys/class/pwm/pwm{0}\".format(self._pwmId)):\n system(\"config-pin {0} pwm\".format(self._pinId))\n #SysfsWriter.writeOnce(\"pwm\", \"/sys/devices/ocp.*/{0}_pinmux.*/state\".format(self._pinId))\n SysfsWriter.writeOnce(str(self._pwmId), \"/sys/class/pwm/export\")\n \n SysfsWriter.writeOnce(\"0\", \"/sys/class/pwm/pwm{0}/duty_ns\".format(self._pwmId))\n SysfsWriter.writeOnce(\"0\", \"/sys/class/pwm/pwm{0}/run\".format(self._pwmId))\n SysfsWriter.writeOnce(str(Motor.PERIOD), \"/sys/class/pwm/pwm{0}/period_ns\".format(self._pwmId))\n SysfsWriter.writeOnce(\"1\", \"/sys/class/pwm/pwm{0}/run\".format(self._pwmId))\n \n SysfsWriter.writeOnce(\"out\", \"/sys/class/gpio/gpio{0}/direction\".format(self._gpioId))\n SysfsWriter.writeOnce(\"0\", \"/sys/class/gpio/gpio{0}/value\".format(self._gpioId))\n\n self._sysfsWriter = SysfsWriter(\"/sys/class/pwm/pwm{0}/duty_ns\".format(self._pwmId))\n\n self.setNeutralThrottle()\n \n logging.info(\"motor {0}: started\".format(self._motorId))",
"def initial_setup():\n LOGGER.debug('initializing first mission')\n mission = get_running_mission()\n if isinstance(mission, MissionPath):\n LOGGER.info('building METAR for initial mission: %s', mission.orig_name)\n weather = elib_wx.Weather(str(mission.path))\n core.Status.metar = weather\n esst.atis.create.generate_atis(weather)\n else:\n LOGGER.error('no initial mission found')",
"def setup(bot: commands.Bot) -> None:\n bot.add_cog(Speedrun(bot))\n log.info(\"Speedrun cog loaded\")",
"def startCompetition(self) -> None:\n self.robotInit()\n hal.observeUserProgramStarting()\n\n # Loop forever, calling the appropriate mode-dependent function\n self._loop.run_until_complete(self._run_robot())",
"def startup_once():\n # detect_screens(qtile)\n execute(\"urxvtd -q -o -f\")\n execute(\"compton\")\n execute(\"nm-applet\")\n execute(\"xautolock -time 5 -locker slock \"\n \"-notify 30 -notifier notify-send\")\n execute(\"xsetroot -cursor_name left_ptr\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run components and all periodic methods.
|
def _enabled_periodic(self) -> None:
watchdog = self.watchdog
for name, component in self._components:
try:
component.execute()
except:
self.onException()
watchdog.addEpoch(name)
self._do_periodics()
for reset_dict, component in self._reset_components:
component.__dict__.update(reset_dict)
|
[
"def run(self):\n for instance in self.component_instances:\n instance_thread = instance.spawn()\n instance_thread.start()",
"def do_main(self):\n self.pool.spawn_n(self._periodic_runner)\n super(Manager, self).do_main()",
"def robotPeriodic(self) -> None:\n watchdog = self.watchdog\n self.__sd_update()\n watchdog.addEpoch(\"SmartDashboard\")\n self.__lv_update()\n watchdog.addEpoch(\"LiveWindow\")\n # self.__sf_update()\n # watchdog.addEpoch(\"Shuffleboard\")",
"def robotPeriodic(self) -> None:\n ...",
"def run(self):\n\n\t\ttime.sleep(1)\n\t\tself.state = 'running' \t\t# update function state of biochemistry object\n\n\t\t#----------------------- Flowcell preparation ----------------------------------\n\n\t\tif self.cycle[0:2] == 'WL' and self.flowcell == 0:\t# if white light image cycle on flowcell 0\n\t\t\tself.init()\t\t\t\t\t# do only once at beginning\n\t\t\t#self.exo_start()\n\t\t\tself.logging.info(\"%s\\t%i\\t--> Device initialization and Exonuclease I digestion is done: [%s]\\n\" % (self.cycle_name, self.flowcell, self.state))\n\n\t\telif self.cycle[0:2] == 'WL' and self.flowcell == 1:\t# if white light image cycle on flowcell 1\n\t\t\t#self.exo_start()\n\t\t\tself.logging.info(\"%s\\t%i\\t--> Exonuclease I digestion is done: [%s]\\n\" % (self.cycle_name, self.flowcell, self.state))\n\n\t\telse:\n\t\t\tself.cycle_ligation() # perform query cycle on selected flowcell",
"def run_cycle(self):\n self.fodder_growth()\n self.sort_by_fitness()\n self.herb_feeding()\n self.carn_feeding()\n self.procreation_all()\n self.migration()\n self.aging()\n self.weight_loss()\n self.animal_death()",
"def run(self):\n self.run_mc()",
"def autonomousPeriodic(self) -> None:\n ...",
"def simulate(self):\r\n # First run\r\n for task in self.task_queue.tasks:\r\n instructions = self.compiler.compile(task)\r\n self.insts[task] = instructions\r\n \r\n # start up all the modules\r\n self.startup()\r\n\r\n while(True):\r\n curTick = self.evetq.nextTick()\r\n if(curTick < 0):\r\n return\r\n # print(\"Tick: \",curTick)\r\n self.evetq.setCurTick(curTick)\r\n cur_events = self.evetq.getEvents(curTick)\r\n # print(len(cur_events))\r\n for event in cur_events:\r\n event.process()\r\n #all the events within current cycle are processes\r\n #so we remove these events from the event queue\r\n self.evetq.removeEvents(curTick)",
"def main():\n obj = UnitySnapshotSchedule()\n obj.perform_module_operation()",
"def run(self):\n if self._proxy_lattice is None:\n message = 'A lattice is not added before run in JYULBEngine'\n raise RuntimeError(message)\n\n if not self._is_fdata_initialized:\n self._solver.init_field_data()\n self._is_fdata_initialized = True\n\n self._solver.evolve(self.CM[CUBA.NUMBER_OF_TIME_STEPS])",
"def run(self, steps_per_update=1):\n def loop(sim):\n sim.run(steps_per_update)\n self.loop(loop)",
"def _run_mpc(self):\n\n if self.verbose and self.k % self.print_interval == 0:\n elapsed_time = time.time() - self.last_print_time\n average_time = elapsed_time / self.print_interval\n self.last_print_time = time.time()\n print('\\nk = {}, avg time = {:.3f}, dt = {:.2f}, diff = {:.3f}'.format(\n self.k, average_time, self.dt, average_time - self.dt))\n\n for vehicle_id in self.vehicle_ids:\n # Get control input for the steering from the Frenet path tracking controller.\n angle_pwm = self._get_angle_pwm(vehicle_id)\n\n # Get control input for the velocity from the MPC controller.\n speed_pwm = self._get_speed_pwm(vehicle_id)\n\n # Publish speed and steering inputs.\n self.pwm_publisher.publish(vehicle_id, speed_pwm, angle_pwm, self.gear2_pwm)\n\n self.k += 1",
"def testPeriodic(self) -> None:\n ...",
"def run(self):\n r = rospy.Rate(10) \n while not rospy.is_shutdown():\n if self._action_loop != None:\n self._action_loop()\n r.sleep()",
"def main(self, useDelay = False):\n for currentTau in range(0,self.finalTau):\n #print(currentTau)\n self.runOneTimeStep(currentTau)\n if useDelay:\n time.sleep(self.tau)",
"def testPeriodic(self) -> None:\n pass",
"def run(self):\n\n \n try:\n # Spawn the tasks to run concurrently\n self.loop.create_task(self.listen()) # Listen to subscribed topics\n self.loop.create_task(self.run_tk()) # Run GUI\n self.loop.run_forever()\n except:\n pass",
"def run(self):\n while(self.running):\n self.os.run()\n sleep(1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
returns histogram of each color channel in RGB img
|
def color_histo(img):
    # check if rgb(a): split into one 2-D array per color channel
    if img.shape[2] in (3, 4):
        channels = (img[:, :, 0], img[:, :, 1], img[:, :, 2])
    elif img.shape[2] == 1:
        channels = (img[:, :, 0],)
    else:
        print("weird number of color channels going on: ", img.shape)
        return None
    # return a histogram for each channel:
    return (histogram(chan) for chan in channels)
|
[
"def analyze_color_histogram(img):\n color = ('b', 'g', 'r')\n hist = [] \n for i, col in enumerate(color):\n hist.append(cv2.calcHist([img],[i],None,[256],[0,256])) \n \n blue = hist[0]\n green = hist[1]\n red = hist[2]\n \n return blue, green, red",
"def get_histogram(img):\n h, w = img.shape\n hist = [0.0] * 256\n for i in range(h):\n for j in range(w):\n hist[img[i, j]] += 1\n return np.array(hist) / (h * w)",
"def color_hist(img, nbins=32, bins_range=(0, 256)):\n \"\"\"Compute the histogram of the color channels separately.\"\"\"\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features",
"def color_hist(img, nbins=32):\n channel1_hist = np.histogram(img[:,:,0], bins=nbins)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features",
"def color_histogram(img, nbins=32, bins_range=(0, 256)):\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features",
"def compute_histogram(self, image):\n #create empty array\n hist = [0]*256\n #get image dimensions \n row,col=image.shape[0:2]\n #count pixels for each intensity level\n for i in range(row):\n for j in range(col):\n hist[image[i,j]] = hist[image[i,j]] + 1\n return hist",
"def plot_color_histogram(img):\n color = ('b', 'g', 'r')\n for i, col in enumerate(color):\n hist = cv2.calcHist([img],[i],None,[256],[0,256]) \n plt.plot(hist, color = col)\n plt.xlim([0,256])\n \n plt.title('Color Histogram'), plt.xlabel(\"Scale\"), plt.ylabel(\"Quantity\")\n plt.grid(True)\n\n plt.show()",
"def calc_img_hist_stats(im):\n\n r_hist = calc_hist_stats(im[:,:,0].flatten())\n g_hist = calc_hist_stats(im[:,:,1].flatten())\n b_hist = calc_hist_stats(im[:,:,2].flatten())\n\n return r_hist + g_hist + b_hist",
"def lab_histogram(image):\n\n # Convert image from RGB color space to Lab\n image_lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)\n # Select the number of bins\n bins = 256\n descriptors = []\n # Compute histogram for every channel\n for i in range(3):\n hist = cv2.calcHist([image_lab], [i], None, [bins], [0, 256]).ravel()\n cv2.normalize(hist, hist)\n descriptors.append(np.array(hist, dtype=np.float32))\n\n # Retrieve the concatenation of channel histograms\n return descriptors",
"def ycrcb_histogram(image):\n\n bins = 256\n descriptors = []\n imageYCrCb = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n for i in range(3):\n hist = cv2.calcHist([imageYCrCb], [i], None, [bins], [0, 256]).ravel()\n cv2.normalize(hist, hist)\n descriptors.append(np.array(hist, dtype=np.float32))\n\n return descriptors",
"def build_histograms(preprocessed_images, num_channels):\n histogram_processed_train = np.zeros((len(preprocessed_images), num_channels ** 3))\n for i, img in enumerate(preprocessed_images):\n # chans = cv2.split(image)\n colors = (\"b\", \"g\", \"r\")\n hist = cv2.calcHist([img], [0, 1, 2],\n None, [num_channels, num_channels, num_channels], [0, 256, 0, 256, 0, 256])\n histogram_processed_train[i] = hist.flatten()\n return histogram_processed_train",
"def create_histogram_color(self, parent):\n if parent.loaded_image_type == \"gs\" or \\\n parent.loaded_image_type == 'b' or \\\n parent.loaded_image_type == 'gs3ch':\n return self.create_histogram_greyscale(parent)\n img = parent.histogram_image_data\n y_axis = [0 for i in range(256)]\n x_axis = [i for i in range(256)]\n red_channel = [i[0] for i in img[1]]\n green_channel = [i[1] for i in img[1]]\n blue_channel = [i[2] for i in img[1]]\n\n def compute_values_count(channel_name):\n for value in channel_name:\n luminence_value = int(value)\n y_axis[luminence_value] += 1\n\n compute_values_count(red_channel)\n\n plt.figure()\n plt.bar(x_axis, y_axis)\n plt.title(f'Histogram - kanał czerwony - {img[0]}') # Red channel\n\n y_axis = [0 for i in range(256)]\n compute_values_count(green_channel)\n plt.figure()\n plt.bar(x_axis, y_axis)\n plt.title(f'Histogram - kanał zielony - {img[0]}') # Green channel\n\n y_axis = [0 for i in range(256)]\n compute_values_count(blue_channel)\n plt.figure()\n plt.bar(x_axis, y_axis)\n plt.title(f'Histogram - kanał niebieski - {img[0]}') # Blue channel\n\n plt.show()",
"def histograma(sourceImage):\r\n\r\n #inicializacion del histograma y de los 256 de intensidad\r\n hist = np.zeros(256)\r\n x = np.array(range(256))\r\n\r\n #iteraremos sobre cada pixel de la imagen\r\n width, height = sourceImage.shape\r\n totalSize = width*height;\r\n\r\n for i in range(width):\r\n for j in range(height):\r\n brillo = f[i, j]\r\n #aumentamos la columna del histograma correspondiente a ese brillo en particular\r\n hist[brillo] += 1 \r\n hist = hist/totalSize \r\n return x, hist",
"def equalize_hist(input_img):\n colors = input_img.getcolors()\n pixel_count = input_img.size[0] * input_img.size[1]\n lookup = equalize(colors, pixel_count)\n return Image.eval(input_img, lambda x: lookup[x])",
"def plot_histogram(img):\n hist = cv2.calcHist([img],[0],None,[256],[0,256])\n\n plt.hist(hist,facecolor='green')\n plt.title('Histogram'), plt.xlabel(\"Scale\"), plt.ylabel(\"Quantity\")\n plt.grid(True)\n\n plt.show()",
"def test_rgb_hist(construct_dummy_images):\n\n red_image, green_image, blue_image, mix_image = construct_dummy_images\n\n num_bins = 5\n red_hist = hist_module.rgb_hist(red_image.astype('double'), num_bins)\n green_hist = hist_module.rgb_hist(green_image.astype('double'), num_bins)\n blue_hist = hist_module.rgb_hist(blue_image.astype('double'), num_bins)\n mix_hist = hist_module.rgb_hist(mix_image.astype('double'), num_bins)\n\n assert len(red_hist[red_hist == 1]) == 1\n assert len(green_hist[green_hist == 1]) == 1\n assert len(blue_hist[blue_hist == 1]) == 1\n assert len(mix_hist[mix_hist != 0]) == 3",
"def histogram_matrix(image_path, resize_dim=32, num_bins=16):\n img = read_image(image_path)\n resized_image = cv2.resize(img, (resize_dim, resize_dim))\n hist_array = []\n for band in range(3):\n hist = cv2.calcHist([resized_image], [band], None, [num_bins], [0, 256])\n hist_array.append(hist.T[0].astype(int))\n return hist_array",
"def create_histogram_greyscale(self, parent, img=None):\n if parent.loaded_image_type == 'gs3ch':\n # gets values of only first channel of greyscale 3 channel type image\n img = [parent.loaded_image_data[1][i][0] for i in range(len(parent.loaded_image_data[1]))]\n else:\n img = parent.loaded_image_data[1] # list containing image luminence avlues\n\n # List with occurrences of each luminance value\n values_count = [0 for i in range(256)]\n for value in img:\n values_count[value] += 1\n\n x_axis = list([i for i in range(256)])\n y_axis = values_count\n plt.title(f\"Histogram - {parent.loaded_image_data[0]}\")\n plt.bar(x_axis, y_axis)\n plt.show()",
"def imgHisto(imagePath):\n img=cv2.imread (imagePath)\n #RGB -> HSV.\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n #Déclaration des couleurs des courbes\n color = ('r','g','b')\n #Déclaration des noms des courbes.\n labels = ('h','s','v')\n #Pour col allant r à b et pour i allant de 0 au nombre de couleurs\n for i,col in enumerate(color):\n #Hist prend la valeur de l'histogramme de hsv sur la canal i.\n hist = cv2.calcHist([hsv],[i],None,[256],[0,256])\n # Plot de hist.\n plt.plot(hist,color = col,label=labels[i])\n plt.xlim([0,256])\n #Affichage.\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
blanks out any color in the image within the tolerance percentage of the given color
|
def remove_color(colored_pic, rgb_color, tolerance=0.7):
# surprisingly, a high tolerance works best for the training pic...
img = colored_pic.copy()
# create color tolerance limits based on rgb color
rlims,glims,blims = ((rgb_color[i]*(1.0-tolerance),rgb_color[i]*(1+tolerance)) for i in range(3))
# set to black where within tolerance limits
# rgb stored as [[(255,255,255), (0,0,0)], [(100,100,100), (5,5,5)], etc...]
img[((img[:, :, 0]>rlims[0]) & (img[:, :, 0]<rlims[1])) &
((img[:, :, 1]>glims[0]) & (img[:, :, 1]<glims[1])) &
((img[:, :, 2]>blims[0]) & (img[:, :, 2]<blims[1]))] = 255
return img
|
[
"def percentFilled(w, h, cnt):\n return cv2.contourArea(cnt) >= 0.7 * w * h",
"def percentage_open(image):\n red_count = 0\n for i in range(128):\n for j in range(128):\n r, g, b = image.getpixel((i,j))\n if r > b:\n red_count += 1\n return float(red_count)/(128.0 * 128.0)",
"def kray_fill(img_thresh, full_filling=False, full_left_right=True):\n img_for_count = img_thresh.copy()\n h, img_for_count = cv2.threshold(img_for_count, 1, 1, cv2.THRESH_BINARY_INV)\n\n ln = img_thresh.shape[1]\n counter_up = 0\n counter_bt = img_thresh.shape[0] - 1\n counter_lf = 0\n counter_rg = img_thresh.shape[1] - 1\n\n # count how many percent of column is white\n sm = img_for_count.sum(axis=1)\n for i in sm:\n counter_up += 1\n if i > int(ln / 100 * 40):\n break\n\n sm = sm[::-1]\n for i in sm:\n counter_bt -= 1\n if i > int(ln / 100 * 70):\n break\n\n ln = img_thresh.shape[0]\n sm = img_for_count.sum(axis=0)\n\n for i in sm:\n counter_lf += 1\n if i > int(ln / 100 * 60):\n break\n\n sm = sm[::-1]\n for i in sm:\n counter_rg -= 1\n if i > int(ln / 100 * 60):\n break\n\n if full_filling:\n if counter_rg < img_thresh.shape[1] / 100 * 80:\n counter_rg = 0\n elif counter_lf > img_thresh.shape[1] / 100 * 20:\n counter_lf = img_thresh.shape[1] - 1\n elif counter_up > img_thresh.shape[0] / 100 * 20:\n counter_up = img_thresh.shape[0] - 1\n elif counter_bt < img_thresh.shape[0] / 100 * 80:\n counter_bt = 0\n\n img_thresh[:counter_up] = 0\n img_thresh[counter_bt:] = 0\n if full_left_right:\n img_thresh[:, :counter_lf] = 0\n img_thresh[:, counter_rg:] = 0\n\n return img_thresh",
"def block(img):\n # FIXME: grid searchowac ten fragment?\n img = exposure.equalize_adapthist(img)\n img = exposure.adjust_gamma(img)\n img = unsharp_mask(img, radius=3, amount=2)\n img = ndimage.uniform_filter(img, size=2)\n return (img * 255).astype(np.uint8)",
"def isWhite(img, circle):\n circle = [int(X) for X in circle]\n xc, yc, r = circle\n cropImg = img[yc-r:yc+r, xc-r:xc+r]\n average_color = cv2.mean(cropImg)\n if manyConds(average_color, [BAD1, BAD2, BAD3, BAD4], [d1, d2, d3,\n d4]) or \\\n isClose(average_color, d):\n return True\n else:\n # print average_color\n return False",
"def remove_upper(img, percent):\n crop_img = img[img.shape[0]*percent/100:, 0:]\n return crop_img",
"def color_gradient_rv(self, percentage):\n redness = 255\n blueness = 0\n numberper = percentage\n redness -= 2.55*numberper\n blueness += 2.55*numberper\n if redness < 0:\n redness = 0\n if blueness > 255:\n blueness = 255\n for i in range(self.numPixels()):\n self.setPixelColor(i, Color(int(redness), 0, int(blueness)))\n self.show()",
"def percentage_colored(img, contour):\n mask = np.zeros(img.shape, dtype=\"uint8\")\n cv.drawContours(mask, [contour], -1, 1, -1) # Draw filled contour on mask\n area = (mask > 0).sum()\n extracted = cv.bitwise_and(img, mask)\n # TODO: We could multiply by a gaussian kernel here to give greater weight\n # to the central pixels\n return 1 - extracted.sum() / area",
"def color_thresh(input_img, rgb_thresh=(160, 160, 160),\n low_bound=(75, 130, 130), upp_bound=(255, 255, 255)):\n # Create arrays of zeros same xy size as input_img, but single channel\n nav_img = np.zeros_like(input_img[:, :, 0])\n obs_img = np.zeros_like(input_img[:, :, 0])\n\n # Convert BGR input_img to HSV for rock samples\n hsv_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2HSV)\n\n # Require that each of the R(0), G(1), B(2) pixels be above all three\n # rgb_thresh values such that pixpts_above_thresh will now contain a\n # boolean array with \"True\" where threshold was met\n pixpts_above_thresh = ((input_img[:, :, 0] > rgb_thresh[0])\n & (input_img[:, :, 1] > rgb_thresh[1])\n & (input_img[:, :, 2] > rgb_thresh[2]))\n\n pixpts_nonzero = ((input_img[:, :, 0] > 0)\n & (input_img[:, :, 1] > 0)\n & (input_img[:, :, 2] > 0))\n\n # obstacle pixels are those non-zero pixels where rgb_thresh was not met\n obs_pixpts = np.logical_and(\n pixpts_nonzero, np.logical_not(pixpts_above_thresh)\n )\n # Index the array of zeros with the boolean array and set to 1\n # those pixels where ROI threshold was met\n nav_img[pixpts_above_thresh] = 1\n obs_img[obs_pixpts] = 1\n\n # Threshold the HSV image to get only colors for gold rock samples\n rock_img = cv2.inRange(hsv_img, low_bound, upp_bound)\n\n # Return the threshed binary images\n ThreshedImages = namedtuple('ThreshedImages', 'nav obs rock')\n thresh_imgs = ThreshedImages(nav_img, obs_img, rock_img)\n\n return thresh_imgs",
"def colorThreshold(img, rbg_threshold = (60,60,60)):\n temp = np.zeros(img.shape)\n rflags_h = img[:,:]>rbg_threshold[0]\n\n temp[:,:][rflags_h] = 1\n \n return temp",
"def noFill() -> None:\n global __fill_color\n __fill_color = (0, 0, 0, 0)",
"def setNoBlue(img):\n def fn(pixel):\n pixel[BLUE] = 0\n return pixel\n return mapPixels(img, fn)",
"def threshold( self, snapshot ):\n import pygame\n snapshotMinusBackground = snapshot.copy()\n threshold_value = 40 # How close to the existing colour must each point be?\n pygame.transform.threshold( snapshotMinusBackground,\n snapshot,\n ( 0,0,0 ),\n [threshold_value]*3 ,\n ( 255,255,255 ),\n 1,\n self.background )\n # Median filter would be good here to remove salt + pepper noise...\n return snapshotMinusBackground",
"def darken(image, amount):\n rgb_adjustment = (-amount, -amount, -amount)\n colorFilter(image, rgb_adjustment)",
"def get_court_mask(img, thresh=50):\n hue = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))[0]\n return cv2.threshold(hue, thresh, 255, cv2.THRESH_BINARY_INV)[1]",
"def FindColors(img, min_U, max_U, min_V, max_V, kernel_size):\n img_Area = img.shape[0]*img.shape[1]\n \n (NI_thresh_U, NI_blackAndWhiteImage_U) = cv2.threshold(img[:,:,1], min_U, 255, cv2.THRESH_BINARY)\n (I_thresh_U, I_blackAndWhiteImage_U) = cv2.threshold(img[:,:,1], max_U, 255, cv2.THRESH_BINARY_INV)\n blackAndWhiteImage_U = cv2.bitwise_and(I_blackAndWhiteImage_U, NI_blackAndWhiteImage_U)\n \n (NI_thresh_V, NI_blackAndWhiteImage_V) = cv2.threshold(img[:,:,2], min_V, 255, cv2.THRESH_BINARY)\n (I_thresh_V, I_blackAndWhiteImage_V) = cv2.threshold(img[:,:,2], max_V, 255, cv2.THRESH_BINARY_INV)\n blackAndWhiteImage_V = cv2.bitwise_and(I_blackAndWhiteImage_V, NI_blackAndWhiteImage_V)\n \n # cv2.imshow('U', blackAndWhiteImage_U)\n # cv2.imshow('V', blackAndWhiteImage_V)\n \n blackAndWhiteImage_UV = cv2.bitwise_and(blackAndWhiteImage_V, blackAndWhiteImage_U)\n # cv2.imshow('UV', blackAndWhiteImage_UV)\n \n blackAndWhiteImage_Areas = blackAndWhiteImage_UV.sum()/255\n if (blackAndWhiteImage_Areas == 0):\n ratio = 22\n # print(\"Inf\")\n else:\n ratio = int(round(img_Area/blackAndWhiteImage_Areas))\n # print(ratio)\n if(ratio <= 21):\n #Morphological operations\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(kernel_size,kernel_size))\n closing = cv2.morphologyEx(blackAndWhiteImage_UV, cv2.MORPH_CLOSE, kernel, iterations = 1)\n # cv2.imshow('Closing', closing)\n return(closing)\n else:\n return(np.full((img.shape[0],img.shape[1]),255).astype(np.uint8))",
"def set_temp_color(pct: int):\n global AURORA\n AURORA = AURORA or nanoleaf.Aurora(AURORA_IP, os.environ[\"AURORA_AUTH_TOKEN\"])\n \n frac = pct/100\n # Scales between MIN_RGB and MAX_RGB based on the provided percentage.\n AURORA.rgb = tuple(frac*max_rgb + (1-frac)*min_rgb \\\n for min_rgb, max_rgb in zip(TEMP_MIN_RGB, TEMP_MAX_RGB)\n )",
"def colorWipe(color, wait_ms=50):\n for i in range(cloud.strip.numPixels()):\n \n cloud.strip.setPixelColor(i, color)\n cloud.strip.show()\n time.sleep(wait_ms/1000.0)",
"def color_threshold(img, s_thresh=(90, 255)):\n # Some other factors to consider 170 255\n # Convert to HLS color space and separate the V channel\n hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n s_channel = hls[:, :, 2]\n # l_channel = hls[:, :, 1] #TODO (ivan) consider this in future improvements\n # Threshold color channel\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1\n return s_binary"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
use the standard blob-detection function to plot circles surrounding segmented blobs
|
def blobber(img):
blobs = blob_dog(img, min_sigma=20, threshold=.1)
blobs[:, 2] = blobs[:, 2] * sqrt(2)
fig, ax = plt.subplots()
ax.imshow(img, cmap="gray")
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color="0.75", linewidth=2, fill=False)
ax.add_patch(c)
|
[
"def blob_detection(post_masked_processed, draw):\n img = cv2.normalize(post_masked_processed, None, 0, 255, cv2.NORM_MINMAX)\n img = img.astype('uint8')\n img = cv2.medianBlur(img, 7)\n\n th2 = filters.threshold_sauvola(img)\n th2 = 255-th2\n th2 = th2.astype(\"uint8\")\n # Set our filtering parameters\n # Initialize parameter settiing using cv2.SimpleBlobDetector\n params = cv2.SimpleBlobDetector_Params()\n\n # Set Area filtering parameters\n params.filterByArea = True\n params.minArea = 10\n\n # Set Circularity filtering parameters\n params.filterByCircularity = True\n params.minCircularity = 0.1\n\n # Set Convexity filtering parameters\n params.filterByConvexity = True\n params.minConvexity = 0.1\n\n # Set inertia filtering parameters\n params.filterByInertia = True\n params.minInertiaRatio = 0.01\n\n # Create a detector with the parameters\n detector = cv2.SimpleBlobDetector_create(params)\n kernel = np.ones((5,5), np.uint8)\n erosion = cv2.erode(th2, kernel, iterations=2)\n dilation = cv2.dilate(erosion, kernel, iterations=6)\n\n # Detect blobs\n keypoints = detector.detect(dilation)\n\n # Save blob centers\n if draw==True:\n # Draw blobs on our image as red circles\n blank = np.zeros((1, 1))\n blobs = cv2.drawKeypoints(th2, keypoints, blank, (0, 0, 255),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n number_of_blobs = len(keypoints)\n text = \"Number of Circular Blobs: \" + str(len(keypoints))\n cv2.putText(blobs, text, (20, 550), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2)\n return blobs\n else :\n result = []\n for point in keypoints:\n x = point.pt[0]\n y = point.pt[1]\n result.append([x,y])\n return result",
"def blob_detection2(post_masked_processed, draw):\n\n # Initial set up; no erosion or dilationn\n\n img = cv2.normalize(post_masked_processed, None, 0, 255, cv2.NORM_MINMAX)\n img = img.astype('uint8')\n img = cv2.medianBlur(img, 11)\n\n th2 = filters.threshold_sauvola(img)\n th2 = 255-th2\n th2 = th2.astype(\"uint8\")\n # Set our filtering parameters\n # Initialize parameter settiing using cv2.SimpleBlobDetector\n params = cv2.SimpleBlobDetector_Params()\n\n # Set Area filtering parameters\n params.filterByArea = True\n params.minArea = 10\n\n # Set Circularity filtering parameters\n params.filterByCircularity = False\n params.minCircularity = 0.1\n\n # Set Convexity filtering parameters\n params.filterByConvexity = True\n params.minConvexity = 0.1\n\n # Set inertia filtering parameters\n params.filterByInertia = True\n params.minInertiaRatio = 0.01\n\n # Create a detector with the parameters\n detector = cv2.SimpleBlobDetector_create(params)\n\n # Detect blobs\n keypoints = detector.detect(th2)\n\n if draw==True:\n # Draw blobs on our image as red circles\n blank = np.zeros((1, 1))\n blobs = cv2.drawKeypoints(th2, keypoints, blank, (0, 0, 255),cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n number_of_blobs = len(keypoints)\n text = \"Number of Circular Blobs: \" + str(len(keypoints))\n cv2.putText(blobs, text, (20, 550), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2)\n return blobs\n else :\n result = []\n for point in keypoints:\n x = point.pt[0]\n y = point.pt[1]\n result.append([x,y])\n return result",
"def circles_with_stuff(sz=256, rad1=5, rad2=8, num_blobs=100):\n\n blobs = np.zeros((sz,sz))\n blob_count = 0\n\n for ii in range(num_blobs):\n\n r1 = np.around(np.random.rand()*sz)\n c1 = np.around(np.random.rand()*sz)\n\n r2 = np.around(np.random.rand()*sz)\n c2 = np.around(np.random.rand()*sz)\n\n\n new_blob1 = blobber2(r1,c1,rad1,sz)\n\n new_blob2 = blobber2(r2,c2,rad2,sz)*2\n\n #check to see if there is any overlap\n\n occupied1 = new_blob1 > 0\n\n occupied2 = new_blob2 > 0\n\n old_occupied = blobs > 0\n\n overlap1 = np.logical_and(occupied1,occupied2).any()\n overlap2 = np.logical_and(occupied1,old_occupied).any()\n overlap3 = np.logical_and(occupied2,old_occupied).any()\n\n if ( (overlap1 == False) and (overlap2==False) and (overlap3==False)):\n\n blobs+=new_blob1\n blobs+=new_blob2\n\n blob_count+=1\n\n print('Number of blobs:',blob_count)\n\n return blobs",
"def circle_in_circle(rad_list, sz=256, num_blobs=1000):\n\n blobs = np.zeros((sz,sz))\n blob_count = 0\n\n for ii in range(num_blobs):\n\n r = int(np.around(np.random.rand()*sz))\n c = int(np.around(np.random.rand()*sz))\n\n new_blob = blobber(r,c,rad_list,sz)\n\n #check to see if there is any overlap\n\n occupied1 = new_blob>0\n\n occupied2 = blobs>0\n\n overlap = np.logical_and(occupied1,occupied2)\n\n if np.any(overlap)==False:\n\n blobs+=new_blob\n\n blob_count+=1\n\n print('Blobs Generated:',blob_count)\n\n return blobs",
"def draw_spot_light_gray(img, blobs, size=0.2):\n\n if len(img.shape) == 3:\n\n img = rgb2grey(img)\n\n alpha_img = zeros_like(img)\n temp_img = zeros_like(img)\n\n for blob in blobs:\n\n y, x, r = blob\n rr, cc = circle(y, x, size)\n\n temp_img[rr, cc] += 0.5 * r\n alpha_img[rr, cc] = 1.\n\n kernel = getGaussianKernel(2, 1) * getGaussianKernel(2, 1).T\n\n _, ax = plt.subplots(1,3)\n\n ax[0].imshow(temp_img.copy())\n\n temp_img = filter2D(temp_img, -1, kernel)\n alpha_img = filter2D(alpha_img, -1, kernel)\n ax[1].imshow(temp_img)\n\n ax[2].imshow(alpha_img)\n\n plt.show()\n\n new_img = img.copy()\n\n new_img[temp_img > 0] = 0.2 * (temp_img[temp_img > 0] - temp_img[temp_img > 0].min())/(temp_img[temp_img > 0].max() - temp_img[temp_img > 0].min()) + 0.8\n new_img[temp_img > 0] = (1 - alpha_img[temp_img > 0]) * img[temp_img > 0] + alpha_img[temp_img > 0] * new_img[temp_img > 0]\n\n return new_img",
"def analyze(self, blobs):\n res = self.camera.resolution\n resizefactor=1.0\n cx=int(res[0]/2)\n cy=int(res[1]/2)\n\n red = (0, 0, 255)\n bcount = 0\n print( \"blobs=%s\" % blobs )\n self.blobs = self.filter( blobs )\n now = datetime.datetime.now()\n if self.debug:\n cv2.imshow( \"Analyze\", self.lastimage )\n cv2.waitKey(100) \n\n print( \"fblobs=%s\" % self.blobs ) \n for b in self.blobs:\n print( \" blob=pt=%s, size=%s \" % ( b.pt, b.size) )\n #bx=int(cx - int(b.pt[0] * resizefactor))\n #by=int(cy - int(b.pt[1] * resizefactor))\n bx=int(b.pt[0])\n by=int(b.pt[1]) \n print( \" - (x=%s , y=%s )\" % (bx,by) ) \n cv2.circle( self.lastimage, (bx,by), int(b.size), red )\n cv2.putText(self.lastimage, \"#{}\".format(bcount), (bx - 10, by - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.55, red, 1)\n \n bcount+=1\n\n cv2.putText( self.lastimage, \"%s\" % now, (20, res[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.35, red, 1 )\n\n imgcenter = (cx, cy)\n cv2.line( self.lastimage, (cx-5,cy),(cx+5, cy), red )\n cv2.line( self.lastimage, (cx,cy+5),(cx, cy-5), red )\n\n top_y=int(self.target_zone[0]*res[1])\n bot_y=int(self.target_zone[1]*res[1])\n\n cv2.line( self.lastimage, (0,top_y),(res[0],top_y), red )\n cv2.line( self.lastimage, (0,bot_y),(res[0],bot_y), red )\n \n self.active = bcount>0\n\n if self.active and self.debug:\n cv2.imshow( \"Analyze\", self.lastimage )\n cv2.waitKey(100)\n self.suggest_no += 1\n\n now = datetime.datetime.now()\n if self.active and \\\n ( not(self.lastsave) or (now - self.lastsave).seconds> 5.0 ) :\n self.lastsave = now\n f = \"images/%s.jpg\" % self.suggest_no\n cv2.imwrite( f, self.lastimage )\n print( \"Wrote %s\" % f )",
"def detect_spot_lights(img, verbose=0):\n\n if len(img.shape) == 3:\n\n if verbose > 1:\n print('\\t\\tconverting to gray scale')\n\n img_gray = rgb2grey(img)\n else:\n img_gray = img.copy()\n\n factor = 1.\n img_temp = img.copy()\n\n if max(img.shape) > 1000:\n\n factor = 1000./max(img.shape)\n img_gray = rescale(img_gray, factor)\n img_temp = rescale(img_temp, factor)\n\n if verbose > 1:\n print('\\t\\tresizing image | previous size: {}, new size: {}'.format(img.shape, img_gray.shape))\n\n img_gray[img_gray > 255] = 255.\n img_gray[img_gray < 0] = 0.\n\n if verbose > 1:\n print('\\t\\tlooking for blobs')\n print('\\t\\t', im_stats(img_gray))\n\n blobs = blob_doh(img_gray.astype('uint8'), max_sigma=50, num_sigma=100, threshold=0.01)\n\n if verbose > 1:\n print('\\t\\t{} blobs found'.format(len(blobs)))\n\n sizes = []\n brightness = []\n colors = []\n\n if verbose > 1:\n print('\\t\\tgetting the blobs colors and sizes')\n\n if verbose > 2:\n print('temp img stats =', im_stats(img_temp))\n plt.imshow(img_temp, interpolation='none')\n plt.show()\n\n for blob in blobs:\n y, x, r = blob\n window = imwindow(img_gray, y, x, (r+1, r+1))[0:2]\n brightness.append(img_gray[window].mean())\n colorwindow = img_temp[window[0], window[1],:]\n colorwindowselection = where(colorwindow.sum(2) <= 0.8*255*3)\n\n colors.append(colorwindow[colorwindowselection[0], colorwindowselection[1],:].mean(0))\n\n if verbose > 3:\n print('\\t\\t\\tx = {}, y = {}, r = {}'.format(x, y, r))\n print('\\t\\t\\twindow =', window)\n print('\\t\\t\\tcolorwindow shape =', colorwindow.shape)\n print('\\t\\t\\tcolor selection =', colorwindowselection,)\n print('\\t\\t\\tcolor =', colors[-1])\n print('\\t\\t\\tcolorwindow sums =', colorwindow.sum(2))\n plt.imshow(colorwindow/255., interpolation='none')\n plt.title('colorwindow')\n plt.show()\n\n sizes.append(r)\n\n blobs /= factor\n\n colors = array(colors)\n\n if verbose > 2:\n _, ax = plt.subplots(1,2)\n ax[0].hist(sizes, bins=100)\n ax[0].set_title('sizes')\n ax[1].hist(brightness, bins=100)\n ax[1].set_title('brightness')\n plt.show()\n\n if verbose > 1:\n print('\\t\\tcomputing the histograms')\n\n gh, gx = histogram(sizes, bins=100)\n\n h, x = histogram(brightness, bins=100)\n\n hmin = x[80]\n\n if verbose > 2:\n plt.imshow(img/255., interpolation='none')\n\n for blob in blobs:\n y, x, r = blob\n plt.scatter(x,y, color='b', lw=r)\n\n spots = where(brightness >= hmin)\n\n if verbose > 2:\n print('\\t\\t', im_stats(img))\n\n for blob in blobs[spots[0]]:\n y, x, r = blob\n plt.scatter(x,y, color='r', lw=r)\n\n plt.show()\n\n return blobs[spots[0]], colors[spots[0]]",
"def draw_spot_light_colorful(img, blobs, level=0.2, colors=None, verbose=0):\n\n if len(img.shape) == 1:\n\n blob_img = img.copy()\n img = zeros((img.shape[0], img.shape[1], 3))\n img[:,:,0] = blob_img.copy()\n img[:,:,1] = blob_img.copy()\n img[:,:,2] = blob_img.copy()\n\n blob_img = zeros_like(img)\n alpha_img = zeros_like(img)\n selection = zeros_like(img)\n\n rs = [b[2] for b in blobs]\n rmax = max(rs)\n rmin = min(rs)\n\n size = get_kernel_size(level, img.shape)\n\n for i, blob in enumerate(blobs):\n\n y, x, r = blob\n rr, cc = circle(y, x, size * 1.5)\n\n rr = safe_list_range(rr, (0, selection.shape[0]-1))\n cc = safe_list_range(cc, (0, selection.shape[1]-1))\n\n selection[rr, cc, :] = 1.\n\n if colors is not None:\n\n hsv = rgb2hsv(colors[i].reshape((1,1,3)))\n\n if rmax == rmin:\n hsv[0,0,2] = 0.9\n else:\n hsv[0,0,2] = 0.6 * (r - rmin)/(rmax - rmin) + 0.4\n\n hsv[0,0,1] *= 1.6\n rgb = hsv2rgb(hsv)[0,0,:]\n\n if verbose > 1:\n print('hsv =', hsv, ', rgb =', rgb)\n\n blob_img[rr, cc, :] = minimum(1., blob_img[rr, cc, :] + rgb)\n else:\n blob_img[rr, cc, :] += 0.5 * r\n\n alpha_img[rr, cc, 0] = 1.\n\n img /= 255.\n\n if verbose > 1:\n print('img = ', im_stats(img))\n print('blob img = ', im_stats(blob_img))\n\n if verbose > 2:\n _, ax = plt.subplots(1,2)\n ax[0].imshow(img, interpolation='none')\n ax[0].set_title('img')\n ax[1].imshow(blob_img, interpolation='none')\n ax[1].set_title('blob img')\n\n plt.show()\n\n ksize = 1.\n kernel = getGaussianKernel(int(ksize*3+1), ksize)\n kernel = kernel * kernel.T\n\n selection = filter2D(selection, -1, kernel)\n blob_img[blob_img == 0] = blob_img[selection > 0].max()\n blob_img = filter2D(blob_img, -1, kernel)\n noise_mask = random.randn(*blob_img.shape[:2])\n noise_mask = filter2D(noise_mask, -1, kernel) * 0.1\n blob_img[:,:,0] += noise_mask\n blob_img[:,:,1] += noise_mask\n blob_img[:,:,2] += noise_mask\n blob_img[blob_img > 1] = 1.\n blob_img[blob_img < 0] = 0.\n\n alpha_img[:,:,0] = filter2D(alpha_img[:,:,0], -1, kernel)\n alpha_img[:,:,1] = alpha_img[:,:,0]\n alpha_img[:,:,2] = alpha_img[:,:,0]\n\n new_img = img.copy()\n\n new_img[selection > 0] = 0.3 * (blob_img[selection > 0] - blob_img[selection > 0].min())/(blob_img[selection > 0].max() - blob_img[selection > 0].min()) + 0.7\n\n new_img[selection > 0] = (1. - alpha_img[selection > 0]) * img[selection > 0] + alpha_img[selection > 0] * new_img[selection > 0]\n\n return new_img * 255.",
"def hough_circle_detection(inp_pic, blur_strgth, hough_dp=1, minRadius=120, maxRadius=130):\n inp_pic_grey = cv.cvtColor(inp_pic, cv.COLOR_BGR2GRAY)\n if blur_strgth == \"low\":\n inp_pic_grey_blurred = cv.GaussianBlur(inp_pic_grey, (3,3), 1, 1)\n elif blur_strgth == \"high\":\n inp_pic_grey_blurred = cv.GaussianBlur(inp_pic_grey, (9,9), 2, 2)\n #cv.imshow(\"grey_blurred\", inp_pic_grey_blurred)\n # HoughCircles(image, method, dp, minDist, circles=None, param1=None, param2=None, minRadius=None, maxRadius=None)\n # if circles=None no circles found\n circles = cv.HoughCircles(inp_pic_grey_blurred, cv.HOUGH_GRADIENT, hough_dp, circles=1, minDist=20, minRadius=minRadius, maxRadius=maxRadius)\n if (circles is None):\n print(\"No circles found.\")\n raise Exception(\"No circles found.\")\n elif circles.shape == (4,1):\n # print(\"More than one circle found.\")\n # For some images, the detection fails and openCV returns a shape of (4,1).\n # I cannot find this behaviour in the documentation, so maybe it is a bug\n # Best fix so far: guess the circle's position\n y, x = inp_pic_grey.shape[:2]\n return int(x/2), int(y/2), int(min(x,y) / 2 * 0.95)\n else:\n circles = np.round(circles[0, :].astype(\"int\")) # rounding coordinates to integer values\n x_ctr, y_ctr, r = circles[0]\n #cv.circle(inp_pic, (125, 125), r, color=(0, 0, 0), thickness=4, lineType=8, shift=0)\n #cv.imshow('circle in inp_pic', inp_pic)\n # print(\"1 circle found. radius: \", r, \", center coordinate: (\", x_ctr, \",\", y_ctr, \")\")\n return x_ctr, y_ctr, r",
"def draw_circles(img_in, circles_array):\r\n img_out = cv2.cvtColor(img_in, cv2.COLOR_GRAY2BGR)\r\n\r\n for circle in circles_array:\r\n cv2.circle(img_out, (int(circle[1]), int(circle[0])), int(circle[2]), (0, 0, 255))\r\n\r\n return img_out",
"def segmentation(\n img,\n segmentation,\n title=None,\n save=None,\n figsize=(20, 10),\n linewidth=2,\n edgecolor=\"red\",\n):\n fig, ax = plt.subplots(figsize=figsize)\n ax.imshow(img)\n for i in range(len(segmentation)):\n s = segmentation[i]\n s.calculate_properties()\n cent = s.centroid\n patch = mpatches.Rectangle(\n (s.xrange[0], s.yrange[0]),\n s.xdiam,\n s.ydiam,\n fill=False,\n edgecolor=edgecolor,\n linewidth=linewidth,\n )\n ax.add_patch(patch)\n if title is not None:\n fig.suptitle(title, size=20)\n ax.set_axis_off()\n plt.tight_layout()\n fig.subplots_adjust(top=0.95)\n if save is not None:\n plt.savefig(save)\n plt.show()",
"def edgeDetectAndShowHough(imagePath):\n #base image\n img = cv2.imread(imagePath)\n #gray image\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray=cv2.GaussianBlur(gray,(5,5),0)\n showAndWait('Gray',gray)\n #edges\n edges = cv2.Canny(img,75,200,apertureSize=3,L2gradient=True)\n showAndWait('Edges',edges)\n #hough\n circles=cv2.HoughCircles(gray,cv2.HOUGH_GRADIENT,1,45)\n assert circles is not None, '-no circle found-'\n print('circle(x,y,radius',circles)\n showAndWait(\"Circles\",circles)\n\n #draw circle on image\n circles=np.uint16(np.around(circles))\n for i in circles[0,:]:\n #outer circle\n cv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2)\n #center circle\n cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\n showAndWait('Circle Detection',img)",
"def find_blobs(img, dxy, nsd=1.0):\n # Initialize\n blobs = []\n for i in range(MAX_BLOBS):\n blobs.append(blob_struct())\n nBlobs = 0\n posList = []\n\n # Extract the parameters\n dx, dy = dxy\n n = dx*dy\n\n # Copy image data into a float array\n pImg = np.array(img)\n\n # Calculate mean and sd across (filtered) image to determine threshold\n avg = np.mean(pImg)\n sd = np.std(pImg)\n thres = avg +sd *nsd\n\n # Find blob(s) ...\n #\n # Mark all pixels above a threshold\n pPrb = (pImg -avg) /sd\n pMsk = (pImg >= thres) *255\n nThres = int(np.sum(pMsk) /255)\n\n # Check if these above-threshold pixels represent continuous blobs\n nLeft = nThres\n iBlob = 0\n while nLeft > 0 and iBlob < MAX_BLOBS:\n # As long as unassigned mask pixels are left, find the next one using\n # `np.argmax()`, which returns the index of the (first)\n # hightest value, which should be 255\n iPix = np.argmax(pMsk)\n x = iPix % dx\n y = iPix //dx\n\n # Unassigned pixel found ...\n posList.append((x, y))\n pMsk[x +y*dx] = iBlob\n nFound = 1\n bx = float(x)\n by = float(y)\n bp = pPrb[x +y*dx]\n\n # Find all unassigned pixels in the neighborhood of this seed pixel\n while len(posList) > 0:\n x0, y0 = posList.pop()\n for k in range(4):\n x1 = x0 +xoffs[k]\n y1 = y0 +yoffs[k]\n if((x1 >= 0) and (x1 < dx) and\n (y1 >= 0) and (y1 < dy) and\n (pMsk[int(x1 +y1*dx)] == 255)):\n # Add new position from which to explore\n posList.append((x1, y1))\n pMsk[int(x1 +y1*dx)] = iBlob\n nFound += 1\n bx += float(x1)\n by += float(y1)\n bp += pPrb[int(x1 +y1*dx)]\n # Update number of unassigned pixels\n nLeft -= nFound\n\n # Store blob properties (area, center of gravity, etc.); make sure that\n # the blob list remaines sorted by area\n k = 0\n if iBlob > 0:\n while (k < iBlob) and (blobs[k].area > nFound): k += 1\n if k < iBlob:\n blobs.insert(k, blob_struct())\n blobs[k].ID = iBlob\n blobs[k].area = nFound\n blobs[k].x = by /nFound\n blobs[k].y = bx /nFound\n blobs[k].prob = bp /nFound\n iBlob += 1\n nBlobs = iBlob\n\n # Copy blobs into list as function result\n tempL = []\n for i in range(nBlobs):\n if blobs[i].area > 0:\n tempL.append(blobs[i].as_list)\n\n # Return list of blobs, otherwise empty list\n return tempL",
"def drawCircle(image, center, radius=1, color=COLOR_YELLOW, thickness=-1):\n\n cv.circle(image, center, radius, color, thickness)",
"def filter(self, blobs):\n\n res=self.camera.resolution\n center=self.cp()\n top_y=self.target_zone[0]*res[1]\n bot_y=self.target_zone[1]*res[1]\n\n fblobs = []\n for b in blobs:\n if b.size>5.0: \n if b.pt[1] >= top_y and b.pt[1] <= bot_y:\n fblobs.append( b )\n\n self.blobs = fblobs\n\n return fblobs",
"def find_bright_circular_blobs(image, threshold):\n params = cv2.SimpleBlobDetector_Params()\n params.minThreshold = threshold\n params.thresholdStep = 0.01 * threshold\n params.filterByArea = True\n params.minArea = 2\n params.filterByCircularity = True\n params.minCircularity = 0.95\n params.filterByConvexity = True\n params.minConvexity = 0.95\n params.minDistBetweenBlobs = 2\n params.blobColor = 255\n\n detector = cv2.SimpleBlobDetector_create(params)\n\n keypoints = detector.detect(image)\n\n blobs = [{'size': keypoint.size, 'x_loc': keypoint.pt[0], 'y_loc': keypoint.pt[1]} for keypoint in keypoints]\n\n _, blobs, keypoints = sorted(zip([blob['size'] for blob in blobs], blobs, keypoints), reverse=True)\n\n return blobs, keypoints",
"def draw_centroids(img, centroids, radius=5, color=(255,0,0)):\n img_copy = img.copy()\n for c in centroids:\n cv2.circle(img_copy, c, radius, color, -1)\n return img_copy",
"def visualize_patch_segmentation_predictions(self, X, y=None, threshold=0.5, num_predictions=3):\n\n # Choose random samples\n random_samples = np.random.randint(0, len(X), num_predictions)\n X_rand = X[random_samples]\n y_pred = self.model.predict(X_rand)\n\n # Number of rows and columns for the figure\n ncols = 2\n nrows = num_predictions\n if y is not None:\n ncols = 3\n y_rand = y[random_samples]\n fig, axes = plt.subplots(nrows, ncols)\n\n if num_predictions == 1:\n if X_rand.shape[3] == 1:\n axes[0].imshow(X_rand[0, :, :, 0], cmap='gray')\n else:\n axes[0].imshow(X_rand[0])\n axes[0].set_xticks([])\n axes[0].set_yticks([])\n\n axes[1].imshow(y_pred[0, :, :, 0] > threshold, cmap='gray')\n axes[1].set_xticks([])\n axes[1].set_yticks([])\n\n axes[0].set_title(\"Original Image\")\n axes[1].set_title(\"Predicted Mask\")\n\n if y is not None:\n axes[2].imshow(y_rand[0, :, :, 0], cmap='gray')\n axes[2].set_xticks([])\n axes[2].set_yticks([])\n axes[2].set_title(\"Ground Truth Mask\")\n else:\n for idx in range(num_predictions):\n if X_rand.shape[3] == 1:\n axes[idx, 0].imshow(X_rand[idx, :, :, 0], cmap='gray')\n else:\n axes[idx, 0].imshow(X_rand[idx])\n axes[idx, 0].set_xticks([])\n axes[idx, 0].set_yticks([])\n\n axes[idx, 1].imshow(y_pred[idx, :, :, 0] > threshold, cmap='gray')\n axes[idx, 1].set_xticks([])\n axes[idx, 1].set_yticks([])\n\n if idx == 0:\n axes[idx, 0].set_title(\"Original Image\")\n axes[idx, 1].set_title(\"Predicted Mask\")\n\n if y is not None:\n axes[idx, 2].imshow(y_rand[idx, :, :, 0], cmap='gray')\n axes[idx, 2].set_xticks([])\n axes[idx, 2].set_yticks([])\n if idx == 0:\n axes[idx, 2].set_title(\"Ground Truth Mask\")\n\n plt.show()",
"def _hough_circle(*args, **kwargs): # real signature unknown\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Search for places that match the query
|
def search():
# get the raw query string from the request
q = request.args.get("q")
# split the query into an array of keywords
q_array = q.split(" ")
# remove a trailing comma from each keyword (if any)
query = []
for item in q_array:
if item[len(item) - 1] == ",":
item = item.replace(",", "")
query.append(item)
else:
query.append(item)
# find rows where any searchable column (country code, postal code, place name, admin names/codes, latitude, longitude) matches the first keyword
results = db.execute(
"SELECT * FROM places WHERE country_code LIKE :q OR postal_code LIKE :q OR place_name LIKE :q OR admin_name1 LIKE :q OR admin_code1 LIKE :q OR admin_name2 LIKE :q OR admin_code2 LIKE :q OR latitude LIKE :q OR longitude LIKE :q", q=query[0])
# for each remaining keyword, run the same search and keep only the rows common to every keyword seen so far
for i in range(1, len(query)):
results_cmp = db.execute(
"SELECT * FROM places WHERE country_code LIKE :q OR postal_code LIKE :q OR place_name LIKE :q OR admin_name1 LIKE :q OR admin_code1 LIKE :q OR admin_name2 LIKE :q OR admin_code2 LIKE :q OR latitude LIKE :q OR longitude LIKE :q", q=query[i])
results = intersection(results, results_cmp)
# return only rows matching every keyword; if any keyword is absent from the database, the result set is empty
return jsonify(results)
|
[
"def get_potential_matches_from_address(self, address):",
"def search_places(request):\n response = requests.get(f\"https://maps.googleapis.com/maps/api/place/textsearch/json?query={request}&key={places_key}\")\n formatted = response.json()\n lat = formatted['results'][0]['geometry']['location']['lat']\n lng = formatted['results'][0]['geometry']['location']['lng']\n keyword = 'attraction'\n response = requests.get(\n f\"https://maps.googleapis.com/maps/api/place/nearbysearch/json?key={places_key}&location={lat},{lng}&radius=5000&keyword={keyword}\"\n )\n data = response.json()['results']\n attractions = []\n for place in data[:12]:\n name = place.get('name', 'Could not find attraction')\n address = place.get('vicinity', 'Address is unknown')\n ratings = place.get('rating', 'Unrated')\n try:\n photo_key = place['photos'][0]['photo_reference']\n photo_url = f\"https://maps.googleapis.com/maps/api/place/photo?maxwidth=1600&photoreference={photo_key}&key={places_key}\"\n except KeyError:\n photo_url = \"https://dummyimage.com/250x250/000000/baffef&text=No+Image+Available\"\n\n item = {\n 'name': name,\n 'address': address,\n 'ratings': ratings,\n 'photo': photo_url\n }\n attractions.append(item)\n\n return attractions",
"def search_place():\n body = request.get_json()\n if body is None:\n abort(400, description=\"Not a JSON\")\n if body and len(body):\n states = body.get('states', None)\n cities = body.get('cities', None)\n amenities = body.get('amenities', None)\n if not body and len(body) or (not states and not cities and not amanities):\n places = storage.all(Place).values()\n place_list = []\n for place in places:\n place_list.append(place.to_dict())\n return jsonify(place_list)\n place_list = []\n if states:\n stateso = [storage.get(State, key) for key in states]\n for state in stateso:\n if state:\n for city in state.cities:\n if city:\n for place in city.places:\n place_list.append(place)\n if cities:\n cityo = [storage.get(City, key) for key in cities]\n for city in cityo:\n if city:\n for place in city.places:\n if place not in place_list:\n place_list.append(place)\n if amenities:\n if not place_list:\n place_list = storage.all(Place).values()\n amenitieso = [storage.get(Amenity, key) for key in amenities]\n place_list = [place for place in place_list if\n all([key in place.amenities for key in amenitieso])]\n places = []\n for value in place_list:\n data = value.to_dict()\n data.pop('amenities', None)\n places.append(data)\n return jsonify(places)",
"def _places(client, url_part, query=None, location=None, radius=None,\r\n keyword=None, language=None, min_price=0, max_price=4, name=None,\r\n open_now=False, rank_by=None, type=None, region=None, page_token=None):\r\n\r\n params = {\"minprice\": min_price, \"maxprice\": max_price}\r\n\r\n if query:\r\n params[\"query\"] = query\r\n if location:\r\n params[\"location\"] = convert.latlng(location)\r\n if radius:\r\n params[\"radius\"] = radius\r\n if keyword:\r\n params[\"keyword\"] = keyword\r\n if language:\r\n params[\"language\"] = language\r\n if name:\r\n params[\"name\"] = convert.join_list(\" \", name)\r\n if open_now:\r\n params[\"opennow\"] = \"true\"\r\n if rank_by:\r\n params[\"rankby\"] = rank_by\r\n if type:\r\n params[\"type\"] = type\r\n if region:\r\n params[\"region\"] = region\r\n if page_token:\r\n params[\"pagetoken\"] = page_token\r\n\r\n url = \"/maps/api/place/%ssearch/json\" % url_part\r\n return client._request(url, params)",
"def googleNearbyPlacesByKeyword(apiKey, location, radius, keyword):\n baseURL = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?'\n nearbyURL = baseURL + 'key=' + apiKey + '&location=' + location + '&radius=' + radius + '&keyword=' + keyword\n nearby = json.loads(urllib.urlopen(nearbyURL).read())\n return nearby",
"def search_places_by_id():\n if request.get_json() is None:\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n\n data = request.get_json()\n\n if data and len(data):\n states = data.get('states', None)\n cities = data.get('cities', None)\n amenities = data.get('amenities', None)\n\n if not data or not len(data) or (\n not states and\n not cities and\n not amenities):\n places = storage.all(Place).values()\n list_places = []\n for place in places:\n list_places.append(place.to_dict())\n return jsonify(list_places)\n\n list_places = []\n if states:\n states_obj = [storage.get(State, s_id) for s_id in states]\n for state in states_obj:\n if state:\n for city in state.cities:\n if city:\n for place in city.places:\n list_places.append(place)\n\n if cities:\n city_obj = [storage.get(City, c_id) for c_id in cities]\n for city in city_obj:\n if city:\n for place in city.places:\n if place not in list_places:\n list_places.append(place)\n\n if amenities:\n if not list_places:\n list_places = storage.all(Place).values()\n amenities_obj = [storage.get(Amenity, a_id) for a_id in amenities]\n list_places = [place for place in list_places\n if all([am in place.amenities\n for am in amenities_obj])]\n\n places = []\n for p in list_places:\n d = p.to_dict()\n d.pop('amenities', None)\n places.append(d)\n\n return jsonify(places)",
"def search_places_coords(request):\n response = requests.get(f\"https://maps.googleapis.com/maps/api/place/textsearch/json?query={request}&key={places_key}\")\n # https://maps.googleapis.com/maps/api/place/nearbysearch/output?parameters\n formatted = response.json()\n lat = formatted['results'][0]['geometry']['location']['lat']\n lon = formatted['results'][0]['geometry']['location']['lng']\n coords = {\n 'lat': lat,\n 'lon': lon\n }\n return coords",
"def getPlaces(place):\r\n url = \"https://skyscanner-skyscanner-flight-search-v1.p.rapidapi.com/apiservices/autosuggest/v1.0/US/USD/en-US/\"\r\n querystring = {\"query\": place}\r\n\r\n return requests.request(\"GET\", url, headers = headers, params = querystring).json()['Places']",
"def search(api_key, term, location):\n\n url_params = {\n 'name': term.replace(' ', '+'),\n 'address1': location.replace(' ', '+'),\n 'city': 'Long Beach',\n 'state': 'US',\n 'country': 'US',\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, MATCH_PATH, api_key, url_params=url_params)",
"def test_places_search(self):\n with self.assertRaises(ValidationError) as e:\n search_places()\n self.assertEqual([u'One of the following args must be provided: query, latitude and longitude, or distance.'],\n e.exception.messages)\n\n # Sometimes facebook gives back incorrect page sizes. If I ask for 6,\n # I don't always get 6.\n places = search_places(query='coffee',\n latitude=39.042173020445,\n longitude=-94.590903251913,\n distance=1000,\n page_size=6)\n\n self.assertTrue(len(places) > 1)",
"def _search(self):",
"def _check_place_matching(self, item):\n return not self.lower_place or \\\n 'location' in item and self.lower_place in item['location'].lower() or \\\n 'venue' in item and 'city' in item['venue'] and self.lower_place in item['venue']['city'].lower()",
"def filter_stores(sp, lat_lng_ecef, initial_radius, inc_radius):\n\n matches = []\n radius = initial_radius\n while len(matches) < 1:\n results = sp.query(lat_lng_ecef, radius)\n if results is not None:\n if len(results):\n matches.extend(results)\n radius += inc_radius\n return matches",
"def query_phrase_search(query_params):\n results = []\n terms = query_params['query']\n # Prepare advanced search if any filters are provided\n filtered_movies = None\n if any(len(query_params.get(param, '')) > 0 for param in ['movie_title', 'year', 'actor', 'categories']):\n print('advanced search')\n filtered_movies = db.get_movie_ids_advanced_search(query_params)\n\n cursors = []\n for dist, term in enumerate(terms):\n cursor = db.get_indexed_documents_by_term(term, 0, BATCH_SIZE, sort_entries=True)\n index = next(cursor, None)\n cursors.append({\n 'cursor': cursor,\n 'index': index,\n 'm': 0, # movie index\n 's': 0, # sentence index\n 'p': 0 # position index,\n })\n\n # print(\"Cursors beginning:\")\n # print_cursors(cursors)\n\n # while all(c['index'] is not None for c in cursors): # continue until at least one cursor is fully exhausted\n start_time = time.time()\n while True: # continue until at least one cursor is fully exhausted\n for i in range(len(cursors) - 1):\n cur_i = cursors[i]\n cur_j = cursors[i+1]\n # catch up j with i\n # cur_j_before = cursor_to_tuple(cur_j)\n exhausted = catchup(cur_j, cur_i)\n # if cur_j_before != cursor_to_tuple(cur_j):\n # print(f\"Cursor {i+1} caught up with Cursor {i}:\")\n # print_cursors(cursors)\n if exhausted: # cur_j has been exhausted so there's no point in trying to find any more matches, abort.\n return order_results_by_popularity(results)\n # At this point, the term cursors should be ordered, e.g. \"i\" < \"am\" < \"your\" < \"father\".\n # Check if an exact phrase match was found.\n phrase_found = True\n start_cur = cursors[0]\n start_mov = start_cur['index']['movies'][start_cur['m']]\n start_sen = start_mov['sentences'][start_cur['s']]\n start_pos = start_sen['pos'][start_cur['p']]\n\n for i in range(1, len(cursors)):\n cur = cursors[i]\n if cur['index']['movies'][cur['m']]['_id'] != start_mov['_id'] or \\\n cur['index']['movies'][cur['m']]['sentences'][cur['s']]['_id'] != start_sen['_id'] or \\\n cur['index']['movies'][cur['m']]['sentences'][cur['s']]['pos'][cur['p']] - start_pos != i:\n phrase_found = False\n break\n if phrase_found and (filtered_movies is None or start_mov['_id'] in filtered_movies): # supports advanced search\n results.append({\n 'movie_id': start_mov['_id'],\n 'sentence_id': start_sen['_id']\n })\n # # Done. Now advance the first cursor (\"i\") to catch up with the last cursor (\"father\").\n end_cur = cursors[-1]\n end_mov = end_cur['index']['movies'][end_cur['m']]\n end_sen = end_mov['sentences'][end_cur['s']]\n end_pos = end_sen['pos'][end_cur['p']]\n if start_mov['_id'] < end_mov['_id']:\n advance_cursor_iterator(start_cur, 'm')\n elif start_mov['_id'] == end_mov['_id'] and start_sen['_id'] < end_sen['_id']:\n advance_cursor_iterator(start_cur, 's')\n elif start_mov['_id'] == end_mov['_id'] and start_sen['_id'] == end_sen['_id'] and start_sen['pos'][start_cur['p']] < end_pos:\n advance_cursor_iterator(start_cur, 'p')\n\n # print(\"Start cursor advanced:\")\n # print_cursors(cursors)\n\n if start_cur['cursor'] is None or time.time() - start_time > MAX_QUERY_TIME:\n return order_results_by_popularity(results)",
"def locations(term):",
"def search():\n #collect current user info\n flash(f\"These are all the potential teammates based on your location and activity interest!\")\n profile = crud.get_user_by_id(session['current_user'])\n #collect matching info\n potentials = []\n sport_potentials = crud.get_users_by_sport(profile.sport)\n city_potentials = crud.get_users_by_city(profile.city)\n users = crud.get_users()\n #check all players for matches\n for user in users:\n if (user in city_potentials) and (user in sport_potentials):\n potentials.append(user)\n return render_template('findteammates.html', potentials=potentials)",
"def search(self, q, **kw):\n return self.gnip.search(q, **kw)",
"def search(self):\n for preprint in self.data:\n if (\n any(keyword in preprint[\"Title\"].lower() for keyword in self.keywords)\n or any(\n keyword in preprint[\"Abstract\"].lower() for keyword in self.keywords\n )\n or any(author in preprint[\"Authors\"] for author in self.authors)\n ):\n self.hits.append(preprint)\n print(f\"Found {len(self.hits)} hit(s) in {len(self.data)} preprints.\\n\")",
"def getNearbyPlaces(self):\n\t\tself.nearby = self.gmaps.places_radar(\n\t\t\t(self.centre['lat'], self.centre['lng']),\n\t\t\tradius=self.radius*self.radius_factor,\n\t\t\tkeyword=self.keywords\n\t\t\t)['results']\n\n\t\t##Lets create a master dict in the object which has all the place info\n\t\t##we can then pull the day out after the fact instead of constantly adding terms\n\t\tfor i in xrange(len(self.nearby)):\n\t\t\tself.nearby[i]['place_info'] = self.gmaps.place(self.nearby[i]['place_id'])\n\n\t\t##some ratings are missing - lets add them in as zero\n\t\tfor i in xrange(len(self.nearby)):\n\t\t\tif 'rating' not in self.nearby[i]['place_info']['result'].keys():\n\t\t\t\tself.nearby[i]['place_info']['result']['rating'] = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the old word to a new word in the trie.
|
def update_word(self, old_word, new_word) -> None:
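# __delitem__ returns a truthy value only if old_word was present; the new word is added only in that case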
if self.__delitem__(old_word):
self.add(new_word)
|
[
"def change_weight(mytrie, word, updatefunc):\r\n assert isinstance(word, str), \"The word to change weight should be a string.\"\r\n\r\n node = Trie(mytrie).searchTrie(word)\r\n # if word doesn't exist in trie or it is not a full word in trie\r\n if node is None or node.weight == -1:\r\n return mytrie\r\n else:\r\n oldweight = node.weight\r\n newweight = updatefunc(oldweight)\r\n delete_term(mytrie, word)\r\n mytrie.insertWord(newweight, word)\r\n return mytrie",
"def update_word(self, word):\n\n self.word_count += 1\n length = len(self.nodes)\n\n # If we have no data. Let's just create a new node.\n if length < 1:\n self.nodes.append([1, [word]])\n return\n\n for i in range(length):\n current_node = self.nodes[i]\n words = current_node[1]\n\n # If our word is in the current node. Remove it then check more things.\n if word in words:\n words.remove(word)\n\n # If there are no following nodes. We are the greatest node and can just add a new node\n if i > length - 2:\n self.nodes.append([current_node[0] + 1, [word]])\n else:\n next_node = self.nodes[i + 1]\n\n # If the next nodes occurrences is equal to one more then the current node. We add our word to the\n # next node. If not, then we create a new node there.\n if next_node[0] == current_node[0] + 1:\n next_node[1].append(word)\n else:\n self.nodes.insert(i + 1, [current_node[0] + 1, [word]])\n\n # If there are no words left in this node. We delete the node.\n if len(words) < 1:\n del self.nodes[i]\n\n return\n\n # We check if the first nodes occurrences is 1. If it is we add our word. If not, we create a new node that\n # the occurrences is one\n if self.nodes[0][0] == 1:\n self.nodes[0][1].append(word)\n else:\n self.nodes.insert(0, [1, [word]])",
"def insert_or_update(mytrie, word, newweight):\r\n assert isinstance(word, str), \"The word to be added should be a string.\"\r\n assert isinstance(newweight, int), \"The weight of the word should be an integer\"\r\n\r\n node = Trie(mytrie).searchTrie(word)\r\n\r\n if node is not False:\r\n weightplusone = node.weight + 1\r\n mytrie.insertWord(weightplusone, word)\r\n return\r\n else:\r\n mytrie.insertWord(newweight, word)",
"def add(self, word):\n current = self\n for letter in word:\n current = current._children.setdefault(letter, Trie())\n current._endsHere = True",
"def change_word(self, word):\n self.word = word",
"def insert(self, word):\n cur = self.head\n for w in word:\n c = self.getindex(w)\n if not cur.root[c]:\n cur.root[c] = self.TrieNode()\n cur = cur.root[c]\n cur.setend()",
"def add(self, word: str):\n current_node = self.root\n for c in word:\n children = current_node.children\n next_node = children.get(c, None)\n if not next_node:\n self.size += 1\n next_node = TrieNode(c, {}, current_node, False, current_node.depth + 1, self.size)\n current_node.children[c] = next_node\n if current_node.depth > self.depth:\n self.depth = current_node.depth\n current_node = next_node\n current_node.end_state = True\n return current_node",
"def add_term(mytrie, word, weight):\r\n assert isinstance(word, str), \"The word to be added should be a string.\"\r\n assert isinstance(weight, int), \"The weight of the word should be an integer\"\r\n mytrie.insertWord(weight, word)",
"def add_words(self, train):\n for word in train:\n self._trie[word] = word",
"def insertTrie(root, word, inter):\n # print(word, inter)\n strlen = len(word)\n if not strlen:\n return\n\n index = ord(word[0])\n if strlen > 1:\n if not root.child[index]:\n root.child[index] = TrieNode()\n insertTrie(root.child[index], word[1:], inter)\n else:\n if root.child[index]:\n root.child[index].inter = inter\n return\n else:\n root.child[index] = TrieNode(inter)",
"def addword(self,word):\n self.dictionary[word]=to_tubbish(word)",
"def rescale_weight(mytrie, updatefunc):\r\n fulltrie = autocomplete(mytrie, \"\", 2^31-1)\r\n lst = []\r\n for i in range(0, len(fulltrie)):\r\n lst.append(fulltrie[i][1])\r\n for word in lst:\r\n change_weight(mytrie, word, updatefunc)\r\n return mytrie",
"def add_word(self, word):\n # if it's a new word\n if word not in self.word2index:\n # add it into word2index\n self.index2word[self.num_words] = word\n self.word2index[word] = self.num_words\n self.word_counts[word] = 1\n self.num_words += 1\n else: # it has been in the vocabulary\n self.word_counts[word] += 1",
"def append_to_word(self, prefix, word) -> None:\n # mark is_word to false\n self.__delitem__(prefix)\n\n # add/append the word\n self.add(prefix + word)",
"def test_insert_word_relationship(self):\n testword = (\"testword\", 4.87)\n testword2 = (\"testwordz\", 9.17)\n testword_node = self.trie.insert_word(testword[0], testword[1])\n testword2_node = self.trie.insert_word(testword2[0], testword2[1])\n self.assertEqual(testword_node, testword2_node.parent)",
"def update_guessword(guess_word, word, guessed_letter):\n for i, letter in enumerate(word):\n if letter == guessed_letter:\n guess_word[i] = letter\n return guess_word",
"def insert(self, word):\n n = len(self.node_list)\n node = Node(word)\n self.node_list.append(Node(word))\n self.sift_up(n)",
"def addWord(self, word):\n self.wordDict[len(word)].add(word)",
"def _add_word(self,word):\n word=word.lower() \n if word not in stop_words:\n steamed_word = self._stem(word) \n self.all_words.setdefault(steamed_word,0)\n self.all_words[steamed_word] += 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Appends the word to a prefix in the trie.
|
def append_to_word(self, prefix, word) -> None:
# mark is_word to false
self.__delitem__(prefix)
# add/append the word
self.add(prefix + word)
|
[
"def add(self, word):\n current = self\n for letter in word:\n current = current._children.setdefault(letter, Trie())\n current._endsHere = True",
"def add(self, word: str):\n current_node = self.root\n for c in word:\n children = current_node.children\n next_node = children.get(c, None)\n if not next_node:\n self.size += 1\n next_node = TrieNode(c, {}, current_node, False, current_node.depth + 1, self.size)\n current_node.children[c] = next_node\n if current_node.depth > self.depth:\n self.depth = current_node.depth\n current_node = next_node\n current_node.end_state = True\n return current_node",
"def insert(self, word):\n cur = self.head\n for w in word:\n c = self.getindex(w)\n if not cur.root[c]:\n cur.root[c] = self.TrieNode()\n cur = cur.root[c]\n cur.setend()",
"def addPrefix(self, prefix):\n \n pass",
"def add_term(mytrie, word, weight):\r\n assert isinstance(word, str), \"The word to be added should be a string.\"\r\n assert isinstance(weight, int), \"The weight of the word should be an integer\"\r\n mytrie.insertWord(weight, word)",
"def add_words(self, train):\n for word in train:\n self._trie[word] = word",
"def add_all_prefixes(prefix_dict , word_list):\n for word in word_list:\n add_prefixes(prefix_dict, word)",
"def insertTrie(root, word, inter):\n # print(word, inter)\n strlen = len(word)\n if not strlen:\n return\n\n index = ord(word[0])\n if strlen > 1:\n if not root.child[index]:\n root.child[index] = TrieNode()\n insertTrie(root.child[index], word[1:], inter)\n else:\n if root.child[index]:\n root.child[index].inter = inter\n return\n else:\n root.child[index] = TrieNode(inter)",
"def add_prefixes(prefix_dict , input_string):\n for i in range(1, len(input_string) + 1):\n if input_string[:i] not in prefix_dict:#adds a prefix with the word to the dictionary\n prefix_dict[input_string[:i]] = [input_string]\n elif input_string not in prefix_dict[input_string[:i]]:#adds a word to the list of an exist prefix in the dictionary \n prefix_dict[input_string[:i]].append(input_string)",
"async def add_prefix(self, ctx, prefix: Prefix):\n prefixes = ctx.bot.get_raw_guild_prefixes(ctx.guild)\n if prefix in prefixes:\n return await ctx.send(f\"\\\"{prefix}\\\" was already a custom prefix...\")\n\n prefixes += (prefix, )\n await ctx.bot.set_guild_prefixes(ctx.guild, prefixes)\n await ctx.send(f\"Successfully added prefix \\\"{prefix}\\\"!\")",
"def append_word(self, word):\n\t\tself.fradStructure[self.type][self.topBottom][self.row][self.column][self.minor].append(word)",
"def addWord(self, word):\n self.wordDict[len(word)].add(word)",
"def addword(self, word):\n\t\tself.text.append(word)",
"def addword(self,word):\n self.dictionary[word]=to_tubbish(word)",
"def update_word(self, word):\n\n self.word_count += 1\n length = len(self.nodes)\n\n # If we have no data. Let's just create a new node.\n if length < 1:\n self.nodes.append([1, [word]])\n return\n\n for i in range(length):\n current_node = self.nodes[i]\n words = current_node[1]\n\n # If our word is in the current node. Remove it then check more things.\n if word in words:\n words.remove(word)\n\n # If there are no following nodes. We are the greatest node and can just add a new node\n if i > length - 2:\n self.nodes.append([current_node[0] + 1, [word]])\n else:\n next_node = self.nodes[i + 1]\n\n # If the next nodes occurrences is equal to one more then the current node. We add our word to the\n # next node. If not, then we create a new node there.\n if next_node[0] == current_node[0] + 1:\n next_node[1].append(word)\n else:\n self.nodes.insert(i + 1, [current_node[0] + 1, [word]])\n\n # If there are no words left in this node. We delete the node.\n if len(words) < 1:\n del self.nodes[i]\n\n return\n\n # We check if the first nodes occurrences is 1. If it is we add our word. If not, we create a new node that\n # the occurrences is one\n if self.nodes[0][0] == 1:\n self.nodes[0][1].append(word)\n else:\n self.nodes.insert(0, [1, [word]])",
"def insert_or_update(mytrie, word, newweight):\r\n assert isinstance(word, str), \"The word to be added should be a string.\"\r\n assert isinstance(newweight, int), \"The weight of the word should be an integer\"\r\n\r\n node = Trie(mytrie).searchTrie(word)\r\n\r\n if node is not False:\r\n weightplusone = node.weight + 1\r\n mytrie.insertWord(weightplusone, word)\r\n return\r\n else:\r\n mytrie.insertWord(newweight, word)",
"def insert(self, word):\n n = len(self.node_list)\n node = Node(word)\n self.node_list.append(Node(word))\n self.sift_up(n)",
"async def add(self, ctx, *new_prefixes):\n # self.bot.prefixes.setdefault(ctx.guild.id, ['%']).extend(new_prefixes)\n was_empty = False\n if ctx.guild.id not in self.bot.prefixes:\n was_empty = True\n self.bot.prefixes[ctx.guild.id] = ['%']\n current = self.bot.prefixes[ctx.guild.id]\n added = []\n for prefix in new_prefixes:\n if prefix not in current:\n current.append(prefix)\n added.append(prefix)\n if added:\n await ctx.send(f'Added {\", \".join(added)} to this server\\'s prefixes')\n query = '''INSERT INTO prefixes(guild, prefix)\n VALUES ($1, $2);'''\n if was_empty:\n added.insert(0, '%')\n prefixes_with_guild = [(ctx.guild.id, p) for p in added]\n await self.bot.pool.executemany(query, prefixes_with_guild)\n else:\n await ctx.send('No new prefix has been added')",
"def addword(T, w):\n \n #FIXME\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Postprocess the model output predictions.
|
def postprocess_predictions(self, predictions: Prediction, metadata: Union[None, ProcessingMetadata]) -> Prediction:
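# no-op by default; subclasses may override this hook to transform the raw predictions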
pass
|
[
"def postprocess(cls, output) -> \"OutputModel\":\n pass",
"def postprocess_model_outputs(self, predictions, expected):\n expected[\"y\"] = expected[\"y\"].numpy()\n expected[\"display_ids\"] = expected[\"display_ids\"].numpy()\n\n return predictions.numpy(), expected",
"def postprocess(output_val, orig_shape):\n\n \"\"\"\n Params:\n output_val - dictionary mapping output_data to output_layers\n \n Return:\n outputs - dictionary mapping transformed_output_data to output_layers\n \"\"\"\n ou_layer = list(output_val.keys())[0]\n output = output_val[ou_layer]\n\n if output.shape[2:] != orig_shape[2:]:\n output = output[:, :, :orig_shape[2], :orig_shape[3]]\n\n out = F.softmax(torch.from_numpy(output), dim=1)\n prediction = out.max(1)[1].cpu().numpy()[0]\n\n return {ou_layer: prediction}",
"def postprocess(self, prediction_dict, true_image_shapes, **params):\n field = fields.DetectionResultFields\n with tf.variable_scope('PostprocessInference'):\n detections_dict = {}\n # 1. Semantic prediction\n semantic_prediction, semantic_prediction_probability = \\\n self._postprocess_logits(\n prediction_dict['semantic_predictions'], true_image_shapes)\n detections_dict[field.detection_semantic] = semantic_prediction\n detections_dict[field.detection_semantic_heatmap] \\\n = semantic_prediction_probability\n if self._instance_segmentation:\n # 2. Instance prediction\n # instance_prediction, instance_prediction_probability = \\\n # self._postprocess_logits(\n # prediction_dict['instance_predictions'], true_image_shapes)\n # detections_dict[field.detection_masks] = instance_prediction\n # detections_dict[field.detection_masks_heatmap] \\\n # = instance_prediction_probability\n instance_prediction = \\\n self._postprocess_cluster(\n prediction_dict['instance_predictions'],\n semantic_prediction, true_image_shapes)\n detections_dict[field.detection_masks] = instance_prediction\n # 3. Panoptic prediction\n with tf.variable_scope('Panoptic'):\n sem_image = tf.cast(semantic_prediction, dtype=tf.uint8)\n ins_image = tf.cast(instance_prediction, dtype=tf.uint8)\n sem_mask = tf.ones_like(sem_image, dtype=sem_image.dtype)\n ins_mask = tf.where(\n tf.greater(sem_image, sem_mask*self.num_classes),\n tf.zeros_like(ins_image), ins_image)\n zero_image = tf.zeros_like(ins_image, dtype=tf.uint8)\n panoptic_image = tf.concat(\n [sem_image, ins_mask, zero_image], axis=-1)\n tf.summary.image('panoptic', panoptic_image)\n detections_dict[field.detection_masks_image] = ins_image\n detections_dict[field.detection_panoptic_image] = panoptic_image\n return detections_dict",
"def do_predictions(self):\n\n self.train_preds = self.tfmodel.predict(self.Data.X_train)\n self.test_preds = self.tfmodel.predict(self.Data.X_test)\n\n self.Helpers.logger.info(\n \"Training predictions: \" + str(self.train_preds))\n self.Helpers.logger.info(\n \"Testing predictions: \" + str(self.test_preds))\n print(\"\")",
"def update_predictions(self):\n\n\n assert self._models != dict(), \"model must be fitted or loaded before predictions are possible\"\n self._base.delete_predictions()\n data = self._base.get_not_predicted()\n i = 0\n while data.shape[0] != 0:\n print(\"UPDATING PREDICTIONS FOR CHUNK {}\".format(i))\n x = self.bow_preprocessing(data)\n print(\"- performing predictions\")\n y = self._predict(x)\n y_val = y.values\n ids = data[\"id\"].values.reshape(-1,1)\n if y_val.shape[0] != ids.shape[0]:\n raise RuntimeError(\"internal error on binding results to sentence ids\")\n result_df = pd.DataFrame(np.concatenate((ids, y_val), axis=1), columns=[\"sentence_id\", *y.columns])\n print(\"- updating data base\")\n self._base.update_predictions(result_df)\n\n i += 1\n data = self._base.get_not_predicted()\n\n self.predicted = True",
"def post_prediction_jobs():\n\n return post_jobs(PREDICTION)",
"def postprocess(result_path):\n tp, tn_1, tn_2 = 0, 0, 0\n\n result_list = os.listdir(result_path)\n result_list = filter(lambda x: x.endswith('.bin'), result_list)\n\n for result_name in result_list:\n # get true label\n true_label = [CHARS_DICT[c] for c in result_name.split('_')[0]]\n # inference result label\n rst_path = os.path.join(result_path, result_name)\n preb_label = parse_result(rst_path)\n\n if len(preb_label) != len(true_label):\n tn_1 += 1 # length error\n print(f'[ERROR1]true content: {parse_name(true_label)}, preb content: {parse_name(preb_label)}')\n continue\n if (np.asarray(preb_label) == np.asarray(true_label)).all():\n tp += 1 # content right\n print(f'[ INFO ]true content: {parse_name(true_label)}, preb content: {parse_name(preb_label)}')\n else:\n tn_2 += 1 # content error\n print(f'[ERROR2]true content: {parse_name(true_label)}, preb content: {parse_name(preb_label)}')\n\n accuracy = tp / (tp + tn_1 + tn_2)\n print('=' * 70)\n print('[ INFO ]Test Accuracy: {} [{}:{}:{}]'.format(\n accuracy, tp, tn_1, tn_2, (tp + tn_1 + tn_2)))\n print(\"=\" * 70)\n print('[\"ERROR1\" means predict result length is different from true content!]')\n print('[\"ERROR2\" means predict result content is different from true content!]')\n print('=' * 70)",
"def predict():\n if model:\n\n try:\n incoming_data = request.get_json()\n client_ip = request.environ['REMOTE_ADDR']\n # Keep only the variables contribution to model prediction\n repeat_contact = {key: [value] for key, value in incoming_data.items() if key.lower() not in config.NOT_TO_READ}\n \n with counter.get_lock():\n counter.value += 1\n out = counter.value\n predictions = predict_repeat_contact(repeat_contact, model, features_transform_pipe)\n app.logger.info(f\"The prediction has been served for request id {counter} with client ip {client_ip}\")\n \n # we can store the incoming_data and final predictions in the database \n\n return jsonify(predictions)\n except:\n return jsonify({'trace': traceback.format_exc()})\n else:\n return (\"No model loaded\")",
"def on_predict_end(self, state: State) -> None:\n pass",
"def process_predict(self):\n rn50_model = rn50()\n iV3_model = iV3(self.img_path)\n dog_filters = df(self.img_path)\n faces, BGR_img = self.Improved_OpenCV_face_detector()\n dogs = rn50_model.dog_detector(self.img_path)\n #if dog and human in the same image, model predicts dog breeds will always based on the dog\n #so we have to cropped the human image from the dog\n if(dogs != 0):\n print('Hello, dog!')\n u.show_upload_image(self.img_path)\n iV3_model.show_top5_result()\n if(len(faces) > 0):\n cropped_imgs = u.crop_detected_faces(BGR_img, faces)\n self.detect_face_on_cropped_imgs(cropped_imgs)\n u.delete_cropped_images()\n #if more than one people in the same image, model predicts dog breeds will always show one result\n #so we have to crop the human image to individuals\n else:\n if(len(faces) > 1):\n cropped_imgs = u.crop_detected_faces(BGR_img, faces)\n self.detect_face_on_cropped_imgs(cropped_imgs)\n u.delete_cropped_images()\n elif(len(faces) == 1):\n print('Hello, human!')\n dog_filters.apply_snapchat_filter()\n iV3_model.show_top5_result()\n else:\n print('No human. No dog.')\n u.show_test_image(self.img_path)",
"def build_output(self, model): # pylint: disable=no-self-use\n if model.mode != utils.INFER:\n model.score = tf.nn.softmax(model.logits, name=\"score\")\n model.preds = tf.argmax(model.logits, axis=-1)\n model.output_dict = {\"score\": model.score, \"preds\": model.preds}\n else:\n model.preds = model.logits\n model.output_dict = {\"preds\": model.preds}\n if hasattr(model, \"input_y\"):\n model.y_ground_truth = model.input_y",
"def on_predict_epoch_end(self, state: State) -> None:\n pass",
"def reduce_predictions_and_save(self, raw_path: str, output_dir: str) -> str:\n basename = os.path.basename(raw_path)\n # Read number of points only from las metadata in order to minimize memory usage\n nb_points = get_pdal_info_metadata(raw_path)[\"count\"]\n logits, idx_in_full_cloud = self.reduce_predicted_logits(nb_points)\n\n probas = torch.nn.Softmax(dim=1)(logits)\n\n if self.predicted_classification_channel:\n preds = torch.argmax(logits, dim=1)\n preds = np.vectorize(self.reverse_mapper.get)(preds)\n\n del logits\n\n # Read las after fetching all information to write into it\n las = self.load_full_las_for_update(src_las=raw_path)\n\n for idx, class_name in enumerate(self.classification_dict.values()):\n if class_name in self.probas_to_save:\n # NB: Values for which we do not have a prediction (i.e. artefacts) get null probabilities.\n las[class_name][idx_in_full_cloud] = probas[:, idx]\n\n if self.predicted_classification_channel:\n # NB: Values for which we do not have a prediction (i.e. artefacts) keep their original class.\n las[self.predicted_classification_channel][idx_in_full_cloud] = preds\n log.info(\n f\"Saving predicted classes to channel {self.predicted_classification_channel}.\"\n \"Channel name can be changed by setting `predict.interpolator.predicted_classification_channel`.\"\n )\n del preds\n\n if self.entropy_channel:\n # NB: Values for which we do not have a prediction (i.e. artefacts) get null entropy.\n las[self.entropy_channel][idx_in_full_cloud] = Categorical(probs=probas).entropy()\n log.info(\n f\"Saving Shannon entropy of probabilities to channel {self.entropy_channel}.\"\n \"Channel name can be changed by setting `predict.interpolator.entropy_channel`\"\n )\n del idx_in_full_cloud\n\n os.makedirs(output_dir, exist_ok=True)\n out_f = os.path.join(output_dir, basename)\n out_f = os.path.abspath(out_f)\n log.info(f\"Updated LAS ({basename}) will be saved to: \\n {output_dir}\\n\")\n log.info(\"Saving...\")\n pipeline = pdal.Writer.las(filename=out_f, extra_dims=\"all\", minor_version=4, dataformat_id=8).pipeline(las)\n pipeline.execute()\n log.info(\"Saved.\")\n\n return out_f",
"def _process_predictions(self, evaluation: Evaluation) -> None:\n if self._sample_n == 0:\n evaluation._predictions = None\n if evaluation.predictions is None:\n return # Predictions either not provided or removed because sample_n is 0.\n\n if isinstance(self._sample, str) and self._sample == \"not set\":\n # Happens only for the first evaluation with predictions.\n self.determine_sample_indices(self._sample_n, len(evaluation.predictions))\n\n if self._sample is not None:\n evaluation._predictions = evaluation.predictions[self._sample]",
"def ss_Predict(self):\n \n self.ukf.predict() \n self.forecasts.append(self.ukf.x)\n self.base_model.step()\n self.truths.append(self.base_model.get_state(sensor=\"location\"))",
"def predict(self, prediction_data: np.ndarray) -> np.ndarray:\n # Initialize result dimension\n size = len(prediction_data)\n n_out = self.__neuron_each_layer[self.__depth - 1]\n \n # Initialize result\n res = np.empty((size, n_out))\n\n for i in range(size):\n res[i] = self.forward(prediction_data[i])\n\n # If output is unilabel\n if n_out == 1:\n # Round probability to nearest integer\n res = np.rint(res)\n \n # Return array of class prediction\n return res.flatten()\n \n # If output is multilabel\n else:\n for i in range(size):\n # Round highest probability to 1 else 0\n res[i] = np.where(res[i] == np.amax(res[i]), 1, 0)\n\n return res",
"def predict_pipeline(prediction_pipeline_params: PredictPipelineParams):\n logger.info(\"Loading data...\")\n data = read_data(prediction_pipeline_params.input_data_path)\n\n logger.info(\"Loading pipeline...\")\n pipeline = deserialize_model(prediction_pipeline_params.model_path)\n\n logger.info(\"Making predictions...\")\n predictions = predict_model(pipeline, data)\n\n logger.info(\"Saving predictions...\")\n data[\"predictions\"] = predictions\n data.to_csv(prediction_pipeline_params.output_data_path)\n logger.info(f\"Predictions saved to {prediction_pipeline_params.output_data_path}\")\n logger.info(\"Done.\")",
"def post_predict_key(key='id'):\n return _run_predict(key)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the equivalent photometric preprocessing module for this processing. A photometric preprocessing applies a transformation to the image pixels without changing the image size. This includes RGB > BGR, standardization, normalization, etc. If a Processing subclass does not change pixel values, it should return an nn.Identity module. If a Processing subclass has no equivalent photometric preprocessing, it should return None.
|
def get_equivalent_photometric_module(self) -> Optional[nn.Module]:
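# per the description above: return nn.Identity() when pixels are unchanged, None when no photometric equivalent exists; this stub only defines the interface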
pass
|
[
"def get_image_preprocessor(self):\n image_size = self.model.get_image_size()\n input_data_type = tf.float32\n\n shift_ratio = 0\n if self.job_name:\n # shift_ratio prevents multiple workers from processing the same batch\n # during a step\n assert self.worker_hosts\n shift_ratio = float(self.task_index) / len(self.worker_hosts)\n\n processor_class = self.dataset.get_image_preprocessor()\n if processor_class is not None:\n return processor_class(\n image_size, image_size, self.batch_size,\n len(self.devices), dtype=input_data_type, train=(not FLAGS.eval),\n distortions=FLAGS.distortions, resize_method=self.resize_method,\n shift_ratio=shift_ratio)\n else:\n assert isinstance(self.dataset, datasets.SyntheticData)\n return None",
"def preprocessing(self):\n # type: () -> DolbyDigitalPreprocessing\n return self._preprocessing",
"def get_preprocess(self) -> Callable[[np.ndarray, rectangle.Rectangle, List[int]], np.ndarray]:\n return self.__preprocess_function",
"def getImageProcessor(self):\r\n return _osgDB.Registry_getImageProcessor(self)",
"def get_preprocess(self) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n load_from_file = self.model_cfg.data.test.pipeline[0]\n model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = model_cfg.data.test.pipeline\n preprocess[0] = load_from_file\n return preprocess",
"def get_preprocessing_transform() -> T.Compose:\n composition = T.Compose(\n [\n T.ToTensor(),\n T.Lambda(lambda x: x - x.mean(dim=(1, 2)).reshape((3, 1, 1)) + COLOR_MEAN),\n T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ]\n )\n\n return composition",
"def input_processing_configuration(self) -> Optional['outputs.ApplicationInputProcessingConfiguration']:\n return pulumi.get(self, \"input_processing_configuration\")",
"def get_processor(self, name):\n if name is None:\n name = 'processor0'\n return self.processors.get(name, None)",
"def _preprocess(self, problem):\n return self.preprocess.apply(problem) if self.preprocess is not None else problem",
"def base_morphism(self):\n try:\n return self._base_morphism\n except AttributeError:\n from sage.categories.schemes import Schemes\n from sage.schemes.generic.spec import SpecZ\n SCH = Schemes()\n if hasattr(self, '_base_scheme'):\n self._base_morphism = self.Hom(self._base_scheme, category=SCH).natural_map()\n elif hasattr(self, '_base_ring'):\n self._base_morphism = self.Hom(AffineScheme(self._base_ring), category=SCH).natural_map()\n else:\n self._base_morphism = self.Hom(SpecZ, category=SCH).natural_map()\n return self._base_morphism",
"def create_preprocessing_model(output_shape=(224, 224, 3), model_variant=\"senet50\"):\n input_shape = (None, None, 3)\n input = Input(shape=input_shape, batch_size=1, name=\"input_image\")\n\n x = ChannelReversal()(input)\n x = Resizing(output_shape[0], output_shape[1], interpolation='bilinear', name=\"Resize\")(x)\n if model_variant == \"senet50\" or model_variant == \"resnet50\":\n output = DepthwiseNormalization([91.4953, 103.8827, 131.0912])(x)\n elif model_variant == \"vgg16\":\n output = DepthwiseNormalization([93.5940, 104.7624, 129.1863])(x)\n else:\n raise ValueError(f\"Unsupported model_variant: {model_variant}\")\n\n model = Model(input, output, name='preprocessing')\n return model",
"def get_preprocess(self, *args, **kwargs) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n pipeline = model_cfg.test_dataloader.dataset.pipeline\n meta_keys = [\n 'filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',\n 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg',\n 'valid_ratio'\n ]\n transforms = [\n item for item in pipeline if 'Random' not in item['type']\n and 'Annotation' not in item['type']\n ]\n for i, transform in enumerate(transforms):\n if transform['type'] == 'PackTextRecogInputs':\n meta_keys += transform[\n 'meta_keys'] if 'meta_keys' in transform else []\n transform['meta_keys'] = list(set(meta_keys))\n transform['keys'] = ['img']\n transforms[i]['type'] = 'Collect'\n if transform['type'] == 'Resize':\n transforms[i]['size'] = transforms[i].pop('scale')\n\n data_preprocessor = model_cfg.model.data_preprocessor\n transforms.insert(-1, dict(type='DefaultFormatBundle'))\n transforms.insert(\n -2,\n dict(\n type='Pad',\n size_divisor=data_preprocessor.get('pad_size_divisor', 1)))\n transforms.insert(\n -3,\n dict(\n type='Normalize',\n to_rgb=data_preprocessor.get('bgr_to_rgb', False),\n mean=data_preprocessor.get('mean', [0, 0, 0]),\n std=data_preprocessor.get('std', [1, 1, 1])))\n return transforms",
"def preprocess(self, image):\n return cv2.resize(image, (self.width, self.height), interpolation=self.interp)",
"def identity_morphism(self):\n from sage.schemes.generic.morphism import SchemeMorphism_id\n return SchemeMorphism_id(self)",
"def predictor(self) -> \"torch.nn.Module\": # type: ignore\n if self._predictor is None:\n self._predictor = torch.jit.load(self.path).eval()\n return self._predictor",
"def get_image_morphism(self,morphism_name):\r\n return self.morphisms_mapping[morphism_name]",
"def preprocessing_name(self) -> str:\n return \"preprocessing\"",
"def _get_compose_func(self):\n if self.compose_func == 'relu':\n compose_func_p = self.compose_relu\n else:\n if self.compose_func == 'tanh':\n compose_func_p = self.compose_tanh\n else:\n raise ValueError(\"Unknown Composition Function: {0}\".format(self.compose_func))\n\n return compose_func_p",
"def projective_transform(self):\r\n return transform.ProjectiveTransform(self.persp_matrix)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Infer the output image shape from the processing.
|
def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:
output_shape = None
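# walk the processing chain; the last step that reports a shape overwrites any earlier value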
for p in self.processings:
new_output_shape = p.infer_image_input_shape()
if new_output_shape is not None:
output_shape = new_output_shape
return output_shape
|
[
"def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n return self.output_shape",
"def output_shape(self):\n return None",
"def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n\n # Encoder\n x = OutputShapeFor(self.convbnrelu1.cbr_unit)(x)\n x = OutputShapeFor(self.maxpool)(x)\n\n e1 = OutputShapeFor(self.encoder1)(x)\n e2 = OutputShapeFor(self.encoder2)(e1)\n e3 = OutputShapeFor(self.encoder3)(e2)\n e4 = OutputShapeFor(self.encoder4)(e3)\n\n # Decoder with Skip Connections\n d4 = OutputShapeFor(self.decoder4)(e4)\n # d4 += e3\n d3 = OutputShapeFor(self.decoder3)(d4)\n # d3 += e2\n d2 = OutputShapeFor(self.decoder2)(d3)\n # d2 += e1\n d1 = OutputShapeFor(self.decoder1)(d2)\n\n # Final Classification\n f1 = OutputShapeFor(self.finaldeconvbnrelu1)(d1)\n f2 = OutputShapeFor(self.finalconvbnrelu2)(f1)\n f3 = OutputShapeFor(self.finalconv3)(f2)\n return f3",
"def get_image_shape(self) -> Tuple[int, int]:",
"def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)",
"def compute_output_shape(self, input_shape):\n return (\n input_shape[0],\n self.paddings[1][0] + input_shape[1] + self.paddings[1][1],\n self.paddings[2][0] + input_shape[2] + self.paddings[2][1],\n input_shape[3]\n )",
"def output_shape(self):\n if context.in_eager_mode():\n raise RuntimeError('Layer.output_shape not supported in Eager mode.')\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n output_shapes = self._inbound_nodes[0].output_shapes\n if len(output_shapes) == 1:\n return tuple(tensor_shape.TensorShape(output_shapes[0]).as_list())\n else:\n return [\n tuple(tensor_shape.TensorShape(shape).as_list())\n for shape in output_shapes\n ]\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)",
"def image_shape(self):\n return self.mri_imgs[0].shape",
"def get_images_shape():\n return (self.batch_size, self.OUTPUT_SIZE, self.OUTPUT_SIZE, self.NUM_CHANNELS)",
"def reshape_output(self, output):\n\t\treshaped_output = output.reshape((1,1,self.IMAGE_HEIGHT, \\\n\t\t\tself.IMAGE_WIDTH))\n\t\treshaped_output = reshaped_output.astype(theano.config.floatX)\n\t\treturn reshaped_output",
"def shape(self):\n return self._input.shape",
"def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape",
"def image_shape(fidelity=None):\n return [2 * Bridge.HEIGHT, Bridge.WIDTH]",
"def get_output_shape_at(self, node_index):\n if context.in_eager_mode():\n raise RuntimeError(\n 'Layer.get_output_shape_at not supported in Eager mode.')\n return self._get_node_attribute_at_index(node_index, 'output_shapes',\n 'output shape')",
"def compute_output_shape(observation_space, layers):\n # [None] adds a batch dimension to the random observation\n torch_obs = torch.tensor(observation_space.sample()[None])\n with torch.no_grad():\n sample = preprocess_obs(torch_obs, observation_space, normalize_images=True)\n for layer in layers:\n # forward prop to compute the right size\n sample = layer(sample)\n\n # make sure batch axis still matches\n assert sample.shape[0] == torch_obs.shape[0]\n\n # return everything else\n return sample.shape[1:]",
"def reshape_output(self, output, batch_size, set_size):\n\n output_sizes = output.size()\n # print('output_sizes:',output_sizes)\n reshaped = output.view(batch_size, set_size, *output_sizes[1:])\n return reshaped",
"def output_shape(self, l_in):\r\n out_channel, l_out = self.in_channel, l_in\r\n for conv1d_unit in self.conv_layers:\r\n out_channel, l_out = conv1d_unit.output_shape(l_out)\r\n return l_out, out_channel",
"def inference(self, mode, reference_image, image_size=250):\n reference_image = load_image(reference_image, image_size=image_size)\n reference_image = normalize_m11(reference_image)\n reals = self.create_real_pyramid(reference_image, num_scales=len(self.model))\n\n dir = create_dir(os.path.join(self.result_dir, mode))\n if mode == 'random_sample':\n z_fixed = tf.random.normal(reals[0].shape)\n for n in range(self.num_samples):\n fake = self.SinGAN_generate(reals, z_fixed, inject_scale=self.inject_scale)\n imsave(fake, dir + f'/random_sample_{n}.jpg') \n\n elif (mode == 'harmonization') or (mode == 'editing') or (mode == 'paint2image'):\n fake = self.SinGAN_inject(reals, inject_scale=self.inject_scale)\n imsave(fake, dir + f'/inject_at_{self.inject_scale}.jpg') \n\n else:\n print('Inference mode must be: random_sample, harmonization, paint2image, editing')",
"def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:\n\n def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):\n if isinstance(tensor, (tuple, list)):\n return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]\n\n else:\n # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)\n axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: \"batch\"}\n if is_input:\n if len(tensor.shape) == 2:\n axes[1] = \"sequence\"\n else:\n raise ValueError(f\"Unable to infer tensor axes ({len(tensor.shape)})\")\n else:\n seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]\n axes.update({dim: \"sequence\" for dim in seq_axes})\n\n print(f\"Found {'input' if is_input else 'output'} {name} with shape: {axes}\")\n return axes\n\n tokens = nlp.tokenizer(\"This is a sample output\", return_tensors=framework)\n seq_len = tokens.input_ids.shape[-1]\n outputs = nlp.model(**tokens) if framework == \"pt\" else nlp.model(tokens)\n if isinstance(outputs, ModelOutput):\n outputs = outputs.to_tuple()\n if not isinstance(outputs, (list, tuple)):\n outputs = (outputs,)\n\n # Generate input names & axes\n input_vars = list(tokens.keys())\n input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}\n\n # flatten potentially grouped outputs (past for gpt2, attentions)\n outputs_flat = []\n for output in outputs:\n if isinstance(output, (tuple, list)):\n outputs_flat.extend(output)\n else:\n outputs_flat.append(output)\n\n # Generate output names & axes\n output_names = [f\"output_{i}\" for i in range(len(outputs_flat))]\n output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}\n\n # Create the aggregated axes representation\n dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)\n return input_vars, output_names, dynamic_axes, tokens"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Infer the output image shape from the processing.
|
def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:
    return self.output_shape
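A tiny hedged sketch of this single-step variant (the _RescaleStep name and constructor are assumptions): the step records the shape it rescales to and reports it back unchanged.

from typing import Optional, Tuple


class _RescaleStep:
    def __init__(self, output_shape: Tuple[int, int]):
        self.output_shape = output_shape

    def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:
        return self.output_shape


assert _RescaleStep((640, 640)).infer_image_input_shape() == (640, 640)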
|
[
"def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n output_shape = None\n for p in self.processings:\n new_output_shape = p.infer_image_input_shape()\n if new_output_shape is not None:\n output_shape = new_output_shape\n\n return output_shape",
"def output_shape(self):\n return None",
"def output_shape_for(self, input_shape):\n # N1, C1, W1, H1 = input_shape\n # output_shape = (N1, self.n_classes, W1, H1)\n x = input_shape\n\n # Encoder\n x = OutputShapeFor(self.convbnrelu1.cbr_unit)(x)\n x = OutputShapeFor(self.maxpool)(x)\n\n e1 = OutputShapeFor(self.encoder1)(x)\n e2 = OutputShapeFor(self.encoder2)(e1)\n e3 = OutputShapeFor(self.encoder3)(e2)\n e4 = OutputShapeFor(self.encoder4)(e3)\n\n # Decoder with Skip Connections\n d4 = OutputShapeFor(self.decoder4)(e4)\n # d4 += e3\n d3 = OutputShapeFor(self.decoder3)(d4)\n # d3 += e2\n d2 = OutputShapeFor(self.decoder2)(d3)\n # d2 += e1\n d1 = OutputShapeFor(self.decoder1)(d2)\n\n # Final Classification\n f1 = OutputShapeFor(self.finaldeconvbnrelu1)(d1)\n f2 = OutputShapeFor(self.finalconvbnrelu2)(f1)\n f3 = OutputShapeFor(self.finalconv3)(f2)\n return f3",
"def get_image_shape(self) -> Tuple[int, int]:",
"def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)",
"def compute_output_shape(self, input_shape):\n return (\n input_shape[0],\n self.paddings[1][0] + input_shape[1] + self.paddings[1][1],\n self.paddings[2][0] + input_shape[2] + self.paddings[2][1],\n input_shape[3]\n )",
"def output_shape(self):\n if context.in_eager_mode():\n raise RuntimeError('Layer.output_shape not supported in Eager mode.')\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n output_shapes = self._inbound_nodes[0].output_shapes\n if len(output_shapes) == 1:\n return tuple(tensor_shape.TensorShape(output_shapes[0]).as_list())\n else:\n return [\n tuple(tensor_shape.TensorShape(shape).as_list())\n for shape in output_shapes\n ]\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)",
"def image_shape(self):\n return self.mri_imgs[0].shape",
"def get_images_shape():\n return (self.batch_size, self.OUTPUT_SIZE, self.OUTPUT_SIZE, self.NUM_CHANNELS)",
"def reshape_output(self, output):\n\t\treshaped_output = output.reshape((1,1,self.IMAGE_HEIGHT, \\\n\t\t\tself.IMAGE_WIDTH))\n\t\treshaped_output = reshaped_output.astype(theano.config.floatX)\n\t\treturn reshaped_output",
"def shape(self):\n return self._input.shape",
"def build(self,input_shape):\r\n self.input_shape = input_shape\r\n return input_shape",
"def image_shape(fidelity=None):\n return [2 * Bridge.HEIGHT, Bridge.WIDTH]",
"def get_output_shape_at(self, node_index):\n if context.in_eager_mode():\n raise RuntimeError(\n 'Layer.get_output_shape_at not supported in Eager mode.')\n return self._get_node_attribute_at_index(node_index, 'output_shapes',\n 'output shape')",
"def compute_output_shape(observation_space, layers):\n # [None] adds a batch dimension to the random observation\n torch_obs = torch.tensor(observation_space.sample()[None])\n with torch.no_grad():\n sample = preprocess_obs(torch_obs, observation_space, normalize_images=True)\n for layer in layers:\n # forward prop to compute the right size\n sample = layer(sample)\n\n # make sure batch axis still matches\n assert sample.shape[0] == torch_obs.shape[0]\n\n # return everything else\n return sample.shape[1:]",
"def reshape_output(self, output, batch_size, set_size):\n\n output_sizes = output.size()\n # print('output_sizes:',output_sizes)\n reshaped = output.view(batch_size, set_size, *output_sizes[1:])\n return reshaped",
"def output_shape(self, l_in):\r\n out_channel, l_out = self.in_channel, l_in\r\n for conv1d_unit in self.conv_layers:\r\n out_channel, l_out = conv1d_unit.output_shape(l_out)\r\n return l_out, out_channel",
"def inference(self, mode, reference_image, image_size=250):\n reference_image = load_image(reference_image, image_size=image_size)\n reference_image = normalize_m11(reference_image)\n reals = self.create_real_pyramid(reference_image, num_scales=len(self.model))\n\n dir = create_dir(os.path.join(self.result_dir, mode))\n if mode == 'random_sample':\n z_fixed = tf.random.normal(reals[0].shape)\n for n in range(self.num_samples):\n fake = self.SinGAN_generate(reals, z_fixed, inject_scale=self.inject_scale)\n imsave(fake, dir + f'/random_sample_{n}.jpg') \n\n elif (mode == 'harmonization') or (mode == 'editing') or (mode == 'paint2image'):\n fake = self.SinGAN_inject(reals, inject_scale=self.inject_scale)\n imsave(fake, dir + f'/inject_at_{self.inject_scale}.jpg') \n\n else:\n print('Inference mode must be: random_sample, harmonization, paint2image, editing')",
"def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:\n\n def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):\n if isinstance(tensor, (tuple, list)):\n return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]\n\n else:\n # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)\n axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: \"batch\"}\n if is_input:\n if len(tensor.shape) == 2:\n axes[1] = \"sequence\"\n else:\n raise ValueError(f\"Unable to infer tensor axes ({len(tensor.shape)})\")\n else:\n seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]\n axes.update({dim: \"sequence\" for dim in seq_axes})\n\n print(f\"Found {'input' if is_input else 'output'} {name} with shape: {axes}\")\n return axes\n\n tokens = nlp.tokenizer(\"This is a sample output\", return_tensors=framework)\n seq_len = tokens.input_ids.shape[-1]\n outputs = nlp.model(**tokens) if framework == \"pt\" else nlp.model(tokens)\n if isinstance(outputs, ModelOutput):\n outputs = outputs.to_tuple()\n if not isinstance(outputs, (list, tuple)):\n outputs = (outputs,)\n\n # Generate input names & axes\n input_vars = list(tokens.keys())\n input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}\n\n # flatten potentially grouped outputs (past for gpt2, attentions)\n outputs_flat = []\n for output in outputs:\n if isinstance(output, (tuple, list)):\n outputs_flat.extend(output)\n else:\n outputs_flat.append(output)\n\n # Generate output names & axes\n output_names = [f\"output_{i}\" for i in range(len(outputs_flat))]\n output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}\n\n # Create the aggregated axes representation\n dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)\n return input_vars, output_names, dynamic_axes, tokens"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Processing parameters commonly used for training YoloX on COCO dataset.
|
def default_yolox_coco_processing_params() -> dict:
    image_processor = ComposeProcessing(
        [
            ReverseImageChannels(),
            DetectionLongestMaxSizeRescale((640, 640)),
            DetectionBottomRightPadding((640, 640), 114),
            ImagePermute((2, 0, 1)),
        ]
    )
    params = dict(
        class_names=COCO_DETECTION_CLASSES_LIST,
        image_processor=image_processor,
        iou=0.65,
        conf=0.1,
    )
    return params
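For intuition, a standalone sketch of what the pipeline above does to a single image. This is an assumption-level re-implementation with numpy/cv2 (the function name yolox_style_preprocess is illustrative), not the library's ReverseImageChannels/DetectionLongestMaxSizeRescale/DetectionBottomRightPadding/ImagePermute classes themselves.

import cv2
import numpy as np


def yolox_style_preprocess(image_bgr: np.ndarray, target=(640, 640), pad_value=114) -> np.ndarray:
    # Reverse channel order (BGR -> RGB), mirroring ReverseImageChannels.
    image = np.ascontiguousarray(image_bgr[:, :, ::-1])

    # Rescale so the longest side fits inside `target` while keeping aspect ratio,
    # mirroring DetectionLongestMaxSizeRescale((640, 640)).
    h, w = image.shape[:2]
    scale = min(target[0] / h, target[1] / w)
    resized = cv2.resize(image, (int(round(w * scale)), int(round(h * scale))))

    # Pad the bottom/right up to the target shape with a constant value (114),
    # mirroring DetectionBottomRightPadding((640, 640), 114).
    padded = np.full((target[0], target[1], 3), pad_value, dtype=resized.dtype)
    padded[: resized.shape[0], : resized.shape[1]] = resized

    # HWC -> CHW, mirroring ImagePermute((2, 0, 1)).
    return padded.transpose(2, 0, 1)

Note that this recipe keeps raw 0-255 pixel values (there is no NormalizeImage step) and uses a lower default confidence threshold (0.1) than the PPYoloE recipe that appears later (0.5).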
|
[
"def default_ppyoloe_coco_processing_params() -> dict:\n\n image_processor = ComposeProcessing(\n [\n ReverseImageChannels(),\n DetectionRescale(output_shape=(640, 640)),\n NormalizeImage(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),\n ImagePermute(permutation=(2, 0, 1)),\n ]\n )\n\n params = dict(\n class_names=COCO_DETECTION_CLASSES_LIST,\n image_processor=image_processor,\n iou=0.65,\n conf=0.5,\n )\n return params",
"def setup_params(self):\n params = {}\n\n \"\"\"\n train_params defines the training parameters consisting of \n - the data provider that reads the data, preprocesses it and enqueues it into\n the data queue\n - the data queue that batches and if specified shuffles the data and provides \n the input to the model\n - other configuration parameters like the number of training steps\n It's arguments are\n data_params: defines how the data is read in.\n queue_params: defines how the data is presented to the model, i.e.\n if it is shuffled or not and how big of a batch size is used.\n targets: the targets to be extracted and evaluated in the tensorflow session\n num_steps: number of training steps\n thres_loss: if the loss exceeds thres_loss the training will be stopped\n validate_first: run validation before starting the training\n \"\"\"\n\n params['inter_op_parallelism_threads'] = 500\n\n params['train_params'] = {\n 'data_params': {\n # ImageNet data provider arguments\n 'func': Combine_world,\n 'cfg_dataset': self.Config.datasets,\n 'group': 'train',\n 'crop_size': self.Config.crop_size,\n # TFRecords (super class) data provider arguments\n 'file_pattern': 'train*.tfrecords',\n 'batch_size': 1, #self.Config.batch_size,\n 'shuffle': False,\n 'shuffle_seed': self.Config.seed,\n 'file_grab_func': self.subselect_tfrecords,\n 'n_threads': 1,#sum(self.Config.datasets.values()),\n },\n 'queue_params': {\n 'queue_type': 'random',\n 'batch_size': self.Config.batch_size,\n 'seed': self.Config.seed,\n 'capacity': self.Config.batch_size * 10,\n 'min_after_dequeue': self.Config.batch_size * 5,\n },\n 'targets': {\n 'func': self.return_outputs,\n 'targets': [],\n },\n 'num_steps': self.Config.train_steps,\n 'thres_loss': self.Config.thres_loss,\n 'validate_first': False,\n }\n\n \"\"\"\n validation_params similar to train_params defines the validation parameters.\n It has the same arguments as train_params and additionally\n agg_func: function that aggregates the validation results across batches,\n e.g. to calculate the mean of across batch losses\n online_agg_func: function that aggregates the validation results across\n batches in an online manner, e.g. to calculate the RUNNING mean across\n batch losses\n \"\"\"\n \"\"\"\n params['validation_params'] = {\n 'topn_val': {\n 'data_params': {\n # ImageNet data provider arguments\n 'func': ImageNetDataProvider,\n 'data_path': self.Config.data_path,\n 'group': 'val',\n 'crop_size': self.Config.crop_size,\n # TFRecords (super class) data provider arguments\n 'file_pattern': 'validation*.tfrecords',\n 'batch_size': self.Config.batch_size,\n 'shuffle': False,\n 'shuffle_seed': self.Config.seed,\n 'file_grab_func': self.subselect_tfrecords,\n 'n_threads': 4,\n },\n 'queue_params': {\n 'queue_type': 'fifo',\n 'batch_size': self.Config.batch_size,\n 'seed': self.Config.seed,\n 'capacity': self.Config.batch_size * 10,\n 'min_after_dequeue': self.Config.batch_size * 5,\n },\n 'targets': {\n 'func': self.in_top_k,\n },\n 'num_steps': self.Config.val_steps,\n 'agg_func': self.agg_mean, \n 'online_agg_func': self.online_agg_mean,\n }\n }\n \"\"\"\n params['validation_params'] = {}\n \"\"\"\n model_params defines the model i.e. the architecture that \n takes the output of the data provider as input and outputs \n the prediction of the model.\n\n You will need to EDIT alexnet_model in models.py. alexnet_model \n is supposed to define a standard AlexNet model in tensorflow. \n Please open models.py and fill out the missing parts in the alexnet_model \n function. 
Once you start working with different models you will need to\n switch out alexnet_model with your model function.\n \"\"\"\n params['model_params'] = {\n 'func': self.Config.ytn.inference,\n }\n\n \"\"\"\n loss_params defines your training loss.\n\n You will need to EDIT 'loss_per_case_func'. \n Implement a softmax cross-entropy loss. You can use tensorflow's \n tf.nn.sparse_softmax_cross_entropy_with_logits function.\n \n Note: \n 1.) loss_per_case_func is called with\n loss_per_case_func(inputs, outputs)\n by tfutils.\n 2.) labels = outputs['labels']\n logits = outputs['pred']\n \"\"\"\n def loss_wrapper(inputs, outputs):\n # coco\n predicts = outputs['bboxes']\n gt_boxes = tf.reshape(tf.cast(outputs['boxes'], tf.int32), [self.Config.batch_size, -1, 5])\n num_objects = outputs['num_objects']\n coco_loss, _, _ = self.Config.ytn.loss(predicts, gt_boxes, num_objects)\n # imagenet\n labels = outputs['labels']\n logits = outputs['logits']\n imagenet_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)\n print(imagenet_loss, coco_loss)\n return imagenet_loss + coco_loss\n \n params['loss_params'] = {\n 'targets': ['labels'],\n 'agg_func': tf.reduce_mean,\n 'loss_per_case_func': loss_wrapper,\n 'loss_per_case_func_params' : {'_outputs': 'outputs', \n '_targets_$all': 'inputs'},\n 'loss_func_kwargs' : {},\n }\n\n \"\"\"\n learning_rate_params defines the learning rate, decay and learning function.\n\n You will need to EDIT this part. Replace the exponential decay \n learning rate policy with a piecewise constant learning policy.\n ATTENTION: \n 1.) 'learning_rate', 'decay_steps', 'decay_rate' and 'staircase' are not\n arguments of tf.train.piecewise_constant! You will need to replace\n them with the appropriate keys. \n 2.) 'func' passes global_step as input to your learning rate policy \n function. Set the 'x' argument of tf.train.piecewise_constant to\n global_step.\n 3.) set 'values' to [0.01, 0.005, 0.001, 0.0005] and\n 'boundaries' to [150000, 300000, 450000] for a batch size of 256\n 4.) You will need to delete all keys except for 'func' and replace them\n with the input arguments to \n \"\"\"\n \n params['learning_rate_params'] = {\t\n 'func': tf.train.exponential_decay,\n 'learning_rate': 0.001,\n 'decay_steps': 5000, # FIX LATER,\n 'decay_rate': 0.95,\n 'staircase': True,\n }\n\n \"\"\"\n optimizer_params defines the optimizer.\n\n You will need to EDIT the optimizer class. Replace the Adam optimizer\n with a momentum optimizer after switching the learning rate policy to\n piecewise constant.\n \"\"\"\n params['optimizer_params'] = {\n 'func': optimizer.ClipOptimizer,\n 'optimizer_class': tf.train.AdamOptimizer,\n 'clip': False,\n }\n\n \"\"\"\n save_params defines how, where and when your training results are saved\n in the database.\n\n You will need to EDIT this part. Set your 'host' (set it to 'localhost',\n or to IP if using remote mongodb), 'port' (set it to 24444, unless you \n have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'. \n \"\"\"\n params['save_params'] = {\n 'host': '35.199.154.71 ',\n 'port': 24444,\n 'dbname': 'final',\n 'collname': 'yolo',\n 'exp_id': 'combined_fix',\n 'save_valid_freq': 10000,\n 'save_filters_freq': 5000,\n 'cache_filters_freq': 5000,\n 'save_metrics_freq': 200,\n 'save_initial_filters' : False,\n 'save_to_gfs': [],\n }\n\n \"\"\"\n load_params defines how and if a model should be restored from the database.\n\n You will need to EDIT this part. 
Set your 'host' (set it to 'localhost',\n or to IP if using remote mongodb), 'port' (set it to 24444, unless you \n have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'. \n\n If you want to restore your training these parameters should be the same \n as in 'save_params'.\n \"\"\"\n params['load_params'] = {\n 'host': '35.199.154.71 ',\n 'port': 24444,\n 'dbname': 'final',\n 'collname': 'yolo',\n 'exp_id': 'imagenet',\n 'do_restore': True,\n 'load_query': None,\n }\n\n return params",
"def locomotion():\n params = defaults()\n params['epochs'] = 312\n params['max_ep_len'] = 1000\n params['steps_per_epoch'] = 32 * 1000\n params['vf_lr'] = 3e-4 # default choice is Adam\n return params",
"def get_yolo_v2_params(self) -> dict:\n params = {}\n relevant_attributes = ['classes', 'coords', 'num']\n # pylint: disable=protected-access\n output_attributes = self.output_layers[0]._get_attributes()\n for attribute in relevant_attributes:\n params[attribute] = output_attributes.get(attribute)\n\n return params",
"def initialize_parameters(self):\n\n self.n_inputs = len(self.df.columns[:-1])\n self.n_hidden_per_layer = 3\n self.n_hidden = 2\n self.n_outputs = len(self.df.Class.unique()) if self.c_t == \"classification\" else 1\n self.learning_rate = .07\n self.epochs = 3\n self.momentum_factor = .5\n self.performance = 0",
"def process(self, training_example: dict) -> None:",
"def precompile_process():\r\n SystemParam.MODEL = \"Heisenberg\"\r\n #SystemParam.MODEL= \"Ising\"\r\n SystemParam.SYMMETRY = \"Z2\"\r\n SystemParam.USE_CUSTOM_RANDOM = False\r\n SystemParam.USE_REFLECTION = False\r\n SystemParam.NUM_OF_THREADS = None\r\n SystemParam.only_NN = True\r\n SystemParam.only_NNN = False",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n \"*** YOUR CODE HERE ***\"\n labelSize = len(trainingLabels)\n # count labels to find the count of Y --- make count objects from util.py\n labelCounter = util.Counter()\n conditionalCounter = util.Counter()\n\n for i in range(labelSize):\n label = trainingLabels[i]\n labelCounter[label] = labelCounter[label]+1\n\n # count the number of times a feature is true and specific label is used\n # values must be recorded for conditional probability calculations\n # the key for the counter should be a feature and its associated label so that we can represent the AND condition between them\n for feature in self.features:\n if trainingData[i][feature] == 1: # colored pixel\n conditionalCounter[(feature, label)] = conditionalCounter[(feature, label)]+1\n\n finalLabelProbabilities = labelCounter.copy()\n for label in self.legalLabels:\n for feature in self.features:\n finalLabelProbabilities[(feature, label)] = finalLabelProbabilities[(feature,label)] / labelSize\n self.labelProbabilities = finalLabelProbabilities\n\n probabilities = []\n accuracy = []\n validationSize = len(validationLabels)\n\n for k in kgrid:\n # divide conditionalCounter for each feature by the number of times each label appeared using labelCounter\n # |\n # --> = P (F | Y)\n \n tempCondCounter = util.Counter()\n for feature in self.features:\n for label in self.legalLabels:\n tempCondCounter[(feature, label)] = (conditionalCounter[(feature, label)]+k) / (labelCounter[label] + 2*k)\n\n self.conditionalProbabilities = tempCondCounter\n probabilities.append(tempCondCounter)\n\n # check if guess is correct\n guesses = self.classify(validationData)\n numCorrect = 0\n for label in range(validationSize):\n validationLabel = validationLabels[label]\n if validationLabel == guesses[label]:\n numCorrect = numCorrect + 1\n \n accuracy.append(numCorrect)\n \n index = accuracy.index(max(accuracy))\n self.conditionalProbabilities = probabilities[index]",
"def read_preprocess(params):\n\n if params.dataset == 'mnist':\n pcha = 1\n plen = 28\n else:\n pcha = 3\n plen = 32\n\n ratioHyper = params.ratioHyper\n ratioValid = params.ratioValid\n preProcess = params.preProcess\n preContrast = params.preContrast\n sigmoid = lambda x: 1./(1.+ np.exp(-x))\n \n # read data\n t1Data, t1Label, vData, vLabel, testD, testL = read(params)\n\n # permuting data \n vData, vLabel = permute(vData, vLabel, params)\n t1Data, t1Label = permute(t1Data, t1Label, params)\n\n # form datasets T1 and T2 \n if params.meta_bw:\n nVSamples = vData.shape[0]\n # set up t2+validation\n if ratioHyper > 1.:\n tempIndex = int(round((ratioHyper - 1.)*nVSamples))\n tempData = t1Data[:tempIndex]\n tempLabel = t1Label[:tempIndex]\n vData = np.concatenate((vData, tempData))\n vLabel = np.concatenate((vLabel, tempLabel))\n t1Data = t1Data[tempIndex:]\n t1Label = t1Label[tempIndex:]\n elif ratioHyper < 1.:\n tempIndex = int(round((1.-ratioHyper)*nVSamples))\n tempData = vData[:tempIndex]\n tempLabel = vLabel[:tempIndex]\n t1Data = np.concatenate((t1Data, tempData))\n t1Label = np.concatenate((t1Label, tempLabel))\n vData = vData[tempIndex:]\n vLabel = vLabel[tempIndex:]\n # shuffle indices in t2+validation\n nVSamples = vData.shape[0]\n # set up t2 and validation\n if params.ratioValid > 0:\n tempIndex = int(round(nVSamples*(1.-ratioValid)))\n t2Data = vData[:tempIndex]\n t2Label = vLabel[:tempIndex]\n vData = vData[tempIndex:]\n vLabel = vLabel[tempIndex:]\n else: \n tempIndex = int(round(nVSamples*(1.-ratioValid)))\n t2Data = vData\n t2Label = vLabel\n vData = vData[tempIndex:]\n vLabel = vLabel[tempIndex:]\n\n else:\n t2Data = []\n t2Label = [] \n if not params.ratioValid > 0:\n t1Data = np.concatenate((vData, t1Data))\n t1Label = np.concatenate((vLabel, t1Label)) \n\n # global contrast normalization and ZCA \n if preProcess in ['global_contrast_norm', 'global_contrast_norm+zca', 'zca']:\n \n if not params.meta_bw:\n t2Data = t1Data[:5, :]\n #data = [t1Data, t2Data, testD, vData]\n if params.dataset == 'convnet':\n t1Data = t1Data.reshape(-1, pcha, plen, plen)\n t2Data = t2Data.reshape(-1, pcha, plen, plen)\n testD = testD.reshape(-1, pcha, pcha, plen)\n t1Data.astype(dtype=np.float64); t2Data.astype(dtype=np.float64); testD.astype(dtype=np.float64)\n \n #print np.max(t1Data), np.max(t2Data), np.max(testD), ' shapes:', t1Data.shape, t2Data.shape, testD.shape\n #print np.var(t1Data), np.var(t2Data), np.var(testD) \n \n if preProcess in ['global_contrast_norm', 'global_contrast_norm+zca']:\n gcn = ContrastNorm()\n t1Data = gcn.apply(t1Data/np.float64(255.))\n t2Data = gcn.apply(t2Data/np.float64(255.))\n testD = gcn.apply(testD/np.float64(255.))\n\n #print np.max(t1Data), np.max(t2Data), np.max(testD), ' shapes:', t1Data.shape, t2Data.shape, testD.shape\n #print np.var(t1Data), np.var(t2Data), np.var(testD) \n\n \n if preProcess in ['zca', 'global_contrast_norm+zca']: \n white = ZCA(3072, t1Data.copy())\n t1Data = white.apply(t1Data)\n t2Data = white.apply(t2Data)\n testD = white.apply(testD)\n \n #print np.max(t1Data), np.max(t2Data), np.max(testD), ' shapes:', t1Data.shape, t2Data.shape, testD.shape\n #print np.var(t1Data), np.var(t2Data), np.var(testD), \n \n # other kinds of preprocessing \n else: \n scaler = {\n 'm0': preprocessing.StandardScaler(with_std = False).fit(t1Data),\n 'm0s1': preprocessing.StandardScaler().fit(t1Data),\n 'minMax': preprocessing.MinMaxScaler().fit(t1Data),\n 'None': 1.\n }[preProcess] \n if preProcess != 'None':\n t1Data = scaler.transform(t1Data)\n if 
params.meta_bw: t2Data = scaler.transform(t2Data)\n vData = scaler.transform(vData)\n testD = scaler.transform(testD)\n\n # contrast \n contrastFun = {\n 'tanh': np.tanh,\n 'arcsinh': np.arcsinh,\n 'sig': sigmoid,\n 'None': 1.\n }[preContrast]\n if preContrast != 'None':\n t1Data = contrastFun(t1Data)\n if params.meta_bw: t2Data = contrastFun(t2Data)\n vData = contrastFun(vData)\n testD = contrastFun(testD)\n\n\n print '- size T1, valid, T2'\n print t1Data.shape, vData.shape\n if params.meta_bw: print t2Data.shape\n \n\n\n # reshape if convnet\n if params.model == 'convnet':\n if params.dataset in ['mnist', 'not_mnist']:\n t1Data = t1Data.reshape(-1, 1, 28, 28)\n vData = vData.reshape(-1, 1, 28, 28)\n testD = testD.reshape(-1, 1, 28, 28)\n if params.meta_bw: \n t2Data = t2Data.reshape(-1, 1, 28, 28) \n \n if params.dataset in ['cifar10', 'svhn']:\n t1Data = t1Data.reshape(-1, 3, 32, 32)\n vData = vData.reshape(-1, 3, 32, 32)\n testD = testD.reshape(-1, 3, 32, 32)\n if params.meta_bw: \n t2Data = t2Data.reshape(-1, 3, 32, 32)\n \n # final shape \n print 'Elementary Set data shape: ', t1Data.shape, t1Label.shape\n if np.sum(np.isinf(t1Data)) > 0 : print 'Nan in T1 data!!'\n if np.sum(np.isinf(t1Label)) > 0 : print 'Nan in T1 label!!'\n\n if params.meta_bw: \n print 'Hyper Set data shape: ', t2Data.shape, t2Label.shape\n if np.sum(np.isinf(t2Data)) > 0 : print 'Nan in T2 data!!'\n if np.sum(np.isinf(t2Label)) > 0 : print 'Nan in T2 label!!'\n \n# show_samples(t1Data[:100]/255., 50) \n \n return t1Data, t1Label, t2Data, t2Label, vData, vLabel, testD, testL",
"def defaultParams(self):\n self.blurs = [[-1, self.fileRes], [-1, self.fileRes],[-1, self.fileRes]] \n self.gradient = [[False,True], [False,True], [False,True]]\n self.similarityMetric = [[\"CC\", \"CC\"],[\"CC\", \"CC\"],[\"CC\", \"CC\"]]\n self.weight = [[1,1],[1,1],[1,1]]\n self.radiusHisto = [[3,3],[3,3],[3,3]]\n self.transformationModel = [\"SyN[0.1]\", \"SyN[0.1]\", \"SyN[0.1]\"]\n self.regularization = [\"Gauss[2,1]\", \"Gauss[2,1]\", \"Gauss[2,1]\"]\n self.iterations = [\"100x100x100x0\", \"100x100x100x20\", \"100x100x100x100\"]\n self.useMask = [False, True, True]\n self.memoryRequired = [0.177, 1.385e-7, 2.1e-7]",
"def init_parameters(self):\r\n self.guessed_fishes_dict = {}\r\n self.train_index = 0",
"def config():\n model = None\n while model not in {\"NOTE\", \"NOTE_DURATION\"}:\n model = input(\"NOTE or NOTE_DURATION? (type one of the options in all caps): \")\n\n train = None\n while train not in {\"y\", \"n\"}:\n train = input(\"Do you want to train and save? (y/n): \")\n\n load = None\n while load not in {\"y\", \"n\"}:\n load = input(\"Do you want to load and generate? (only say yes if you said yes in the previous question, or have trained before) (y/n): \")\n\n if train == \"y\":\n composer = None\n while composer not in {\"Bach\", \"Mozart\", \"Beethoven\", \"Scarlatti\", \"Chopin\", \"Liszt\"}:\n composer = input(\"Pick a composer: Bach, Mozart, Beethoven, Scarlatti, Chopin, Liszt: \")\n\n one_track = \"n\"\n if composer != \"Scarlatti\":\n one_track = input(\"OneTrack (y/n): \")\n\n note_gen_epochs = int(input(\"How many NoteGen epochs? (int): \"))\n duration_gen_epochs = None\n if model == \"NOTE_DURATION\":\n duration_gen_epochs = int(input(\"How many DurationGen epochs? (int): \"))\n\n if one_track == \"y\":\n file_path_training_data = \"./OneTrackData/\" + composer\n file_path_save_data = \"./Dict Data/\" + \"OneTrack\" + composer\n file_path_save_weights = \"./Trained Weights/\" + model + \"_\" + \"OneTrack\" + composer + \",{},{}\".format(note_gen_epochs, duration_gen_epochs)\n file_path_read_weights = \"./Trained Weights/\" + model + \"_\" + \"OneTrack\" + composer + \",{},{}\".format(note_gen_epochs, duration_gen_epochs)\n else:\n file_path_training_data = \"./data/\" + composer\n file_path_save_data = \"./Dict Data/\" + \"MultiTrack\" + composer\n file_path_save_weights = \"./Trained Weights/\" + model + \"_\" + \"MultiTrack\" + composer + \",{},{}\".format(note_gen_epochs,\n duration_gen_epochs)\n file_path_read_weights = \"./Trained Weights/\" + model + \"_\" + \"MultiTrack\" + composer + \",{},{}\".format(note_gen_epochs,\n duration_gen_epochs)\n else:\n file_path_training_data = None\n note_gen_epochs = None\n duration_gen_epochs = None\n file_path_save_weights = None\n if load == 'n':\n exit()\n else:\n possible_files = os.listdir(\"./Trained Weights\")\n files_index = int(input(\"choose one number: \\n\" + \"\\n\".join([\"{} {}\".format(i, elm) for i, elm in enumerate(possible_files)]) + \"\\n\"))\n file_path_read_weights = \"./Trained Weights/\" + possible_files[files_index]\n\n possible_files = os.listdir(\"./Dict Data\")\n files_index = int(input(\n \"choose one number: \\n\" + \"\\n\".join([\"{} {}\".format(i, elm) for i, elm in enumerate(possible_files)]) + \"\\n\"))\n file_path_save_data = \"./Dict Data/\" + possible_files[files_index]\n\n return model, file_path_training_data, file_path_save_data, file_path_save_weights, file_path_read_weights, note_gen_epochs, duration_gen_epochs, train, load",
"def _init_param(self): \n \n param={}\n param['y'] = self.x_oris if self.FixedOris else self.x \n param['u'] = self.t\n for s in ['y','u']: param['N'+s] = len(param[s])\n param['sigma_d'] = self.sigma_d\n param['sigma_0'] = self.sigma_0\n param['m_0'] = np.log10(self.I_0)\n param['ell_0'] = self.ell_0\n param['tau_0'] = self.tau_0\n param['d'] = self.data.ravel() # d vector Nxt\n self._param = param\n # we set non-computed attributes to None\n # that way, we know if these attributess are already computed or need to be computed\n keys = ['MAP','samples','samples_stats', 'I_MAP', 'logZ', \n '_theta_MAP','_H_MAP','_H_MAP_D','_H_MAP_R', '_thetas']\n for key in keys: setattr(self, key, None)",
"def optimize_hyperparameters(benchmark_model, num_of_cylces=10, epochs=1):\n\n config_list = []\n\n log_path = benchmark_model.model_dir\n\n learning_rate_range = [0.0005, 0.0015]\n learning_momentum_range = [0.81, 0.99]\n weight_decay_range = [0.00007, 0.00014]\n\n hyperparameter_dict = {\"lr\": learning_rate_range, \"lm\": learning_momentum_range, \"wd\": weight_decay_range}\n\n # Sets the certain values to the specified config's values.\n config_hpo = OptimizeHyperparametersConfig()\n config_hpo.IMAGES_PER_GPU = benchmark_model.config.IMAGES_PER_GPU\n config_hpo.NUM_CLASSES = benchmark_model.config.NUM_CLASSES\n\n benchmark_model.config.STEPS_PER_EPOCH = config_hpo.STEPS_PER_EPOCH\n benchmark_model.config.NAME = \"Benchmark\"\n\n model_hpo = benchmark_model\n\n for index in range(num_of_cylces):\n\n \"\"\"Train the model.\"\"\"\n # Training dataset.\n dataset_train = ImageDetectionDataset()\n dataset_train.load_private(args.dataset, \"train\")\n dataset_train.prepare()\n\n # Validation dataset\n dataset_val = ImageDetectionDataset()\n dataset_val.load_private(args.dataset, \"val\")\n dataset_val.prepare()\n\n print(\"************\")\n print(\"Name:\", model_hpo.config.NAME)\n print(\"lr:\", model_hpo.config.LEARNING_RATE)\n print(\"lm:\", model_hpo.config.LEARNING_MOMENTUM)\n print(\"wd:\", model_hpo.config.WEIGHT_DECAY)\n print(\"\")\n\n print(\"Training network heads of\", index)\n\n model_hpo.train(dataset_train, dataset_val,\n learning_rate=config.LEARNING_RATE,\n epochs=epochs,\n layers='heads')\n\n history = model_hpo.history\n loss = history.history['loss']\n loss_config_name = (loss, model_hpo.config, model_hpo.config.NAME)\n config_list.append(loss_config_name)\n\n config_hpo.set_params(hyperparameter_dict, index)\n\n print(\"Training\", model_hpo.config.NAME, \"Successful\\n********************************************************\"\n , \"\\nThe total loss was:\", loss)\n\n model_hpo = MaskRCNN(mode=\"training\", config=config_hpo,\n model_dir=log_path)\n\n print(\"Now training\", model_hpo.config.NAME, \"\\n\\n\")\n\n opt_hyperparameters = config_list[0]\n for c in config_list:\n loss, con, name = c\n print(\"Name:\", name, \"\\nLoss:\", loss, \"\\nConfig:\", con, \"\\n***********************************************\\n\\n\")\n if loss < opt_hyperparameters[0]:\n opt_hyperparameters = c\n print(\"The optimal hyperparameters are approximately\", opt_hyperparameters[1])\n print(\"With a loss of\", opt_hyperparameters[0])\n print(\"The config was\", opt_hyperparameters[2])",
"def update(self, examples):\n\n batch_X, batch_y = get_prodigy_x_y(examples, self.cat2bin)\n\n if len(batch_X) != 0:\n # Update if the \n self.training_X = self.training_X + batch_X\n self.training_y = self.training_y + batch_y\n\n # Refit with collated old training data with new\n self.vectorizer = TfidfVectorizer(\n analyzer='word',\n token_pattern=r'(?u)\\b\\w+\\b',\n ngram_range=(1, 2)\n )\n train_X_vect = self.vectorizer.fit_transform(self.training_X)\n \n self.model = LogisticRegression(max_iter=1000)\n self.model = self.model.fit(train_X_vect, self.training_y)\n\n new_y_pred = self.model.predict(train_X_vect)\n test_y_pred = self.model.predict(self.vectorizer.transform(self.test_X))\n\n train_f1 = f1_score(self.training_y, new_y_pred, average='weighted')\n self.test_f1 = f1_score(self.test_y, test_y_pred, average='weighted')\n print(f\"Training F1: {round(train_f1, 3)}\")\n print(f\"Test F1: {round(self.test_f1, 3)}\")\n print(\"Train classification report:\")\n print(classification_report(self.training_y, new_y_pred))\n print(\"Test classification report:\")\n print(classification_report(self.test_y, test_y_pred))\n print(\"Test confusion:\")\n print(confusion_matrix(self.test_y, test_y_pred))",
"def InitializeMetaParameters(self):\n\n\n\t\t#To set Meta Parameters, as done in the paper.\n\t\t#Note:- \n\t\t#\tself.MiscParamList == [eta, tau_squared, sigma2, nu_1, nu_2]\n\n\n\t\twith torch.no_grad():\n\n\t\t\t#For MiscParamList\n\t\t\ttrain_pred = self.Model(self.TrainData[:,:self.D_in])\n\t\t\ttrain_truth = self.TrainData[:,self.D_in:]\n\t\t\teta = np.log( np.mean(np.var( np.array(train_pred - train_truth) )) )\n\t\t\ttau_squared = np.exp(eta)\n\t\t\tsigma_squared = 25\n\t\t\tnu_1 = 0\n\t\t\tnu_2 = 0\n\n\t\t\tself.MiscParamList = [eta, tau_squared, sigma_squared, nu_1, nu_2]\n\n\t\t\t#For CurrentPriorProb, Note that we entered the list of current model weights.\n\t\t\tself.CurrentPriorProb, _ = self.PriorLikelihood(self.MiscParamList, list(self.Model.state_dict().values()) )\n\n\t\t\t#For CurrentLikelihoodProb\n\t\t\tself.CurrentLikelihoodProb, _ = self.Likelihood(self.MiscParamList, list(self.Model.state_dict().values()) )",
"def optimise_parameters(classifiers, train):\n\n ps = \\\n [\n {\n # 'C': np.arange(15, 30, 0.5),\n 'C': [\n 0.1,\n 0.5,\n 1,\n 2,\n 4,\n 8,\n 16,\n 32,\n 64,\n 128,\n 256,\n 512\n ],\n 'kernel':\n [\n 'linear',\n 'poly',\n 'rbf'\n ]\n },\n # {\n # 'solver': [\"lbfgs\", \"sgd\", \"adam\"],\n # \"learning_rate\": [\"constant\", \"invscaling\", \"adaptive\"],\n # \"activation\": [\"identity\", \"logistic\", 'tanh', \"relu\"],\n # \"hidden_layer_sizes\": [\n # (500, 250, 100, 10),\n # (600, 400, 200, 100, 50, 10),\n # (8, 5, 2),\n # (50, 20, 10, 2),\n # (100, 50, 20, 10, 5, 2),\n # (10, 10, 10, 10, 10, 10, 10, 10, 10, 10)\n # ]\n # },\n {\n 'n_estimators': [\n 110, 120, 130, 140, 150, 160, 170, 180, 190,\n ],\n },\n {\n 'n_neighbors':\n [\n 10, 20, 30, 40, 50, 60, 70, 80, 90, 100,\n 110, 120, 130, 140, 150, 160, 170, 180, 190,\n 200, 210, 220, 230, 240, 250\n ],\n 'weights': ['uniform', 'distance'],\n 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'],\n 'metric': ['euclidean', 'minkowski', 'manhattan']\n }\n ]\n\n i = 0\n\n b_params = ['', '', '', '']\n f1_scorer = make_scorer(f1_score, pos_label=1)\n\n print(train.drop([\"headline\", \"origin\", \"truth\"], axis=1))\n\n while i < len(classifiers):\n grid = GridSearchCV(\n classifiers[i], param_grid=ps[i], cv=5, verbose=3, scoring=f1_scorer)\n grid.fit(train.drop([\"headline\", \"origin\", \"truth\"], axis=1).values, train['truth'].values)\n scores = grid.best_score_\n best_parameters = grid.best_estimator_.get_params()\n param_list = ''\n for param_name in sorted(ps[i].keys()):\n param_list += '\\t%s: %r\\n' % (param_name,\n best_parameters[param_name])\n\n b_params[i] = '%s\\nBest score: %0.3f \\nBest parameters set: %s' % (\n scores, grid.best_score_, param_list)\n\n i += 1\n\n for pars in b_params:\n print(pars)",
"def Use_YOLOv4(self, weights_path:str = None, classes_path:str = None, input_shape:int = 640,\n iou = 0.45, score = 0.25):\n \n if weights_path is None:\n path = 'xtreme_vision/weights/yolo.weights'\n if os.path.isfile(path):\n print('Found Existing weights file...\\nLoading existing file...')\n self.weights_path = path\n else:\n print('Downloading weights file...\\nPlease wait...')\n self.weights_path = tf.keras.utils.get_file('yolo.weights',\n 'https://github.com/Adeel-Intizar/Xtreme-Vision/releases/download/1.0/yolov4.weights',\n cache_subdir = 'weights/', cache_dir = 'xtreme_vision')\n else: \n if os.path.isfile(weights_path):\n self.weights_path = weights_path\n else:\n raise FileNotFoundError (\"Weights file doesn't exist at provided path. Please provide valid path.\")\n\n if classes_path is None:\n path = 'xtreme_vision/weights/coco.names'\n if os.path.isfile(path):\n print('Found Existing Classes File...\\nLoading Existing File...')\n self.yolo_classes = path\n else:\n print('Downloading Classes File...\\nPlease wait...')\n self.yolo_classes = tf.keras.utils.get_file('coco.names',\n 'https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names',\n cache_subdir = 'weights/', cache_dir = 'xtreme_vision')\n else:\n if os.path.isfile(classes_path):\n self.yolo_classes = classes_path\n else:\n raise FileNotFoundError (\"Classes File Doesn't Exist at Provided Path. Please Provide Valid Path.\")\n\n self.input_shape = input_shape\n self.iou = iou\n self.score = score\n \n self.model = YOLOv4()\n self.model.load_model(self.weights_path, self.yolo_classes, self.input_shape)\n self.modelLoaded = True\n self.modelType = 'yolo'",
"def coarseParams() -> retval:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Processing parameters commonly used for training PPYoloE on COCO dataset.
|
def default_ppyoloe_coco_processing_params() -> dict:
    image_processor = ComposeProcessing(
        [
            ReverseImageChannels(),
            DetectionRescale(output_shape=(640, 640)),
            NormalizeImage(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
            ImagePermute(permutation=(2, 0, 1)),
        ]
    )
    params = dict(
        class_names=COCO_DETECTION_CLASSES_LIST,
        image_processor=image_processor,
        iou=0.65,
        conf=0.5,
    )
    return params
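As a quick reference for the NormalizeImage step, a small hedged sketch of channel-wise standardization with those constants, assuming it operates on an HWC image in RGB order with 0-255 values (the real class may differ in layout details).

import numpy as np

_MEAN = np.array([123.675, 116.28, 103.53], dtype=np.float32)
_STD = np.array([58.395, 57.12, 57.375], dtype=np.float32)


def normalize_image(image_rgb: np.ndarray) -> np.ndarray:
    # Channel-wise (x - mean) / std; these constants are the usual ImageNet RGB
    # statistics scaled to the 0-255 range.
    return (image_rgb.astype(np.float32) - _MEAN) / _STD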
|
[
"def process_epidemic_parameters(self):",
"def default_yolox_coco_processing_params() -> dict:\n\n image_processor = ComposeProcessing(\n [\n ReverseImageChannels(),\n DetectionLongestMaxSizeRescale((640, 640)),\n DetectionBottomRightPadding((640, 640), 114),\n ImagePermute((2, 0, 1)),\n ]\n )\n\n params = dict(\n class_names=COCO_DETECTION_CLASSES_LIST,\n image_processor=image_processor,\n iou=0.65,\n conf=0.1,\n )\n return params",
"def setup_params(self):\n params = {}\n\n \"\"\"\n train_params defines the training parameters consisting of \n - the data provider that reads the data, preprocesses it and enqueues it into\n the data queue\n - the data queue that batches and if specified shuffles the data and provides \n the input to the model\n - other configuration parameters like the number of training steps\n It's arguments are\n data_params: defines how the data is read in.\n queue_params: defines how the data is presented to the model, i.e.\n if it is shuffled or not and how big of a batch size is used.\n targets: the targets to be extracted and evaluated in the tensorflow session\n num_steps: number of training steps\n thres_loss: if the loss exceeds thres_loss the training will be stopped\n validate_first: run validation before starting the training\n \"\"\"\n\n params['inter_op_parallelism_threads'] = 500\n\n params['train_params'] = {\n 'data_params': {\n # ImageNet data provider arguments\n 'func': Combine_world,\n 'cfg_dataset': self.Config.datasets,\n 'group': 'train',\n 'crop_size': self.Config.crop_size,\n # TFRecords (super class) data provider arguments\n 'file_pattern': 'train*.tfrecords',\n 'batch_size': 1, #self.Config.batch_size,\n 'shuffle': False,\n 'shuffle_seed': self.Config.seed,\n 'file_grab_func': self.subselect_tfrecords,\n 'n_threads': 1,#sum(self.Config.datasets.values()),\n },\n 'queue_params': {\n 'queue_type': 'random',\n 'batch_size': self.Config.batch_size,\n 'seed': self.Config.seed,\n 'capacity': self.Config.batch_size * 10,\n 'min_after_dequeue': self.Config.batch_size * 5,\n },\n 'targets': {\n 'func': self.return_outputs,\n 'targets': [],\n },\n 'num_steps': self.Config.train_steps,\n 'thres_loss': self.Config.thres_loss,\n 'validate_first': False,\n }\n\n \"\"\"\n validation_params similar to train_params defines the validation parameters.\n It has the same arguments as train_params and additionally\n agg_func: function that aggregates the validation results across batches,\n e.g. to calculate the mean of across batch losses\n online_agg_func: function that aggregates the validation results across\n batches in an online manner, e.g. to calculate the RUNNING mean across\n batch losses\n \"\"\"\n \"\"\"\n params['validation_params'] = {\n 'topn_val': {\n 'data_params': {\n # ImageNet data provider arguments\n 'func': ImageNetDataProvider,\n 'data_path': self.Config.data_path,\n 'group': 'val',\n 'crop_size': self.Config.crop_size,\n # TFRecords (super class) data provider arguments\n 'file_pattern': 'validation*.tfrecords',\n 'batch_size': self.Config.batch_size,\n 'shuffle': False,\n 'shuffle_seed': self.Config.seed,\n 'file_grab_func': self.subselect_tfrecords,\n 'n_threads': 4,\n },\n 'queue_params': {\n 'queue_type': 'fifo',\n 'batch_size': self.Config.batch_size,\n 'seed': self.Config.seed,\n 'capacity': self.Config.batch_size * 10,\n 'min_after_dequeue': self.Config.batch_size * 5,\n },\n 'targets': {\n 'func': self.in_top_k,\n },\n 'num_steps': self.Config.val_steps,\n 'agg_func': self.agg_mean, \n 'online_agg_func': self.online_agg_mean,\n }\n }\n \"\"\"\n params['validation_params'] = {}\n \"\"\"\n model_params defines the model i.e. the architecture that \n takes the output of the data provider as input and outputs \n the prediction of the model.\n\n You will need to EDIT alexnet_model in models.py. alexnet_model \n is supposed to define a standard AlexNet model in tensorflow. \n Please open models.py and fill out the missing parts in the alexnet_model \n function. 
Once you start working with different models you will need to\n switch out alexnet_model with your model function.\n \"\"\"\n params['model_params'] = {\n 'func': self.Config.ytn.inference,\n }\n\n \"\"\"\n loss_params defines your training loss.\n\n You will need to EDIT 'loss_per_case_func'. \n Implement a softmax cross-entropy loss. You can use tensorflow's \n tf.nn.sparse_softmax_cross_entropy_with_logits function.\n \n Note: \n 1.) loss_per_case_func is called with\n loss_per_case_func(inputs, outputs)\n by tfutils.\n 2.) labels = outputs['labels']\n logits = outputs['pred']\n \"\"\"\n def loss_wrapper(inputs, outputs):\n # coco\n predicts = outputs['bboxes']\n gt_boxes = tf.reshape(tf.cast(outputs['boxes'], tf.int32), [self.Config.batch_size, -1, 5])\n num_objects = outputs['num_objects']\n coco_loss, _, _ = self.Config.ytn.loss(predicts, gt_boxes, num_objects)\n # imagenet\n labels = outputs['labels']\n logits = outputs['logits']\n imagenet_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)\n print(imagenet_loss, coco_loss)\n return imagenet_loss + coco_loss\n \n params['loss_params'] = {\n 'targets': ['labels'],\n 'agg_func': tf.reduce_mean,\n 'loss_per_case_func': loss_wrapper,\n 'loss_per_case_func_params' : {'_outputs': 'outputs', \n '_targets_$all': 'inputs'},\n 'loss_func_kwargs' : {},\n }\n\n \"\"\"\n learning_rate_params defines the learning rate, decay and learning function.\n\n You will need to EDIT this part. Replace the exponential decay \n learning rate policy with a piecewise constant learning policy.\n ATTENTION: \n 1.) 'learning_rate', 'decay_steps', 'decay_rate' and 'staircase' are not\n arguments of tf.train.piecewise_constant! You will need to replace\n them with the appropriate keys. \n 2.) 'func' passes global_step as input to your learning rate policy \n function. Set the 'x' argument of tf.train.piecewise_constant to\n global_step.\n 3.) set 'values' to [0.01, 0.005, 0.001, 0.0005] and\n 'boundaries' to [150000, 300000, 450000] for a batch size of 256\n 4.) You will need to delete all keys except for 'func' and replace them\n with the input arguments to \n \"\"\"\n \n params['learning_rate_params'] = {\t\n 'func': tf.train.exponential_decay,\n 'learning_rate': 0.001,\n 'decay_steps': 5000, # FIX LATER,\n 'decay_rate': 0.95,\n 'staircase': True,\n }\n\n \"\"\"\n optimizer_params defines the optimizer.\n\n You will need to EDIT the optimizer class. Replace the Adam optimizer\n with a momentum optimizer after switching the learning rate policy to\n piecewise constant.\n \"\"\"\n params['optimizer_params'] = {\n 'func': optimizer.ClipOptimizer,\n 'optimizer_class': tf.train.AdamOptimizer,\n 'clip': False,\n }\n\n \"\"\"\n save_params defines how, where and when your training results are saved\n in the database.\n\n You will need to EDIT this part. Set your 'host' (set it to 'localhost',\n or to IP if using remote mongodb), 'port' (set it to 24444, unless you \n have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'. \n \"\"\"\n params['save_params'] = {\n 'host': '35.199.154.71 ',\n 'port': 24444,\n 'dbname': 'final',\n 'collname': 'yolo',\n 'exp_id': 'combined_fix',\n 'save_valid_freq': 10000,\n 'save_filters_freq': 5000,\n 'cache_filters_freq': 5000,\n 'save_metrics_freq': 200,\n 'save_initial_filters' : False,\n 'save_to_gfs': [],\n }\n\n \"\"\"\n load_params defines how and if a model should be restored from the database.\n\n You will need to EDIT this part. 
Set your 'host' (set it to 'localhost',\n or to IP if using remote mongodb), 'port' (set it to 24444, unless you \n have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'. \n\n If you want to restore your training these parameters should be the same \n as in 'save_params'.\n \"\"\"\n params['load_params'] = {\n 'host': '35.199.154.71 ',\n 'port': 24444,\n 'dbname': 'final',\n 'collname': 'yolo',\n 'exp_id': 'imagenet',\n 'do_restore': True,\n 'load_query': None,\n }\n\n return params",
"def coarseParams() -> retval:\n ...",
"def locomotion():\n params = defaults()\n params['epochs'] = 312\n params['max_ep_len'] = 1000\n params['steps_per_epoch'] = 32 * 1000\n params['vf_lr'] = 3e-4 # default choice is Adam\n return params",
"def initialize_parameters(self):\n\n self.n_inputs = len(self.df.columns[:-1])\n self.n_hidden_per_layer = 3\n self.n_hidden = 2\n self.n_outputs = len(self.df.Class.unique()) if self.c_t == \"classification\" else 1\n self.learning_rate = .07\n self.epochs = 3\n self.momentum_factor = .5\n self.performance = 0",
"def precompile_process():\r\n SystemParam.MODEL = \"Heisenberg\"\r\n #SystemParam.MODEL= \"Ising\"\r\n SystemParam.SYMMETRY = \"Z2\"\r\n SystemParam.USE_CUSTOM_RANDOM = False\r\n SystemParam.USE_REFLECTION = False\r\n SystemParam.NUM_OF_THREADS = None\r\n SystemParam.only_NN = True\r\n SystemParam.only_NNN = False",
"def _init_param(self): \n \n param={}\n param['y'] = self.x_oris if self.FixedOris else self.x \n param['u'] = self.t\n for s in ['y','u']: param['N'+s] = len(param[s])\n param['sigma_d'] = self.sigma_d\n param['sigma_0'] = self.sigma_0\n param['m_0'] = np.log10(self.I_0)\n param['ell_0'] = self.ell_0\n param['tau_0'] = self.tau_0\n param['d'] = self.data.ravel() # d vector Nxt\n self._param = param\n # we set non-computed attributes to None\n # that way, we know if these attributess are already computed or need to be computed\n keys = ['MAP','samples','samples_stats', 'I_MAP', 'logZ', \n '_theta_MAP','_H_MAP','_H_MAP_D','_H_MAP_R', '_thetas']\n for key in keys: setattr(self, key, None)",
"def InitializeMetaParameters(self):\n\n\n\t\t#To set Meta Parameters, as done in the paper.\n\t\t#Note:- \n\t\t#\tself.MiscParamList == [eta, tau_squared, sigma2, nu_1, nu_2]\n\n\n\t\twith torch.no_grad():\n\n\t\t\t#For MiscParamList\n\t\t\ttrain_pred = self.Model(self.TrainData[:,:self.D_in])\n\t\t\ttrain_truth = self.TrainData[:,self.D_in:]\n\t\t\teta = np.log( np.mean(np.var( np.array(train_pred - train_truth) )) )\n\t\t\ttau_squared = np.exp(eta)\n\t\t\tsigma_squared = 25\n\t\t\tnu_1 = 0\n\t\t\tnu_2 = 0\n\n\t\t\tself.MiscParamList = [eta, tau_squared, sigma_squared, nu_1, nu_2]\n\n\t\t\t#For CurrentPriorProb, Note that we entered the list of current model weights.\n\t\t\tself.CurrentPriorProb, _ = self.PriorLikelihood(self.MiscParamList, list(self.Model.state_dict().values()) )\n\n\t\t\t#For CurrentLikelihoodProb\n\t\t\tself.CurrentLikelihoodProb, _ = self.Likelihood(self.MiscParamList, list(self.Model.state_dict().values()) )",
"def process(self, training_example: dict) -> None:",
"def init_parameters(self):\r\n self.guessed_fishes_dict = {}\r\n self.train_index = 0",
"def _update_trainable_params(self):\n self._trainable_params = list(self._par_info)",
"def test_ppo_loss_function(self):\n config = (\n ppo.PPOConfig()\n .environment(\"CartPole-v1\")\n .rollouts(\n num_rollout_workers=0,\n )\n .training(\n gamma=0.99,\n model=dict(\n fcnet_hiddens=[10],\n fcnet_activation=\"linear\",\n vf_share_layers=True,\n ),\n _enable_learner_api=False,\n )\n .rl_module(_enable_rl_module_api=False)\n )\n\n for fw, sess in framework_iterator(config, session=True):\n algo = config.build()\n policy = algo.get_policy()\n\n # Check no free log std var by default.\n if fw == \"torch\":\n matching = [\n v for (n, v) in policy.model.named_parameters() if \"log_std\" in n\n ]\n else:\n matching = [\n v for v in policy.model.trainable_variables() if \"log_std\" in str(v)\n ]\n assert len(matching) == 0, matching\n\n # Post-process (calculate simple (non-GAE) advantages) and attach\n # to train_batch dict.\n # A = [0.99^2 * 0.5 + 0.99 * -1.0 + 1.0, 0.99 * 0.5 - 1.0, 0.5] =\n # [0.50005, -0.505, 0.5]\n train_batch = compute_gae_for_sample_batch(\n policy, CARTPOLE_FAKE_BATCH.copy()\n )\n if fw == \"torch\":\n train_batch = policy._lazy_tensor_dict(train_batch)\n\n # Check Advantage values.\n check(train_batch[Postprocessing.VALUE_TARGETS], [0.50005, -0.505, 0.5])\n\n # Calculate actual PPO loss.\n if fw == \"tf2\":\n PPOTF2Policy.loss(policy, policy.model, Categorical, train_batch)\n elif fw == \"torch\":\n PPOTorchPolicy.loss(\n policy, policy.model, policy.dist_class, train_batch\n )\n\n vars = (\n policy.model.variables()\n if fw != \"torch\"\n else list(policy.model.parameters())\n )\n if fw == \"tf\":\n vars = policy.get_session().run(vars)\n expected_shared_out = fc(\n train_batch[SampleBatch.CUR_OBS],\n vars[0 if fw != \"torch\" else 2],\n vars[1 if fw != \"torch\" else 3],\n framework=fw,\n )\n expected_logits = fc(\n expected_shared_out,\n vars[2 if fw != \"torch\" else 0],\n vars[3 if fw != \"torch\" else 1],\n framework=fw,\n )\n expected_value_outs = fc(\n expected_shared_out, vars[4], vars[5], framework=fw\n )\n\n kl, entropy, pg_loss, vf_loss, overall_loss = self._ppo_loss_helper(\n policy,\n policy.model,\n Categorical if fw != \"torch\" else TorchCategorical,\n train_batch,\n expected_logits,\n expected_value_outs,\n sess=sess,\n )\n if sess:\n policy_sess = policy.get_session()\n k, e, pl, v, tl = policy_sess.run(\n [\n policy._mean_kl_loss,\n policy._mean_entropy,\n policy._mean_policy_loss,\n policy._mean_vf_loss,\n policy._total_loss,\n ],\n feed_dict=policy._get_loss_inputs_dict(train_batch, shuffle=False),\n )\n check(k, kl)\n check(e, entropy)\n check(pl, np.mean(-pg_loss))\n check(v, np.mean(vf_loss), decimals=4)\n check(tl, overall_loss, decimals=4)\n elif fw == \"torch\":\n check(policy.model.tower_stats[\"mean_kl_loss\"], kl)\n check(policy.model.tower_stats[\"mean_entropy\"], entropy)\n check(policy.model.tower_stats[\"mean_policy_loss\"], np.mean(-pg_loss))\n check(\n policy.model.tower_stats[\"mean_vf_loss\"],\n np.mean(vf_loss),\n decimals=4,\n )\n check(policy.model.tower_stats[\"total_loss\"], overall_loss, decimals=4)\n else:\n check(policy._mean_kl_loss, kl)\n check(policy._mean_entropy, entropy)\n check(policy._mean_policy_loss, np.mean(-pg_loss))\n check(policy._mean_vf_loss, np.mean(vf_loss), decimals=4)\n check(policy._total_loss, overall_loss, decimals=4)\n algo.stop()",
"def set_fittable_parameters(p, model, fpn):\n for i, param_name in enumerate(fpn):\n param = getattr(model, param_name)\n param.value = p[i]",
"def load_base_preprocessor_params(self):\n self.preprocessor_params = {\n 'cat': {\n 'columns': ['Shape', 'Cut', 'Color', 'Clarity', 'Polish', 'Symmetry', 'Fluorescence', 'Culet'],\n 'imputer_strategy': 'most_frequent',\n 'encoder_type': 'Ordinal',\n 'tune_params': None,\n },\n 'num': {\n 'columns': ['Carat', 'Depth', 'Table', 'L/W'],\n 'imputer_strategy': 'median',\n 'scaler_type': 'Standard',\n 'tune_params': None,\n },\n 'date': {\n 'split_cols': ['First Available Date'],\n 'delta_types': ['deliver_days', 'in_stock_days'],\n 'imputer_strategy': None,\n },\n }",
"def setCRFparams(self, crfParams):\n #weight vector for node features\n self.unaryWeights = crfParams['unaryWeights']\n \n #weight vector for edge features\n self.binaryWeights = crfParams['binaryWeights']\n \n #minimal epsilon weight for a feature\n self.epsWeight = crfParams['epsWeight']\n #training hyperparameters\n self.regNorm = crfParams['regNorm']\n self.regLambda = crfParams['regLambda']\n self.omega = crfParams['omega']",
"def parameters_updated(self):\n self.calculate_variables()\n termination = self.detect_termination()\n if termination is None:\n self.request_estimation()\n self.monitor_progress()\n else:\n self.callback.plp_terminated(termination)",
"def set_params(self, **values):\n pc, pe = {}, {}\n for k, v in values.items():\n if k.startswith('e_'):\n pe[k[2:]] = v\n elif k.startswith('c_'):\n pc[k[2:]] = v\n else:\n raise ValueError( # pragma: no cover\n f\"Unexpected parameter name '{k}'\")\n self.clus.set_params(**pc)\n self.estimator.set_params(**pe)",
"def _update_trainable_params(self):\n self._trainable_params = set(self._par_info)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the processing parameters for a pretrained model.
|
def get_pretrained_processing_params(model_name: str, pretrained_weights: str) -> dict:
if pretrained_weights == "coco":
if "yolox" in model_name:
return default_yolox_coco_processing_params()
elif "ppyoloe" in model_name:
return default_ppyoloe_coco_processing_params()
elif "yolo_nas" in model_name:
return default_yolo_nas_coco_processing_params()
if pretrained_weights == "coco_pose" and model_name in ("dekr_w32_no_dc", "dekr_custom"):
return default_dekr_coco_processing_params()
if pretrained_weights == "imagenet" and model_name in {"vit_base", "vit_large", "vit_huge"}:
return default_vit_imagenet_processing_params()
if pretrained_weights == "imagenet":
return default_imagenet_processing_params()
return dict()
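A minimal usage sketch (illustrative only; the model name below is hypothetical, and the default_*_processing_params helpers are assumed to be importable from the same module as the function above):

# Hypothetical lookup: COCO processing params for a YOLO-NAS variant.
params = get_pretrained_processing_params("yolo_nas_s", "coco")
# Any model/weight combination not covered above falls back to an empty dict.
assert get_pretrained_processing_params("some_other_model", "custom_weights") == {}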
|
[
"def get_preprocess(self) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n load_from_file = self.model_cfg.data.test.pipeline[0]\n model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = model_cfg.data.test.pipeline\n preprocess[0] = load_from_file\n return preprocess",
"def get_premort_params(self):\n\n return self.Np, self.mup, self.kp",
"def get_preprocess(self, *args, **kwargs) -> Dict:\n pass",
"def get_model_params(self):\n print(self.current_model.get_params())",
"def get_preprocess(self, *args, **kwargs) -> Dict:\n input_shape = get_input_shape(self.deploy_cfg)\n model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n pipeline = model_cfg.test_dataloader.dataset.pipeline\n meta_keys = [\n 'filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',\n 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg',\n 'valid_ratio'\n ]\n transforms = [\n item for item in pipeline if 'Random' not in item['type']\n and 'Annotation' not in item['type']\n ]\n for i, transform in enumerate(transforms):\n if transform['type'] == 'PackTextRecogInputs':\n meta_keys += transform[\n 'meta_keys'] if 'meta_keys' in transform else []\n transform['meta_keys'] = list(set(meta_keys))\n transform['keys'] = ['img']\n transforms[i]['type'] = 'Collect'\n if transform['type'] == 'Resize':\n transforms[i]['size'] = transforms[i].pop('scale')\n\n data_preprocessor = model_cfg.model.data_preprocessor\n transforms.insert(-1, dict(type='DefaultFormatBundle'))\n transforms.insert(\n -2,\n dict(\n type='Pad',\n size_divisor=data_preprocessor.get('pad_size_divisor', 1)))\n transforms.insert(\n -3,\n dict(\n type='Normalize',\n to_rgb=data_preprocessor.get('bgr_to_rgb', False),\n mean=data_preprocessor.get('mean', [0, 0, 0]),\n std=data_preprocessor.get('std', [1, 1, 1])))\n return transforms",
"def get_model_dict(self, model, models):\n return models[str(model)]['parms']",
"def get_parameters(self):\n\n data = self.trappars_model.trappars_table\n par0 = data[\"capture0\"].copy()\n par1 = data[\"capture1\"].copy()\n par2 = data[\"capture2\"].copy()\n par3 = data[\"decay_param\"].copy()\n\n return par0, par1, par2, par3",
"def parse_model_parameters(self):\n\t self.modelType = self.model['ModelType']\n\n\t if self.modelType == 'SolidMechanicsModel':\n\t self.modelParameters = self.model['Parameters']\n\t self.AnalysisMethod = self.modelParameters['AnalysisMethod']\n\t self.timeStep = self.modelParameters['TimeStep']\n\t self.searchKeys.append('AnalysisMethod', 'TimeStep')",
"def get_param_computation(self):\n return self._gram_computation.get_param_computation(keep_backpack_buffers=True)",
"def model_parameters(self):\n return SerializationTool.serialize_model(self._model)",
"def preprocessing(self):\n # type: () -> DolbyDigitalPreprocessing\n return self._preprocessing",
"def castep_input_parameters(self):\n return self.node.inputs.parameters.get_dict()",
"def get_parameters(self):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n return self.network.parameters()",
"def get_best_performing_model_params():\n model_params = {'max_vocab_size': float('inf'),\n 'min_frequency': 1,\n 'input_rep': 1,\n 'embedding_dimension': 100,\n 'num_of_layers': 2,\n 'output_dimension': 0,\n 'pretrained_embeddings_fn': 'glove.6B.100d.txt',\n 'data_fn': 'trainTestData' + path_separator + 'en-ud-train.upos.tsv'}\n return model_params",
"def parallax(self):\n if self.bjones:\n return self._attributes['bjones_par']\n else:\n return self._attributes.get('PAR', 0*u.mas)",
"def load_model_params(self):\n try:\n batch_table = grid_tools.load_model_table(self.batch, source=self.source)\n except FileNotFoundError:\n try:\n grid_table = grid_tools.load_grid_table('params', source=self.source)\n batch_table = grid_tools.reduce_table(grid_table,\n params={'batch': self.batch})\n except FileNotFoundError:\n self.print_warn('Model parameter table not found. '\n 'Has the source grid been analysed yet?')\n return\n\n model_row = grid_tools.reduce_table(batch_table, params={'run': self.run})\n params_dict = model_row.to_dict(orient='list')\n\n for key, value in params_dict.items():\n params_dict[key] = value[0]\n\n self.model_params = params_dict",
"def get_params(self):\n if not self.fitted:\n raise ValueError(\n \"Model's parameters have not been fit yet, please call the `fit()` function first\"\n )\n\n params = dict(\n zip(\n [\"attack_\" + team for team in self.teams]\n + [\"defence_\" + team for team in self.teams]\n + [\"home_advantage\", \"rho\", \"rue_salvesen\"],\n self._res[\"x\"],\n )\n )\n return params",
"def get_params(self):\n self._check_parameter_completeness()\n return self._fitted_params",
"def load_config():\n import json\n\n processing_params = config.DEFAULT_PROCESSING_PARAMS\n\n return processing_params"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initializes the class with a test_name, a config file, a results database, and a file_handle_object. Test_name should correspond to a directory with the name 'test_name'.
|
def __init__(self, test_name, config_file,
results_database, data_path=PERFORMERNAME,
file_handle_object=None):
self.__resultsdb = results_database
self.__config_file_lines = config_file.read().split("\n")
self.__test_name = test_name
self.__fho = file_handle_object
if not self.__fho:
self.__fho = fho.FileHandleObject()
# initialize the testname, params, circuit and input directory names:
self.__data_path = data_path
self.__testfile_dir_name = os.path.join(self.__data_path, "testfile")
self.__params_dir_name = os.path.join(self.__data_path, "keyparams")
self.__circuit_dir_name = os.path.join(self.__data_path, "circuit")
self.__input_dir_name = os.path.join(self.__data_path, "input")
self.__log_dir_name = os.path.join(self.__data_path, "logs")
# make the testname, params, circuit and input folders:
self.__fho.create_dir(self.__testfile_dir_name)
self.__fho.create_dir(self.__params_dir_name)
self.__fho.create_dir(self.__circuit_dir_name)
self.__fho.create_dir(self.__input_dir_name)
self.__fho.create_dir(self.__log_dir_name)
# create the map which maps line to line handler:
self.__line_to_handler = {"test_type": self.__handle_test_type,
"K": self.__handle_k,
"L": self.__handle_l,
"D": self.__handle_d,
"W": self.__handle_w,
"num_levels": self.__handle_num_levels,
"num_circuits": self.__handle_num_circuits,
"num_inputs": self.__handle_num_inputs,
"generate": self.__handle_generate,
"seed": self.__handle_seed}
# stores the latest param recorded, in order to detect changes:
self.__latest_params = None
# set all of the parameters to None:
self.__seed = None
self.__K = None
self.__L = None
self.__D = None
self.__W = None
self.__num_levels = None
self.__num_circuits = None
self.__num_inputs = None
self.__sec_param_id = None
self.__circuit_id = None
self.__input_id = None
self.__test_type = None
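A hypothetical sketch of how the __line_to_handler map built above could drive parsing of the config lines (the real dispatch method is not part of this snippet; the "key = value" line format is an assumption):

def __parse_config_lines(self):
    # Hypothetical dispatcher: each non-empty config line is assumed to be
    # of the form "K = 80"; the key selects a handler from the map built in
    # __init__, and the handler receives the raw value string.
    for line in self.__config_file_lines:
        line = line.strip()
        if not line:
            continue
        key, _, value = line.partition("=")
        handler = self.__line_to_handler.get(key.strip())
        if handler is not None:
            handler(value.strip())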
|
[
"def __init__(self, test_name, data=None):\n\n self.test_name = test_name\n self.json_file = os.path.join(config.benchmark_location,\n self.test_name + '.json')\n if data is None:\n self.data = self.get()\n else:\n self.data = data",
"def __init__(self, project_name=''):\n super(TxtReporter, self).__init__()\n logger.debug('enter TxtReporter.__init__')\n self.project_name = project_name\n self.report_file = ''\n self.report_pipe = ''\n\n self.start_time = ''\n self.end_time = 'NOT END YET'\n\n self.testresults = []\n self.suiteresults = []\n self.passed = 0\n self.failed = 0\n self.timeout = 0\n self.notrun = 0\n self.error = 0\n self.unknown = 0\n self.extra_message = '\\nExtra Notes:\\n'\n\n self.header = 'HEADER:\\n'\n self.summary = 'SUMMARY:\\n'\n self.body = 'BODY:\\n'\n self.extra = 'EXTRA:\\n'\n self.pre_body = 'PRE_BODY:\\n'\n\n logger.debug('exit TxtReporter.__init__')",
"def __init__(self, argument_list, path):\n\n global _the_qmtest\n \n _the_qmtest = self\n \n # Use the stadard stdout and stderr streams to emit messages.\n self._stdout = sys.stdout\n self._stderr = sys.stderr\n\n\t# Initialize a common NexTest execution log file\n self.nextestlog = '/tmp/nextest.log'\n## os.system('echo > %s' % self.nextestlog )\n self.log = logging.getLogger('nextestlog')\n if len(self.log.handlers) == 0: # first time creation\n hdlr = logging.FileHandler('/tmp/nextest.log')\n fmtr = logging.Formatter(\n '%(asctime)s %(module)s %(lineno)s %(levelname)s %(message)s')\n hdlr.setFormatter(fmtr)\n self.log.addHandler(hdlr)\n self.log.setLevel(logging.ERROR)\n\n\t##\n\t## Fix for Ticket 12186. Based on Python Documentation 'KeyboardInterrupt' \n\t## is received by an arbitrary thread but signal is always received by \n\t## main thread.\n\t\n\tself._assignSignalhandlers()\n\t\n # Build a trace object.\n self.__tracer = Tracer()\n\n # Build a command-line parser for this program.\n self.__parser = qm.cmdline.CommandParser(\n \"qmtest\",\n self.global_options_spec,\n self.commands_spec,\n self.conflicting_option_specs)\n # Parse the command line.\n components = self.__parser.ParseCommandLine(argument_list)\n # Unpack the results.\n ( self.__global_options,\n self.__command,\n self.__command_options,\n self.__arguments\n ) = components\n\n # If available, record the path to the qmtest executable.\n self.__qmtest_path = path\n \n # We have not yet loaded the database.\n self.__database = None\n # We have not yet computed the set of available targets.\n self.targets = None\n \n # The result stream class used for results files is the pickling\n # version.\n self.__file_result_stream_class_name \\\n = \"pickle_result_stream.PickleResultStream\"\n # The result stream class used for textual feed back.\n self.__text_result_stream_class_name \\\n = \"text_result_stream.TextResultStream\"\n # The result stream class used for HTML feed back.\n self.__html_result_stream_class_name \\\n = \"html_result_stream.HtmlResultStream\"\n # The expected outcomes have not yet been loaded.\n self.__expected_outcomes = None",
"def __init__(self, kim_code = None, pair = None, results = None, search=True):\n\n if pair and kim_code:\n raise SyntaxWarning, \"TestResult should have a pair, or a kim_code or neither, not both\"\n\n if pair:\n test, model = pair\n result = test.result_with_model(model)\n kim_code = result.kim_code\n\n else:\n if not kim_code:\n kim_code = database.new_test_result_id()\n search = False\n\n super(TestResult,self).__init__(kim_code,search=search)\n\n if not self.exists and not search:\n #If this TR doesn't exist and we have search off, create it\n self.create_dir()\n\n self.results = PersistentDict(os.path.join(self.path,self.kim_code),format='yaml')\n #if we recieved a json string, write it out\n if results:\n logger.debug(\"Recieved results, writing out to %r\", self.kim_code)\n\n if isinstance(results,dict):\n #we have a dict\n incoming_results = results\n else:\n #if it is a json string try to convert it\n try:\n incoming_results = simplejson.loads(results)\n except TypeError:\n #wasn't convertable\n raise PipelineResultsError, \"Could not understand the format of the results: {}\".format(results)\n\n #also move all of the files\n ### added these two lines, they're dumb\n self.results.update(incoming_results)\n incoming_results = self.results\n\n testname = incoming_results[\"test-extended-id\"]\n\n files = template.files_from_results(incoming_results)\n if files:\n logger.debug(\"found files to move\")\n testdir = Test(testname).path\n for src in files:\n logger.debug(\"copying %r over\", src)\n shutil.copy(os.path.join(testdir,src),self.path)\n\n self.results.update(incoming_results)\n self.results.sync()\n logger.info(\"Results created in %r\", self.kim_code)\n\n try:\n self.test = Test(self.results[\"test-extended-id\"])\n except KeyError:\n self.test = None\n try:\n self.model = Model(self.results[\"model-extended-id\"])\n except KeyError:\n self.model = None",
"def setUp(self):\n self.setUpPyfakefs()\n\n self.fs.CreateFile('/empty_file', contents='')\n\n self._test_contents = 'Hello, World! This is a test file.'\n self._file_size = len(self._test_contents)\n self.fs.CreateFile('/hello_world', contents=self._test_contents)",
"def init_test_results_db(self):\n\n dsn = self._mh.ext_cfg['Yoda']['db_results_dsn'].format(var_dir=syscfg.HTK_VAR_DIR)\n dmsg(self._mh._trn.msg('yoda_test_results_db_init', dsn))\n trdb = TestResultsDB(dsn)\n if trdb.db_check_ok() == False:\n raise Exception(\n self._mh._trn.msg('yoda_test_results_db_check_fail', dsn))\n else:\n dmsg(self._mh._trn.msg('yoda_test_results_db_check_ok', dsn))\n self._test_engine.test_results_db = trdb",
"def __init__(self, command_line_args=None):\n command_line_args = command_line_args or sys.argv[1:]\n\n runner_action, test_path, test_runner_args, other_opts = parse_test_runner_command_line_args(command_line_args)\n \n self.setup_logging(other_opts)\n \n runner = TestRunner(**test_runner_args)\n\n bucket_overrides = {}\n if other_opts.bucket_overrides_file:\n bucket_overrides = get_bucket_overrides(other_opts.bucket_overrides_file)\n\n try:\n runner.discover(test_path, bucket=other_opts.bucket, bucket_count=other_opts.bucket_count, bucket_overrides=bucket_overrides)\n except test_discovery.DiscoveryError, e:\n self.log.error(\"Failure loading tests: %s\", e)\n sys.exit(1)\n\n if runner_action == ACTION_LIST_SUITES:\n runner.list_suites()\n sys.exit(0)\n elif runner_action == ACTION_LIST_TESTS:\n runner.list_tests()\n sys.exit(0)\n elif runner_action == ACTION_RUN_TESTS:\n result = runner.run()\n sys.exit(not result)",
"def __init__(self,filename=None):\n if filename:\n self.dbFilename = filename",
"def __init__(self):\n self.db = self._read_db()\n self._setup_dirs()",
"def __init__(self):\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--config\", help=\"the name of the configuration file to use\")\r\n parser.add_argument(\"--log\", help=\"the name of the folder for storing log files\")\r\n\r\n try:\r\n args = parser.parse_args()\r\n except argparse.ArgumentError:\r\n print(\"Argument error. Exiting program.\")\r\n exit(1)\r\n except:\r\n print(\"Exiting program.\")\r\n exit(1)\r\n\r\n self.config = args.config\r\n self.log = args.log",
"def setUp(self):\n super(ExampleProcessTestCase, self).setUp()\n self.files_path = TEST_FILES_DIR",
"def setUp(self):\n self.convert = Convert()\n self.create_csv_test_file(self.TESTS_DATA)",
"def __init__(self, filename='', mode='a'):\n with open(filename, mode) as myfile:\n self.filehandle = myfile\n self.filename = filename",
"def setUp(self, *args, **kwargs):\n copy_for_test = self.make_copy(self.simple_asdf_file)\n self.fh: WeldxFile = WeldxFile(copy_for_test, *args, **kwargs)",
"def __init__(self, template_dir, title, endpoint, result_template=\"result_default.html\", maker=set_result):\n self._tr = abspath(template_dir.rstrip(\"/\"))\n self._title = title\n self._endpoint = endpoint\n self._html = open(self._tr + \"/result/\" + result_template, \"r\").read()\n self.results = []\n self._maker = maker",
"def __init__(self,\n aTaskHandler = None,\n aRunSlowTests = True,\n aRunSkipTests = True,\n aListOfTests = None,\n aStartID = \"\"):\n self.mTaskHandler = aTaskHandler\n self.mRunSlowTests = aRunSlowTests\n self.mRunSkipTests = aRunSkipTests\n self.mListOfTests = aListOfTests\n self.mRunningTestID = aStartID\n self.mStarted = (aStartID == \"\")\n self.mPreviousURIRef = None\n self.mPreviousImageRef = None\n self.mTestsExecuted = 0\n self.mNumberOfTests = 0\n self.mTests = []",
"def __init__(self, outDir= '.', resultsDbAddress=None, verbose=False):\n # Connect to database\n # for sqlite, connecting to non-existent database creates it automatically\n if resultsDbAddress is None:\n # Check for output directory, make if needed.\n if not os.path.isdir(outDir):\n os.makedirs(outDir)\n self.resultsDbAddress = 'sqlite:///' + os.path.join(outDir, 'resultsDb_sqlite.db')\n else:\n self.resultsDbAddress = resultsDbAddress\n engine = create_engine(self.resultsDbAddress, echo=verbose)\n Session = sessionmaker(bind=engine)\n self.session = Session()\n # Create the tables, if they don't already exist.\n try:\n Base.metadata.create_all(engine)\n except DatabaseError:\n raise ValueError(\"Cannot create a database at %s. Check directory exists.\" %(resultsDbAddress))\n\n self.metricObjs = None",
"def __init__(self,kim_code,*args,**kwargs):\n super(TestDriver,self).__init__(kim_code,*args,**kwargs)\n self.executable = os.path.join(self.path, cf.TEST_EXECUTABLE)",
"def setUp(self):\n self.parser = driver.setup_arg_parser()\n # We need to provide initial minimal args, but will change per test\n self.args = self.parser.parse_args(['ingest', '-d', 'null.sqlite',\n 'null.json'])\n self.created_db = \"fake.sqlite\"\n self.temp_parser = argparse.ArgumentParser(prog='sina_tester',\n description='A software package to process '\n 'data stored in the sina_model format.',\n formatter_class=argparse.RawTextHelpFormatter)\n self.subparsers = self.temp_parser.add_subparsers(\n title='subcommands', help='Available sub-commands.', dest='subparser_name')\n self.temp_subparser = self.subparsers.add_parser('eat', help='eat some food.')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles a new randomness seed appropriately.
|
def __handle_seed(self, randseed):
sr.seed(int(randseed))
|
[
"def update_random_seed(self):\n iseed = self.run_card['iseed']\n if iseed == 0:\n randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'))\n iseed = int(randinit.read()[2:]) + 1\n randinit.close()\n randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w')\n randinit.write('r=%d' % iseed)\n randinit.close()",
"def set_seed(self, random_seed=None):\n if random_seed is None:\n self.cnt[0] += 1\n if self.cnt[1] > 0 and self.cnt[0] % self.cnt[1] == 0:\n if self.cnt[0] <= 0 or self.cnt[1] <= 1:\n if not hasattr(self, \"rng\"):\n self.rng = default_rng(choice(1e18, 1).item())\n self.random_seed[0] = self.rng.integers(1e18)\n if self.cnt[1] > 1:\n self.random_seed[1] = self.rng.integers(1e18)\n else:\n self.random_seed[0] = self.random_seed[1]\n self.random_seed[1] = self.rng.integers(1e18)\n else:\n self.rng = default_rng(random_seed)\n self.random_seed[0] = self.rng.integers(1e18)\n if self.cnt[1] > 1:\n self.random_seed[1] = self.rng.integers(1e18)\n self.cnt[0] = 0",
"def seed_rng(self, seed):\n self._index = seed if seed is not None else 0",
"def test_newrand_set_seed(seed, expected):\n set_seed_wrap(seed)\n generated = bounded_rand_int_wrap(100)\n assert generated == expected",
"def seed_random_state(self,seed):\n if (seed is None) or (isinstance(seed, int)):\n return np.random.RandomState(seed)\n elif isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError(\"%r can not be used to generate numpy.random.RandomState\"\n \" instance\" % seed)",
"def setSeed(self):\n self.seed = random.Random().random()",
"def initial_seed() -> int:\n return default_generator.initial_seed()",
"def initialize(seed = None):\n \n random.seed(seed)",
"def initial_seed():\n return default_generator.initial_seed()",
"def seed(self, a):\n assert(len(a) == 6)\n self._current_seed = a\n super().seed(a)",
"def _set_seed(self, seed):\n BARTNoising.set_random_seed(seed)",
"def reset_seed(cls):\n cls.seed = 144",
"def reset(self):\n self.RNG = np.random.RandomState(self.seed)",
"def _reset_random_seed():\n current_time = time.time() * 1e8\n\n np.random.seed(\n int(current_time % (2 ** 32 - 1))\n )",
"def seed(self, seed):\n self._env.seed(seed)",
"def _init_random_number_generators(seed=None):\n # Seed Python random (None as seed is okay), then use it to seed the others.\n random.seed(seed)\n if seed is None:\n seed = random.randint(0, 2**31 - 1)\n logging.info('using seed %d', seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n return jax_random.get_prng(seed)",
"def rand():\r\n global rand_seed\r\n rand_seed = (MULTIPLIER * rand_seed + INCREMENT)\r\n return (rand_seed >> 16) & 0x7FFF",
"def random_state(self, state):\n pass",
"def seeded_random(self, seed):\n assert isinstance(seed, float), \\\n 'seeded_random expects seed to be a floating point'\n\n if 0.0 < seed > 1.0:\n raise ValueError(\n 'seeded_random expects a floating point from 0.0 to 1.0'\n )\n\n if connection.vendor == 'postgresql':\n # The Postgres setseed seems to be session bound, but i could not\n # confirm this. I did some simple testing myself with sleep and\n # different sessions did not seem to interfere with eachother.\n\n # The Postgres implementation uses a seperate query to set the\n # internal seed for Postgres' random number generator.\n cursor = connection.cursor()\n cursor.execute('SELECT setseed({});'.format(seed))\n cursor.close()\n\n return self.order_by('?')\n\n elif connection.vendor == 'mysql':\n # Mysql uses an integer as the seed\n seed = int(seed * 1000)\n\n # The Mysql implementation adds an extra part to the queryset.\n return self.extra(\n select={'random_ordering': \"rand(%s)\"},\n select_params=(seed,),\n order_by=['random_ordering']\n )\n\n raise NotImplementedError(\n \"No seeded random implemented for database backend '{}'\".format(\n connection.vendor\n )\n )",
"def init(self,):\r\n self.random_seed_ = self.random_state\r\n self.random_state_ = check_random_state(self.random_seed_)\r\n return self"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles a new test type appropriately.
|
def __handle_test_type(self, test_type):
self.__test_type = igf.TEST_TYPES.value_to_number[test_type]
|
[
"def test_tool_types_create(self):\n pass",
"def set_testtype(self, name):\n self.testID['TESTTYPE'] = name",
"def test_instantiating_a_new_type_returns_expected_type():\n NewType = make_type(int, \"NewType\", [numeric.Minimum(0), numeric.Maximum(10)])\n instance = NewType(5)\n assert isinstance(instance, NewType)\n assert isinstance(instance, int)",
"def test_instance(self):\n self.assertIsInstance(self.newtest, Amenity)",
"def test_ticket_type_add_ok(self):\n self.execute('ticket_type add new_type')\n rv, output = self.execute('ticket_type list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)",
"def test_type_object_creation(self):\n\t\trestaurant_type = Type.objects.create(name=\"Test Restaurant Type\")\n\t\tself.assertIs(isinstance(restaurant_type, Type), True)\n\t\tself.assertEqual(restaurant_type.__str__(), restaurant_type.name)",
"def test_create_type_no_parent(self, app):\n\n with app.app_context():\n conn = get_connection(current_app)\n\n name = 'Book'\n desc = 'A physical or digital book'\n resp = conn.create_type(name, desc)\n\n assert type(resp) == LtpType\n assert str(resp.name) == name\n assert str(resp.description) == desc",
"def test_control_create(self):\r\n\r\n self.assertTrue(isinstance(self.DUT, Control))",
"def test_create_account_type_using_post(self):\n pass",
"def test_make_type_returns_a_new_type_that_is_a_subclass_of_the_base_type(type_):\n new = make_type(type_, \"NewType\", [])\n assert new != type_\n assert issubclass(new, type_)",
"def newType(self, article):\n\tself.nWithNewTypes += 1\n\tt = article.type\n\tif not self.newTypes.has_key(t):\n\t self.newTypes[t] = []\n\tself.newTypes[t].append(article.pmid)",
"def test_new(self):\n self.assertIsNone(submission.Submission('0000'))",
"def test_create(self):\n self.assertIsInstance(self.obj, State)",
"def test_change_adapter_type_success(self, init_type, new_type):\n\n faked_cpc = self.faked_cpc\n faked_adapter = self.add_ficon_fe6sp(faked_cpc)\n\n # Set the desired initial adapter type for the test\n faked_adapter.properties['type'] = init_type\n\n adapter_mgr = self.cpc.adapters\n adapter = adapter_mgr.find(name=faked_adapter.name)\n\n if new_type == init_type:\n with pytest.raises(HTTPError) as exc_info:\n\n # Execute the code to be tested\n adapter.change_adapter_type(new_type)\n\n exc = exc_info.value\n assert exc.http_status == 400\n assert exc.reason == 8\n else:\n\n # Execute the code to be tested.\n adapter.change_adapter_type(new_type)\n\n act_type = adapter.get_property('type')\n assert act_type == new_type",
"def create_test_volume_type(self, name=None, **kwargs):\n volume_type_rand_prefix = uuid.uuid4().hex\n # name = name or data_utils.rand_name(\n # volume_type_rand_prefix + '-volumetype')\n name = name or (volume_type_rand_prefix + '-volumetype')\n LOG.info(\"volume_type name prefix is %s\", volume_type_rand_prefix)\n volume_type = \\\n self.manager.volume_types_v2_client.create_volume_type(name,\n **kwargs)\n self.volume_types.append(volume_type)\n return volume_type",
"def test_add_object(self):\n pass",
"def test_tool_types_update(self):\n pass",
"def __init__(self, specs, test_class: Type[RacketTest] = RacketTest) -> None:\n super().__init__(specs, test_class)",
"def test_post_metric_type(self, request):\n self.database.datamodels.find_one.return_value = dict(\n _id=\"id\",\n metrics=dict(new_type=dict(addition=\"sum\", target=\"0\", near_target=\"1\", tags=[], sources=[\"source_type\"])))\n request.json = dict(type=\"new_type\")\n self.assertEqual(dict(ok=True), post_metric_attribute(\"report_uuid\", \"metric_uuid\", \"type\", self.database))\n self.database.reports.insert.assert_called_once_with(self.report)",
"def create(self, validated_data):\n\t\treturn Type.objects.create(**validated_data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles a new num_levels appropriately.
|
def __handle_num_levels(self, num_levels):
self.__num_levels = int(num_levels)
|
[
"def npl_changed(self, value):\n self.levels_new = value",
"def __set_levels(self):\n # only really needs to be called after all adding is done\n # max complexity is number of vertices\n\n i = 0\n for vertex in self.__graph_dict:\n if len(self.__in_graph_dict[vertex]) == 1:\n vertex.set_level(level=0)\n # edit is source\n for neighbor in self.__graph_dict[vertex]:\n\n if neighbor == vertex:\n continue\n if vertex.get_level() + 1 > neighbor.get_level():\n neighbor.set_level(vertex.get_level() + 1)\n if vertex.get_level() + 1 > self.__max_level:\n self.__max_level = vertex.get_level() + 1\n\n if self.__max_level == len(self.__graph_dict.keys()) - 1:\n break\n i += 1",
"def set_levels(self):\n\n for m in self.assets.keys():\n\n m_dict = self.assets[m]\n\n if \"levels\" in m_dict.keys():\n pass\n elif \"unique\" in m_dict.keys() and m_dict[\"unique\"]:\n self.assets[m][\"levels\"] = 0\n else:\n self.assets[m][\"levels\"] = 3",
"def new_level_game(self):\n self.canvas.delete(ALL)\n parameters = load.load_level(self.number_of_current_level) # Argument - number of the level\n self.current_level = Level(self.canvas, parameters[0], parameters[1],\n parameters[2], parameters[3], parameters[4])\n self.bind_all()\n self.game_tic()",
"def create_levels(levels):\n if levels is None:\n return\n logging.info(\"Found %s game level(s)\" % levels.get(\"count\"))\n for index, level_elem in enumerate(levels):\n # GameLevel 0 is created automatically by the bootstrap\n try:\n number = get_child_text(level_elem, \"number\")\n\n if number == \"0\" or GameLevel.by_number(number) is None:\n if number != \"0\":\n game_level = GameLevel()\n else:\n game_level = GameLevel.by_id(0)\n if game_level is None:\n game_level = GameLevel()\n game_level.number = number\n game_level.name = get_child_text(level_elem, \"name\")\n game_level.type = get_child_text(level_elem, \"type\")\n game_level.reward = get_child_text(level_elem, \"reward\", 0)\n game_level.buyout = get_child_text(level_elem, \"buyout\", 0)\n dbsession.add(game_level)\n else:\n logging.info(\"GameLevel %d already exists, skipping\" % int(number))\n except:\n logging.exception(\"Failed to import game level #%d\" % (index + 1))\n dbsession.flush()\n game_levels = GameLevel.all()\n for index, game_level in enumerate(game_levels):\n if index + 1 < len(game_levels):\n game_level.next_level_id = game_levels[index + 1].id\n logging.info(\"%r -> %r\" % (game_level, game_levels[index + 1]))\n dbsession.add(game_level)\n dbsession.commit()",
"def update_var_levels(view, edit, line, amount=+1):\n match = __level__.match(view.substr(line))\n if not match:\n return\n start = match.start(1)\n end = match.end(1)\n level_string = match.group(1)\n new_level = int(level_string, base=10) + amount\n if new_level < 1:\n new_level = 1\n new_level_string = str(new_level)\n level_region = sublime.Region(line.begin() + start, line.begin() + end)\n view.replace(edit, level_region, new_level_string)",
"def test_get_levels(self):\n pass",
"def set_new_level(self, level):\r\n\r\n self.property_set(\"level\",\r\n Sample(0, int(level), unit=\"%\"))",
"def newLevel(self):\n self.currentlevel += 1\n # if there is a file for the next level, load it\n if os.path.isfile(os.path.join('levels', 'level%d.level'%self.currentlevel)):\n self.loadLevel(self.currentlevel)\n self.paddle.reset()\n self.ball.reset()\n self.isReset = True\n\n # no file, show win message\n else:\n # game over! render a game over message and stop the game\n font = pygame.font.Font(None, 50) # load the default font, size 50\n endmessage = font.render(\"You Win!\", True, (255,150,80))\n endmessagerect = endmessage.get_rect()\n endmessagerect.center = (260, 135)\n\n # blit it on the background and flip (render) the display one last time\n self.window.blit(endmessage, endmessagerect)\n pygame.display.flip()\n\n # turn off all the gameplay\n self.playing = False\n\n # handle showing and inputing high scores\n self.handleHighScores()",
"def update(self, num_moves):\n idx = self.index_level\n\n if idx > self.last_level():\n self.scores[self.current_pack]['last_level'] = idx\n\n lev = self.scores[self.current_pack]['levels']\n while len(lev) < idx+1:\n lev.append(None)\n\n if lev[idx] is None or lev[idx] > num_moves:\n lev[idx] = num_moves\n\n self.save()",
"def update_level(self, level):\n # crazy high intial values so that the new value is always lower\n num_events = 100_000\n newIdx = 100_000\n for (plotobj, plotopts) in zip(self.plotobjs, self.plotopts):\n nn, _newIdx = plotobj.plot(self.index, getNumEvents=True, **plotopts)\n num_events = min(num_events, nn)\n newIdx = min(newIdx, _newIdx)\n self.numEvents = num_events\n self.currentIndex.setText(str(newIdx))\n self.updateIndex()",
"def test_create_level(self):\n pass",
"def setLevel(self):\n\t\tself.level = int(floor(sqrt(self.xp)))",
"def advance_level(self, level):\n self.curr_level += 1\n self.init_map(level)\n self.init_character()\n self.turn = 0",
"def _get_number_of_alpha_levels(self):\n return self._number_of_alpha_levels",
"def increase_level(self, levels, reason, victim=0, delay=False):\n if GunGameStatus.MATCH is not GunGameMatchStatus.ACTIVE:\n return\n if not isinstance(levels, int) or levels < 1:\n raise ValueError(\n 'Invalid value given for levels \"{levels}\".'.format(\n levels=levels,\n )\n )\n old_level = self.level\n new_level = old_level + levels\n if new_level > weapon_order_manager.max_levels:\n with GG_Win() as event:\n event.attacker = event.winner = self.userid\n event.userid = event.loser = victim\n return\n self.level = new_level\n if self.level != new_level:\n return\n self.multi_kill = 0\n if delay:\n Delay(\n delay=0,\n callback=self._fire_level_up,\n args=(victim, old_level, new_level, reason)\n )\n else:\n self._fire_level_up(victim, old_level, new_level, reason)",
"def update_level(self):\n\n # Since we obviously do not update the level of a leaf, the if self.leaf condition\n # can be omitted.\n if self.r_child is None:\n # Every node that is not a leaf has at least a left child, in case it does not\n # have a right child, the node's level is the increment by 1 of the level of\n # its left child.\n self.level = self.l_child.level + 1\n\n else:\n # In case the node has both children, it takes the increment by 1 of the\n # minimum level. The reason is that when the tree evolves by adding new\n # leaves, this node will eventually have its children change until reaching\n # the mentioned minimum level.\n self.level = min(self.l_child.level, self.r_child.level) + 1",
"def loadNextLevel(self):\n self.loadLevel(self.levelnumber + 1)",
"def monitor_level(self):\n\n # Update consecutive and total variables\n if self._correct:\n self._consec_right += 1\n self._consec_wrong = 0\n self._total_right += 1\n\n # Dynamically increase level\n if (self._consec_right == 3) and (self._max_bound != self._max_level):\n self._max_bound += 1\n self._consec_right = 0\n else:\n self._consec_wrong += 1\n self._consec_right = 0\n self._total_wrong += 1\n \n # Dynamically decrease level\n if (self._consec_wrong == 3) and (self._max_bound != self._start_max):\n self._max_bound -= 1\n self._consec_wrong = 0\n\n # Go home or carry on depending on selected game mode\n if (not self._correct and self._begun_unlimited):\n self._entry_win.go_home()\n else:\n self._entry_win.update_top_level()",
"def hold_levels():\n level = [[\n \"WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW\",\n \"W W\",\n \"W WWWWWWWWWWWWWWWWWWWWWWWWWWWWW W\",\n \"W W W W\",\n \"W W W W\",\n \"W W IIIIIIIIIIIIIIIIIIIIIII W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W I I W W\",\n \"W W IIIIIIIIIIIIIIIIIIIIIII W W\",\n \"W W W W\",\n \"W W W W\",\n \"W WWWWWWWWWWWWWWWWWWWWWWWWWWWWW W\",\n \"W W\",\n \"WWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWWW\",\n ]]\n print level\n return level"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles a new number of circuits appropriately.
|
def __handle_num_circuits(self, num_circuits):
self.__num_circuits = int(num_circuits)
|
[
"def __make_circuits(self):\n # update the params if needed:\n self.__handle_new_params()\n # make self.__num_circuits circuits:\n for circuit_num in xrange(self.__num_circuits):\n # generate a random circuit:\n if self.__test_type == igf.TEST_TYPES.RANDOM:\n gen = igf.TEST_TYPE_TO_GENERATOR_BY_DEPTH[igf.TEST_TYPES.RANDOM]\n circ = gen(self.__L, self.__D, self.__W)\n else:\n gen = igf.TEST_TYPE_TO_GENERATOR_BY_LEVEL[self.__test_type]\n circ = gen(self.__L, self.__num_levels, self.__W)\n self.__write_circuit(circ)\n # for each circuit, make self.__num_inputs inputs:\n for input_num in xrange(self.__num_inputs):\n # generate a random input:\n inp = igf.make_random_input(self.__L, self.__W)\n self.__write_input(inp)",
"def _circuit_handler(event):\n if not event.build_flags or 'IS_INTERNAL' not in event.build_flags:\n if event.id == self._cid:\n probe.circs.append(event)\n if self._circuit_built.is_set():\n if event.status in ('FAILED', 'CLOSED'):\n self._circuit_finished.set()\n if not self._circuit_built.is_set():\n if event.status in ('FAILED', 'BUILT'):\n self._circuit_built.set()\n elif event.status == 'LAUNCHED' and not self._cid:\n self._cid = event.id\n probe.circs.append(event)\n self._manager.circ_launched.release()",
"def handle_new_client(client):\n if (len(playing_clients) < max_num_of_players):\n initialize_game_for_client(client)\n elif (len(waitlist) < size_of_wait_list):\n waitlist.append(client)\n send_acceptance_status(client,\"waitlist\")\n else:\n send_acceptance_status(client,\"reject\")",
"def push_new_block():\n for idx, client in viewitems(stratum_clients):\n try:\n logger.debug(\"Signaling new block for client {}\".format(idx))\n client.new_block_event.set()\n except AttributeError:\n pass",
"def count_switches(self):\n self.nswitches += 1",
"def _do_add_nodes(self, count, cloud):\n #if m_type in ['small', 'medium'] and cloud=='default':\n # cloud = \"cloud.amsterdam.\"+m_type\n startCloud = self._init_cloud(cloud)\n\n self.logger.info(str(self.controller.get_clouds()))\n vals = { 'action': '_do_add_nodes', 'count': count }\n self.logger.debug(self.ACTION_REQUESTING_NODES % vals)\n node_instances = self.controller.create_nodes(count, \n client.check_agent_process, self.AGENT_PORT, startCloud)\n\n # Startup agents\n for node in node_instances:\n self.logger.info(\"Adding node %s: \" % (node.id))\n\n client.create_node(node.ip, self.AGENT_PORT, self.hub_ip)\n self.logger.info(\"Added node %s: %s \" % (node.id, node.ip))\n node_info.add_node_info('/etc/hosts', node.ip, node.id)\n\n m_type = self.config_parser.get(cloud, 'INST_TYPE')\n worker = Worker(node.id, node.ip, node.private_ip, node.cloud_name, m_type)\n self.service.add_worker(worker, int(node.id))\n\n self.logger.info(str(self.service))\n self.nodes += node_instances\n self.state = self.S_RUNNING\n if m_type in self.pool:\n self.pool[m_type]+=count\n else:\n self.pool[m_type]=count",
"def create_dummy_changes(self, number = 5):\n for i in range(number):\n r = Resource(uri=\"a\"+str(i), timestamp=1234.0*i) \n ce = ChangeEvent(random.choice(['create', 'update', 'delete']), r)\n self.changememory.notify(ce)",
"def __init__(self, num_selectors,\n label_text = [],\n label_template = \"Channel\",\n button_text = [],\n button_template = \"Port\",\n buttons = 1,\n title=\"MultiSwitch\"):\n super(MultiSelectorForm, self).__init__()\n self.num_selectors = num_selectors\n self.label_text = label_text\n self.label_template = label_template\n self.button_template = button_template\n if button_text:\n self.button_text = button_text\n else:\n self.button_text = [\"\"]*buttons\n self.title=title\n self.state = {}\n\n self.signal = SignalMaker()",
"def handle(self, *args, **kwargs):\n no_of_clients = kwargs['total']\n if no_of_clients < 1:\n self.stdout.write('Can not create -ive or zero number of clients.')\n\n else:\n existing_emails = Client.objects.all().values_list('email', flat=True)\n existing_phones = Client.objects.all().values_list('phone_number', flat=True)\n\n name_set = self.get_n_unique_names(no_of_clients)\n email_set = self.get_n_unique_emails(n=no_of_clients, existing_emails=existing_emails)\n phone_set = self.get_n_unique_numbers(n=no_of_clients, existing_phones=existing_phones)\n\n client_list = []\n\n for i in range(no_of_clients):\n client = Client(**{\n 'name': name_set[i],\n 'age': 30,\n 'gender': 'M',\n 'phone_number': phone_set[i],\n 'email': email_set[i],\n 'address': self.get_random_address(length=15)\n })\n client_list.append(client)\n\n Client.objects.bulk_create(client_list)\n\n # Adding measurements\n dummy_clients = Client.objects.filter(email__endswith=DUMMY_EMAIL_MARKER)\n for client in dummy_clients:\n male_measurements = MaleMeasurements.objects.create(**{\n 'client': client,\n 'unit': 'cm',\n 'shoulder': 12,\n 'armscye': 12,\n 'chest': 12,\n 'bust': 12,\n 'waist': 12,\n 'arm_length': 12,\n 'hips': 12,\n 'ankle': 12,\n 'neck': 12,\n 'back_width': 12,\n 'inseam': 12,\n 'wrist': 12,\n 'crutch_depth': 12,\n 'waist_to_knee': 12,\n 'knee_line': 12,\n 'biceps': 12,\n\n })\n male_measurements.save()\n\n self.stdout.write('Dummy clients created successfully with measurements.')",
"def cap_inrease(self,number):\r\n if number == 1:\r\n self.current_capacity += 1\r\n elif number == 2:\r\n self.service_two_capacity += 1\r\n elif number == 3:\r\n self.service_three_capacity += 1\r\n elif number == 4:\r\n self.service_four_capacity += 1\r\n elif number == 5:\r\n self.service_five_capacity += 1",
"def __call__(self, circ):\n print \" my circuit is in progress\", circ.id\n self.attacher.waiting_circuits.append((circ.id, self.d,\n self.stream_cc))",
"def test_split_circuits(self):\n max_circs = self.fake_api_backend.configuration().max_experiments\n\n circs = []\n for _ in range(max_circs+2):\n circs.append(self._qc)\n job_set = self._jm.run(circs, backend=self.fake_api_backend)\n job_set.results()\n statuses = job_set.statuses()\n\n self.assertEqual(len(statuses), 2)\n self.assertTrue(all(s is JobStatus.DONE for s in statuses))\n self.assertTrue(len(job_set.jobs()), 2)",
"def connectionMade(self):\n print \"IPC: New connection from\",self.transport.getPeer()\n\n if (len(self.factory.clients) >= 1):\n self.output(self.REPLY_BUSY, \" Too many connections. Aborting.\")\n self.transport.loseConnection()\n print \"IPC: Too many connections. Disconnecting...\"\n return\n\n self.factory.clients.append(self)\n self.peer = self.transport.getPeer()\n self.output(self.REPLY_BANNER,\"RADAC IPC server\")\n self.lastcommand = None\n self.lastparams = None\n self.sayReady()",
"def setup_clientbranches(n, addrs, lock_list, st_list):\n Clients = [Process(target=inf_client_branch, args=(addrs[i],lock_list[i],\n st_list[i])) for i in range(n)]\n for client in Clients :\n client.start()",
"def generate_circuit(config: Dict[str, Any]):\n print(\"-\" * 80)\n print(f\"Creating circuit number\")\n\n n_qubits = random.randint(config[\"min_n_qubits\"], config[\"max_n_qubits\"])\n n_ops = random.randint(config[\"min_n_ops\"], config[\"max_n_ops\"])\n\n if (config[\"strategy_program_generation\"] == \"uniform\" or\n config[\"strategy_program_generation\"] == \"weighted\"):\n gate_set = config[\"gate_set\"]\n if (config[\"strategy_program_generation\"] == \"uniform\"):\n for gate in gate_set.keys():\n gate_set[gate] = 1\n # generate a random circuit\n random_circuit_qasm_str = generate_randomly(\n n_qubits=n_qubits,\n n_ops=n_ops,\n gate_set=gate_set,\n random_state=np.random.RandomState(config[\"random_seed\"]))\n\n\n metadata_dict = {\n \"n_qubits\": n_qubits,\n \"n_ops\": n_ops,\n \"gate_set\": config[\"gate_set\"],\n \"strategy_program_generation\": config[\"strategy_program_generation\"]\n }\n\n print(f\"Saving circuit: with simulation results\")\n timestamp = int(time.time())\n qasm_file_name = config[\"program_id_pattern\"]\n qasm_file_name = \\\n qasm_file_name.replace(\"{{timestamp}}\", str(timestamp))\n qasm_file_name = \\\n qasm_file_name.replace(\"{{randint}}\", str(random.randint(0, 9999)).zfill(4))\n print(f\"qasm_file_name: {qasm_file_name}\")\n # get current timestamp as integer and use it as filename\n\n store_qasm(\n filename=qasm_file_name,\n qasm_content=random_circuit_qasm_str,\n out_folder=config[\"folder_generated_qasm\"],\n metadata_dict=metadata_dict\n )",
"def increment_served(self,new_serves):\r\n\t\tself.number_served += new_serves",
"def addNetworkResourceForScaleup(self, components):\n try:\n utility.execLog(\"\")\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.TemplatesObjects(\"addComponent\"))), action=\"CLICK\")\n time.sleep(2)\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.TemplatesObjects(\"scaleUpNetwork\"))), action=\"CLICK\")\n #self.handleEvent(EC.presence_of_element_located((By.XPATH,self.TemplatesObjects(\"dropdownToggle\"))),action=\"SELECT\",selectBy=\"VISIBLE_TEXT\",setValue=\"Network\")\n time.sleep(5)\n i = 0\n for item in components[\"select_network\"]:\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.TemplatesObjects(\"ddlAvailableNetworks\"))), action=\"SELECT\", setValue=item,selectBy=\"VISIBLE_TEXT\")\n utility.execLog(\"Selected Value %s for network dropdown on add network resource page\"%str(item))\n time.sleep(1)\n self.handleEvent(EC.element_to_be_clickable((By.ID,self.TemplatesObjects(\"btn_addavailablenetwork\"))), action=\"CLICK\")\n utility.execLog(\"Added the item selected\")\n time.sleep(2)\n utility.execLog(\"Selecting port group\")\n try:\n try:\n self.handleEvent(EC.element_to_be_clickable((By.classname, \"form-control ddlPortGroup\")), action=\"SELECT\", setValue=components[\"port_group\"])\n utility.execLog(\"Selected %s value from the Port Group dropdown\"%str(components[\"port_group\"]))\n except:\n utility.execLog(\"port Group %s is not available to select in dropdown\"%str(components[\"port_group\"]))\n self.handleEvent(EC.element_to_be_clickable((By.classname, \"form-control ddlPortGroup\")), action=\"SELECT\", setValue=\"New Port Group\")\n time.sleep(1)\n utility.execLog(\"Selected 'New Port Group' value from the dropdown\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, \"//*[contains(@id,'txtPortGroup')]\")), action=\"SET_TEXT\", setValue=\"NewPortGroup\")\n utility.execLog(\"Entered new port group value to 'NewPortGroup' \")\n except:\n utility.execLog(\"Flow is not VDS flow so Port Group is not available\") \n utility.execLog(\"Selecting resource for the added netwrok %s\"%str(item))\n #self.handleEvent(EC.element_to_be_clickable((By.XPATH, \"(//div[@class='dropdown resourcedropdown']/button)[%s]\"%str(i+1))), action=\"CLICK\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH,self.TemplatesObjects(\"selectResource\"))),action=\"CLICK\")\n utility.execLog(\"Select resource button clicked\")\n time.sleep(1)\n if \"all\" in components[\"select_resource\"][i].lower():\n utility.execLog(\"Adding both host and VM to the added network\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, +self.TemplatesObjects(\"selctHost\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"Host resource selected\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.TemplatesObjects(\"selcetVM\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"VM resource selected\")\n elif \"host\" in components[\"select_resource\"][i].lower():\n utility.execLog(\"Adding host to the added network\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, +self.TemplatesObjects(\"selctHost\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"Host resource selected\")\n elif \"vm\" in components[\"select_resource\"][i].lower():\n utility.execLog(\"Adding VM to the added network\")\n self.handleEvent(EC.element_to_be_clickable((By.XPATH, self.TemplatesObjects(\"selcetVM\")+\"[%s]\"%str(i+1))), action=\"CLICK\")\n utility.execLog(\"VM resource selected\")\n else:\n utility.execLog(\"select_resource value entered in 
json does not match any option\")\n i = i+1\n utility.execLog(\"Save the added networks\")\n self.handleEvent(EC.element_to_be_clickable((By.ID, self.TemplatesObjects(\"saveNetworkButton\"))), action=\"CLICK\")\n except Exception as e:\n utility.execLog(\"Error generated while adding network component(s) to scaleup service :: Error -> %s\"%(str(), str(e)))",
"def update_state(self):\n server_list = self.get_inventory()\n logger.debug(f\"Create handles for Ucsm Servers: { server_list }\")\n # we iterate over the range to be able to modify the list in the loop body\n rm_connections = set(self.handles.keys())\n for sid in range(len(server_list)):\n server = server_list[sid]\n active = self.update_handle(server)\n if not active:\n continue\n if server in rm_connections:\n rm_connections.remove(server)\n self.start_poll_thread(server)\n # refresh otherwise the handle gets stale\n\n # remove old connections no longer in server list\n for s in rm_connections:\n logger.info(f\"remove old server connection: { s }\")\n try:\n del self.handles[s]\n except: pass\n\n return self.handles",
"def __init__(self, count: 'unsigned int'):\n this = _coin.new_SbBarrier(count)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def compiler(circuit):\n #initialize an empty circuit with the same size of qubits and clbits \n decomposedCircuit = QuantumCircuit(circuit.num_qubits)\n if circuit.num_clbits >0:\n decomposedCircuit.add_register(ClassicalRegister(circuit.num_clbits))\n \n #extract the gates to compile them from the data list\n for item in circuit.data:\n \n #the gate object\n gate=item[0] \n \n #number of qubits of the gate\n numOfQubits=len(item[1]) \n \n #the indices that the gate applied on\n positions=[qubit.index for qubit in item[1]] \n \n #check if the gate is a single qubit gate\n if numOfQubits==1:\n #decompose the single qubit gate\n decomposition=oneQubitDecomppser(gate)\n #extract the decomposition gates from the received circuit\n gates=[item[0] for item in decomposition.data]\n #append each gate to the new circuit at the same position note: len(positions)=1 \"single qubit gate\"\n [decomposedCircuit.append(gate,positions) for gate in gates]\n \n #check if the gate is a two qubit gate\n elif numOfQubits==2:\n #decompose the gate\n decomposition=twoQubitDecomppser(gate)\n #extract the decomposition gates from the received circuit\n for item in decomposition.data:\n gate=item[0]\n if len(item[1])==2:\n #append each gate to the new circuit at the same positions note: len(positions)=2\n decomposedCircuit.append(gate,positions)\n else:\n #append a single qubit gate to the new circuit\n #get the index (0 or 1) means the gate is applied to the 1st qubit or the 2nd qubit from the positions list \n decomposedCircuit.append(gate,[positions[item[1][0].index]]) \n \n return decomposedCircuit"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles a new number of inputs appropriately.
|
def __handle_num_inputs(self, num_inputs):
self.__num_inputs = int(num_inputs)
|
[
"def input_count(self, input_count):\n\n self._input_count = input_count",
"def inputs_changed(self, inputs):\n if DEBUG:\n logger.info(\"* %s\" % binstring(inputs))\n self.inputs = inputs\n self.limits.check(inputs=self.inputs)",
"def ask_numbers():",
"def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)",
"def process_input():\n # Read number of test cases\n testcase_count = int(input())\n\n # Read testcase_count testcases\n testcases_list = []\n for i in range(0, testcase_count):\n s_max, audience = str(input()).split(' ', 2)\n testcases_list.append(Testcase(s_max, audience))\n return testcases_list",
"def input_button_clicked(self):\n if self.is_added:\n self.delete_from_main_layout() # if widgets already added - remove them\n self.is_added = False\n else:\n self.is_added = True\n values = self.get_data_from_widgets()\n args = self.set_dict(values)\n lab4.arg = args\n lab4.main()",
"def process_last_input(self, event): \n self.hide_popups()\n if len(self.last_inputs) > 20:\n del self.last_inputs[0]\n self.last_inputs.append(pygame.key.name(event.key))\n self.check_easter_eggs()",
"def _track_inputs(self):\n\n def _input_tracker(*args, **kwargs):\n if args:\n prompt = args[0]\n else:\n prompt = \"\"\n print(prompt)\n if self.inputs:\n value_entered = self.inputs.pop(0)\n else:\n # TODO: Make this smarter, more elegant in choosing IF we should repeat 0\n value_entered = '0'\n self.input_contexts[self.call_id].append(value_entered)\n return value_entered\n\n return _input_tracker",
"def _handleInput(self, paramInput, dimensionTags=None, dimTagsPrefix=None):",
"def input_data(self, inputs):\n for i, x in enumerate(inputs):\n self.activations[0][i] = x",
"def setinputsizes(self, sizes):\n\t\tself._inputsize = sizes",
"def make_input(self, *args, **kwargs):\r\n self.add(input.Input(*args, **kwargs))",
"def _handleInput(self, paramInput):\n pass",
"def _update_inputs(self, external_input=None):\n self.inputs = self._rand_generator.normal(self._input_noise_mean, self._input_noise_std, self._num_rec)\n if external_input is not None:\n self.inputs += external_input",
"def input_events(self):\n request = self.winfo\n requestType = request['RequestType']\n #if request is montecarlo or Step0, the numer of\n #input events is by the requsted events\n if requestType == 'MonteCarlo' or requestType == 'LHEStepZero':\n if 'RequestNumEvents' in request:\n if request['RequestNumEvents']>0:\n return request['RequestNumEvents']\n if 'RequestSizeEvents' in request:\n return request['RequestSizeEvents']\n else:\n return 0\n if requestType == 'TaskChain':\n return handleTaskChain(request)\n\n #if request is not montecarlo, then we need to check the size\n #of input datasets\n #This loops fixes the white and blacklists in the workflow\n #information,\n for listitem in [\"RunWhitelist\", \"RunBlacklist\",\n \"BlockWhitelist\", \"BlockBlacklist\"]:\n if listitem in request:\n #if empty\n if request[listitem]=='[]' or request[listitem]=='':\n request[listitem]=[]\n #if there is not a list but some elements it creates a list\n if type(request[listitem]) is not list:\n # if doesn't contain \"[\" is a single block\n if '[' not in request[listitem]:\n #wrap in a list\n request[listitem] = [request[listitem]]\n #else parse a list\n else:\n request[listitem]= eval(request[listitem])\n #if not, an empty list will do \n else:\n request[listitem]=[]\n\n inputDataSet=request['InputDataset']\n \n #it the request is rereco, we valiate white/black lists\n if requestType=='ReReco':\n # if there is block whte list, count only the selected block\n if request['BlockWhitelist']:\n events = dbs3.getEventCountDataSetBlockList(inputDataSet,request['BlockWhitelist'])\n # if there is block black list, substract them from the total\n if request['BlockBlacklist']:\n events = (dbs3.getEventCountDataSet(inputDataSet) - \n dbs3.getEventCountDataSet(inputDataSet,request['BlockBlacklist']))\n return events\n # same if a run whitelist\n if request['RunWhitelist']:\n events = dbs3.getEventCountDataSetRunList(inputDataSet, request['RunWhitelist'])\n return events\n # otherwize, the full lumi count\n else:\n events = dbs3.getEventCountDataset(inputDataSet)\n return events\n \n events = dbs3.getEventCountDataSet(inputDataSet)\n # if black list, subsctract them \n if request['BlockBlacklist']:\n events=events-dbs3.getEventCountDataSetBlockList(inputDataSet, request['BlockBlacklist'])\n # if white list, only the ones in the whitelist.\n if request['RunWhitelist']:\n events=dbs3.getEventCountDataSetRunList(inputDataSet, request['RunWhitelist'])\n # if white list of blocks\n if request['BlockWhitelist']:\n events=dbs3.getEventCountDataSetBlockList(inputDataSet, request['BlockWhitelist'])\n\n if 'FilterEfficiency' in request:\n return float(request['FilterEfficiency'])*events\n else:\n return events",
"def refresh_input(self, new_input):\n \n if(len(new_input) != len(self.layers[INPUT])):\n raise ValueError(\"Attempted to refresh input layer with the wrong number of values.\")\n\n for (i, node) in self.layers[INPUT]:\n self.layers[INPUT][i] = input_node(new_input[i])",
"def refresh_inputs():\n _nx.hid_scan_input()",
"def _do_inputs(self):\n self._clock.tick()\n for action in self._actions.get_actions(InputAction):\n action.execute(self._actors, self._actions, self._clock, self)",
"def getInputCount(self):\n\t\tquery = 'SELECT * from inputs ORDER BY id DESC LIMIT 1'\n\t\tself.executeQuery(query)\n\t\trawInput = self.fetchOne()\n\t\treturn rawInput[0]",
"def increment_values(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Writes the new params (consisting of K, L, D (or num_levels)).
|
def __handle_new_params(self):
if self.__test_type == igf.TEST_TYPES.RANDOM:
sec_param_text = ",".join(["L" + "=" + str(self.__L),
"D" + "=" + str(self.__D),
"K" + "=" + str(self.__K)])
else:
sec_param_text = ",".join(["L" + "=" + str(self.__L),
"D" + "=" +
str(self.__num_levels),
"K" + "=" + str(self.__K)])
# Only update the params if there have been changes to it:
if sec_param_text != self.__latest_params:
self.__latest_params = sec_param_text
# find the security parameter id:
self.__sec_param_id = self.__resultsdb.get_next_params_id()
# write the security parameter to the results database:
self.__resultsdb.add_row(
t2s.PARAM_TABLENAME,
{t2s.PARAM_PID: self.__sec_param_id,
t2s.PARAM_TESTNAME: self.__test_name,
t2s.PARAM_K: self.__K,
t2s.PARAM_D: self.__D,
t2s.PARAM_L: self.__L})
# write the security parameter to a params file:
sec_param_file_name = os.path.join(self.__params_dir_name,
str(self.__sec_param_id)
+ ".keyparams")
sec_param_file = self.__fho.get_file_object(sec_param_file_name,
'w')
sec_param_file.write(sec_param_text)
self.__fho.close_file_object(sec_param_file)
# write the params location to the test file:
self.__test_file.write(
"".join(["KEY\n",
self.__get_testfile_path(sec_param_file_name), "\n"]))
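For illustration only (the numbers are made up), a generated .keyparams file for the random test type would contain a single comma-separated line such as:

L=2,D=3,K=80

and the test file would then reference it with a "KEY" line followed by the path of that params file.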
|
[
"def _updateLevelSetParameters(self):\n parameters = LevelSetParameters()\n parameters.iterationNumber = self._iterationSpinBox.value\n parameters.inflation = self._inflationSlider.value\n parameters.attraction = self._attractionSlider.value\n parameters.curvature = self._curvatureSlider.value\n parameters.levelSetMethod = self._levelSetSegmentations[self._levelSetSegmentationChoice.currentText]\n parameters.initializationMethod = self._levelSetInitializations[self._levelSetInitializationChoice.currentText]\n\n self._logic.levelSetParameters = parameters",
"def upd_main_parameters(self):\n self.steps += 1\n self.k += 1\n self.mode = 'PS'\n\n self.gamma = 3 / (self.k + 2)\n self.gamma_next = 3 / (self.k + 3)\n \n L = self.defaults['L']\n M = self.defaults['M']\n D_tilde = self.defaults['D_tilde']\n T = ceil(M**2 * (self.k + 1)**3 / (D_tilde * L**2))\n self.T = int(T)\n \n self.P = 2 / ((self.T + 1) * (self.T + 2))\n self.beta = 9 * L * (1 - self.P) / (2 * (self.k + 1))",
"def write_params(verbose, wvtype, mults, npts, dt, align, shift, rot):\n\n file = open(\"raysum-params\", \"w\")\n file.writelines(\"# Verbosity\\n \" + str(int(verbose)) + \"\\n\")\n file.writelines(\"# Phase name\\n \" + wvtype + \"\\n\")\n file.writelines(\"# Multiples: 0 for none, 1 for Moho, \" +\n \"2 all first-order\\n \" + str(mults) + \"\\n\")\n file.writelines(\"# Number of samples per trace\\n \" + str(npts) + \"\\n\")\n file.writelines(\"# Sample rate (seconds)\\n \" + str(dt) + \"\\n\")\n file.writelines(\"# Alignment: 0 is none, 1 aligns on P\\n \" +\n str(align) + \"\\n\")\n file.writelines(\"# Shift or traces (seconds)\\n \" + str(shift) + \"\\n\")\n file.writelines(\"# Rotation to output: 0 is NEZ, 1 is RTZ, 2 is PVH\\n \" +\n str(rot) + \"\\n\")\n file.close()\n\n return",
"def write_level_to_dat(level, writer):\n #lower_layer is not reequired, so handle the case where it is None or 0 length\n # by making a default layer of all 0s\n if (level.lower_layer == None or len(level.lower_layer) == 0):\n level.lower_layer = [0]*1024\n level_bytes = calculate_level_byte_size(level)\n writer.write(level_bytes.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(level.level_number.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(level.time.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(level.num_chips.to_bytes(2, cc_classes.BYTE_ORDER))\n writer.write(b'\\x01\\x00') # Write the \"map detail\" which is always a 2 byte number set to 1\n write_layer_to_dat(level.upper_layer, writer)\n write_layer_to_dat(level.lower_layer, writer)\n total_field_byte_size = calculate_total_optional_field_byte_size(level.optional_fields)\n writer.write(total_field_byte_size.to_bytes(2, cc_classes.BYTE_ORDER))\n for field in level.optional_fields:\n write_field_to_dat(field, writer)",
"def write_parameters_to_settings(self):\n pass",
"def save_kwargs(self, kwargs: dict) -> None:\n d = kwargs.copy()\n d[\"eps\"] = self.eps\n d[\"torch_dtype\"] = self.torch_dtype\n d[\"importance_sampler\"] = self.importance_nested_sampler\n save_to_json(d, os.path.join(self.output, \"config.json\"))",
"def prepare_params_file(args,prefix,name_main_ldscore,params_file='/mnt/data/params.ldcts'):\n with open(params_file, 'w') as file:\n logging.debug('Save parameter file with prefix: ' + prefix + ' and ldscore: /mnt/data/outld/' + name_main_ldscore)\n file.write(prefix + \"\\t\" + '/mnt/data/outld/' + name_main_ldscore + '\\n')",
"def writeConfig(self, mapping, paths, unique_ID):\n OUTPUT_DIR, (CFG_FILE_OUT_ARCH, CFG_FILE_OUT_MAP, CFG_FILE_OUT_PROB, CFG_FILE_OUT_MODEL) = paths\n\n tiling, loop_orders, partitions = mapping\n numHierarchy = self.arch['numHierarchy']\n I,J,K,L = self.problem['dimension_sizes']\n\n\n # Extract\n dim_factors = [' factors: I={0} J={1} K={2} L={3}\\n'.format(*tiling[i]) for i in range(numHierarchy+1)]\n\n # Buffer sizes\n DRAM_factors, L2_factors, spatial_factors, L1_factors = dim_factors\n DRAM_orders, L2_orders, L1_orders = loop_orders\n L2_partitions, L1_partitions = partitions\n\n L2_A, L2_B, L2_C, L2_D = [int(self.arch['bank_sizes'][0]*L2_partitions[i]) for i in range(4)]\n L1_A, L1_B, L1_C, L1_D = [int(self.arch['bank_sizes'][0]*L1_partitions[i]) for i in range(4)]\n\n # Open the sample file\n with open(self.parameters.SAMPLE_CFG_FILE, 'r') as f:\n data = f.readlines()\n\n # Do the replacements\n data[20] = ' depth: {0}\\n'.format(L2_A)\n data[30] = ' depth: {0}\\n'.format(L2_B)\n data[40] = ' depth: {0}\\n'.format(L2_C)\n data[50] = ' depth: {0}\\n'.format(L2_D)\n data[63] = ' depth: {0}\\n'.format(L1_A)\n data[74] = ' depth: {0}\\n'.format(L1_B)\n data[85] = ' depth: {0}\\n'.format(L1_C)\n data[96] = ' depth: {0}\\n'.format(L1_D)\n data[112] = DRAM_factors\n data[113] = ' permutation: {0}\\n'.format(DRAM_orders)\n data[117] = L2_factors\n data[114] = ' - permutation: {0}\\n'.format(L2_orders)\n data[118] = ' - permutation: {0}\\n'.format(L2_orders)\n data[122] = ' - permutation: {0}\\n'.format(L2_orders)\n data[126] = ' - permutation: {0}\\n'.format(L2_orders)\n data[133] = spatial_factors\n data[134] = ' - permutation: {0}\\n'.format(L1_orders)\n data[138] = ' - permutation: {0}\\n'.format(L1_orders)\n data[142] = ' - permutation: {0}\\n'.format(L1_orders)\n data[146] = ' - permutation: {0}\\n'.format(L1_orders)\n data[137] = L1_factors\n data[241] = ' I: {0}\\n'.format(I)\n data[242] = ' J: {0}\\n'.format(J)\n data[243] = ' K: {0}\\n'.format(K)\n data[244] = ' L: {0}\\n'.format(L)\n\n data[248] = ' out_prefix: {0}'.format(unique_ID)\n\n # Write the file back\n with open(CFG_FILE_OUT_ARCH, 'w') as f:\n f.writelines(data[:109])\n with open(CFG_FILE_OUT_MAP, 'w') as f:\n f.writelines(data[109:215])\n with open(CFG_FILE_OUT_PROB, 'w') as f:\n f.writelines(data[215:246])\n with open(CFG_FILE_OUT_MODEL, 'w') as f:\n f.writelines(data[246:])\n\n os.chdir(OUTPUT_DIR)\n # print(OUTPUT_DIR)\n\n # Run the config file and check the validity\n command = [ self.parameters.COSTMODEL_EXECUTABLE,\n CFG_FILE_OUT_ARCH,\n CFG_FILE_OUT_MAP,\n CFG_FILE_OUT_PROB,\n CFG_FILE_OUT_MODEL\n ]\n DEVNULL = open(os.devnull, 'wb')\n prnt = sp.call(command, shell=False,stdout=DEVNULL , stderr=DEVNULL)\n # os.system(\"{0} {1} {2} {3} {4}\".format(COSTMODEL_EXECUTABLE, CFG_FILE_OUT_ARCH, CFG_FILE_OUT_MAP, CFG_FILE_OUT_PROB, CFG_FILE_OUT_MODEL))\n if(prnt ==0):\n return True\n else:\n return False\n # try:\n # DEVNULL = open(os.devnull, 'wb')\n # prnt = sp.call(command, shell=False,stdout=DEVNULL, stderr=STDOUT)\n # print(prnt)\n # # os.system(COSTMODEL_EXECUTABLE + ' ' + CFG_FILE_OUT)\n # except:\n # return False\n\n return True",
"def npl_changed(self, value):\n self.levels_new = value",
"def writeLageurreCoeffs(fn,coeffs,xc,size,beta,norder,pos=[0.,0.,0.,0.],mode='laguerre',info=''):\n d={ 'coeffs':coeffs,\n 'mode':mode,\n 'xc':xc,\n 'size':size,\n 'beta':beta,\n 'norder':norder,\n 'ra':pos[0],\n 'dec':pos[1],\n 'dra':pos[2],\n 'ddec':pos[2],\n 'info': info }\n fh=open(fn,'wb')\n pickle.dump(d,fh)\n fh.close()",
"def update_parameters(self, params):\n self.tbf.update_lengthscales(np.exp(params[:self.D])) # update TBF lengthscales\n self.tbf.update_amplitude(np.exp(2*params[self.D])) # update TBF amplitude\n self.var_n = np.exp(2*params[self.D + 1]) # update noise variance\n self.tbf.update_frequencies(params[self.D + 2:]) # update the TBF spectral frequencies",
"def updateParams(self,mapName):\n pass",
"def test_add_parameter():\n file = \"testlogs/parameterchange.ulg\"\n lm = DfUlg.create(\n filepath=file,\n topics=[\"vehicle_local_position\", \"vehicle_local_position_setpoint\"],\n )\n\n # we should have three different groups\n loginfo.add_param(lm, \"MPC_YAW_MODE\")\n group = lm.df.groupby(\"MPC_YAW_MODE\")\n assert group.ngroups == 3\n\n # should have two values of MPC_YAW_EXPO\n loginfo.add_param(lm, \"MPC_YAW_EXPO\")\n group = lm.df.groupby(\"MPC_YAW_EXPO\")\n assert group.ngroups == 2\n\n # should have only one value for MPC_XY_P\n loginfo.add_param(lm, \"MPC_XY_P\")\n lm.df[\"MPC_XY_P\"].to_csv(\"tt.txt\")\n group = lm.df.groupby(\"MPC_XY_P\")\n assert group.ngroups == 1",
"def write_v_info(self):\n with open(self.v_info_path, 'w') as v_info:\n v_info.write('%-15s\\t: %-11s' % ('param names', 'R-1'))\n v_info.write(' '.join(['%-11s' % elem for elem in [\n 'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',\n '2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',\n '2-sigma >', '2-sigma <', '95% CL >', '95% CL <']]))\n for index, name in zip(self.indices, self.info_names):\n v_info.write('\\n%-15s\\t: % .4e' % (name, self.R[index]))\n v_info.write(' '.join(['% .4e' % elem for elem in [\n self.bestfit[index], self.mean[index],\n (self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,\n self.bounds[index, 0, 0], self.bounds[index, 0, 1],\n self.bounds[index, 1, 0], self.bounds[index, 1, 1],\n self.mean[index]+self.bounds[index, 0, 0],\n self.mean[index]+self.bounds[index, 0, 1],\n self.mean[index]+self.bounds[index, 1, 0],\n self.mean[index]+self.bounds[index, 1, 1],\n self.mean[index]+self.bounds[index, -1, 0],\n self.mean[index]+self.bounds[index, -1, 1]]]))",
"def save_params(self) -> None:\n self._lib.save_params(self._device_handle)",
"def writeOpts(self, fname):\n file = open(fname, 'a')\n file.write('// --- Wing options ---\\n')\n for i in range(0, self.n):\n file.write('DefineConstant[ msLe{0:1d} = {{ {1:f}, Name \"leading edge mesh size on {2:1d}th spanwise station\" }} ];\\n'.format(i, self.chord[i]/100, i))\n file.write('DefineConstant[ msTe{0:1d} = {{ {1:f}, Name \"trailing edge mesh size on {2:1d}th spanwise station\" }} ];\\n'.format(i, self.chord[i]/100, i))\n file.write('DefineConstant[ gr{0:1d} = {{ {1:f}, Name \"growth ratio for {2:1d}th spanwise station\" }} ];\\n'.format(i, 1.5, i))\n file.write('\\n')\n file.close()",
"def update_params(self):\n # todo: sample theta and phi\n\n #sample theta from dirichlet (A_{d,k}+alpha), since dim(theta)=ndoc * ntopic , we need to update for each d, so for each row\n for d in range(self.n_docs):\n self.theta[d,:] = np.random.dirichlet(self.A_dk[d,:] + self.alpha)\n\n #sample phi from dirichlet (B_{k,w}+beta), dim(phi) = ntopics * nwords\n for k in range(self.n_topics):\n self.phi[k,:] = np.random.dirichlet(self.B_kw[k,:] + self.beta)\n\n\n self.update_topic_doc_words()\n #print('thishif',self.topic_doc_words_distr[0,0,:])\n self.sample_counts() #update A and B",
"def saveParameters(self):\n\n name = 'Hyteresis_Measurement_Parameters.txt'\n file = open(name, 'w') # Trying to create a new file or open one\n file.write(\"Voltage: {} V\\n\".format(str(Parameters['Voltage'])))\n file.write(\"Loops: {} \\n\".format(str(LoopParams['Loops'])))\n file.write(\"Measurementpoints: {} \\n\".format(\n str(LoopParams['MeasurementPoints'])))\n file.write(\"Set Fluenz: {} \\n\".format(\n str(MeasParams['Fluence'])))\n file.write(\"TimeZero: {} \\n\".format(\n str(MeasParams['timeZero'])))\n file.write(\"Pump-Angle: {} \\n\".format(\n str(MeasParams['angle'])))\n file.write(\"Samplename: {} \\n\".format(\n str(MeasParams['sampleName'])))\n\n if not self.Stage_ReadFromFile:\n file.write(\"StartPoint: {} ps\\n\".format(\n str(StageParams_ps['StartPoint'])))\n file.write(\"End Point: {} ps\\n\".format(\n str(StageParams_ps['EndPoint'])))\n file.write(\"Stepwidth: {} ps\\n\".format(\n str(StageParams_ps['StepWidth'])))\n file.write(\"Stage Velocity: {} \\n\".format(\n str(Stage_SpeedParams['Velocity'])))\n file.write(\"Stage Acceleration: {} \\n\".format(\n str(Stage_SpeedParams['Acceleration'])))\n\n if self.Stage_ReadFromFile:\n file.write(\"Start \\t Stop \\t Stepwidth ps\\n\")\n for idx, val in enumerate(self.saveVector):\n entry = ' '.join(str(e) for e in self.saveVector[idx])\n file.write(\"{}\\n\".format(entry))\n\n if self.Hysteresis_Check.isChecked():\n file.write(\"StartPoint: {} ps\\n\".format(\n str(HysteresisParameters['Stepwidth'])))\n file.write(\"Amplitude: {} ps\\n\".format(\n str(HysteresisParameters['Amplitude'])))\n file.write(\"@StageDelay\")\n for idx, val in enumerate(self.hystDelayVector_ps):\n entry = ' '.join(str(val))\n file.write(\"{}\\n\".format(entry))\n\n file.close()",
"def setparams(self, sampath, insertsize, stdvar, readlength):\n self.__insertsize = insertsize\n self.__readlength = readlength\n self.__stdvar = stdvar\n self.__threshold = self.__insertsize + 3 * self.__stdvar\n self.addreads(sampath)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Takes in a path and returns the same path relative to the appropriate directory for the test file.
|
def __get_testfile_path(self, path):
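        # make the path relative to the parent of the data directory: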
path = os.path.relpath(
path, os.path.join(self.__data_path, os.pardir))
return path
|
[
"def subject_relative_path(path):\n directory = path\n subject = component_name(path)\n\n filename = os.path.basename(path)\n directory = os.path.dirname(path)\n parent = os.path.basename(directory)\n\n if re.match(r\"index(?:[-._](?:spec|unit|test|acceptance))?\\.jsx?$\", filename):\n if re.match(r\"__tests?__/?\", parent):\n return '..' + os.sep\n return '.' + os.sep\n\n if re.match(r\"__tests?__/?\", parent):\n return '..' + os.sep\n\n return os.path.join('.', subject)",
"def filedir(*path):\n return os.path.realpath(os.path.join(os.path.dirname(__file__), *path))",
"def tests_root_directory(path: Optional[PathOrString] = None) -> Path:\n root = Path(os.path.realpath(__file__)).parent.parent.parent / \"Tests\"\n return root / path if path else root",
"def tests_dir():\n return Path(os.path.realpath(__file__)).parent",
"def _GetSrcRelativePath(path):\n assert path.startswith(_GetToolsParentDir())\n return expand_owners.SRC + path[len(_GetToolsParentDir()) + 1:]",
"def P(path):\n return os.path.join(\n os.environ.get('abs_top_srcdir', \".\"),\n path)",
"def makeFilePath(self, file_path):\n return '%s/%s' % (os.path.dirname(__file__), file_path)",
"def rel_to_abs(path):\r\n current_dir = os.path.abspath(os.path.dirname(__file__))\r\n return os.path.join(current_dir, path)",
"def data_test_dir():\n return Path(__file__).absolute().parent.parent.parent / \"test_data\"\n # return os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), \"test_data\")",
"def get_path(root, path):\n\n return join(dirname(root), path)",
"def static_path(relative_path, test_path=os.path.dirname(__file__)):\n return os.path.join(test_path, relative_path)",
"def get_path(path, file_name=None, absolute=False):\n _p = os.path.join(os.environ['PROJECT_ROOT'], path)\n if file_name:\n _p = os.path.join(_p, file_name)\n if absolute:\n return os.path.abspath(_p)\n return os.path.relpath(_p)",
"def _get_test_template_dir():\n return os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'test_templates/')",
"def transform_path():\n return str(pathlib.Path(__file__).parent.absolute())",
"def get_full_path(relative_path, package=\"chemper\"):\n if os.path.exists(relative_path):\n return os.path.abspath(relative_path)\n return get_data_path(relative_path, package)",
"def get_tests_dir_path(): \n fmod_path = ctbto.tests.__path__\n \n test_dir = \"%s/conf_tests\" % fmod_path[0]\n \n return test_dir",
"def resolve_file_path(file_path):\n if not os.path.isfile(file_path):\n # Allow loading config files relative to rltime/configs directory\n base_path = os.path.dirname(rltime.__file__)\n rel_file_path = os.path.join(base_path, \"configs\", file_path)\n if os.path.isfile(rel_file_path):\n return rel_file_path\n return file_path",
"def convert_to_relative(basePath, fileName):\r\n if fileName.startswith(basePath):\r\n fileName = fileName.replace(basePath, '')\r\n if fileName.startswith(os.path.sep):\r\n fileName = fileName[1:]\r\n return fileName",
"def get_full_filepath(test_filename):\n file_path = os.path.dirname(os.path.abspath(__file__))\n return_filepath = os.path.abspath(file_path + \"/responses/\" + test_filename)\n return return_filepath"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates circuits with the current parameters.
|
def __make_circuits(self):
# update the params if needed:
self.__handle_new_params()
# make self.__num_circuits circuits:
for circuit_num in xrange(self.__num_circuits):
# generate a random circuit:
if self.__test_type == igf.TEST_TYPES.RANDOM:
gen = igf.TEST_TYPE_TO_GENERATOR_BY_DEPTH[igf.TEST_TYPES.RANDOM]
circ = gen(self.__L, self.__D, self.__W)
else:
gen = igf.TEST_TYPE_TO_GENERATOR_BY_LEVEL[self.__test_type]
circ = gen(self.__L, self.__num_levels, self.__W)
self.__write_circuit(circ)
# for each circuit, make self.__num_inputs inputs:
for input_num in xrange(self.__num_inputs):
# generate a random input:
inp = igf.make_random_input(self.__L, self.__W)
self.__write_input(inp)
|
[
"def generate_circuit(config: Dict[str, Any]):\n print(\"-\" * 80)\n print(f\"Creating circuit number\")\n\n n_qubits = random.randint(config[\"min_n_qubits\"], config[\"max_n_qubits\"])\n n_ops = random.randint(config[\"min_n_ops\"], config[\"max_n_ops\"])\n\n if (config[\"strategy_program_generation\"] == \"uniform\" or\n config[\"strategy_program_generation\"] == \"weighted\"):\n gate_set = config[\"gate_set\"]\n if (config[\"strategy_program_generation\"] == \"uniform\"):\n for gate in gate_set.keys():\n gate_set[gate] = 1\n # generate a random circuit\n random_circuit_qasm_str = generate_randomly(\n n_qubits=n_qubits,\n n_ops=n_ops,\n gate_set=gate_set,\n random_state=np.random.RandomState(config[\"random_seed\"]))\n\n\n metadata_dict = {\n \"n_qubits\": n_qubits,\n \"n_ops\": n_ops,\n \"gate_set\": config[\"gate_set\"],\n \"strategy_program_generation\": config[\"strategy_program_generation\"]\n }\n\n print(f\"Saving circuit: with simulation results\")\n timestamp = int(time.time())\n qasm_file_name = config[\"program_id_pattern\"]\n qasm_file_name = \\\n qasm_file_name.replace(\"{{timestamp}}\", str(timestamp))\n qasm_file_name = \\\n qasm_file_name.replace(\"{{randint}}\", str(random.randint(0, 9999)).zfill(4))\n print(f\"qasm_file_name: {qasm_file_name}\")\n # get current timestamp as integer and use it as filename\n\n store_qasm(\n filename=qasm_file_name,\n qasm_content=random_circuit_qasm_str,\n out_folder=config[\"folder_generated_qasm\"],\n metadata_dict=metadata_dict\n )",
"def compiler(circuit):\n #initialize an empty circuit with the same size of qubits and clbits \n decomposedCircuit = QuantumCircuit(circuit.num_qubits)\n if circuit.num_clbits >0:\n decomposedCircuit.add_register(ClassicalRegister(circuit.num_clbits))\n \n #extract the gates to compile them from the data list\n for item in circuit.data:\n \n #the gate object\n gate=item[0] \n \n #number of qubits of the gate\n numOfQubits=len(item[1]) \n \n #the indices that the gate applied on\n positions=[qubit.index for qubit in item[1]] \n \n #check if the gate is a single qubit gate\n if numOfQubits==1:\n #decompose the single qubit gate\n decomposition=oneQubitDecomppser(gate)\n #extract the decomposition gates from the received circuit\n gates=[item[0] for item in decomposition.data]\n #append each gate to the new circuit at the same position note: len(positions)=1 \"single qubit gate\"\n [decomposedCircuit.append(gate,positions) for gate in gates]\n \n #check if the gate is a two qubit gate\n elif numOfQubits==2:\n #decompose the gate\n decomposition=twoQubitDecomppser(gate)\n #extract the decomposition gates from the received circuit\n for item in decomposition.data:\n gate=item[0]\n if len(item[1])==2:\n #append each gate to the new circuit at the same positions note: len(positions)=2\n decomposedCircuit.append(gate,positions)\n else:\n #append a single qubit gate to the new circuit\n #get the index (0 or 1) means the gate is applied to the 1st qubit or the 2nd qubit from the positions list \n decomposedCircuit.append(gate,[positions[item[1][0].index]]) \n \n return decomposedCircuit",
"def _build(self):\n if self._is_built:\n return\n\n super()._build()\n\n circuit = QuantumCircuit(*self.qregs, name=self.name)\n qr_state = circuit.qubits[: self.num_state_qubits]\n qr_target = [circuit.qubits[self.num_state_qubits]]\n # Ancilla for the comparator circuit\n qr_ancilla = circuit.qubits[self.num_state_qubits + 1 :]\n\n # apply comparators and controlled linear rotations\n for i, point in enumerate(self.breakpoints[:-1]):\n if i == 0 and self.contains_zero_breakpoint:\n # apply rotation\n poly_r = PolynomialPauliRotations(\n num_state_qubits=self.num_state_qubits,\n coeffs=self.mapped_coeffs[i],\n basis=self.basis,\n )\n circuit.append(poly_r.to_gate(), qr_state[:] + qr_target)\n\n else:\n # apply Comparator\n comp = IntegerComparator(num_state_qubits=self.num_state_qubits, value=point)\n qr_state_full = qr_state[:] + [qr_ancilla[0]] # add compare qubit\n qr_remaining_ancilla = qr_ancilla[1:] # take remaining ancillas\n\n circuit.append(\n comp.to_gate(), qr_state_full[:] + qr_remaining_ancilla[: comp.num_ancillas]\n )\n\n # apply controlled rotation\n poly_r = PolynomialPauliRotations(\n num_state_qubits=self.num_state_qubits,\n coeffs=self.mapped_coeffs[i],\n basis=self.basis,\n )\n circuit.append(\n poly_r.to_gate().control(), [qr_ancilla[0]] + qr_state[:] + qr_target\n )\n\n # uncompute comparator\n circuit.append(\n comp.to_gate().inverse(),\n qr_state_full[:] + qr_remaining_ancilla[: comp.num_ancillas],\n )\n\n self.append(circuit.to_gate(), self.qubits)",
"def build_model_circuit(width=3, depth=None):\n\tqreg = QuantumRegister( width, \"q\" )\n\tdepth = depth or width\n\n\tcircuit = QuantumCircuit( qreg )\n\n\tfor _ in range(depth):\n\t\t# Generate uniformly random permutation Pj of [0...n-1]\n\t\tperm = np.random.permutation(width)\n\n\t\t# For each pair p in Pj, generate Haar random U(4)\n\t\t# Decompose each U(4) into CNOT + SU(2)\n\t\tfor k in range(width // 2):\n\t\t\tU = random_unitary_matrix(4)\n\t\t\tfor gate in two_qubit_kak(U):\n\t\t\t\tqs = [qreg[int(perm[2 * k + i])] for i in gate[\"args\"]]\n\t\t\t\tpars = gate[\"params\"]\n\t\t\t\tname = gate[\"name\"]\n\t\t\t\tif name == \"cx\":\n\t\t\t\t\tcircuit.cx(qs[0], qs[1])\n\t\t\t\telif name == \"u1\":\n\t\t\t\t\tcircuit.u1(pars[0], qs[0])\n\t\t\t\telif name == \"u2\":\n\t\t\t\t\tcircuit.u2(*pars[:2], qs[0])\n\t\t\t\telif name == \"u3\":\n\t\t\t\t\tcircuit.u3(*pars[:3], qs[0])\n\t\t\t\telif name == \"id\":\n\t\t\t\t\tpass # do nothing\n\t\t\t\telse:\n\t\t\t\t\traise Exception(\"Unexpected gate name: %s\" % name)\n\treturn circuit",
"def compile(circuits, backend,\n config=None, basis_gates=None, coupling_map=None, initial_layout=None,\n shots=1024, max_credits=10, seed=None, qobj_id=None, hpc=None,\n pass_manager=None):\n if isinstance(circuits, QuantumCircuit):\n circuits = [circuits]\n\n backend_conf = backend.configuration\n backend_name = backend_conf['name']\n\n qobj = {}\n\n # step 1: populate the qobj-level `id`\n qobj_id = qobj_id or str(uuid.uuid4())\n qobj['id'] = qobj_id\n\n # step 2: populate the qobj-level `config`\n qobj['config'] = {'max_credits': max_credits,\n 'shots': shots,\n 'backend_name': backend_name}\n\n if hpc is not None and \\\n not all(key in hpc for key in ('multi_shot_optimization', 'omp_num_threads')):\n raise TranspilerError('Unknown HPC parameter format!')\n\n # step 3: populate the `circuits` in qobj, after compiling each circuit\n qobj['circuits'] = []\n if not basis_gates:\n basis_gates = backend_conf['basis_gates']\n if not coupling_map:\n coupling_map = backend_conf['coupling_map']\n\n for circuit in circuits:\n job = {}\n\n # step 1: populate the circuit-level `name`\n job[\"name\"] = circuit.name\n\n # step 2: populate the circuit-level `config`\n if config is None:\n config = {}\n job[\"config\"] = copy.deepcopy(config)\n # TODO: A better solution is to have options to enable/disable optimizations\n num_qubits = sum((len(qreg) for qreg in circuit.get_qregs().values()))\n if num_qubits == 1 or coupling_map == \"all-to-all\":\n coupling_map = None\n job[\"config\"][\"coupling_map\"] = coupling_map\n job[\"config\"][\"basis_gates\"] = basis_gates\n job[\"config\"][\"seed\"] = seed\n\n # step 3: populate the circuit `instructions` after compilation\n # step 3a: circuit -> dag\n dag_circuit = DAGCircuit.fromQuantumCircuit(circuit)\n\n # TODO: move this inside the mapper pass\n # pick a good initial layout if coupling_map is not already satisfied\n # otherwise keep it as q[i]->q[i]\n if (initial_layout is None and\n not backend_conf['simulator'] and\n not _matches_coupling_map(circuit.data, coupling_map)):\n initial_layout = _pick_best_layout(backend, num_qubits, circuit.get_qregs())\n\n # step 3b: transpile (dag -> dag)\n dag_circuit, final_layout = transpile(\n dag_circuit,\n basis_gates=basis_gates,\n coupling_map=coupling_map,\n initial_layout=initial_layout,\n get_layout=True,\n seed=seed,\n pass_manager=pass_manager)\n\n # step 3c: dag -> json\n # TODO: populate the Qobj object when Qobj class exists\n # the compiled circuit to be run saved as a dag\n # we assume that transpile() has already expanded gates\n # to the target basis, so we just need to generate json\n list_layout = [[k, v] for k, v in final_layout.items()] if final_layout else None\n job[\"config\"][\"layout\"] = list_layout\n json_circuit = DagUnroller(dag_circuit, JsonBackend(dag_circuit.basis)).execute()\n job[\"compiled_circuit\"] = json_circuit\n\n # set eval_symbols=True to evaluate each symbolic expression\n # TODO after transition to qobj, we can drop this\n job[\"compiled_circuit_qasm\"] = dag_circuit.qasm(qeflag=True,\n eval_symbols=True)\n\n # add job to the qobj\n qobj[\"circuits\"].append(job)\n\n return qobj",
"def randomCircuit(self,):\n circuit = []\n\n # hadamard zone\n for i in range(self.numAncillas):\n circuit.append(self.HAD(i))\n\n # cswap zone\n for i in range(int(self.depth)):\n control = random.randint(0, self.numAncillas - 1)\n targets = [random.randint(self.numAncillas, self.numAncillas + self.numInputs - 1)]\n t2 = random.randint(self.numAncillas, self.numAncillas + self.numInputs - 1)\n while t2 == targets[0]:\n t2 = random.randint(self.numAncillas, self.numAncillas + self.numInputs - 1)\n targets.append(t2)\n circuit.append(self.CSWAP(control, targets))\n\n return circuit",
"def circuit(self):\n raise NotImplementedError",
"def _generate_quantum_circuit(self, player_gates):\n if self._protocol == Protocol.Classical:\n return None\n player_gate_objects = []\n for i in range(len(player_gates)):\n player_gate_objects.append([])\n for j in player_gates[i]:\n player_gate_objects[i].append(generate_unitary_gate(j))\n self._quantum_game = QuantumGame(player_gate_objects, self._protocol, self._backend)\n self._quantum_game.circ.draw()\n return self._quantum_game.circ",
"def generate_params_combination(self):\n # cpu frequency and status\n \n core_status=dict()\n for key in self.big_cores.iterkeys():\n if key==\"core0\":\n core_status[key]=[self.ENABLE]\n else:\n core_status[key]=[self.ENABLE,self.DISABLE]\n core_frequency=self.big_core_freqs[:]\n \n # gpu status\n gpu_status=[self.ENABLE] \n # emmc status \n emc_status=[self.ENABLE] \n # gpu max frequency\n [a,b,c,d]=self.get_indices(self.gpu_freqs)\n gpu_freq=self.gpu_freqs[:] \n #emc max frequency\n \n emc_freq=self.emc_freqs[:]\n \n \"\"\"\n create configurable paramters set before permutation in a varibale named var\n index 0: core0 status\n index 1: core1 status \n index 2: core2 status \n index 3: core3 status \n index 4: core frequency\n index 5,6: gpu status, gpu frequency \n index 7,8: emc status, emc frequency \n \"\"\"\n status_var=[(self.ENABLE,self.DISABLE,self.DISABLE,self.DISABLE),\n (self.ENABLE,self.DISABLE,self.DISABLE,self.ENABLE),\n (self.ENABLE,self.DISABLE,self.ENABLE,self.DISABLE),\n (self.ENABLE,self.DISABLE,self.ENABLE,self.ENABLE),\n (self.ENABLE,self.ENABLE,self.DISABLE,self.DISABLE),\n (self.ENABLE,self.ENABLE,self.DISABLE,self.ENABLE),\n (self.ENABLE,self.ENABLE,self.ENABLE,self.DISABLE),\n (self.ENABLE,self.ENABLE,self.ENABLE,self.ENABLE)\n ]\n var=[\n core_frequency,\n gpu_status, gpu_freq,\n emc_status, emc_freq\n ]\n \n self.params=list(itertools.product(*var))\n self.params=list(itertools.product(status_var,self.params))\n for i in range(len(self.params)):\n self.params[i]=self.params[i][0]+self.params[i][1]",
"def test_assemble_multiple_circuits(self):\n q0 = QuantumRegister(2, name='q0')\n c0 = ClassicalRegister(2, name='c0')\n circ0 = QuantumCircuit(q0, c0, name='circ0')\n circ0.h(q0[0])\n circ0.cx(q0[0], q0[1])\n circ0.measure(q0, c0)\n\n q1 = QuantumRegister(3, name='q1')\n c1 = ClassicalRegister(3, name='c1')\n circ1 = QuantumCircuit(q1, c1, name='circ0')\n circ1.h(q1[0])\n circ1.cx(q1[0], q1[1])\n circ1.cx(q1[0], q1[2])\n circ1.measure(q1, c1)\n\n run_config = RunConfig(shots=100, memory=False, seed=6)\n qobj = assemble_circuits([circ0, circ1], run_config=run_config)\n self.assertIsInstance(qobj, Qobj)\n self.assertEqual(qobj.config.seed, 6)\n self.assertEqual(len(qobj.experiments), 2)\n self.assertEqual(qobj.experiments[1].config.n_qubits, 3)\n self.assertEqual(len(qobj.experiments), 2)\n self.assertEqual(len(qobj.experiments[1].instructions), 6)",
"def gen_cc_main(cls, ncoins, index_of_false_coin):\n # using the last qubit for storing the oracle's answer\n nqubits = ncoins + 1\n # Creating registers\n # qubits for querying coins and storing the balance result\n q_r = QuantumRegister(nqubits)\n # for recording the measurement on qr\n c_r = ClassicalRegister(nqubits)\n cccircuit = QuantumCircuit(q_r, c_r)\n\n # Apply Hadamard gates to the first ncoins quantum register\n # create uniform superposition\n for i in range(ncoins):\n cccircuit.h(q_r[i])\n\n # check if there are even number of coins placed on the pan\n for i in range(ncoins):\n cccircuit.cx(q_r[i], q_r[ncoins])\n\n # perform intermediate measurement to check if the last qubit is zero\n cccircuit.measure(q_r[ncoins], c_r[ncoins])\n\n # proceed to query the quantum beam balance if cr is zero\n cccircuit.x(q_r[ncoins]).c_if(c_r, 0)\n cccircuit.h(q_r[ncoins]).c_if(c_r, 0)\n\n # we rewind the computation when cr[N] is not zero\n for i in range(ncoins):\n cccircuit.h(q_r[i]).c_if(c_r, 2**ncoins)\n\n # apply barrier for marking the beginning of the oracle\n cccircuit.barrier()\n\n cccircuit.cx(q_r[index_of_false_coin], q_r[ncoins]).c_if(c_r, 0)\n\n # apply barrier for marking the end of the oracle\n cccircuit.barrier()\n\n # apply Hadamard gates to the first ncoins qubits\n for i in range(ncoins):\n cccircuit.h(q_r[i]).c_if(c_r, 0)\n\n # measure qr and store the result to cr\n for i in range(ncoins):\n cccircuit.measure(q_r[i], c_r[i])\n\n return cccircuit",
"def self_defined_noisy_circuit(qubits: int, gates: int) -> 'QEnv':\n\n # Create environment\n env = QEnv()\n # Choose backend Baidu local simulator\n env.backend(BackendName.LocalBaiduSim2)\n\n # Number of qubits, no larger than 20 \n num_qubit = qubits\n # Number of gates in each for loop\n gate_num = gates # Depth of circuit = num_qubit * gate_num\n\n assert num_qubit > 2\n assert gate_num > 2\n\n # Initialize a QCompute circuit\n q = env.Q.createList(num_qubit)\n\n # A noisy random H + CX + RX circuit\n for i in range(num_qubit - 1):\n H(q[i])\n CX(q[i], q[i + 1])\n # Random rotation angles\n rotation_list = [random.uniform(0, 6.28) for _ in range(gate_num - 2)]\n # random quantum registers\n qreg_list = [random.randint(0, num_qubit - 1) for _ in range(gate_num - 2)]\n for i in range(gate_num - 2):\n RX(rotation_list[i])(q[qreg_list[i]])\n\n # Measure with the computational basis\n MeasureZ(*env.Q.toListPair())\n\n # Define noise instances \n # Define 1-qubit noise instance\n bfobj = BitFlip(0.1)\n # Define a 2-qubit noise instance\n dpobj = Depolarizing(2, 0.1)\n\n # # Add noises\n env.noise(['H'], [bfobj])\n env.noise(['CX'], [dpobj])\n\n return env",
"def make_random_circuit(n_rows, n_cols, depth):\r\n return cirq.experiments.generate_boixo_2018_supremacy_circuits_v2_grid(\r\n n_rows=n_rows,\r\n n_cols=n_cols,\r\n cz_depth=depth - 2, # Account for beginning/ending Hadamard layers\r\n seed=SEED)",
"def test_build_circuit_product():\n qpu = cirq.Simulator(dtype=numpy.complex128)\n qubits = cirq.LineQubit.range(4)\n ops = QubitOperator('', 1.0)\n for i in range(4):\n ops *= QubitOperator('X' + str(i), 1.0)\n for j in ops.terms:\n circuit = cirq_utils.qubit_ops_to_circuit(j, qubits)\n init_state = numpy.zeros(2**4, dtype=numpy.complex128)\n init_state[0] = 1.0 + 0.0j\n result = qpu.simulate(circuit, qubit_order=qubits, initial_state=init_state)\n final_state = numpy.zeros(2**4, dtype=numpy.complex128)\n final_state[-1] = 1.0 + 0.0j\n assert list(result.final_state_vector) == list(final_state)",
"def create_combinations(baseline_dir, rsn_dir, combi_dir, version_id='',\n comments=''):\n\n baseinp = Model(baseline_dir).inp.path\n version_id += '_' + datetime.now().strftime(\"%y%m%d%H%M%S\")\n\n #create a list of directories pointing to each IP in each RSN\n RSN_dirs = [os.path.join(rsn_dir, rsn) for rsn in os.listdir(rsn_dir)]\n IP_dirs = [os.path.join(d, ip) for d in RSN_dirs for ip in os.listdir(d)]\n\n #list of lists of each IP within each RSN, including a 'None' phase.\n IPs = [[None] + os.listdir(d) for d in RSN_dirs]\n\n #identify all scenarios (cartesian product of sets of IPs between each RSN)\n #then isolate child scenarios with atleast 2 parents (sets with one parent\n #are already modeled as IPs within the RSNs)\n all_scenarios = [[_f for _f in s if _f] for s in itertools.product(*IPs)]\n child_scenarios = [s for s in all_scenarios if len(s) > 1]\n\n #notify user of what was initially found\n str_IPs = '\\n'.join([', '.join([_f for _f in i if _f]) for i in IPs])\n print(('Found {} implementation phases among {} networks:\\n{}\\n'\n 'This yeilds {} combined scenarios ({} total)'.format(len(IP_dirs),\n len(RSN_dirs),str_IPs,len(child_scenarios),len(all_scenarios) - 1)))\n\n # ==========================================================================\n # UPDATE/CREATE THE PARENT MODEL BUILD INSTRUCTIONS\n # ==========================================================================\n for ip_dir in IP_dirs:\n ip_model = Model(ip_dir)\n vc_dir = os.path.join(ip_dir, 'vc')\n\n if not os.path.exists(vc_dir):\n print('creating new build instructions for {}'.format(ip_model.name))\n inp.create_inp_build_instructions(baseinp, ip_model.inp.path,\n vc_dir,\n version_id, comments)\n else:\n #check if the alternative model was changed since last run of this tool\n #--> compare the modification date to the BI's modification date meta data\n latest_bi = vc_utils.newest_file(vc_dir)\n if not vc_utils.bi_is_current(latest_bi):\n #revision date of the alt doesn't match the newest build\n #instructions for this 'imp_level', so we should refresh it\n print('updating build instructions for {}'.format(ip_model.name))\n inp.create_inp_build_instructions(baseinp, ip_model.inp.path,\n vc_dir, version_id,\n comments)\n\n # ==========================================================================\n # UPDATE/CREATE THE CHILD MODELS AND CHILD BUILD INSTRUCTIONS\n # ==========================================================================\n for scen in child_scenarios:\n newcombi = '_'.join(sorted(scen))\n new_dir = os.path.join(combi_dir, newcombi)\n vc_dir = os.path.join(combi_dir, newcombi, 'vc')\n\n #parent model build instr files\n #BUG (this breaks with model IDs with more than 1 char)\n parent_vc_dirs = [os.path.join(rsn_dir, f[0], f, 'vc') for f in scen]\n latest_parent_bis = [vc_utils.newest_file(d) for d in parent_vc_dirs]\n build_instrcts = [inp.BuildInstructions(bi) for bi in latest_parent_bis]\n\n if not os.path.exists(new_dir):\n\n os.mkdir(new_dir)\n newinppath = os.path.join(new_dir, newcombi + '.inp')\n\n print('creating new child model: {}'.format(newcombi))\n new_build_instructions = sum(build_instrcts)\n new_build_instructions.save(vc_dir, version_id+'.txt')\n new_build_instructions.build(baseline_dir, newinppath)\n\n else:\n #check if the alternative model was changed since last run\n #of this tool --> compare the modification date to the BI's\n #modification date meta data\n latest_bi = vc_utils.newest_file(os.path.join(new_dir,'vc'))\n if not vc_utils.bi_is_current(latest_bi):\n 
#revision date of the alt doesn't match the newest build\n #instructions for this 'imp_level', so we should refresh it\n print('updating child build instructions for {}'.format(newcombi))\n newinppath = os.path.join(new_dir, newcombi + '.inp')\n new_build_instructions = sum(build_instrcts)\n new_build_instructions.save(vc_dir, version_id+'.txt')\n new_build_instructions.build(baseline_dir, newinppath)",
"def _naive_program_generator(qc: QuantumComputer, qubits: Sequence[int], permutations: np.ndarray,\n gates: np.ndarray) -> Program:\n # artificially restrict the entire computation to num_measure_qubits\n num_measure_qubits = len(permutations[0])\n # if these measure_qubits do not have a topology that supports the program, the compiler may\n # act on a different (potentially larger) subset of the input sequence of qubits.\n measure_qubits = qubits[:num_measure_qubits]\n\n # create a simple program that uses the compiler to directly generate 2q gates from the matrices\n prog = Program()\n for layer_idx, (perm, layer) in enumerate(zip(permutations, gates)):\n for gate_idx, gate in enumerate(layer):\n # get the Quil definition for the new gate\n g_definition = DefGate(\"LYR\" + str(layer_idx) + \"_RAND\" + str(gate_idx), gate)\n # get the gate constructor\n G = g_definition.get_constructor()\n # add definition to program\n prog += g_definition\n # add gate to program, acting on properly permuted qubits\n prog += G(int(measure_qubits[perm[gate_idx]]), int(measure_qubits[perm[gate_idx+1]]))\n\n ro = prog.declare(\"ro\", \"BIT\", num_measure_qubits)\n for idx, qubit in enumerate(measure_qubits):\n prog.measure(qubit, ro[idx])\n\n # restrict compilation to chosen qubits\n isa_dict = qc.device.get_isa().to_dict()\n single_qs = isa_dict['1Q']\n two_qs = isa_dict['2Q']\n\n new_1q = {}\n for key, val in single_qs.items():\n if int(key) in qubits:\n new_1q[key] = val\n new_2q = {}\n for key, val in two_qs.items():\n q1, q2 = key.split('-')\n if int(q1) in qubits and int(q2) in qubits:\n new_2q[key] = val\n\n new_isa = {'1Q': new_1q, '2Q': new_2q}\n\n new_compiler = copy(qc.compiler)\n new_compiler.target_device = TargetDevice(isa=new_isa, specs=qc.device.get_specs().to_dict())\n # try to compile with the restricted qubit topology\n try:\n native_quil = new_compiler.quil_to_native_quil(prog)\n except RPCErrorError as e:\n if \"Multiqubit instruction requested between disconnected components of the QPU graph:\" \\\n in str(e):\n raise ValueError(\"naive_program_generator could not generate a program using only the \"\n \"qubits supplied; expand the set of allowed qubits or supply \"\n \"a custom program_generator.\")\n raise\n\n return native_quil",
"def updateCircuit(circuit,\n verbose = False):\n if verbose:\n Warning(\"Currently only replaces to h,s,x,y,z gates\")\n possible_gates = list('hsxyz')\n \n # Convert circuit to qasm string so we can use string processing to switch\n qasm = circuit.qasm().split(';')\n \n \n # Make sure the gate you choose is not a cx gate\n gate_to_switch = np.random.randint(3,len(qasm)-1)\n while qasm[gate_to_switch][1:3] == 'cx' or qasm[gate_to_switch][1:3] == 'ba':\n gate_to_switch = np.random.randint(3,len(qasm)-1)\n \n # Get a new gate and make sure it's different form the current gate\n this_gate = qasm[gate_to_switch][1]\n new_gate = np.random.choice(possible_gates)\n while new_gate == this_gate:\n new_gate = np.random.choice(possible_gates)\n \n qasm[gate_to_switch] = '\\n' + new_gate + ' ' + qasm[gate_to_switch].split(' ')[1]\n \n qasm = ';'.join(qasm) \n circuit = qk.QuantumCircuit.from_qasm_str(qasm)\n \n if verbose:\n print(circuit)\n \n return circuit",
"def gen_bv_main(nQubits, hiddenString):\n Q_program = QuantumProgram()\n # Creating registers\n # qubits for querying the oracle and finding the hidden integer\n qr = Q_program.create_quantum_register(\"qr\", nQubits)\n # for recording the measurement on qr\n cr = Q_program.create_classical_register(\"cr\", nQubits-1)\n\n circuitName = \"BernsteinVazirani\"\n bvCircuit = Q_program.create_circuit(circuitName, [qr], [cr])\n\n # Apply Hadamard gates to the first\n # (nQubits - 1) before querying the oracle\n for i in range(nQubits-1):\n bvCircuit.h(qr[i])\n\n # Apply 1 and Hadamard gate to the last qubit\n # for storing the oracle's answer\n bvCircuit.x(qr[nQubits-1])\n bvCircuit.h(qr[nQubits-1])\n\n # Apply barrier so that it is not optimized by the compiler\n bvCircuit.barrier()\n\n # Apply the inner-product oracle\n hiddenString = hiddenString[::-1]\n for i in range(len(hiddenString)):\n if hiddenString[i] == \"1\":\n bvCircuit.cx(qr[i], qr[nQubits-1])\n hiddenString = hiddenString[::-1]\n # Apply barrier\n bvCircuit.barrier()\n\n # Apply Hadamard gates after querying the oracle\n for i in range(nQubits-1):\n bvCircuit.h(qr[i])\n\n # Measurement\n for i in range(nQubits-1):\n bvCircuit.measure(qr[i], cr[i])\n\n return Q_program, [circuitName, ]",
"def circuit(self):\n return self.operations + self.measurements"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles writing a circuit, both to the circuit file and to the test file.
|
def __write_circuit(self, circ):
# find the circuit id:
self.__circuit_id = self.__resultsdb.get_next_circuit_id()
# write the circuit to the results database:
row = {t2s.CIRCUIT_TESTNAME: self.__test_name,
t2s.CIRCUIT_CID: self.__circuit_id,
t2s.CIRCUIT_PID: self.__sec_param_id,
t2s.CIRCUIT_W: self.__W,
t2s.CIRCUIT_NUMLEVELS: circ.get_num_levels(),
t2s.CIRCUIT_OUTPUTGATETYPE: circ.get_output_gate_func(),
t2s.CIRCUIT_TESTTYPE:
igf.TEST_TYPES.number_to_value[self.__test_type]}
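        # count the gates of each type and accumulate the total gate count: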
num_gates = 0
for database_field in RESULTSDB_FIELDS_TO_GATE_TYPES.keys():
num_gates_this_type = circ.get_num_gates(
gate_func_name=RESULTSDB_FIELDS_TO_GATE_TYPES[database_field])
row[database_field] = num_gates_this_type
num_gates += num_gates_this_type
row[t2s.CIRCUIT_NUMGATES] = num_gates
self.__resultsdb.add_row(t2s.CIRCUIT_TABLENAME, row)
# write the circuit to the circuit file:
circuit_file_name = os.path.join(self.__circuit_dir_name,
str(self.__circuit_id) + ".cir")
circuit_file = self.__fho.get_file_object(circuit_file_name, 'w')
circuit_file.write(circ.display())
self.__fho.close_file_object(circuit_file)
# write the circuit location to the test file:
self.__test_file.write(
"".join(["CIRCUIT\n",
self.__get_testfile_path(circuit_file_name), "\n"]))
|
[
"def _write_component(component_spec: ComponentSpec, output_path: str):\n component_spec.save(output_path)",
"def test_write(self):\n reqs = Requirementz.from_lines(TEST_LINES)\n reqs.write(filename=TEST_FILE)",
"def write_products(self):\n if self.has_option('write.pattern'):\n try:\n self.write_scan_pattern()\n except Exception as err:\n log.warning(f\"Could not write scan pattern: {err}\")\n\n if self.configuration.get_bool('write.pixeldata'):\n out_file = os.path.join(self.configuration.work_path,\n f'pixel-{self.get_file_id()}.dat')\n try:\n self.channels.write_channel_data(\n out_file, header=self.get_ascii_header())\n except Exception as err:\n log.warning(f\"Could not write pixel data: {err}\")\n\n if self.configuration.get_bool('write.flatfield'):\n if self.has_option('write.flatfield.name'):\n out_name = self.configuration.get_string(\n 'write.flatfield.name')\n else:\n out_name = f'flat-{self.get_file_id()}.fits'\n out_file = os.path.join(self.configuration.work_path, out_name)\n try:\n self.channels.write_flat_field(out_file)\n except Exception as err:\n log.warning(f\"Could not write flat field: {err}\")\n\n if self.has_option('write.covar'):\n try:\n self.write_covariances()\n except Exception as err:\n log.warning(f\"Could not write covariances: {err}\")\n\n if self.configuration.get_bool('write.ascii'):\n try:\n self.write_ascii_time_stream()\n except Exception as err:\n log.warning(f'Could not write time stream data: {err}')\n\n if self.configuration.get_bool('write.signals'):\n for name, signal in self.signals.items():\n try:\n out_file = os.path.join(\n self.configuration.work_path,\n f'{signal.mode.name}-{self.get_file_id()}.tms')\n signal.write_signal_values(out_file)\n log.info(f\"Written signal data to {out_file}\")\n except Exception as err:\n log.warning(f\"Could not write signal data: {err}\")\n\n if self.has_option('write.spectrum'):\n window_name = self.configuration.get('write.spectrum',\n default='Hamming')\n window_size = self.configuration.get(\n 'write.spectrum.size',\n default=2 * self.frames_for(self.filter_time_scale))\n try:\n self.write_spectra(window_name=window_name,\n window_size=window_size)\n except Exception as err:\n log.warning(f\"Could not write spectra: {err}\")\n\n if self.has_option('write.coupling'):\n try:\n self.write_coupling_gains(\n self.configuration.get_list('write.coupling'))\n except Exception as err:\n log.warning(f\"Could not write coupling gains: {err}\")",
"def test_Component_write_to_file_simple(self, mock_f):\n\n comp = Component(\"test_component\", \"Arm\")\n\n comp._unfreeze()\n # Need to set up attribute parameters\n # Also need to categorize them as when created\n comp.parameter_names = []\n comp.parameter_defaults = {}\n comp.parameter_types = {}\n comp._freeze()\n\n with mock_f('test.txt', 'w') as m_fo:\n comp.write_component(m_fo)\n\n my_call = unittest.mock.call\n expected_writes = [my_call(\"COMPONENT test_component = Arm(\"),\n my_call(\")\\n\"),\n my_call(\"AT (0,0,0)\"),\n my_call(\" ABSOLUTE\\n\")]\n\n mock_f.assert_called_with('test.txt', 'w')\n handle = mock_f()\n handle.write.assert_has_calls(expected_writes, any_order=False)",
"def generate_circuit(config: Dict[str, Any]):\n print(\"-\" * 80)\n print(f\"Creating circuit number\")\n\n n_qubits = random.randint(config[\"min_n_qubits\"], config[\"max_n_qubits\"])\n n_ops = random.randint(config[\"min_n_ops\"], config[\"max_n_ops\"])\n\n if (config[\"strategy_program_generation\"] == \"uniform\" or\n config[\"strategy_program_generation\"] == \"weighted\"):\n gate_set = config[\"gate_set\"]\n if (config[\"strategy_program_generation\"] == \"uniform\"):\n for gate in gate_set.keys():\n gate_set[gate] = 1\n # generate a random circuit\n random_circuit_qasm_str = generate_randomly(\n n_qubits=n_qubits,\n n_ops=n_ops,\n gate_set=gate_set,\n random_state=np.random.RandomState(config[\"random_seed\"]))\n\n\n metadata_dict = {\n \"n_qubits\": n_qubits,\n \"n_ops\": n_ops,\n \"gate_set\": config[\"gate_set\"],\n \"strategy_program_generation\": config[\"strategy_program_generation\"]\n }\n\n print(f\"Saving circuit: with simulation results\")\n timestamp = int(time.time())\n qasm_file_name = config[\"program_id_pattern\"]\n qasm_file_name = \\\n qasm_file_name.replace(\"{{timestamp}}\", str(timestamp))\n qasm_file_name = \\\n qasm_file_name.replace(\"{{randint}}\", str(random.randint(0, 9999)).zfill(4))\n print(f\"qasm_file_name: {qasm_file_name}\")\n # get current timestamp as integer and use it as filename\n\n store_qasm(\n filename=qasm_file_name,\n qasm_content=random_circuit_qasm_str,\n out_folder=config[\"folder_generated_qasm\"],\n metadata_dict=metadata_dict\n )",
"def save_wires(wire_file, wire_network):\n wire_network.write_to_file(wire_file)",
"def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n file.write(self.molecule_name + '\\n\\n')\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in self.mol_coords))\n file.write('\\n\\n')",
"def write_to_file(self, data):",
"def WriteChem(self, fileName):\n print 'writing a .chem file', fileName\n chemhandle = TextFile.TextFile(fileName, 'w')\n \n chemhandle.write('! derived from the file:\\n')\n chemhandle.write('! ' + self.fileName + '\\n')\n for EACH in self.atomlist:\n #those with 999.000 don't have an assignment:\n if EACH.shift and EACH.shift != '999.000':\n## chemhandle.write('do ( store1 = ' + EACH.shift +\\\n## ' ) ( resid ' + EACH.residuenumber +\\\n## ' and name ' + EACH.atomname[0] + ' )\\n')\n\n\n## SHALL WE USE STORE 5 and 6 on top of store1 for the errors???\n if EACH.shifterror:\n outShiftError = EACH.shifterror\n else:\n outShiftError = '0.0'\n\n midshift = string.atof(EACH.shift)\n lowshift = string.atof(EACH.shift) - string.atof(outShiftError)\n upshift = string.atof(EACH.shift) + string.atof(outShiftError)\n chemhandle.write('do ( store1 = ' + str(midshift) +\\\n ' ) ( resid ' + EACH.residuenumber +\\\n ' and name ' + EACH.atomname[0] + ' )\\n')\n chemhandle.write('do ( store5 = ' + str(lowshift) +\\\n ' ) ( resid ' + EACH.residuenumber +\\\n ' and name ' + EACH.atomname[0] + ' )\\n')\n chemhandle.write('do ( store6 = ' + str(upshift) +\\\n ' ) ( resid ' + EACH.residuenumber +\\\n ' and name ' + EACH.atomname[0] + ' )\\n')\n \n chemhandle.write('\\n')\n chemhandle.close()",
"def _WriteTestToFile(self, output_file, testcase, msg):\n self.report_pipe = open(output_file, 'w')\n self.report_pipe.write(testcase)\n self.report_pipe.write('\\n\\n')\n self.report_pipe.write(msg)\n self.report_pipe.flush()\n self.report_pipe.close()",
"def __write_input(self, inp):\n # find the input id:\n self.__input_id = self.__resultsdb.get_next_input_id()\n # write the input to the results database:\n row = {t2s.INPUT_TESTNAME: self.__test_name,\n t2s.INPUT_IID: self.__input_id,\n t2s.INPUT_CID: self.__circuit_id,\n t2s.INPUT_NUMZEROS: inp.get_num_zeros(),\n t2s.INPUT_NUMONES: inp.get_num_ones()}\n self.__resultsdb.add_row(t2s.INPUT_TABLENAME, row)\n # write the input to an input file:\n input_file_name = os.path.join(self.__input_dir_name,\n str(self.__input_id) + \".input\")\n input_file = self.__fho.get_file_object(input_file_name, 'w')\n input_file.write(str(inp))\n self.__fho.close_file_object(input_file)\n # write the input location to the test file:\n self.__test_file.write(\n \"\".join([\"INPUT\\n\",\n self.__get_testfile_path(input_file_name), \"\\n\"]))",
"def test_Component_write_to_file_complex(self, mock_f):\n\n comp = setup_Component_with_parameters()\n\n # This setup has a required parameter.\n # If this parameter is not set, an error should be returned,\n # this will be tested in the next test.\n\n comp.new_par3 = \"1.25\"\n\n with mock_f('test.txt', 'w') as m_fo:\n comp.write_component(m_fo)\n\n my_call = unittest.mock.call\n expected_writes = [my_call(\"SPLIT 7 \"),\n my_call(\"COMPONENT test_component = Arm(\"),\n my_call(\"\\n\"),\n my_call(\" new_par1 = 1.5\"),\n my_call(\",\"),\n my_call(\" new_par2 = 3\"),\n my_call(\",\"),\n my_call(\"\\n\"),\n my_call(\" new_par3 = 1.25\"),\n my_call(\",\"),\n my_call(\" this_par = test_val\"),\n my_call(\",\"),\n my_call(\"\\n\"),\n my_call(\" that_par = \\\"txt_string\\\"\"),\n my_call(\")\\n\"),\n my_call(\"WHEN (1==2)\\n\"),\n my_call(\"AT (0.124,183.9,157)\"),\n my_call(\" RELATIVE home\\n\"),\n my_call(\"ROTATED (482,1240.2,0.185)\"),\n my_call(\" RELATIVE etc\\n\"),\n my_call(\"GROUP developers\\n\"),\n my_call(\"EXTEND %{\\n\"),\n my_call(\"nscat = 8;\\n\"),\n my_call(\"%}\\n\"),\n my_call(\"JUMP myself 37\\n\"),\n my_call(\"\\n\")]\n\n mock_f.assert_called_with('test.txt', 'w')\n handle = mock_f()\n handle.write.assert_has_calls(expected_writes, any_order=False)",
"def write_to_file(self, filename):\n\t\twrite_network(self.network, filename)",
"def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with open('bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))",
"def test_write_to_file():\n\n test_filename = \"doby_config_test_write_to_file.txt\"\n test_directory = get_random.text().upper()\n test_content = get_random.text(100)\n\n write_to_file(test_filename, test_directory, test_content)\n\n with open(f\"{test_directory}/{test_filename}\") as read_file:\n config = read_file.read()\n\n assert config == test_content + \"\\n\"\n\n shutil.rmtree(test_directory)",
"def test_write_network(self, fn):\n test_path = \"_test_simple_cxx/\"\n # subdirectory of pynucastro/networks/tests/\n reference_path = \"_simple_cxx_reference/\"\n # files that will be ignored if present in the generated directory\n skip_files = []\n\n # remove any previously generated files\n shutil.rmtree(test_path, ignore_errors=True)\n fn.write_network(odir=test_path)\n compare_network_files(test_path, reference_path, skip_files)\n\n # clean up generated files if the test passed\n shutil.rmtree(test_path)",
"def writeInputFile(beam,lattice,fname='test.in'):\n if sum(beam.multi_charge.n_particles) != beam.n_particles:\n #print('input error <- sum(beam.multi_charge.n_particles) not qual to beam.n_particles')\n if beam.multi_charge.n_states == 1:\n #print(' ... enforcing beam.multi_charge.n_particles[0] to beam.n_particles')\n beam.multi_charge.n_particles[0]=beam.n_particles\n else:\n raise ValueError('program terminating...')\n \n if beam.multi_charge.n_states == 1 and beam.multi_charge.current[0] != beam.current :\n #print('input error <- beam.multi_charge.current[0] not qual to beam.current')\n #print(' ... enforcing beam.multi_charge.current[0] to beam.current')\n beam.multi_charge.current[0] = beam.current\n \n beamStr = _beam2str(beam)\n for i in range(len(beamStr)):\n beamStr[i].append('\\n')\n beamStr[i] = \" \".join(beamStr[i])\n \n latticeStr = []\n for i in range(len(lattice)):\n latticeStr.append(_elem2str(lattice[i]))\n latticeStr[i].append('/')\n latticeStr[i].append('\\n')\n latticeStr[i] = \" \".join(latticeStr[i])\n \n f=open(fname,'w') \n f.writelines(['!================= Beam & Control Parameters ================= \\n'])\n f.writelines(beamStr)\n f.writelines(['!========================== Lattice ========================== \\n'])\n f.writelines(latticeStr)\n f.close()",
"def write_testfile(self, filename, data0, data1, header):\n _header = healsparse.fits_shim._make_header(header)\n _header['EXTNAME'] = 'COV'\n healsparse.fits_shim.fits.writeto(filename, data0,\n header=_header)\n _header['EXTNAME'] = 'SPARSE'\n healsparse.fits_shim.fits.append(filename, data1,\n header=_header, overwrite=False)",
"def writeOpticalFlow(path, flow) -> retval:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles writing an input, both to the input file and to the test file.
|
def __write_input(self, inp):
# find the input id:
self.__input_id = self.__resultsdb.get_next_input_id()
# write the input to the results database:
row = {t2s.INPUT_TESTNAME: self.__test_name,
t2s.INPUT_IID: self.__input_id,
t2s.INPUT_CID: self.__circuit_id,
t2s.INPUT_NUMZEROS: inp.get_num_zeros(),
t2s.INPUT_NUMONES: inp.get_num_ones()}
self.__resultsdb.add_row(t2s.INPUT_TABLENAME, row)
# write the input to an input file:
input_file_name = os.path.join(self.__input_dir_name,
str(self.__input_id) + ".input")
input_file = self.__fho.get_file_object(input_file_name, 'w')
input_file.write(str(inp))
self.__fho.close_file_object(input_file)
# write the input location to the test file:
self.__test_file.write(
"".join(["INPUT\n",
self.__get_testfile_path(input_file_name), "\n"]))
|
[
"def cmd_write_inp(self):\n self.ensure_base_path()\n\n self.log.debug(\"Writing inp file\")\n self.write_inp()\n\n self.cmd_write_bloominp()\n self.cmd_write_runid()",
"def test_write_orbitize_input():\n input_file = os.path.join(orbitize.DATADIR, \"test_val.csv\")\n test_table = read_file(input_file)\n output_file = os.path.join(orbitize.DATADIR, \"temp_test_orbitize_input.csv\")\n # If temp output file already exists, delete it\n if os.path.isfile(output_file):\n os.remove(output_file)\n try: # Catch these tests so that we remove temporary file\n # Test that we were able to write the table\n write_orbitize_input(test_table, output_file)\n assert os.path.isfile(output_file)\n # Test that we can read the table and check if it's correct\n test_table_2 = read_file(output_file)\n _compare_table(test_table_2)\n finally:\n # Remove temporary file\n os.remove(output_file)",
"def writeParAndInputFiles(self):\n pass",
"def main(Input_file, Output_file):\n readFile(Input_file)\n writeFile(Output_file)\n print(Input_file+\"-> Input File processed. \\n\"+ Output_file +\"-> Output File generated\")",
"def testReadWriteFile(self):\n tools = Tools(self.out)\n tools.PrepareOutputDir(None)\n data = 'some context here' * 2\n\n fname = tools.GetOutputFilename('bang')\n tools.WriteFile(fname, data)\n\n # Check that the file looks correct.\n compare = tools.ReadFile(fname)\n self.assertEqual(data, compare)",
"def writeInput(self, fileName):\n print \"No Simulation:writeInput method defined for pure base class\"\n sys.exit(0)",
"def test_write():\n\n with open(FILE_DIR+FILE_NAME, mode='w', encoding='utf8')as f:\n f.write(DATA)",
"def _WriteTestToFile(self, output_file, testcase, msg):\n self.report_pipe = open(output_file, 'w')\n self.report_pipe.write(testcase)\n self.report_pipe.write('\\n\\n')\n self.report_pipe.write(msg)\n self.report_pipe.flush()\n self.report_pipe.close()",
"def write_to_file(self, data):",
"def test_write(self):\n reqs = Requirementz.from_lines(TEST_LINES)\n reqs.write(filename=TEST_FILE)",
"def store(contest: str, problem: str, io: List[Tuple[str, str]]):\n directory = '{}/.cf-samples/{}/{}'.format(\n os.path.expanduser('~'), contest, problem)\n if not os.path.exists(directory):\n os.makedirs(directory)\n for i, (inp, out) in enumerate(io):\n with open('{}/{}.in'.format(directory, i), 'w') as f:\n f.write(inp)\n with open('{}/{}.out'.format(directory, i), 'w') as f:\n f.write(out)",
"def cmd_write_inp(self):\n self.ensure_base_path()\n\n self.log.debug(\"Writing inp file\")\n self.write_inp()\n\n self.cmd_write_bloominp()\n self.cmd_write_runid()\n\n if self.n_bottom_layers and not self.bottom_grid:\n self.write_delwaqg()",
"def test_file_write(self):\n\n args = self.parser.parse_args([self.str_len, '--file', '--raw-output'])\n\n self.randstr_output(args).process_parsed_args()\n output = sys.stdout.getvalue()\n\n filename = os.path.join(self.test_dir, args.file)\n with open(filename, 'r') as f:\n random_string = f.read()\n\n self.assertIn(random_string, output)",
"def test_by_input_output_text(self, testcase_folder):\n input_root = path.join(root_path, \"inputs\")\n output_root = path.join(root_path, \"outputs\")\n expect_root = path.join(root_path, \"expects\")\n diff_root = path.join(root_path, \"diff\")\n testcase_full_dir = path.join(input_root, testcase_folder)\n # get and sort all request files\n request_files = []\n for request_file in os.listdir(testcase_full_dir):\n if request_file.endswith(\".txt\"):\n # ignore non-request text files, i.e. .ignore files\n request_files.append(request_file)\n request_files.sort()\n\n for request_file in request_files:\n # parse input files\n request_file_path = path.join(testcase_full_dir, request_file)\n log.info(\"Test by input file %s\" % request_file_path)\n method, url, headers, body = parse_test_input(request_file_path)\n log.debug(\"Parsed request:\")\n log.debug(\"%s %s\\n%s\\n%s\" % (method, url, headers, body))\n\n resp = self.request(method, url, headers, body)\n assert resp != None\n\n # write response dict to a ini format file\n output_file_dir = path.join(output_root, testcase_folder)\n os.makedirs(output_file_dir, exist_ok=True)\n output_filename = request_file.replace(\"request_\", \"response_\")\n output_file_path = path.join(output_file_dir, output_filename)\n dict_to_ini(resp, output_file_path)\n\n # compare\n expect_file_dir = path.join(expect_root, testcase_folder)\n expect_file_path = path.join(expect_file_dir, output_filename)\n ignore_filename = request_file.replace(\".txt\", \".ignore\")\n ignore_file_path = path.join(testcase_full_dir, ignore_filename) \n diff_file_dir = path.join(diff_root, testcase_folder)\n os.makedirs(diff_file_dir, exist_ok=True)\n diff_file_path = path.join(diff_file_dir, output_filename)\n\n actual = ini_to_dict(output_file_path)\n expected = ini_to_dict(expect_file_path)\n ignore = parse_ignore_file(ignore_file_path)\n diff = diff_simple_dict(\n expected, actual, ignore=ignore, output_file=diff_file_path\n )\n assert diff == \"\", \"response does not match expected output\"\n log.info(\"Test %s[%s] passed.\" % (inspect.stack()[0].function, testcase_folder))",
"def write_to_files(self,catalog,input):\n\n\n\n metadata = self.metadata({'filename':self.uuid})\n catalog.write(json.dumps(metadata) + \"\\n\")\n text = self.parsed.get_payload().replace(\"\\n\",\"\\\\n\\\\n\").replace(\"\\t\",\" \")\n input.write(metadata['filename'] + \"\\t\" + text.encode(\"utf-8\",\"ignore\") + \"\\n\")",
"def _writeInputs(self, inputs):\n try:\n \n dList = [\"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n\\n\"\"\"]\n dList.append(\"<inputs>\\n\")\n for input in inputs.keys():\n dList.append(\" <input>%s</input>\\n\" % str(input))\n dList.append(\"</inputs>\\n\\n\")\n data = ''.join(dList)\n fh = tempfile.NamedTemporaryFile(prefix=\"scriptrunner_\", suffix=\".xml\")\n fh.write(data.encode(\"utf-8\"))\n fh.flush()\n except:\n fh = None\n return fh",
"def test_write_to_file():\n\n test_filename = \"doby_config_test_write_to_file.txt\"\n test_directory = get_random.text().upper()\n test_content = get_random.text(100)\n\n write_to_file(test_filename, test_directory, test_content)\n\n with open(f\"{test_directory}/{test_filename}\") as read_file:\n config = read_file.read()\n\n assert config == test_content + \"\\n\"\n\n shutil.rmtree(test_directory)",
"def test_standardise_data(self):\n\t\tassert False, \"Write Test\"",
"def writeInputFile(beam,lattice,fname='test.in'):\n if sum(beam.multi_charge.n_particles) != beam.n_particles:\n #print('input error <- sum(beam.multi_charge.n_particles) not qual to beam.n_particles')\n if beam.multi_charge.n_states == 1:\n #print(' ... enforcing beam.multi_charge.n_particles[0] to beam.n_particles')\n beam.multi_charge.n_particles[0]=beam.n_particles\n else:\n raise ValueError('program terminating...')\n \n if beam.multi_charge.n_states == 1 and beam.multi_charge.current[0] != beam.current :\n #print('input error <- beam.multi_charge.current[0] not qual to beam.current')\n #print(' ... enforcing beam.multi_charge.current[0] to beam.current')\n beam.multi_charge.current[0] = beam.current\n \n beamStr = _beam2str(beam)\n for i in range(len(beamStr)):\n beamStr[i].append('\\n')\n beamStr[i] = \" \".join(beamStr[i])\n \n latticeStr = []\n for i in range(len(lattice)):\n latticeStr.append(_elem2str(lattice[i]))\n latticeStr[i].append('/')\n latticeStr[i].append('\\n')\n latticeStr[i] = \" \".join(latticeStr[i])\n \n f=open(fname,'w') \n f.writelines(['!================= Beam & Control Parameters ================= \\n'])\n f.writelines(beamStr)\n f.writelines(['!========================== Lattice ========================== \\n'])\n f.writelines(latticeStr)\n f.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read all atoms in pdb file
|
def get_all(self):
with open(self.filename) as pdb:
atoms = [atom(line)
for line in pdb if line.startswith('ATOM')]
return atoms
|
[
"def parse_pdb(path):\n\n pdb_dict = defaultdict(lambda: defaultdict(list))\n res_dict = defaultdict(list)\n with open(path) as o:\n lines = o.readlines()\n for line in lines:\n if line[:4] == 'ATOM':\n atom_info = process_atom_info(line)\n identifier = '{}{}'.format(\n atom_info['res_name'],\n atom_info['res_no']\n )\n pdb_dict[atom_info['chain']][identifier].append(atom_info)\n if identifier not in res_dict[atom_info['chain']]:\n res_dict[atom_info['chain']].append(identifier)\n return pdb_dict,res_dict",
"def get_nucleic(self):\n with open(self.filename) as pdb:\n atoms = [atom(line) for line in pdb if re.search\n ('(^ATOM)\\s*\\S*\\s*\\S*\\s*'\n '(DA5|DA3|DA|DT5|DT3|DT|DG5|DG3|DG|DC5|DC3|DC)', line)]\n return atoms",
"def get_pdb_coords(pdbname):\n coords = []\n for line in open(pdbname,\"r\"):\n if line[:3] in ['TER','END']:\n break\n else:\n if line[:4] == \"ATOM\":\n coords.append([float(line[31:39]),float(line[39:47]),float(line[47:55])]) \n\n return np.array(coords)",
"def build_atom_set(pdbfile):\n\n atom_set = set()\n with open(pdbfile) as handle:\n for line in handle:\n if not line.startswith('ATOM'):\n continue\n unique_id = _build_atom_unique_id(line)\n atom_set.add(unique_id)\n\n return atom_set",
"def read_abfdata(full_path):\n #import pdb; pdb.set_trace()\n \n print \"Patience please, loading \", full_path, \"....\"\n \n reader = neo.io.AxonIO(filename=full_path)\n block = reader.read_block()\n data = []\n \n \n for i in range(len(block.segments)):\n seg = block.segments[i]\n data.append(seg.analogsignals)\n #import pdb; pdb.set_trace()\n return data, len(block.segments)",
"def read_database2(data, dabapointer, invlist): #,invdict,atomlist):\n parseswitch = False\n for inv in invlist:\n data.give_daba_molecule(inv)\n\n for line in dabapointer.readlines():\n if any('!' + i + '\\n' in line for i in invlist):\n mol = line[1:][:-1]\n parseswitch = True\n if parseswitch and '!=' in line: parseswitch = False\n\n if parseswitch and not '!' in line:\n if 'Nam' in line: name, invname = line.split(' ') \\\n [-1][:-1], line.split(' ')[-2]\n if 'Pos' in line: pos = line.split(' ')[1:]\n if 'ADP' in line:\n adp = line.split(' ')[1:]\n pos = np.array([float(i) for i in list(pos)])\n adp = np.array([float(i) for i in list(adp)])\n #---------------------------------------------------------- try:\n #-------------------- data[mol].add_atom(name=name,cart=pos)\n #------------------------------------------------------- except:\n #mol=mol.replace('.',',')\n data[mol].give_atom(name=name,\n cart=pos,\n invariom_name=invname)\n data[mol].atoms[-1].give_adp(key='cart_int', value=adp)",
"def _parse_molecule(lines, file_extension):\n if file_extension == '.pdb':\n #Extract residue information and assign column\n i = 0\n column_for_res = {}\n res_for_column = {}\n name_for_res = {}\n atoms_in_res = {}\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. \" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n if atom_name in ['CA', 'CB', 'C', 'N', 'O']:\n altloc = line[16]\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n resname = line[17:20]\n if resname in list(CONVERT_RES_NAMES):\n resname = CONVERT_RES_NAMES[resname]\n if res not in list(column_for_res):\n column_for_res[res] = i\n res_for_column[i] = res\n name_for_res[res] = resname\n atoms_in_res[res] = set()\n i += 1\n atoms_in_res[res].add(atom_name)\n\n #Extract coordinates and atoms information\n alphas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n betas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n carbons = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n nitrogens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n oxygens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n side_chains = []\n coords_array = [] #For calculate grid size\n\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. \" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n\n # atomic coordinates\n try:\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n except Exception:\n raise Exception(\"Invalid or missing coordinate(s) at \\\n residue %s, atom %s\" % (res, name))\n coord = [x, y, z]\n\n if atom_name == \"CA\":\n # Coordinates for the grid\n coords_array.append(coord)\n # Coordinates for searching sites\n alphas[column_for_res[res]] = coord\n elif atom_name == \"CB\":\n # Coordinates for searching sites\n betas[column_for_res[res]] = coord\n elif atom_name == \"C\":\n # Coordinates for searching sites\n carbons[column_for_res[res]] = coord\n elif atom_name == \"N\":\n # Coordinates for searching sites\n nitrogens[column_for_res[res]] = coord\n elif atom_name == \"O\":\n # Coordinates for searching sites\n oxygens[column_for_res[res]] = coord\n else: # Atom belongs to a side-chain\n # Coordinates for discarding clashes\n side_chains.append(coord)\n\n coords_array = np.array(coords_array)\n centroid = np.mean(coords_array, axis=0)\n max_distance = np.max(np.linalg.norm(coords_array - centroid, axis=1)) \\\n + DIST_PROBE_ALPHA['ALL'][1]\n\n alphas = np.array(alphas)\n betas = np.array(betas)\n carbons = np.array(carbons)\n nitrogens = np.array(nitrogens)\n oxygens = np.array(oxygens)\n side_chains = np.array(side_chains)\n return centroid, max_distance, alphas, betas, carbons, nitrogens, \\\n oxygens, column_for_res, res_for_column, name_for_res, \\\n atoms_in_res, side_chains",
"def load_pdb(self, pdb_data):\n for line in pdb_data:\n self._process_line(line)\n for model in self.models:\n model.load_residue_connections()\n model.load_peptide_bonds()",
"def _read_and_fix_pdb(self, path):\n log = Logger()\n\n # Skip PDB fixing if it has been deactivated\n if not self.fix_pdb:\n with open(path) as pdb_file:\n pdb_block = pdb_file.read()\n\n return pdb_block\n\n # Fix PDB\n missing_element = False\n any_fail = False\n pdb_block = ''\n with open(path) as pdb_file:\n for line in pdb_file:\n if line.startswith('ATOM') or line.startswith('HETATM'):\n if len(line) < 78 or line[76:78] == ' ':\n missing_element = True\n atom_name = line[12:16]\n # Try to infer element from atom name\n inferred_element = ''.join([c for c in atom_name\n if not c.isdigit()\n and c != ' '])\n\n # Format properly the element identifier\n if len(inferred_element) == 1:\n inferred_element = inferred_element.upper()\n elif len(inferred_element) == 2:\n inferred_element = inferred_element[0].upper() + \\\n inferred_element[1].lower()\n else:\n # We were expecting an element identifier of 1 or 2 chars\n any_fail = True\n break\n\n # Remove line breaks, if any\n line = line.strip()\n\n # Fill a short line with white spaces\n while(len(line) < 79):\n line += ' '\n\n # Add element to line (right-justified)\n line = line[:76] + '{:>2s}'.format(inferred_element) \\\n + line[79:] + '\\n'\n\n pdb_block += line\n\n if missing_element:\n log.warning(\n \"Warning: input PDB has no information about atom \"\n + \"elements and they were inferred from atom names. \"\n + \"Please, verify that the resulting elements are \"\n + \"correct\")\n\n if any_fail:\n log.error(\"Error: PDB could not be fixed\")\n with open(path) as pdb_file:\n pdb_block = pdb_file.read()\n\n return pdb_block",
"def exercise_atom_xyz_9999():\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 50.7529999.9999999.999 1.00166.17 A\n\"\"\"))\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 50.752 78.2739999.999 1.00166.17 A\n\"\"\"))\n try:\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 9999.9999999.9999999.999 1.00166.17 A\nATOM 27954 ZN ZN A1508 9999.9999999.9999999.999 1.00166.17 A\n\"\"\"))\n except RuntimeError as e:\n assert str(e).find(\n \"IOTBX_ASSERT(! (xyz[0]>9999 && xyz[1]>9999 && xyz[2]>9999)) failure.\") >0\n else: raise Exception_expected",
"def load_atoms_hetams(self):\n i = 0\n modelNumber = 0\n for line in self.__file:\n if line.startswith(\"ENDMDL\"):\n modelNumber+=1\n self.__rest[i] = line\n else:\n if line.startswith(\"ATOM\"):\n self.atoms[i] = Atom(line,modelNumber)\n else:\n if line.startswith(\"HETATM\"):\n self.hetatms[i]= Hetatm(line,modelNumber)\n else:\n if not line.isspace():\n self.__rest[i] = line\n i+=1\n self.__numberOfLines = i\n self.__file.close()\n \n #To extract the atoms of a residue\n if len(self.atoms)>0:\n firstAtomOrder = min(self.atoms.keys())\n self.__firstResidue = (self.atoms[firstAtomOrder].resSeq,self.atoms[firstAtomOrder].iCode) \n #print \"atoms \" + str(len(self.atoms)) \n #print \"hetatms \" + str(len(self.hetatms))\n #print \"rest \" + str(len(self.__rest))",
"def getchains(pdbfile):\n try:\n read = open(pdbfile,'r')\n except IOError:\n print(\"getchains: Couldn't open file %s\"%(pdbfile))\n raise\n else:\n result = []\n for line in read:\n if line[0:4]=='ATOM':\n if line[21] not in result and line[21].isalnum():\n result.append(line[21])\n elif \"_\" not in result and not line[21].isalnum():\n result.append(\"_\")\n read.close()\n return result",
"def read_qe(in_name):\n with open(in_name) as file_qe:\n content = file_qe.readlines()\n\n last_pos = 0\n for line in content[::-1]:\n if \"ATOMIC_POSITIONS\" in line.split():\n last_pos = content[::-1].index(line)\n break\n\n atoms = []\n for line in content[-last_pos:]:\n if line == \"End final coordinates\\n\":\n break\n elem, xPos, yPos, zPos = line.split()\n atom_2_add = Atom(elem, xPos, yPos, zPos, 0)\n atoms.append(atom_2_add)\n return atoms",
"def load_pdb_into_using_file_object(self, file_obj):\n\n #source_data = numpy.genfromtxt(file_obj, dtype=\"S6,S5,S5,S4,S2,S4,S4,S8,S8,S8,S6,S6,S10,S2,S2\", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 4, 2, 4, 4, 8, 8, 8, 6, 6, 10, 2, 2])\n source_data = numpy.genfromtxt(file_obj, dtype=\"S6,S5,S5,S5,S1,S4,S4,S8,S8,S8,S6,S6,S10,S2,S3\", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 5, 1, 4, 4, 8, 8, 8, 6, 6, 10, 2, 3])\n \n if source_data.ndim == 0: source_data = source_data.reshape(1, -1) # in case the pdb file has only one line\n \n # get the ones that are ATOM or HETATOM in the record_name\n or_matrix = numpy.logical_or((source_data['record_name'] == \"ATOM \"), (source_data['record_name'] == \"HETATM\"))\n indices_of_atom_or_hetatom = numpy.nonzero(or_matrix)[0]\n self.__parent_molecule.set_atom_information(source_data[indices_of_atom_or_hetatom])\n\n # now, some of the data needs to change types\n # first, fields that should be numbers cannot be empty strings\n for field in self.__parent_molecule.get_constants()['i8_fields'] + self.__parent_molecule.get_constants()['f8_fields']:\n check_fields = self.__parent_molecule.get_atom_information()[field]\n check_fields = numpy.core.defchararray.strip(check_fields)\n indices_of_empty = numpy.nonzero(check_fields == '')[0]\n self.__parent_molecule.get_atom_information()[field][indices_of_empty] = '0'\n \n # now actually change the type\n old_types = self.__parent_molecule.get_atom_information().dtype\n descr = old_types.descr\n for field in self.__parent_molecule.get_constants()['i8_fields']:\n index = self.__parent_molecule.get_atom_information().dtype.names.index(field)\n descr[index] = (descr[index][0], 'i8')\n for field in self.__parent_molecule.get_constants()['f8_fields']:\n index = self.__parent_molecule.get_atom_information().dtype.names.index(field)\n descr[index] = (descr[index][0], 'f8')\n new_types = numpy.dtype(descr)\n self.__parent_molecule.set_atom_information(self.__parent_molecule.get_atom_information().astype(new_types))\n \n # remove some of the fields that just contain empty data\n self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['empty', 'empty2']))\n \n # the coordinates need to be placed in their own special numpy array to facilitate later manipulation\n self.__parent_molecule.set_coordinates(numpy.vstack([self.__parent_molecule.get_atom_information()['x'], self.__parent_molecule.get_atom_information()['y'], self.__parent_molecule.get_atom_information()['z']]).T)\n self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['x', 'y', 'z'])) # now remove the coordinates from the atom_information object to save memory\n \n # string values in self.__parent_molecule.information.get_atom_information() should also be provided in stripped format for easier comparison\n fields_to_strip = ['name', 'resname', 'chainid', 'element']\n for f in fields_to_strip: self.__parent_molecule.set_atom_information(append_fields(self.__parent_molecule.get_atom_information(), f + '_stripped', data=numpy.core.defchararray.strip(self.__parent_molecule.get_atom_information()[f])))",
"def open_pdb(file_location):\n\n try\n with open(file_location) as f:\n data = f.readlines()\n except\n raise Exception(\"Unable to find pdb file\")\n\n coordinates = []\n symbols = []\n try\n for line in data:\n if 'ATOM' in line[0:6] or 'HETATM' in line[0:6]:\n symbols.append(line[76:79].strip())\n atom_coords = [float(x) for x in line[30:55].split()]\n coordinates.append(coords)\n except\n raise Exception(\"pdb not formatted properly\")\n\n coords = np.array(coordinates)\n symbols = np.array(symbols)\n\n return symbols, coords",
"def _parse_atoms(self):\n ##\n ##Parse the number of atoms\n ##\n #format: 3 ints, first one is blocksize (must equal 4), second is natoms, third is blocksize (must equal 4)\n log.debug(\"---in dcd.py, parse_atoms()\")\n blocksize, self._natoms, blocksize2 = struct.unpack(\"iii\", self._fo.read(12))\n if blocksize != 4 or blocksize2 != 4:\n log.error(\"blocksizes in the number of atoms record is broken\\n\")",
"def parse_pdb(pdbfile, hydrogen=False):\n\n elements = []\n coordinates = []\n with open(pdbfile) as pdb:\n for line in pdb:\n # only ATOM lines are of interest to us\n if not line.startswith('ATOM'):\n continue\n\n element = line[76:78].strip()\n # if element is empty, use the first letter of the\n # atomname as element\n if not element:\n element = line[12:16].strip()[0]\n\n # no hydrogens\n if element == 'H' and not hydrogen:\n continue\n\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n\n elements.append(element)\n coordinates.append([x, y, z])\n\n return elements, asarray(coordinates, dtype=float64)",
"def ReadRegine(self, fileName):\n if _DoesFileExist(fileName) == 0:\n return\n print 'reading an Regine ppm file:\\n ', fileName\n #important - clean atomlist and atomdicfa:\n self.atomlist = []\n self.atomdicfa = {}\n self.fileName = fileName\n fileStream = open(fileName)\n for eachLine in fileStream.readlines():\n lineList = string.split(eachLine)\n if len(lineList) < 4:\n continue\n ATOM = Atom()\n ATOM.shifterror = \"0.0\"\n if len(lineList) == 5: \n ATOM.shifterror = lineList[4]\n ATOM.shift = lineList[3]\n ATOM.aminoacid = string.upper(lineList[0])\n ATOM.atomname = (Nomenclature.ConvertCnsProtonNames(ATOM.aminoacid, lineList[2]),)\n ATOM.residuenumber = lineList[1]\n ATOM.segid = ' '\n self.AddAtom(ATOM)\n fileStream.close()",
"def write(self, atoms, out = open('atoms.pdb', 'w')):\n out.write('REMARK generated by pdb.py\\n')\n for atom in atoms:\n vals = (['ATOM', atom['atom_num'], atom['atom_name'],\n atom['res_name'], atom['res_num'],\n atom['x'], atom['y'], atom['z'],\n '1.00', '0.00', '\\n'])\n line = ' '.join(str(v) for v in vals)\n out.write(line)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read only nucleic acid atoms in pdb file
|
def get_nucleic(self):
with open(self.filename) as pdb:
atoms = [atom(line) for line in pdb if re.search
('(^ATOM)\s*\S*\s*\S*\s*'
'(DA5|DA3|DA|DT5|DT3|DT|DG5|DG3|DG|DC5|DC3|DC)', line)]
return atoms
|
[
"def exercise_atom_xyz_9999():\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 50.7529999.9999999.999 1.00166.17 A\n\"\"\"))\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 50.752 78.2739999.999 1.00166.17 A\n\"\"\"))\n try:\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 9999.9999999.9999999.999 1.00166.17 A\nATOM 27954 ZN ZN A1508 9999.9999999.9999999.999 1.00166.17 A\n\"\"\"))\n except RuntimeError as e:\n assert str(e).find(\n \"IOTBX_ASSERT(! (xyz[0]>9999 && xyz[1]>9999 && xyz[2]>9999)) failure.\") >0\n else: raise Exception_expected",
"def get_pdb_coords(pdbname):\n coords = []\n for line in open(pdbname,\"r\"):\n if line[:3] in ['TER','END']:\n break\n else:\n if line[:4] == \"ATOM\":\n coords.append([float(line[31:39]),float(line[39:47]),float(line[47:55])]) \n\n return np.array(coords)",
"def read_database2(data, dabapointer, invlist): #,invdict,atomlist):\n parseswitch = False\n for inv in invlist:\n data.give_daba_molecule(inv)\n\n for line in dabapointer.readlines():\n if any('!' + i + '\\n' in line for i in invlist):\n mol = line[1:][:-1]\n parseswitch = True\n if parseswitch and '!=' in line: parseswitch = False\n\n if parseswitch and not '!' in line:\n if 'Nam' in line: name, invname = line.split(' ') \\\n [-1][:-1], line.split(' ')[-2]\n if 'Pos' in line: pos = line.split(' ')[1:]\n if 'ADP' in line:\n adp = line.split(' ')[1:]\n pos = np.array([float(i) for i in list(pos)])\n adp = np.array([float(i) for i in list(adp)])\n #---------------------------------------------------------- try:\n #-------------------- data[mol].add_atom(name=name,cart=pos)\n #------------------------------------------------------- except:\n #mol=mol.replace('.',',')\n data[mol].give_atom(name=name,\n cart=pos,\n invariom_name=invname)\n data[mol].atoms[-1].give_adp(key='cart_int', value=adp)",
"def _read_and_fix_pdb(self, path):\n log = Logger()\n\n # Skip PDB fixing if it has been deactivated\n if not self.fix_pdb:\n with open(path) as pdb_file:\n pdb_block = pdb_file.read()\n\n return pdb_block\n\n # Fix PDB\n missing_element = False\n any_fail = False\n pdb_block = ''\n with open(path) as pdb_file:\n for line in pdb_file:\n if line.startswith('ATOM') or line.startswith('HETATM'):\n if len(line) < 78 or line[76:78] == ' ':\n missing_element = True\n atom_name = line[12:16]\n # Try to infer element from atom name\n inferred_element = ''.join([c for c in atom_name\n if not c.isdigit()\n and c != ' '])\n\n # Format properly the element identifier\n if len(inferred_element) == 1:\n inferred_element = inferred_element.upper()\n elif len(inferred_element) == 2:\n inferred_element = inferred_element[0].upper() + \\\n inferred_element[1].lower()\n else:\n # We were expecting an element identifier of 1 or 2 chars\n any_fail = True\n break\n\n # Remove line breaks, if any\n line = line.strip()\n\n # Fill a short line with white spaces\n while(len(line) < 79):\n line += ' '\n\n # Add element to line (right-justified)\n line = line[:76] + '{:>2s}'.format(inferred_element) \\\n + line[79:] + '\\n'\n\n pdb_block += line\n\n if missing_element:\n log.warning(\n \"Warning: input PDB has no information about atom \"\n + \"elements and they were inferred from atom names. \"\n + \"Please, verify that the resulting elements are \"\n + \"correct\")\n\n if any_fail:\n log.error(\"Error: PDB could not be fixed\")\n with open(path) as pdb_file:\n pdb_block = pdb_file.read()\n\n return pdb_block",
"def get_all(self):\n with open(self.filename) as pdb:\n atoms = [atom(line)\n for line in pdb if line.startswith('ATOM')]\n return atoms",
"def getchains(pdbfile):\n try:\n read = open(pdbfile,'r')\n except IOError:\n print(\"getchains: Couldn't open file %s\"%(pdbfile))\n raise\n else:\n result = []\n for line in read:\n if line[0:4]=='ATOM':\n if line[21] not in result and line[21].isalnum():\n result.append(line[21])\n elif \"_\" not in result and not line[21].isalnum():\n result.append(\"_\")\n read.close()\n return result",
"def read_abfdata(full_path):\n #import pdb; pdb.set_trace()\n \n print \"Patience please, loading \", full_path, \"....\"\n \n reader = neo.io.AxonIO(filename=full_path)\n block = reader.read_block()\n data = []\n \n \n for i in range(len(block.segments)):\n seg = block.segments[i]\n data.append(seg.analogsignals)\n #import pdb; pdb.set_trace()\n return data, len(block.segments)",
"def check_and_fix_pdbatomnames(pdb_file):\n with open(pdb_file) as pdb:\n content = pdb.readlines()\n check_duplicated_pdbatomnames(content)\n for i, line in enumerate(content):\n if line.startswith(\"HETATM\") and line[21:22] == \"L\":\n atom_name = line[12:16]\n if atom_name.strip().startswith(\"G\"):\n new_atom_name = line[77:78] + atom_name.strip()\n line_to_list = list(line)\n line_to_list[12:16] = new_atom_name + \" \" * (4-len(new_atom_name))\n line_to_list = \"\".join(line_to_list)\n content[i] = line_to_list\n check_duplicated_pdbatomnames(content)\n new_pdb = \"\".join(content)\n with open(pdb_file, \"w\") as writepdb:\n writepdb.write(\"{}\".format(new_pdb))",
"def GetPdbCoordinates( filename,\n select_atom = (\"CA\",),\n select_chain = None,\n renumber = None,\n only_coordinates = None):\n\n if not os.path.exists(filename):\n raise \"pdb file %s does not exist\" % filename\n\n if filename[-3:] == \".gz\":\n lines = os.popen(\"gunzip < %s\" % filename).readlines()\n else:\n lines = open(filename,\"r\").readlines()\n\n result = []\n\n current_number = 1\n \n for line in lines:\n if line[:6] not in (\"ATOM \", \"HETATM\"): continue\n\n chain = line[21]\n number = line[22:26]\n aa = line[17:20]\n atom = string.strip(line[13:17])\n \n x,y,z = map(string.atof, (line[30:38], line[38:46], line[46:54]))\n\n if select_chain and chain not in select_chain: continue\n if select_atom and atom not in select_atom: continue\n \n if renumber:\n number = current_number\n current_number += 1\n\n if AMINOACIDS.has_key(aa):\n aminoacid = AMINOACIDS[aa]\n else:\n sys.stderr.write( \"# error in PdbCoordinates: aminoacid %s not known\\n\" % aa )\n continue\n\n if only_coordinates:\n result.append( (x, y, z) )\n else:\n result.append( (number, aminoacid, x, y, z) ) \n \n return result",
"def read_fsa_db(db,fp,org_id) :\n\n cdsseq=\"\"\n tag=\"\"\n for line in fp :\n if line[0] == '>' :\n loaddb(cdsseq,org_id,tag,db)\n\n tag = line[1:].strip().split()[0]\n tag=tag.replace(\"ORFN:\",\"ORFP_\")\n cdsseq = \"\"\n else :\n cdsseq += line.strip()\n \n loaddb(cdsseq,org_id,tag,db)",
"def build_atom_set(pdbfile):\n\n atom_set = set()\n with open(pdbfile) as handle:\n for line in handle:\n if not line.startswith('ATOM'):\n continue\n unique_id = _build_atom_unique_id(line)\n atom_set.add(unique_id)\n\n return atom_set",
"def ExtractChainText(file_name):\n\n pdb_file = open(file_name, 'r')\n\n chain_atoms_found = {}\n chain_text = {}\n\n for line in pdb_file:\n if line[0:6] == 'ATOM ':\n chainID = line[21:22]\n if (not chainID in chain_atoms_found):\n chain_atoms_found[chainID] = {}\n chain_text[chainID] = []\n for atom_name in heavy_atoms:\n chain_atoms_found[chainID][atom_name] = False\n\n chain_text[chainID].append(line)\n\n atom_type = line[12:16]\n res_type = line[17:20]\n if res_type in res_types:\n #if ((atom_type in atoms_found) and (not atoms_found[atom_type])):\n # print('found atom_type=\\\"'+atom_type+'\\\", in res_type=\\\"'+res_type+'\\\"')\n chain_atoms_found[chainID][atom_type] = True\n\n\n for chainID in chain_atoms_found:\n search_criteria_satisfied = True\n for atom_type in chain_atoms_found[chainID]:\n if (not chain_atoms_found[chainID][atom_type]):\n search_criteria_satisfied = False\n if search_criteria_satisfied:\n sys.stderr.write(\" Chain \\\"\"+chainID+\"\\\" contains DNA.\\n\")\n # Then create a new PDB file with a name similar to the original:\n pdb_file_chain_name = file_name\n i = pdb_file_chain_name.lower().rfind('.pdb')\n if i != -1:\n pdb_file_chain_name = (pdb_file_chain_name[:i] +\n '_' + chainID +\n pdb_file_chain_name[i:])\n else:\n pdb_file_chain_name = file_name + '_' + chainID\n sys.stderr.write(' Creating file \\\"'+pdb_file_chain_name+'\\\"\\n')\n pdb_file_chain = open(pdb_file_chain_name, 'w')\n pdb_file_chain.write(''.join(chain_text[chainID]))\n pdb_file_chain.close()\n\n pdb_file.close()",
"def ReadChem(self, fileName):\n if _DoesFileExist(fileName) == 0:\n return\n print 'reading an ARIA chemical shift file', fileName\n\n #important - clean atomlist and atomdicfa:\n self.atomlist = []\n self.atomdicfa = {}\n self.fileName = fileName\n\n #get the file without the comments:\n bigstring = DeleteCnsComments.GetString(fileName)\n# print bigstring #test\n #split the string in lines:\n lines = string.split(bigstring, '\\n')\n\n ppmAssign = re.compile('do\\s*\\(\\s*store1\\s*=\\s*([0-9-+.Ee]+)\\s*\\)\\s*\\(\\s*resid\\s*(\\d+)\\s*and\\s*name\\s*(\\S+)\\s*\\)')\n \n for line in lines:\n #for wrong or empty lines:\n if len(line) < 20:\n continue\n# print line #test\n linelist = string.split(line)\n ATOM = Atom()\n ppmSearch = ppmAssign.search(line)\n\n # new for store5 * store6 -> skip if it's not store1\n # and pattern doesn't match:\n if not ppmSearch:\n continue\n\n ATOM.residuenumber = ppmSearch.group(2)\n ATOM.aminoacid = None\n ATOM.segid = None\n ATOM.atomname = (ppmSearch.group(3), )\n ATOM.shift = ppmSearch.group(1)\n ATOM.shifterror = '0.0'\n self.AddAtom(ATOM)",
"def parse_pdb(path):\n\n pdb_dict = defaultdict(lambda: defaultdict(list))\n res_dict = defaultdict(list)\n with open(path) as o:\n lines = o.readlines()\n for line in lines:\n if line[:4] == 'ATOM':\n atom_info = process_atom_info(line)\n identifier = '{}{}'.format(\n atom_info['res_name'],\n atom_info['res_no']\n )\n pdb_dict[atom_info['chain']][identifier].append(atom_info)\n if identifier not in res_dict[atom_info['chain']]:\n res_dict[atom_info['chain']].append(identifier)\n return pdb_dict,res_dict",
"def load_atg_atb(c_id):\n import os\n from ve.config import data237_complex_root as complex_dir\n \n from ve.util.load_pdb import load_pdb_struct\n \n return load_pdb_struct(os.path.join(complex_dir, c_id, \"antigen.pdb\"), MyResidue), load_pdb_struct(os.path.join(complex_dir, c_id, \"antibody.pdb\"), MyResidue)",
"def _parse_molecule(lines, file_extension):\n if file_extension == '.pdb':\n #Extract residue information and assign column\n i = 0\n column_for_res = {}\n res_for_column = {}\n name_for_res = {}\n atoms_in_res = {}\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. \" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n if atom_name in ['CA', 'CB', 'C', 'N', 'O']:\n altloc = line[16]\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n resname = line[17:20]\n if resname in list(CONVERT_RES_NAMES):\n resname = CONVERT_RES_NAMES[resname]\n if res not in list(column_for_res):\n column_for_res[res] = i\n res_for_column[i] = res\n name_for_res[res] = resname\n atoms_in_res[res] = set()\n i += 1\n atoms_in_res[res].add(atom_name)\n\n #Extract coordinates and atoms information\n alphas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n betas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n carbons = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n nitrogens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n oxygens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n side_chains = []\n coords_array = [] #For calculate grid size\n\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. \" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n\n # atomic coordinates\n try:\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n except Exception:\n raise Exception(\"Invalid or missing coordinate(s) at \\\n residue %s, atom %s\" % (res, name))\n coord = [x, y, z]\n\n if atom_name == \"CA\":\n # Coordinates for the grid\n coords_array.append(coord)\n # Coordinates for searching sites\n alphas[column_for_res[res]] = coord\n elif atom_name == \"CB\":\n # Coordinates for searching sites\n betas[column_for_res[res]] = coord\n elif atom_name == \"C\":\n # Coordinates for searching sites\n carbons[column_for_res[res]] = coord\n elif atom_name == \"N\":\n # Coordinates for searching sites\n nitrogens[column_for_res[res]] = coord\n elif atom_name == \"O\":\n # Coordinates for searching sites\n oxygens[column_for_res[res]] = coord\n else: # Atom belongs to a side-chain\n # Coordinates for discarding clashes\n side_chains.append(coord)\n\n coords_array = np.array(coords_array)\n centroid = np.mean(coords_array, axis=0)\n max_distance = np.max(np.linalg.norm(coords_array - centroid, axis=1)) \\\n + DIST_PROBE_ALPHA['ALL'][1]\n\n alphas = np.array(alphas)\n betas = np.array(betas)\n carbons = np.array(carbons)\n nitrogens = np.array(nitrogens)\n oxygens = np.array(oxygens)\n side_chains = np.array(side_chains)\n return centroid, max_distance, alphas, betas, carbons, nitrogens, \\\n oxygens, column_for_res, res_for_column, name_for_res, \\\n atoms_in_res, side_chains",
"def readMOLFileWithConnectivity(filename):\n\tf = open(filename, 'r')\n\t#first three lines are irrelevant\n\tf.readline()\n\tf.readline()\n\tf.readline()\n\n\tline=f.readline()\n\tn = int(line[0:3])#read the number of atoms\n\tb = int(line[3:6])#read the number of bonds\n\t#initialize the atomTypes, connectivity, and atomCoords arrays\n\t#atomCoords = [[0.0 for j in range(3)] for i in range(n)]\n\tatomCoords = [[] for i in range(n)]\n\tconnectivity = [[0,0] for i in range(b)]\n\tatomTypes = [0 for i in range(n)]\n\n\t#read info from the mole file into the arrays\n\tfor i in range(n):\n\t\tsplitLine = f.readline().split()\n\t\tatomCoords[i] = [float(splitLine[0]), float(splitLine[1]), float(splitLine[2])]\n\t\tatomTypes[i] = atomicSymbolToNumber(splitLine[3])\n\n\t#read connectivity info from mole file\n\tfor i in range(b):\n\t\tline = f.readline()\n\t\tconnectivity[i][0] = int(line[0:3])\n\t\tconnectivity[i][1] = int(line[3:6])\n\n\tf.close() #close the file\n\n\treturn MolecularGeometry(atomTypes,atomCoords,connectivity) #return the MolecularGeometry object",
"def read_ExAC(args, db):\n db[\"exac\"] = {}\n dbsnpfiles = [\"/\" + db[\"exac_freqfile\"]]\n for dbsnpfile in dbsnpfiles:\n with open(dbsnpfile, \"r\") as fin:\n for line in fin:\n allele = {}\n line_l = line.strip().split()\n chrom, pos, rs, chrom19, pos19, allelelist = line_l\n chrom = chrom.strip(\"chr\")\n if (chrom, pos) not in db[\"scan\"]:\n continue\n if allelelist != \"NA\":\n for al in allelelist.split(\",\"):\n # al = population:allele:frequency\n p, a, f = al.split(\":\")\n if a not in allele:\n allele[a] = {}\n allele[a][p] = float(f)\n db[\"exac\"][chrom, pos] = [rs, allele, chrom19, pos19]",
"def load_pdb_into_using_file_object(self, file_obj):\n\n #source_data = numpy.genfromtxt(file_obj, dtype=\"S6,S5,S5,S4,S2,S4,S4,S8,S8,S8,S6,S6,S10,S2,S2\", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 4, 2, 4, 4, 8, 8, 8, 6, 6, 10, 2, 2])\n source_data = numpy.genfromtxt(file_obj, dtype=\"S6,S5,S5,S5,S1,S4,S4,S8,S8,S8,S6,S6,S10,S2,S3\", names=['record_name', 'serial', 'name', 'resname', 'chainid', 'resseq', 'empty', 'x', 'y', 'z', 'occupancy', 'tempfactor', 'empty2', 'element', 'charge'], delimiter=[6, 5, 5, 5, 1, 4, 4, 8, 8, 8, 6, 6, 10, 2, 3])\n \n if source_data.ndim == 0: source_data = source_data.reshape(1, -1) # in case the pdb file has only one line\n \n # get the ones that are ATOM or HETATOM in the record_name\n or_matrix = numpy.logical_or((source_data['record_name'] == \"ATOM \"), (source_data['record_name'] == \"HETATM\"))\n indices_of_atom_or_hetatom = numpy.nonzero(or_matrix)[0]\n self.__parent_molecule.set_atom_information(source_data[indices_of_atom_or_hetatom])\n\n # now, some of the data needs to change types\n # first, fields that should be numbers cannot be empty strings\n for field in self.__parent_molecule.get_constants()['i8_fields'] + self.__parent_molecule.get_constants()['f8_fields']:\n check_fields = self.__parent_molecule.get_atom_information()[field]\n check_fields = numpy.core.defchararray.strip(check_fields)\n indices_of_empty = numpy.nonzero(check_fields == '')[0]\n self.__parent_molecule.get_atom_information()[field][indices_of_empty] = '0'\n \n # now actually change the type\n old_types = self.__parent_molecule.get_atom_information().dtype\n descr = old_types.descr\n for field in self.__parent_molecule.get_constants()['i8_fields']:\n index = self.__parent_molecule.get_atom_information().dtype.names.index(field)\n descr[index] = (descr[index][0], 'i8')\n for field in self.__parent_molecule.get_constants()['f8_fields']:\n index = self.__parent_molecule.get_atom_information().dtype.names.index(field)\n descr[index] = (descr[index][0], 'f8')\n new_types = numpy.dtype(descr)\n self.__parent_molecule.set_atom_information(self.__parent_molecule.get_atom_information().astype(new_types))\n \n # remove some of the fields that just contain empty data\n self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['empty', 'empty2']))\n \n # the coordinates need to be placed in their own special numpy array to facilitate later manipulation\n self.__parent_molecule.set_coordinates(numpy.vstack([self.__parent_molecule.get_atom_information()['x'], self.__parent_molecule.get_atom_information()['y'], self.__parent_molecule.get_atom_information()['z']]).T)\n self.__parent_molecule.set_atom_information(self.__parent_molecule.numpy_structured_array_remove_field(self.__parent_molecule.get_atom_information(), ['x', 'y', 'z'])) # now remove the coordinates from the atom_information object to save memory\n \n # string values in self.__parent_molecule.information.get_atom_information() should also be provided in stripped format for easier comparison\n fields_to_strip = ['name', 'resname', 'chainid', 'element']\n for f in fields_to_strip: self.__parent_molecule.set_atom_information(append_fields(self.__parent_molecule.get_atom_information(), f + '_stripped', data=numpy.core.defchararray.strip(self.__parent_molecule.get_atom_information()[f])))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write selected atoms to pdb
|
def write(self, atoms, out = open('atoms.pdb', 'w')):
out.write('REMARK generated by pdb.py\n')
for atom in atoms:
vals = (['ATOM', atom['atom_num'], atom['atom_name'],
atom['res_name'], atom['res_num'],
atom['x'], atom['y'], atom['z'],
'1.00', '0.00', '\n'])
line = ' '.join(str(v) for v in vals)
out.write(line)
|
[
"def write_pdb(self, which = 1):\n n = which\n for model in self.structure:\n if n == which:\n print(\"MODEL%9s\"%which)\n n += 1\n else:\n print(\"ENDMDL\\nMODEL%9s\"%n)\n n += 1\n for atom in model:\n print(\"%-6s%5s %4s %3s %s%4s %8s%8s%8s%6s%6s %3s\"%tuple(atom))\n print(\"ENDMDL\")",
"def writeMembranePDB(filename, membrane):\n length = membrane.shape[0]\n f = open(filename, 'w')\n for i in range(length):\n f.write('ATOM%7d Q1 NE1 Q%4d% 12.3f% 8.3f% 8.3f\\n' % (i,i,membrane[i,0],membrane[i,1],membrane[i,2]))\n f.close()",
"def toPDB(self, backboneOnly=False, CAlphaPlaceholders=True, verbose=True):\n #Change: uses a C-alpha placeholder atom entry with no x,y,z coordinates if the residue has no atoms\n #Not Thread-Safe\n if self.getPdbID()[0] == '_':\n header = ''\n else:\n header = ('HEADER' + ' '*56 + self.getPdbID()).ljust(80) + '\\n'\n atom_index=1\n Helix.serialNo=0 #This is what makes it not thread-safe\n dateTime = str(QtCore.QDateTime.currentDateTime().toString())\n gorgonLine1 = 'REMARK 5'.ljust(80) + '\\n'\n gorgonLine2 = ('REMARK 5 Gorgon (C) 2005-2008 output on %s' % dateTime).ljust(80) + '\\n'\n s = header + gorgonLine1 + gorgonLine2\n \n i = 1\n residueIndices = self.residueRange()\n for index in residueIndices[::13]:\n resList = [' ']*13\n for n in range(13):\n try:\n resList[n] = self[index+n].symbol3\n except (KeyError, IndexError):\n continue \n line = 'SEQRES %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s' % ( str(i).rjust(3), self.getChainID(), str(len(residueIndices)).rjust(4), \n resList[0], resList[1], resList[2], resList[3], resList[4], resList[5], resList[6], \n resList[7], resList[8], resList[9], resList[10], resList[11], resList[12] )\n line += ' '*10 + '\\n'\n s += line\n i += 1\n \n for serialNo in sorted(self.helices.keys()):\n helix=self.helices[serialNo]\n s=s+helix.toPDB()\n\n for sheetID in sorted(self.sheets.keys()):\n sheet=self.sheets[sheetID]\n s=s+sheet.toPDB(sheetID)\n\n #TODO: figure out how to handle orphan strands\n for strandID in sorted(self.orphanStrands.keys()):\n strand = self.orphanStrands[strandID]\n s=s+strand.toPDB()\n\n for residue_index in self.residueRange():\n residue=self[residue_index]\n\n if backboneOnly:\n atoms = ['CA']\n else:\n atoms=residue.getAtomNames()\n\n try:\n serial = str(atom_index).rjust(5)\n altLoc = ' '\n resName = residue.symbol3\n chainID = self.chainID\n resSeq = str(residue_index).rjust(4)\n iCode = ' '\n occupancy = ' '*6 #\"%6.2f \" %atom.getOccupancy()\n tempFactor = ' '*6 #\"%6.2f \" %atom.getTempFactor()\n \n if not atoms:\n if CAlphaPlaceholders:\n name = 'CA'.center(4)\n x = ' '*8\n y = ' '*8\n z = ' '*8\n element = ' C'\n charge = ' '\n \n line = 'ATOM %s %s%s%s %s%s%s %s%s%s%s%s %s%s\\n' % (serial, name, altLoc, resName, chainID, \n resSeq, iCode, x, y, z, occupancy, tempFactor, element, charge)\n s = s + line\n atom_index += 1\n for atom_name in atoms:\n atom=residue.getAtom(atom_name)\n \n serial = str(atom_index).rjust(5)\n \n name = str(atom_name).center(4)\n x = \"%8.3f\" %atom.getPosition().x()\n if len(x) > 8:\n raise ValueError\n y = \"%8.3f\" %atom.getPosition().y()\n if len(y) > 8:\n raise ValueError\n z = \"%8.3f\" %atom.getPosition().z()\n if len(z) > 8:\n raise ValueError\n element = atom.getElement().rjust(2)\n charge = ' '\n line = 'ATOM %s %s%s%s %s%s%s %s%s%s%s%s %s%s\\n' % (serial, name, altLoc, resName, chainID, \n resSeq, iCode, x, y, z, occupancy, tempFactor, element, charge)\n s = s + line\n atom_index += 1\n \n #Mike's method below:\n '''\n atom=residue.getAtom(atom_name)\n s=s+ \"ATOM\" + ' '\n s=s+ str(atom_index).rjust(6) + ' '\n atom_index=atom_index+1 \n s=s+ atom_name.rjust(3) + ' '\n s=s+ residue.symbol3.rjust(4) + ' '\n s=s+ self.chainID.rjust(1) + ' ' #chainID\n s=s+ str(residue_index).rjust(3) + ' '\n s=s+ \"%11.3f \" %atom.getPosition().x()\n s=s+ \"%7.3f \" %atom.getPosition().y()\n s=s+ \"%7.3f \" %atom.getPosition().z()\n s=s+ \"%5.2f \" %atom.getOccupancy()\n s=s+ \"%5.2f \" %atom.getTempFactor()\n s=s+ atom.getElement().rjust(11) + ' ' +\"\\n\"\n '''\n except KeyError:\n if 
verbose:\n print \"Chain.toPDB() warning: No atom record for %s in %s%s.\" %(atom_name,residue_index,residue.symbol3)\n if self.residueRange():\n s=s+ \"TER\\n\"\n\n return s",
"def write_pdb(fileobj, images):\n if isinstance(fileobj, str):\n fileobj = paropen(fileobj, 'w')\n\n if not isinstance(images, (list, tuple)):\n images = [images]\n\n if images[0].get_pbc().any():\n from ase.lattice.spacegroup.cell import cell_to_cellpar\n cellpar = cell_to_cellpar( images[0].get_cell())\n # ignoring Z-value, using P1 since we have all atoms defined explicitly\n format = 'CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1\\n'\n fileobj.write(format % (cellpar[0], cellpar[1], cellpar[2], cellpar[3], cellpar[4], cellpar[5]))\n\n # 1234567 123 6789012345678901 89 67 456789012345678901234567 890\n format = 'ATOM %5d %4s MOL 1 %8.3f%8.3f%8.3f 1.00 0.00 %2s \\n'\n\n # RasMol complains if the atom index exceeds 100000. There might\n # be a limit of 5 digit numbers in this field.\n MAXNUM = 100000\n\n symbols = images[0].get_chemical_symbols()\n natoms = len(symbols)\n \n for n,atoms in enumerate(images):\n fileobj.write('MODEL '+str(n+1)+'\\n')\n p = atoms.get_positions()\n for a in range(natoms):\n x, y, z = p[a]\n fileobj.write(format % (a % MAXNUM, symbols[a], x, y, z, symbols[a].rjust(2)))\n fileobj.write('ENDMDL\\n')",
"def save_pdb(self, path=None):\n\n if self.atoms is None:\n self.get_atoms()\n\n path = path if path else ''\n\n if path is not None and path != '':\n if not os.path.isdir(path):\n print('Directory does not exist. Will try creating it...')\n os.mkdir(path)\n\n ase.io.write(path + self.name + '.pdb', self.atoms,\n format='proteindatabank')",
"def savePDB(pdb, filename):\n prody.writePDB(filename, pdb)",
"def write_pdb(coor,beta,fr):\n\tglobal name_modifier\n\tglobal verbose\n\n\tif verbose >= 1:\n\t\tprint 'writing pdb...'\n\toutfile = open(\"emap_\"+str(fr)+str(name_modifier)+\".pdb\",\"w\")\n\tcount_zeros = 0\n\tfor i in range(len(coor)):\n\t\tif (coor[i][0]!=0 and coor[i][1]!=0 and coor[i][2]!=0):\n\t\t\tt1 = \"ATOM\"\t\t\t\t\t# ATOM\n\t\t\tt2 = 1\t\t\t\t\t\t# INDEX\n\t\t\tt3 = \"C\"\t\t\t\t\t# ATOM NAME\n\t\t\tt4 = \"\"\t\t\t\t\t\t# ALTERNATE LOCATION INDICATOR\n\t\t\tt5 = \"AAA\"\t\t\t\t\t# RESIDUE NAME\n\t\t\tt6 = \"X\"\t\t\t\t\t# CHAIN\n\t\t\tt7 = 0\t\t\t\t\t\t# RESIDUE NUMBER\n\t\t\tt8 = \"\"\t\t\t\t\t\t# INSERTION CODE\n\t\t\tt9 = float(coor[i][0])\t\t\t\t# X\n\t\t\tt10 = float(coor[i][1])\t\t\t\t# Y\n\t\t\tt11 = float(coor[i][2])\t\t\t\t# Z\n\t\t\tt12 = 0.0\t\t\t\t\t# OCCUPANCY\n\t\t\tt13 = beta[i]\t\t\t\t\t# TEMPERATURE FACTOR\n\t\t\tt14 = \"\"\t\t\t\t\t# ELEMENT SYMBOL\n\t\t\tt15 = \"\"\t\t\t\t\t# CHARGE ON ATOM\n\t\t\toutfile.write(\"{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:>2s}{:2s}\\n\".format(t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14,t15))\n\toutfile.close()\n\treturn 0",
"def write_lammps_atoms(self, atoms):\n \n fileobj = self.prefix + '_atoms'\n if isinstance(fileobj, str):\n fileobj = open(fileobj, 'w')\n\n # header\n fileobj.write(fileobj.name + ' (by ' + str(self.__class__) + ')\\n\\n')\n fileobj.write(str(len(atoms)) + ' atoms\\n')\n fileobj.write(str(len(atoms.types)) + ' atom types\\n')\n btypes, blist = self.get_bonds(atoms)\n if len(blist):\n fileobj.write(str(len(blist)) + ' bonds\\n')\n fileobj.write(str(len(btypes)) + ' bond types\\n')\n atypes, alist = self.get_angles()\n if len(alist):\n fileobj.write(str(len(alist)) + ' angles\\n')\n fileobj.write(str(len(atypes)) + ' angle types\\n')\n dtypes, dlist = self.get_dihedrals(alist, atypes)\n if len(dlist):\n fileobj.write(str(len(dlist)) + ' dihedrals\\n')\n fileobj.write(str(len(dtypes)) + ' dihedral types\\n')\n\n # cell\n p = prism(atoms.get_cell())\n xhi, yhi, zhi, xy, xz, yz = p.get_lammps_prism_str()\n fileobj.write('\\n0.0 %s xlo xhi\\n' % xhi)\n fileobj.write('0.0 %s ylo yhi\\n' % yhi)\n fileobj.write('0.0 %s zlo zhi\\n' % zhi)\n \n # atoms\n fileobj.write('\\nAtoms\\n\\n')\n tag = atoms.get_tags()\n for i, r in enumerate(map(p.pos_to_lammps_str,\n atoms.get_positions())):\n q = 0 # charge will be overwritten\n fileobj.write('%6d %3d %3d %s %s %s %s' % ((i + 1, 1,\n tag[i] + 1, \n q)\n + tuple(r)))\n fileobj.write(' # ' + atoms.types[tag[i]] + '\\n')\n\n # velocities\n velocities = atoms.get_velocities()\n if velocities is not None:\n fileobj.write('\\nVelocities\\n\\n')\n for i, v in enumerate(velocities):\n fileobj.write('%6d %g %g %g\\n' %\n (i + 1, v[0], v[1], v[2]))\n\n # masses\n fileobj.write('\\nMasses\\n\\n')\n for i, typ in enumerate(atoms.types):\n cs = atoms.split_symbol(typ)[0]\n fileobj.write('%6d %g # %s -> %s\\n' % \n (i + 1, \n atomic_masses[chemical_symbols.index(cs)],\n typ, cs))\n \n # bonds\n if len(blist):\n fileobj.write('\\nBonds\\n\\n')\n for ib, bvals in enumerate(blist):\n fileobj.write('%8d %6d %6d %6d ' %\n (ib + 1, bvals[0] + 1, bvals[1] + 1, \n bvals[2] + 1))\n fileobj.write('# ' + btypes[bvals[0]] + '\\n')\n\n # angles\n if len(alist):\n fileobj.write('\\nAngles\\n\\n')\n for ia, avals in enumerate(alist):\n fileobj.write('%8d %6d %6d %6d %6d ' %\n (ia + 1, avals[0] + 1, \n avals[1] + 1, avals[2] + 1, avals[3] + 1))\n fileobj.write('# ' + atypes[avals[0]] + '\\n')\n\n # dihedrals\n if len(dlist):\n fileobj.write('\\nDihedrals\\n\\n')\n for i, dvals in enumerate(dlist):\n fileobj.write('%8d %6d %6d %6d %6d %6d ' %\n (i + 1, dvals[0] + 1, \n dvals[1] + 1, dvals[2] + 1, \n dvals[3] + 1, dvals[4] + 1))\n fileobj.write('# ' + dtypes[dvals[0]] + '\\n')\n\n return btypes, atypes, dtypes",
"def convert_pdbqt_to_pdb(list_of_lines):\n printout = \"\"\n line_index_range = [x for x in range(0, 61)] + [x for x in range(70, 80)]\n\n for line in list_of_lines:\n if \"ATOM\" in line or \"HETATM\" in line:\n short_line = \"\"\n for i in line_index_range:\n # print(i)\n if i >= len(line):\n continue\n\n short_line = short_line + line[i]\n\n printout = printout + short_line\n elif \"REMARK x y z vdW Elec\" + \\\n \" q Type\" in line \\\n or \"REMARK _______ _______ _______ _____ _____\" + \\\n \" ______ ____\" in line:\n short_line = \"\"\n for i in line_index_range:\n # print(i)\n if i >= len(line):\n continue\n\n short_line = short_line + line[i]\n\n printout = printout + short_line + \"\\n\"\n else:\n printout = printout + line\n return printout",
"def save_M(M, f_out):\n _ATOM = '%s%5i %-4s%3s %c%4i%c %8.3f%8.3f%8.3f%6.2f%6.2f %4s%2s%2s\\n'\n\n def get_ATOM_line(atom_i, name, resid, x, y, z, aa_type):\n \"\"\"\n Write PDB ATOM line.\n \"\"\"\n args=('ATOM ', atom_i, name, aa_type, 'A', resid, ' ', x, y, z, 0.0, 0.0, 'X', ' ', ' ')\n s = _ATOM % args\n return s\n\n fp = open(f_out, 'w')\n for i in range(0, M.shape[0]):\n x, y, z = M[i]\n s = get_ATOM_line(i, 'CA', i, x, y, z, 'ALA') \n fp.write(s)\n fp.close()",
"def _print_pdb(sorted_data, filename):\n file_pdb = open(filename,\"w\")\n num_at = 0\n num_res = 0\n for one_result in sorted_data:\n chains = set()\n for r in one_result[0]:\n r = r.strip(\"_BCK\")\n chains.add(r.split(\":\")[1])\n cen_str = \"\"\n for r in one_result[1]:\n crd_center = \"{:.8s}\".format(str(round(float(r),3)))\n if len(crd_center)<8:\n crd_center = \" \"*(8-len(crd_center)) + crd_center\n cen_str += crd_center\n else:\n cen_str += crd_center\n num_at += 1\n num_res += 1\n for ch in chains:\n file_pdb.write(\"ATOM\" +\" \"*(7-len(str(num_at))) + \"%s HE SLN %s\" %(num_at, ch))\n file_pdb.write(\" \"*(3-len(str(num_res))) + \"%s %s 1.00 0.00 HE\\n\" %(num_res, cen_str))\n for prob in one_result[4]:\n num_at += 1\n prb_str = \"\"\n for p in prob:\n prb_center = \"{:.8s}\".format(str(round(float(p),3)))\n if len(prb_center)<8:\n prb_center = \" \"*(8-len(prb_center)) + prb_center\n prb_str += prb_center\n else:\n prb_str += prb_center\n for ch in chains:\n file_pdb.write(\"ATOM\" +\" \"*(7-len(str(num_at))) + \"%s XE SLN %s\" %(num_at, ch))\n file_pdb.write(\" \"*(3-len(str(num_res))) + \"%s %s 1.00 0.00 XE\\n\" %(num_res, prb_str))\n file_pdb.close()",
"def save_pdb(self, path, title=\"test\"):\n self._get_lines_for_protein()\n self.lines = [self._make_header(title)] + self.lines + [self._make_footer()]\n with open(path, \"w\") as outfile:\n outfile.write(\"\\n\".join(self.lines))",
"def save_structure_to_pdb(structure, filename):\n io = Bio.PDB.PDBIO()\n io.set_structure(structure)\n io.save(filename)",
"def write_geom_input(atoms):\n with open('geom.input', 'w') as new:\n for atom in atoms:\n new.write(\n f\"{atom.symbol:5s} {int(atom.atnum):3} {atom.x:>15.10f} {atom.y:>15.10f} {atom.z:>15.10f} \\n\"\n )",
"def print_all(m):\n for atom in m.atoms:\n print(\"{0} {1} {2}\".format(atom, atom.vp.index, atom.vp.type))",
"def exercise_atom_xyz_9999():\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 50.7529999.9999999.999 1.00166.17 A\n\"\"\"))\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 50.752 78.2739999.999 1.00166.17 A\n\"\"\"))\n try:\n pdb_inp = pdb.input(source_info=None, lines=flex.split_lines(\"\"\"\\\nATOM 10849 C ILE A1445 50.977 77.127 41.547 1.00129.33 A\nATOM 10850 O ILE A1445 50.257 76.569 42.421 1.00129.33 A\nATOM 10851 OXT ILE A1445 50.752 78.273 41.078 1.00189.50 A\nATOM 27953 ZN ZN A1506 9999.9999999.9999999.999 1.00166.17 A\nATOM 27954 ZN ZN A1508 9999.9999999.9999999.999 1.00166.17 A\n\"\"\"))\n except RuntimeError as e:\n assert str(e).find(\n \"IOTBX_ASSERT(! (xyz[0]>9999 && xyz[1]>9999 && xyz[2]>9999)) failure.\") >0\n else: raise Exception_expected",
"def write_model(self, which):\n print(\"MODEL%9s\"%which)\n for atom in self.structure[which-1]:\n print(\"%-6s%5s %4s %3s %s%4s %8s%8s%8s%6s%6s %3s\"%tuple(atom))\n print(\"ENDMDL\")",
"def synthesize_PDBQT(refpdbqt, coords, outputpdbqt):\n # read the reference ligand file to get the atoms table\n with open(refpdbqt, 'r') as f:\n lines = f.readlines()\n f.close()\n \n # plug in the coordinates of the docked pose and save\n acc = 0\n try:\n with open(outputpdbqt, 'w') as PDB:\n for aline in lines:\n if ('ATOM' in aline) or ('UNL' in aline):\n new_line = list(aline[:])\n new_coord = [str(i) for i in coords[acc].tolist()]\n new_line[30:38] = list(str(new_coord[0]).rjust(8))\n new_line[38:46] = list(str(new_coord[1]).rjust(8))\n new_line[46:54] = list(str(new_coord[2]).rjust(8))\n new_str = ''.join(new_line)\n acc += 1\n else:\n new_str = aline\n PDB.write(new_str)\n except IndexError:\n raise IndexError",
"def write_molecule(\r\n molecule: Union[AtomGroup], file_path: Union[str, Path], delete_segid: bool = True\r\n):\r\n molecule.write(file_path)\r\n\r\n if str(file_path).endswith(\"pdb\") and delete_segid:\r\n lines = []\r\n with open(file_path, \"r\") as read_file:\r\n for line in read_file.readlines():\r\n if line.startswith((\"ATOM\", \"HETATM\")):\r\n line = line[:67] + \" \" + line[76:]\r\n lines.append(line)\r\n with open(file_path, \"w\") as write_file:\r\n write_file.write(\"\".join(lines))\r\n\r\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Group atoms by helix strand. Set n as the start of residue numbering.
|
def strands(self, n=2):
    # Split self.atoms into two strands at the point where residue numbering
    # restarts at n (both strands are numbered starting from n).
    strand1, strand2 = self.atoms, []  # fall back to a single strand if no restart is found
    prev_atom = {'res_num': n}
    for i, atom in enumerate(self.atoms):
        if (atom['res_num'] != prev_atom['res_num'] and
                atom['res_num'] == n):
            strand1 = self.atoms[0:i]
            strand2 = self.atoms[i:]
        prev_atom = atom
    # Tag each atom with the strand it belongs to.
    for atom in self.atoms:
        if atom in strand1:
            atom['strand'] = 1
        elif atom in strand2:
            atom['strand'] = 2
    return self.atoms
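    # Illustrative example (assumed data shape, not taken from the source): with
    # self.atoms = [{'res_num': 2, 'name': 'A1'}, {'res_num': 3, 'name': 'A2'},
    #               {'res_num': 2, 'name': 'B1'}, {'res_num': 3, 'name': 'B2'}]
    # and n=2, the numbering restarts at the third atom, so A1/A2 are tagged
    # 'strand': 1 and B1/B2 are tagged 'strand': 2.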
|
[
"def homotopy_group(self, n):\n if n not in ZZ or n < 2:\n raise ValueError(\"\"\"homotopy groups can only be computed\n for dimensions greater than 1\"\"\")\n lgens = __homotopy_list__(self._kenzo, n).python()\n if lgens is not None:\n trgens = [0 if i == 1 else i for i in sorted(lgens)]\n return AbelianGroup(trgens)\n else:\n return AbelianGroup([])",
"def homology(self, n):\n echcm1 = __echcm__(self._kenzo)\n m1 = __chcm_mat__(echcm1, n)\n m2 = __chcm_mat__(echcm1, n + 1)\n homology = __homologie__(m1, m2)\n lhomomology = [i for i in EclListIterator(homology)]\n res = []\n for component in lhomomology:\n pair = [i for i in EclListIterator(component)]\n res.append(pair[0].python())\n return HomologyGroup(len(res), ZZ, res)",
"def SymmetricGroupBruhatOrderPoset(n):\n if n < 10:\n element_labels = dict([[s,\"\".join(map(str,s))] for s in Permutations(n)])\n return Poset(dict([[s,s.bruhat_succ()]\n for s in Permutations(n)]),element_labels)",
"def regroup_numbered_streets(context):\n alpha_list = []\n numbered_streets = []\n for entry in context['alpha_list']:\n if entry['grouper'].lower() in string.ascii_lowercase:\n alpha_list.append(entry)\n else:\n numbered_streets.extend(entry['list'])\n if numbered_streets:\n entry = {'list': numbered_streets, 'grouper': '#'}\n alpha_list.insert(0, entry)\n context['alpha_list'] = alpha_list\n return ''",
"def org(cls, n):\n return cls.group_number_format(n, [3, 3, 3])",
"def group_by_ljtype(parm, mask):\n atoms = parm.view[mask]\n groups = group_by(atoms, lambda atom: atom.nb_idx)\n return groups",
"def E(n):\n assert 6 <= n <= 8\n\n g = Group.schlafli(*[3] * (n - 2), 2)\n g[2, n - 1] = 3\n return g",
"def horn(n):\n if n == 0:\n yield 'o', ()\n else:\n for k in range(0, n):\n for f, l in horn(k):\n for g, r in horn(n - 1 - k):\n yield g, ((f, l),) + r",
"def test_set_grouping(self):\n H = qml.Hamiltonian([1.0, 2.0, 3.0], [qml.PauliX(0), qml.PauliX(1), qml.PauliZ(0)])\n H.grouping_indices = [[0, 1], [2]]\n\n assert H.grouping_indices == ((0, 1), (2,))",
"def _molecules(self, line, lineno=0):\n # we need to keep the order here so cannot make it a dict\n # also mol names do not need to be unique\n name, n_mol = line.split()\n self.molecules.append((name, n_mol))",
"def get_bond_numbers(self): # fuse with adjacency list function ?\n s = set()\n for (a1, s1), (a2, s2) in self.bonds:\n if (a1, a2) in s:\n self.nbonds[(a1, a2)] += 1\n else:\n s.add((a1, a2))\n self.nbonds[(a1, a2)] = 1",
"def sam_reps(n):\n pm = \"\"\n if type(n) is str:\n #Implement \"0bXXXX\" logic\n header, digits = n[:2], n[2:]\n if header == \"0b\":\n sign_bit, mag = digits[0], digits[1:] #Binary string (unsigned rn)\n if sign_bit == '0':\n d = int(mag, base=2)\n sign = \"+\"\n else:\n d = -1 * int(mag, base=2)\n sign = \"-\"\n b = \"0b\" + digits\n h = \"0x\" + format(int(digits,base=2), 'X')\n elif header == \"0x\":\n bs = bin(int(digits,base=16))[2:] #hex -> bitstring\n sign_bit, mag = bs[0], bs[1:]\n if sign_bit == '0':\n d = int(mag, base=2)\n sign = \"+\"\n else:\n d = -1 * int(mag, base=2)\n sign = \"-\"\n b = \"0b\" + digits\n h = \"0x\" + format(int(n,base=2),'X')\n #convert hex -> bin.\n pm = mag\n else: #Implement decimal logic.\n if n < 0:\n mag = bin(n)[3:]\n padded_mag = mag\n while len(padded_mag) % 3 != 0:\n padded_mag = '0' + padded_mag\n b = \"0b1\" + padded_mag\n sign = \"-\"\n else:\n mag = bin(n)[2:]\n padded_mag = mag\n while len(padded_mag) % 3 != 0:\n padded_mag = '0' + padded_mag\n b = \"0b0\" + padded_mag\n sign = \"+\"\n pm = padded_mag\n h = \"0x\" + format(int(b,2), 'X')\n d = n\n print(\"DEC:\", d)\n print(\"BIN:\", b)\n print(\"HEX:\", h)\n print(\"Sign:\", sign)\n print(\"Magnitude:\", str(int(mag,2)) + \" (\" + str(pm) + \")\" )",
"def group_chrm(self, bed_file):\n # ####################################################################\n # group chromosomes with respect to name, strand and interval start\n # ####################################################################\n\n chrm_details = dict()\n\n \"\"\"\n reading bed_file to make unique keys and\n eventually to have independent/unique dataset to process\n independently for centroids computation.\n \"\"\"\n for b in bed_file:\n \"\"\"\n reads chromosome name, strand of input_file and start\n from merged_data respectively, in oder to make unique\n key for chromosome_pos_val i.e ('chrX', '+', '1').\n \"\"\"\n key_name_strand_pos = (b[0], b[2], b[3])\n\n \"\"\"\n chromosome_start_position value of an input bed data.\n \"\"\"\n chrm_pos_val = b[1]\n\n \"\"\"\n adding elements to the dictionary\n key_name_strand_pos is used as key for\n the dictionary chrm_details.\n \"\"\"\n if key_name_strand_pos in chrm_details:\n\n \"\"\"\n append to an existing key one by one\n {('chrX', '+', '1') : ['1', '2']}\n {('chrX', '+', '1') : ['1', '2', '3']}\n {('chrX', '+', '1') : ['1', '2', '3', '4']}\n {('chrX', '+', '1') : ['1', '2', '3', '4', '5']}\n \"\"\"\n chrm_details[key_name_strand_pos].append(chrm_pos_val)\n else:\n \"\"\"\n create a new array of values\n {('chrX', '+', '1') : ['1']}\n {('chrX', '+', '1'): ['1', '2', '3', '4', '5'],\n ('chrX', '-', '9'): ['9']}\n \"\"\"\n chrm_details[key_name_strand_pos] = [chrm_pos_val]\n\n return chrm_details",
"def test_number_residue(self):\n\n # Get the data pipe.\n dp = pipes.get_pipe('orig')\n\n # Create the first residue and add some data to its spin container.\n self.residue_fns.create(-10, 'His')\n\n # Rename the residue.\n self.residue_fns.number(res_id=':-10', number=10, force=True)\n\n # Test that the residue has been renumbered.\n self.assertEqual(dp.mol[0].res[0].num, 10)",
"def test_grouping_is_correct_compute_grouping(self):\n a = qml.PauliX(0)\n b = qml.PauliX(1)\n c = qml.PauliZ(0)\n obs = [a, b, c]\n coeffs = [1.0, 2.0, 3.0]\n\n H = qml.Hamiltonian(coeffs, obs, grouping_type=\"qwc\")\n H.compute_grouping()\n assert H.grouping_indices == ((0, 1), (2,))",
"def get_group_positions(u, indi):\n positions = []\n for i in indi.correction_groups:\n selstr = 'bynum %d' % i[0]\n for j in i[1:]:\n selstr += ' or bynum %d' % j\n positions.append(u.select_atoms(selstr).positions)\n return positions",
"def group(self,lane,mismatches=2,n=None,cutoff=None,\n seed_barcodes=None,exclude_reads=0.000001):\n # Initialise\n barcodes = self.filter_barcodes(lane=lane,cutoff=exclude_reads)\n nreads = self.nreads(lane=lane)\n groups = []\n # Update barcode list if 'seed' barcodes were provided\n if seed_barcodes:\n promoted_barcodes = []\n for barcode in seed_barcodes:\n try:\n barcodes.remove(barcode)\n promoted_barcodes.append(barcode)\n except ValueError:\n # Barcode doesn't appear in the list\n pass\n barcodes = promoted_barcodes + barcodes\n # Cutoff\n if cutoff is not None:\n cutoff_reads = int(float(nreads)*cutoff)\n else:\n cutoff_reads = 0\n # Iteratively assign barcodes to groups\n # until we run out\n cumulative = 0\n while barcodes:\n # Fetch next reference sequence\n group = BarcodeGroup(barcodes[0],\n self.counts(barcodes[0],lane))\n # Save non-matching sequences\n rejected = []\n # Iterate through the remaining sequences\n # looking for matches\n for seq in barcodes[1:]:\n if group.match(seq,mismatches):\n group.add(seq,self.counts(seq,lane))\n else:\n rejected.append(seq)\n # Finished checking sequences for this group\n # Reset sequences to check in next iteration\n barcodes = rejected\n # Check cutoff\n if group.counts >= cutoff_reads:\n groups.append(group)\n else:\n # Discard group\n pass\n # Sort groups into order of total counts\n groups = sorted(groups,cmp=lambda x,y: cmp(y.counts,x.counts))\n return groups",
"def _interpretations(self, tokens, index):\n number_of_occurences = {}\n max_number_of_occurences = 0\n result = set()\n \n for token in tokens:\n for org_id in index.get(token, set()):\n n = 1 + number_of_occurences.get(org_id, 0)\n logging.debug(\"org id %s occurs %d times after '%s'\", org_id, n, token)\n number_of_occurences[org_id] = n\n if n > max_number_of_occurences:\n max_number_of_occurences = n\n result = set([org_id])\n elif n == max_number_of_occurences:\n result.add(org_id)\n \n return result",
"def _get_synset_ids(self, split):\n return sorted([\n synset.wn_id for synset in imagenet_specification.get_leaves(\n self.dataset_spec.split_subgraphs[split])\n ])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get base pairs from start to end, inclusive
|
def get_pairs(self, start, end):
# Both strands are numbered 5' to 3'
return [a for a in self.atoms if a['pair'] in range(start, end + 1)]
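    # Assumed data shape (illustrative, not from the source): each atom dict carries an
    # integer 'pair' index, e.g. {'pair': 7, ...}. get_pairs(5, 9) keeps every atom whose
    # 'pair' value is 5, 6, 7, 8 or 9 (range(start, end + 1) makes the upper end inclusive).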
|
[
"def key_range_iterator(key, start=\"\", finish=\"\", count=100):\r\n return (key.clone(key=k) for k in key_range(key, start, finish, count))",
"def inclusive_list(start, stop):\n return list(range(start, stop + 1))",
"def location_range(start: int, end: int) -> Iterable[int]:\n step = 1\n if start > end:\n step = -1\n\n return range(start, end + step, step)",
"def gen_ip_range(start, end):\n while True:\n yield start\n if start != end:\n start = increment_ip(start)\n else:\n break",
"def double_range(limit1, limit2): #y - x\n for i1 in range(limit1):\n for i2 in range(limit2):\n yield i1, i2",
"def gen_chunks(start, end, stride):\n for i, num in enumerate(xrange(start, end, stride)):\n yield num, num + stride",
"def get_row_indices(base):\n side = base*base\n return [list(range(i, i + side)) for i in np.arange(0, side*side, side)]",
"def extend_indeces(start, n, iInc, jInc):\n return [ (start[0]+k*iInc, start[1]+k*jInc) for k in xrange(0, n) ]",
"def block_ranges(\n start_block: BlockNumber, last_block: Optional[BlockNumber], step: int = 5\n) -> Iterable[Tuple[BlockNumber, BlockNumber]]:\n if last_block is not None and start_block > last_block:\n raise TypeError(\n \"Incompatible start and stop arguments.\",\n \"Start must be less than or equal to stop.\",\n )\n\n return (\n (BlockNumber(from_block), BlockNumber(to_block - 1))\n for from_block, to_block in segment_count(start_block, last_block + 1, step)\n )",
"def get_next_range_value(ranges):\n for start, stop in ranges:\n while start <= stop:\n yield start\n start += 1",
"def split_interval_at_values(start: T, end: T, offsets: Sequence[T]\n ) -> list[tuple[T, T]]:\n assert end > start\n assert offsets\n\n if offsets[0] > end or offsets[-1] < start:\n # no intersection, return the original time range\n return [(start, end)]\n\n out = []\n for offset in offsets:\n if offset >= end:\n break\n if start < offset:\n out.append((start, offset))\n start = offset\n if start != end:\n out.append((start, end))\n\n assert len(out) >= 1\n return out",
"def list_internal_between(node: Union[BTNode, None], start: int, end: int) -> list:",
"def split_n_range ( low , high , num ) :\n if high <= low or num < 1 : yield low , low\n elif 1 == num : yield low , high\n elif low < high and high <= num + low : yield low , high\n else : \n \n nn = high - low\n newn = nn // num\n for i in range ( 0 , num - 1 ) :\n nl = i * newn\n nh = nl + newn\n yield low + nl , low + nh \n yield low + num * newn - newn , high",
"def _intervals(start, updates, total):\n updates = min(updates, total)\n wrap = max(0, start + updates - total)\n if wrap > 0:\n return range(start, total), range(0, wrap)\n else:\n return range(start, start + updates), range(0, 0)",
"def get_crossed_points(start_point, end_point):\n dy = end_point[1] - start_point[1]\n dx = end_point[0] - start_point[0]\n # for simplicity, we need that start point is at left side, end point is at right side. If they do not meet this\n # requirement, we simply swap them\n if dx < 0:\n dx = -dx\n dy = -dy\n start_point, end_point = end_point, start_point\n if dx == 0: # special case, need to avoid 0-division error in the \"else\" logic\n step_y = 1 if dy >= 0 else -1\n for y in range(step_y, dy, step_y): # we can safely ignore y=0 and y=dy here\n yield (start_point[0], start_point[1] + y)\n else: # we must have dx > 0 here\n step = 1.0 * dy / dx\n for x in range(0, dx + 1, 1):\n if x == 0: # first x\n start_y = 1 if step >= 0 else -1\n elif step >= 0:\n start_y = int(0.5 + step * (x - 0.5))\n else:\n start_y = int(-0.5 + step * (x - 0.5))\n if x == dx: # last x\n end_y = dy - 1 if step >= 0 else dy + 1\n elif step >= 0:\n end_y = int(math.ceil(0.5 + step * (x + 0.5))) - 1\n else:\n end_y = int(math.floor(-0.5 + step * (x + 0.5))) + 1\n step_y = 1 if step >= 0 else -1\n for y in range(start_y, end_y + step_y, step_y):\n yield (start_point[0] + x, start_point[1] + y)",
"def choose_indices(n, max_):\n def choose_indices_inner(num_left, indices, min_, max_):\n if num_left == 0: \n yield indices\n else:\n start = indices[-1] + 1 if len(indices) > 0 else min_\n for i in range(start, max_):\n indices.append(i)\n for r in choose_indices_inner(num_left - 1, indices, min_, max_): \n yield r\n indices.pop()\n for i in choose_indices_inner(n, [], 0, max_):\n yield i",
"def gene_krupa_range(start, stop, even_step, odd_step):\n step = 0\n counter = start\n a_list = []\n while counter < stop:\n a_list.append(counter)\n if step % 2 == 1: \n counter += even_step\n else:\n counter += odd_step\n step += 1\n return a_list",
"def overlap(start_1, end_1, start_2, end_2):\n return range(max(start_1, start_2),\n min(end_1, end_2) + 1)",
"def circular_range(ini, end, length):\n if ini > end:\n return range(ini, length)+range(0, end+1)\n else:\n return range(ini, end+1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Minimize rotation using ksh's best_rotation
|
def minimize(self):
rotation = ksh.best_rotation(self.crdset)
best = rotation[0].calc_all()
self.res = best[1]
self.phi = rotation[1]
self.the = rotation[2]
self.best = best
return self.best
|
[
"def min_rotation(target_degrees, source_degrees):\n return (target_degrees - source_degrees + 180) % 360 - 180",
"def GetRotOfLowerSymm(new_latt, old_latt, old_rot):\n \n # Q = np.transpose(np.dot(old_latt, np.linalg.inv(new_latt)))\n P = np.dot(new_latt, np.linalg.inv(old_latt)) # // horizontal vec\n tolerance = abs(np.linalg.det(P)) / 10\n\n new_rot = []\n for i in old_rot:\n # tp_rot = np.dot(P, i)\n # tp_rot1 = np.dot(tp_rot, np.linalg.inv(P)) \n # new_rot W'=QWP\n tp_rot = np.dot(np.linalg.inv(np.transpose(P)), i)\n \n # standard horizontal\n tp_rot1 = np.dot(tp_rot, np.transpose(P)) \n tp_rot2 = tp_rot1.copy()\n \n # find matrix whose elements are all integer\n for j in range(3):\n for k in range(3):\n if tp_rot2[j, k] < 0.0:\n tp_rot2[j, k] = int(tp_rot2[j, k] - 0.5)\n else:\n tp_rot2[j, k] = int(tp_rot2[j, k] + 0.5)\n # print('1:', tp_rot1)\n # print('2:', tp_rot2)\n \n tp_flag = True\n for j in range(3):\n for k in range(3):\n if abs(tp_rot2[j, k] - tp_rot1[j, k]) > tolerance:\n tp_flag = False\n break\n if not tp_flag:\n break\n\n if tp_flag:\n if abs(np.linalg.det(tp_rot2)) == 1: # tp_rot1 or tp_rot2\n new_rot.append(tp_rot2)\n \n # print('check new len: ', len(new_rot))\n \n if len(new_rot) == 0:\n print('Error! Cnnot find any new rotations of primitive lattice!')\n print('Return old rotations of minimum lattice...')\n return old_rot\n else:\n return new_rot",
"def rotateSSorousW (ssorou):\r\n output = [ssorou[0]]\r\n working = ssorou[1:]\r\n\r\n minIndex = 0\r\n minWeight = findWeightofSorou(working[0][1])\r\n for index in range(len(working)):\r\n newWeight = findWeightofSorou(working[index][1])\r\n if (newWeight < minWeight and newWeight > 0) or minWeight == 0:\r\n minIndex = index\r\n minWeight = newWeight\r\n\r\n rotateOrder = ssorou[0] - working[minIndex][0]\r\n\r\n for index in range(len(working)):\r\n originalIndex = (index + rotateOrder) % ssorou[0]\r\n newSubSorou = [index, working[(index + rotateOrder) % ssorou[0]][1]]\r\n output.append(newSubSorou)\r\n\r\n return output",
"def rot90(self, k, dims): # real signature unknown; restored from __doc__\n pass",
"def rotor_cost(R):\n return val_rotor_cost_sparse(R.value)",
"def rotate_90_ccw(self, turns):\n\n######### Max piece width [Calibration Values]\n max_piece_width = 230 # 240 For hulk puzzle\n\n if turns > 0:\n # To ensure minimal 90 degree turns\n turns = turns%4\n\n # Open piece image\n img_piece = cv2.imread(self.file_name)\n\n # Get rotational matrix and perform ratation\n M = cv2.getRotationMatrix2D((max_piece_width/2, max_piece_width/2), 90*turns, 1)\n img_piece = cv2.warpAffine(img_piece, M, (max_piece_width, max_piece_width))\n\n # Save piece image\n cv2.imwrite(self.file_name, img_piece)\n\n # Rotate it_class array\n self.it_class = np.roll(self.it_class, -turns)\n\n # Adjust piece angle\n self.angle = (90*turns + self.angle)%360",
"def largest_degree_immunization(graph, k):\n\n degrees = np.array(graph.degree())\n return degrees.argsort()[::-1][:k]",
"def test_small_rotation(self):\n practice_name = 'Introduire une culture étouffant les adventices'\n mais = 'recsPtaEneeYVoEWx'\n tournesol = 'rec5MHmc9xIgAg8ha'\n soja = 'recwHs4aAiZc9okg9'\n orge = 'recfGVtMZSz05Rfl8'\n raygrass = 'recjzIBqwGkton9Ed'\n\n # When having more than 3 cultures, there is no bonus\n answers = {\n 'problem': 'DESHERBAGE',\n 'rotation': [mais, tournesol, soja, orge],\n 'weeds': raygrass,\n \"cattle\": \"Oui\",\n }\n engine = Engine(answers, [], [])\n result = next(filter(lambda x: x.practice.title == practice_name, engine.calculate_results()))\n initial_weight = result.weight\n\n # When having 3 or less cultures, the bonus is 1.1\n answers = {\n 'problem': 'DESHERBAGE',\n 'rotation': [mais, tournesol, soja],\n 'weeds': raygrass,\n \"cattle\": \"Oui\",\n }\n engine = Engine(answers, [], [])\n result = next(filter(lambda x: x.practice.title == practice_name, engine.calculate_results()))\n small_rotation_weight = result.weight\n\n self.assertEqual(initial_weight * 1.1, small_rotation_weight)",
"def rotateK(A, K, res):\r\n if K < 2: # bug fixed: we should not rotate first 0 and first 1 elements\r\n return\r\n res.append(K)\r\n left, right = 0, K-1\r\n while left < right:\r\n A[left], A[right] = A[right], A[left]\r\n left += 1\r\n right -= 1",
"def rotationOrder(self):\n\t\treturn 0",
"def rotation(self,t):\n pass",
"def alt_rotor_cost(V):\n logV = general_logarithm(V)\n scale_cost = np.abs(-2*logV[e45])\n scalefac = np.e**(-2*logV[e45])\n R = logV(e123)*e123\n rotation_cost = abs((R*~R)[0])\n translation_cost = scalefac*abs((logV - logV[e45]*e45 - logV(e123))|eo)\n return rotation_cost + scale_cost + translation_cost",
"def optimize_rotamers(self):\n\n tf = self.input_class.regions.get_basic_tf(self.pose)\n pack_mover=PackRotamersMover(self.score_class.score)\n pack_mover.task_factory(tf)\n pack_mover.score_function(self.score_class.score)\n\n self.run_protocol(pack_mover)",
"def rotation_ellipticity(self):\n if self.sigma_x > self.sigma_y:\n temp_rotation = self.__rotation % math.pi\n else:\n temp_rotation = (self.__rotation+(math.pi/2)) % math.pi\n return(temp_rotation)",
"def optimal_rotation_matrix(source, target, allow_mirror=False):\n correlation = np.dot(target.points.T, source.points)\n U, D, Vt = np.linalg.svd(correlation)\n R = np.dot(U, Vt)\n\n if not allow_mirror:\n # d = sgn(det(V * Ut))\n d = np.sign(np.linalg.det(R))\n if d < 0:\n E = np.eye(U.shape[0])\n E[-1, -1] = d\n # R = U * E * Vt, E = [[1, 0, 0], [0, 1, 0], [0, 0, d]] for 2D\n R = np.dot(U, np.dot(E, Vt))\n return R",
"def getRotationForRANSACPlane(self, maxstep, xyz, idx, justSVD=False):\n #first, apply RANSAC to fit a plane\n t = self.conf.RANSAC_fitplane_tfun(maxstep)\n d = self.conf.RANSAC_fitplane_planeratio*self.conf.RANSAC_fitplane_saferatio\n fun1 = r.fitPlaneSVD\n fun2 = r.errorPointsToPlane\n #self.conf.RANSAC_fitplane_debug=True\n if justSVD:\n bestplane = r.fitPlaneSVD(xyz)\n else:\n try:\n result = ransac.ransac(xyz, fun1,\n fun2, xyz.shape[1]+1, self.conf.RANSAC_fitplane_k, t, d, \n self.conf.RANSAC_fitplane_debug, True)\n except ValueError as ev:\n return (False, 'RANSAC fitplane error: '+ev.message)\n if result is None:\n self.log('\\n######################################\\nWARNING: It was not possible to fit a plane to the heightmap.\\nEither the heighmap was too rugged to fit a plane, or the parameters for the RANSAC algorithm were too tight\\nA plane will be fit to the whole heightmap, but this will degrade the \"planarness\" of the heightmaps,\\nand phase correlation, if used, may not work or lead to deceptive results.\\n######################################\\n')\n bestplane = r.fitPlaneSVD(xyz)\n else:\n bestplane, ransac_matches = result\n \n# bestplane = r.fitPlaneSVD(xyz)\n \n if self.conf.debugXYZ and (idx>=0):\n minx,miny,maxx,maxy = n.vstack((xyz[:,0:2].min(axis=0), xyz[:,0:2].max(axis=0))).flatten()\n rectangle = n.array([[minx,miny,0], [maxx,miny,0], [maxx,maxy,0], [minx,maxy,0]])\n for i in xrange(len(rectangle)):\n rectangle[i,2] = -(rectangle[i,0]*bestplane[0] + rectangle[i,1]*bestplane[1] + bestplane[3])/bestplane[2]\n w.writePLYPointsAndPolygons(self.conf.debugSavePath+(\"debug.FITTEDPLANE.%03d.ply\"%idx), n.vstack((rectangle, xyz)), [range(4)])\n \n# dists = r.distancePointsToPlane(xyz, bestplane)\n# r.showPointsAndPlane(xyz, 100, bestplane, values=dists, vmin=-10, vmax=10)\n \n #get a rotation matrix to rotate the fit plane onto the XY plane\n planeNormal = bestplane[:3]\n if planeNormal[2]<0:\n #if the plane is upside down, correct it!\n planeNormal = -planeNormal\n planeNormal /= n.linalg.norm(planeNormal)\n# self.log('best plane: %s\\n normal: %s\\n' % (str(bestplane), str(bestplane[0:3]/n.linalg.norm(bestplane[0:3]))))\n self.log('planeNormal: %s\\n' % str(planeNormal))\n R = r.rotateVectorToVector(planeNormal, n.array([0,0,1]))\n return (True, R)",
"def test_opt(self):\n coords = load_coords(['1ake', '4ake'])\n\n A = np.dot(coords[0].T,coords[1])\n R = fit(*coords)[0]\n\n func = spin.NearestUnitQuaternion(A)\n q_opt = func.optimum().dofs\n q_opt2 = spin.NearestRotation(A, spin.Quaternion()).optimum().dofs\n \n ## constrained optimization\n\n constraint = [{'type': 'eq', 'fun': lambda q : np.dot(q,q) - 1}]\n\n best = -1e308, None\n\n for n_trials in range(10):\n\n q_start = spin.Quaternion.random() \n result = opt.minimize(lambda q: -func(q), q_start, constraints=constraint)\n q_best = result['x'] * np.sign(result['x'][0])\n if abs(constraint[0]['fun'](q_best)) < 1e-10 and func(q_best) > best[0]:\n best = func(q_best), q_best\n\n _, q_best = best\n\n print(make_title('finding nearest rotation matrix / unit quaternion'))\n print(np.round(q_opt, 5))\n print(np.round(q_best, 5))\n print(np.round(q_opt2, 5))\n\n tol = 1e-5\n self.assertTrue(np.linalg.norm(q_opt - q_best) < tol)\n self.assertTrue(np.linalg.norm(q_opt - q_opt2) < tol)",
"def changerOrientationLatitude(self):\n\n # |valeur| ne doit pas etre superieur changementOrientationMax\n # et ne doit pas depasser les limites de l'orientation maximum\n if abs(self.deltaLatitude) > self.orientationMax:\n raise ValueError(\"Changement trop important\")\n elif self.rotMaxLat-self.deltaLatitude >= 0:\n if self.deltaLatitude + self.changementOrientationMax < self.rotMaxLat:\n self.deltaLatitude += self.changementOrientationMax\n else:\n self.deltaLatitude += self.changementOrientationMax - ((self.deltaLatitude + self.changementOrientationMax) - self.rotMaxLat)\n else:\n if self.deltaLatitude - self.changementOrientationMax > self.rotMaxLat:\n self.deltaLatitude = self.deltaLatitude - self.changementOrientationMax\n else:\n self.deltaLatitude = self.deltaLatitude - (self.changementOrientationMax - (-(self.deltaLatitude - self.changementOrientationMax) + self.rotMaxLat))\n self.calculPointageCamera()",
"def findRotation(a1,b1,c1,a2,b2,c2):\n translation = a2-a1 \n d1 = b1-a1\n d2 = b2-a2\n e1 = c1-a1\n e2 = c2-a2\n f1 = numpy.cross(d1,e1)\n f2 = numpy.cross(d2,e2)\n nd1 = numpy.linalg.norm(d1)\n nd2 = numpy.linalg.norm(d2)\n ne1 = numpy.linalg.norm(e1)\n ne2 = numpy.linalg.norm(e2)\n nf1 = numpy.linalg.norm(f1)\n nf2 = numpy.linalg.norm(f2)\n assert abs(nd2-nd1)+abs(ne2-ne1)+abs(nf2-nf1) < 1.0e-10, 'ERROR: the inputted vectors do no represent srigid body rotation %f, %f, %f' % (abs(nd2-nd1),abs(ne2-ne1),abs(nf2-nf1))\n d1 /= nd1\n d2 /= nd2\n e1 /= ne1\n e2 /= ne2\n f1 /= nf1\n f2 /= nf2\n g1 = numpy.vstack([d1,e1,f1]).T \n g2 = numpy.vstack([d2,e2,f2]).T \n # want to solve for M: g2 = M g1\n # M = g2*inv(g1)\n M = numpy.dot(g2,numpy.linalg.inv(g1))\n # we have a matrix of the rotation\n eigval,eigvec = numpy.linalg.eig(M) \n eigvalreal = numpy.real(eigval)\n index = 0\n for i in range(len(eigval)):\n if abs(eigval[i]-1.0) < 1.0e-10:\n index = i\n if abs(eigval[index]-1.0) > 1.0e-10:\n print 'ERROR: i did not find eigenvalue 1 for this matrix, i.e. it is not rotation '\n sys.exit(1)\n axis = numpy.real(eigvec[:,index])\n axis /= numpy.linalg.norm(axis) \n x = numpy.array([1.,0.,0.])\n y = numpy.array([0.,1.,0.])\n x = x - axis*numpy.dot(axis,x)\n y = y - axis*numpy.dot(axis,y)\n if numpy.linalg.norm(y) > numpy.linalg.norm(x):\n x = y \n xrot = numpy.dot(M,x)\n angle = findAngleOfRotation(axis,x,xrot)\n return translation,axis,angle"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write minimized fit to Molecule.fits
|
def write_minimize(self, Molecule):
Molecule.fits.append(self.best)
return Molecule.fits
|
[
"def write_fits(self, filename, moctool=''):\n datafile = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), 'data', 'MOC.fits')\n hdulist = fits.open(datafile)\n cols = fits.Column(name='NPIX', array=self._uniq(), format='1K')\n tbhdu = fits.BinTableHDU.from_columns([cols])\n hdulist[1] = tbhdu\n hdulist[1].header['PIXTYPE'] = ('HEALPIX ', 'HEALPix magic code')\n hdulist[1].header['ORDERING'] = ('NUNIQ ', 'NUNIQ coding method')\n hdulist[1].header['COORDSYS'] = ('C ', 'ICRS reference frame')\n hdulist[1].header['MOCORDER'] = (\n self.maxdepth, 'MOC resolution (best order)')\n hdulist[1].header['MOCTOOL'] = (moctool, 'Name of the MOC generator')\n hdulist[1].header['MOCTYPE'] = (\n 'CATALOG', 'Source type (IMAGE or CATALOG)')\n hdulist[1].header['MOCID'] = (' ', 'Identifier of the collection')\n hdulist[1].header['ORIGIN'] = (' ', 'MOC origin')\n time = datetime.datetime.utcnow()\n hdulist[1].header['DATE'] = (datetime.datetime.strftime(\n time, format=\"%Y-%m-%dT%H:%m:%SZ\"), 'MOC creation date')\n hdulist.writeto(filename, overwrite=True)\n return",
"def SaveSrcFreeFits(self):\n # run the command\n CreateSrcFreeMap(\n idnt_map = os.path.join(self.path2outdir, self.full_root_name + \".identical.fits\"),\n segm_map = os.path.join(self.path2outdir, self.full_root_name + \".segmentation.fits\"),\n bckg_map = os.path.join(self.path2outdir, self.full_root_name + \".background.fits\"),\n bckg_rms_map = os.path.join(self.path2outdir, self.full_root_name + \".background_rms.fits\"),\n objc_map = os.path.join(self.path2outdir, self.full_root_name + \".objects.fits\"),\n path2out_map = os.path.join(self.path2outdir, self.full_root_name + \".identical.srcfree.fits\"),\n )\n # diagnostic\n print\n print \"#\", \"Source-free image:\",\n print os.path.join(self.path2outdir, self.full_root_name + \".identical.srcfree.fits\")\n print",
"def save_file(self):\n self.data.write(\"DataFile.fits\",frames=[self.data.framename],clobber=True)",
"def fit_image(self):\n self.params = self.all_params['Fit 0']\n self.fit_results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n #report_fit(self.fit_results)\n sel.fparams = self.fit_results.params",
"def inspect_fits(data, fit, grid, prompt = '', outfile = 'out.csv', par_summary = True, **kwargs):\n ndata = len(data); n_models = len(fit[0]['modelindex_o'])\n distscale = (float(grid['o'].meta['DISTKPC']) / data['DKPC'])**2\n k = np.nonzero(['FILT_' in k for k in grid['o'].meta.keys()])[0]\n filternames = [f.split(',')[0].replace('(', '') for f in np.array(list(grid['o'].meta.values()))[k]]\n lpivot = np.array([float(f.split(',')[1].replace(')', '')) for f in np.array(list(grid['o'].meta.values()))[k]])\n\n plt = setPlotParams()\n plt.figure(figsize = (12, 12))\n color = {'o': 'blue', 'c': 'red'}\n xlim = [.1, 100]\n for i in range(ndata):\n ylim = np.nanmax(data[i]['FLUX'])\n chemtype = fit[i]['chemtype']\n modelindex = 'modelindex_' + chemtype\n scale = 'scale_' + chemtype\n # text = [r'$\\chi^2 = {}$'.format(np.round(fit[i]['chisq_' + chemtype][0], decimals = 1)), \\\n # r'$\\dot{M}_{\\rm d}/{\\rm M}_\\odot~{\\rm yr}^{-1} = {:0.1e}$'.format(fit[i]['DPR_' + chemtype]), \\\n # r'$L/{\\rm L}_\\odot = {:0.2e}$'.format(fit[i]['Lum_' + chemtype])]\n #Wrapper to ignore UserWarnings about converting Masked values to Nan.\n warnings.filterwarnings('ignore')\n title = 'ID = ' + str(fit[i]['ID']) + ', chemtype = ' + chemtype\n xscale = 'log'; yscale = 'log'\n xlabel = r'$\\lambda (\\mu$' + 'm)'; ylabel = r'$F_{\\nu}$' + '(Jy)'\n if par_summary:\n fig, (a0, a1) = plt.subplots(2, 1, gridspec_kw = {'height_ratios': [3, 1]}, constrained_layout = True)\n a0.set_title(title)\n a0.set_xscale(xscale); a0.set_yscale(xscale)\n a0.set_xlabel(xlabel); a0.set_ylabel(ylabel)\n _ = a0.set_xlim(xlim)\n _ = a0.set_ylim(1e-5 * ylim, 1.2 * ylim)\n else:\n a0 = plt.copy()\n a0.title(title)\n a0.xscale(xscale); a0.yscale(xscale)\n a0.xlabel(xlabel); a0.ylabel(ylabel)\n _ = a0.xlim(xlim)\n _ = a0.ylim(1e-5 * ylim, 1.2 * ylim)\n for j in range(n_models):\n _ = a0.plot(grid[chemtype][fit[modelindex][i, 0]]['Lspec'], \\\n grid[chemtype][fit[modelindex][i, j]]['Fspec'] * fit[scale][i, j] * distscale[i], color = 'grey', alpha = 0.5)\n #Best fit model\n _ = a0.plot(grid[chemtype][fit[modelindex][i, 0]]['Lspec'], \\\n grid[chemtype][fit[modelindex][i, 0]]['Fspec'] * fit[scale][i, 0] * distscale[i], color = color[chemtype])\n #Alternate best fit models from kwargs\n for kw in kwargs:\n pass\n #Overlay data\n _ = a0.plot(lpivot[data[i]['BANDMAP']], data[i]['FLUX'], 'ko', linestyle = '')\n _ = a0.errorbar(lpivot[data[i]['BANDMAP']], data[i]['FLUX'], fmt = 'ko', yerr = data[i]['DFLUX'], linestyle = '')\n #Overlay text\n loc = [0.2, ylim * 1.1]\n # for i in range(len(text)):\n # a0.text(loc[0], loc[1] / (i * 0.1 + 1), text[i])\n if par_summary:\n gramsfit.par_summary(a1, data[i], grid, fit[i], n_models = n_models)\n #fig.tight_layout()\n fig.show()\n else:\n plt.show()\n pass",
"def write_mask(self, file_name, format=\"fits\"):\n mask = np.short(self.to_mask())\n if format == 'fits':\n pyfits.writeto(file_name, mask, clobber=True)\n else:\n raise AttributeError(\"format not supported: %s\" % format)",
"def write(self, fits, extname):\n # First write the basic kwargs that works for all Outliers classes\n outliers_type = self.__class__.__name__\n write_kwargs(fits, extname, dict(self.kwargs, type=outliers_type))\n\n # Now do any class-specific steps.\n self._finish_write(fits, extname)",
"def save_fits(self, filename, header=\"\"):\n\n def _fits_to_disk(hdr, filename):\n \"\"\"Writes the FITS file to disk, with header.\n\n Args:\n hdr (fits.header.Header): FITS header.\n filename (str): Path of FITS file to be saved.\n \"\"\"\n hdr['DATE'] = time.strftime(\"%Y-%m-%dT%H:%m:%S\")\n hdr['SOFTWARE'] = \"pypahdb\"\n hdr['SOFT_VER'] = pypahdb.__version__\n hdr['COMMENT'] = \"This file contains results from a pypahdb fit\"\n hdr['COMMENT'] = \"Visit https://github.com/pahdb/pypahdb/ \" \\\n \"for more information on pypahdb\"\n hdr['COMMENT'] = \"The 1st plane contains the ionized fraction\"\n hdr['COMMENT'] = \"The 2nd plane contains the large fraction\"\n hdr['COMMENT'] = \"The 3rd plane contains the norm\"\n\n # write results to fits-file\n hdu = fits.PrimaryHDU(np.stack((self.ionized_fraction.value,\n self.large_fraction.value,\n self.norm.value), axis=0),\n header=hdr)\n hdu.writeto(filename, overwrite=True, output_verify='fix')\n\n return\n\n # save results to fits\n if isinstance(header, fits.header.Header):\n # should probably clean up the header\n # i.e., extract certain keywords only\n hdr = copy.deepcopy(header)\n else:\n hdr = fits.Header()\n\n _fits_to_disk(hdr, filename)\n\n return",
"def write_fit(self, output):\n fit = output.create_group('FitParams')\n fit.write_string('fit_format', self.__class__.__name__)\n fit.write_string_array('fit_parameter_names', self.fit_names)\n fit.write_string_array('fit_parameter_latex', self.fit_latex)\n fit.write_array('fit_boundary_low', np.array(\n [x[0] for x in self.fit_boundaries]))\n fit.write_array('fit_boundary_high', np.array(\n [x[1] for x in self.fit_boundaries]))\n\n # This is the last sampled value ... should not be recorded to avoid confusion.\n # fit.write_list('fit_parameter_values',self.fit_values)\n # fit.write_list('fit_parameter_values_nomode',self.fit_values_nomode)\n return output",
"def write_mask(self, file_name, format=\"fits\"):\n mask = np.short(self.to_mask())\n if format == 'fits':\n from astropy.io import fits\n try:\n fits.writeto(file_name, mask, overwrite=True)\n except TypeError:\n fits.writeto(file_name, mask, clobber=True)\n else:\n raise AttributeError(\"format not supported: %s\" % format)",
"def new_fits(outfile, **kwargs):\n # Fake data\n sci_data = numpy.arange(10000, dtype='float').reshape(100,100)\n err_data = numpy.sqrt(sci_data) # Poisson error\n dq_data = numpy.zeros(sci_data.shape, dtype='int16') # No bad pixel\n\n # Create individual extensions\n hdu_hdr = pyfits.PrimaryHDU()\n hdu_sci = pyfits.ImageHDU(sci_data)\n hdu_err = pyfits.ImageHDU(err_data)\n hdu_dq = pyfits.ImageHDU(dq_data)\n\n # Modify headers\n \n hdu_hdr.header['FILENAME'] = outfile\n hdu_hdr.header['NEXTEND'] = 3\n \n hdu_sci.header['BUNIT'] = 'COUNTS'\n hdu_sci.header['EXTNAME'] = 'SCI'\n hdu_sci.header['EXTVER'] = 1\n\n hdu_err.header['BUNIT'] = 'COUNTS'\n hdu_err.header['EXTNAME'] = 'ERR'\n hdu_err.header['EXTVER'] = 1\n\n hdu_dq.header['BUNIT'] = 'UNITLESS'\n hdu_dq.header['EXTNAME'] = 'DQ'\n hdu_dq.header['EXTVER'] = 1\n\n # Create multi-extension FITS\n hduList = pyfits.HDUList([hdu_hdr])\n hduList.append(hdu_sci)\n hduList.append(hdu_err)\n hduList.append(hdu_dq)\n\n # Write to file\n hduList.writeto(outfile, **kwargs)",
"def to_fits(self, filename, **kwargs):\n kwargs['flux_col'] = 'THROUGHPUT'\n kwargs['flux_unit'] = units.THROUGHPUT\n\n # There are some standard keywords that should be added\n # to the extension header.\n bkeys = {'expr': (str(self), 'synphot expression'),\n 'tdisp1': 'G15.7',\n 'tdisp2': 'G15.7'}\n\n if 'ext_header' in kwargs:\n kwargs['ext_header'].update(bkeys)\n else:\n kwargs['ext_header'] = bkeys\n\n specio.write_fits_spec(filename, self.wave, self.thru, **kwargs)",
"def save_full_pickle(self, verbose=True):\n try:\n import cPickle as pickle\n except:\n # Python 3\n import pickle\n\n root = self.grism_file.split('_flt.fits')[0].split('_cmb.fits')[0]\n root = root.split('_flc.fits')[0].split('_rate.fits')[0]\n root = root.split('_elec.fits')[0]\n \n if root == self.grism_file:\n # unexpected extension, so just insert before '.fits'\n root = self.grism_file.split('.fits')[0]\n \n hdu = pyfits.HDUList([pyfits.PrimaryHDU()])\n \n # Remove dummy extensions if REF found\n skip_direct_extensions = []\n if 'REF' in self.direct.data:\n if self.direct.data['REF'] is not None:\n skip_direct_extensions = ['SCI','ERR','DQ']\n \n for key in self.direct.data.keys():\n if key in skip_direct_extensions:\n hdu.append(pyfits.ImageHDU(data=None,\n header=self.direct.header,\n name='D'+key))\n else:\n hdu.append(pyfits.ImageHDU(data=self.direct.data[key],\n header=self.direct.header,\n name='D'+key))\n\n for key in self.grism.data.keys():\n hdu.append(pyfits.ImageHDU(data=self.grism.data[key],\n header=self.grism.header,\n name='G'+key))\n\n hdu.append(pyfits.ImageHDU(data=self.seg,\n header=self.grism.header,\n name='SEG'))\n\n hdu.append(pyfits.ImageHDU(data=self.model,\n header=self.grism.header,\n name='MODEL'))\n\n hdu.writeto('{0}.{1:02d}.GrismFLT.fits'.format(root, self.grism.sci_extn), overwrite=True, output_verify='fix')\n\n # zero out large data objects\n self.direct.data = self.grism.data = self.seg = self.model = None\n\n fp = open('{0}.{1:02d}.GrismFLT.pkl'.format(root, \n self.grism.sci_extn), 'wb')\n pickle.dump(self, fp)\n fp.close()\n\n self.save_wcs(overwrite=True, verbose=False)",
"def fit_spectrum(self, spectrum=None):\n\n if not spectrum:\n spectrum = self.path + self.spectrum\n \n f = open('xspec_script.xcm','w')\n\n xspec_cmds = \"source /homes/borgii/pscholz/.xspec/write_out.tcl\\ndata \" + spectrum +\\\n \"\\n@/homes/borgii/pscholz/bin/swiftmonitor/default_fit.xcm\\nwrite_out \" + self.path +\\\n self.obsroot + \"_xspecfit.txt\\nplot ldata delchi\\nexit\"\n\n f.write(xspec_cmds)\n f.close()\n\n timed_execute('xspec - xspec_script.xcm')\n cmd = 'mv pgplot.ps ' + self.path + self.obsroot + '_xspecfit.ps'\n timed_execute(cmd)\n timed_execute('rm xspec_script.xcm')\n\n cmd = 'gs -q -sDEVICE=png16m -r288 -dBATCH -dNOPAUSE -dFirstPage=1 -dLastPage=1 -sOutputFile=' +\\\n self.path + self.obsroot + '_xspecfit.png ' + self.path + self.obsroot + '_xspecfit.ps'\n timed_execute(cmd)\n cmd = 'convert %s -trim %s' % ( self.path + self.obsroot + '_xspecfit.png',self.path + self.obsroot + '_xspecfit.png' )\n timed_execute(cmd)\n\n self.spec_fit = self.obsroot + \"_xspecfit.txt\"",
"def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n file.write(self.molecule_name + '\\n\\n')\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in self.mol_coords))\n file.write('\\n\\n')",
"def fit(self, data, fit='quantiles'):\n if fit == 'MLE':\n self.setParamsMLE(data)\n self.setDistObj()\n isConverged = True # assume stats.fit will always return a distribution\n else:\n dataMoments = np.array([np.mean(data), np.std(data, ddof=1), moment(data, 3)])\n\n def objFunc(X):\n [self.shape, self.loc, self.scale] = X\n if self.fixedAtZero:\n self.loc = 0\n self.setDistObj()\n if fit == 'quantiles':\n obj = probPlotSqrErr(data, self, self.type, showPlots=False)[0]\n elif fit == 'MOM':\n distMoments = self.moments()\n weights = [1, 1,\n 0.1] # scale the influence of each moment # set last entry to remove skewness from the assessment\n # scale each moment error relative to the data moment value, but replace the data moment with a constant if it is close to zero\n obj = np.sum([abs(dataMoments[i] - distMoments[i]) / max(dataMoments[i], 1E-6) * weights[i] for i in\n range(\n self.nParams)]) # only use the number of moments needed to specify the distribution to match the data # np.sum((distMoments-dataMoments)**2) # np.sum([abs( (dataMoments[i]-distMoments[i])**(1/(i+1)) ) for i in range(3)]) #np.sum((dist.moments()-dataMoments)**2)\n return obj\n\n X = [self.shape, self.loc, self.scale]\n\n res = minimize(objFunc, X, method='SLSQP', options={'disp': True, 'maxiter': 600,\n 'ftol': 1e-8}) # , bounds=bnds, constraints=cons, # options={'maxiter': 500, 'gtol': 1e-6, 'disp': True}\n # method='SLSQP' 'TNC' 'L-BFGS-B' 'COBYLA' #\n # seems to ignore the constraint if bounds not included with method='SLSQP'\n isConverged = res.success\n if isConverged:\n [self.shape, self.loc, self.scale] = res.x\n else:\n [self.shape, self.loc, self.scale] = X # revert to previous values\n\n if self.fixedAtZero:\n self.loc = 0\n\n self.setDistObj()\n return isConverged",
"def write_to_fits(self, outfil, select=False, clobber=True, fill_val=0.):\n if self.nspec == 1:\n select = True\n\n # Flux\n if select:\n prihdu = fits.PrimaryHDU(self.data['flux'][self.select].filled(fill_val))\n #prihdu = fits.PrimaryHDU(self.data[self.select]['flux'])\n else:\n prihdu = fits.PrimaryHDU(self.data['flux'].filled(fill_val))\n hdu = fits.HDUList([prihdu])\n prihdu.name = 'FLUX'\n\n # Error (packing LowRedux style)\n if self.sig_is_set:\n if select:\n sighdu = fits.ImageHDU(self.data['sig'][self.select].filled(fill_val))\n else:\n sighdu = fits.ImageHDU(self.data['sig'].filled(fill_val))\n sighdu.name = 'ERROR'\n hdu.append(sighdu)\n\n # Wavelength\n if select:\n wvhdu = fits.ImageHDU(self.data['wave'][self.select].filled(fill_val))\n else:\n wvhdu = fits.ImageHDU(self.data['wave'].filled(fill_val))\n wvhdu.name = 'WAVELENGTH'\n hdu.append(wvhdu)\n\n if self.co_is_set:\n if select:\n cohdu = fits.ImageHDU(self.data['co'][self.select].filled(fill_val))\n else:\n cohdu = fits.ImageHDU(self.data['co'].filled(fill_val))\n cohdu.name = 'CONTINUUM'\n hdu.append(cohdu)\n\n # Use the header of the selected spectrum\n if self.header is not None:\n hdukeys = list(prihdu.header.keys())\n # Append ones to avoid\n hdukeys = hdukeys + ['BUNIT', 'COMMENT', '', 'NAXIS1', 'NAXIS2', 'HISTORY']\n for key in self.header.keys():\n # Use new ones\n if key in hdukeys:\n continue\n # Update unused ones\n try:\n prihdu.header[key] = self.header[key]\n except ValueError:\n raise ValueError('l.spectra.utils: Bad header key card')\n # History\n if 'HISTORY' in self.header.keys():\n # Strip \\n\n tmp = str(self.header['HISTORY']).replace('\\n', ' ')\n try:\n prihdu.header.add_history(str(tmp))\n except ValueError:\n import pdb\n pdb.set_trace()\n\n #\n if self.meta is not None and len(self.meta) > 0:\n prihdu.header['METADATA'] = meta_to_disk(self.meta)\n\n # Units, etc.\n prihdu.header['NSPEC'] = self.nspec\n prihdu.header['NPIX'] = self.npix\n units = self.units.copy()\n d = ltu.jsonify(units)\n # import pdb; pdb.set_trace()\n prihdu.header['UNITS'] = json.dumps(d)\n\n hdu.writeto(outfil, overwrite=clobber)\n print('Wrote spectrum to {:s}'.format(outfil))",
"def multiple_fits(self):\n self.subtract_background()\n k = 1\n for key in self.fit_names:\n #get params for this fit\n #with new lmfit might not need to do this\n self.params = copy.deepcopy(self.all_params[key])\n\n results = minimize(self.fit_dict[self.fit_type], self.params,\n args = ())\n self.params = results.params\n\n #then if k > num_fits copy result values to params dictionary and fit\n if k < self.num_fits:\n #update parameters\n next_key = self.fit_names[k]\n for i in self.all_params[next_key].keys():\n self.all_params[next_key][i].value = self.params[i].value\n\n #move to next iteration\n k = k + 1\n\n self.fit_results = results",
"def save_maps(self, dirname):\n try:\n os.makedirs(dirname)\n for f,m in self:\n filename = dirname+\"%dMHz.fits\" % int(1e3*f)\n m.to_fits(filename)\n except (IOError, OSError) as e:\n if e.errno != e.EEXIST:\n raise SystemExit('{}: {}'.format(e.filename, e.strerror))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
__init__() should create an RAMSTKUser model.
|
def test_ramstkuser_create(test_common_dao):
_session = test_common_dao.RAMSTK_SESSION(
bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)
DUT = _session.query(RAMSTKUser).first()
assert isinstance(DUT, RAMSTKUser)
# Verify class attributes are properly initialized.
assert DUT.__tablename__ == 'ramstk_user'
assert DUT.user_id == 1
assert DUT.user_lname == 'Tester'
assert DUT.user_fname == 'Johnny'
assert DUT.user_email == 'tester.johnny@reliaqual.com'
assert DUT.user_phone == '+1.269.867.5309'
assert DUT.user_group_id == '1'
|
[
"def _make_user_model(self):\n class User(UserBase):\n \"\"\"\n A User as defined by the response from Keystone.\n\n Note: This class is dynamically generated by :class:`FlaskKeystone`\n from the :class:`flask_keystone.UserBase` class.\n\n :param request: The incoming `flask.Request` object, after being\n handled by the :mod:`keystonemiddleware`\n :returns: :class:`flask_keystone.UserBase`\n \"\"\"\n pass\n\n User.generate_has_role_function(self.roles)\n User.generate_is_role_functions(self.roles)\n\n return User",
"def _create_user(self, request):\n # Should be implemented by subclass depending on data source for user\n raise SystemError(\"This method should not be called\")",
"def __init__(self, *args, **kwargs):\r\n record.Record.__init__(self, *args, **kwargs)\r\n self.key = UserKey()",
"def create_model(self, data):\n model = User()\n model.set_id(data[0])\n model.set_name(data[1])\n model.set_password(data[2])\n return model",
"def initialize_user():\n flask.g.user = readit.User(flask.session.get('session_key', None))\n flask.g.user.user_id = flask.session.get('user_id', None)",
"def __init__(self, user):\n super(UserItemData, self).__init__()\n self._user = user",
"def bootstrap_model(clean=False, user=None):\n create_tables(clean)\n if user:\n create_default_user(user)",
"def setUp(self):\n self.u = User.objects.create_user(\"Mittens\", \"mittensthekitten@gmail.com\", \"meow\")",
"def __init__(self):\n self._user_list = []\n self._current_user = None",
"def make_new_user(self,age,use_set_point):\n return User(age,use_set_point)",
"def test_user_base(self):\n xml = build_bonita_user_xml('user uuid', 'user pass', 'user name')\n\n user = BonitaUser._instanciate_from_xml(xml)\n\n assert isinstance(user, BonitaUser)\n assert user.is_modified is False\n assert user.uuid == u'user uuid'\n assert user.username == u'user name'\n assert user.password == u'user pass'",
"def __init__(self, original_user):\n\n super().__init__()\n self.original_user = original_user",
"def __init__(self, first_name, last_name, email, password):\n super().__init__(first_name, last_name, email, password)\n self.admin_privileges = Privileges()",
"async def initalize(cls, database: Database) -> BCJAIapi:\n\n try:\n #make a semaphore and a KDTree per user\n user_manager = {user_id: {\n 'kdtree': BCJAIapi._create_tree(\n await database.fetch_all(user_id, err=False)),\n 'lock': asyncio.BoundedSemaphore(1)\n }\n for user_id in await database.fetch_users()}\n except NotFoundError: #No users available\n user_manager = {}\n logger.info('Initialized BCJAIapi with user_manager: %s', user_manager)\n return cls(user_manager,database)",
"def _create_user(self, name, rf, cpf, rg, email, password, **extra_fields):\n if not rf:\n raise ValueError('The given RF must be set')\n email = self.normalize_email(email)\n name = self.model.normalize_username(name)\n user = self.model(name=name, rf=rf, cpf=cpf, rg=rg, email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user",
"def initalize_user_tables(db):\n \n from shotglass2.users.models import init_db as users_init_db \n users_init_db(db)",
"def create_user_to_test_with(self):\n user_object = User.objects.create_user(username='roy1',\n first_name='Roy',\n last_name='Hanley',\n email='rhanley8@gmail.com',\n password='small fat gibbon')\n user_object.save()\n user_extended_object = UserExtended(user=user_object)\n user_extended_object.save()\n return",
"def __init__(self, model, lexicon):\n if os.path.exists(model):\n self.model = pickle.load(open(model, \"rb\"))\n else:\n self.model = self.create_model(model, lexicon)",
"def __init__(self, user_data):\n\n self._data = user_data\n self._timestamp = datetime.now()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use this function to determine the step size between two successive points in a profile. Starting from the last estimate of the profile parameters, it determines the optimal step size between that estimate and the next one. The step is zero for all parameters except the one whose profile is being computed, and is optimal when the relative likelihood increase it induces is closest to a target value.
|
def init_step_size(parameters,parameter_index,bounds,likelihood_function,likelihood_args,d_par_init=0.1,d_likelihood=0.1,max_step=3,alpha=0.95):
likelihood = likelihood_function(parameters, *likelihood_args)
df = parameters.shape[0] #number of parameters = number of degrees of freedom
chi2_threshold = scipy.stats.chi2.ppf(alpha,df) #likelihood-threshold of the confidence interval
#initial guess for the step
param_tmp = np.copy(parameters)
d_par=d_par_init
param_tmp[parameter_index] = parameters[parameter_index] + d_par
    #now we correct the initial guess if it is out of bounds
lower_bound , upper_bound = bounds
    if lower_bound is None:
        lower_bound = -np.inf
    if upper_bound is None:
        upper_bound = np.inf
while param_tmp[parameter_index] > upper_bound or param_tmp[parameter_index] < lower_bound: #if the current step jumps out of the parameter's bounds, then we reduce it
print("Boundary reached")
d_par /= 2
param_tmp[parameter_index] = parameters[parameter_index] + d_par
print('New value: %.4g'%param_tmp[parameter_index])
d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood
step_evaluations = 0 #number of evaluations of the step size
#if the step is too big we reduce it
if d_chi2 > chi2_threshold*d_likelihood:
while d_chi2 > chi2_threshold*d_likelihood and step_evaluations < max_step and param_tmp[parameter_index] > lower_bound and param_tmp[parameter_index] < upper_bound:
d_par /= 2
param_tmp[parameter_index] = parameters[parameter_index] + d_par
d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood
step_evaluations += 1
#otherwise we increase it
else:
while d_chi2 < chi2_threshold*d_likelihood and step_evaluations < max_step and param_tmp[parameter_index] > lower_bound and param_tmp[parameter_index] < upper_bound:
d_par *= 2
param_tmp[parameter_index] = parameters[parameter_index] + d_par
d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood
step_evaluations += 1
    d_par /= 2 #undo the last doubling: as in Raue et al.'s algorithm, the final doubling typically pushes the likelihood increase past the target, so we step back once
    return d_par
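    # Usage sketch (illustrative only; the objective and data below are hypothetical and
    # not taken from the source). Inside a profile-likelihood loop, init_step_size proposes
    # the next step for the profiled parameter so that the induced chi-square increase is
    # close to d_likelihood * chi2_threshold:
    #
    #   import numpy as np, scipy.stats
    #   x = np.random.normal(0.0, 1.0, size=50)
    #   def chi2_objective(theta, data):  # hypothetical chi-square-style objective
    #       mu, sigma = theta
    #       return np.sum(((data - mu) / sigma) ** 2)
    #   theta_hat = np.array([x.mean(), x.std()])
    #   d_mu = init_step_size(theta_hat, 0, (None, None), chi2_objective, (x,),
    #                         d_par_init=0.05, d_likelihood=0.01)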
|
[
"def _stepSize(self, gradientHist=None, prevStepSize=None, recommend=None, **kwargs):\n # grad0 = gradientHist[-1][1]\n # grad1 = gradientHist[-2][1] if len(gradientHist) > 1 else None\n # FIXME try using the step directions instead\n step0 = prevStepSize[-1]['versor']\n if step0 is None:\n step0 = gradientHist[-1][1]\n step1 = prevStepSize[-2]['versor'] if len(prevStepSize) > 1 else None\n gainFactor = self._fractionalStepChange(step0, step1, recommend=recommend)\n # gainFactor = self._fractionalStepChange(grad0, grad1, recommend=recommend)\n stepSize = gainFactor * prevStepSize[-1]['magnitude']\n return stepSize",
"def GetStepSizeProb(a1, a2, beta, p):\n step_size = (a2-a1)\n if abs(step_size)>10: return 0\n up_prob = max([0.001,0.5*(1-beta*p*a1)])\n up_prob = min(up_prob, 0.999) # Maximum value is 0.99 (allow for minimal probability of contraction at small alleles)\n down_prob = 1-up_prob\n if step_size>0: dir_prob = up_prob\n else: dir_prob = down_prob\n step_prob = geom.pmf(abs(step_size), p)\n return dir_prob*step_prob",
"def get_stepsize(self) -> float:\n return self.m_stepsize",
"def get_step_size(data, params, meta, fit_par):\n nvisit = int(meta.nvisit)\n nfree_param = data.nfree_param\n step_size = []\n ii = 0\n for i in range(len(fit_par)):\n if ii == len(fit_par):\n break\n if fit_par['fixed'][ii].lower() == \"false\":\n if str(fit_par['tied'][ii]) == \"-1\":\n step_size.append(fit_par['step_size'][ii])\n ii = ii + 1\n else:\n for j in range(nvisit):\n step_size.append(fit_par['step_size'][ii])\n ii = ii + 1\n else:\n ii = ii + 1\n\n return np.array(step_size)",
"def step_size(self) -> Timedelta:\n assert self._step_size is not None, \"No step size provided\"\n return self._step_size",
"def _sample_step_size(self):\n step_size = np.random.choice(self.step_sizes, size=self.chains_num, p=self.step_probabilities)\n step_size = self._adjust_step_size(step_size)\n\n # apply step size seek during burn in\n if self.seek_step_sizes and not self._burned_in():\n lower, upper = .90, .99\n change = max(min(10. / self.burn_in, .0001), .01)\n change *= (1 - self._get_burn_in_ratio(.35))\n inc, dec = 1. + change, 1. - change\n\n acr = self.avg_acceptance_rate\n self.step_multiplier *= (acr < lower).astype(np.float32) * dec + (acr >= lower).astype(np.float32)\n self.step_multiplier *= (acr > upper) * inc + (acr <= upper).astype(np.float32)\n\n step_size *= self.step_multiplier\n\n if self._burned_in() and self.anneal_step_sizes:\n t = self.sample_number - self.burn_in\n gamma = .51\n base = .01 * self.burn_in\n multiplier = base ** gamma / ((base + t) ** gamma)\n\n step_size *= multiplier\n\n self._current_step_size_value = step_size",
"def set_StepSize(self,stepSize,startPoint,endPoint = inf):\n self.a = startPoint\n self.b = endPoint\n self.h = stepSize\n self.N = int((endPoint-startPoint)/stepSize) + 1",
"def get_step_size(total_items, batch_size):\n return int(np.ceil(total_items / batch_size))",
"def Compute_Profile(parameters,parameter_index,likelihood_function,likelihood_args,bounds,target_sample_size=100,max_sample_size=1000,d_par_init=0.002,max_step=10,number_initial_guess_samples=30,alpha=0.95,verbose_success=False,verbose_error=False):\n\n chi2 = likelihood_function(parameters, *likelihood_args)\n df = parameters.shape[0] # number of parameters of the model\n chi2_threshold = scipy.stats.chi2.ppf(alpha,df) #likelihood-threshold of the confidence interval\n\n #we store the coordinates of the optimum\n params_backup = np.copy(parameters)\n chi2_backup = chi2\n\n #we intialize the output, and start filling it out\n Chi2PL=np.array([chi2])\n Parameters=np.transpose(np.array([parameters]))\n\n d_likelihood = 1/target_sample_size #the number of steps should be the inverse of the stepwise relative likelihood increase (see Supp. Inf. of raue et al., Bioinfo., 2009 for more detail)\n\n #For decreasing values of the parameter:\n params = np.copy(parameters)\n i=0\n #for i in range(sample_size):\n while i<max_sample_size and chi2-chi2_backup < 1.1*chi2_threshold:\n print(\"Computing point #%i of the profile\"%i)\n d_par=init_step_size(params, parameter_index, bounds[parameter_index], likelihood_function, likelihood_args, - d_par_init*np.abs(parameters[parameter_index]), d_likelihood, max_step, alpha)\n params[parameter_index] += d_par\n\n opt=CER.Sample_Estimate(profile_likelihood, df-1, args=(parameter_index, params[parameter_index], likelihood_function, likelihood_args), bounds = bounds[:parameter_index]+bounds[(parameter_index+1):], nsamples = number_initial_guess_samples, full_output = True, verbose_success = verbose_success, verbose_error=verbose_error, lhs=False)\n\n #We update stuff\n params=np.insert(opt['parameters'],parameter_index,params[parameter_index])\n Parameters=np.insert(Parameters,0,params,axis=1)\n chi2=opt['error']\n Chi2PL = np.insert(Chi2PL, 0, chi2)\n i+=1\n\n #Resetting the original values of stuff\n params = np.copy(params_backup)\n chi2 = chi2_backup\n\n #For increasing values of the parameter:\n i=0\n while i<max_sample_size and chi2 - chi2_backup < 1.1*chi2_threshold:\n print(\"Computing point #%i of the profile\"%i)\n d_par=init_step_size(params, parameter_index, bounds[parameter_index], likelihood_function, likelihood_args, d_par_init*np.abs(parameters[parameter_index]), d_likelihood, max_step, alpha)\n params[parameter_index] += d_par\n\n opt=CER.Sample_Estimate(profile_likelihood, df-1, args=(parameter_index, params[parameter_index], likelihood_function, likelihood_args), bounds = bounds[:parameter_index]+bounds[(parameter_index+1):], nsamples = number_initial_guess_samples, full_output = True, verbose_success = verbose_success, verbose_error = verbose_error, lhs=False)\n\n #We update stuff\n params=np.insert(opt['parameters'],parameter_index,params[parameter_index])\n Parameters=np.append(Parameters,np.transpose(np.array([params])),axis=1)\n chi2 = opt['error']\n Chi2PL = np.append(Chi2PL, chi2)\n i+=1\n\n return({'Parameters': Parameters, 'Profile_Likelihood':Chi2PL})",
"def step(self, prevOpt, gradientHist=None, prevStepSize=None, recommend=None, **kwargs):\n stepSize = self._stepSize(gradientHist=gradientHist, prevStepSize=prevStepSize,\n recommend=recommend, **kwargs)\n direction = dict((var, 0) for var in self._optVars)\n for t in range(self._gradTerms):\n if len(gradientHist) <= t:\n break\n decay = np.exp(-t * self._termDecay)\n for var in direction:\n direction[var] -= gradientHist[-(t+1)][1][var] * decay\n # gradient = gradientHist[-1][1]\n # use gradient, prev point, and step size to choose new point\n newOpt = {}\n for var in self._optVars:\n newOpt[var] = prevOpt[var] + stepSize * direction[var]\n return newOpt, stepSize, None",
"def test_line_search_step_size_should_decrease(self):\n p1 = torch.tensor([0.1])\n p2 = torch.tensor([0.1])\n params = [p1, p2]\n optimizer = ConjugateGradientOptimizer(params, 0.01)\n p1_history = []\n p2_history = []\n loss = 0\n\n first_time = True\n\n def f_loss():\n nonlocal loss, first_time\n if first_time:\n first_time = False\n else:\n p1_history.append(p1.clone())\n p2_history.append(p2.clone())\n loss += 1\n\n return torch.tensor(loss)\n\n def f_constrint():\n return torch.tensor(0)\n\n descent_step = torch.tensor([0.05, 0.05])\n optimizer._backtracking_line_search(params, descent_step, f_loss,\n f_constrint)\n\n p1_steps = []\n p2_steps = []\n for i in range(len(p1_history) - 1):\n p1_steps.append(p1_history[i + 1] - p1_history[i])\n p2_steps.append(p2_history[i + 1] - p2_history[i])\n\n for i in range(len(p1_steps) - 1):\n assert p1_steps[i] > p1_steps[i + 1]\n assert p2_steps[i] > p2_steps[i + 1]",
"def step_size(self) -> Callable[[], Timedelta]:\n return lambda: self._manager.step_size",
"def _next_sample_size_pairwise(self, sub_audit: PairwiseAudit, sprob=0.9):\n # NOTE: Numerical issues arise when sample results disagree to an extreme extent with the reported margin.\n start = 10**1\n subsequent_round = len(self.rounds) > 0\n previous_round = 0\n if subsequent_round:\n winner_ballots = self.sample_ballots[sub_audit.sub_contest.reported_winner][-1]\n loser_ballots = self.sample_ballots[sub_audit.sub_contest.reported_loser][-1]\n previous_round = winner_ballots + loser_ballots\n init_upper_bound = self.get_upper_bound(previous_round + 1, start)\n else:\n init_upper_bound = start\n upper_bound = init_upper_bound\n while upper_bound < 10**7:\n if len(self.rounds) > 0:\n # Ensure upper bound is sufficiently large.\n if upper_bound == init_upper_bound:\n estimate = self.binary_search_estimate(previous_round + 1, upper_bound, sprob, sub_audit)\n else:\n estimate = self.binary_search_estimate(upper_bound // 10, upper_bound, sprob, sub_audit)\n else:\n if upper_bound == init_upper_bound:\n estimate = self.binary_search_estimate(1, upper_bound, sprob, sub_audit)\n else:\n estimate = self.binary_search_estimate(upper_bound // 10, upper_bound, sprob, sub_audit)\n if estimate[0] > 0:\n return estimate\n upper_bound *= 10\n return 0",
"def StepsPerInch(self) -> float:",
"def y_step_size(self):\n return (self.y_upper - self.y_lower) / self.ny",
"def step(self) -> float:\n self.step_count += 1\n if self.step_count > self.warmup and self.val < self.final_val:\n self.val += self.step_per_epoch\n self.val = min(self.val, self.final_val)\n return self.val",
"def geometric_progression_for_stepsize(x, update, dist, decision_function,\n current_iteration):\n epsilon = dist / np.sqrt(current_iteration)\n while True:\n updated = x + epsilon * update\n success = decision_function(updated[None])[0]\n if success:\n break\n else:\n epsilon = epsilon / 2.0\n\n return epsilon",
"def estimate_size(self) -> int:\n raise NotImplementedError",
"def GetDefaultInputStepSize(self) -> \"double const &\":\n return _itkParametricPathPython.itkParametricPath2_GetDefaultInputStepSize(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function computes the profile likelihood of a dynamic model with respect to one of its parameters. Starting from the best-fit parameter set, it steps the parameter away from the optimum on each side, letting the likelihood rise toward the identifiability threshold, for at most a fixed number of profile points. If the threshold is not reached within the authorized number of step-size evaluations, the profile likelihood is considered unbounded on that side and its computation stops.
|
def Compute_Profile(parameters,parameter_index,likelihood_function,likelihood_args,bounds,target_sample_size=100,max_sample_size=1000,d_par_init=0.002,max_step=10,number_initial_guess_samples=30,alpha=0.95,verbose_success=False,verbose_error=False):

    chi2 = likelihood_function(parameters, *likelihood_args)
    df = parameters.shape[0] # number of parameters of the model
    chi2_threshold = scipy.stats.chi2.ppf(alpha,df) #likelihood threshold of the confidence interval

    #we store the coordinates of the optimum
    params_backup = np.copy(parameters)
    chi2_backup = chi2

    #we initialize the output and start filling it in
    Chi2PL = np.array([chi2])
    Parameters = np.transpose(np.array([parameters]))

    d_likelihood = 1/target_sample_size #the target stepwise relative likelihood increase is the inverse of the desired number of profile points (see Supp. Inf. of Raue et al., Bioinformatics, 2009 for more detail)

    #For decreasing values of the parameter:
    params = np.copy(parameters)
    i = 0
    while i < max_sample_size and chi2 - chi2_backup < 1.1*chi2_threshold:
        print("Computing point #%i of the profile"%i)
        d_par = init_step_size(params, parameter_index, bounds[parameter_index], likelihood_function, likelihood_args, -d_par_init*np.abs(parameters[parameter_index]), d_likelihood, max_step, alpha)
        params[parameter_index] += d_par

        opt = CER.Sample_Estimate(profile_likelihood, df-1, args=(parameter_index, params[parameter_index], likelihood_function, likelihood_args), bounds = bounds[:parameter_index]+bounds[(parameter_index+1):], nsamples = number_initial_guess_samples, full_output = True, verbose_success = verbose_success, verbose_error = verbose_error, lhs=False)

        #Update the current parameter vector and prepend the new profile point
        params = np.insert(opt['parameters'], parameter_index, params[parameter_index])
        Parameters = np.insert(Parameters, 0, params, axis=1)
        chi2 = opt['error']
        Chi2PL = np.insert(Chi2PL, 0, chi2)
        i += 1

    #Reset parameters and chi2 to the optimum before profiling in the other direction
    params = np.copy(params_backup)
    chi2 = chi2_backup

    #For increasing values of the parameter:
    i = 0
    while i < max_sample_size and chi2 - chi2_backup < 1.1*chi2_threshold:
        print("Computing point #%i of the profile"%i)
        d_par = init_step_size(params, parameter_index, bounds[parameter_index], likelihood_function, likelihood_args, d_par_init*np.abs(parameters[parameter_index]), d_likelihood, max_step, alpha)
        params[parameter_index] += d_par

        opt = CER.Sample_Estimate(profile_likelihood, df-1, args=(parameter_index, params[parameter_index], likelihood_function, likelihood_args), bounds = bounds[:parameter_index]+bounds[(parameter_index+1):], nsamples = number_initial_guess_samples, full_output = True, verbose_success = verbose_success, verbose_error = verbose_error, lhs=False)

        #Update the current parameter vector and append the new profile point
        params = np.insert(opt['parameters'], parameter_index, params[parameter_index])
        Parameters = np.append(Parameters, np.transpose(np.array([params])), axis=1)
        chi2 = opt['error']
        Chi2PL = np.append(Chi2PL, chi2)
        i += 1

    return({'Parameters': Parameters, 'Profile_Likelihood': Chi2PL})
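
A minimal usage sketch (added here for illustration, not part of the original code): it assumes the function above and its helpers (init_step_size, profile_likelihood, CER.Sample_Estimate) are available in scope; the toy straight-line chi-square model, data and bounds below are hypothetical placeholders.

import numpy as np

def toy_chi2(params, data):
    #sum-of-squares cost for a straight line y = a*x + b
    x, y = data
    return np.sum((y - (params[0]*x + params[1]))**2)

x = np.linspace(0.0, 1.0, 20)
data = (x, 2.0*x + 1.0)
best_fit = np.array([2.0, 1.0])       #optimum of toy_chi2 on this noiseless data
bounds = [(0.0, 10.0), (0.0, 10.0)]   #one (lower, upper) pair per parameter

profile = Compute_Profile(best_fit, 0, toy_chi2, (data,), bounds,
                          target_sample_size=50, max_sample_size=200)
#profile['Parameters'] holds one column per profile point;
#profile['Profile_Likelihood'] holds the matching chi-square values.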
|
[
"def paramLikelihoodProfile(i=0,j=0,drug_type=0,n=20):\n print('parameter=',i,'patient=',j,'drug type =',drug_type)\n if drug_type == 0:\n reps = 1\n cost_reps =[None]*reps\n pat_data = [None]*2\n tot_dose = pk.CPT11_tot_dose[0]\n pat_data[0] = pk.CPT11[0]\n pat_data[1] = pk.SN38[0]\n data_time = pk.CPT11_time[0]\n drug_name = '_cpt11'\n cost_function = cpt.non_phys_param_cost_single_both\n vol = pk.vol[0]\n args = (tot_dose,data_time,pat_data,vol)\n k0 = np.loadtxt('data/pat_param_drug'+drug_name+'.txt')[0]\n best = cost_function(k0,tot_dose,data_time,pat_data,vol)\n np.save('bestcost'+drug_name,best)\n bounds = [0, 1e+8]\n paramRange = [np.linspace(1.2e3,6e+3,n),np.linspace(0, 0.8e+4,n),np.linspace(3.5e4, 1.4e+5,n),np.linspace(1.6e4, 7e+4,n),np.linspace(0.7e3, 3e+3,n),np.linspace(3.5e4, 1.2e+5,n)]\n elif drug_type == 1:\n reps = 3\n cost_reps =[None]*reps\n tot_dose = pk.FU_tot_dose[0]\n pat_data = pk.FU[0]\n data_time = pk.FU_time[0]\n cost_function = fu.non_phys_param_cost_single\n drug_name = '_5fu'\n vol = pk.vol[0]\n args = (tot_dose,data_time,pat_data,vol)\n k0 = np.loadtxt('data/pat_param_drug'+drug_name+'.txt')[0]\n best = cost_function(k0,tot_dose,data_time,pat_data,vol)\n np.save('bestcost'+drug_name,best)\n bounds = [0, 1e+8]\n paramRange = [np.linspace(1.2e4,6e5,n),np.linspace(0,1.8e5,n),np.linspace(2,1.2e2,n)]\n elif drug_type == 2:\n reps = 1\n cost_reps =[None]*reps\n pat_data = [None]*2\n tot_dose = pk.LOHP_tot_dose[0]\n pat_data[0] = pk.LOHP_free[0]\n pat_data[1] = pk.LOHP_total[0]\n data_time = pk.LOHP_time[0]\n cost_function = lohp.non_phys_param_cost_single_lohp\n drug_name = '_lohp'\n vol = pk.vol[0]\n args = (tot_dose,data_time,pat_data,vol)\n k0 = np.loadtxt('data/pat_param_drug'+drug_name+'.txt')[0]\n best = cost_function(k0,tot_dose,data_time,pat_data,vol)\n np.save('bestcost'+drug_name,best)\n bounds = [0, 1e+8]\n paramRange = [np.linspace(0.9e4,3e4,n),np.linspace(1.1e4,5.1e04,n),np.linspace(4e03,2e04,n),np.linspace(8e03,1.6e04,n),np.linspace(3e02,7e2,n)]\n\n\n cost = [0]*(n+1)\n pRange = np.sort(np.append(paramRange[i],k0[i]))\n for l in range(n+1):\n kiNew = pRange[l]\n k1 = k0\n for rep in range(reps):\n es = cma.CMAEvolutionStrategy(k1,1e04,{'bounds': bounds,'verb_log': 0,'verb_disp': 0,'tolfun':1e-02,'fixed_variables':{i:kiNew}})\n es.optimize(cost_function,args=args)\n# k1 = es.result[0]\n cost_reps[rep] = es.result[1]\n cost[l] = np.min(cost_reps)\n np.savetxt('param_'+str(i)+'patient_'+str(j)+'_likelihoodprofile'+drug_name+'.txt',cost)\n np.savetxt('param_'+str(i)+'patient_'+str(j)+'_profilerange'+drug_name+'.txt',pRange)\n return cost, pRange, k0[i], best",
"def _get_likelihood_profile_function(self, optimal_parameters, fixed):\n optimal_parameters_without_fixed = [p for i, p in enumerate(optimal_parameters) if i not in fixed]\n insert_fixed_parameters = lambda p, f: functools.reduce(lambda l, i: l[:i[1]] + [f[i[0]]] + l[i[1]:], enumerate(fixed), list(p))\n if self.ugly_and_fast:\n return lambda x, data, weights, ne: self.loss(insert_fixed_parameters(optimal_parameters_without_fixed, x), data, weights, ne, self.mapping)\n else:\n return lambda x, data, weights, ne: self._fit(optimal_parameters_without_fixed, data, weights, ne, lambda p: self.mapping(np.array(insert_fixed_parameters(p, x)))).fun",
"def ln_likelihood(par, per_obs=False):\n # if we are outside the allowable parameter ranges, return 0\n # likelihood.\n #import pdb; pdb.set_trace()\n for i,p in enumerate(par):\n if not (P['min'][i] < p < P['max'][i]):\n return -np.inf\n # force the cloud thickness to be < 1 Mpc\n coord = par[IND_PAR['NHI']], par[IND_PAR['nH']], par[IND_PAR['Z']], \\\n par[IND_PAR['aUV']]\n logNH = Ncloudy['NH'](coord) \n if (logNH - par[IND_PAR['nH']]) > log10_cm_per_Mpc:\n return -np.inf\n \n \n Nmodel = model(par)\n\n lnprobtot = np.zeros(np.asarray(par[0]).shape)\n\n for pname in priors:\n if pname.startswith('min ') or pname.startswith('max '):\n continue\n # only deals with two-sided gaussian priors at the moment\n pval, siglo, sighi = priors[pname]\n p = ln_pdf_siglohi(par[IND_PAR[pname]], pval, siglo, sighi)\n lnprobtot += p\n\n allprob = []\n for i,tr in enumerate(trans):\n Nobs, siglo, sighi = obs[tr]\n if siglo == 0:\n #print(tr, 'lower limit')\n p = ln_pdf_lolim(Nmodel[i], Nobs, SIG_LIMIT)\n lnprobtot += p\n if per_obs:\n allprob.append(p)\n elif sighi == 0:\n #print(tr, 'upper limit')\n p = ln_pdf_uplim(Nmodel[i], Nobs, SIG_LIMIT)\n lnprobtot += p\n if per_obs:\n allprob.append(p)\n else:\n #print(tr)\n siglo = max(siglo, MIN_SIG)\n sighi = max(sighi, MIN_SIG)\n p = ln_pdf_siglohi(Nmodel[i], Nobs, siglo, sighi)\n lnprobtot += p\n if per_obs:\n allprob.append(p)\n\n if per_obs:\n return lnprobtot, allprob\n else:\n return lnprobtot",
"def log_likelihood_function(self, instance):\r\n\r\n \"\"\"\r\n In the previous tutorial the instance was a single `Gaussian` profile, meaning we could create the model data \r\n using the line:\r\n\r\n model_data = instance.gaussian.model_data_1d_via_xvalues_from(xvalues=self.data.xvalues)\r\n\r\n In this tutorial our instance is comprised of multiple 1D Gaussians, because we will use a `Collection` to\r\n compose the model:\r\n\r\n model = Collection(gaussian_0=Gaussian, gaussian_1=Gaussian).\r\n\r\n By using a Collection, this means the instance parameter input into the fit function is a\r\n dictionary where individual profiles (and their parameters) can be accessed as followed:\r\n\r\n print(instance.gaussian_0)\r\n print(instance.gaussian_1)\r\n print(instance.gaussian_0.centre)\r\n\r\n In this tutorial, the `model_data` is therefore the summed `model_data` of all individual Gaussians in the \r\n model. The function `model_data_from_instance` performs this summation. \r\n \"\"\"\r\n model_data = self.model_data_from_instance(instance=instance)\r\n\r\n residual_map = self.data - model_data\r\n chi_squared_map = (residual_map / self.noise_map) ** 2.0\r\n chi_squared = sum(chi_squared_map)\r\n noise_normalization = np.sum(np.log(2 * np.pi * noise_map**2.0))\r\n log_likelihood = -0.5 * (chi_squared + noise_normalization)\r\n\r\n return log_likelihood",
"def _profile_likelihood_maximization(self,U, n_elbows):\n if type(U) == list: # cast to array for functionality later\n U = np.array(U)\n\n if n_elbows == 0: # nothing to do..\n return np.array([])\n\n if U.ndim == 2:\n U = np.std(U, axis=0)\n\n if len(U) == 0:\n return np.array([])\n\n elbows = []\n\n if len(U) == 1:\n return np.array(elbows.append(U[0]))\n\n # select values greater than the threshold\n U.sort() # sort\n U = U[::-1] # reverse array so that it is sorted in descending order\n n = len(U)\n\n U = U[:self.hyperparams['max_dimension']].copy()\n\n while len(elbows) < n_elbows and len(U) > 1:\n d = 1\n sample_var = np.var(U, ddof=1)\n sample_scale = sample_var ** (1 / 2)\n elbow = 0\n likelihood_elbow = 0\n while d < len(U):\n mean_sig = np.mean(U[:d])\n mean_noise = np.mean(U[d:])\n sig_likelihood = 0\n noise_likelihood = 0\n for i in range(d):\n sig_likelihood += norm.pdf(U[i], mean_sig, sample_scale)\n for i in range(d, len(U)):\n noise_likelihood += norm.pdf(U[i], mean_noise, sample_scale)\n\n likelihood = noise_likelihood + sig_likelihood\n\n if likelihood > likelihood_elbow:\n likelihood_elbow = likelihood\n elbow = d\n d += 1\n if len(elbows) == 0:\n elbows.append(elbow)\n else:\n elbows.append(elbow + elbows[-1])\n U = U[elbow:]\n\n if len(elbows) == n_elbows:\n return np.array(elbows)\n\n if len(U) == 0:\n return np.array(elbows)\n else:\n elbows.append(n)\n return np.array(elbows)",
"def max_log_likelihood_fit(self) -> FitImaging:\r\n instance = self.analysis.instance_with_associated_adapt_images_from(\r\n instance=self.instance\r\n )\r\n\r\n plane = self.analysis.plane_via_instance_from(instance=instance)\r\n\r\n return self.analysis.fit_imaging_via_plane_from(\r\n plane=plane,\r\n )",
"def likelihood_profile(self, parameter_values, data, weights=None, expected_number_of_events=None):\n if self.r is None:\n raise RuntimeError(\"Please call fit first\")\n if len(parameter_values) != len(self.r.x):\n raise RuntimeError(\"The number of provided values does not match the number of fitted parameters\")\n data, weights, expected_number_of_events = self._bin_data_if_necessary(*self._ensure_dimension(data, weights, expected_number_of_events))\n parameter_positions = [i for i, v in enumerate(parameter_values) if v is not None]\n likelihood_profile_function = self._get_likelihood_profile_function(list(self.r.x), parameter_positions)\n return np.array([likelihood_profile_function(list(parameters), data, weights, expected_number_of_events) for parameters in zip(*[v for v in parameter_values if v is not None])])",
"def likelihood_profiles(self, model, parameters=\"all\"):\n profiles = {}\n\n if parameters == \"all\":\n parameters = [par.name for par in model.paramaters]\n\n for parname in parameters:\n profiles[parname] = self.likelihood_profile(model, parname)\n return profiles",
"def bayesian_information_criteria(self):\n self.max_likelihood('diff_evo')\n l_hat = optimize.ln_likelihood(self.variable_medians, self.function, self.abscissa, self.ordinate)\n return np.log(self.x.size) * self.len_parameters - 2 * l_hat",
"def init_step_size(parameters,parameter_index,bounds,likelihood_function,likelihood_args,d_par_init=0.1,d_likelihood=0.1,max_step=3,alpha=0.95):\n\n likelihood = likelihood_function(parameters, *likelihood_args)\n df = parameters.shape[0] #number of parameters = number of degrees of freedom\n chi2_threshold = scipy.stats.chi2.ppf(alpha,df) #likelihood-threshold of the confidence interval\n\n #initial guess for the step\n param_tmp = np.copy(parameters)\n d_par=d_par_init\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n\n #now we correct the initial guess if it is out of bonds.\n lower_bound , upper_bound = bounds\n if lower_bound==None:\n lower_bound=-np.inf\n if upper_bound==None:\n upper_bound=np.inf\n while param_tmp[parameter_index] > upper_bound or param_tmp[parameter_index] < lower_bound: #if the current step jumps out of the parameter's bounds, then we reduce it\n print(\"Boundary reached\")\n d_par /= 2\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n print('New value: %.4g'%param_tmp[parameter_index])\n \n d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood\n\n step_evaluations = 0 #number of evaluations of the step size\n #if the step is too big we reduce it\n if d_chi2 > chi2_threshold*d_likelihood:\n while d_chi2 > chi2_threshold*d_likelihood and step_evaluations < max_step and param_tmp[parameter_index] > lower_bound and param_tmp[parameter_index] < upper_bound:\n d_par /= 2\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood\n step_evaluations += 1\n\n #otherwise we increase it\n else:\n while d_chi2 < chi2_threshold*d_likelihood and step_evaluations < max_step and param_tmp[parameter_index] > lower_bound and param_tmp[parameter_index] < upper_bound:\n d_par *= 2\n param_tmp[parameter_index] = parameters[parameter_index] + d_par\n d_chi2 = likelihood_function(param_tmp, *likelihood_args) - likelihood\n step_evaluations += 1\n d_par /= 2 #this is in Raue's algorithm but I don't really get it. Apparently the last doubling step is too much.\n\n return(d_par)",
"def OptExtProfile(spec1d,img0,varimg0,skyvar,rdnoise,gain,degree=3,display=False):\n \n print(\"Using Optimal Extraction\")\n \n img = img0.copy()\n varimg = varimg0.copy()\n\n nr,nc= img.shape #maybe have an axis keyword if the dispersion axis is in other direction, for now assume rows are spatial\n spec = spec1d.copy()\n \n pix = np.arange(nc)\n prof = np.zeros((nr,nc) ) # initialize variable with profile\n \n oldmask = np.zeros_like(img).astype('bool')\n l = 0\n iteratecrit = True\n while iteratecrit:\n spec_profile = img / np.tile(spec,(nr,1)) # divide spectra by initial estimate to normalize out the spectral features\n var_profile = varimg / np.tile(np.power(spec,2),(nr,1))\n \n # loop over each row\n for i in range(nr):\n #median filter of spectral profile as initial smoothing -- replaced with sigma clipping for profile\n #temprow = scipy.signal.medfilt(spec_profile[i,:],(9,))\n\n temprow = spec_profile[i,:]\n nanmask = np.isfinite(temprow)\n\n polymod = models.Polynomial1D(degree)\n sigmafit = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),sigma_clip,niter=3,sigma=3)\n \n _ , polyout = sigmafit(polymod,pix[nanmask],temprow[nanmask],weights=1/var_profile[i,nanmask]) #uses weights\n \n prof[i,:] = polyout(pix)\n\n bottom = (prof[i,:] < 0).nonzero()[0]\n prof[i,bottom] = 0. # enforce positivity\n \n prof = prof/ np.tile(np.sum(prof,axis=0),(nr,1)) # normalizes the weights\n \n #update variance image:\n varimg = (rdnoise/gain)**2 + (spec*prof/gain) + skyvar\n badmask = np.power(img - np.tile(spec,(nr,1))*prof,2) > 25*varimg # 5 sigma overall clipping mask\n\n badmask = badmask | oldmask\n\n prof[badmask] = 0 #omit\n spec = np.sum(img*prof/varimg,axis=0) / np.sum(np.power(prof,2)/varimg,axis=0) #this is the variance weighted summation 'optimal'\n variance = np.sum(prof,axis=0) / (np.sum(np.power(prof,2)/varimg,axis=0))\n l += 1\n #print(l)\n if (l > 0) & np.all(badmask == oldmask):\n iteratecrit = False\n oldmask = badmask.copy()\n\n if display:\n fig, ax = plt.subplots(2,1)\n profmap = ax[0].imshow(prof)\n plt.colorbar(profmap,orientation='horizontal',ax=ax[0])\n ax[1].plot(np.median(prof,axis=1))\n ax[1].set_title(\"Median Profile\")\n\n return prof,spec,np.sqrt(variance)",
"def posteriorLikelihood(self, step):",
"def grad_log_likelihood(kc, cb, eval_request, eval_result, model_params):\n if eval_request.type != KN_RC_EVALGA:\n print(\"*** grad_log_likelihood incorrectly called with eval type %d\" %\n eval_request.type)\n return -1\n params = eval_request.x\n\n np.savetxt(\"current_pars_k.txt\", params)\n\n mus_and_maybe_grad = model_params.mus_and_maybe_grad\n bases_surplus = model_params.bases_surplus\n observed_matching = model_params.observed_matching\n\n ncat_men, ncat_women = bases_surplus.shape[:-1]\n n_prod_categories = ncat_men * ncat_women\n\n mus, _, dmus = mus_and_maybe_grad(params, model_params, gr=True)\n\n grad_loglik = grad_loglik_all_mus(observed_matching, mus)\n\n gradN = grad_loglik[-1]\n gradxy = grad_loglik[:n_prod_categories].reshape(\n (ncat_men, ncat_women)) + gradN\n gradx0 = grad_loglik[n_prod_categories:(\n n_prod_categories + ncat_men)] + gradN\n grad0y = grad_loglik[(n_prod_categories + ncat_men):-1] + gradN\n\n der_muxy = np.einsum('ij,ijk->k', gradxy, dmus.muxy)\n der_mux0 = np.einsum('i,ik->k', gradx0, dmus.mux0)\n der_mu0y = np.einsum('i,ik->k', grad0y, dmus.mu0y)\n\n eval_result.objGrad = -(der_muxy + der_mux0 + der_mu0y)\n\n return 0",
"def handle_new_best_parameters(self):\n if self.max_performance_measure < self.optimizer.res['max']['max_val']:\n print(\"\\t\\033[1;35mNew maximum found, outputting params and plots!\\033[0m\")\n self.max_performance_measure = self.optimizer.res['max']['max_val']\n # Dump the best parameter set currently known by the optimizer\n yaml.dump(self.max_rosparams, open(os.path.join(self._params['plots_directory'], \"best_rosparams_\" + self.iteration_string() + \".yaml\"), 'w'))\n # store the best known sample in the best_samples dict, for boxplots\n self.best_samples.append((self.iteration, self.max_sample))\n self.plot_all_new_best_params()",
"def fit(self):\n ln_l_all_array = [] #array of all log likelihoods\n ln_l_max = float(\"-inf\") #keep track of the maximum likelihood\n cp_parameter_array = None #parameters with the maximum likelihood\n #for multiple initial values\n for i in range(self.n_initial):\n print(\"initial value\", i)\n print(\"gradient descent\")\n super().fit() #regular gradient descent\n #copy the log likelihood\n for ln_l in self.ln_l_array:\n ln_l_all_array.append(ln_l)\n #check for convergence in the log likelihood\n ln_l = ln_l_all_array[len(ln_l_all_array)-1]\n if ln_l > ln_l_max:\n #the log likelihood is bigger, copy the parmeters\n ln_l_max = ln_l\n self.ln_l_max_index = len(ln_l_all_array)-1\n cp_parameter_array = self.copy_parameter()\n #do stochastic gradient descent to get a different initial value\n if i < self.n_initial-1:\n print(\"stochastic gradient descent\")\n #track when stochastic gradient descent was done for this entry\n #of ln_l_array\n self.ln_l_stochastic_index.append(len(ln_l_all_array))\n for j in range(self.n_stochastic_step):\n print(\"step\", j)\n self.m_stochastic_step()\n self.update_all_cp_parameters()\n ln_l_all_array.append(self.get_em_objective())\n #track when gradient descent was done\n #the E step right after this in super().fit() is considered part\n #of stochastic gradient descent\n self.ln_l_stochastic_index.append(len(ln_l_all_array)+1)\n else:\n self.ln_l_stochastic_index.append(len(ln_l_all_array))\n #copy results to the member variable\n self.ln_l_array = ln_l_all_array\n self.set_parameter(cp_parameter_array)\n self.e_step()",
"def log_likelihood(self, params, eval_gradient=True):\n if eval_gradient:\n segment_loglike = [c.log_likelihood(params, eval_gradient) for c in self.mlcross_spec]\n # separate and sum the likelihoods and the gradients\n like = np.array([l[0] for l in segment_loglike])\n grad = np.array([l[1] for l in segment_loglike])\n if np.all(np.isfinite(like)):\n return np.sum(like), grad.sum(axis=0)\n else:\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6)\n else:\n return np.sum([c.log_likelihood(params, eval_gradient) for c in self.mlcross_spec])",
"def _evaluate_proposal(self, W_prop):\n if self.marginalize:\n return self.log_marginal_likelihood(self.X, W_prop)\n else:\n return self.log_likelihood(W=W_prop)",
"def log_likelihood(self, params, eval_gradient=True):\n c = self.cov_matrix(params)\n\n # add white noise along the leading diagonal\n # this should be the Poisson noise term when calculating a PSD\n if self.noise is not None:\n c += np.diag(self.noise)\n\n try:\n L = cho_factor(c, lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n try:\n # try doubling the noise first\n L = cho_factor(c + np.diag(self.noise), lower=True, check_finite=False)[0]\n except np.linalg.LinAlgError:\n #printmsg(2, \"WARNING: Couldn't invert covariance matrix with parameters \" + param2array(params))\n return (-1e6, np.zeros(len([p for p in params if params[p].vary])) - 1e6) if eval_gradient else -1e6\n except ValueError:\n return (np.inf, np.zeros(len([p for p in params if params[p].vary]))) if eval_gradient else -np.inf\n\n alpha = cho_solve((L, True), self.data, check_finite=False)\n\n log_likelihood_dims = -0.5 * np.einsum(\"ik,ik->k\", self.data, alpha)\n log_likelihood_dims -= np.log(np.diag(L)).sum()\n log_likelihood_dims -= c.shape[0] / 2 * np.log(2 * np.pi)\n log_likelihood = log_likelihood_dims.sum(-1)\n\n if eval_gradient:\n c_gradient = self.cov_matrix_deriv(params)\n tmp = np.einsum(\"ik,jk->ijk\", alpha, alpha)\n tmp -= cho_solve((L, True), np.eye(c.shape[0]))[:, :, np.newaxis]\n gradient_dims = 0.5 * np.einsum(\"ijl,ijk->kl\", tmp, c_gradient)\n gradient = gradient_dims.sum(-1)\n\n # note we return -log_likelihood, so we can minimize it!\n return (log_likelihood, gradient) if eval_gradient else log_likelihood",
"def evaluator(population, data_to_fit, config):\n is_parametric = config[\"model_generation\"][\"is_parametric\"]\n maximum_param_number = int(config[\"model_generation\"][\"maximum_param_number\"])\n maximum_complexity = int(config[\"model_generation\"][\"maximum_complexity\"])\n\n # split given data on dependent variables and independent one\n independent_var = data_to_fit[:,1:]\n independent_var = tuple(independent_var[:,column] for column in range(independent_var.shape[1]))\n #independent_var = (independent_var[:,0], independent_var[:,1])\n dependent_var = data_to_fit[:,0]\n\n for model in population:\n if (not hasattr(model, \"def_statement\")):\n def_repr = DefConstructor.def_constructor(model)\n setattr(model, \"def_statement\", def_repr)\n if (model.number_of_parameters > maximum_param_number or len(model) > maximum_complexity):\n setattr(model, \"is_deprecated\", True)\n continue\n\n import warnings\n\n def fxn():\n warnings.warn(\"deprecated\", DeprecationWarning)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fxn()\n if (is_parametric == 'True' and (not hasattr(model, \"optimal_params\")) and model.number_of_parameters > 0):\n is_multistart = eval(config[\"model_generation\"][\"multistart\"])\n bounds_included = eval(config[\"model_generation\"][\"bounds_included\"])\n if is_multistart:\n number_of_iterations = eval(config[\"model_generation\"][\"iterations_multistart\"])\n else:\n number_of_iterations = 1\n\n best_fit_params = []\n best_MSE = inf\n for i in range(number_of_iterations):\n try:\n #, model.curve_fit_init_params, model.curve_fit_bounds\n if is_multistart:\n model.curve_fit_init_params = 2 * random.rand(len(model.curve_fit_init_params)) - 1\n if bounds_included:\n popt, _ = curve_fit(model.def_statement, independent_var, dependent_var,\\\n p0 = model.curve_fit_init_params, bounds=model.curve_fit_bounds, \\\n ftol=0.01, xtol=0.01)\n else:\n try:\n popt, _ = curve_fit(model.def_statement, independent_var, dependent_var,\\\n p0 = model.curve_fit_init_params, \\\n ftol=0.01, xtol=0.01)\n except TypeError:\n print(model)\n raise\n\n except RuntimeError:\n popt = [nan for i in range(model.number_of_parameters)]\n except RuntimeWarning:\n popt = [nan for i in range(model.number_of_parameters)]\n except OptimizeWarning:\n popt = [nan for i in range(model.number_of_parameters)]\n except ZeroDivisionError:\n popt = [nan for i in range(model.number_of_parameters)]\n except ValueError:\n popt = [nan for i in range(model.number_of_parameters)]\n except IndexError:\n if hasattr(model, \"backup_handle\"):\n print(\"problem with simplification:\")\n print(model.backup_handle,'-->',model.handle)\n else:\n print(\"problem NOT with simplification\")\n print(model)\n raise\n setattr(model, \"optimal_params\", popt)\n QualityEstimator.quality_estimator([model], data_to_fit, config)\n if not isnan(model.MSE) and best_MSE > model.MSE:\n best_MSE = model.MSE\n best_fit_params = popt\n setattr(model, \"optimal_params\", best_fit_params)\n continue\n else:\n if not hasattr(model, \"optimal_params\"):\n setattr(model, \"optimal_params\", ones(model.number_of_parameters))\n\n if model.number_of_parameters == 0:\n model.def_statement_param = model.def_statement\n\n return population"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given the profile likelihood of a model with respect to one of its parameters, this function extracts the bounds of that parameter's confidence interval at a given confidence level alpha.
|
def Confidence_Interval(Profile, parameter_index, alpha=0.95):

    df, number_points = Profile['Parameters'].shape #number of parameters of the model and number of points in the profile
    opt_likelihood = np.min(Profile['Profile_Likelihood'])
    opt_index = np.argmin(Profile['Profile_Likelihood']) #first index of an optimum
    threshold = opt_likelihood + scipy.stats.chi2.ppf(alpha, df) #threshold for identifiability
    distance_to_threshold = (Profile['Profile_Likelihood'] - threshold)**2
    lower_index = np.argmin(distance_to_threshold[:(opt_index + (opt_index==0))]) #index of the lower bound (we extend the slice by 1 in case the optimum sits on the first point of the profile)
    upper_index = np.argmin(distance_to_threshold[opt_index:]) + opt_index #index of the upper bound (the slice starting at the optimum is never empty, so no shift is needed here)
    return((Profile['Parameters'][parameter_index, lower_index], Profile['Parameters'][parameter_index, upper_index]))
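
A short, self-contained sketch of how the confidence interval is read off a profile; the parabolic chi-square profile below is synthetic and only illustrates the expected input format.

import numpy as np
import scipy.stats

grid = np.linspace(0.5, 1.5, 101)            #profiled values of parameter 0
chi2_profile = 200.0*(grid - 1.0)**2         #synthetic parabolic chi-square, optimum at 1.0
Profile = {'Parameters': np.vstack([grid, np.full_like(grid, 3.0)]),  #2 parameters x 101 profile points
           'Profile_Likelihood': chi2_profile}

lower, upper = Confidence_Interval(Profile, parameter_index=0, alpha=0.95)
#the bounds land roughly where the chi-square crosses scipy.stats.chi2.ppf(0.95, df=2) above the optimum
print(lower, upper)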
|
[
"def confidence_interval(self, alpha=0.9):\n m, _, _ = scipy.stats.bayes_mvs(\n [r[self.metric_name_] for _, r in self.results_], alpha=alpha)\n return m",
"def confidence_interval(self, alpha=.05):\n return self.deltas_dist.percentiles([100 * alpha, 100 * (1-alpha)])",
"def _likelihood_ratio_confint(\n result: AmplitudeEstimationResult, alpha: float\n) -> tuple[float, float]:\n # Compute the two intervals in which we the look for values above\n # the likelihood ratio: the two bubbles next to the QAE estimate\n m = result.num_evaluation_qubits\n M = 2**m # pylint: disable=invalid-name\n qae = result.estimation\n\n y = int(np.round(M * np.arcsin(np.sqrt(qae)) / np.pi))\n if y == 0:\n right_of_qae = np.sin(np.pi * (y + 1) / M) ** 2\n bubbles = [qae, right_of_qae]\n\n elif y == int(M / 2): # remember, M = 2^m is a power of 2\n left_of_qae = np.sin(np.pi * (y - 1) / M) ** 2\n bubbles = [left_of_qae, qae]\n\n else:\n left_of_qae = np.sin(np.pi * (y - 1) / M) ** 2\n right_of_qae = np.sin(np.pi * (y + 1) / M) ** 2\n bubbles = [left_of_qae, qae, right_of_qae]\n\n # likelihood function\n a_i = np.asarray(list(result.samples.keys()))\n p_i = np.asarray(list(result.samples.values()))\n\n def loglikelihood(a):\n return np.sum(result.shots * p_i * np.log(pdf_a(a_i, a, m)))\n\n # The threshold above which the likelihoods are in the\n # confidence interval\n loglik_mle = loglikelihood(result.mle)\n thres = loglik_mle - chi2.ppf(1 - alpha, df=1) / 2\n\n def cut(x):\n return loglikelihood(x) - thres\n\n # Store the boundaries of the confidence interval\n # It's valid to start off with the zero-width confidence interval, since the maximum\n # of the likelihood function is guaranteed to be over the threshold, and if alpha = 0\n # that's the valid interval\n lower = upper = result.mle\n\n # Check the two intervals/bubbles: check if they surpass the\n # threshold and if yes add the part that does to the CI\n for a, b in zip(bubbles[:-1], bubbles[1:]):\n # Compute local maximum and perform a bisect search between\n # the local maximum and the bubble boundaries\n locmax, val = bisect_max(loglikelihood, a, b, retval=True)\n if val >= thres:\n # Bisect pre-condition is that the function has different\n # signs at the boundaries of the interval we search in\n if cut(a) * cut(locmax) < 0:\n left = bisect(cut, a, locmax)\n lower = np.minimum(lower, left)\n if cut(locmax) * cut(b) < 0:\n right = bisect(cut, locmax, b)\n upper = np.maximum(upper, right)\n\n # Put together CI\n return result.post_processing(lower), result.post_processing(upper)",
"def confidence_interval(df,param,coeff=2.42):\n \n df2=df.copy()\n\n df_stats=df2[param].describe().T\n stats=df_stats[['count','mean','std']]\n\n stats\n ci95_hi=stats['mean'] + coeff*stats['std']/math.sqrt(stats['count'])\n ci95_lo=stats['mean'] - coeff*stats['std']/math.sqrt(stats['count'])\n df6=df2.loc[(df2[param]>=ci95_lo)&(df2[param]<=ci95_hi)]\n return df6",
"def boostrapping_confidence_interval(pred_all, gs_all, eva_func, ci):\n import numpy as np\n import random\n # set random seed\n random.seed(0)\n\n # prediction-groundtruth pairs from all five fold cross validation\n tmp = np.array([pred_all, gs_all]).T\n # calculate overall correlation\n mb = eva_func(tmp[:,0], tmp[:,1])\n # start boostrapping ...\n eva_all = []\n for i in range(100):\n tmp_new = random.choices(tmp, k = len(tmp))\n tmp_new = np.array(tmp_new)\n eva = eva_func(tmp_new[:,0], tmp_new[:,1])\n eva_all.append(eva)\n eva_all = sorted(eva_all)\n #print(eva_all)\n lb = eva_all[round(100*(0.5-ci*0.5))]\n ub = eva_all[round(100*(0.5+ci*0.5))]\n return mb, lb, ub",
"def ci(self, alpha=.05, alternative='two-sided'):\n return self.zconfint_mean(alpha, alternative)[:2]",
"def get_conf_interval(model, actual, steps_ahead, predictions, exog_data, alpha = 0.05):\n predictions_int = model.get_forecast(steps=steps_ahead,exog=exog_data, alpha = alpha)\n\n conf_df = pd.concat([actual,predictions_int.predicted_mean, predictions_int.conf_int()], axis = 1)\n conf_df = conf_df.rename(columns={0: 'Predictions', 1: 'Lower CI', 2: 'Upper CI'})\n\n return conf_df.style.format(\"{:,.0f}\")",
"def bayesian_information_criteria(self):\n self.max_likelihood('diff_evo')\n l_hat = optimize.ln_likelihood(self.variable_medians, self.function, self.abscissa, self.ordinate)\n return np.log(self.x.size) * self.len_parameters - 2 * l_hat",
"def compute_distribution_bounds(history, parameter, alpha, run):\n if run is None:\n run = history.max_t\n\n magnitudes, probabilities = history.get_distribution(m=0, t=run)\n magnitudes[\"probabilities\"] = probabilities\n magnitudes_sorted = magnitudes.sort_values(by=parameter)\n magnitudes_sorted[\"cum_probabilities\"] = magnitudes_sorted[\"probabilities\"].cumsum()\n cut_magnitudes = magnitudes_sorted[\n (magnitudes_sorted[\"cum_probabilities\"] >= alpha / 2)\n & (magnitudes_sorted[\"cum_probabilities\"] <= 1 - alpha / 2)\n ]\n cut_indexed = cut_magnitudes.reset_index(drop=True)\n cut_magnitudes = cut_indexed[parameter]\n lower = cut_magnitudes[0]\n upper = cut_magnitudes[len(cut_magnitudes) - 1]\n\n return lower, upper",
"def get_border(func=None, count=10):\n if func is None:\n return partial(get_border, count=count)\n count = count if count else 2\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n return ('==' * count) + '\\n\\n' + func(*args, **kwargs) + '\\n\\n' + ('==' * count) + '\\n'\n return wrapper",
"def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]:\n initial: List[np.ndarray] = []\n bounds: List[List[int]]\n if self.es == 0:\n for count, (i, j) in enumerate(product(range(self.img_rows), range(self.img_cols))):\n initial += [i, j]\n for k in range(self.img_channels):\n if not self.estimator.channels_first:\n initial += [img[i, j, k]]\n else:\n initial += [img[k, i, j]]\n\n if count == limit - 1:\n break\n else:\n continue\n min_bounds = [0, 0]\n for _ in range(self.img_channels):\n min_bounds += [0]\n min_bounds = min_bounds * limit\n max_bounds = [self.img_rows, self.img_cols]\n for _ in range(self.img_channels):\n max_bounds += [255]\n max_bounds = max_bounds * limit\n bounds = [min_bounds, max_bounds]\n else:\n bounds = [[0, self.img_rows], [0, self.img_cols]]\n for _ in range(self.img_channels):\n bounds += [[0, 255]]\n bounds = bounds * limit\n return bounds, initial",
"def get_parameters_from_barrier_height(e):\n\n a = 1 / (4 * e)\n x_start = - np.sqrt(1 / (2 * a))\n\n return x_start, a",
"def _get_bounds(self, img: np.ndarray, limit) -> Tuple[List[list], list]:\n\n def bound_limit(value):\n return np.clip(value - limit, 0, 255), np.clip(value + limit, 0, 255)\n\n minbounds, maxbounds, bounds, initial = [], [], [], []\n\n for i, j, k in product(range(img.shape[-3]), range(img.shape[-2]), range(img.shape[-1])):\n temp = img[i, j, k]\n initial += [temp]\n bound = bound_limit(temp)\n if self.es == 0:\n minbounds += [bound[0]]\n maxbounds += [bound[1]]\n else:\n bounds += [bound]\n if self.es == 0:\n bounds = [minbounds, maxbounds]\n\n return bounds, initial",
"def minimum_bounding_box(img,alpha=1,mode=0):\n yproj = img.mean(axis=1)\n xproj = img.mean(axis=0)\n if mode == 0:\n \t_get_idx = lambda x: np.where(x>alpha)[0]\n else:\n \t_get_idx = lambda x: np.where(x<alpha)[0]\n\n def _get_bounds_on_proj(proj):\n idx = _get_idx(proj)\n return(idx[0],\n idx[-1])\n\n x1,x2 = _get_bounds_on_proj(xproj)\n y1,y2 = _get_bounds_on_proj(yproj)\n return([y1,x1,y2,x2])",
"def trace_profile(image, sigma=5., width_factor=1., check_vertical=False):\n if image.ndim > 2:\n image = image.sum(axis=0)\n if check_vertical:\n top_bottom_mean = np.mean(image[[0, image.shape[0] - 1], :])\n left_right_mean = np.mean(image[:, [0, image.shape[1] - 1]])\n if top_bottom_mean < left_right_mean:\n image = image.T\n top_distribution = nd.gaussian_filter1d(image[0], sigma)\n bottom_distribution = nd.gaussian_filter1d(image[-1], sigma)\n top_loc, top_whm = estimate_mode_width(top_distribution)\n bottom_loc, bottom_whm = estimate_mode_width(bottom_distribution)\n angle = np.arctan(np.abs(float(bottom_loc - top_loc)) / image.shape[0])\n width = np.int(np.ceil(max(top_whm, bottom_whm) * np.cos(angle)))\n profile = profile_line(image,\n (0, top_loc), (image.shape[0] - 1, bottom_loc),\n linewidth=width, mode='nearest')\n return profile",
"def line_profile(self):\n if self.fit_type == 'Mixture':\n calc = self.TF_2D(self.params) + self.gauss_2D(self.params) - self.params['offset'].value\n else:\n calc = self.stern_gerlach_2D(self.params)\n a = np.pad(calc, ((self.pad[0],self.pad[1]), (self.pad[2],self.pad[3])),\n mode='constant', constant_values=0)\n return a",
"def credibility_interval(post, alpha=.68):\n lower_percentile = 100 * (1 - alpha) / 2\n upper_percentile = 100 * (1 + alpha) / 2\n low, med, up = sp.percentile(\n post, [lower_percentile, 50, upper_percentile]\n )\n return med, low, up",
"def _get_likelihood_profile_function(self, optimal_parameters, fixed):\n optimal_parameters_without_fixed = [p for i, p in enumerate(optimal_parameters) if i not in fixed]\n insert_fixed_parameters = lambda p, f: functools.reduce(lambda l, i: l[:i[1]] + [f[i[0]]] + l[i[1]:], enumerate(fixed), list(p))\n if self.ugly_and_fast:\n return lambda x, data, weights, ne: self.loss(insert_fixed_parameters(optimal_parameters_without_fixed, x), data, weights, ne, self.mapping)\n else:\n return lambda x, data, weights, ne: self._fit(optimal_parameters_without_fixed, data, weights, ne, lambda p: self.mapping(np.array(insert_fixed_parameters(p, x)))).fun",
"def calcBCI(img, geometry, bandName = 'bci'):\n\n b = img.select('brightness').reduceRegion(\n reducer = ee.Reducer.minMax(),\n geometry = geometry,\n scale = 30,\n maxPixels = 1e13\n )\n b = b.getInfo()['brightness_min'], b.getInfo()['brightness_max']\n\n g = img.select('greenness').reduceRegion(\n reducer = ee.Reducer.minMax(),\n geometry = geometry,\n scale = 30,\n maxPixels = 1e13\n )\n g = g.getInfo()['greenness_min'], g.getInfo()['greenness_max']\n \n w = img.select('wetness').reduceRegion(\n reducer = ee.Reducer.minMax(),\n geometry = geometry,\n scale = 30,\n maxPixels = 1e13\n )\n w = w.getInfo()['wetness_min'], w.getInfo()['wetness_max']\n \n H = img.expression('(brightness - min)/(max - min)',{\n 'brightness': img.select('brightness'),\n 'min': b[0],\n 'max': b[1]\n }).rename('H')\n \n V = img.expression('(greenness - min)/(max - min)',{\n 'greenness': img.select('greenness'),\n 'min': g[0],\n 'max': g[1]\n }) .rename('V')\n\n L = img.expression('(wetness - min)/(max - min)',{\n 'wetness': img.select('wetness'),\n 'min': w[0],\n 'max': w[1]\n }).rename('L')\n\n HVL = ee.Image(H).addBands(V).addBands(L)\n\n BCI = HVL.expression('(0.5 * (H + L) - V)/(0.5* (H + L) + V)', {\n 'H': HVL.select('H'),\n 'V': HVL.select('V'),\n 'L': HVL.select('L')\n })\n \n return (img.addBands(\n HVL.expression('(0.5 * (H + L) - V)/(0.5* (H + L) + V)', {\n 'H': HVL.select('H'),\n 'V': HVL.select('V'),\n 'L': HVL.select('L')\n }).rename([bandName])))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reads a profile likelihood stored in a text file into the dictionary format expected by the other profile functions.
|
def Read_Profile(input_file):
    #the file is expected to contain one row per parameter, with the profile likelihood values as the last row
    Data = np.genfromtxt(input_file)
    Data = {'Parameters': Data[:-1], 'Profile_Likelihood': Data[-1]}
    return(Data)
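
A hedged round-trip sketch: the Write_Profile helper below is an assumption inferred from the layout Read_Profile expects (one row per parameter, with the profile likelihood as the last row); it is not part of the original code.

import numpy as np

def Write_Profile(output_file, Profile):
    #stack the (df x n_points) parameter matrix on top of the (n_points,) likelihood row
    np.savetxt(output_file, np.vstack([Profile['Parameters'], Profile['Profile_Likelihood']]))

demo = {'Parameters': np.array([[0.9, 1.0, 1.1], [3.0, 3.0, 3.0]]),
        'Profile_Likelihood': np.array([4.2, 0.0, 4.5])}
Write_Profile('profile_demo.txt', demo)
loaded = Read_Profile('profile_demo.txt')
assert np.allclose(loaded['Parameters'], demo['Parameters'])
assert np.allclose(loaded['Profile_Likelihood'], demo['Profile_Likelihood'])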
|
[
"def parse_profile(self, file):\n profile_file = open(file, \"r\")\n try:\n for line in profile_file:\n line.strip()\n words = line.split(\"=\")\n if words[0] == \"name\":\n self.name = words[1].strip()\n elif words[0] == \"spanmode\":\n wrd1 = words[1].strip().lower()\n if wrd1 == \"single\":\n self.spanmode = wrd1\n elif wrd1 == \"multi\":\n self.spanmode = wrd1\n else:\n G_LOGGER.info(\"Exception: unknown spanmode: %s \\\n in profile: %s\", words[1], self.name)\n elif words[0] == \"slideshow\":\n wrd1 = words[1].strip().lower()\n if wrd1 == \"true\":\n self.slideshow = True\n else:\n self.slideshow = False\n elif words[0] == \"delay\":\n self.delay_list = []\n delay_strings = words[1].strip().split(\";\")\n for delstr in delay_strings:\n self.delay_list.append(int(delstr))\n elif words[0] == \"sortmode\":\n wrd1 = words[1].strip().lower()\n if wrd1 == \"shuffle\":\n self.sortmode = wrd1\n elif wrd1 == \"sort\":\n self.sortmode = wrd1\n else:\n G_LOGGER.info(\"Exception: unknown sortmode: %s \\\n in profile: %s\", words[1], self.name)\n elif words[0] == \"offsets\":\n # Use PPI mode algorithm to do cuts.\n # Defaults assume uniform pixel density\n # if no custom values are given.\n self.ppimode = True\n self.manual_offsets = []\n self.manual_offsets_useronly = []\n # w1,h1;w2,h2;...\n offset_strings = words[1].strip().split(\";\")\n for offstr in offset_strings:\n res_str = offstr.split(\",\")\n self.manual_offsets.append((int(res_str[0]),\n int(res_str[1])))\n self.manual_offsets_useronly.append((int(res_str[0]),\n int(res_str[1])))\n elif words[0] == \"bezels\":\n bez_mm_strings = words[1].strip().split(\";\")\n for bezstr in bez_mm_strings:\n self.bezels.append(float(bezstr))\n elif words[0] == \"ppi\":\n self.ppimode = True\n # overwrite initialized arrays.\n self.ppiArray = []\n self.ppiArrayRelDensity = []\n ppi_strings = words[1].strip().split(\";\")\n for ppistr in ppi_strings:\n self.ppiArray.append(int(ppistr))\n elif words[0] == \"diagonal_inches\":\n self.ppimode = True\n # overwrite initialized arrays.\n self.ppiArray = []\n self.ppiArrayRelDensity = []\n inch_strings = words[1].strip().split(\";\")\n self.inches = []\n for inchstr in inch_strings:\n self.inches.append(float(inchstr))\n self.ppiArray = self.computePPIs(self.inches)\n elif words[0] == \"hotkey\":\n binding_strings = words[1].strip().split(\"+\")\n self.hkBinding = tuple(binding_strings)\n if DEBUG:\n G_LOGGER.info(\"hkBinding: %s\", self.hkBinding)\n elif words[0].startswith(\"display\"):\n paths = words[1].strip().split(\";\")\n paths = list(filter(None, paths)) # drop empty strings\n self.pathsArray.append(paths)\n else:\n G_LOGGER.info(\"Unknown setting line in config: %s\", line)\n finally:\n profile_file.close()",
"def load_profiles(profiles_file: TextIO, person_to_friends: Dict[str, List[str]], \\\n person_to_networks: Dict[str, List[str]]) -> None:\n\n current_line = profiles_file.readline()\n next_line = profiles_file.readline()\n name = convert_name(current_line.rstrip())\n while next_line != '':\n if ',' in next_line:\n friend = convert_name(next_line.rstrip())\n add_to_friends(name, friend, person_to_friends)\n next_line = profiles_file.readline()\n elif next_line != '\\n':\n network = next_line.rstrip()\n add_to_network(name, network, person_to_networks)\n next_line = profiles_file.readline()\n else:\n current_line = profiles_file.readline()\n next_line = profiles_file.readline()\n name = convert_name(current_line.rstrip())",
"def get_config_profiles(uplid, file_):\n\n loaded_value = json.load(file_)\n return filter_valid_profiles(uplid, loaded_value)",
"def load_profiles(profiles_file: TextIO, person_to_friends: Dict[str, List[str]], \\\n person_to_networks: Dict[str, List[str]]) -> None:\n lines = profiles_file.readlines()\n i = 0\n while i < len(lines):\n j = i\n while j < len(lines) and lines[j] != '\\n':\n name = reverse(lines[j])\n j = j+1\n while j < len(lines) and not(',' in lines[j]) and lines[j] != '\\n':\n help_function1(name, person_to_networks, lines, j)\n j = j+1\n while j < len(lines) and lines[j] != '\\n':\n help_function2(name, person_to_friends, lines, j)\n j = j+1\n i = j+1\n arrange(person_to_friends)\n arrange(person_to_networks)",
"def LoadProfile(self, filename=None, loadFromDb=True):\n bestProfileVersion = '0.1c'\n bestEntryVersion = '0.1b'\n bestSteamEntryVersion='Steam_0.1a'\n \n backupProfile = False\n defaultBackup = True # always make a default backup as safety for loading errors\n \n updateInfoBoxAlreadyShowed = False\n \n if filename is None: filename = self.profileName\n if not os.path.exists(filename):\n # suppress this box as this error will just evoke the profile selection dialog\n #QtGui.QMessageBox.critical(self, \"Error\", \"Profile '%s' not found!\" % filename)\n raise IOError('Profile not found')\n return True\n \n if defaultBackup:\n shutil.copyfile(filename, \"~\"+filename+\".bak\")\n \n # determine encoding\n with open(filename, 'r') as f:\n codepage = f.readline().strip()\n codepage = codepage.replace('# -*- coding:','').replace('-*-','').strip()\n if len(codepage) == 0:\n raise ValueError('Empty profile')\n return True\n \n # try to open file with this encoding\n try:\n f = codecs.open(filename, 'r', codepage)\n f.close()\n except LookupError: # unknown coding\n QtGui.QMessageBox.critical(self, \"Error\", \"Unknown codepage '%s' used in profile '%s'.\\nPlease fix this manually. Loading empty profile.\" % (codepage, filename))\n raise ValueError('Unknown codepage')\n return True\n \n p = ProfileSettings()\n fp = self.fileParser\n \n # open file\n with codecs.open(filename, 'r', codepage) as f:\n f.readline() # skip codepage\n \n profileVersion = f.readline().strip() # read file format version\n if profileVersion not in FileParser.profileFormats:\n QtGui.QMessageBox.critical(self, \"Profile error\", \"Unsupported file format (%s) for profile '%s'!\\nAborting load process.\" % (profileVersion, filename))\n raise ValueError('Unsupported file format')\n return True\n try: fp.ParseByVersion(file=f, handler=p, version=profileVersion, type='profile')\n except ValueError as e:\n QtGui.QMessageBox.critical(self, \"Profile loading error\", str(e))\n raise ValueError(str(e))\n return True\n \n if bestProfileVersion != profileVersion:\n count = fp.CompleteProfile(p, bestProfileVersion)\n backupProfile = True\n if count > 0 and not updateInfoBoxAlreadyShowed:\n QtGui.QMessageBox.information(self, \"Information\", \"Your profile has been updated to a newer version.\\n\"\\\n + \"Should any problems occur, a backup is available: %s\" % (filename+\".\"+profileVersion))\n updateInfoBoxAlreadyShowed = True\n \n for i in range(p.numEntries):\n entryVersion = f.readline().strip() # read file format version\n \n if entryVersion.startswith('Steam'):\n entryType = 'steam'\n if entryVersion not in FileParser.steamEntryFormats:\n QtGui.QMessageBox.critical(self, \"Profile error\", \"Unsupported file format (%s) for Steam entry in profile '%s'!\\nAborting load process.\" % (entryVersion, filename))\n raise ValueError('Unsupported file format')\n return True\n entry = SteamEntry()\n eHndlr = SteamEntrySettings()\n else:\n entryType = 'entry'\n if entryVersion not in FileParser.entryFormats:\n QtGui.QMessageBox.critical(self, \"Profile error\", \"Unsupported file format (%s) for entry in profile '%s'!\\nAborting load process.\" % (entryVersion, filename))\n raise ValueError('Unsupported file format')\n return True\n entry = AppStarterEntry(parentWidget=self.centralWidget())\n eHndlr = EntrySettings()\n \n try:\n fp.ParseByVersion(file=f, handler=eHndlr, version=entryVersion, type=entryType )\n except ValueError as e:\n QtGui.QMessageBox.critical(self, \"Profile loading error (entry)\", str(e))\n raise ValueError(str(e))\n 
return True\n except EOFError:\n QtGui.QMessageBox.critical(self, \"End of file error\", \"Unable to load entry %i from profile '%s'!\\nEntries might be incomplete.\" % (i+1, filename))\n raise EOFError('Incomplete entry')\n return True\n \n if type=='entry' and bestEntryVersion != entryVersion \\\n or type=='steam' and bestSteamEntryVersion != entryVersion:\n \n count = fp.CompleteEntry(eHndlr, bestEntryVersion if type=='entry' else bestSteamEntryVersion)\n backupProfile = True\n if count > 0 and not updateInfoBoxAlreadyShowed:\n QtGui.QMessageBox.information(self, \"Information\", \"Your profile has been updated to a newer version.\\n\"\\\n + \"Should any problems occur, a backup is available: %s\" % (filename+\".\"+profileVersion))\n updateInfoBoxAlreadyShowed = True\n \n # copy data from EntrySettings object to actual entry\n if entryType=='entry':\n for var, vartype in FileParser.entryFormats[bestEntryVersion]:\n setattr(entry, var, getattr(eHndlr, var))\n elif entryType=='steam':\n for var, vartype in FileParser.steamEntryFormats[bestSteamEntryVersion]:\n setattr(entry, var, getattr(eHndlr, var))\n \n failedToLoadIcon = False\n try: entry.LoadIcon(256) # always load largest icon because otherwise we would scale up when increasing icon size at runtime\n except IOError: # ignore icon loading errors, probably just opening the profile on another machine - just show the default icon\n failedToLoadIcon = True\n \n if entryType =='entry':\n # preferred icon only for non-steam entries\n if entry.preferredIcon != -1 and not failedToLoadIcon:\n # try to copy and save icon to a local folder in case the icon becomes unavailable in the future\n pm = entry.icon.pixmap(entry.icon.actualSize(QtCore.QSize(256,256)))\n \n iconFilename = stringToFilename(entry.label)\n i = 0\n while(os.path.exists(os.path.join(\"cache\", \"icons\", iconFilename))):\n iconFilename = \"%s%i\" % (stringToFilename(entry.label), i)\n \n fullName = os.path.join(\"cache\", \"icons\", iconFilename+\".png\")\n pm.save(fullName, \"PNG\", 100)\n entry.preferredIcon = -2\n entry.iconPath = fullName\n \n elif failedToLoadIcon:\n entry.icon=QtGui.QIcon(os.path.join(\"gfx\",\"noicon.png\"))\n \n self.centralWidget().AddEntry(entry, manuallySorted=True) # always add as manually sorted, might be overwritten later\n \n elif entryType=='steam':\n p.steamGames.append(entry)\n \n if backupProfile:\n shutil.copy(filename, filename+\".\"+profileVersion)\n \n # apply settings \n try: self.SetIconSize(p.iconSize)\n except ValueError: QtGui.QMessageBox.warning(self, \"Warning\", \"Invalid icon size in profile: %ix%ipx\" %(p.iconSize,p.iconSize))\n \n try: self.resize(p.windowSize[0], p.windowSize[1])\n except ValueError: QtGui.QMessageBox.warning(self, \"Warning\", \"Invalid window size in profile: %ix%i\" %(p.windowSize[0],p.windowSize[1]))\n \n try: self.move(p.windowPos[0], p.windowPos[1])\n except ValueError: QtGui.QMessageBox.warning(self, \"Warning\", \"Invalid window position in profile: %i, %i\" %(p.windowPos[0],p.windowPos[1]))\n \n if p.sortMode not in (\"manual\", \"title\", \"time\"):\n QtGui.QMessageBox.warning(self, \"Warning\", \"Invalid sort mode '%s' in profile, defaulting to manual sorting.\" % p.sortMode)\n self.centralWidget().sortMode = \"manual\"\n else: self.centralWidget().sortMode = p.sortMode\n self.centralWidget().SortByCurrentSortMode()\n self.toolsBar.sortComboBox.setCurrentIndex(1 if p.sortMode == \"title\" else 2 if p.sortMode == \"time\" else 0)\n self.toolsBar.setVisible(p.toolsVisible)\n \n self.profile = p\n 
\n # store as last profile\n codepage = 'utf-8'\n with codecs.open('lastprofile', 'w', codepage) as f:\n f.write(\"# -*- coding: %s -*-\\n\" % codepage)\n f.write(filename)\n \n # update window title\n self.setWindowTitle(os.path.splitext(filename)[0] + \" - FireStarter\")\n \n return False",
"def load_profiles(profiles_file= TextIO, person_to_friends= Dict[str, List[str]],\n person_to_networks= Dict[str, List[str]]):\n number_of_profiles = count_profiles(profiles_file)\n for _ in range(number_of_profiles):\n person_name = format_name(str(profiles_file.readline().strip()))\n list_of_networks, list_of_friends = [], []\n line = profiles_file.readline().strip()\n while line != '':\n if ',' not in line:\n list_of_networks.append(str(line))\n else:\n list_of_friends.append(format_name(str(line)))\n line = profiles_file.readline().strip()\n if format_list(list_of_friends) != list():\n person_to_friends[person_name] = format_list(list_of_friends)\n if format_list(list_of_friends) != list():\n person_to_networks[person_name] = format_list(list_of_networks)",
"def profile(args):\n _write(args, Profile.from_text(_read(args)))",
"def load_profiles_from_file(self, fqfn):\n if self.args.verbose:\n print('Loading profiles from File: {}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, fqfn))\n with open(fqfn, 'r+') as fh:\n data = json.load(fh)\n for profile in data:\n # force update old profiles\n self.profile_update(profile)\n if self.args.action == 'validate':\n self.validate(profile)\n fh.seek(0)\n fh.write(json.dumps(data, indent=2, sort_keys=True))\n fh.truncate()\n\n for d in data:\n if d.get('profile_name') in self.profiles:\n self.handle_error(\n 'Found a duplicate profile name ({}).'.format(d.get('profile_name'))\n )\n self.profiles.setdefault(\n d.get('profile_name'),\n {'data': d, 'ij_filename': d.get('install_json'), 'fqfn': fqfn},\n )",
"def profile_info(profile_info_file_path=None) -> Optional[dict]:\n\tpath = profile_info_file_path or config.KONFSAVE_CURRENT_PROFILE_PATH\n\ttry:\n\t\twith open(path) as f:\n\t\t\tinfo = json.load(f)\n\t\t\tassert info['name']\t\t\t\t\t\t\t# Must not be empty\n\t\t\tassert info['hash']\t\t\t\t\t\t\t# Must not be empty\n\t\t\tinfo['include'] = set(info['include'] or ())\n\t\t\tinfo['exclude'] = set(info['exclude'] or ())\n\t\t\treturn info\n\texcept (json.JSONDecodeError, KeyError, AssertionError) as e:\n\t\tsys.stderr.write(f'Warning: malformed profile info at {path};\\n{str(e)}\\n')\n\t\treturn None\n\texcept FileNotFoundError:\n\t\t# The current configuration is not saved. Consider this normal behavior.\n\t\treturn None",
"def load_test_profile(self, filename):\n if(os.path.exists(filename)):\n self.__test_profile = testprofile.TestProfile()\n self.__test_profile.readfp(open(filename))\n self.__test_profile.validate()\n self._verify_power_profile()\n else:\n print \"%s doesn't exist\" % filename\n return",
"def read_from_file(self, file_name):\n\t\twith open (self.user_folder + file_name, 'r') as file:\n\t\t\tnames_list = file.readlines()\n\t\t\tfor name in names_list:\n\t\t\t\tprint(name.strip())",
"def _read(self, profile_filename):\n # header=0 because docs say to if using skip rows and columns\n df = pd.read_csv(profile_filename, header=0,\n skiprows=self.hdr.header_pos,\n names=self.hdr.columns,\n encoding='latin')\n\n # Special SMP specific tasks\n depth_fmt = 'snow_height'\n is_smp = False\n if 'force' in df.columns:\n # Convert depth from mm to cm\n df['depth'] = df['depth'].div(10)\n is_smp = True\n # Make the data negative from snow surface\n depth_fmt = 'surface_datum'\n\n # SMP serial number and original filename for provenance to the comment\n f = basename(profile_filename)\n serial_no = f.split('SMP_')[-1][1:3]\n\n df['comments'] = f\"fname = {f}, \" \\\n f\"serial no. = {serial_no}\"\n\n # Standardize all depth data\n new_depth = standardize_depth(df['depth'],\n desired_format=depth_fmt,\n is_smp=is_smp)\n\n if 'bottom_depth' in df.columns:\n delta = df['depth'] - new_depth\n df['bottom_depth'] = df['bottom_depth'] - delta\n\n df['depth'] = new_depth\n\n delta = abs(df['depth'].max() - df['depth'].min())\n self.log.info('File contains {} profiles each with {} layers across '\n '{:0.2f} cm'.format(len(self.hdr.data_names), len(df), delta))\n return df",
"def generate_profile_file(name, hash, base_dir):\n with open(name) as fid:\n data = json.loads(fid.read().decode(\"utf-8\", \"ignore\"))\n with open(os.path.join(base_dir, hash + \"_profile.txt\"), \"w+\") as out:\n # For each query\n for key in data:\n for iteration in data[key]:\n out.write(iteration[\"runtime_profile\"])\n out.write(\"\\n\\n\")",
"def ReadAureliaUserInfo(self, fileName):\n if _DoesFileExist(fileName) == 0:\n return\n print 'reading an Aurelia User Info File:\\n ', fileName\n print 'We always use the following format for the User Info Files:'\n print ' # 8.17 NH 7 2FMR'\n print ' # ppm atomname residuenumber segid'\n print ' segid should contain 4 letters or should be blank'\n print ' other formats can not be read in by this method!'\n #important - clean atomlist and atomdicfa:\n self.atomlist = []\n self.atomdicfa = {}\n self.fileName = fileName\n auihandle = TextFile.TextFile(fileName)\n for line in auihandle:\n linelist = string.split(line)\n if len(linelist) < 4:\n continue\n ATOM = Atom()\n ATOM.shift = linelist[1]\n ATOM.atomname = linelist[2]\n ATOM.residuenumber = linelist[3]\n try:\n ATOM.segid = linelist[4]\n except:\n ATOM.segid = ' '\n self.AddAtom(ATOM)\n auihandle.close()",
"def parse_prof_tgff(filename):\n # profile information is stored in this dict:\n # {'comp': comp_cost, 'data': data, 'link quad': quadatic_profile}\n ret_profile = dict()\n\n f = open(filename, 'r')\n f.readline()\n f.readline()\n f.readline()\n\n # Calculate the amount of tasks\n task_names=set()\n\n\n # Build a communication dictionary\n data = {}\n line = f.readline()\n while line.startswith('\\tARC'):\n l = line.split()\n task_names = task_names.union(set([l[3],l[5]]))\n data[(l[3],l[5])] = float(l[7])\n line = f.readline()\n ret_profile['data'] = data\n\n\n while not f.readline().startswith('@computation_cost'):\n pass\n\n line = f.readline()\n processor_names = [int(pi) for pi in line.split()[3:]]\n num_of_processors = len(processor_names)\n\n # Build a computation dictionary\n comp_cost = {}\n line = f.readline()\n while line.startswith(' '):\n _cost = (list(map(int, line.split()[-num_of_processors:])))\n comp_cost[line.split()[0]] = _cost\n line = f.readline()\n ret_profile['comp'] = comp_cost\n\n \"\"\"\n Build a rate matrix\n rate = [[1 for i in range(num_of_processors)] for i in range(num_of_processors)]\n for i in range(num_of_processors):\n rate[i][i] = 0\n \"\"\"\n # Build a network profile matrix\n quadatic_profile = dict()\n while not f.readline().startswith('@quadratic'):\n pass\n line = f.readline()\n line = f.readline()\n\n while line.startswith(' '):\n info = line.strip().split()\n k = (int(info[0].strip('node')),int(info[1].strip('node')))\n a,b,c = [float(s) for s in info[2:]]\n quadatic_profile[k]= tuple([a,b,c])\n line = f.readline()\n ret_profile['link quad'] = quadatic_profile\n # print(quadatic_profile)\n assert num_of_processors == max(processor_names)+1\n return task_names, processor_names, ret_profile",
"def load(self, file):\n #read the account properties in the same order they were saved\n self._acctNo = int(file.readline().rstrip('\\n'))\n self._acctHolderName = file.readline().rstrip('\\n')\n self._balance = float(file.readline().rstrip('\\n'))\n self._annualIntrRate = float(file.readline().rstrip('\\n'))",
"def get_basic_information(file):\n\n # Pull out the location and year from the filename\n location, year = file[:-4].split()\n\n # Identify the number of profiles in the file\n num_profiles = 0\n with open(os.path.join(DATA_DIR, file)) as topo_file:\n for line in topo_file:\n if 'Cross' in line:\n num_profiles += 1\n\n # Make a folder for the location to place locations into\n new_folder = os.path.join('..', f'{location}', f'{year}', 'Profiles')\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n\n # Make a folder for the location to place locations into\n new_folder = os.path.join('..', f'{location}', f'{year}', 'Figures')\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n\n return location, year, num_profiles",
"def _get_profile_path(ini_file_path):\n ini_config = ConfigParser.ConfigParser()\n try:\n ini_config.read(ini_file_path)\n profile_path = ini_config.get('Profile0', 'Path')\n logger.debug('Load [{}]: {}'.format(ini_file_path, ini_config._sections))\n return profile_path\n except:\n raise Exception(\n 'Can not get profile path from [{}], content: {}'.format(ini_file_path, ini_config._sections))",
"def ReadWiredNetworkProfile(self, profilename):\n profile = {}\n profilename = misc.to_unicode(profilename)\n if self.config.has_section(profilename):\n if self.debug_mode:\n print \"Reading wired profile %s\" % profilename\n for x in self.config.options(profilename):\n profile[x] = misc.Noneify(self.config.get(profilename, x))\n profile['use_global_dns'] = bool(profile.get('use_global_dns'))\n profile['use_static_dns'] = bool(profile.get('use_static_dns'))\n profile['encryption_enabled'] = \\\n bool(profile.get('encryption_enabled'))\n profile['profilename'] = profilename\n self.WiredNetwork = profile\n self._cur_wired_prof_name = profilename\n return \"100: Loaded Profile\"\n else:\n self._cur_wired_prof_name = \"\"\n self.WiredNetwork = {}\n return \"500: Profile Not Found\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the profile of a parameter.
|
def Plot_Profile(Profile,Parameter_index,alpha=0.95,show=True,output_file=None,xtitle='',ytitle='',maintitle=''):
plt.clf()
df=Profile['Parameters'].shape[0] #number of estimated parameters
threshold=np.min(Profile['Profile_Likelihood']) + chi2.ppf(alpha,df)
plt.plot(Profile['Parameters'][Parameter_index], Profile['Profile_Likelihood'], '.', c='0.2', linewidth=2)
plt.plot([Profile['Parameters'][Parameter_index, 0], Profile['Parameters'][Parameter_index, -1]], [threshold, threshold], '--', c='0.2', linewidth=2)
plt.xlabel(xtitle,fontsize=12)
plt.ylabel(ytitle,fontsize=12)
plt.title(maintitle,fontsize=12)
if output_file!=None:
plt.rcParams['figure.figsize']=5,5
plt.savefig(output_file,dpi='figure',bbox_inches='tight')
if show:
plt.show()
|
[
"def plot_likelihood_profile(self, parameter, ax=None, **kwargs):\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n ts_diff = self.likelihood_profiles[parameter][\"likelihood\"] - self.total_stat\n values = self.likelihood_profiles[parameter][\"values\"]\n\n ax.plot(values, ts_diff, **kwargs)\n unit = self.model.parameters[parameter].unit\n ax.set_xlabel(parameter + \"[unit]\".format(unit=unit))\n ax.set_ylabel(\"TS difference\")\n return ax",
"def plot_trace(param, param_name='parameter'):\n\n # Summary statistics\n mean = np.mean(param)\n median = np.median(param)\n cred_min, cred_max = np.percentile(param, 2.5), np.percentile(param, 97.5)\n #HDI, _ = hdi(\n\n # Plotting\n plt.subplot(2,1,1)\n plt.plot(param)\n plt.xlabel('samples')\n plt.ylabel(param_name)\n plt.axhline(mean, color='r', lw=2, linestyle='--')\n plt.axhline(median, color='c', lw=2, linestyle='--')\n plt.axhline(cred_min, linestyle=':', color='k', alpha=0.2)\n plt.axhline(cred_max, linestyle=':', color='k', alpha=0.2)\n plt.title('Trace and Posterior Distribution for {}'.format(param_name))\n\n plt.subplot(2,1,2)\n plt.hist(param, 30, density=True); sns.kdeplot(param, shade=True)\n plt.xlabel(param_name)\n plt.ylabel('density')\n plt.axvline(mean, color='r', lw=2, linestyle='--',label='mean')\n plt.axvline(median, color='c', lw=2, linestyle='--',label='median')\n plt.axvline(cred_min, linestyle=':', color='k', alpha=0.2, label='95% CI')\n plt.axvline(cred_max, linestyle=':', color='k', alpha=0.2)\n\n plt.gcf().tight_layout()\n plt.legend()",
"def plot_property(profiles, property_name):\n\n fig,ax=plt.subplots()\n\n for prof in profiles:\n\n index=cf.parameter_finder(prof.column_names, property_name)\n\n name=\"%s_%s\"%(prof.box,prof.name)\n ax.plot(prof.data[:,1]/np.max(prof.data[:,1]),prof.data[:,index], label=name)\n\n return fig,ax",
"def parameter_pairplot(df, p, profile, save=False):\r\n\r\n # Setup the figure\r\n fig, ax = plt.subplots(figsize=(inches * 2, inches * 2), dpi=dpi)\r\n pd.plotting.scatter_matrix(df[p.keys()],\r\n c=df['RMSE'],\r\n cmap='Reds_r',\r\n vmin=0, vmax=1,\r\n ax=ax)\r\n\r\n # Save and close the figure\r\n title = f'BGB{profile} Parameter Pairs'\r\n save_and_close(fig, title, profile, save)",
"def show_profile(self, var_key, coord_key, loc_list):\n\n # Horizontal profile\n if coord_key == 'x':\n xlabel = coord_key\n ylabel = var_key\n\n # Vertical profile\n elif coord_key == 'y':\n xlabel = var_key\n ylabel = coord_key\n\n else:\n xlabel = 'error'\n ylabel = 'error'\n assert (coord_key == 'x' or coord_key == 'y'), 'Invalid key for coordinates, ' \\\n 'must be x or y instead: %r' % coord_key\n\n # Get data for requested profile\n profile_data = get_profile_data(self, var_key, coord_key, loc_list)\n\n # Plot the profile\n plot.lining(*profile_data, xlabel=xlabel, ylabel=ylabel, title=self.case_name,\n line_label=var_key + ' at y = ' + str(loc_list))\n return",
"def plot_team_parameter(data, title, alpha=0.05, axes_colour='dimgray'):\n fig, ax = plt.subplots(figsize=(8, 6))\n\n upper = 1 - (alpha / 2)\n lower = 0 + (alpha / 2)\n\n # Sort by median values\n ordered_teams = data.median().sort_values().keys()\n\n for i, team in enumerate(ordered_teams):\n x_mean = np.median(data[team])\n x_lower = np.percentile(data[team], lower * 100)\n x_upper = np.percentile(data[team], upper * 100)\n\n ax.scatter(x_mean, i, alpha=1, color='black', s=25)\n ax.hlines(i, x_lower, x_upper, color='black')\n\n ax.set_ylim([-1, len(ordered_teams)])\n ax.set_yticks(list(range(len(ordered_teams))))\n ax.set_yticklabels(list(ordered_teams))\n\n # Add title\n fig.suptitle(title, ha='left', x=0.125, fontsize=18, color='k')\n\n # Change axes colour\n ax.spines[\"bottom\"].set_color(axes_colour)\n ax.spines[\"left\"].set_color(axes_colour)\n ax.tick_params(colors=axes_colour)\n\n # Remove top and bottom spines\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n\n return fig",
"def plot_parameter_space(self,x,y,z,trainID,valID):\n x=np.array(x); y=np.array(y); z=np.array(z)\n figgy = plt.figure(dpi=200)\n ax = Axes3D(figgy)\n xT = x[trainID]; xV = x[valID] \n yT = y[trainID]; yV = y[valID]\n zT = z[trainID]; zV = z[valID]\n ax.scatter(xT,yT,zT,c='g',label=\"Training Data\")\n ax.scatter(xV,yV,zV,c='purple',label=\"Validation Data\")\n ax.set_xlabel(\"τ (Time Constant)\")\n ax.set_ylabel(\"Kp (Gain)\")\n ax.set_zlabel(\"θ (Delay)\")\n ax.legend()",
"def Plot_Two_Profiles(Profile1,Profile2,Parameter_index,alpha=0.95,show=True,output_file=None,xtitle='',ytitle='',label1='',label2='',maintitle=''):\n\n df=Profile1['Parameters'].shape[0] #number of estimated parameters\n\n threshold1=np.min(Profile1['Profile_Likelihood']) + chi2.ppf(alpha,df)\n threshold2=np.min(Profile2['Profile_Likelihood']) + chi2.ppf(alpha,df)\n\n plt.clf()\n plt.plot(Profile1['Parameters'][Parameter_index], Profile1['Profile_Likelihood'], '-', c='0.2', linewidth=2, label=label1)\n plt.plot(Profile2['Parameters'][Parameter_index], Profile2['Profile_Likelihood'], '-', c='#b50303', linewidth=2, label=label2)\n plt.plot([Profile1['Parameters'][Parameter_index, 0], Profile1['Parameters'][Parameter_index, -1]], [threshold1, threshold1], '--', c='0.2', linewidth=2)\n plt.plot([Profile2['Parameters'][Parameter_index, 0], Profile2['Parameters'][Parameter_index, -1]], [threshold2, threshold2], '--', c='#b50303', linewidth=2)\n plt.xlabel(xtitle,fontsize=12)\n plt.ylabel(ytitle,fontsize=12)\n plt.title(maintitle,fontsize=12)\n plt.legend(loc='best',fontsize=12)\n\n if output_file!=None:\n plt.rcParams['figure.figsize']=5,5\n plt.savefig(output_file,dpi='figure',bbox_inches='tight')\n if show:\n plt.show()",
"def profile(array, val, ax='x'):\n fig = plt.figure()\n im = plt.imshow(array,cmap=plt.cm.jet)\n cb = plt.colorbar()\n if ax == 'x':\n plt.axvline(val,c='k')\n profile = array[:,val]\n else:\n plt.axhline(val,c='k')\n profile = array[val,:]\n\n fig = plt.figure()\n plt.plot(profile,'b-',lw=2) #just show line, not points 'b.-' show points\n plt.axhline(0,c='k')\n plt.title('Profile {0}={1}'.format(ax,val))\n #plt.ylabel('deformation')\n plt.xlabel('Pixel #')\n plt.grid()\n #plt.show()\n\n # return datapoints!\n return profile",
"def plot_profile(self):\n\n # Create local variables.\n dp = self.data_point\n dp_scaled = self.data_point_scaled\n ax = self.profile_ax\n\n # Plot the bars representing the feature values.\n ax.barh(dp.index, dp_scaled, alpha=1, color='#E0E0E0', edgecolor='#E0E0E0')\n ax.set_xticklabels([]) # remove x-values\n ax.set_yticklabels([]) # remove y labels (they are in the shap plot)\n ax.set_xlim([0,1.08]) # ensure the scale is always the same, and fits the text (+0.08)\n ax.invert_yaxis() # put first feature at the top\n ax.set_title('These are all the feature values of this profile.', \n loc='left', fontsize=12)\n \n # Add feature value at the end of the bar.\n for i, dp_bar in enumerate(ax.containers[0].get_children()):\n ax.text(dp_bar.get_width()+0.01, dp_bar.get_y()+dp_bar.get_height()/2.,\n '{:.4g}'.format(dp[i]), ha='left', va='center')\n\n # Set local ax back to class ax\n self.profile_ax = ax",
"def Plot_Profile(profile_dataframe, line_color, xmin, xmax, ymin, ymax, aspect, shade):\r\n fig = plt.figure()\r\n \r\n plt.plot(profile_dataframe['Distance'], profile_dataframe['Z'], color = line_color)\r\n \r\n plt.xlabel('Distance (m)')\r\n plt.ylabel('Elevation (m)')\r\n \r\n plt.xlim(0, max(profile_dataframe['Distance']) + 5)\r\n plt.ylim(ymin, ymax)\r\n plt.tight_layout(pad=0)\r\n \r\n plt.gca().spines['right'].set_visible(False)\r\n plt.gca().spines['top'].set_visible(False)\r\n\r\n plt.gca().set_aspect(aspect)\r\n \r\n # This is key to getting the x limits to work with a set aspect ratio!\r\n plt.gca().set_adjustable(\"box\")\r\n \r\n # If statement for shading beneath profile line\r\n if shade:\r\n plt.gca().fill_between(profile_dataframe['Distance'], profile_dataframe['Z'], 0, facecolor= line_color, alpha = 0.1)\r\n \r\n return fig",
"def plotprofiles(proflist,varplot=False,time=np.nan,scale='log',fignum=np.nan,cindex=0):\n colorlist = ['b','g','r','c','m','y','k']\n if np.isnan(fignum):\n plt.figure()\n else:\n plt.figure(fignum)\n for ai in range(len(proflist)):\n p1 = proflist[ai].copy()\n if np.isnan(time):\n p1.time_integrate()\n if scale == 'log':\n plt.semilogx(p1.profile.flatten(),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n else:\n plt.plot(p1.profile.flatten(),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n if varplot:\n if scale == 'log':\n plt.semilogx(np.sqrt(p1.profile_variance.flatten()),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n else:\n plt.plot(np.sqrt(p1.profile_variance.flatten()),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n else:\n itime = np.argmin(np.abs(p1.time-time))\n if scale == 'log':\n plt.semilogx(p1.profile[itime,:],p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n else:\n plt.plot(p1.profile[itime,:],p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n if varplot:\n if scale == 'log':\n plt.semilogx(np.sqrt(p1.profile_variance[itime,:]),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n else:\n plt.plot(np.sqrt(p1.profile_variance[itime,:]),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n \n plt.grid(b=True);\n plt.legend()\n plt.ylabel('Range [m]')\n plt.xlabel(p1.profile_type)",
"def plot_param_dist(param_stats):\n\n fig, ax = plt.subplots(figsize=(15, 5))\n labels = param_stats.keys()\n parts = ax.violinplot(param_stats.values(), showmeans=True)\n ax.set_xticks(np.arange(1, len(labels) + 1))\n ax.set_xticklabels(labels, fontsize=8)\n ax.set_xlim(0.25, len(labels) + 0.75)\n ax.set_ylim(0, 1)\n return fig",
"def paramspace_line_visualization_plot(self, plot_name, param_name, value_range_list = []):\n # Get sampels, using the eval function\n x_axis = []\n samples = []\n fixed_params = self.obj_function.default_params.copy()\n value_range_list = [round(float(v), 2) for v in value_range_list]\n del(fixed_params[self._to_rosparam(param_name)])\n for params_dict, metric_value, sample in self.obj_function.samples_filtered(fixed_params):\n if value_range_list == [] or round(float(params_dict[self._to_rosparam(param_name)]), 2) in value_range_list:\n x_axis.append(params_dict[self._to_rosparam(param_name)])\n samples.append(sample)\n # Sort both lists by x_axis value before plotting\n temp_sorted_lists = sorted(zip(*[x_axis, samples]))\n x_axis, samples = list(zip(*temp_sorted_lists))\n fig, axes = self._samples_plot(x_axis, samples, np.arange(len(x_axis)), show_pm_values=False, xticklabels_spacing=2)\n #fig.suptitle(param_name, fontsize=16, fontweight='bold')\n axes[3].set_xlabel(param_name)\n axes[3].tick_params(labelsize=12) # Reduce the fontsize of the xticklabels, otherwise they overlap\n\n # Save and close\n path = os.path.join(self._params['plots_directory'], plot_name)\n fig.savefig(path)\n print(\"\\tSaved metric plot to\", path)\n plt.close(fig)",
"def plot_all_single_param(self):\n display_names = list(self._params['optimization_definitions'].keys())\n for display_name in display_names:\n self.plot_gpr_single_param(display_name.replace(\" \", \"_\") + \"_\" + self.iteration_string() + \".svg\", display_name)",
"def plot(self): # coverage: ignore\n import matplotlib.pyplot as plt\n\n with quantity_support():\n plt.figure()\n plt.scatter(self.bias.to(u.V), self.current.to(u.mA), marker=\".\", color=\"k\")\n plt.title(\"Probe characteristic\")",
"def plot(self, *args, type=\"marginalized_posterior\", **kwargs):\n return super(SamplesDict, self).plot(*args, type=type, **kwargs)",
"def plot_profiles(self):\n # if 'xportCoef' not in self.data['solpsData']:\n # print('Transport coefficients not yet calculated!! Calculating them using defaults')\n # self.calcXportCoef(plotit = False,debug_plots = False)\n\n headroom = 1.04\n \n # Load SOLPS profiles and transport coefficients\n\n psi_solps = self.data['solpsData']['psiSOLPS']\n neold = self.data['solpsData']['last10']['ne']\n dold = self.data['solpsData']['last10']['dn']\n teold = self.data['solpsData']['last10']['te']\n keold = self.data['solpsData']['last10']['ke']\n tiold = self.data['solpsData']['last10']['ti']\n kiold = self.data['solpsData']['last10']['ki']\n \n # Load experimental profiles\n\n psi_data_fit = self.data['pedData']['fitPsiProf']\n neexp = 1.0e20 * self.data['pedData']['fitProfs']['neprof']\n teexp = 1.0e3*self.data['pedData']['fitProfs']['teprof']\n tiexp = 1.0e3*self.data['pedData']['fitVals']['tisplpsi']['y']\n tiexppsi = self.data['pedData']['fitVals']['tisplpsi']['x']\n\n\n dnew_ratio = self.data['solpsData']['xportCoef']['dnew_ratio']\n kenew_ratio = self.data['solpsData']['xportCoef']['kenew_ratio']\n kinew = self.data['solpsData']['xportCoef']['kinew']\n\n\n # Find limits of Te, Ti for plots\n TS_inds_in_range = np.where(psi_data_fit > np.min(psi_solps))[0]\n Ti_inds_in_range = np.where(tiexppsi > np.min(psi_solps))[0]\n max_ne = np.max([np.max(neold), np.max(neexp[TS_inds_in_range])]) / 1.0e19\n max_Te = np.max([np.max(teold), np.max(teexp[TS_inds_in_range])])\n max_Ti = np.max([np.max(tiold), np.max(tiexp[Ti_inds_in_range])])\n\n\n f, ax = plt.subplots(2, sharex = 'all')\n ax[0].plot(psi_data_fit, neexp / 1.0e19, '--bo', lw = 1, label = 'Experimental Data')\n ax[0].plot(psi_solps, neold / 1.0e19, 'xr', lw = 2, mew=2, ms=8, label = 'SOLPS')\n ax[0].set_ylabel('n$_e$ (10$^{19}$ m$^{-3}$)')\n ax[0].legend(loc = 'best')\n ax[0].set_ylim([0, max_ne * headroom])\n ax[0].grid('on')\n\n # ax[1, 0].plot(psi_solps, dold, '-xr', lw = 2)\n # ax[1, 0].plot(psi_solps, dnew_ratio, '-ok', lw = 2, label = 'Data')\n # ax[1, 0].set_ylabel('D')\n # ax[1, 0].set_xlabel('$\\psi_N$')\n # ax[1, 0].grid('on')\n\n ax[1].plot(psi_data_fit, teexp, '--bo', lw = 1, label = 'Experimental Data')\n ax[1].plot(psi_solps, teold, 'xr', lw = 2, mew=2, ms=8, label = 'SOLPS')\n ax[1].set_ylabel('T$_e$ (eV)')\n ax[1].set_ylim([0, max_Te * headroom])\n ax[1].set_yticks(np.arange(0, max_Te * headroom + 200, 200))\n ax[1].grid('on')\n ax[1].set_xlabel('$\\psi_N$')\n\n # ax[1, 1].plot(psi_solps, keold, '-xr', lw = 2)\n # ax[1, 1].plot(psi_solps, kenew_ratio, '-ok', lw = 2, label = 'Data')\n # ax[1, 1].set_ylabel('$\\chi_e$')\n # ax[1, 1].set_xlabel('$\\psi_N$')\n # ax[1, 1].set_xlim([np.min(psi_solps) - 0.01, np.max(psi_solps) + 0.01])\n # ax[1, 1].grid('on')\n\n # ax[0, 2].plot(psi_solps, tiold, 'xr', lw = 2, label = 'SOLPS')\n # ax[0, 2].plot(tiexppsi, tiexp, '--bo', lw = 1, label = 'Data')\n # ax[0, 2].set_ylabel('T$_i$ (eV)')\n # ax[0, 2].set_ylim([0, max_Ti * headroom])\n # ax[0, 2].grid('on')\n\n # ax[1, 2].plot(psi_solps, kiold, '-xr', lw = 2)\n # ax[1, 2].plot(psi_solps, kinew, '-ok', lw = 2, label = 'Data')\n # ax[1, 2].set_ylabel('$\\chi_i$')\n # ax[1, 2].set_xlabel('$\\psi_N$')\n # ax[1, 2].set_xlim([np.min(psi_solps) - 0.01, np.max(psi_solps) + 0.01])\n # ax[1, 2].grid('on')\n\n ax[0].set_xticks(np.arange(0.84, 1.05, 0.04))\n ax[0].set_xlim([np.min(psi_solps) - 0.01, np.max(psi_solps) + 0.01])\n plt.tight_layout()\n\n plt.show(block = False)",
"def plot(self):\n import matplotlib.pyplot as plt\n plt.plot(self.lambdas, self.result['beta'] )\n plt.ylabel('Coefficient')\n plt.xlabel('Regularization Parameter')\n plt.suptitle('Regularization Path')\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
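The Plot_Profile document above expects a Profile dict with a 2-D 'Parameters' array and a 1-D 'Profile_Likelihood' array, and draws a chi-square confidence threshold over the profile. A minimal, self-contained sketch of that pattern on synthetic values (the array shapes, the grid, and the likelihood numbers below are assumptions for illustration, not taken from this dataset):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2

# Synthetic stand-in for the Profile dict (assumed layout: 'Parameters' is
# (n_parameters, n_points), 'Profile_Likelihood' is (n_points,)).
grid = np.linspace(0.5, 2.0, 41)
profile = {
    "Parameters": np.vstack([grid, np.full_like(grid, 1.0)]),
    "Profile_Likelihood": 20.0 + 8.0 * (grid - 1.2) ** 2,
}

alpha = 0.95
df = profile["Parameters"].shape[0]                 # number of estimated parameters
threshold = profile["Profile_Likelihood"].min() + chi2.ppf(alpha, df)

plt.plot(profile["Parameters"][0], profile["Profile_Likelihood"], ".", c="0.2")
plt.axhline(threshold, linestyle="--", c="0.2")     # confidence threshold
plt.xlabel("parameter value")
plt.ylabel("profile likelihood")
plt.show()

Parameter values whose profile likelihood stays below the dashed threshold lie inside the approximate confidence region implied by alpha.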
Plots the comparison of two profile likelihood curves for the same parameter.
|
def Plot_Two_Profiles(Profile1,Profile2,Parameter_index,alpha=0.95,show=True,output_file=None,xtitle='',ytitle='',label1='',label2='',maintitle=''):
df=Profile1['Parameters'].shape[0] #number of estimated parameters
threshold1=np.min(Profile1['Profile_Likelihood']) + chi2.ppf(alpha,df)
threshold2=np.min(Profile2['Profile_Likelihood']) + chi2.ppf(alpha,df)
plt.clf()
plt.plot(Profile1['Parameters'][Parameter_index], Profile1['Profile_Likelihood'], '-', c='0.2', linewidth=2, label=label1)
plt.plot(Profile2['Parameters'][Parameter_index], Profile2['Profile_Likelihood'], '-', c='#b50303', linewidth=2, label=label2)
plt.plot([Profile1['Parameters'][Parameter_index, 0], Profile1['Parameters'][Parameter_index, -1]], [threshold1, threshold1], '--', c='0.2', linewidth=2)
plt.plot([Profile2['Parameters'][Parameter_index, 0], Profile2['Parameters'][Parameter_index, -1]], [threshold2, threshold2], '--', c='#b50303', linewidth=2)
plt.xlabel(xtitle,fontsize=12)
plt.ylabel(ytitle,fontsize=12)
plt.title(maintitle,fontsize=12)
plt.legend(loc='best',fontsize=12)
if output_file!=None:
plt.rcParams['figure.figsize']=5,5
plt.savefig(output_file,dpi='figure',bbox_inches='tight')
if show:
plt.show()
|
[
"def compare_plot( xsc1, xsc2, title=\"comparison plot\", legend1=\"first file\", legend2=\"second file\",\n saveFile=None, legendXY = (0.05, 0.95) ):\n from fudge.vis.matplotlib import plot2d\n import matplotlib.pyplot as plt\n\n if xsc1.domain() != xsc2.domain():\n xsc1, xsc2 = xsc1.mutualify( 1e-8, 1e-8, 0, xsc2, 1e-8, 1e-8, 0 )\n diff = xsc1 - xsc2\n mean = (xsc1 + xsc2) / 2.0\n\n import numpy\n x1,y1 = map(numpy.array, diff.copyDataToXsAndYs())\n x2,y2 = map(numpy.array, mean.copyDataToXsAndYs())\n y2[ (y2==0)*(y1==0) ] = 1.0 # silence zero/zero division warnings\n relative_diff = zip(x1, y1/y2 * 100)\n \n \"\"\" # XYs division can take a long time, unnecessary in this case\n mean.setSafeDivide( True ) # control divide/0 errors\n relative_diff = (xsc1 - xsc2) / mean * 100\n \"\"\"\n\n plot1 = plot2d.DataSet2d( xsc1, legend=legend1, symbol=\"+\" )\n plot2 = plot2d.DataSet2d( xsc2, legend=legend2, lineStyle=\"--\", symbol=\"+\", color=\"red\" )\n reldiff_plot = plot2d.DataSet2d( relative_diff, legend=\"percent difference\" )\n\n xAxisSettings = plot2d.AxisSettings( label=\"\", isLog=True )\n yAxisSettings = plot2d.AxisSettings( label=\"Cross Section (barn)\", isLog=True )\n\n fig = plt.figure( figsize=(10,8) )\n fig.subplots_adjust( top=0.88, bottom=0.12, wspace=0.4 )\n\n ax1 = subplot2grid((4,1), (0,0), rowspan=3)\n mplot = plot2d.__makePlot2d( [plot1, plot2], xAxisSettings, yAxisSettings,\n legendOn=True, legendXY=legendXY, thePlot = ax1, minY=0 )\n plt.setp( ax1.get_xticklabels(), visible=False )\n plt.setp( ax1.get_label(), visible=False )\n\n # also plot the relative difference (needs different y-axis):\n xAxisSettings = plot2d.AxisSettings( label=\"$E_n$ (eV)\", isLog=True )\n yAxisSettings = plot2d.AxisSettings( label=\"% diff\" )\n\n ax2 = subplot2grid((4,1), (3,0), sharex=ax1)\n plot2d.__makePlot2d( [reldiff_plot], xAxisSettings, yAxisSettings,\n legendOn=False, thePlot = ax2, minY=0 )\n # tick marks may be too dense on this y-axis:\n #ax2.get_yaxis().set_ticks( [-0.2,0,0.2] )\n\n plt.suptitle( title, fontsize=24, fontweight='bold' )\n if saveFile: plt.savefig( saveFile )\n else: plt.show()",
"def Plot_Profile(Profile,Parameter_index,alpha=0.95,show=True,output_file=None,xtitle='',ytitle='',maintitle=''):\n\n\n plt.clf()\n df=Profile['Parameters'].shape[0] #number of estimated parameters\n threshold=np.min(Profile['Profile_Likelihood']) + chi2.ppf(alpha,df)\n plt.plot(Profile['Parameters'][Parameter_index], Profile['Profile_Likelihood'], '.', c='0.2', linewidth=2)\n plt.plot([Profile['Parameters'][Parameter_index, 0], Profile['Parameters'][Parameter_index, -1]], [threshold, threshold], '--', c='0.2', linewidth=2)\n plt.xlabel(xtitle,fontsize=12)\n plt.ylabel(ytitle,fontsize=12)\n plt.title(maintitle,fontsize=12)\n\n if output_file!=None:\n plt.rcParams['figure.figsize']=5,5\n plt.savefig(output_file,dpi='figure',bbox_inches='tight')\n if show:\n plt.show()",
"def plot_profiles(self, fig=None, figsize=(10, 10), title=None):\n saved_figsize = plt.rcParams['figure.figsize']\n saved_fontsize = plt.rcParams['font.size']\n try:\n if fig is None:\n plt.rcParams['figure.figsize'] = figsize\n plt.rcParams['font.size'] = 10\n fig = plt.figure()\n frame_axes = fig.add_subplot(111, frameon=False)\n if title is None:\n title = os.path.basename(self.fitsfile)\n outcome = \"tearing detected\" if self.has_tearing() else \"no tearing\"\n frame_axes.set_title(title + \": \" + outcome)\n frame_axes.set_xlabel('\\ny-pixel', fontsize=12)\n frame_axes.set_ylabel('ratio of counts for outer two columns\\n\\n',\n fontsize=12)\n frame_axes.get_xaxis().set_ticks([])\n frame_axes.get_yaxis().set_ticks([])\n for amp in self:\n prof1, prof2 = self[amp].ratio_profiles\n ratios1, ratios2 = self[amp].ratios\n stdevs1, stdevs2 = self[amp].stdevs\n ax = fig.add_subplot(4, 4, amp)\n plt.errorbar(range(len(prof1)), prof1, fmt='.', color='green',\n alpha=0.3, label='first edge', zorder=1)\n plt.errorbar(range(len(prof2)), prof2, fmt='.', color='blue',\n alpha=0.3, label='last edge', zorder=1)\n plt.errorbar(self[amp].ylocs, ratios1, yerr=stdevs1,\n xerr=3*[self[amp].dy], fmt='.', color='black',\n zorder=10, markersize=1)\n plt.errorbar(self[amp].ylocs, ratios2, yerr=stdevs2,\n xerr=3*[self[amp].dy], fmt='.', color='black',\n zorder=10, markersize=1)\n ax.annotate('amp %d' % amp, (0.65, 0.9),\n xycoords='axes fraction', fontsize='small')\n plt.tight_layout()\n finally:\n plt.rcParams['figure.figsize'] = saved_figsize\n plt.rcParams['font.size'] = saved_fontsize",
"def plot_likelihood_profile(self, parameter, ax=None, **kwargs):\n import matplotlib.pyplot as plt\n\n if ax is None:\n ax = plt.gca()\n\n ts_diff = self.likelihood_profiles[parameter][\"likelihood\"] - self.total_stat\n values = self.likelihood_profiles[parameter][\"values\"]\n\n ax.plot(values, ts_diff, **kwargs)\n unit = self.model.parameters[parameter].unit\n ax.set_xlabel(parameter + \"[unit]\".format(unit=unit))\n ax.set_ylabel(\"TS difference\")\n return ax",
"def plot_loglikelihood_vs_parameter(plotter: Plotter, mcmc_tables: List[pd.DataFrame], param_name: str, burn_in=0):\n ll_vals = mcmc_tables[0]['loglikelihood']\n p_vals = mcmc_tables[0][param_name]\n log_ll_vals = [-log(-x) for x in ll_vals]\n fig, axis, _, _, _ = plotter.get_figure()\n axis.plot(p_vals[burn_in:], log_ll_vals[burn_in:], '.')\n axis.set_xlabel(param_name)\n axis.set_ylabel('-log(-loglikelihood)')\n plotter.save_figure(\n fig, filename=f\"likelihood against {param_name}\", title_text=f\"likelihood against {param_name}\"\n )",
"def comparison_plot2D(\n u, f, # Function expressions in x and y\n value=0.5, # x or y equals this value\n variation='y', # independent variable\n n=100, # no of intervals in plot\n tol=1E-8, # tolerance for points inside the domain\n plottitle='', # heading in plot\n filename='tmp', # stem of filename\n ):\n v = np.linspace(-1+tol, 1-tol, n+1)\n # Compute points along specified line:\n points = np.array([(value, v_)\n if variation == 'y' else (v_, value)\n for v_ in v])\n u_values = [u(point) for point in points] # eval. Function\n f_values = [f(point) for point in points]\n plt.figure()\n plt.plot(v, u_values, 'r-', v, f_values, 'b--')\n plt.legend(['u', 'f'], loc='upper left')\n if variation == 'y':\n plt.xlabel('y'); plt.ylabel('u, f')\n else:\n plt.xlabel('x'); plt.ylabel('u, f')\n plt.title(plottitle)\n plt.savefig(filename + '.pdf')\n plt.savefig(filename + '.png')",
"def dot_plot(seq1, seq2, window=2):\n\n # compare subsequences\n data = [[(seq1[i:i+window] != seq2[j:j+window])\n for j in range(len(seq1)-window)]\n for i in range(len(seq2)-window)]\n gray()\n xlabel(\"%s (length %i bp)\" % (\"rno-mir-150\", len(seq1)))\n ylabel(\"%s (length %i bp)\" % (\"hsa-mir-150\", len(seq2)))\n imshow(data)",
"def compare_graph(self):\n df = self.compare_df()\n use_variables = [\n v for (i, (p, v)) in enumerate(zip(self.model.PRIORITIES, self.model.VARIABLES))\n if p != 0 and i != 0\n ]\n val_len = len(use_variables) + 1\n fig, axes = plt.subplots(\n ncols=1, nrows=val_len, figsize=(9, 6 * val_len / 2))\n for (ax, v) in zip(axes.ravel()[1:], use_variables):\n df[[f\"{v}_observed\", f\"{v}_estimated\"]].plot.line(\n ax=ax, ylim=(0, None), sharex=True,\n title=f\"{self.model.NAME}: Comparison of observed/estimated {v}(t)\"\n )\n ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))\n ax.ticklabel_format(style=\"sci\", axis=\"y\", scilimits=(0, 0))\n ax.legend(bbox_to_anchor=(1.02, 0),\n loc=\"lower left\", borderaxespad=0)\n for v in use_variables:\n df[f\"{v}_diff\"] = df[f\"{v}_observed\"] - df[f\"{v}_estimated\"]\n df[f\"{v}_diff\"].plot.line(\n ax=axes.ravel()[0], sharex=True,\n title=f\"{self.model.NAME}: observed - estimated\"\n )\n axes.ravel()[0].axhline(y=0, color=\"black\", linestyle=\"--\")\n axes.ravel()[0].yaxis.set_major_formatter(\n ScalarFormatter(useMathText=True))\n axes.ravel()[0].ticklabel_format(\n style=\"sci\", axis=\"y\", scilimits=(0, 0))\n axes.ravel()[0].legend(bbox_to_anchor=(1.02, 0),\n loc=\"lower left\", borderaxespad=0)\n fig.tight_layout()\n fig.show()",
"def meanplot1(meanV1,meanM1,y1,meanV2,meanM2,y2,y_just):\r\n def comp_UbRe(meanMn):\r\n \"\"\"\r\n Same function as in calib_writer\r\n \"\"\"\r\n meanMnp = np.array(meanMn)\r\n rho = 1.19; r = 0.05; nu = 15.0*10**(-6)\r\n A = pi*(r)**2; L = 2*r\r\n Ub = meanMnp/(rho*A); Re = (Ub*L)/nu\r\n return(Ub,Re)\r\n corr_fac = 10**(-3)/0.05\r\n Ub1,Re1 = comp_UbRe(meanM1)\r\n Ub2,Re2 = comp_UbRe(meanM2)\r\n U_std1 = meanV1/Ub1\r\n U_std2 = meanV2/Ub2\r\n ynp1 = np.array(y1);\r\n ynp2 = np.array(y2); ynp4 = np.array(y2)+y_just\r\n y_std1 = ynp1*corr_fac; y_std2 = ynp2*corr_fac; y_std4 = ynp4*corr_fac\r\n plt.subplot(2,2,1)\r\n plt.plot(U_std1,y_std1,'xr',U_std2,y_std2,'og',Usim,ysim,'m',Usim,-np.array(ysim),'m')\r\n plt.grid('on')\r\n plt.ylabel('y/r',fontsize=18)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(['Kommersiell probe','Probe 1','DNS'],fontsize=16)\r\n plt.subplot(2,2,3)\r\n plt.plot(U_std1,y_std1,'xr',U_std2,y_std4,'og',Usim,ysim,'m',Usim,-np.array(ysim),'m')\r\n plt.grid('on')\r\n plt.ylabel('y/r',fontsize=18)\r\n plt.xlabel('U/Ub',fontsize=18)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(['Kommersiell probe','Probe 1, y+2mm','DNS'],fontsize=16)\r\n return('Done')",
"def plot_comparison_embeddings(\n datapoints_a, datapoints_b, embedding_names, name=\"compare_bias\", save=False,\n):\n fig, ax = plt.subplots()\n ax.scatter(datapoints_b, datapoints_a, s=10)\n ax.set_ylim(min(datapoints_a) - 0.1, max(datapoints_a) + 0.1)\n ax.set_xlim(min(datapoints_b) - 0.1, max(datapoints_b) + 0.1)\n plt.xlabel(\"Gender axis {}\".format(embedding_names[1]), fontsize=12)\n plt.ylabel(\"Gender axis {}\".format(embedding_names[0]), fontsize=12)\n plt.title(\"Gender bias in professions across embeddings\", pad=18, fontsize=13)\n if save:\n fig.savefig(\"{}.png\".format(name))\n plt.show()",
"def plot_score_histo(title_local, list_score1, list_score2, list_score1_stdev, list_score2_stdev, score1_name, score2_name, xlabel_string, list_x_label, plot_option, **kwargs):\n FONTSIZE = 20\n \n pp_local = kwargs.get('pp', None)\n f_out_local = kwargs.get('f_out', None)\n \n cmap = CB2cm['BuVe']\n colors = [cmap(i) for i in np.linspace(0, 1, 2)]\n \n for i_temp, x_label in enumerate(list_x_label):\n if x_label[:2]=='MT':\n list_x_label[i_temp] = x_label[3:]\n if list_x_label[i_temp]=='Nested5foldCV':\n list_x_label[i_temp] = 'Nested-5foldCV'\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(title_local)\n width = 0.35\n list_x = []\n list_x_bis = []\n for i in range(len(list_score1)):\n list_x.append(i)\n list_x_bis.append(i+width)\n ## the bars\n print(list_x)\n print(list_score1)\n print(list_score1_stdev)\n rects1 = ax.bar(list_x, list_score1, width, color=colors[0], yerr=list_score1_stdev)\n rects2 = ax.bar(list_x_bis, list_score2, width, color=colors[1], yerr=list_score2_stdev)\n # axes and labels\n ax.set_xlim(min(list_x)-width,max(list_x_bis)+width)\n ax.set_ylim(min(min(list_score1),min(list_score2))-0.05,1)\n plt.yticks(fontsize=FONTSIZE)\n #ax.set_ylim(0,45)\n ax.set_ylabel('Scores', fontsize=FONTSIZE)\n ax.set_xlabel(xlabel_string, fontsize=FONTSIZE)\n xTickMarks = list_x_label\n ax.set_xticks([(list_x[i] + list_x_bis[i])/2 for i in range(len(list_x))])\n xtickNames = ax.set_xticklabels(xTickMarks)\n #plt.setp(xtickNames, rotation=45, fontsize=10)\n plt.setp(xtickNames, fontsize=FONTSIZE)\n ## add a legend\n lgd = ax.legend( (rects1[0], rects2[0]), (score1_name, score2_name), loc=\"upper right\")\n for label in lgd.get_texts():\n label.set_fontsize(FONTSIZE)\n lgd.get_title().set_fontsize(FONTSIZE)\n\n if plot_option==\"save\" or plot_option==\"print_and_save\":\n f_out_local.write(title_local+':\\n')\n for ind in range(len(list_x_label)):\n f_out_local.write(list_x_label[ind]+'\\t'+str(list_score1[ind])+'\\t'+str(list_score1_stdev[ind])+'\\t'+str(list_score2[ind])+'\\t'+str(list_score2_stdev[ind])+'\\n')\n\n return lgd, xtickNames",
"def f1Plot(result1, result2, resultLabel):\n import numpy as np\n import matplotlib.pyplot as plt\n plt.style.use('seaborn')\n\n # --- Constants\n index = np.arange(3)\n width = 0.35\n labels = ('Neg.', 'Neut.', 'Pos.')\n\n # --- Data\n prec1 = result1[0]\n rec1 = result1[1]\n f1_1 = result1[2]\n f1Avg1 = sum(f1_1)/len(f1_1)\n acc1 = result1[3]\n\n prec2 = result2[0]\n rec2 = result2[1]\n f1_2 = result2[2]\n f1Avg2 = sum(f1_2)/len(f1_2)\n acc2 = result2[3]\n\n\n fig1, ax = plt.subplots(ncols=3, figsize=(9,3), dpi=100)\n # --- Plot Precisions\n ax[0].bar(\n index - width/2,\n prec1,\n width, label=resultLabel[0], zorder=2\n )\n ax[0].bar(\n index + width/2,\n prec2,\n width, label=resultLabel[1], zorder=2\n )\n ax[0].set_title('Precision')\n\n # --- Plot Recalls\n ax[1].bar(\n index - width/2,\n rec1,\n width, label=resultLabel[0], zorder=2\n )\n ax[1].bar(\n index + width/2,\n rec2,\n width, label=resultLabel[1], zorder=2\n )\n ax[1].set_title('Recall')\n# ax[1].set_yticklabels('')\n\n # --- Plot F1-Score\n ax[2].bar(\n index - width/2,\n f1_1,\n width, label=resultLabel[0], zorder=2\n )\n ax[2].bar(\n index + width/2,\n f1_2,\n width, label=resultLabel[1], zorder=2\n )\n ax[2].set_title('F1-Score')\n# ax[2].set_yticklabels('')\n ax[2].legend(loc=\"best\", fontsize='small')\n\n for a in ax:\n a.set_xticks(index)\n a.set_xticklabels(labels)\n a.grid(linestyle=':')\n a.set_ylim([0, 1])\n\n\n# fig2, ax = plt.subplots(ncols=2, figsize=(6,3), dpi=100)\n# # --- Plot Average F1-Score\n# ax[0].bar(\n# [resultLabel[0], resultLabel[1]],\n# [f1Avg1, f1Avg2],\n# width, align = 'center', color=['#1f77b4', '#ff7f0e'], zorder=2\n# )\n# ax[0].set_title('Average F1-Score')\n## ax[0].set_yticklabels('')\n#\n# # --- Plot Accuracy\n# ax[1].bar(\n# [resultLabel[0], resultLabel[1]],\n# [acc1, acc2],\n# width, align = 'center', color=['#1f77b4', '#ff7f0e'], zorder=2\n# )\n# ax[1].set_title('Accuracy')\n# ax[1].set_yticklabels('')\n#\n# for a in ax:\n# a.grid(linestyle=':')\n# a.set_ylim([0, 1])\n\n plt.tight_layout()\n plt.show()",
"def plot_compare(mesa_runs, mesa_mdots, grid_source='mesa',\n params=None, bprops=('rate', 'fluence', 'peak'),\n mass=1.4, radius=10, verbose=True, display=True,\n grid_version=0, ls='-', offset=True):\n if params is None:\n print('Using default params')\n params = {'x': 0.7, 'z': 0.02, 'qb': 0.1, 'qnuc': 0.0}\n\n kgrid = grid_analyser.Kgrid(source=grid_source, verbose=verbose,\n grid_version=grid_version)\n grid_summ = kgrid.get_summ(params=params)\n grid_params = kgrid.get_params(params=params)\n\n xi, redshift = gravity.gr_corrections(r=radius, m=mass, phi=1)\n\n mesa_models = extract_bprops(runs=mesa_runs, mesa_mdots=mesa_mdots,\n verbose=verbose)\n\n n_bprops = len(bprops)\n figsize = (4.5, 2.5*n_bprops)\n fig, ax = plt.subplots(n_bprops, 1, figsize=figsize, sharex='all')\n\n for i, bprop in enumerate(bprops):\n u_bprop = f'u_{bprop}'\n\n unit_f = unit_factor(bprop)\n gr_f = gr_correction(bprop, xi=xi, redshift=redshift)\n\n # === kepler model ===\n # TODO: fix hack\n offset_x = 0.0\n if i > 0:\n offset_x = 0.003\n ax[i].errorbar(grid_params['accrate']*xi**2 - offset_x,\n grid_summ[bprop]*gr_f/unit_f, ls=ls,\n yerr=grid_summ[u_bprop]*gr_f/unit_f, marker='o',\n capsize=3, label=r'\\textsc{Kepler}')\n\n # === mesa model ===\n ax[i].errorbar(mesa_models['accrate'], mesa_models[bprop]/unit_f,\n yerr=mesa_models[u_bprop]/unit_f, marker='o',\n capsize=3, label=r'\\textsc{MESA}', ls=ls)\n\n ylabel = plot_tools.full_label(bprop)\n ax[i].set_ylabel(ylabel)\n\n ax[0].legend()\n ax[-1].set_xlabel(plot_tools.full_label('mdot'))\n\n plt.tight_layout()\n if display:\n plt.show(block=False)\n\n return fig, ax",
"def compare_profiles(var_key, coord_key, loc, *cases, var_scale=1, xlim=None, ylim=None, xlog=False, ylog=False, append_to_fig_ax=(False, False)):\n\n # Generate new plot or append to given\n fig, ax = plot.check_append_to_fig_ax(append_to_fig_ax)\n\n for case in cases:\n profile_data = get_profile_data(case, var_key, coord_key, loc)\n\n # Horizontal profile\n if coord_key == 'x' or coord_key == 'y+':\n xlabel = coord_key\n ylabel = var_key\n scale_x = 1\n scale_y = var_scale\n\n # Vertical profile\n elif coord_key == 'y':\n xlabel = var_key\n ylabel = coord_key\n scale_x = var_scale\n scale_y = 1\n\n else:\n xlabel = 'error'\n ylabel = 'error'\n scale_x = 1\n scale_y = 1\n assert (coord_key == 'x' or coord_key == 'y'), 'Invalid key for coordinates, ' \\\n 'must be x or y instead: %r' % coord_key\n\n plot.lining(*profile_data,\n append_to_fig_ax=(fig, ax),\n xlim=xlim, ylim=ylim,\n xlog=xlog, ylog=ylog,\n scale_x=scale_x, scale_y=scale_y,\n xlabel=xlabel, ylabel=ylabel,\n line_label=case.case_name)\n\n return fig, ax",
"def plotprofiles(proflist,varplot=False,time=np.nan,scale='log',fignum=np.nan,cindex=0):\n colorlist = ['b','g','r','c','m','y','k']\n if np.isnan(fignum):\n plt.figure()\n else:\n plt.figure(fignum)\n for ai in range(len(proflist)):\n p1 = proflist[ai].copy()\n if np.isnan(time):\n p1.time_integrate()\n if scale == 'log':\n plt.semilogx(p1.profile.flatten(),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n else:\n plt.plot(p1.profile.flatten(),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n if varplot:\n if scale == 'log':\n plt.semilogx(np.sqrt(p1.profile_variance.flatten()),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n else:\n plt.plot(np.sqrt(p1.profile_variance.flatten()),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n else:\n itime = np.argmin(np.abs(p1.time-time))\n if scale == 'log':\n plt.semilogx(p1.profile[itime,:],p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n else:\n plt.plot(p1.profile[itime,:],p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'-',label=p1.label)\n if varplot:\n if scale == 'log':\n plt.semilogx(np.sqrt(p1.profile_variance[itime,:]),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n else:\n plt.plot(np.sqrt(p1.profile_variance[itime,:]),p1.range_array.flatten(),colorlist[np.mod(ai+cindex,len(colorlist))]+'--',label=p1.label+' std.')\n \n plt.grid(b=True);\n plt.legend()\n plt.ylabel('Range [m]')\n plt.xlabel(p1.profile_type)",
"def plot_TPR_metrics(TPR_train, TPR_test, avg_diff_train, avg_diff_test,\n d_metrics, gan_metrics, plot_id=0, titles=True, dpi=600):\n\n fig = plt.figure(num='TPR_Metrics', figsize=(20, 6), facecolor='w', dpi=dpi)\n\n # 1.a Plot the TPR of the target model\n epochs = len(TPR_train)\n minTPR = min(TPR_test)\n min_idx = TPR_test.index(minTPR)\n ax1 = plt.subplot(1, 3, 1)\n if titles:\n ax1.set_title('TPR of the Target Model \\& Average \\# Changes per AE',\n fontsize=16, fontweight='bold')\n ax1.vlines(1, ymin=0, ymax=1, linestyles='dashed', linewidth=1) # Initial\n # plt.scatter(min_idx, minTPR, s=200, marker='o', c='None', ec='r')# Minimum\n # ax1.vlines(min_idx, ymin=0, ymax=1, linewidth=3, color='k') # Minimum\n # ax1.fill_between([0, 1], -1, 1)\n ax1.plot(range(epochs), TPR_train, c='darkred', linestyle='-',\n label='Training TPR', linewidth=2)\n ax1.plot(range(epochs), TPR_test, c='limegreen', linestyle='--',\n label='Test TPR', linewidth=2)\n ax1.set_ylabel('TPR', fontsize=14)\n ax1.set_ylim(0, 1)\n ax1.set_xlabel('Epoch', fontsize=14)\n ax1.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax1.legend(loc='upper left', bbox_to_anchor=(0.06, 1.))\n\n # 1.b Plot the avg # changes per AE\n ax1b = ax1.twinx()\n ax1b.plot(range(1, epochs), avg_diff_train, c='mediumblue',\n label='Training Set Changes', linewidth=2)\n ax1b.plot(range(1, epochs), avg_diff_test, c='magenta', linestyle='--',\n label='Test Set Changes', linewidth=2)\n ax1b.set_ylabel('Changes (L1 Distance)', fontsize=14)\n ax1b.set_ylim(0, int(max(max(avg_diff_train), max(avg_diff_test))) + 1)\n ax1b.legend(loc='upper right')\n\n # 2. Plot the metrics (loss & accuracy) of the GAN and the discriminator\n d_metrics = np.array(d_metrics)\n gan_metrics = np.array(gan_metrics)\n\n ax2 = plt.subplot(1, 3, 2)\n if titles:\n ax2.set_title('Training Loss', fontsize=16, fontweight='bold')\n ax2.plot(range(1, epochs), gan_metrics[:, 0], c='g',\n label='GAN', linewidth=2)\n ax2.plot(range(1, epochs), d_metrics[:, 0], c='r',\n label='Discriminator', linewidth=2)\n ax2.set_xlabel(\"Epoch\", fontsize=14)\n ax2.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax2.set_ylabel(\"Loss\", fontsize=14)\n ax2.legend()\n\n ax3 = plt.subplot(1, 3, 3)\n if titles:\n ax3.set_title('Training Accuracy', fontsize=16, fontweight='bold')\n ax3.plot(range(1, epochs), gan_metrics[:, 1], c='g',\n label='GAN', linewidth=2)\n ax3.plot(range(1, epochs), d_metrics[:, 1], c='r',\n label='Discriminator', linewidth=2)\n ax3.set_xlabel(\"Epoch\", fontsize=14)\n ax3.xaxis.set_major_locator(MaxNLocator(integer=True))\n ax3.set_ylabel(\"Accuracy\", fontsize=14)\n ax3.legend()\n\n plt.tight_layout()\n\n # plt.savefig(TPR_DIR + f'TPR_{plot_id}.png')\n plt.show()",
"def experiment_two(m=5, real_h=0.1, h=0.3, file_name =\"plot\",nr_iterations =25000,has_CW = \"No\",verify_variant=\"SPRT\"): \n gammas = [0.001,0.005,0.01,0.015,0.02,0.03,0.05,0.075,0.1,0.125,0.15,0.2,0.25,0.35,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.7,0.95,0.99]\n x_NTS = np.zeros(len(gammas))\n y_NTS = np.zeros(len(gammas))\n x_StV = np.zeros(len(gammas))\n y_StV = np.zeros(len(gammas))\n print(\"Progress for \"+str(file_name)+\" (.. out of \"+str(len(gammas))+\"): \")\n for i in range(0,len(gammas)):\n print(i,end=\",\")\n buf = experiments_one(m,h,gammas[i],nr_iterations,real_h=real_h,has_CW = has_CW)\n x_NTS[i] = buf[\"NTS_mean_time\"]\n x_StV[i] = buf[\"S_t_verify_mean_time\"]\n y_NTS[i] = buf[\"Acc_NTS\"]\n y_StV[i] = buf[\"Acc_S_t_verify\"] \n plt.plot(x_NTS,y_NTS, marker = \"^\", label=\"NTS\")\n plt.plot(x_StV, y_StV, marker = \"o\", label=\"SELECT-then-verify\")\n plt.xlabel(\"Iterations\")\n plt.ylabel(\"Success Rate\")\n plt.legend()\n plt.title(\"h=\"+str(h))\n plt.savefig(str(file_name)+\"_plot.png\",dpi=300)\n plt.show() \n np.savetxt(str(file_name)+\"_results.csv\",np.asarray([x_NTS,y_NTS,x_StV,y_StV]),delimiter=\",\")\n # print(x_NTS,y_NTS)\n print(\"Done.\")",
"def plot_training_stats(model):\n plt.figure(figsize=[15, 10])\n\n # Plot GLL\n plt.subplot(221)\n plt.plot(model.gll_history)\n plt.grid(\"on\")\n plt.ylabel(\"GLL\")\n plt.title(\"GLL\")\n\n # Plot trace and determinant of Sigma_b (covariance matrix)\n plt.subplot(222)\n det_sigmaB_history = [np.linalg.det(x) for x in model.D_hat_history]\n trace_sigmaB_history = [np.trace(x) for x in model.D_hat_history]\n plt.plot(det_sigmaB_history, label=\"det(Sigma_b)\")\n plt.plot(trace_sigmaB_history, label=\"trace(Sigma_b)\")\n plt.grid(\"on\")\n plt.legend()\n plt.title(\"Sigma_b_hat metrics\")\n\n plt.subplot(223)\n plt.plot(model.sigma2_hat_history)\n plt.grid(\"on\")\n plt.ylabel(\"sigma_e2_hat\")\n plt.xlabel(\"Iteration\")\n\n plt.subplot(224)\n plot_bhat(model, 1)",
"def line(list1, list2, plo=False, pri=False, **kwargs):\n import matplotlib.pyplot as mp\n [x1, y1] = list1\n [x2, y2] = list2\n a = (y2 - y1) / (x2 - x1)\n b = (x2*y1 - x1*y2) / (x2 - x1)\n label = str(a) + 'x + ' + str(b)\n if plo:\n mp.plot([x1, x2], [y1, y2], label=label, **kwargs)\n if pri:\n print label\n return a, b"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
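The Plot_Two_Profiles document above overlays two profile likelihood curves for the same parameter, each with its own chi-square threshold. A hedged sketch of the same comparison on synthetic curves (the labels, colours, and all numbers are placeholders, not values from the dataset):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import chi2

# Two made-up profile likelihood curves over the same parameter grid.
grid = np.linspace(0.5, 2.0, 41)
pl_a = 20.0 + 8.0 * (grid - 1.2) ** 2
pl_b = 22.0 + 6.0 * (grid - 1.0) ** 2

alpha, df = 0.95, 2                     # df: assumed number of estimated parameters
thr_a = pl_a.min() + chi2.ppf(alpha, df)
thr_b = pl_b.min() + chi2.ppf(alpha, df)

plt.plot(grid, pl_a, "-", c="0.2", label="fit A")
plt.plot(grid, pl_b, "-", c="#b50303", label="fit B")
plt.axhline(thr_a, linestyle="--", c="0.2")
plt.axhline(thr_b, linestyle="--", c="#b50303")
plt.xlabel("parameter value")
plt.ylabel("profile likelihood")
plt.legend(loc="best")
plt.show()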
Look up a query in Freebase. The query should be a unicode string.
|
def lookup(self, query):
key = query.encode('utf8')
if key not in self.cache:
self.cache[key] = self.fetch(key)
return self.cache[key]
|
[
"def fqlQuery(self, query, callback):\n j = Json().put(u\"query\", query)\n self.callMethod(u\"fql.query\", j.getJavaScriptObject(), callback)",
"def handle_wolframalpha_search(self, query):\n\n client = wolframalpha.Client(app_id)\n res = client.query(query)\n\n if len(res.pods) > 0:\n string = \"\"\n pod = res.pods[1]\n if pod.text:\n string = pod.text\n else:\n self.speak(\"I'm sorry boss, but I'm unable to find a response.\")\n\n #string = string.encode('ascii', 'ignore')\n self.speak(\"The result is: \" + string)\n logger.debug(\"The result is: \"+ string)\n else:\n self.speak(\"I'm sorry boss, but I'm not sure about the answer.\")",
"def raw(self, query_str, *query_args, **query_options):\n i = self.interface\n return i.query(query_str, *query_args, **query_options)",
"def gql(cls, query_string, *args, **kwds):\n return pdb.GqlQuery('SELECT * FROM %s %s' % (cls.kind(), query_string),\n *args, **kwds)",
"def query(self,query_string, max_rows=1000):",
"def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)",
"def standard_search(query, **kw):\n logger.info(\"Standard Query -> %r\" % (query))\n pc = get_portal_catalog()\n return pc(query, **kw)",
"def run_shodan_query(api, query, logger=structlog.get_logger()):\n try:\n results = api.search(query)\n # print_prettified_json(results)\n return results['matches']\n\n except shodan.exception.APIError as shodan_error:\n logger.error(f'Error: {shodan_error}')",
"def locate(query):\n try:\n # Extract relevant words of user query.\n parser = Parser(query)\n logging.debug(\"Here are relevant words selected by parser : %s\",\n parser.query_relevant_words)\n\n if not parser.query_relevant_words:\n raise ParserError(\"Parser didn't find any relevant word ...\")\n\n except ParserError as error:\n logging.warning(\"ParserError : %s\", error)\n # If no relevant words found, error is True. Neither address,\n # nor summary are returned. End of process.\n return _return_infos(error=True,\n message=random.choice(\n PARSER_FAILURE_MESSAGES))\n\n try:\n # Ask data to Google Maps Geocoding API.\n gmaps_api_request = GmapsApiRequest(parser.query_relevant_words)\n address = gmaps_api_request.address\n lat = gmaps_api_request.lat\n lng = gmaps_api_request.lng\n logging.debug(\"Here are latitude and longitude returned by GoogleMaps \\\nAPI : %s, %s\", lat, lng)\n\n except GmapsApiError as error:\n logging.warning(\"GmapsApiError : %s\", error)\n # If there is no data returned from Google Maps Geocoding API,\n # then error becomes true. Neither address, nor summary are\n # returned. End of process.\n return _return_infos(error=True,\n message=random.choice(ADDRESS_FAILURE_MESSAGES))\n\n try:\n # Ask data to MediaWiki API.\n mediawiki_api_request = MediaWikiApiRequest(lat, lng)\n summary = mediawiki_api_request.summary\n\n except MediaWikiApiError as error:\n logging.warning(\"MediaWikiError : %s\", error)\n # If there is no data returned from MediaWiki API, then only\n # Google Maps data are returned.\n return _return_infos(address=address, lat=lat, lng=lng,\n summary_message=random.choice(\n SUMMARY_FAILURE_MESSAGES))\n\n # If Parser, GmapsApiRequest & MediaWikiApiRequest return data, then\n # all data are returned.\n return _return_infos(address=address, lat=lat, lng=lng,\n summary=summary)",
"def handle_failure(query):\n return \"Sorry, we're having trouble finding {query}. Can you be more specific?\".format(query=query)",
"def search_company(query):\n lookup = requests.get(SEARCH_QUERY, params={'query': query, 'limit': 10})\n if 200 <= lookup.status_code < 300:\n if len(lookup.json()) == 0:\n return None # Nothing found\n else:\n # Create dict with company name as key\n company_dict = {c['name'].lower(): c for c in lookup.json()}\n info, confidence = match_one(query.lower(), company_dict)\n # Return result if confidence is high enough, or query string\n # contained in company name eg Cisco > Cisco Systems\n if confidence > 0.5 or query.lower() in info['name'].lower():\n return info['symbol']\n else:\n # HTTP Status indicates something went wrong\n raise requests.HTTPError('API returned status code: '\n '{}'.format(lookup.status_code))",
"def get_user_input(query):\n return raw_input(query + \"\\n\")",
"def lookup(self, word):",
"def parse_query(query):\n text = \"\"\n if isinstance(query, Query):\n q = str(query.text.encode(\"utf8\")) if type(query.text) == unicode else str(query.text.decode(\"utf8\").encode(\"utf8\"))\n else:\n q = str(query.encode(\"utf8\")) if type(query) == unicode else str(query.decode(\"utf8\").encode(\"utf8\"))\n try:\n parsetree = splparse(q)\n except:\n logger.exception(\"Failed to parse query: \" + q)\n return None\n return parsetree",
"def search(query):\n\tlogging.debug(\"search(): searching names and snippets for query string\")\n\n\n\twith connection, connection.cursor() as cursor:\n\t\tcommand = \"SELECT * FROM snippets where keyword like \\'%%%s%%\\' or message like \\'%%%s%%\\'\" % (query, query)\n\t\tcursor.execute(command)\n\t\tfetchall = cursor.fetchall()\n\tif not fetchall:\n\t\tprint \"No queries found. Please try another search.\"\n\t\tlogging.debug(\"Search empty, returning None.\")\n\t\treturn None, None \n\telse:\n\t\tname, message = [x[0] for x in fetchall], [x[1] for x in fetchall]\n\t\tlogging.debug(\"Returning keywords in catalog database\")\n\t\treturn name, message",
"def match(self, query: str) -> HPOTerm:\n\n for term in self:\n if query == term.name:\n return term\n raise RuntimeError(\"No HPO entry with name {}\".format(query))",
"def autocomplete(q,limit,offset):\n conn = None\n try:\n # read the connection parameters\n params = credentials()\n # connect to the PostgreSQL server\n conn = psycopg2.connect(**params)\n cur = conn.cursor(cursor_factory=RealDictCursor)\n \n query_sql = \"SELECT * FROM branches WHERE branch LIKE '\"+q+\"%' ORDER BY ifsc ASC LIMIT \" + str(limit) +\" OFFSET \" + str(offset)+\" ;\" \n\n cur.execute(query_sql)\n results = cur.fetchall()\n\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n\n if results:\n results = [dict(row) for row in results]\n response = {\"branches\": results}\n return response\n else:\n return {\"response\":\"NO RESULTS FOUND\"}\n\n except (Exception, psycopg2.DatabaseError) as error:\n return {\"message\":\"Internal Server Error\"}\n finally:\n if conn is not None:\n conn.close()",
"def influx_query_(self, q):\n if self.influx_cli is None:\n self.err(\n self.influx_query_,\n \"No database connected. Please initialize a connection\")\n return\n try:\n return self.influx_cli.query(q)\n except Exception as e:\n self.err(e, self.influx_query_,\n \"Can not query database\")",
"def search(query, base_url):\n query = query.split()\n query = '+'.join(query)\n header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'}\n url = base_url + query\n\n return get_soup(url, header)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
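The lookup document above memoizes remote results, keyed by the UTF-8 encoded query. A minimal Python 3 sketch of that cache-then-fetch pattern (EXAMPLE_URL, the fetch body, and the response shape are assumptions for illustration; the original Freebase search service is no longer available):

import json
import urllib.parse
import urllib.request

EXAMPLE_URL = "https://example.org/search"   # placeholder endpoint (assumption)

class CachedLookup:
    def __init__(self, params=None):
        self.params = dict(params or {})
        self.cache = {}

    def lookup(self, query):
        # Cache on the UTF-8 encoded query, as in the document above.
        key = query.encode("utf8")
        if key not in self.cache:
            self.cache[key] = self.fetch(query)
        return self.cache[key]

    def fetch(self, query):
        # Hypothetical fetch: build a query string, read JSON, and pick the
        # first result carrying a 'notable' field.
        params = dict(self.params, query=query)
        url = EXAMPLE_URL + "?" + urllib.parse.urlencode(params)
        with urllib.request.urlopen(url) as resp:
            response = json.load(resp)
        for result in response.get("result", [])[:2]:
            if "notable" in result:
                return (result["name"], result["notable"]["name"], result["score"])
        return None

Caching on the encoded bytes means repeated queries are answered from memory instead of hitting the remote service again.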
Do the actual communication with the remote API. The query should already be encoded as UTF-8. Returns a triple: the first item is the name recognized by Freebase, the second is the most likely category, and the last is the score. The first two items of the returned value are unicode strings; the last is a float.
|
def fetch(self, query):
self.params['query'] = query
url = SERVICE_URL + '?' + urllib.urlencode(self.params)
response = json.loads(urllib.urlopen(url).read())
for result in response['result'][0:2]:
if 'notable' in result:
name = result['name']
category = result['notable']['name']
return (name, category, result['score'])
return None
|
[
"def handle_wolframalpha_search(self, query):\n\n client = wolframalpha.Client(app_id)\n res = client.query(query)\n\n if len(res.pods) > 0:\n string = \"\"\n pod = res.pods[1]\n if pod.text:\n string = pod.text\n else:\n self.speak(\"I'm sorry boss, but I'm unable to find a response.\")\n\n #string = string.encode('ascii', 'ignore')\n self.speak(\"The result is: \" + string)\n logger.debug(\"The result is: \"+ string)\n else:\n self.speak(\"I'm sorry boss, but I'm not sure about the answer.\")",
"def get_wikidata(item, type_id, prop_id, prop_value):\n base_url = \"https://tools.wmflabs.org/openrefine-wikidata/en/api\"\n\n query = {\"query\": \"\"\"{\"query\":\"%s\",\n \"limit\":6,\n \"type\" : \"%s\",\n \"properties\" : [\n {\"pid\":\"%s\",\"v\":\"%s\"}\n ]\n }\"\"\" % (item, type_id, prop_id, prop_value)}\n\n r = requests.get(base_url, params=query)\n\n # print(r.url)\n\n try:\n json_result = r.json()\n except:\n return \"no json/error\"\n\n #print(json_result)\n\n try:\n qid = [d['id'] for d in json_result['result']]\n id_magnitude = [int(d['id'].replace(\"Q\", ''))\n for d in json_result['result']]\n name = [d['name'] for d in json_result['result']]\n score = [d['score'] for d in json_result['result']]\n match = [d['match'] for d in json_result['result']]\n main_type = [d['type'][0]['name'] for d in json_result['result']]\n\n df = pd.DataFrame({'qid': qid,\n 'name': name,\n 'id_magnitude': id_magnitude,\n 'score': score,\n 'match': match,\n 'main_type': main_type\n })\n\n # order by score then by inverse qid magnitude. NOTE : magnitude inutile depuis qu'Antonin a modifié l'API\n df.sort_values(['score', 'id_magnitude'], ascending=[\n False, True], inplace=True)\n\n # select the best match\n match = df[df['match'] == True].values\n\n if match.size > 0:\n best_match = match\n else:\n best_match = tuple(map(tuple, df.iloc[[0]].values))[0]\n\n return best_match\n\n except IndexError:\n return \"No match\"",
"def get(self, request, pk, format=None):\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\", agent='Mozilla/5.0 (Macintosh; Intel Mac OS X '\n '10_11_5) AppleWebKit/537.36 (KHTML, '\n 'like Gecko) Chrome/50.0.2661.102 '\n 'Safari/537.36')\n sparql.setQuery(\"\"\"\n #Pokemon!\n #updated 2019-01-21\n\n # Gotta catch 'em all\n SELECT DISTINCT ?pokemon ?pokemonLabel ?pokedexNumber\n WHERE\n {\n ?pokemon wdt:P31/wdt:P279* wd:Q3966183 .\n ?pokemon p:P1685 ?statement.\n ?statement ps:P1685 ?pokedexNumber;\n pq:P972 wd:Q20005020.\n\n FILTER ( !isBLANK(?pokedexNumber) ) .\n FILTER ( ?pokedexNumber = \"%s\").\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\" }\n }\n ORDER BY (?pokedexNumber)\n LIMIT 1\n \"\"\" % pk)\n sparql.setReturnFormat(JSON)\n result = self.normalize(sparql.query().convert())\n\n if result is None:\n return Response(status=HTTP_404_NOT_FOUND)\n\n return Response(result, status=HTTP_200_OK)",
"def run(self):\n self._apiInitialize()\n q, self.qsplit = queryCleaner(self.target_poem)\n print \"Collecting possible songs from the Spotify API\"\n # start at MaxGram and work your way down\n n_list = range(findMaxGram(self.qsplit) + 1)[::-1][:-1]\n for n in n_list:\n ngram_list = ngrams(q, n)\n for ngram in ngram_list:\n # call pipeline methods\n # api_obj = ApiQueryPipeline(ngram)\n # api_obj.apiCaller()\n self.api_query_input = ngram\n if (\n '' in self.api_query_input and\n self.api_query_input.index('') == 0\n ):\n continue\n self.apiQuery()\n self.requestParser()\n if self.request_json is None:\n continue\n if not self.request_json:\n continue\n self.tracksAndLinks()\n self.levenshteinCheck()\n self.levenshteinMinimize()\n \n # check if the resulting track is a list or a string\n if isinstance(self.min_tracks, (str, unicode)):\n self.all_tracks.append(self.min_tracks)\n self.all_links.append(self.min_links)\n else:\n self.all_tracks.extend(self.min_tracks)\n self.all_links.extend(self.min_links)\n\n self.all_tracks, self.link_idxs = uniquifyList(self.all_tracks)\n self.all_links = [self.all_links[i] for i in self.link_idxs]",
"def get(self, request, format=None):\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\", agent='Mozilla/5.0 (Macintosh; Intel Mac OS X '\n '10_11_5) AppleWebKit/537.36 (KHTML, '\n 'like Gecko) Chrome/50.0.2661.102 '\n 'Safari/537.36')\n sparql.setQuery(\"\"\"\n #Pokemon!\n #updated 2019-01-21\n\n # Gotta catch 'em all\n SELECT DISTINCT ?pokemon ?pokemonLabel ?pokedexNumber\n WHERE\n {\n ?pokemon wdt:P31/wdt:P279* wd:Q3966183 .\n ?pokemon p:P1685 ?statement.\n ?statement ps:P1685 ?pokedexNumber;\n pq:P972 wd:Q20005020.\n FILTER ( !isBLANK(?pokedexNumber) ) .\n SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE],en\" }\n }\n ORDER BY (?pokedexNumber)\n \"\"\")\n sparql.setReturnFormat(JSON)\n result = self.normalize(sparql.query().convert())\n\n return Response(result, status=HTTP_200_OK)",
"def get_wikidata(value, type_id, prop_id='', prop_value='', lang=\"fr\"):\n base_url = \"https://tools.wmflabs.org/openrefine-wikidata/%s/api\" % (lang)\n\n query = {\"query\": \"\"\"{\"query\":\"%s\",\n \"limit\":0,\n \"type\" : \"%s\"}\"\"\" % (value, type_id)}\n\n if prop_id or prop_value:\n query = {\"query\": \"\"\"{\"query\":\"%s\",\n \"limit\":0,\n \"type\" : \"%s\",\n \"properties\":[{\"pid\":\"%s\",\n \"v\":{\"id\":\"%s\"}}]}\"\"\" % (value, type_id, prop_id, prop_value)}\n\n r = requests.get(base_url, params=query)\n\n # print(r.url)\n\n json_result = r.json()\n\n # print(json_result)\n\n try:\n qid = [d['id'] for d in json_result['result']]\n name = [d['name'] for d in json_result['result']]\n score = [d['score'] for d in json_result['result']]\n match = [d['match'] for d in json_result['result']]\n main_type = [d['type'][0]['name'] for d in json_result['result']]\n\n df = pd.DataFrame({'qid': qid,\n 'name': name,\n 'score': score,\n 'match': match,\n 'main_type': main_type\n })\n\n # order by score\n df.sort_values(['score'], ascending=[\n False], inplace=True)\n\n # select the best match\n match = df[df['match'] == True].values\n\n if match.size > 0:\n best_match = tuple(map(tuple, match))[0]\n else:\n best_match = tuple(map(tuple, df.iloc[[0]].values))[0]\n\n return best_match\n\n except IndexError:\n return \"No match\"",
"def wolfram(query, verbose=False):\n if namespace:\n engine = namespace.get(internals[\"engine\"], float)\n else:\n engine = float\n url = \"https://api.wolframalpha.com/v2/query?format=plaintext&output=JSON\" \\\n \"&includepodid=Value&includepodid=Result&appid=%s&input=%s\" \\\n % (app_id, request.quote(query))\n if verbose:\n print(url)\n response = request.urlopen(url, timeout=timeout).read()\n result = json.loads(response.decode(\"utf-8\").strip())\n result = result[\"queryresult\"]\n if result[\"success\"]:\n if \"pods\" not in result:\n return None\n out = {}\n for pod in result['pods']:\n if verbose:\n print(\"\\n%s\" % pod[\"title\"])\n for subpod in pod[\"subpods\"]:\n try:\n lines = subpod[\"plaintext\"].replace(u\"×10^\", \"e\").splitlines()\n for line in lines:\n if verbose:\n print(\" \", line)\n if '|' in line:\n line = line.split(\"|\")\n key = line[0].strip()\n value = line[1]\n else:\n key = \"\"\n value = line\n v = value.split(' ')[0].strip()\n w = v.split()\n try:\n v = Value(engine(w[0]), \" \".join(w[1:]), original=True)\n except (ValueError, UnitParseError):\n pass\n if key not in out:\n out[key] = [v]\n else:\n out[key].append(v)\n except KeyError:\n pass\n new_out = {}\n for k, vs in out.items():\n # Now make sure all units found are equivalent\n us = {}\n for v in vs:\n if isinstance(v, Value):\n u = tuple(v.unit)\n if u in us:\n us[u].append(v)\n else:\n us[u] = [v]\n # Find out the version with the most identical units\n best_len = 0\n best_val = None\n for u, v in us.items():\n if len(v) > best_len:\n best_val = v[0]\n new_out[k] = best_val\n if new_out:\n if len(new_out) == 1:\n return list(new_out.values())[0]\n else:\n return new_out\n return None",
"def query_data():\n \n # Access JIRA backend and gets data\n db_accessor = JiraGT()\n db_accessor.connect()\n data = db_accessor.get_compliance_data()\n db_accessor.disconnect()\n return data",
"def send(self):\n if self._validate_params_exist():\n payload = self._format_query()\n\n r = requests.post(Query.BASE_URL,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n r = r.text\n\n text_file = open(\"flypy/data/test_data_multi_leg.json\", \"w\")\n text_file.write(r)\n text_file.close()\n query_response = QueryResponse(json.loads(r))\n\n return query_response\n else:\n print(\"All mandatory parameters not supplied.\")\n return False",
"def query(self, url, data_key_list, data_type_class=None, put_data=None):\n if not self.offline:\n response = self.get_response(url, put_data=put_data)\n response_json = response.json()\n logger.debug(\"Response (JSON): {}\".format(response_json))\n\n # handle if the yahoo query returns an error\n if response_json.get(\"error\"):\n response_error_msg = response_json.get(\"error\").get(\"description\")\n logger.error(\"ATTEMPT TO RETRIEVE DATA FAILED WITH ERROR: \\\"{}\\\"\".format(response_error_msg))\n # sys.exit()\n return False\n else:\n raw_response_data = response_json.get(\"fantasy_content\")\n\n # print(\"RAW RESPONSE JSON:\")\n # import pprint\n # pprint.pprint(raw_response_data)\n # print(\"~\" * 100)\n\n # extract data from \"fantasy_content\" field if it exists\n if raw_response_data:\n logger.debug(\"Data fetched with query URL: {}\".format(response.url))\n logger.debug(\"Response (Yahoo fantasy data extracted from: \\\"fantasy_content\\\"): {}\".format(\n raw_response_data))\n else:\n logger.error(\"NO DATA FOUND WHEN ATTEMPTING EXTRACTION FROM FIELD \\\"fantasy_content\\\"\")\n # sys.exit()\n return False\n\n # iterate through list of data keys and drill down to final desired data field\n for i in range(len(data_key_list)):\n if isinstance(raw_response_data, list):\n if isinstance(data_key_list[i], list):\n reformatted = reformat_json_list(raw_response_data)\n raw_response_data = [\n {data_key_list[i][0]: reformatted[data_key_list[i][0]]},\n {data_key_list[i][1]: reformatted[data_key_list[i][1]]}\n ]\n else:\n raw_response_data = reformat_json_list(raw_response_data)[data_key_list[i]]\n else:\n if isinstance(data_key_list[i], list):\n raw_response_data = [\n {data_key_list[i][0]: raw_response_data[data_key_list[i][0]]},\n {data_key_list[i][1]: raw_response_data[data_key_list[i][1]]}\n ]\n else:\n raw_response_data = raw_response_data.get(data_key_list[i])\n\n if raw_response_data:\n logger.debug(\"Response (Yahoo fantasy data extracted from: {}): {}\".format(data_key_list,\n raw_response_data))\n else:\n logger.error(\"NO DATA FOUND WHEN ATTEMPTING EXTRACTION FROM FIELDS {}\".format(data_key_list))\n # sys.exit()\n return False\n\n # unpack, parse, and assign data types to all retrieved data content\n unpacked = unpack_data(raw_response_data, YahooFantasyObject)\n logger.debug(\n \"Unpacked and parsed JSON (Yahoo fantasy data wth parent type: {}):\\n{}\".format(\n data_type_class, unpacked))\n\n self.executed_queries.append({\n \"url\": response.url,\n \"response_status_code\": response.status_code,\n \"response\": response\n })\n\n # cast highest level of data to type corresponding to query (if type exists)\n query_data = data_type_class(unpacked) if data_type_class else unpacked\n if self.all_output_as_json:\n return json.dumps(query_data, indent=2, default=complex_json_handler, ensure_ascii=False)\n else:\n return query_data\n\n else:\n logger.error(\"CANNOT RUN YAHOO QUERY WHILE USING OFFLINE MODE!\")",
"def wolfram_alpha_query(query, simple_result=False, results_limit=2):\n \n if results_limit < 1:\n raise bot_exception(EXCEPT_TYPE, \"Invalid number of results (1 to 8)\")\n results_limit += 1 # Increment to account for input result, which doesn't really count\n \n query_url = \"Query URL: http://www.wolframalpha.com/input/?i={}\\n\".format(urllib.parse.quote_plus(query))\n to_return = ''\n try:\n query_result = wolframalpha.Client(configmanager.config['wolfram_api_key']).query(query)\n except:\n raise bot_exception(WOLFRAM_ALPHA_EXCEPTION, \"Wolfram|Alpha is not configured for use right now, sorry\")\n result_root = query_result.tree.getroot()\n \n # Error handling\n if result_root.get('success') == 'false':\n try: # \n suggestion = result_root.find('didyoumeans').find('didyoumean').text\n except Exception as e: # TODO: Get proper exception type\n print(\"Something bad happened to the query:\\n\" + str(e)) # DEBUG\n raise bot_exception(WOLFRAM_ALPHA_EXCEPTION, \"Wolfram|Alpha could not interpret your query\\n{}\".format(query_url))\n raise bot_exception(WOLFRAM_ALPHA_EXCEPTION,\n \"Wolfram|Alpha could not interpret your query. Trying for first suggestion '{}'...\".format(suggestion),\n wolfram_alpha_query(suggestion))\n elif result_root.get('timedout'):\n if len(query_result.pods) == 0:\n raise bot_exception(WOLFRAM_ALPHA_EXCEPTION, \"Query timed out\", query_url)\n elif not simple_result:\n to_return += \"```\\nWarning: query timed out but returned some results:```\\n\"\n elif len(query_result.pods) == 0:\n raise bot_exception(WOLFRAM_ALPHA_EXCEPTION, \"No result given (general error)\", query_url)\n \n number_of_results = 0\n # Format answer\n if simple_result: # Return a straight, single answer\n if len(list(query_result.results)) > 0:\n to_return += list(query_result.results)[0].text + \"\\n\"\n else: # No explicit 'result' was found\n try:\n to_return += \"Closest result:\\n{}\\n\".format(list(query_result.pods)[1].text)\n except IndexError:\n to_return += \"No valid result returned. This is a bug! Avert your eyes!\"\n except Exception as e: # This shouldn't happen, really\n print(\"Something bad happened to the query (returning simple result):\\n\" + str(e)) # DEBUG\n raise bot_exception(WOLFRAM_ALPHA_EXCEPTION, \"Wolfram|Alpha is now dead. Nice work.\")\n else: # Full answer, up to 1800 characters long\n for pod in list(query_result.pods):\n for sub_pod in list(pod.node):\n image = sub_pod.find('img')\n if image is not None:\n to_return += \"{pod_title}: {image_url}\\n\".format(pod_title=pod.__dict__['title'], image_url=image.get('src'))\n number_of_results += 1\n if len(to_return) > 1800: # We're getting a very large result. Truncate.\n to_return += \"```\\nWarning: truncating very long result here...```\\n\"\n break\n if number_of_results >= results_limit:\n break\n to_return += query_url\n \n return to_return",
"def predict():\n # pass the song into the lclf object, like before\n\n # now, convert the results into json!\n\n # return the json data to the endpoint.\n return data",
"def _perform_single_query(khoros_object, query, fields=None, cursor=None):\n # Construct the entire LiQL query\n cursor = '' if not cursor else liql.structure_cursor_clause(cursor)\n query = f\"{query} {cursor}\" if cursor else query\n\n # Perform the API call and retrieve the data\n response = liql.perform_query(khoros_object, liql_query=query)\n data = liql.get_returned_items(response)\n\n # Get the cursor when present\n cursor = None\n if response.get('data').get('next_cursor'):\n cursor = response['data'].get('next_cursor')\n\n # Add missing columns to message data as needed\n data = _add_missing_cols(data, fields)\n try:\n data = sorted(data, key=itemgetter(*tuple(data[0].keys())))\n except KeyError as missing_key:\n logger.error(f'Could not sort the message data because the \\'{missing_key}\\' key was missing.')\n\n # Return the user data and cursor\n return data, cursor",
"def get_nlc_response(self, input_text):\n\n nlc_response = self.discovery_client.query(\n classifier_id=self.classifier_id,\n query_options={'query': input_text, 'count': DISCOVERY_QUERY_COUNT}\n )\n\n # Watson nlc assigns a confidence level to each result.\n # Based on data mix, we can assign a minimum tolerance value in an\n # attempt to filter out the \"weakest\" results.\n if self.nlc_score_filter and 'results' in nlc_response:\n fr = [x for x in nlc_response['results'] if 'score' in x and\n x['score'] > self.nlc_score_filter]\n\n nlc_response['matching_results'] = len(fr)\n nlc_response['results'] = fr\n\n response = self.format_nlc_response(nlc_response,\n self.dnlc_data_source)\n self.response_tuple = response\n\n fmt = \"{cart_number}) {name}\\n{image}\"\n formatted_response = \"\\n\".join(fmt.format(**item) for item in response)\n return {'nlc_result': formatted_response}",
"def api():\n url = \"https://twinword-word-graph-dictionary.p.rapidapi.com/definition/\"\n\n headers = {\n 'x-rapidapi-host': \"twinword-word-graph-dictionary.p.rapidapi.com\",\n 'x-rapidapi-key': \"340d528947mshe2fbc39eb0dd579p11a01bjsn46992916a685\"\n }\n\n querystring = {\"entry\": word}\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n\n r = json.loads(response.text)\n return r",
"def do(self) -> dict:\n\n query = self.build()\n\n try:\n response = self._connection.run_rest(\"/graphql\", REST_METHOD_POST, {\"query\": query})\n except RequestsConnectionError as conn_err:\n message = str(conn_err) + ' Connection error, query was not successful.'\n raise type(conn_err)(message).with_traceback(sys.exc_info()[2])\n if response.status_code == 200:\n return response.json() # success\n raise UnexpectedStatusCodeException(\"Query was not successful\", response)",
"def getFXRateWithYQL(pair=\"USDJPY\"):\n # URL of API for YQL\n url = \"https://query.yahooapis.com/v1/public/yql\"\n params = {\n \"q\": 'select * from yahoo.finance.xchange where pair in (\"{}\")'.format(pair),\n \"format\": \"json\",\n \"env\": \"store://datatables.org/alltableswithkeys\"\n }\n # Convert the contents of a dict object to URL.\n url += \"?\" + urllib.parse.urlencode(params) \n \n # Send a quary and receive the result.\n res = urllib.request.urlopen(url)\n\n # Convert the result to a json object.\n result = json.loads(res.read().decode('utf-8'))\n\n return result",
"async def get_fm_response(params):\n async with client_session.get(lastfm_root_url, params=params) as r:\n return await r.json()",
"def query_result() -> Any:\n query = request.args.get(\"query_string\", \"\")\n table = get_template_attribute(\"_query_table.html\", \"querytable\")\n contents, types, rows = g.ledger.query_shell.execute_query(query)\n if contents:\n if \"ERROR\" in contents:\n raise FavaAPIException(contents)\n table = table(g.ledger, contents, types, rows)\n\n if types and g.ledger.charts.can_plot_query(types):\n return {\n \"chart\": g.ledger.charts.query(types, rows),\n \"table\": table,\n }\n return {\"table\": table}"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructor for a Funder object. Can be called with no arguments, creating a Funder object with no attributes set, or with a dict of information to be set at object creation.
|
def __init__(self, information_dict=None):
if information_dict is not None:
super(Funder, self).__init__(**information_dict)
else:
super(Funder, self).__init__()
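
A short, hypothetical usage sketch of both call styles described above, assuming the parent class turns the keyword arguments into attributes (that behaviour lives in the superclass and is not shown here); the field names and values are made up.

# Both constructor forms; "id" and "name" are illustrative fields only.
empty_funder = Funder()
funder = Funder({"id": 42, "name": "Example Research Council"})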
|
[
"def constructor(self, **kwargs):\n if len(kwargs) > 0:\n self.__dict__.update(kwargs)",
"def __init__(self, *args):\n this = _coin.new_SoFieldData(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(\n self, event_time, time_zone, duration, level,\n geojson_file_path, data_source,\n flood_id=None, output_dir=None, output_basename=None):\n\n super(FloodHazard, self).__init__()\n self.localization = FloodHazardString()\n self._event_time = event_time\n self._time_zone = time_zone\n self._duration = duration\n self._level = level\n self._data_source = data_source\n\n if flood_id:\n self._flood_id = flood_id\n else:\n self._flood_id = FLOOD_ID_FORMAT.format(\n event_time=self.event_time,\n duration=self.duration,\n level=self.level)\n self._hazard_features = 0\n\n if not output_dir:\n output_dir = os.path.dirname(geojson_file_path)\n\n if not output_basename:\n output_basename = FLOOD_HAZARD_DEFAULT_BASENAME\n\n geojson_file = '{basename}.json'.format(basename=output_basename)\n\n # QgsVector can already read GeoJSON\n # InaSAFE layer are able to process GeoJSON\n self.hazard_path = os.path.join(output_dir, geojson_file)\n\n if not geojson_file_path == self.hazard_path:\n # Copy Hazard file first\n copy(geojson_file_path, self.hazard_path)\n\n # Insert metadata\n self.copy_style()\n self.write_keywords()\n\n # Calculate potential hazard features\n self.calculate_hazard_features()",
"def __init__(self, name, bart, bdamage, bangle, bspeed, evil):\n\t\tWeapon.__init__(self)\n\t\tself.name = name\n\t\tself.bart = bart\n\t\tself.bdamage, self.bangle, self.bspeed = bdamage, bangle, bspeed\n\t\tself.evil = evil\n\t\tself.shootangle = 0",
"def __init__(self, action=None, data=None):\n if action:\n self._action = action\n else:\n self._action = None\n if data:\n self._data = data\n else:\n self._data = {}",
"def __init__(self,\n theId,\n theInstName,\n theInstDep,\n theAttr,\n theCallback,\n thePriority=Priority.DEFAULT):\n self.id = theId\n \"\"\"\n :type: int\n\n Dependency ID\n \"\"\"\n\n self.instName = theInstName\n \"\"\"\n :type: str\n\n Dependency instance name that has dependecies\n \"\"\"\n\n self.instDep = theInstDep\n \"\"\"\n :type: str\n\n Dependency instance name that has dependant attributes\n \"\"\"\n\n self.attr = theAttr\n \"\"\"\n :type: list\n\n List with dependant attributes\n \"\"\"\n\n self.callback = theCallback\n \"\"\"\n :type: func\n\n Notification to be called when attributes are updated\n \"\"\"\n\n self.priority = thePriority\n \"\"\"\n :type: Priority\n\n Dependency priority\n \"\"\"",
"def __init__(self):\n self._field, self.ships = create_field()\n self._withships = field_with_ships(self._field)",
"def __init__(self, *args):\n this = _Field.new_Material(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n self.basename = self.basename or self.__class__.__name__.lower()\n self.set_fields()",
"def __init__(self, first_name, last_name, sex, age):\n self.first_name = first_name\n self.last_name = last_name\n self.sex = sex\n self.age = age",
"def __init__(self, name, ingredients, equipment, instructions, servings, cooking_time):\n\t\tself.name = name\n\t\tself.ingredients = ingredients \n\t\tself.equipment = equipment\n\t\tself.instructions = instructions\n\t\tself.servings = servings\n\t\t# self.nutrition_info, loops thru ingredients dict to retrieve nutrition info from each ingredient\n\t\tself.cooking_time = cooking_time",
"def __init__(self, wiki, getinfo=None, **data):\n self.wiki = wiki\n self.title = None\n self.__dict__.update(data)\n if getinfo is None:\n getinfo = GETINFO\n if getinfo:\n self.__dict__.update(self.info())",
"def __init__(self):\n this = _coin.new_SoSFName()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self):\n this = _coin.new_SoNurbsProfile()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def __init__(self, *args):\n this = _wali.new_Trans(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, wiki, title=None, getinfo=None, **data):\n self.wiki = wiki\n self.title = title\n self.__dict__.update(data)\n if getinfo is None:\n getinfo = GETINFO\n if getinfo:\n self.__dict__.update(self.info())",
"def __init__(self, ingredient_full=None, ingredient_name=None, amount=None,\n units=None):\n self.ingredient_full = ingredient_full\n self.ingredient = ingredient_name\n self.amount = amount\n self.units = units",
"def __init__(self, weight, fuel, fuel_consumption):\n self.weight = weight\n self.fuel = fuel\n self.fuel_consumption = fuel_consumption",
"def __init__(self, *args):\n this = _coin.new_ScXMLReferenceDataObj(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a string representation of a Funder object.
|
def __str__(self):
return "Funder #{id}: {name}".format(id=self.id, name=self.name)
|
[
"def __str__(self):\n\n out = \"\"\n\n # Include source object if set\n if self.source_object:\n out += \"({}/{}) -\".format(self.source_object.type.name, self.source_object.value)\n\n # Add fact type\n out += \"[{}\".format(self.type.name)\n\n # Add value if set\n if self.value and not self.value.startswith(\"-\"):\n out += \"/{}\".format(self.value)\n out += \"]\"\n\n # Add destination object if set\n if self.destination_object:\n\n # Use arrow unless bidirectional\n if self.bidirectional_binding:\n out += \"-\"\n else:\n out += \"->\"\n\n out += \" ({}/{})\".format(self.destination_object.type.name, self.destination_object.value)\n\n return out",
"def __str__(self):\r\n f = ', '.join([str(f) for f in self.features])\r\n if self._name:\r\n return '%s(%s)' %(self._name, f)\r\n # else this is just a Feature wrapper, no need to add anything\r\n return f",
"def __repr__(self):\r\n return f\"This level {self.level} {self.name} has {self.health} hit points\"\\\r\n + f\" remaining. It's a {self.type} type Pokemon.\"",
"def __str__(self) -> str: #__str__:a built-in function that computes the \"informal\" string representations of an object\n s = \"\"\n # Initialize with cofactor name\n s += \"Cofactor Name: {}\\n\".format(self.name) #\\n:new line in string\n s += \"------------ \\n\" #Draw a line between cofactor info (looks cuter!)\n # Print cofactor info, with state_id and relative redox potential\n for i in range(len(self.redox)):\n s += \"Redox State ID: {}, Oxidation Potential: {}\\n\".format(i, self.redox[i])\n\n return s",
"def __repr__(self) -> str:\n args = [\"{}={}\".format(k, v) for k, v in self.__dict__.items()]\n return type(self).__name__ + \"({})\".format(\", \".join(args))",
"def __repr__(self):\n\n result = \"\"\n for dessert in self.desserts:\n result += f\"{dessert}\\n\"\n return result",
"def __repr__(self):\n s = ''\n if self.tags != []:\n s += f'{\".\".join(self.tags)}.'\n s += f'{self.transfer_number}'\n return s",
"def __str__(self):\n return f\"-----------------------------------\\n\" \\\n f\"Ability: {self.name.title()}\\n\" \\\n f\"ID: {self.id}\\n\" \\\n f\"Generation: {self.generation}\\n\" \\\n f\"Short Effect: {self.effect_short}\\n\" \\\n f\"Effect: {self.__format_list(self.effect)}\\n\" \\\n f\"Pokemon: {self.pokemon}\\n\"",
"def __repr__(self):\n log_level = logging.getLevelName(self.log_level)\n fields = [self.time_stamp.astimezone().isoformat(), self.name, self.file_name, str(self.line_number), self.function_name, log_level]\n\n structured_string = \"\"\n if len(self.message) > 0:\n structured_string = self.message\n if len(self.structured_record) > 0:\n json_string = json.dumps(self.structured_record)\n structured_string += f\"{json_string} {structured_sentinel}\"\n if len(structured_string) > 0:\n fields.append(structured_string)\n\n output_string = \" - \".join(fields)\n return output_string",
"def dumps(obj, **kwargs) -> str:\n return FREDEncoder(**kwargs).encode(obj)",
"def __str__(self):\n return f\"-----------------------------------\\n\" \\\n f\"Stat: {self.name.title()}\\n\" \\\n f\"ID: {self.id}\\n\" \\\n f\"Is Battle Only: {self.is_battle_only}\\n\"",
"def debugstr( self ):\n\t\treturn repr( self ) + ' ' + str( self )",
"def __str__(self):\n return str(self.unitName + ' (' + self.hexLocation + ')')",
"def __str__(self):\n self.creatures.sort(creatureComp)\n s = self._fullname() + \":\"\n for ci in self.creatures:\n s += \" \" + str(ci)\n for ci in self.removed:\n s += \" \" + str(ci) + \"-\"\n return s",
"def __str__(self):\r\n formatted_str = \"\"\r\n\r\n goalkeepers = [player for player in self._squad if player.position == 'Goalkeeper']\r\n defenders = [player for player in self._squad if player.position == 'Defender']\r\n midfielders = [player for player in self._squad if player.position == 'Midfielder']\r\n forwards = [player for player in self._squad if player.position == 'Forward']\r\n pos_lists = {\"Goalkeepers\": goalkeepers,\r\n \"Defenders\": defenders,\r\n \"Midfielders\": midfielders,\r\n \"Forwards\": forwards}\r\n\r\n for pos, player_list in pos_lists.items():\r\n formatted_str += f\"============{pos}==========\\n\"\r\n for player in player_list:\r\n formatted_str += f\"Player: {player.name}, Cost: {player.cost}\\n\"\r\n formatted_str += \"\\n\"\r\n\r\n return formatted_str",
"def __str__(self):\n s = \"{0:15s} {1:30s}\".format(self.type, self.name)\n if (self.quantity):\n s += \" {0:10s}\".format(str(self.quantity))\n if (self.pct):\n s += \" ({0:5.1f}%)\".format(self.pct)\n if (len(self.properties) > 0):\n prop_strs = []\n for e in sorted(self.properties.keys()):\n prop_strs.append(self.properties[e].short_str())\n s += \" (\" + \", \".join(prop_strs) + \")\"\n return s",
"def __str__(self):\n return unit_format.Generic.to_string(self)",
"def _build_dump(self, indent: str) -> str:\n s = f\"<{type(self).__name__}@{id(self):x} {self.name}\\n\"\n if self.parent:\n parent_str = self.parent._build_dump(indent + ' ')\n else:\n parent_str = \"None\"\n s += f\"{indent}parent={parent_str}\\n\"\n s += f\"{indent}variables=[\\n\"\n for n, v in self._variables.items():\n s += f\"{indent} '{n}'={v:#x} {'(RO)' if n in self._ro_variables else '(RW)'}\\n\"\n s += f\"{indent}]>\"\n return s",
"def __repr__(self):\r\n return str(vars(self))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Instantiate SW and AW kernels according to parameters and flavour. Kernel flags are set based on the flavour.
|
def abb_init_kernels(L, U, N, flavour, plotname):
sw = ABBSW()
aw = ABBAW()
for k in [ sw, aw ]:
k.L, k.U = L, U
k.setup(MidPoint(), N)
k.measurements('kernels/abb/%s.csv' % plotname, plotname)
flags = flavours[flavour]
for flag in flags:
setattr(k, flag, flags[flag])
return sw, aw
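
A hedged sketch of the flavour-to-flags pattern used above, with a stand-in kernel class and invented flavour and flag names; only the setattr-based application of a flags dict mirrors the original.

# Stand-in for ABBSW/ABBAW; the real kernel classes are defined elsewhere.
class DummyKernel:
    pass

# Invented flavour table for illustration; the original `flavours` dict is
# defined elsewhere in the module and its contents are not shown here.
flavours = {
    "plain":    {"balanced": False, "adjusted": False},
    "balanced": {"balanced": True,  "adjusted": False},
}

def apply_flavour(kernel, flavour):
    # Same pattern as above: copy each flag from the table onto the kernel.
    for flag, value in flavours[flavour].items():
        setattr(kernel, flag, value)
    return kernel

kernel = apply_flavour(DummyKernel(), "balanced")
assert kernel.balanced and not kernel.adjusted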
|
[
"def _compile_kernels(self) -> None:\n ...",
"def _instantiate_kernels(self):\n self.kernels = {}\n for language in self.indus_languages:\n for code_shop in self.get_shops(language):\n self.kernels['{}_{}'.format(language, code_shop)] = DiamanKernel(language,\n code_shop)",
"def build_kernel(self):\n ...",
"def init_configurables(self):\n self.kernel_spec_manager = KernelSpecManager(parent=self)\n\n self.seed_notebook = None\n if self.seed_uri is not None:\n # Note: must be set before instantiating a SeedingMappingKernelManager\n self.seed_notebook = self._load_notebook(self.seed_uri)\n\n # Only pass a default kernel name when one is provided. Otherwise,\n # adopt whatever default the kernel manager wants to use.\n kwargs = {}\n if self.default_kernel_name:\n kwargs['default_kernel_name'] = self.default_kernel_name\n\n self.kernel_spec_manager = self.kernel_spec_manager_class(\n parent=self,\n )\n\n self.kernel_manager = self.kernel_manager_class(\n parent=self,\n log=self.log,\n connection_dir=self.runtime_dir,\n kernel_spec_manager=self.kernel_spec_manager,\n **kwargs\n )\n\n # Detect older version of notebook\n func = getattr(self.kernel_manager, 'initialize_culler', None)\n if not func:\n self.log.warning(\"Older version of Notebook detected - idle kernels will not be culled. \"\n \"Culling requires Notebook >= 5.1.0.\")\n\n self.session_manager = SessionManager(\n log=self.log,\n kernel_manager=self.kernel_manager\n )\n\n self.kernel_session_manager = self.kernel_session_manager_class(\n parent=self,\n log=self.log,\n kernel_manager=self.kernel_manager,\n config=self.config, # required to get command-line options visible\n **kwargs\n )\n\n # Attempt to start persisted sessions\n self.kernel_session_manager.start_sessions()\n\n self.contents_manager = None # Gateways don't use contents manager\n\n if self.prespawn_count:\n if self.max_kernels and self.prespawn_count > self.max_kernels:\n raise RuntimeError('cannot prespawn {}; more than max kernels {}'.format(\n self.prespawn_count, self.max_kernels)\n )\n\n api_module = self._load_api_module(self.api)\n func = getattr(api_module, 'create_personality')\n self.personality = func(parent=self, log=self.log)\n\n self.personality.init_configurables()",
"def definekernel():\n time_list, volt_list=importandseparate(10)\n time_sec=makenparray(time_list)\n volt_mV=makenparray(volt_list)\n volt_mV=removeDCoffset(volt_mV)\n kernel, kernel_size=createkernel(time_sec,volt_mV)\n return kernel, kernel_size",
"def manage():\n\n description = [\"Remote IKernel management utility\", \"\",\n \"Currently installed kernels:\"]\n existing_kernels = {}\n\n # Sort so they are always in the same order\n for kernel_name in sorted(ks.find_kernel_specs()):\n if kernel_name.startswith(RIK_PREFIX):\n spec = ks.get_kernel_spec(kernel_name)\n display = \" ['{kernel_name}']: {desc}\".format(\n kernel_name=kernel_name, desc=spec.display_name)\n existing_kernels[kernel_name] = spec\n description.append(display)\n\n # The raw formatter stops lines wrapping\n parser = argparse.ArgumentParser(\n prog='%prog manage', description=\"\\n\".join(description),\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('--show', '-s', help=\"Print the contents of the \"\n \"kernel.\")\n parser.add_argument('--add', '-a', action=\"store_true\", help=\"Add a new \"\n \"kernel according to other commandline options.\")\n parser.add_argument('--delete', '-d', help=\"Remove the kernel and delete \"\n \"the associated kernel.json.\")\n parser.add_argument('--kernel_cmd', '-k', help=\"Kernel command \"\n \"to install.\")\n parser.add_argument('--name', '-n', help=\"Name to identify the kernel,\"\n \"e.g. 'Python 2.7'.\")\n parser.add_argument('--language', '-l', help=\"Explicitly specify the \"\n \"language of the kernel.\")\n parser.add_argument('--cpus', '-c', type=int, help=\"Launch the kernel \"\n \"as a multi-core job with this many cores if > 1.\")\n parser.add_argument('--pe', help=\"Parallel environment to use on when\"\n \"running on gridengine.\")\n parser.add_argument('--host', '-x', help=\"The hostname or ip address \"\n \"running through an SSH connection. For non standard \"\n \"ports use host:port.\")\n parser.add_argument('--interface', '-i',\n choices=['local', 'ssh', 'pbs', 'sge', 'slurm', 'htcondor'],\n help=\"Specify how the remote kernel is launched.\")\n parser.add_argument('--system', help=\"Install the kernel into the system \"\n \"directory so that it is available for all users. \"\n \"Might need admin privileges.\", action='store_true')\n parser.add_argument('--workdir', help=\"Directory in which to start the \"\n \"kernel. If not specified it will use the current \"\n \"directory. This is important if the local and remote \"\n \"filesystems differ.\")\n parser.add_argument('--remote-precmd', help=\"Command to execute on the \"\n \"remote host before launching the kernel, but after \"\n \"changing to the working directory.\")\n parser.add_argument('--remote-launch-args', help=\"Arguments to add to the \"\n \"command that launches the remote session, i.e. the \"\n \"ssh or qlogin command, such as '-l h_rt=24:00:00' to \"\n \"limit job time on GridEngine jobs.\")\n parser.add_argument('--tunnel-hosts', '-t', nargs='+', help=\"Tunnel the \"\n \"connection through the given ssh hosts before \"\n \"starting the endpoint interface. Works with any \"\n \"interface. 
For non standard ports use host:port.\")\n parser.add_argument('--verbose', '-v', action='store_true', help=\"Running \"\n \"kernel will produce verbose debugging on the console.\")\n\n # Temporarily remove 'manage' from the arguments\n raw_args = sys.argv[:]\n sys.argv.remove('manage')\n args = parser.parse_args()\n sys.argv = raw_args\n\n if args.add:\n kernel_name = add_kernel(args.interface, args.name, args.kernel_cmd,\n args.cpus, args.pe, args.language, args.system,\n args.workdir, args.host, args.remote_precmd,\n args.remote_launch_args, args.tunnel_hosts,\n args.verbose)\n print(\"Installed kernel {0}.\".format(kernel_name))\n elif args.delete:\n if args.delete in existing_kernels:\n delete_kernel(args.delete)\n else:\n print(\"Can't delete {0}\".format(args.delete))\n print(\"\\n\".join(description[2:]))\n elif args.show:\n if args.show in existing_kernels:\n show_kernel(args.show)\n else:\n print(\"Kernel {0} doesn't exist\".format(args.show))\n print(\"\\n\".join(description[2:]))\n else:\n parser.print_help()",
"def setup_preprocessor(self, nb, resources, km=None):\n path = resources.get('metadata', {}).get('path', '') or None\n self.nb = nb\n\n # clear display_id map\n self._display_id_map = {}\n self.widget_state = {}\n self.widget_buffers = {}\n\n if km is None:\n kernel_name = nb['metadata']['kernelspec']['name']\n self.km, self.kc = start_new_kernel(cwd=path, \n kernel_name=kernel_name)\n try:\n # Yielding unbound args for more easier understanding\n # and downstream consumption\n yield nb, self.km, self.kc\n finally:\n # Below code commented out so that we don't stop the kernel\n\n #self.kc.stop_channels()\n #self.km.shutdown_kernel(now=self.shutdown_kernel == 'immediate')\n\n #for attr in ['nb', 'km', 'kc']:\n # delattr(self, attr)\n pass\n else:\n self.km = km\n if not km.has_kernel:\n km.start_kernel(extra_arguments=self.extra_arguments, **kwargs)\n self.kc = km.client()\n #print('has a kernel')\n self.kc.start_channels()\n try:\n self.kc.wait_for_ready(timeout=self.startup_timeout)\n except RuntimeError:\n self.kc.stop_channels()\n raise\n self.kc.allow_stdin = False\n try:\n yield nb, self.km, self.kc\n finally:\n #print(\"hereiam?\"*10)\n pass",
"def make_kernel(namespace, kernel_factory, \n out_stream_factory=None, display_hook_factory=None):\n # If running under pythonw.exe, the interpreter will crash if more than 4KB\n # of data is written to stdout or stderr. This is a bug that has been with\n # Python for a very long time; see http://bugs.python.org/issue706263.\n if sys.executable.endswith('pythonw.exe'):\n blackhole = file(os.devnull, 'w')\n sys.stdout = sys.stderr = blackhole\n sys.__stdout__ = sys.__stderr__ = blackhole \n\n # Install minimal exception handling\n sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor', \n ostream=sys.__stdout__)\n\n # Create a context, a session, and the kernel sockets.\n io.raw_print(\"Starting the kernel at pid:\", os.getpid())\n context = zmq.Context()\n # Uncomment this to try closing the context.\n # atexit.register(context.close)\n session = Session(username=u'kernel')\n\n reply_socket = context.socket(zmq.XREP)\n xrep_port = bind_port(reply_socket, namespace.ip, namespace.xrep)\n io.raw_print(\"XREP Channel on port\", xrep_port)\n\n pub_socket = context.socket(zmq.PUB)\n pub_port = bind_port(pub_socket, namespace.ip, namespace.pub)\n io.raw_print(\"PUB Channel on port\", pub_port)\n\n req_socket = context.socket(zmq.XREQ)\n req_port = bind_port(req_socket, namespace.ip, namespace.req)\n io.raw_print(\"REQ Channel on port\", req_port)\n\n hb = Heartbeat(context, (namespace.ip, namespace.hb))\n hb.start()\n hb_port = hb.port\n io.raw_print(\"Heartbeat REP Channel on port\", hb_port)\n\n # Helper to make it easier to connect to an existing kernel, until we have\n # single-port connection negotiation fully implemented.\n io.raw_print(\"To connect another client to this kernel, use:\")\n io.raw_print(\"-e --xreq {0} --sub {1} --rep {2} --hb {3}\".format(\n xrep_port, pub_port, req_port, hb_port))\n\n # Redirect input streams and set a display hook.\n if out_stream_factory:\n sys.stdout = out_stream_factory(session, pub_socket, u'stdout')\n sys.stderr = out_stream_factory(session, pub_socket, u'stderr')\n if display_hook_factory:\n sys.displayhook = display_hook_factory(session, pub_socket)\n\n # Create the kernel.\n kernel = kernel_factory(session=session, reply_socket=reply_socket, \n pub_socket=pub_socket, req_socket=req_socket)\n kernel.record_ports(xrep_port=xrep_port, pub_port=pub_port,\n req_port=req_port, hb_port=hb_port)\n return kernel",
"def _prepare_static_mode(self):\n place = paddle.get_device()\n if place == 'cpu':\n self._config.disable_gpu()\n else:\n self._config.enable_use_gpu(100, 0)\n self._config.switch_use_feed_fetch_ops(False)\n self._config.disable_glog_info()\n self.predictor = paddle.inference.create_predictor(self._config)\n self.input_handles = [\n self.predictor.get_input_handle(name)\n for name in self.predictor.get_input_names()\n ]\n self.output_handle = [\n self.predictor.get_output_handle(name)\n for name in self.predictor.get_output_names()\n ]",
"def generate_kernel(kernel, precisions, stockham_aot):\n\n args = [stockham_aot]\n # 2D single kernels always specify threads per transform\n if isinstance(kernel.length, list):\n args.append(','.join([str(f) for f in kernel.factors[0]]))\n args.append(','.join([str(f) for f in kernel.factors[1]]))\n args.append(','.join([str(f) for f in kernel.threads_per_transform]))\n else:\n args.append(','.join([str(f) for f in kernel.factors]))\n # 1D kernels might not, and need to default to 'uwide'\n threads_per_transform = getattr(kernel,'threads_per_transform', {\n 'uwide': kernel.length // min(kernel.factors),\n 'wide': kernel.length // max(kernel.factors),\n 'tall': 0,\n 'consolidated': 0\n }[getattr(kernel,'flavour', 'uwide')])\n args.append(str(threads_per_transform))\n\n # default half_lds to True only for CS_KERNEL_STOCKHAM\n half_lds = getattr(kernel, 'half_lds', kernel.scheme == 'CS_KERNEL_STOCKHAM')\n\n filename = kernel_file_name(kernel)\n\n args.append(str(kernel.threads_per_block))\n args.append(str(getattr(kernel, 'block_width', 0)))\n args.append('1' if half_lds else '0')\n args.append(kernel.scheme)\n args.append(filename)\n\n proc = subprocess.run(args=args, stdout=subprocess.PIPE, check=True)\n clang_format_file(filename)\n\n import json\n launchers = json.loads(proc.stdout.decode('ascii'))\n\n cpu_functions = []\n data = Variable('data_p', 'const void *')\n back = Variable('back_p', 'void *')\n for launcher_dict in launchers:\n launcher = NS(**launcher_dict)\n\n factors = launcher.factors\n length = launcher.lengths[0] if len(launcher.lengths) == 1 else (launcher.lengths[0], launcher.lengths[1])\n transforms_per_block = launcher.transforms_per_block\n threads_per_block = launcher.threads_per_block\n threads_per_transform = threads_per_block // transforms_per_block\n half_lds = launcher.half_lds\n scheme = launcher.scheme\n sbrc_type = launcher.sbrc_type\n sbrc_transpose_type = launcher.sbrc_transpose_type\n precision = 'dp' if launcher.double_precision else 'sp'\n runtime_compile = kernel.runtime_compile\n use_3steps_large_twd = getattr(kernel, '3steps', None)\n block_width = getattr(kernel, 'block_width', 0)\n\n params = LaunchParams(transforms_per_block, threads_per_block, threads_per_transform, half_lds)\n\n # make 2D list of threads_per_transform to populate FFTKernel\n tpt_list = kernel.threads_per_transform if scheme == 'CS_KERNEL_2D_SINGLE' else [threads_per_transform, 0]\n\n f = Function(name=launcher.name,\n arguments=ArgumentList(data, back),\n meta=NS(\n factors=factors,\n length=length,\n params=params,\n precision=precision,\n runtime_compile=runtime_compile,\n scheme=scheme,\n threads_per_block=threads_per_block,\n transforms_per_block=transforms_per_block,\n threads_per_transform=tpt_list,\n transpose=sbrc_transpose_type,\n use_3steps_large_twd=use_3steps_large_twd,\n block_width=block_width,\n ))\n\n cpu_functions.append(f)\n\n return cpu_functions",
"def precompile_process():\r\n SystemParam.MODEL = \"Heisenberg\"\r\n #SystemParam.MODEL= \"Ising\"\r\n SystemParam.SYMMETRY = \"Z2\"\r\n SystemParam.USE_CUSTOM_RANDOM = False\r\n SystemParam.USE_REFLECTION = False\r\n SystemParam.NUM_OF_THREADS = None\r\n SystemParam.only_NN = True\r\n SystemParam.only_NNN = False",
"def create_kernel_sets(API):\n kernel_sets = ( cMultiplyScalar() + \n cCopy() + \n cTensorCopy() +\n cTensorMultiply() + \n cMultiplyVec() +\n cHypot() + \n cAddScalar() + \n cSelect() + \n cAddVec() + \n cMultiplyVecInplace() + \n cMultiplyConjVecInplace() +\n cMultiplyRealInplace() + \n cMultiplyConjVec() + \n cDiff() + \n cSqrt() + \n cAnisoShrink() + \n cRealShrink() +\n cSpmv() + \n cSpmvh() + \n cCopyColumn()+\n cHadamard())\n# if 'cuda' is API:\n# print('Select cuda interface')\n# kernel_sets = atomic_add.cuda_add + kernel_sets\n# elif 'ocl' is API:\n# print(\"Selecting opencl interface\")\n# kernel_sets = atomic_add.ocl_add + kernel_sets\n kernel_sets = atomic_add(API) + kernel_sets\n return kernel_sets",
"def get_kernels():\n return ['linear', 'rbf']",
"def add_kernel(interface, name, kernel_cmd, cpus=1, pe=None, language=None,\n system=False, workdir=None, host=None, precmd=None,\n launch_args=None, tunnel_hosts=None, verbose=False):\n kernel_name = []\n display_name = []\n argv = [sys.executable, '-m', 'remote_ikernel']\n\n # How to connect to kernel\n if interface == 'local':\n argv.extend(['--interface', 'local'])\n kernel_name.append('local')\n display_name.append(\"Local\")\n elif interface == 'pbs':\n argv.extend(['--interface', 'pbs'])\n display_name.append('PBS')\n elif interface == 'sge':\n argv.extend(['--interface', 'sge'])\n kernel_name.append('sge')\n display_name.append(\"GridEngine\")\n elif interface == 'htcondor':\n argv.extend(['--interface', 'htcondor'])\n kernel_name.append('htcondor')\n display_name.append(\"HTCondor\")\n elif interface == 'ssh':\n if host is None:\n raise KeyError('A host is required for ssh.')\n argv.extend(['--interface', 'ssh'])\n argv.extend(['--host', host])\n kernel_name.append('ssh')\n kernel_name.append(host)\n display_name.append(\"SSH\")\n display_name.append(host)\n elif interface == 'slurm':\n argv.extend(['--interface', 'slurm'])\n kernel_name.append('slurm')\n display_name.append(\"SLURM\")\n else:\n raise ValueError(\"Unknown interface {0}\".format(interface))\n\n display_name.append(name)\n kernel_name.append(re.sub(r'\\W', '', name).lower())\n\n if pe is not None:\n argv.extend(['--pe', pe])\n kernel_name.append(pe)\n display_name.append(pe)\n\n if cpus and cpus > 1:\n argv.extend(['--cpus', '{0}'.format(cpus)])\n kernel_name.append('{0}'.format(cpus))\n display_name.append('{0} CPUs'.format(cpus))\n\n if workdir is not None:\n argv.extend(['--workdir', workdir])\n\n if precmd is not None:\n argv.extend(['--precmd', precmd])\n\n if launch_args is not None:\n argv.extend(['--launch-args', launch_args])\n\n if tunnel_hosts:\n # This will be a list of hosts\n kernel_name.append('via_{0}'.format(\"_\".join(tunnel_hosts)))\n display_name.append(\"(via {0})\".format(\" \".join(tunnel_hosts)))\n argv.extend(['--tunnel-hosts'] + tunnel_hosts)\n\n if verbose:\n argv.extend(['--verbose'])\n\n # protect the {connection_file} part of the kernel command\n kernel_cmd = kernel_cmd.replace('{connection_file}',\n '{host_connection_file}')\n argv.extend(['--kernel_cmd', kernel_cmd])\n\n # remote_ikernel needs the connection file too\n argv.append('{connection_file}')\n\n # Prefix all kernels with 'rik_' for management.\n kernel_name = RIK_PREFIX + '_'.join(kernel_name)\n # Having an @ in the string messes up the javascript;\n # so get rid of evrything just in case.\n kernel_name = re.sub(r'\\W', '_', kernel_name)\n kernel_json = {\n 'display_name': \" \".join(display_name),\n 'argv': argv,\n }\n\n if language is not None:\n kernel_json['language'] = language\n\n # Put the commandline in so that '--show' will show how to recreate\n # the kernel\n kernel_json['remote_ikernel_argv'] = sys.argv\n\n # False attempts a system install, otherwise install as the current user\n if system:\n username = False\n else:\n username = getpass.getuser()\n\n # kernel.json file installation\n with tempdir.TemporaryDirectory() as temp_dir:\n os.chmod(temp_dir, 0o755) # Starts off as 700, not user readable\n\n with open(path.join(temp_dir, 'kernel.json'), 'w') as kernel_file:\n json.dump(kernel_json, kernel_file, sort_keys=True, indent=2)\n\n ks.install_kernel_spec(temp_dir, kernel_name,\n user=username, replace=True)\n\n return kernel_name",
"def set_kernels_array_and_get_oldest_kernel(self):\n \n linuxFlavor = self.host.cat_etc_issue()\n #Initialize kernels and oldest kernel from Array\n if linuxFlavor == \"redhat6\":\n kernels = rh6kernels\n oldestKernelFromArr = kernels[0]\n # convert into rigth format and store in the same variable\n oldestKernelFromArr = self.host.getKernelNoRpm(oldestKernelFromArr)\n elif linuxFlavor == \"redhat5\":\n kernels = rh5kernels\n oldestKernelFromArr = kernels[0]\n oldestKernelFromArr = self.host.getKernelNo86(oldestKernelFromArr)\n elif linuxFlavor == \"sles11sp1\":\n kernels = sles11sp1Kernels\n oldestKernelFromArr = kernels[0]\n oldestKernelFromArr = get_sles_kernel_from_rpm(oldestKernelFromArr)\n elif linuxFlavor == \"sles11sp2\":\n kernels = sles11sp2Kernels\n oldestKernelFromArr = kernels[0]\n oldestKernelFromArr = get_sles_kernel_from_rpm(oldestKernelFromArr)\n elif linuxFlavor == \"sles11\":\n kernels = sles11Kernels\n oldestKernelFromArr = kernels[0]\n oldestKernelFromArr = get_sles_kernel_from_rpm(oldestKernelFromArr)\n elif linuxFlavor == \"sles10sp4\":\n kernels = sles10sp4Kernels\n oldestKernelFromArr = kernels[0]\n oldestKernelFromArr = get_sles_kernel_from_rpm(oldestKernelFromArr)\n elif linuxFlavor == \"sles10sp3\":\n kernels = sles10sp3Kernels\n oldestKernelFromArr = kernels[0]\n oldestKernelFromArr = get_sles_kernel_from_rpm(oldestKernelFromArr)\n else:\n print \"ERR: Unsupported linux flavr '%s',please use it for RH5/RH6/sles only\"%linuxFlavor\n sys.exit(1)\n \n\tself.setArray = 1\n self.kernelsArray = kernels \n return oldestKernelFromArr",
"def select_supported_kernels():\n supported_kernels = SUPPORTED_KERNELS\n kernel_version = get_kernel_version(level=1)\n try:\n instance_type = get_instance_type()\n # in case we are not in EC2, return the default\n # pylint: disable=broad-except\n except Exception:\n return supported_kernels\n\n if instance_type == \"c7g.metal\" and kernel_version == \"4.14\":\n supported_kernels = SUPPORTED_KERNELS_NO_SVE\n\n return supported_kernels",
"def base_launch_kernel(code, xrep_port=0, pub_port=0, req_port=0, hb_port=0,\n independent=False, extra_arguments=[]):\n # Find open ports as necessary.\n ports = []\n ports_needed = int(xrep_port <= 0) + int(pub_port <= 0) + \\\n int(req_port <= 0) + int(hb_port <= 0)\n for i in xrange(ports_needed):\n sock = socket.socket()\n sock.bind(('', 0))\n ports.append(sock)\n for i, sock in enumerate(ports):\n port = sock.getsockname()[1]\n sock.close()\n ports[i] = port\n if xrep_port <= 0:\n xrep_port = ports.pop(0)\n if pub_port <= 0:\n pub_port = ports.pop(0)\n if req_port <= 0:\n req_port = ports.pop(0)\n if hb_port <= 0:\n hb_port = ports.pop(0)\n\n # Build the kernel launch command.\n arguments = [ sys.executable, '-c', code, '--xrep', str(xrep_port), \n '--pub', str(pub_port), '--req', str(req_port),\n '--hb', str(hb_port) ]\n arguments.extend(extra_arguments)\n\n # Spawn a kernel.\n if sys.platform == 'win32':\n # Create a Win32 event for interrupting the kernel.\n interrupt_event = ParentPollerWindows.create_interrupt_event()\n arguments += [ '--interrupt', str(int(interrupt_event)) ]\n\n # If using pythonw, stdin, stdout, and stderr are invalid. Popen will\n # fail unless they are suitably redirected. We don't read from the\n # pipes, but they must exist.\n redirect = PIPE if sys.executable.endswith('pythonw.exe') else None\n\n if independent:\n proc = Popen(arguments, \n creationflags=512, # CREATE_NEW_PROCESS_GROUP\n stdout=redirect, stderr=redirect, stdin=redirect)\n else:\n from _subprocess import DuplicateHandle, GetCurrentProcess, \\\n DUPLICATE_SAME_ACCESS\n pid = GetCurrentProcess()\n handle = DuplicateHandle(pid, pid, pid, 0, \n True, # Inheritable by new processes.\n DUPLICATE_SAME_ACCESS)\n proc = Popen(arguments + ['--parent', str(int(handle))],\n stdout=redirect, stderr=redirect, stdin=redirect)\n\n # Attach the interrupt event to the Popen objet so it can be used later.\n proc.win32_interrupt_event = interrupt_event\n\n # Clean up pipes created to work around Popen bug.\n if redirect is not None:\n proc.stdout.close()\n proc.stderr.close()\n proc.stdin.close()\n\n else:\n if independent:\n proc = Popen(arguments, preexec_fn=lambda: os.setsid())\n else:\n proc = Popen(arguments + ['--parent'])\n\n return proc, xrep_port, pub_port, req_port, hb_port",
"def create(lucid_kernel=..., blur_kernel=...) -> retval:\n ...",
"def _create_init_kernel(self):\n shape = list(self.kernel_net.impulse.shape)\n shape[2] = self.kernel_net.kernel_size\n kernel_type = self.init_kernel_type\n kernel = create_init_kernel(kernel_type, self.scale_factor, shape)\n return kernel.cuda()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load a texture cube as described by the supplied ``TextureDescription``
|
def load(self):
pos_x = self._load_face(self.meta.pos_x, face_name="pos_x")
pos_y = self._load_face(self.meta.pos_y, face_name="pos_y")
pos_z = self._load_face(self.meta.pos_z, face_name="pos_z")
neg_x = self._load_face(self.meta.neg_x, face_name="neg_x")
neg_y = self._load_face(self.meta.neg_y, face_name="neg_y")
neg_z = self._load_face(self.meta.neg_z, face_name="neg_z")
self._validate([pos_x, pos_y, pos_z, neg_x, neg_y, neg_z])
texture = self.ctx.texture_cube(
(pos_x.width, pos_x.height),
pos_x.components,
pos_x.data + neg_x.data + pos_y.data + neg_y.data + pos_z.data + neg_z.data,
)
texture.extra = {"meta": self.meta}
if self.meta.mipmap_levels is not None:
self.meta.mipmap = True
if self.meta.mipmap:
if isinstance(self.meta.mipmap_levels, tuple):
texture.build_mipmaps(*self.meta.mipmap_levels)
else:
texture.build_mipmaps()
if self.meta.anisotropy:
texture.anisotropy = self.meta.anisotropy
return texture
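
For context, a minimal standalone sketch of creating a cube texture directly, assuming ctx is a moderngl context (the texture_cube, build_mipmaps and anisotropy calls above suggest that, but the library is not named in the snippet). Face data is packed in the same +x, -x, +y, -y, +z, -z order as in load().

import moderngl

ctx = moderngl.create_standalone_context()

# Six solid-colour 2x2 RGB faces, concatenated in the order used above.
size, components = (2, 2), 3
face_colours = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
                (255, 255, 0), (0, 255, 255), (255, 0, 255)]
data = b"".join(bytes(colour) * (size[0] * size[1]) for colour in face_colours)

texture = ctx.texture_cube(size, components, data)
texture.build_mipmaps()
texture.anisotropy = 4.0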
|
[
"def make_cube_1(texture, texture_index): \n glBindTexture(GL_TEXTURE_2D,texture[texture_index])\t \n # Front Face (Each texture's corner is matched a quad's corner.) \n glBegin(GL_QUADS)\t \n\tglTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0)\t# Bottom Left Of The Texture and Quad \n\tglTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0)\t# Bottom Right Of The Texture and Quad \n\tglTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0)\t# Top Right Of The Texture and Quad \n\tglTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0)\t# Top Left Of The Texture and Quad\t \n\tglEnd();",
"def readFromTextureNode(*args, **kwargs):\n \n pass",
"def polyCube(texture=int, depth=\"string\", axis=\"string\", width=\"string\", createUVs=int, height=\"string\", constructionHistory=bool, subdivisionsY=int, subdivisionsX=int, subdivisionsZ=int, name=\"string\"):\n pass",
"def loadTexture(filename, program):\n img = Image.open(filename) \n imgData = np.array(list(img.getdata()), np.int8)\n texture = glGenTextures(1)\n\n glUseProgram(program.pointer)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img.size[0], img.size[1], \n 0, GL_RGB, GL_UNSIGNED_BYTE, imgData)\n\n glUseProgram(0)\n return texture",
"def update_fog_cube_texture(texture_path):\n for texture_name in bpy.data.textures.keys():\n # Change all possible textures since multiple copies may exist\n if texture_name.startswith(\"VoxelMaterialWT2\"):\n print(\"Texture name: \" + texture_name)\n print(\"BVOX file: \" + texture_path)\n bpy.data.textures[texture_name].voxel_data.filepath = texture_path",
"def from_file(klass, filename):\n surface = pygame.image.load(filename).convert_alpha()\n return Texture(surface)",
"def load_texture_by_id( self, id, texture=None ):\n if id in (\n 'textures/common/hintskip',\n 'textures/common/clip',\n 'textures/common/botclip',\n 'textures/common/nodraw',\n 'textures/common/skip',\n 'textures/common/donotenter',\n 'textures/common/invisible',\n 'textures/common/trigger',\n ):\n return self.brush_class( [ ('surfaceparam','nodraw')] )\n if texture is None:\n texture = self.textures[id]\n relative = (b''.join( texture['filename'] )).decode('utf-8')\n \n img = None\n img = self.load_script( id, relative )\n if img is None:\n img = self._load_image_file( relative )\n if not img:\n log.warn( \"Unable to find Image #%s: %s\", id, relative )\n return img",
"def process(\n cube: cli.inputcube,\n *,\n nbhood_radius: float = 20000.0,\n textural_threshold: float = 0.05,\n diagnostic_threshold: float = 0.8125,\n model_id_attr: str = None,\n):\n\n from improver.utilities.textural import FieldTexture\n\n field_texture = FieldTexture(\n nbhood_radius,\n textural_threshold,\n diagnostic_threshold,\n model_id_attr=model_id_attr,\n )(cube)\n return field_texture",
"def AddTexture(self):\n\t\t\n\t\tpass",
"def LoadTextures(): \n image_1 = open(\"/home/mikeodf/constr/images_opengl/steel_ball3.jpg\") \n image_2 = open(\"/home/mikeodf/constr/images_opengl/steel_green_ball3.jpg\") \n image_3 = open(\"/home/mikeodf/constr/images_opengl/steel_blue_ball3.jpg\") \n image_4 = open(\"/home/mikeodf/constr/images_opengl/steel_red_ball3.jpg\") \n\n image_1 = image_1.tostring(\"raw\", \"RGBX\", 0, -1) # convert bmp to the type needed for textures \n image_2 = image_2.tostring(\"raw\", \"RGBX\", 0, -1) # convert bmp to the type needed for textures \n image_3 = image_3.tostring(\"raw\", \"RGBX\", 0, -1) # convert bmp to the type needed for textures \n image_4 = image_4.tostring(\"raw\", \"RGBX\", 0, -1) # convert bmp to the type needed for textures \n glGenTextures(11, texture_1) # Create texture number and names and sizw. \n #===================================== \n texture_setup(image_1, 0, ix, iy) \n texture_setup(image_2, 1, ix, iy) \n texture_setup(image_3, 2, ix, iy) \n texture_setup(image_4, 3, ix, iy)",
"def read_texture(filename, attrs=None):\n filename = os.path.abspath(os.path.expanduser(filename))\n try:\n # initialize the reader using the extension to find it\n reader = get_reader(filename)\n image = standard_reader_routine(reader, filename, attrs=attrs)\n if image.n_points < 2:\n raise AssertionError(\"Problem reading the image with VTK.\")\n return pyvista.image_to_texture(image)\n except (KeyError, AssertionError):\n # Otherwise, use the imageio reader\n pass\n import imageio\n return pyvista.numpy_to_texture(imageio.imread(filename))",
"def load(self, item):\r\n if not isinstance(item, BasicTexture):\r\n logger.error(\"Can only load textures into BasicTexture type items!\")\r\n raise ItemTypeMismatch\r\n\r\n # Convert the image to RGB, make image the valid size for\r\n # OpenGL (if it isn't) and create a valid storable texture\r\n try:\r\n self.image = Image.open(self.filename)\r\n except IOError:\r\n logger.error(\"Texture data file '%s' not found!\" % self.filename)\r\n raise ItemFileNotFound\r\n\r\n self.__ensureRGB()\r\n item.width, item.height = self.image.size\r\n item.components, item.format = self.__getCompFormat()\r\n item.data = self.image.tostring(\"raw\", self.image.mode, 0, -1)",
"def read_texture(filename, attrs=None, progress_bar=False):\n filename = os.path.abspath(os.path.expanduser(filename))\n try:\n # initialize the reader using the extension to find it\n\n image = read(filename, attrs=attrs, progress_bar=progress_bar)\n if image.n_points < 2:\n raise ValueError(\"Problem reading the image with VTK.\")\n return pyvista.Texture(image)\n except (KeyError, ValueError):\n # Otherwise, use the imageio reader\n pass\n\n return pyvista.Texture(_try_imageio_imread(filename)) # pragma: no cover",
"def test_HasTextures(self):\n self.assertTrue(len(Blender.Texture.Get())>0,\"This mesh has no Blender Textures.\")",
"def read_from_cube(self):\n raise Exception(\"No function defined to read this quantity \"\n \"from a .cube file.\")",
"def load_file(self, filename):\n image = pygame.image.load(filename)\n virtual_texture = self.load_image(image)\n self.__filename_map[filename] = virtual_texture\n return virtual_texture",
"def world_texture(hdr_name):\r\n world=bpy.data.worlds['World']\r\n world.use_nodes = True\r\n links = world.node_tree.links\r\n nodes = world.node_tree.nodes\r\n for l in links:\r\n links.remove(l)\r\n for n in nodes:\r\n nodes.remove(n)\r\n world_output = nodes.new(type='ShaderNodeOutputWorld')\r\n background_node = nodes.new(type='ShaderNodeBackground')\r\n if hdr_name[-3:] == 'exr':\r\n background_node.inputs[1].default_value = 100\r\n env_node = nodes.new(type='ShaderNodeTexEnvironment')\r\n env_node.image = bpy.data.images.load(hdr_name)\r\n mapping_node = nodes.new(type='ShaderNodeMapping')\r\n mapping_node.inputs[2].default_value[1] = random.uniform(0, 3.14)\r\n cor_node = nodes.new(type='ShaderNodeTexCoord')\r\n links.new(cor_node.outputs['Generated'],mapping_node.inputs['Vector'])\r\n links.new(mapping_node.outputs['Vector'],env_node.inputs['Vector'])\r\n links.new(env_node.outputs['Color'],background_node.inputs['Color'])\r\n links.new(background_node.outputs['Background'],world_output.inputs['Surface'])\r\n return",
"def test_texture_map_atlas(self):\n device = torch.device(\"cuda:0\")\n\n obj_filename = TUTORIAL_DATA_DIR / \"cow_mesh/cow.obj\"\n\n # Load mesh and texture as a per face texture atlas.\n verts, faces, aux = load_obj(\n obj_filename,\n device=device,\n load_textures=True,\n create_texture_atlas=True,\n texture_atlas_size=8,\n texture_wrap=None,\n )\n atlas = aux.texture_atlas\n mesh = Meshes(\n verts=[verts],\n faces=[faces.verts_idx],\n textures=TexturesAtlas(atlas=[atlas]),\n )\n\n # Init rasterizer settings\n R, T = look_at_view_transform(2.7, 0, 0)\n cameras = FoVPerspectiveCameras(device=device, R=R, T=T)\n\n raster_settings = RasterizationSettings(\n image_size=512,\n blur_radius=0.0,\n faces_per_pixel=1,\n cull_backfaces=True,\n perspective_correct=False,\n )\n\n # Init shader settings\n materials = Materials(device=device, specular_color=((0, 0, 0),), shininess=0.0)\n lights = PointLights(device=device)\n\n # Place light behind the cow in world space. The front of\n # the cow is facing the -z direction.\n lights.location = torch.tensor([0.0, 0.0, 2.0], device=device)[None]\n\n # The HardPhongShader can be used directly with atlas textures.\n rasterizer = MeshRasterizer(cameras=cameras, raster_settings=raster_settings)\n renderer = MeshRenderer(\n rasterizer=rasterizer,\n shader=HardPhongShader(lights=lights, cameras=cameras, materials=materials),\n )\n\n images = renderer(mesh)\n rgb = images[0, ..., :3].squeeze()\n\n # Load reference image\n image_ref = load_rgb_image(\"test_texture_atlas_8x8_back.png\", DATA_DIR)\n\n if DEBUG:\n Image.fromarray((rgb.detach().cpu().numpy() * 255).astype(np.uint8)).save(\n DATA_DIR / \"DEBUG_texture_atlas_8x8_back.png\"\n )\n\n self.assertClose(rgb.cpu(), image_ref, atol=0.05)\n\n # Check gradients are propagated\n # correctly back to the texture atlas.\n # Because of how texture sampling is implemented\n # for the texture atlas it is not possible to get\n # gradients back to the vertices.\n atlas.requires_grad = True\n mesh = Meshes(\n verts=[verts],\n faces=[faces.verts_idx],\n textures=TexturesAtlas(atlas=[atlas]),\n )\n raster_settings = RasterizationSettings(\n image_size=512,\n blur_radius=0.0001,\n faces_per_pixel=5,\n cull_backfaces=True,\n clip_barycentric_coords=True,\n )\n images = renderer(mesh, raster_settings=raster_settings)\n images[0, ...].sum().backward()\n\n fragments = rasterizer(mesh, raster_settings=raster_settings)\n # Some of the bary coordinates are outside the\n # [0, 1] range as expected because the blur is > 0\n self.assertTrue(fragments.bary_coords.ge(1.0).any())\n self.assertIsNotNone(atlas.grad)\n self.assertTrue(atlas.grad.sum().abs() > 0.0)",
"def setTexture(self, texture):\n self._texture = texture"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validates each face, ensuring the component count and data size are the same
|
def _validate(self, faces):
components = faces[0].components
data_size = len(faces[0].data)
for face in faces:
if face.components != components:
raise ImproperlyConfigured(
"Cubemap face textures have different number of components"
)
if len(face.data) != data_size:
raise ImproperlyConfigured(
"Cubemap face textures must all have the same size"
)
return components
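A minimal usage sketch for the validator above, assuming a hypothetical face object that exposes only the two attributes the method reads; the class name and values below are illustrative and not taken from the original code.

# Hypothetical stand-in for a cubemap face: only `components` and `data` matter here.
class _FakeFace:
    def __init__(self, components, data):
        self.components = components
        self.data = data

faces = [_FakeFace(4, b"\x00" * 64) for _ in range(6)]  # six equal-size RGBA faces
# Calling self._validate(faces) on the owning texture object would return 4;
# a face with a different component count or data size raises ImproperlyConfigured.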
|
[
"def is_single_face_valid(img) -> int:\n # TODO stub\n return 0",
"def __check_correctness_face(self, face):\n first_number_face = face[0, 0]\n for number in nditer(face):\n if first_number_face != number:\n return False\n return True",
"def checkDegenerateFaces(self):\n print(\"Checking mesh for degenerate faces...\")\n\n for face in self.faces:\n\n seenPos = set()\n vList = []\n for v in face.adjVerts():\n pos = tuple(v.pos.tolist()) # need it as a hashable type\n if pos in seenPos:\n raise ValueError(\"ERROR: Degenerate mesh face has repeated vertices at position: \" + str(pos))\n else:\n seenPos.add(pos)\n vList.append(v.pos)\n\n # Check for triangular faces with colinear vertices (don't catch other such errors for now)\n if(len(vList) == 3):\n v1 = vList[1] - vList[0]\n v2 = vList[2]-vList[0]\n area = norm(cross(v1, v2))\n if area < 0.0000000001*max((norm(v1),norm(v2))):\n raise ValueError(\"ERROR: Degenerate mesh face has triangle composed of 3 colinear points: \\\n \" + str(vList))\n\n\n print(\" ...test passed\")",
"def test_valid_bounding_box(self):\n detection = TestFaceDetector.defaultDetector.detectOne(image=VLIMAGE_ONE_FACE)\n self.assertBoundingBox(detection.boundingBox)\n detection = TestFaceDetector.defaultDetector.detect(images=[VLIMAGE_ONE_FACE])[0][0]\n self.assertBoundingBox(detection.boundingBox)",
"def validate_components(components: dict):\n # Check that all image components have the same dimensions\n size = None\n for img in components.values():\n if size and img.size != size:\n raise ValueError(\"Image components must have the same dimensions!\")\n else:\n size = img.size",
"def test_faces_containing_size(st: SpaceTime):\n # This is actually only true if the space_time is large enough. WHen it is small enough one node may be two different neighors reducing the total number of faces containing.\n for n in st.faces_containing:\n assert len(st.faces_containing[n]) > 4",
"def test_detect_one_by_area_with_face(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_ONE_FACE, detectArea=GOOD_AREA)\n self.assertFaceDetection(detection, VLIMAGE_ONE_FACE)",
"def test_detect_one_with_image_of_several_faces(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detectOne(image=VLIMAGE_SEVERAL_FACE)\n self.assertFaceDetection(detection, VLIMAGE_SEVERAL_FACE)",
"def box_faces(img):\n k=face_detect(img)\n\n fig,ax = plt.subplots()\n ax.imshow(img)\n\n for i in range(len(k)):\n lst = numsfromrect(k[i])\n ax.add_patch(patches.Rectangle( (lst[0],lst[1]), lst[2]-lst[0], lst[3]-lst[1], fill=False))",
"def _validate(self):\n # check that element connectivity contains integers\n # NOTE: this is neccessary for some plotting functionality\n if not np.issubdtype(self.t[0, 0], np.signedinteger):\n msg = (\"Mesh._validate(): Element connectivity \"\n \"must consist of integers.\")\n raise Exception(msg)\n # check that vertex matrix has \"correct\" size\n if self.p.shape[0] > 3:\n msg = (\"Mesh._validate(): We do not allow meshes \"\n \"embedded into larger than 3-dimensional \"\n \"Euclidean space! Please check that \"\n \"the given vertex matrix is of size Ndim x Nvertices.\")\n raise Exception(msg)\n # check that element connectivity matrix has correct size\n nvertices = {'line': 2, 'tri': 3, 'quad': 4, 'tet': 4, 'hex': 8}\n if self.t.shape[0] != nvertices[self.refdom]:\n msg = (\"Mesh._validate(): The given connectivity \"\n \"matrix has wrong shape!\")\n raise Exception(msg)\n # check that there are no duplicate points\n tmp = np.ascontiguousarray(self.p.T)\n if self.p.shape[1] != np.unique(tmp.view([('', tmp.dtype)]\n * tmp.shape[1])).shape[0]:\n msg = \"Mesh._validate(): Mesh contains duplicate vertices.\"\n warnings.warn(msg)\n # check that all points are at least in some element\n if len(np.setdiff1d(np.arange(self.p.shape[1]), np.unique(self.t))) > 0:\n msg = (\"Mesh._validate(): Mesh contains a vertex \"\n \"not belonging to any element.\")\n raise Exception(msg)",
"def validate(self):\n for ens_mem in self.ensemble_members:\n ens_mem.validate()",
"def test_batch_detect_by_area_with_face(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detect(images=[ImageForDetection(image=VLIMAGE_ONE_FACE, detectArea=GOOD_AREA)])\n assert 1 == len(detection[0])\n self.assertFaceDetection(detection[0], VLIMAGE_ONE_FACE)",
"def validate_box(self, letters):\n \n if 2 in letters.values():\n self.two_letter_boxes += 1\n\n if 3 in letters.values():\n self.three_letter_boxes += 1",
"def test_batch_detect_with_image_of_several_faces(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n detection = detector.detect(images=[VLIMAGE_SEVERAL_FACE])\n self.assertFaceDetection(detection[0], VLIMAGE_SEVERAL_FACE)\n assert 1 == len(detection)\n assert 5 == len(detection[0])",
"def _check_errors(upper_face, lower_face):\n if not (upper_face or lower_face):\n raise ValueError(\n 'Either upper_face or lower_face must not be None.')",
"def check_validity(self):\n\n # light bar should be generally vertical\n if 15 < self.angle < 75 or -75 < self.angle < -15:\n return False\n\n # the ratio of width and height should remain in proper range\n if self.size[0] > self.size[1] * 10 or self.size[1] > self.size[0] * 10:\n return False\n\n # width < height\n if (self.size[0] > self.size[1] and -15 < self.angle < 15) \\\n or (self.size[0] < self.size[1] and (self.angle < -75 or self.angle > 75)):\n return False\n\n # light bar should not be too small\n area = self.size[0] * self.size[1]\n if area < 16 or area > 32400:\n return False\n return True",
"def test_faces_refrence_valid_nodes(st: SpaceTime):\n for f in st.faces:\n for n in f:\n assert n in st.nodes",
"def check_consistent_parameter_dimensions(self):\n if self.indices_per_axis is not None:\n if len(self.indices_per_axis) > len(self.array_shape):\n raise ValueError(\n \"Got len(self.indices_per_axis)=%s > len(self.array_shape)=%s, should be equal.\"\n %\n (len(self.indices_per_axis), len(self.array_shape))\n )\n if self.split_num_slices_per_axis is not None:\n if len(self.split_num_slices_per_axis) > len(self.array_shape):\n raise ValueError(\n (\n \"Got len(self.split_num_slices_per_axis)=%s > len(self.array_shape)=%s,\"\n +\n \" should be equal.\"\n )\n %\n (len(self.split_num_slices_per_axis), len(self.array_shape))\n )\n if self.tile_shape is not None:\n if len(self.tile_shape) != len(self.array_shape):\n raise ValueError(\n \"Got len(self.tile_shape)=%s > len(self.array_shape)=%s, should be equal.\"\n %\n (len(self.tile_shape), len(self.array_shape))\n )\n\n if self.sub_tile_shape is not None:\n if len(self.sub_tile_shape) != len(self.array_shape):\n raise ValueError(\n \"Got len(self.sub_tile_shape)=%s > len(self.array_shape)=%s, should be equal.\"\n %\n (len(self.sub_tile_shape), len(self.array_shape))\n )\n\n if self.max_tile_shape is not None:\n if len(self.max_tile_shape) != len(self.array_shape):\n raise ValueError(\n \"Got len(self.max_tile_shape)=%s > len(self.array_shape)=%s, should be equal.\"\n %\n (len(self.max_tile_shape), len(self.array_shape))\n )\n\n if self.array_start is not None:\n if len(self.array_start) != len(self.array_shape):\n raise ValueError(\n \"Got len(self.array_start)=%s > len(self.array_shape)=%s, should be equal.\"\n %\n (len(self.array_start), len(self.array_shape))\n )",
"def test_detect_one_invalid_rectangle(self):\n for detector in self.detectors:\n with self.subTest(detectorType=detector.detectorType):\n with pytest.raises(LunaSDKException) as exceptionInfo:\n detector.detectOne(image=VLIMAGE_ONE_FACE, detectArea=Rect())\n self.assertLunaVlError(exceptionInfo, LunaVLError.InvalidRect.format(\"Invalid rectangle\"))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Helper function to print the legend according to the invoice type.
|
def _get_legend(self, invoice):
        legend = _('This document is a printed representation of the CFDI')
        if invoice.journal_id.name.split('-')[1] == "NOTA DE CARGO":
legend = _("Nota Cargo")
else:
if invoice.type == 'out_invoice':
legend = _("Factura")
else:
legend = _("Nota Crédito")
return legend + ' ' + invoice.internal_number
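A quick summary of the branch outcomes above, derived directly from the code; the journal naming pattern "<code>-NOTA DE CARGO" is an assumption inferred from the split('-')[1] check.

# Illustrative mapping of branch -> legend prefix (not part of the original module).
LEGEND_BY_CASE = {
    'nota_de_cargo': "Nota Cargo",   # journal name like "VENTAS-NOTA DE CARGO"
    'out_invoice': "Factura",        # customer invoice
    'other': "Nota Crédito",         # any other invoice type
}
# The returned string is the prefix plus a space plus invoice.internal_number.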
|
[
"def getLegendTitle(self):\n\n if self.outlookType.startswith( 'Cat' ):\n return 'Categorical Outlook Legend'\n elif self.outlookType.startswith( 'Prob' ):\n return 'Total Severe Probability Legend (in %)'\n return f'{self.outlookType} Probability Legend (in %)'",
"def _get_legend(self):\n label_lines = [\n \"1.0 factory equals to:\",\n \"1.0 stone furnaces\",\n \"0.5 steel furnaces\",\n \"0.5 electric furnace\",\n \"2.0 assembling machines 1\",\n \"1.(3) assembling machines 2\",\n \"0.8 assembling machines 3\",\n ]\n label = \"\\\\l\".join(label_lines)\n return \"{ legend [shape=none, margin=0, label=\\\"\" + label + \"\\\\l\\\"]; }\"",
"def clegend(self, **extra):\n leg = self.current_figure.legend(**extra)\n self.current_figure_legend = {}\n sf = self.subfigures[self.current_figure] \n sf['legend'] = {}\n for label, line in zip(leg.get_lines(), self.current_figure.get_lines()):\n label.set_picker(5) # 5 pts tolerance\n sf['legend'][label] = line",
"def get_graph_legend(self, item):\n return item['description']",
"def _get_legend(dconf_dict, fig_def):\n legend = \"\"\n for key in dconf_dict:\n if ((key not in UNIV_DCONF_KEYS) and\n (key not in fig_def['locked variables'])):\n legend = legend + dconf_dict[key] + ', '\n legend = legend[:legend.rfind(', ')]\n return legend",
"def make_legend(parents,color_lookup):\n legend = []\n for p in parents:\n pcolor=color_lookup[p]\n legend+=[Line2D([0],[0],color=pcolor,lw=4,label=p)]\n #legend+=[Line2D([0],[0],color=(0.0,0.0,0.0),lw=4,label='Cent')]\n return legend",
"def legend(self, **kwargs):\n raise NotImplementedError",
"def legends():\n metric_util.print_legends('\\n')\n stream_util.print_legends('\\n')\n shard_util.print_legends('\\n')",
"def displayLegend(self, t):\n\t\t# for each dataset, draw a horizontal line below the graph in the matching colour\n\t\t# then write the dataset name next to that line\n\t\tlineY = -280 # starting y position for the legend\n\t\tlineX = [-250, -185, -170] # start of line, end of line, start of text\n\n\t\tfor i in range(0, len(self.sets)):\n\t\t\tt.color(self.colours[i])\n\t\t\tcurrY = lineY - i * 25\n\t\t\tt.pu()\n\t\t\tt.setpos(lineX[0], currY)\n\t\t\tt.pd()\n\t\t\tt.setpos(lineX[1], currY)\n\t\t\tt.pu()\n\t\t\tt.setpos(lineX[2], currY - 8)\n\t\t\tt.pd()\n\t\t\tt.color(\"black\")\n\t\t\tt.write(self.sets[i].name, font=('Arial', 12, 'normal'))",
"def snapshot_legend_simple(color_scheme: Dict[KappaAgent, Any], col_num: int) ->\\\n Tuple[List[mpp.Rectangle], List[matplotlib.text.Text]]:\n position = 0 # var to track indexing\n x_dim = 0.5 # size of rectangle serving as color key\n y_dim = 0.5\n rect_list = []\n text_list = []\n for agent, color in sorted(color_scheme.items(), key=itemgetter(0), reverse=False):\n y_pos, x_pos = divmod(position, col_num)\n legend_entry_rect = mpp.Rectangle(\n xy=(x_pos, -y_pos), width=x_dim, height=y_dim, edgecolor='#000000', fc=color)\n legend_entry_text = matplotlib.text.Text(\n x=x_pos + x_dim, y=-y_pos, text=agent.get_agent_name(), verticalalignment='baseline')\n rect_list.append(legend_entry_rect)\n text_list.append(legend_entry_text)\n position += 1\n return rect_list, text_list",
"def get_legend_padded(p: np.ndarray) -> str:\n legend = r\"$\\mu$: \" + f\"{np.mean(p) :.5f} GeV,\\n\"\n legend += r\"$\\sigma$: \" + f\"{np.std(p) :.5f} GeV,\\n\"\n legend += r\"$\\mathrm{Med}$: \" + f\"{np.median(p) :.5f} GeV\"\n return legend",
"def createLegend(self): \n if self.model.legend:\n template = \"\"\" {e}\n var legend = d3.legend({s})\n .csv(\"data/legend.csv\")\n .position({p})\n .{f}(\"{a}\");\n {s}.call(legend);\"\"\"\n\n func = \"shape\"\n arg = \"square\"\n \n # Find the main layer and check the first symbol to determine the correct JS function call\n m = self.model.getMainLayer()\n if m.renderers[0].symbols[0].hasImage() == True:\n func = \"svgImg\"\n head, tail = os.path.split(m.renderers[0].symbols[0].path)\n arg = \"img/{0}\".format(tail)\n else:\n arg = m.renderers[0].symbols[0].getShape() \n \n ext = \"\"\n svg = \"svg\"\n pos = self.model.selectedLegendPosition\n \n \n if self.model.selectedLegendPosition == 4:\n # external legend has to have a different hosting svg element\n ext = \"\"\"var extLgnd = d3.select(\"#extLgnd\")\n .append(\"svg\");\\n\"\"\"\n svg = \"extLgnd\"\n\n # format and return\n return template.format(\n e = ext,\n f = func,\n a = arg,\n s = svg,\n p = pos\n )\n \n else:\n return \"\"",
"def legendTag(text):\n \n dic={\"SBF\":\"SOUS BLOC FONCTIONNEL\",\"BF\":\"BLOC FONCTIONNEL\",\"REF\":\"REFERENCE\"}\n keys=dic.keys()\n temp=re.findall(r\"(?<=>).*(?=span>)\",text) #find TAGS which are located between \">\" and \"</span>\"\n tags=[res[:-2] for res in temp] #just to delete </ from the results\n temp=re.findall(r\"(?<=background:\\s).*(?=;)\",text) #find colors which are located between \"background: \" and \";\"\n colors=[res[:res.find(\";\")] for res in temp] #Just consider the first ; among the others which are found inside the last answers\n tempindexes=[colors.index(x) for x in set(colors)] #Find the first indexes of the occurences colors\n colors=[colors[i] for i in tempindexes] #Find the equivalent colors\n temptags=[tags[i] for i in tempindexes] #Find the equivalent tags\n temp1=[[temptags[i],colors[i]] for i in range(len(temptags)) if \"NOMENCLATURE\" in temptags[i]] #Find the [tags,colors] related to NOMENCLATURE\n temp2=[[temptags[i],colors[i]] for i in range(len(temptags)) if not([temptags[i],colors[i]] in temp1)] #Find the others\n temp2=[[l[0].split(\"--\")[-1],l[1]] for l in temp2] #This part take place here because we want to show one tag for BF and not all the tags for it\n for i in range(len(temp2)):\n a=temp2[i][0]\n if a in keys:\n temp2[i][0]=dic[a]\n final=temp1+temp2 #list of [TAG,COLOR]\n# final=[list(item) for item in set(tuple(row) for row in final)] #set list of list\n \n return final",
"def _get_legend_registry(self) -> Dict[str, Any]:\n return {\n 'fps': self._draw_fps,\n 'count': self._draw_count,\n 'zone_count': self._draw_zone_count\n }",
"def legend_labelcolor():\n plt.rcParams['legend.labelcolor'] = 'linecolor'\n\n fig = new_slide()\n slide_heading(fig, '3.5 Feature: legend label color rcParam')\n\n fig.text(0.05, 0.8, \"plt.rcParams['legend.labelcolor'] = 'linecolor'\",\n **CODE)\n\n # Make some fake data.\n a = np.arange(0, 3, .02)\n c = np.exp(a)\n d = c[::-1]\n\n ax = fig.subplots()\n fig.subplots_adjust(top=0.7)\n ax.plot(a, c, 'g--', label='Model length', linewidth=2)\n ax.plot(a, d, 'r:', label='Data length', linewidth=2)\n\n ax.legend(loc='upper center', fontsize=20)\n\n annotate_pr_author(fig, 'Carloscerq', pr=20084)\n\n return fig",
"def _classify_lines(self, receipt):\n labels = []\n for i, line in enumerate(receipt):\n line = str(line)\n a_chars = count(line, string.ascii_letters)\n num_chars = count(line, string.digits)\n punct_chars = count(line, string.punctuation)\n\n if 'bon fiscal' in line.lower():\n labels.append('unknown')\n #if 'subtotal' in line.lower():\n # labels.append('unknown')\n\n elif (re.search('S\\.?C\\.?(.+?)(S.?R.?L.?)|(S[:.,]?A[:.,]?)', line, re.IGNORECASE) or\\\n any(x in line.lower() for x in ['kaufland'])) and i < 5 and 'shop' not in labels:\n labels.append('shop')\n elif (re.search('(C[^\\w]?U[^\\w]?I[^\\w]?)|(C[^\\w]?F[^\\w]?)|(C[^\\w]?I[^\\w]?F[^\\w]?)|(COD FISCAL).+? (\\d){4,}', line) or\\\n re.search('\\d{8}', line)) and i < 6:\n labels.append('cui')\n elif (re.search('(STR)|(CALEA)|(B-DUL).(.+?)', line, re.IGNORECASE) and i < 7) or\\\n (re.search('(NR).(\\d+)', line, re.IGNORECASE) and i < 3):\n labels.append('address')\n\n\n elif 'TVA' in line:\n labels.append('tva')\n elif 'TOTAL' in line and 'SUBTOTAL' not in line:\n labels.append('total')\n elif re.search('DATA?.+?\\d{2,4}[.\\\\-]\\d{2,4}[.\\\\-]\\d{2,4}', line, re.IGNORECASE) or\\\n re.search('\\d{2}[./\\\\-]\\d{2}[./\\\\-]\\d{2,4}', line, re.IGNORECASE):\n labels.append('data')\n elif a_chars > 0 and num_chars/a_chars > 1 and 2 < i < len(receipt) - 7 and \\\n all(x not in line.lower() for x in ['tel', 'fax']) and 'total' not in labels:\n labels.append('price')\n elif 3 < i < len(receipt) - 8 and a_chars+punct_chars > 5 and 'total' not in labels and ((\\\n all(not re.search('(\\W|^)'+x, line.lower()) for x in ['tel', 'fax', 'subtotal', 'numerar', 'brut', 'net'] +\n days)\\\n and not re.search('\\d{5}', line)) or labels[-1] == 'price'):\n\n labels.append('name')\n else:\n labels.append('unknown')\n return labels",
"def print_chart(self):\n for fg in range(0, 7):\n for bg in range(0, 7):\n for attr in sorted(self.attribute_table.values()):\n demo_color = Color(foreground=fg, background=bg, attribute=attr,\n bright_foreground=False, bright_background=False)\n print demo_color(\"Hello World!\"), repr(demo_color)\n demo_color.bright_foreground = True\n print demo_color(\"Hello World!\"), repr(demo_color)\n demo_color.bright_background = True\n print demo_color(\"Hello World!\"), repr(demo_color)",
"def get_html_legend_table():\n # Sort the ranges\n sorted_keys = DICT_TRANS_VALUES.keys()\n sorted_keys.sort()\n # Generate the HTML string with the rows code\n rows = ''\n for key in sorted_keys[1:]:\n color = DICT_TRANS_VALUES[key][0]\n desc = DICT_TRANS_VALUES[key][1]\n rows += ROW_TABLE_LEGEND_HTML % (color, desc)\n # Generate the HTML string with the table code\n return TABLE_LEGEND_HTML % (rows)",
"def render_legend(context):\n category_service_lst = DefaultCategoryService.objects.all()\n context.update({'category_service_lst': category_service_lst})\n return context"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Encode all captions into one large array, which will be 1-indexed. Also produces label_start_ix and label_end_ix, which store 1-indexed and inclusive (Lua-style) pointers to the first and last caption for each image in the dataset.
|
def encode_captions(imgs, params, wtoi):
max_length = params['max_length']
# min_length = params['min_length']
N = len(imgs)
M = sum(len(img['final_captions']) for img in imgs) # total number of captions
label_arrays = []
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_length = np.zeros(M, dtype='uint32')
caption_counter = 0
counter = 1
for i, img in enumerate(imgs):
n = len(img['final_captions'])
assert n > 0, 'error: some image has no captions'
Li = np.zeros((n, max_length), dtype='uint32')
for j, s in enumerate(img['final_captions']):
# if len(s) <= min_length:
# continue
# else:
label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence
caption_counter += 1
for k, w in enumerate(s):
if k < max_length:
Li[j, k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
label_arrays.append(Li)
assert counter-1 == img['cocoid']
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
L = np.concatenate(label_arrays, axis=0) # put all the labels together
assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
assert np.all(label_length > 0), 'error: some caption had no words?'
print('encoded captions to array of size ', L.shape)
return L, label_start_ix, label_end_ix, label_length
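A small worked example of the 1-indexed, inclusive pointer scheme returned above; the caption counts are illustrative and not taken from any real split.

# Suppose two images with 5 and 3 final captions, so M = 8 rows in L.
label_start_ix_example = np.array([1, 6], dtype='uint32')
label_end_ix_example = np.array([5, 8], dtype='uint32')
# To recover the captions of image i with 0-indexed numpy slicing:
#   L[label_start_ix_example[i] - 1 : label_end_ix_example[i]]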
|
[
"def _populate_caption_data(self, data: Dict[str, Any], image_id: int) -> None:\n data[\"caption\"] = []\n annotation_ids = self.captions.getAnnIds(imgIds=image_id)\n if annotation_ids:\n annotations = self.captions.loadAnns(annotation_ids)\n for annotation in annotations:\n data[\"caption\"].append(annotation['caption'])",
"def map_grouped_data_index_to_grouped_captions(caption_data, grouped_image_index):\n captions_by_image_id = {}\n for k, v in grouped_image_index.items():\n captions_by_image_id[k] = caption_data[v]\n return captions_by_image_id",
"def make_labels(self):\n for lab in self.label_ids: #init label objects\n self.labels[lab] = Label(self.id, lab)\n for sentence in self.sentences: #dump stuff into the label objects\n for i in range(1, len(sentence.labels)):\n lab = sentence.labels[i]\n self.labels[lab].add_sentence(sentence.words[i], sentence.probs[lab], sentence.surprisal[lab])",
"def createLabelSet(transcript):\n labels = set()\n for a in transcript.annotations:\n for l in a.labels:\n labels.add(l)\n return labels",
"def to_ctc_encoded(labels: np.ndarray) -> np.ndarray:\n\n # convert 1s to 2s. 2 denoted frame boundary\n labels[labels == 1] = 2\n\n # insert fake second frame if there are repeated labels adjacent to each other\n double = [(i, a) for i, (a, b) in enumerate(zip(labels[:-1], labels[1:])) if np.all(a == b)]\n\n if len(double) > 0:\n indices, values = zip(*double)\n values = [value / 2 for value in values] # 1 to indicate within phone boundary\n indices = [i + 1 for i in indices] # np inserts before index\n labels = np.insert(labels, indices, values, axis=0)\n\n return labels",
"def pickle_character_list_data(all_labels):\n\n all_labels.sort()\n\n with open(VN_DICT, 'r', encoding='utf8') as f:\n word_list = f.read().split()\n\n with open(CHAR_LABEL_MAP, 'r', encoding='utf8') as f:\n char_label_map = json.loads(f.read(), encoding='utf8')\n\n # Create lowercase, uppercase, camel case for each word\n word_list_all_forms = []\n for w in word_list:\n word_list_all_forms.append(w.lower())\n word_list_all_forms.append(w.upper())\n word_list_all_forms.append(w.title())\n if w not in word_list_all_forms:\n word_list_all_forms.append(w)\n\n print(\"Total words: \", len(word_list_all_forms))\n\n X = np.zeros((0, MAX_WORD_LENGTH, HEIGHT, WIDTH), dtype='float32')\n Y = np.zeros((0, MAX_WORD_LENGTH, len(all_labels) + 1), dtype='int')\n\n break_points = np.linspace(1, len(word_list_all_forms), 15, endpoint=True, dtype='int')[1:] - 1\n\n count = -1\n for w in word_list_all_forms:\n count += 1\n\n if len(w) > MAX_WORD_LENGTH: # if word is longer than MAX_WORD_LENGTH characters => skip\n continue\n\n all_imgs = []\n labels = np.zeros((SAMPLE_PER_WORD, MAX_WORD_LENGTH, len(all_labels) + 1))\n\n for i in range(len(w)):\n char = w[i]\n l = char_label_map[char] # label of character ith\n\n img_names = np.random.choice(os.listdir(DATA_FOLDER + l), SAMPLE_PER_WORD, replace=False)\n img_srcs = [DATA_FOLDER + \"{}/{}\".format(l, n) for n in img_names]\n imgs = [cv2.imread(src, cv2.IMREAD_GRAYSCALE) for src in img_srcs]\n imgs = np.array(imgs) / 255.0\n\n all_imgs.append(imgs)\n labels[:, i, all_labels.index(l)] = 1\n\n for i in range(len(w), MAX_WORD_LENGTH):\n all_imgs.append(np.zeros((SAMPLE_PER_WORD, HEIGHT, WIDTH), dtype='float32'))\n labels[:, i, len(all_labels)] = 1\n\n all_imgs = np.transpose(np.array(all_imgs), (1, 0, 2, 3)) # (SAMPLE_PER_WORD, MAX_WORD_LENGTH, HEIGHT, WIDTH)\n\n X = np.vstack((X, all_imgs))\n Y = np.vstack((Y, labels))\n\n if count in break_points:\n print(\"Saving up to {}...\".format(count + 1))\n X = X.reshape(X.shape[0], MAX_WORD_LENGTH, HEIGHT, WIDTH, 1)\n\n pickle_data([X, Y], DATA_NAME + '_temp_' + str(np.where(break_points == count)[0][0]))\n\n X = np.zeros((0, MAX_WORD_LENGTH, HEIGHT, WIDTH), dtype='float32')\n Y = np.zeros((0, MAX_WORD_LENGTH, len(all_labels) + 1), dtype='int')\n\n print(\"Merging to single file...\")\n X = np.zeros((0, MAX_WORD_LENGTH, HEIGHT, WIDTH, 1), dtype='float32')\n Y = np.zeros((0, MAX_WORD_LENGTH, len(all_labels) + 1), dtype='int')\n\n for i in range(break_points.shape[0]):\n filename = DATA_NAME + '_temp_' + str(i)\n with open(filename, 'rb') as f:\n X = np.vstack((X, pickle.load(f)))\n Y = np.vstack((Y, pickle.load(f)))\n os.remove(filename)\n\n pickle_data([X, Y], DATA_NAME)",
"def generate_caption(image, inception, caption_model, idx2word,\r\n exclude_from_prediction = None,\r\n caption_prefix=(DEFAULT_BOS_IDX,), end_token_idx = DEFAULT_EOS_IDX, \r\n temperature = 1, sample = True, max_len = 10):\r\n\r\n assert isinstance(image, np.ndarray) and np.max(image) <= 1\\\r\n and np.min(image) >=0 and image.shape[-1] == 3\r\n \r\n if not exclude_from_prediction:\r\n exclude_from_prediction = []\r\n\r\n with torch.no_grad():\r\n image = torch.tensor(image.transpose([2, 0, 1]), dtype=torch.float32)\r\n\r\n vectors_8x8, vectors_neck, logits = inception(image[None]) #### adding batch size, then channels, then h, w\r\n \r\n caption_prefix = list(caption_prefix)\r\n text_caption = []\r\n\r\n with torch.no_grad():\r\n # cлово за словом генерируем описание картинки\r\n # actually what is happening is that every step i append last predicted word but run the net from the beginning with the same init vec\r\n for _ in range(max_len):\r\n # 1. Представляем caption_prefix в виде матрицы\r\n # 2. Получаем из RNN-ки логиты, передав ей vectors_neck и матрицу из п.1\r\n # 3. Переводим логиты RNN-ки в вероятности (например, с помощью F.softmax)\r\n # 4. Сэмплируем следующее слово в описании, используя полученные вероятности. Можно сэмплировать жадно, можно сэмплировать из распределения\r\n # 5. Добавляем новое слово в caption_prefix\r\n # 6. Если RNN-ка сгенерила символ конца предложения, останавливаемся\r\n \r\n captions_ix_inp = torch.tensor(caption_prefix, dtype = torch.long).unsqueeze(0)\r\n logits_for_next = caption_model.forward(vectors_neck, captions_ix_inp)\r\n next_token_distr = F.softmax(logits_for_next[0, -1, :] / temperature, dim = -1).numpy() ### fetching only last prediction\r\n next_token_idx_hard = next_token_distr.argmax(axis = -1)\r\n\r\n # fixing prediction due to possibility of spec tokens\r\n zeroing_mask = np.zeros(next_token_distr.shape[0], dtype=bool)\r\n zeroing_mask[exclude_from_prediction] = True\r\n relocate_proba_sum = np.sum(next_token_distr[zeroing_mask])\r\n next_token_distr[zeroing_mask] = 0 #### zeroing out spec tokens\r\n next_token_distr[~zeroing_mask] += relocate_proba_sum / np.sum(~zeroing_mask) #### renormalizing probabilities\r\n\r\n next_token_idx_sampled = np.random.choice(np.arange(len(next_token_distr)), p = next_token_distr)\r\n \r\n if sample:\r\n if next_token_idx_sampled == end_token_idx:\r\n break\r\n text_caption.append(idx2word[next_token_idx_sampled])\r\n caption_prefix.append(next_token_idx_sampled)\r\n else:\r\n if next_token_idx_hard == end_token_idx:\r\n break\r\n text_caption.append(idx2word[next_token_idx_hard])\r\n caption_prefix.append(next_token_idx_hard)\r\n \r\n return ' '.join(text_caption)",
"def get_data_image_caption(obj, sample):\n caption = sample['caption'].strip().split(' ')\n coding_q = [0]\n coding_a = [obj.word_dict.get(word, 0) for word in caption]\n img_feat = get_image_feature(obj.features, sample['image_id'])\n if img_feat is not None:\n img_feat = img_feat * obj.feat_avg_norm_factor\n question_id = int(sample['image_id'])\n return question_id, img_feat, coding_q, coding_a",
"def createIndex(self):\n log.info(\"-------------------------------->\")\n stats = {\n 'total_labels': 0\n ,'total_annotations': 0\n ,'total_images': 0\n ,'total_unique_images': 0\n ,\"total_label_per_img\": defaultdict(list)\n ,\"total_img_per_label\": defaultdict()\n ,\"label_per_img\": defaultdict(list)\n ,\"total_annotation_per_label\": defaultdict()\n }\n\n unique_images = set()\n anns, cats, imgs = {}, {}, {}\n imgToAnns, catToImgs, cat_lblid_to_id, catToAnns = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)\n\n if 'annotations' in self.dataset:\n for ann in self.dataset['annotations']:\n imgToAnns[ann['img_id']].append(ann)\n anns[ann['ant_id']] = ann\n stats['total_annotations'] += 1\n\n # catToImgs[ann['lbl_id']].append(ann['img_id'])\n if 'categories' in self.dataset:\n catToImgs[ann['lbl_id']].append(ann['img_id'])\n catToAnns[ann['lbl_id']].append(ann['ant_id'])\n\n if 'images' in self.dataset:\n for img in self.dataset['images']:\n imgs[img['img_id']] = img\n stats['total_images'] += 1\n _ann = imgToAnns[img['img_id']]\n\n\n # if 'annotations' in self.dataset and 'categories' in self.dataset:\n # for ann in self.dataset['annotations']:\n # # catid = cat_lblid_to_id[ann['lbl_id']]\n # # catToImgs[catid].append(ann['img_id'])\n # catToImgs[ann['lbl_id']].append(ann['img_id'])\n # catToAnns[ann['lbl_id']].append(ann['ant_id'])\n\n ## categories and labels are synonymous and are used to mean the same thing\n if 'categories' in self.dataset:\n for cat in self.dataset['categories']:\n cats[cat['lbl_id']] = cat\n # cats[cat['id']] = cat\n # self.cat_lblid_to_id[cat['lbl_id']] = cat['id']\n stats['total_labels'] += 1\n stats['total_annotation_per_label'][cat['lbl_id']] = len(catToAnns[cat['lbl_id']])\n stats['total_img_per_label'][cat['lbl_id']] = len(catToImgs[cat['lbl_id']])\n\n log.info('index created!')\n log.info(\"stats: {}\".format(stats))\n\n # create class members\n self.anns = anns\n self.imgToAnns = imgToAnns\n self.catToImgs = catToImgs\n self.catToAnns = catToAnns\n self.imgs = imgs\n self.cats = cats\n self.minstats = stats",
"def select_captions(annotations, image_ids):\n\n # for fast lookup\n image_ids = set(image_ids)\n\n captions = []\n caption_image_ids = []\n\n for annotation in annotations:\n image_id = annotation['image_id']\n if image_id in image_ids:\n captions.append(annotation['caption'].replace('\\n', ''))\n caption_image_ids.append(image_id)\n\n return captions, caption_image_ids",
"def _encode_dataset(\n self,\n data: pd.DataFrame,\n image_name_column: str):\n\n empty_ocr_count = 0\n mask = np.zeros(len(data))\n encoded_data = np.zeros((len(data), self.vocabulary_size + self.layout_shape[0] * self.layout_shape[1]))\n\n counter = 0\n for index, row in data.iterrows():\n try:\n filename = data.loc[index, image_name_column]\n ocr_results = self.ocr_provider.get_ocr_results(filename)\n\n if len(ocr_results) == 0:\n empty_ocr_count += 1\n else:\n mask[counter] = 1\n encodings = self.encoder.encode_ocr_results(ocr_results)\n encoded_data[counter, :] = encodings\n\n except:\n logging.error(\"Could not locate blob: {}\".format(row[image_name_column]))\n raise\n\n counter += 1\n\n if empty_ocr_count > 0:\n logging.warning(\"Empty OCR results resulting in null entries for {} images\".format(empty_ocr_count))\n\n return encoded_data, mask",
"def _load_and_process_metadata(captions_file):\n with tf.gfile.FastGFile(captions_file, \"r\") as f:\n caption_data = json.load(f)\n\n sorted_annotations = sorted(caption_data['annotations'], key=lambda k: k['image_id'])\n\n with open(\"image_id_list.txt\", \"w\") as wfimg:\n with open(\"caption_list.txt\", \"w\") as wfcap:\n for annotation in sorted_annotations:\n caption = annotation['caption'].replace(\"\\n\", \" \")\n wfimg.write((\"%d\\n\" % annotation[\"image_id\"]))\n wfcap.write(caption.strip()+\"\\n\")\n\n # Extract the filenames.\n id_to_filename = [(x[\"id\"], x[\"file_name\"]) for x in caption_data[\"images\"]]\n\n # Extract the captions. Each image_id is associated with multiple captions.\n id_to_captions = {}\n for annotation in caption_data[\"annotations\"]:\n image_id = annotation[\"image_id\"]\n caption = annotation[\"caption\"]\n id_to_captions.setdefault(image_id, [])\n id_to_captions[image_id].append(caption)\n if image_id == 581921:\n print(caption)\n\n\n assert len(id_to_filename) == len(id_to_captions)\n assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())\n print(\"Loaded caption metadata for %d images from %s\" %\n (len(id_to_filename), captions_file))",
"def dictionary(raw_captions,threshold):\n caps = []\n for im in raw_captions:\n for s in raw_captions[im]:\n caps.append(s.split())\n\n word_freq = nltk.FreqDist(itertools.chain(*caps))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id = {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id",
"def encode_anchors(anchors):\n nanchors = len(anchors) // 4\n keys = np.empty(shape=(nanchors), dtype=np.int64)\n\n for i in range(nanchors):\n idx = 4*i\n anchor = anchors[idx:idx+4]\n keys[i] = encode_anchor(anchor)\n\n return keys",
"def encode_label(labels):\n y = np.zeros(len(labels))\n for i, l in np.ndenumerate(labels):\n if l == 'realDonaldTrump':\n y[i] = 1\n else:\n y[i] = 0\n return y",
"def _interview_text_data(self, caption):\n text_data = []\n for item_list in caption:\n text_data.append(item_list['text'])\n return ' '.join(text_data)",
"def custom_siamese_collate(batch):\n # Remember batch size for later reference\n batch_size = torch.tensor(len(batch), dtype=torch.int16)\n # Prepare the list of brains and labels\n images = []\n labels = []\n # Iterate over the channels dimension\n for i in range(53):\n # Prepare empty arrays for indices and values. Those items will be stored separately for each batch.\n indices_batch = []\n values_batch = []\n # Iterate over the batch\n for j in range(len(batch)):\n # Retrieve brains volume and single brain\n brain = batch[j][0][0][i]\n # Find nonzero indices. <as_tuple=True> is needed for advanced indexing, to retrieve the values of indices\n nonzero_indices = list(torch.nonzero(brain, as_tuple=True))\n # Find nonzero values.\n # Values must have the last dimension of the color channel. In this case is 1.\n values = brain[nonzero_indices].unsqueeze(-1)\n # Add batch index to indices tensor. Now tensor has dimension (N, 4) and the last dimension is filled with the batch index\n # This is needed by the InputLayer library. In the last dimension it needs the batch index:\n # Since every item in batch will be concatenated, it must be able to find the right batch item.\n # Stack indices. It will have the representation of (N, 3), which is the number of nonzero indices and the\n # dimension of the spatial size\n nonzero_indices.append(torch.full_like(nonzero_indices[0], j))\n indices = torch.stack(nonzero_indices, -1)\n indices_batch.append(indices)\n values_batch.append(values)\n if i == 0:\n # Add label to array but only once - so in the first pass of images\n labels.append(batch[j][1])\n\n indices_batch = torch.cat(indices_batch, dim=0)\n values_batch = torch.cat(values_batch, dim=0)\n images.append((indices_batch, values_batch, batch_size))\n\n labels = torch.stack(labels, dim=0)\n return images, labels",
"def build_label_container(\n *label_iterables: Optional[Iterable[OCMLabel]],\n) -> LabelContainer:\n merged_labels = {}\n for labels in label_iterables:\n for label in labels or []:\n merged_labels[label.key] = label\n return LabelContainer(labels=merged_labels)",
"def save_captions(images_dir, output_dir, image_id, caption):\n # TODO (aq): check if the given paths are valid and don't hardcode the paths.\n # TODO (aq): Display multiline text on images.\n caption = caption[1:]\n caption = \" \".join(caption)\n img_path = Path(images_dir / image_id[0])\n img = Image.open(img_path)\n width, height = img.size\n draw = ImageDraw.Draw(img)\n \n draw.rectangle([(0, 0),(width, 12)], outline=(255, 0, 0), fill=(255, 255, 255))\n draw.multiline_text((0, 0), caption, fill=(0, 0, 0), spacing=4)\n ouput_img_path = Path(output_dir / 'results' / image_id [0])\n img.save(ouput_img_path, \"JPEG\")",
"def create_labels_from_metadata(metadata):\n # get failure indicies\n failures_i = get_failure_indicies(metadata)\n # create labels linearly\n labels = []\n first = 0\n for i in failures_i:\n if first == 0:\n num_points = i - first + 1\n labels_subsequence = np.linspace(0, 1, num=num_points).tolist()\n else:\n num_points = i - first\n labels_subsequence = np.linspace(0, 1, num=num_points).tolist()\n labels.extend(labels_subsequence)\n first = i\n # last timeseries' subsequence is treated seperately\n num_points = len(metadata) - 1 - failures_i[-1]\n labels_subsequence = np.linspace(0, 1, num=num_points).tolist()\n labels.extend(labels_subsequence)\n return np.array(labels)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the BM25 score between the caption and each sentence in the article. Take named entities into account rather than the pointer; remove numbers and stopwords.
|
def BM25_score(cap, sent, df_dict, stopwords, dataset):
if dataset == 'breakingnews':
N = 2423309
ave_sen_len = 20
else:
N = 5953950
ave_sen_len = 20
k1 = 2.0
k2 = 1.0
b = 0.75
sent_tf = {}
cap_tf = {}
score = 0
cleaned_cap = []
# remove number and stop words
for token in cap:
token = token.lower()
if not is_number(token) and token not in stopwords:
cleaned_cap.append(token)
cap_tf[token] = cap_tf.get(token, 0) + 1
for token in sent:
token = token.lower()
# ignore number and stop words
if not is_number(token) and token not in stopwords:
sent_tf[token] = sent_tf.get(token, 0) + 1
for token in cleaned_cap:
df = df_dict.get(token, 0)
qf = cap_tf[token]
W = math.log((N - df + 0.5) / (df + 0.5), 2)
K = k1 * (1 - b + b * len(sent) / ave_sen_len)
tf = sent_tf.get(token, 0)
try:
token_score = round((W * tf * (k1 + 1) / (tf + K)) * (qf * (k2 + 1) / (qf + k2)), 2)
except TypeError as e:
# print('token:%s' % token)
print('W:%.4f, tf:%d, K:%.4f, qf:%d' % (W, tf, K, qf))
exit(0)
score = score + token_score
# sorted_socres = sorted([(index, score) for index, score in scores.items()], reverse=True, key=lambda e: e[1])
return score
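For reference, the per-token score accumulated in the loop above is the standard BM25 weighting. In the notation below (added here, not part of the original source), df(t) is the document frequency of token t, tf_s(t) its count in the sentence s, and qf_c(t) its count in the cleaned caption c:

\mathrm{score}(c, s) = \sum_{t \in c} \log_2\!\left(\frac{N - \mathrm{df}(t) + 0.5}{\mathrm{df}(t) + 0.5}\right) \cdot \frac{\mathrm{tf}_s(t)\,(k_1 + 1)}{\mathrm{tf}_s(t) + K} \cdot \frac{\mathrm{qf}_c(t)\,(k_2 + 1)}{\mathrm{qf}_c(t) + k_2},
\qquad K = k_1\left(1 - b + b\,\frac{|s|}{\bar{\ell}}\right)

with k_1 = 2.0, k_2 = 1.0, b = 0.75, N the corpus sentence count selected by the dataset flag, and \bar{\ell} = 20 the average sentence length.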
|
[
"def analyze_bbc():\n import nltk\n nltk.download('punkt')\n from nltk.tokenize import word_tokenize\n tokens = word_tokenize(article.text)\n # print(tokens)\n tokens = [w.lower() for w in tokens]\n import string\n table = str.maketrans(\"\",\"\", string.punctuation)\n stripped = [w.translate(table) for w in tokens]\n words = [word for word in stripped if word.isalpha()]\n import nltk\n nltk.download('stopwords')\n from nltk.corpus import stopwords\n stop_words = stopwords.words('english')\n words = [w for w in words if not w in stop_words]\n\n from collections import Counter\n Counter = Counter(words)\n most_occur_bbc = Counter.most_common(10)\n print(f'These are the top 10 words used in the BBC article: {most_occur_bbc}')\n print(f'')\n\n from nltk.sentiment.vader import SentimentIntensityAnalyzer\n paragraph = article.text\n score = SentimentIntensityAnalyzer().polarity_scores(paragraph)\n print(f'This is the sentiment report of the BBC article: {score}')",
"def scoreText(self, keyword, sentences):\n try:\n # Remove symbols from text\n sentences = self.cleanText(sentences)\n\n # Tokenization and Lennatization of the keyword\n keywordList = self.preProcessText(keyword)\n\n scoredSentencesList = []\n for i in range(len(sentences)):\n\n # Tokenization and Lennatization of the sentences\n wordlist = self.preProcessText(sentences[i])\n\n # list of keyword taken as reference\n reference = [keywordList]\n chencherry = SmoothingFunction()\n # sentence bleu calculates the score based on 1-gram,2-gram,3-gram-4-gram,\n # and a cumulative of the above is taken as score of the sentence.\n bleu_score_1 = sentence_bleu(\n reference, wordlist, weights=(1, 0, 0, 0), smoothing_function=chencherry.method1)\n bleu_score_2 = sentence_bleu(\n reference, wordlist, weights=(0.5, 0.5, 0, 0), smoothing_function=chencherry.method1)\n bleu_score_3 = sentence_bleu(\n reference, wordlist, weights=(0.33, 0.33, 0.34, 0), smoothing_function=chencherry.method1)\n bleu_score_4 = sentence_bleu(\n reference, wordlist, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=chencherry.method1)\n bleu_score = (4*bleu_score_4 + 3*bleu_score_3 +\n 2*bleu_score_2 + bleu_score_1)/10\n\n # append the score with sentence to the list\n scList = [bleu_score, sentences[i]]\n scoredSentencesList.append(scList)\n return scoredSentencesList\n\n except Exception as e:\n print(\"Error occured in score text\", e)",
"def predict(note):\n\n # Patterns for information extraction\n p = re.compile(r\"edss\", re.IGNORECASE)\n p_score = re.compile(r\"\\d\\.\\d\")\n p_num = re.compile(r\"zero|one|two|three|four|five|six|seven|eight|nine\", re.IGNORECASE)\n num_dict = {\n \"zero\":0,\n \"one\":1,\n \"two\":2,\n \"three\":3,\n \"four\":4,\n \"five\":5,\n \"six\":6,\n \"seven\":7,\n \"eight\":8,\n \"nine\":9\n }\n score = -1\n sentences = sent_tokenize(note)\n for sent in sentences:\n # Find sentence with \"EDSS\"\n if len(re.findall(p, sent)) > 0:\n # Find score with format \"x.x\"\n if len(re.findall(p_score, sent)) > 0:\n score = float(re.findall(p_score, sent)[0])\n break\n # Find score with format \"EDSS is x\"\n elif len(re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)) > 0:\n number = re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)[0]\n score = float(re.sub(r\"\\s|\\.|\\,|\\)\", r\"\", number))\n break\n # Find score writtent in \"zero/one ...\"\n elif len(re.findall(p_num, sent)) > 0:\n score = float(num_dict[re.findall(p_num, sent)[0].lower()])\n break\n \n if score not in [0.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]:\n score = -1\n \n \n label_dict = {0.0:0,\n 1.0:1,\n 1.5:2,\n 2.0:3,\n 2.5:4,\n 3.0:5,\n 3.5:6,\n 4.0:7,\n 4.5:8,\n 5.0:9,\n 5.5:10,\n 6.0:11,\n 6.5:12,\n 7.0:13,\n 7.5:14,\n 8.0:15,\n 8.5:16,\n 9.0:17,\n 9.5:18,\n -1:-1}\n \n return label_dict[score]",
"def fraction_adverbs(row):\n text = row['text']\n text_splited = text.split(' ')\n text_splited = [''.join(c for c in s if c not in string.punctuation) for s in text_splited]\n text_splited = [s for s in text_splited if s]\n word_count = text_splited.__len__()\n pos_list = nltk.pos_tag(text_splited)\n verbs_count = len([w for w in pos_list if w[1] in ('RB','RBR','RBS')])\n return (verbs_count/word_count)",
"def comment_analyzer(comment_body):\r\n syllable_count = 0\r\n word_count = 0\r\n sentence_count = 0\r\n\r\n for word in comment_body.split():\r\n try:\r\n remove_digits = str.maketrans('', '', digits)\r\n word = word.translate(remove_digits) # Removes digits 1-9 from being checked\r\n # Once digits are stripped, they show up as ''.\r\n # This next line just says to ignore them if that's the case\r\n if word == '':\r\n continue\r\n endings_repeat = [\"..\", \"??\", \"!!\"]\r\n if any(i in word for i in endings_repeat):\r\n sentence_count += 1\r\n else:\r\n sentence_count += word.count(\".\")\r\n sentence_count += word.count(\"?\")\r\n sentence_count += word.count(\"!\")\r\n\r\n word_count += 1\r\n translator = str.maketrans('', '', string.punctuation)\r\n word = word.translate(translator) # Removes punctuation from word\r\n syllable_list = nsyl(word) # Flesh-Kincaid bit ( see nsyl() )\r\n syllable_count += syllable_list[0]\r\n except KeyError:\r\n pass\r\n\r\n if sentence_count == 0:\r\n sentence_count = 1\r\n try:\r\n print(\"Sentences: \" + str(sentence_count))\r\n average_words = word_count / sentence_count # Average words used per sentence\r\n print(\"Avg words: \" + str(average_words))\r\n average_syllables = syllable_count / word_count # Average syllables per word\r\n print(\"Avg syllables: \" + str(average_syllables))\r\n # All our step three stuff. ( See function details for more information )\r\n step_three_words = (average_words * .39)\r\n step_three_syllables = (average_syllables * 11.8)\r\n step_three_added = (step_three_words + step_three_syllables)\r\n # Find our final result, the round to the nearest integer.\r\n result = (step_three_added - 15.59)\r\n return int(round(result))\r\n except ZeroDivisionError as e:\r\n print(\"Comment contained zero words. Continuing.\")\r\n pass",
"def get_informative_words(nb_model):\n words = nb_model.decades[1930].keys()\n freq_not_zero = np.zeros((len(DECADES), len(words)))\n for i, dec in enumerate(DECADES):\n for j, word in enumerate(words):\n freq_not_zero[i,j] = 1.0 - nb_model.decades[dec][word][0]\n scores = np.where(freq_not_zero!=0, freq_not_zero, nb_model.dirichlet)\n scores /= np.min(scores, axis = 0)\n best_words = {}\n for i, dec in enumerate(DECADES):\n indices = np.argsort(scores[i,:])[-100:]\n best_words[dec] = [words[index] for index in list(indices)]\n return best_words",
"def fraction_verbs(row):\n text = row['text']\n text_splited = text.split(' ')\n text_splited = [''.join(c for c in s if c not in string.punctuation) for s in text_splited]\n text_splited = [s for s in text_splited if s]\n word_count = text_splited.__len__()\n pos_list = nltk.pos_tag(text_splited)\n verbs_count = len([w for w in pos_list if w[1] in ('VB','VBD','VBG','VBN','VBP','VBZ')])\n return (verbs_count/word_count)",
"def cal_sent_scores(self, sentence):\n word_count = 0\n max_word_pos_score = 0\n max_word_neg_score = 0\n for word, tag in sentence:\n pos_score = 0\n neg_score = 0\n synsets = self.iswn.senti_synsets(word, tag) \n num_synsets = len(synsets) \n word_pos_score = 0\n word_neg_score = 0\n if num_synsets >=1 : \n for synset in synsets:\n word_pos_score += synset.pos_score\n word_neg_score += synset.neg_score\n word_pos_score = word_pos_score/num_synsets #average synsets scores\n word_neg_score = word_neg_score/num_synsets\n if max_word_pos_score < word_pos_score :\n max_word_pos_score = word_pos_score\n if max_word_neg_score < word_neg_score :\n max_word_neg_score = word_neg_score\n \n return max_word_pos_score, max_word_neg_score",
"def cal_doc_scores(self, sentences) :\n doc_pos_score =0\n doc_neg_score = 0\n for label, pos, neg in sentences:\n if label != 0 :\n doc_pos_score += pos\n doc_neg_score += neg\n return doc_pos_score, doc_neg_score",
"def corpus_bleu_score(\n preds: torch.Tensor, gt: torch.Tensor, tokenizer, weights=(0.25, 0.25, 0.25, 0.25)\n):\n preds = [s.strip().split(\" \") for s in ids_to_captions(preds, tokenizer, True)]\n gt = [\n [s.strip().split(\" \") for s in ids_to_captions(lst, tokenizer, True)]\n for lst in gt\n ]\n return corpus_bleu(gt, preds, weights=weights)",
"def get_bm25_scores(self, query):\n k = 1.5\n b = 0.75\n scores = {}\n for id in self.ids:\n scores[id] = 0\n\n for term in re.findall(r\"[\\w']+|[.,!?;]\", query.strip()):\n term = term.lower()\n if not term in self.document_frequencies:\n continue\n df = self.document_frequencies[term]\n idf = np.log((self.num_documents - df + 0.5) / (df + 0.5))\n for id in self.ids:\n document_dict = self.term_frequencies[id]\n if not term in document_dict:\n scores[id] += 0\n continue\n tf = document_dict[term]\n wd = ((tf * (k+1)) / (tf + k*(1-b+b*self.document_length[id]/self.avg_length))) + 1\n scores[id] += idf * wd\n\n return scores",
"def analyseArticleSentiment(self, for_topics):\n\n if (for_topics):\n model = self.topic_model\n else:\n model = self.party_model\n\n # Store the original text, for use later\n original_text = self.article_text \n\n # Next, find overall most likely topics\n text_vectorized = self.getVectorised(self.article_text)\n topic_binary_predictions = model.predict(text_vectorized)\n\n likely_topics = np.nonzero(topic_binary_predictions == True)[1]\n\n # Create dictionary, key: topic index, value: [sentiment scores, counter (for averaging)]\n topic_sentiment_scores = {}\n \n # Then, split the original text into paragraphs and find the most likely topics\n paragraphs = original_text.split(\"\\n\")\n\n # Only consider a paragraph if it has five or more sentences\n # If it doesn't, collate paragraphs into bigger paragraphs\n composite_paragraph = \"\"\n\n for paragraph in paragraphs:\n\n original_paragraph = paragraph\n\n if composite_paragraph != \"\":\n paragraph = composite_paragraph + paragraph\n \n sentences = sent_tokenize(paragraph)\n\n if (len(sentences) < 5):\n composite_paragraph += original_paragraph + \"\\n\"\n continue\n else:\n composite_paragraph = \"\"\n \n # Vectorize the paragraph, and make topic/party predictions\n paragraph_vectorized = self.getVectorised(paragraph) \n paragraph_binary_predictions = model.predict(paragraph_vectorized)\n paragraph_probabilities = model.predict_proba(paragraph_vectorized)[0][0]\n\n likely_paragraph_topics = np.nonzero(paragraph_binary_predictions == True)[1]\n paragraph_probabilities = dict([(paragraph_index, round(paragraph_probabilities[paragraph_index], 1)) for paragraph_index in range(0, len(paragraph_probabilities)) if paragraph_index in likely_paragraph_topics])\n\n paragraph_sentiment_scores = {}\n\n for topic in likely_paragraph_topics:\n if (topic not in paragraph_sentiment_scores):\n paragraph_sentiment_scores[topic] = 0\n\n # Next, get sentiment of each sentence\n for sentence in sentences:\n # Get the polarity of the sentence\n sentence_polarity = TextBlob(sentence).sentiment.polarity\n\n # If the sentence is likely talking about a topic found in the current paragraph, store weighted sentiment\n for topic_num in likely_paragraph_topics:\n # Get the probability of it being that topic\n paragraph_topic_weighting = paragraph_probabilities[topic_num]\n\n # Weight the polarity by the likelihood of the topic\n weighted_polarity = sentence_polarity * paragraph_topic_weighting\n paragraph_sentiment_scores[topic_num] += weighted_polarity\n\n # Following code deals with party entities (i.e. 
MPs), so skip if dealing with topic sentiment\n if (not for_topics):\n\n # Change to lower-case and strip accents\n preprocessed_sentence = self.preprocessor.changeToLower(sentence)\n preprocessed_sentence = self.preprocessor.stripAccents(sentence)\n\n # Check the entity tracker first, if we've already seen an MP previously\n for full_name, name_split in self.entity_tracker.items():\n search_forename = re.search(rf\".*{name_split[0]}.*\", preprocessed_sentence, re.IGNORECASE)\n search_surname = re.search(rf\".*{name_split[1]}.*\", preprocessed_sentence, re.IGNORECASE)\n search_full = re.search(rf\".*{full_name}.*\", preprocessed_sentence, re.IGNORECASE)\n\n if ((search_forename or search_surname) and not search_full): # If either parts of the name appear (but not together)\n party_num = name_split[2]\n party_num = int(party_num)\n if (party_num not in paragraph_sentiment_scores):\n paragraph_sentiment_scores[party_num] = 0\n paragraph_sentiment_scores[party_num]+= sentence_polarity\n\n # If the sentence contains an MP from a political party, get sentiment \n for mp_name, party_num in self.mps.items():\n party_num = int(party_num)\n search = re.search(rf\".*{mp_name}.*\", preprocessed_sentence, re.IGNORECASE)\n if (search):\n if (party_num not in paragraph_sentiment_scores):\n paragraph_sentiment_scores[party_num] = 0\n paragraph_sentiment_scores[party_num] += sentence_polarity\n\n # Separate first and last name for advanced entity searching in future sentences in paragraph\n if (mp_name not in self.entity_tracker):\n self.entity_tracker[mp_name] = [mp_name.split(\" \")[0], mp_name.split(\" \")[1], party_num]\n\n for topic, score in paragraph_sentiment_scores.items():\n if (topic not in topic_sentiment_scores):\n topic_sentiment_scores[topic] = [0,0]\n \n topic_sentiment_scores[topic][0] += score\n topic_sentiment_scores[topic][1] += 1\n\n # Returned object, key: topic index, value: score\n articleTopicSentimentsMatrix = {}\n\n # Once the text has been fully analysed, average the sentiment scores\n for topic_index, score_and_counter in topic_sentiment_scores.items():\n sentiment_score = score_and_counter[0] / score_and_counter[1]\n if (topic_index != 0):\n if (sentiment_score < -1):\n sentiment_score = -1\n elif (sentiment_score > 1):\n sentiment_score = 1\n articleTopicSentimentsMatrix[topic_index] = sentiment_score\n\n # Return list of pairs of topic/party and overall sentiment score (for article)\n return (likely_topics, articleTopicSentimentsMatrix)",
"def parse_url_headline_name_score(url, soup):\n player_value = 'none'\n score_value = []\n body_list = re.sub(\"\\n\", \" \", soup.body.text)\n body_list = body_list.split(' ')\n\n # iterating through body of article in pairs of words at a time\n for place in range(0, len(body_list) - 2):\n \n # stripping words of puncuation and changing it to uppercase\n word_1 = (re.sub('[^\\w\\d\\s\\)\\(-]', '', body_list[place]\n .strip('\\n').upper()))\n word_2 = (re.sub('[^\\w\\d\\s\\)\\(-]', '', body_list[place + 1]\n .strip('\\n').upper()))\n \n # using regex to see if pair of words forms a potential match score \n word_1_status = re.search(\"^[0-9]-[0-9]$\", word_1)\n word_2_status = re.search(\"^[0-9]-[0-9]$\", word_2)\n word_2_status_tb = re.search(\"^\\([0-9]{1}-[0-9]{1}\\)$\", word_2)\n \n # checking if potential scores satisfy above regex and \n if ((word_1_status and (word_2_status or word_2_status_tb)) and \n score_value == []):\n # checking if they form complete sets\n if max([int(word_1[0]), int(word_1[2])]) > 5 and \\\n (len(word_2) > 3 or max([int(word_2[0]), int(word_2[2])]) > 5):\n \n score_value.append(word_1)\n score_value.append(word_2)\n \n # checking following words to see if part of match score\n counter = 2\n while (re.search(\"^[0-9]-[0-9]$\", re.sub('[^\\w\\d\\s\\)\\(-]', '',\n body_list[place + counter].strip('\\n').upper())) or \n re.search(\n \"^\\([0-9]{1}-[0-9]{1}\\)$\", re.sub('[^\\w\\d\\s\\)\\(-]', '', \n body_list[place + counter].strip('\\n').upper()))):\n \n score_value.append(re.sub('[^\\w\\d\\s\\)\\(-]', '', \n body_list[place + counter].strip('\\n').upper()))\n counter = counter + 1\n \n # removing brackets and dashes to find first player mentioned \n word_1 = re.sub('[^\\w\\d\\s]', '', body_list[place].strip('\\n').upper())\n word_2 = (re.sub('[^\\w\\d\\s]', '', body_list[place + 1]\n .strip('\\n').upper()))\n word_list = word_1 + ' ' + word_2\n \n if word_list in player_dict.keys() and player_value == 'none':\n player_value = word_list\n \n # triggers when player name and match score is found \n if (player_value != 'none') and (score_value != []):\n # calculating game difference by iterating through each set \n game_difference = 0\n for set_ in score_value:\n # selecting non tie breaker scores \n if set_[0] != '(':\n game_difference = (game_difference + \n int(set_[0]) - int(set_[2]))\n \n # incrementing number of articles written by player, \n # and increasing total game difference\n player_dict[player_value][1] = player_dict[player_value][1] + 1 \n player_dict[player_value][2] = (player_dict[player_value][2] + \n abs(game_difference))\n \n # changing score value into string representation \n str_score_value = ''\n for set_ in score_value:\n str_score_value = str_score_value + set_ + ' '\n score_value = str_score_value\n break \n \n \n return [url, soup.h1.text, player_value, score_value]",
"def analyze(self, text):\n # split sentences into words\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n \n score = 0\n \n for word in tokens:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n \n return score",
"def test_half_latin_sentence_is_50_percent_latin(self):\n u = ui.Ui()\n percentage = u.search_sentence(\"pax tamasanaeiolelatinaa\")\n self.assertEqual(percentage, 50.0)",
"def theorize_text(s, classifier, data, dict_result = True):\n\n\tpredictions = classifier.decision_function([s]) #we want to know probabilities! this returns a list of lists of values\n\tguess_values = defaultdict()\n\t\n\t#populate dictionary with decisiion function per author\n\tfor index1, prediction in enumerate(predictions): #loop through predictions (f there are multiple )\n\t\tfor index2, value in enumerate(prediction): #loop through each guess and the probability\n\t\t\tguess_values[data.target_names[index2]] = value #save prediction to dictionary, getting name of author corresponding to index in prediction \n\tif dict_result == True:\n\t\treturn guess_values #return dictionary of guesses for the given string\n\telse:\n\t\toutput = \"\"\n\t\tfor author, value in guess_values.items():\n\t\t\toutput += author + \": \" + str(value)+\"\\n\\n\"\n\treturn output",
"def nltk_sentiment_analyzer(summary):\n score = SentimentIntensityAnalyzer().polarity_scores(summary)\n print(score)",
"def _compare_similarity_keyword(caption_text, keywords, tokenLemma, domain, thres_ratio = 0.5):\n stop = stopwords.words('english')\n keywords_list = []\n # Replace '-' in keyword and domain and split \n for keyword in keywords:\n keywords_list.extend(keyword.lower().replace('-', ' ').split(' '))\n domain_list = domain.lower().replace('-', ' ').split(' ')\n \n # Accumulate similarity for normalization\n accumulated_sim = 0\n sim_dict = {}\n for keyword in keywords_list:\n # Calculate similarity of each combination of keyword and domain\n if keyword not in stop: \n sim_sum = 0\n for i in domain_list:\n try:\n # Some of the similarity(keyword, i) are minus but I still keep it to show the uncorrelated\n sim = google_model.similarity(i, keyword)\n # google_model.similarity is related to upper or lower case \n accumulated_sim += sim\n sim_sum += sim\n except:\n continue\n if keyword not in sim_dict:\n sim_dict[keyword] = sim_sum\n \n # Compute frequency of keywords at the same time\n if len(sim_dict)==0:\n return None, None\n max_sim = max(sim_dict.items(), key=lambda x:x[1])[1]\n # If one word whose similarity with domain larger than a half of the maximum similarity, count it\n keywords_thres = [i for i in sim_dict.keys() if sim_dict[i] > max_sim * thres_ratio]\n keywords_freq = 0\n for i in tokenLemma:\n if i in keywords_thres:\n keywords_freq += 1\n # Normalize the accumulated similarity and keyword number by dividing total number of context\n return accumulated_sim / len(keywords), keywords_freq / len(tokenLemma)",
"def splitSentences(self,txt):\n \n txt = txt.split()\n #txt = txt.split(\"\\s\") #DM to account for longer documents in formative evaluation - change back for impression sections only\n\n #attribute side header to each corresponding sentence\n sentences = []\n wordLoc = 0\n \n\n while(wordLoc < len(txt) ):\n currentWord = txt[wordLoc]\n if( currentWord[-1] in '.?!' ):\n if( currentWord in self.exceptionTerms ):\n wordLoc += 1\n # per discussion with A.G. dropped this exception, since assuming numbers only use decimal points if there \n # are actual decimal point digits expressed and thus the period would not be the last character of the word.\n #elif( self.digits.intersection(currentWord) and \n #not set('()').intersection(currentWord)): # word doesn't include parentheses. Is this necessary?\n #wordLoc += 1\n else:\n sentences.append(unicode(\" \"+' '.join(txt[:wordLoc+1]))) \n txt = txt[wordLoc+1:]\n wordLoc = 0\n else:\n wordLoc += 1\n\n # if any texts remains (due to failure to identify a final sentence termination,\n # then take all remaining text and put into a sentence\n if( txt ):\n sentences.append(unicode(\" \"+' '.join(txt)) )\n \n #print sentences;raw_input()\n return sentences",
"def get_summary(text):\n num_words = text.count(\" \")\n num_sentences = text.count(\".\")\n keywords = keyword_extraction(text, 5)\n# summary = summarize(text, max(1, num_sentences//10))[0]\n return keywords, None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a boolean for whether solar resource requires clearsky irrad. Returns bool.
|
def clearsky(self):
if self._clearsky is None:
self._clearsky = False
for v in self.inputs.values():
self._clearsky = any((self._clearsky,
bool(v.get('clearsky', False))))
if self._clearsky:
logger.debug('Solar analysis being performed on clearsky '
'irradiance.')
return self._clearsky
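
A minimal sketch of the flag aggregation above, using a hypothetical inputs dict rather than a real SAM configuration object: the flag flips to True as soon as any input group sets 'clearsky'.

# Hypothetical stand-in for self.inputs; not a real SAM configuration.
inputs = {
    "sam_param_0": {"system_capacity": 20000},
    "sam_param_1": {"system_capacity": 5000, "clearsky": True},
}
requires_clearsky = any(bool(v.get("clearsky", False)) for v in inputs.values())
print(requires_clearsky)  # True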
|
[
"def zone_resilient(self) -> Optional[bool]:\n return pulumi.get(self, \"zone_resilient\")",
"def has_sres(self) -> bool:\n return self.check_sensi_orders((1,), MODE_RES)",
"def will_have_clear(self):\n return weather.any_status_is(self.forecast.weathers, \"sun\", self._wc_registry)",
"def is_rainy():\n if not no_rain.is_active:\n return True\n else:\n return False",
"def HasqRON(self):\n return self.__has('qRON')",
"def HasqDRR(self):\n return self.__has('qDRR')",
"def has_cachetune(self):\n return (self.l3_granularity is not None and self.l3_granularity > 0)",
"def is_reserved(self):\n return bool(self.current_cart())",
"def is_free_to_read(self):\n try:\n return self._json['is_free_to_read']\n except KeyError:\n return None\n except AttributeError:\n return None",
"def isKnownResource(self, resource):\n if self.getFirst(resource, None, None)!=None:\n return 1\n else:\n return 0",
"def sov(self) -> bool:\n return self._sov",
"def HasRON(self):\n return self.__has('RON')",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageRGBUC3___nonzero__(self)",
"def get_resident_key(self)->bool:\n return self._get_value(PREFERENCE_KEY.RK,False)",
"def chargeable(self):\n return not self.internal",
"def _has_calibration(self):\n return any(\n [\n self.calibrated_volume,\n self.aspirate_flowrate,\n self.dispense_flowrate,\n self.volume_calibration_curve,\n self.aspirate_flowrate_calibration_curve,\n self.dispense_flowrate_calibration_curve,\n ]\n )",
"def install_r(self):\n return bool(self.check_r.checkState())",
"def _check_requirements(self, resource_id):\n return len(self.cache[resource_id]) == len(self.required_meters)",
"def has_rubric(self):\n return # boolean"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a boolean for whether bifacial solar analysis is being run. Returns bool.
|
def bifacial(self):
if self._bifacial is None:
self._bifacial = False
for v in self.inputs.values():
bi_flags = ('bifaciality', 'spe_is_bifacial',
'cec_is_bifacial', '6par_is_bifacial')
bi_bools = [bool(v.get(flag, 0)) for flag in bi_flags]
self._bifacial = any(bi_bools + [self._bifacial])
return self._bifacial
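
The same pattern with several candidate flags, sketched on one hypothetical input group (the flag names are taken from the property above):

v = {"cec_is_bifacial": 1, "system_capacity": 20000}  # hypothetical input group
bi_flags = ('bifaciality', 'spe_is_bifacial', 'cec_is_bifacial', '6par_is_bifacial')
print(any(bool(v.get(flag, 0)) for flag in bi_flags))  # True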
|
[
"def is_blanc(self):\n \n return self.binning is None",
"def is_solution(self):\n return self.state.is_solution()",
"def is_beta(self):\n if self.connective in logic.DISJ:\n return True\n elif self.connective in logic.CONJ:\n return False\n elif self.connective == \"not\":\n if self.subformula1.connective in logic.DISJ:\n return False\n elif self.subformula1.connective in logic.CONJ:\n return True\n return None",
"def has_finra_bp(trade):\n processes = acm.BusinessProcess.FindBySubjectAndStateChart(trade, FINRA_STATE_CHART)\n if len(processes) > 0:\n return True\n return False",
"def is_done(self):\n return not (self.patrn_bfs_queue and self.sub_bfs_queue)",
"def is_running(self, family: str) -> bool:\n latest_analysis = self.analyses(family=family).first()\n return latest_analysis and latest_analysis.status in TEMP_STATUSES",
"def has_CFI(self):\r\n return self.debug_frame_sec is not None",
"def __bool__(self):\n return all(bool(fns) for fns in self.tasks.values())",
"def is_q_annihilator(self):\n if self.is_above_fermi:\n return 1\n return 0",
"def __bool__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageFAF33___bool__(self)",
"def aquisition_running(self):\n return bool(int(self.query(\"ACQ:STATE?\").strip()))",
"def is_microbial(self):\n return self.application in MICROBIAL",
"def checkGoalState(self): \n #check if the place is AI Lab\n return self.place == \"AI Lab\"",
"def IsRunning(self):\n\t\tmask = self.readRegister(DAY);\n\t\tif((mask & OSCRUN) == OSCRUN): \t\t\t#If oscillator = already running, do nothing.\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def is_goal(self):\r\n return np.array_equal(PuzzleState.SOLVED_PUZZLE, self.puzzle)",
"def IsRunningMacro(self):\n debug('GCSCommands.IsRunningMacro()')\n answer = self.__msgs.read(chr(8))\n answer = convertvalue(answer, bool)\n debug('GCSCommands.IsRunningMacro = %r', answer)\n return answer",
"def fan_running(self):\n if self._data['hasFan']:\n return self._data['fanData']['fanIsRunning']\n else:\n return False",
"def _get_isPeriodic(self) -> \"bool\" :\n return _core.NurbsCurve3D__get_isPeriodic(self)",
"def is_flagged(self):\n return self._flagged"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get a boolean for whether wind generation is considering icing. Returns bool.
|
def icing(self):
if self._icing is None:
self._icing = False
for v in self.inputs.values():
self._icing = any((self._icing,
bool(v.get('en_icing_cutoff', False))))
if self._icing:
logger.debug('Icing analysis active for wind gen.')
return self._icing
|
[
"def HasITD(self):\n return self.__has('ITD')",
"def is_worth_it(self):\n return True if self.immersion - self.crew * Ship.crew_immersion_multiplier > 20 else False",
"def HasIFC(self):\n return self.__has('IFC')",
"def issiso(self):\n return self.ninputs == 1 and self.noutputs == 1",
"def is_rainy():\n if not no_rain.is_active:\n return True\n else:\n return False",
"def HasSIC(self):\n return self.__has('SIC')",
"def is_irregular(self):\n return np.any([d.is_irregular for d in self])",
"def is_watering(self):\n return self._is_watering",
"def is_watering(self) -> bool:\n return self._is_watering",
"def is_iterative(self):\n return self.__is_iterative",
"def sagittalFlag(): \n slicingDim = params.WhichExperiment.Dataset.slicingInfo.slicingDim\n nucleus_index = params.WhichExperiment.Nucleus.Index[0]\n return (nucleus_index == 1) and (slicingDim == 2)",
"def is_at_cog(self):\n return np.all(self._point == self._cog)",
"def is_it_raining_at(lat, lon, conditions=None):\n if not conditions:\n conditions = current_conditions(lat, lon)\n\n precip_chance = conditions[\"precipProbability\"]\n return (precip_chance > 0.8)",
"def will_have_rain(self): \n return weather.any_status_is(self.forecast.weathers, \"rain\", self._wc_registry)",
"def __bool__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageVD24___bool__(self)",
"def __bool__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageUS2___bool__(self)",
"def __bool__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageO22___bool__(self)",
"def __is_in(self, figure):\n try:\n figure.transform_to(self.ground.frame)\n figure.to_cartesian_coords()\n self.ground.to_cartesian_coords()\n x = figure.x - self.ground.x\n y = figure.y - self.ground.y\n z = figure.z - self.ground.z\n return ((x / self.ground.space.a) ** 2 + (y / self.ground.space.b) ** 2 +\n (z / self.ground.space.c) ** 2) <= 1\n except AttributeError:\n raise LocationError(\"The operation 'is in' needs a figure with coordinates \"\n \"and a ground with a spanned space.\")",
"def __bool__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageSSRTD22___bool__(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Resolution to downscale NSRDB resource to. Returns dict | None. Option for NSRDB resource downscaling to higher temporal resolution. The config expects a str entry in the Pandas frequency format, e.g. '5min', or a dict of downscaling kwargs.
|
def downscale(self):
if self._downscale is None:
ds_list = []
for v in self.inputs.values():
ds_list.append(v.get('downscale', None))
self._downscale = ds_list[0]
ds_list = list({str(x) for x in ds_list})
if len(ds_list) > 1:
msg = ('Expecting a single unique value for "downscale" but '
'received: {}'.format(ds_list))
logger.error(msg)
raise SAMInputError(msg)
if isinstance(self._downscale, str):
self._downscale = {'frequency': self._downscale}
return self._downscale
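
A short sketch of the normalization at the end of this property: a plain frequency string from the config becomes a kwargs dict, while a dict entry passes through untouched (helper name here is illustrative only).

def normalize_downscale(entry):
    # Mirrors the isinstance check above: str -> {'frequency': str}
    if isinstance(entry, str):
        entry = {'frequency': entry}
    return entry

print(normalize_downscale('5min'))                 # {'frequency': '5min'}
print(normalize_downscale({'frequency': '5min'}))  # dict passes through unchanged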
|
[
"def regrid_downscale_static(self):\n # TODO: Get these next two from the environment if possible\n downscale_temp = True\n downscale_precip = True\n if downscale_temp or downscale_precip:\n elevation_difference = self._regrid_downscale_generate_elevation_difference()\n if downscale_temp:\n temperature = self._regrid_downscale_temperature(\n elevation_difference,\n lapse_rate=os.environ.get(\"DOWNSCALING_LAPSE_RATE\", -7.0/1000))\n else:\n temperature = self._regrid_interpolate_bilinear_array(\"air_temperature\")\n if downscale_precip:\n temperature_ref = self._regrid_downscale_temperature(\n elevation_difference,\n lapse_rate=os.environ.get(\"DOWNSCALING_LAPSE_RATE\", -7.0/1000))\n temperature_0 = self._regrid_interpolate_bilinear_array(\"air_temperature\")\n precipitation_0 = self._regrid_interpolate_bilinear_array(\"precipitation\")\n DOWNSCALING_GAMMA_FACTOR = os.environ.get(\"DOWNSCALING_GAMMA_FACTOR\", -0.07)\n precipitation = self._regrid_downscale_precipitation(\n temperature_ref,\n temperature_0,\n precipitation_0, \n gamma=DOWNSCALING_GAMMA_FACTOR)\n else:\n precipitation = self._regrid_interpolate_bilinear_array(\"precipitation\")\n forcing_file = self._finalize_forcing_for_PDD(temperature, precipitation)\n return forcing_file",
"def s_resolution(self):\n return self.get('s_resolution') * u.arcsec",
"def pool_autoscale_settings(config):\n # type: (dict) -> PoolAutoscaleSettings\n conf = pool_specification(config)\n conf = _kv_read_checked(conf, 'autoscale', {})\n ei = _kv_read_checked(conf, 'evaluation_interval')\n if util.is_not_empty(ei):\n ei = util.convert_string_to_timedelta(ei)\n else:\n ei = datetime.timedelta(minutes=15)\n scenconf = _kv_read_checked(conf, 'scenario')\n if scenconf is not None:\n mvc = _kv_read_checked(scenconf, 'maximum_vm_count')\n if mvc is None:\n raise ValueError('maximum_vm_count must be specified')\n mvipe = _kv_read_checked(\n scenconf, 'maximum_vm_increment_per_evaluation', default={})\n ndo = _kv_read_checked(\n scenconf, 'node_deallocation_option', 'taskcompletion')\n if (ndo is not None and\n ndo not in (\n 'requeue', 'terminate', 'taskcompletion', 'retaineddata')):\n raise ValueError(\n 'invalid node_deallocation_option: {}'.format(ndo))\n sli = _kv_read_checked(scenconf, 'sample_lookback_interval')\n if util.is_not_empty(sli):\n sli = util.convert_string_to_timedelta(sli)\n else:\n sli = datetime.timedelta(minutes=10)\n tr = _kv_read_checked(scenconf, 'time_ranges', default={})\n trweekday = _kv_read_checked(tr, 'weekdays', default={})\n trworkhour = _kv_read_checked(tr, 'work_hours', default={})\n scenario = PoolAutoscaleScenarioSettings(\n name=_kv_read_checked(scenconf, 'name').lower(),\n maximum_vm_count=_pool_vm_count(config, conf=mvc),\n maximum_vm_increment_per_evaluation=_pool_vm_count(\n config, conf=mvipe),\n node_deallocation_option=ndo,\n sample_lookback_interval=sli,\n required_sample_percentage=_kv_read(\n scenconf, 'required_sample_percentage', 70),\n rebalance_preemption_percentage=_kv_read(\n scenconf, 'rebalance_preemption_percentage', None),\n bias_last_sample=_kv_read(\n scenconf, 'bias_last_sample', True),\n bias_node_type=_kv_read_checked(\n scenconf, 'bias_node_type', 'auto').lower(),\n weekday_start=_kv_read(trweekday, 'start', default=1),\n weekday_end=_kv_read(trweekday, 'end', default=5),\n workhour_start=_kv_read(trworkhour, 'start', default=8),\n workhour_end=_kv_read(trworkhour, 'end', default=17),\n )\n else:\n scenario = None\n return PoolAutoscaleSettings(\n evaluation_interval=ei,\n formula=_kv_read_checked(conf, 'formula'),\n scenario=scenario,\n )",
"def s_resolution_max(self):\n rmax = self.get('s_resolution_max', default=None)\n return rmax if not rmax else rmax * u.arcsec",
"def getScaleForFile(f, config):\n if f.type == \"data\":\n return 1.\n else:\n mcScale = ( config[\"luminosity\"]*f.cross_section*f.branching_ratio / f.generated_events )\n if config.get(\"ignore-scales\", False):\n return mcScale\n else:\n return mcScale*config.get(\"scale\", 1.)*f.scale",
"def change_resolution_by(image, xyz_scales, xyz_resolution=1, \npad_to_match_res=True, err_to_higher_res=True, average_on_downsample=True, \ntruncate=False, return_true_resolution=False, **resample_kwargs):\n\n # Validate arguments.\n\n # Validate image.\n image = _validate_ndarray(image)\n\n # Validate xyz_scales.\n xyz_scales = _validate_scalar_to_multi(xyz_scales, size=image.ndim)\n for dim, scale in enumerate(xyz_scales):\n if scale < 0:\n xyz_scales[dim] = -1 / xyz_scales[dim]\n\n # Validate xyz_resolution.\n xyz_resolution = _validate_xyz_resolution(image.ndim, xyz_resolution)\n\n # Compute desired_xyz_resolution.\n desired_xyz_resolution = xyz_resolution / xyz_scales\n\n change_resolution_to_kwargs = dict(\n image=image,\n xyz_resolution=xyz_resolution,\n desired_xyz_resolution=desired_xyz_resolution,\n pad_to_match_res=pad_to_match_res,\n err_to_higher_res=err_to_higher_res,\n average_on_downsample=average_on_downsample,\n truncate=truncate,\n return_true_resolution=return_true_resolution,\n **resample_kwargs\n )\n\n return change_resolution_to(**change_resolution_to_kwargs)",
"def get_scale(self, **kwargs):\n return kwargs['r_s']",
"def DownstreamBinaryResolution(self):\r\n\t\treturn self._get_attribute('downstreamBinaryResolution')",
"def thumb_scale(self):\n if getattr(self.data, \"no_thumbs\", False):\n # Individual setting overrides ...\n return None\n thsize = getattr(self.data, \"thumb_scale\", \"\")\n if thsize:\n return thsize\n registry = getUtility(IRegistry)\n settings = registry.forInterface(ISiteSchema, prefix=\"plone\", check=False)\n thumb_scale_portlet = settings.thumb_scale_portlet\n return thumb_scale_portlet",
"def rtl_timescale(self):\n if not hasattr(self, '_rtl_timescale'):\n self._rtl_timescale = '1ps'\n return self._rtl_timescale",
"def sweep_resolution(self):\n raise NotImplementedError",
"def _simpl_resolution_to_maxpeaks(resolution, sr):\n return (sr / 2.) / resolution",
"def curve_resolution(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.resolution\", self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)",
"def db_get_ts_config():\n db_connection = iopro.connect(**db_config)\n db_cursor = db_connection.cursor()\n \n db_cursor.execute(\"select * from dbo.vTransactionStats\") # Application needs to know, minimally, first and last overall transaction dates\n result = db_cursor.fetchone()\n ts_config[\"minPurchaseDate\"] = result.minPurchaseDate\n ts_config[\"maxPurchaseDate\"] = result.maxPurchaseDate # Assumes the most recent PurchaseDate applies to all products, so zeros can be filled in appropriately for trending\n db_connection.close()\n del(db_cursor)\n del(db_connection)",
"def get_scale(self, **kwargs):",
"def image_down_res_dmap(resolution: str = \"256\", img_path: str = \"\"):\n global cropping, cropped, x_start, y_start, x_end, y_end\n cropping = False\n cropped = False\n x_start, y_start, x_end, y_end = 0, 0, 0, 0\n\n res_tuple = res[resolution]\n\n # checks if img_path was given, if not, prompts user for path\n if not img_path:\n img_path = get_image_path()\n\n data_path = img_path[:-4] + \".txt\"\n\n image = cv2.imread(img_path)\n coor = np.loadtxt(data_path)\n\n img_downscaled, rf_downscale = image_rescale(image, res_tuple)\n coor_downscaled = coor_rescale(coor, rf_downscale)\n\n # Visual check that coordinates are at correct locations\n draw_points_on_image(img_downscaled.copy(), coor_downscaled)\n\n save_path = img_path[:-8]\n img_path_save = save_path + resolution + \".JPG\"\n coor_path_save = save_path + resolution + \".txt\"\n dmap_path_save = save_path + resolution + \"_dmap.txt\"\n cv2.imwrite(img_path_save, img_downscaled)\n np.savetxt(coor_path_save, coor_downscaled)\n\n dmap = generate_density_map(img_downscaled, coor_downscaled)\n np.savetxt(dmap_path_save, dmap)\n return img_path, img_downscaled, coor_downscaled, dmap",
"def config_scale(self, cnf={}, **kwargs):\n self._scale.config(cnf, **kwargs)\n # Update self._variable limits in case the ones of the scale have changed\n self._variable.configure(high=self._scale['to'],\n low=self._scale['from'])\n if 'orient' in cnf or 'orient' in kwargs:\n self._grid_widgets()",
"def rescale_480band(bio_optical_config, abs_cff, k):\n\n # rescaling variables to BioSNICAR resolution (10nm)\n wvl_rescaled = bio_optical_config.wvl[5::10]\n abs_cff_rescaled = abs_cff[5::10]\n k_rescaled = k[5::10]\n n_rescaled = (\n bio_optical_config.n_algae * np.ones(np.size(bio_optical_config.wvl))\n )[5::10]\n\n return wvl_rescaled, abs_cff_rescaled, k_rescaled, n_rescaled",
"def scale_down(self, id, scale):\n func_db = db_api.get_function(id)\n params = scale.to_dict()\n if len(func_db.workers) <= 1:\n LOG.info('No need to scale down function %s', id)\n return\n\n LOG.info('Starting to scale down function %s, params: %s', id, params)\n\n self.engine_client.scaledown_function(id, count=params['count'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the SAM input file(s) (JSON/JSON5/YAML/TOML) and return as a dictionary.
|
def inputs(self):
if self._inputs is None:
self._inputs = {}
for key, config in self.items():
# key is ID (i.e. sam_param_0) that matches project points json
# fname is the actual SAM config file name (with path)
if isinstance(config, str):
if not os.path.exists(config):
raise IOError('SAM config file does not exist: "{}"'
.format(config))
else:
config = load_config(config)
if not isinstance(config, dict):
raise RuntimeError('SAM config must be a file or a '
'pre-extracted dictionary, but got: {}'
.format(config))
SAMInputsChecker.check(config)
self._inputs[key] = config
return self._inputs
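
A self-contained sketch of the same load-or-passthrough logic. The json module stands in for the project's load_config helper and a temp file makes it runnable; the keys and values are illustrative, not a real reV API call.

import json
import os
import tempfile

# Write a tiny SAM config to a temp file so the sketch is runnable end-to-end.
tmp = tempfile.NamedTemporaryFile("w", suffix=".json", delete=False)
json.dump({"system_capacity": 20000, "clearsky": True}, tmp)
tmp.close()

configs = {"sam_param_0": tmp.name,                    # file path entry
           "sam_param_1": {"system_capacity": 5000}}   # pre-extracted dict entry
inputs = {}
for key, config in configs.items():
    if isinstance(config, str):
        if not os.path.exists(config):
            raise IOError('SAM config file does not exist: "{}"'.format(config))
        with open(config) as f:
            config = json.load(f)  # stand-in for load_config (JSON only here)
    if not isinstance(config, dict):
        raise RuntimeError('SAM config must be a file or a pre-extracted dict')
    inputs[key] = config

print(sorted(inputs))  # ['sam_param_0', 'sam_param_1']
os.unlink(tmp.name)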
|
[
"def _read_input_file(self):\n with open(self.input_path) as input_file:\n return json.load(input_file)",
"def input_files():\n input_patterns = {} # The input values and expected results\n\n # do traversal of input_files\n absolute_path_prefix = path.dirname(path.realpath(__file__)) # Find the test directory of filesystem\n for filename in listdir(path.join(absolute_path_prefix, \"..\", \"input_files\")):\n name, ext = path.splitext(filename) # reveal pattern name\n\n with open(path.join(absolute_path_prefix, \"..\", \"input_files\", filename), \"r\") as json_data:\n # add each file's pattern-dict to collected-dict\n input_patterns[name] = json.load(json_data)\n\n return input_patterns",
"def parse(input_file):\n # TODO: is json or xml more suitable for the input file format?\n parameters = dict()\n\n try:\n # open and parse the file\n pass\n except FileNotFoundError:\n print(\"Input file '%s' not found\" % input_file)\n sys.exit(-1)\n # add other exceptions\n except:\n print(\"Unexpected error!\")\n traceback.print_exc()\n sys.exit(-1)\n finally:\n # close the file and maybe clean up\n pass\n\n return parameters",
"def gather_files():\n return glob.glob(\"input/*.json\")",
"def get_input_files(description):\n log.info(\"fixing input files in description\")\n files = {}\n if description['inFiles'] and description['inFiles'] != \"NULL\":\n in_files = split(description[\"inFiles\"])\n l = len(in_files)\n ddm_endpoint = split(description.get(\"ddmEndPointIn\"), min_len=l)\n destination_se = split(description.get(\"destinationSE\"), min_len=l)\n dispatch_dblock = split(description.get(\"dispatchDblock\"), min_len=l)\n dispatch_dblock_token = split(description.get(\"dispatchDBlockToken\"), min_len=l)\n datasets = split(description.get(\"realDatasetsIn\"), min_len=l, fill_last=True)\n dblocks = split(description.get(\"prodDBlocks\"), min_len=l)\n dblock_tokens = split(description.get(\"prodDBlockToken\"), min_len=l)\n size = split(description.get(\"fsize\"), min_len=l)\n c_sum = split(description.get(\"checksum\"), min_len=l)\n scope = split(description.get(\"scopeIn\"), min_len=l, fill_last=True)\n guids = split(description.get(\"GUID\"), min_len=l, fill_last=True)\n\n for i, f in enumerate(in_files):\n if f is not None:\n files[f] = {\n \"ddm_endpoint\": ddm_endpoint[i],\n \"storage_element\": destination_se[i],\n \"dispatch_dblock\": dispatch_dblock[i],\n \"dispatch_dblock_token\": dispatch_dblock_token[i],\n \"dataset\": datasets[i],\n \"dblock\": dblocks[i],\n \"dblock_token\": dblock_tokens[i],\n \"size\": size[i],\n \"checksum\": c_sum[i],\n 'scope': scope[i],\n \"guid\": guids[i]\n }\n return files",
"def _read(self, file_path: str) -> Iterable[Dict[str, Any]]:\n pass",
"def load_all_sample_data() -> Dict[str, Dict]:\n sample_data = {}\n for file_path in glob(join(SAMPLE_DATA_DIR, '*.json')):\n name = splitext(basename(file_path))[0]\n sample_data[name] = json.load(open(file_path))\n return sample_data",
"def json_input():\n\n data = json.load(sys.stdin)\n global ID # pylint: disable=global-statement\n ID = data.get('files')[0][0] # (id, file)\n # json.dump({'Reason': 'OK', 'Code': 0, 'id': ID,\n # 'Listing': pdm_gfal_ls(str(data.get('files')[0][1]), **data.get('options', {}))},\n # sys.stdout)\n # sys.stdout.write('\\n')\n # sys.stdout.flush()\n obj = {'Reason': 'OK', 'Code': 0, 'id': ID,\n 'Listing': pdm_gfal_ls(str(data.get('files')[0][1]), **data.get('options', {}))}\n dump_and_flush(obj)",
"def make_seqdict(input_file, format='fasta'):\n if is_gzipped(input_file):\n try:\n seq_handle = gzip.open(input_file, 'rb')\n except IOError:\n print('Cannot open fasta file')\n sys.exit()\n else:\n try:\n seq_handle = open(input_file, 'r')\n except IOError:\n print('Cannot open fasta file')\n sys.exit()\n\n seq_dict = SeqIO.to_dict(SeqIO.parse(seq_handle, format))\n return seq_dict",
"def read_json_files():\n\n jsons = dict()\n with open('json_files/config.json') as file:\n data_conf = json.load(file)\n jsons['base_url'] = data_conf['base_url']\n jsons['implicit_wait'] = data_conf['implicit_wait']\n jsons['os'] = data_conf['os']\n jsons['is_headless'] = (data_conf['headless'] == 'True')\n\n with open('json_files/state.json') as file:\n data_states = json.load(file)\n jsons['list_states'] = data_states['states']\n\n with open('json_files/district.json') as file:\n jsons['dict_districts'] = json.load(file)\n\n with open('json_files/sub_district.json') as file:\n jsons['dict_sub_districts'] = json.load(file)\n\n with open('json_files/gram_panchayat.json') as file:\n jsons['dict_gram_panchayats'] = json.load(file)\n\n with open('json_files/village.json') as file:\n jsons['dict_villages'] = json.load(file)\n\n return jsons",
"def get_openapi(src_file: Optional[TextIO] = None) -> dict[str, Any]:\n if src_file is None:\n return get_openapi_dict()\n else:\n return json.load(src_file)",
"def get_input_contents(self):\n try:\n ret_files = []\n return ret_files\n except Exception as ex:\n self.logger.error(ex)\n self.logger.error(traceback.format_exc())\n raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc()))",
"def handle_seq_get():\n if SEQRoot is None:\n files = []\n else:\n files = util.listAllFiles(SEQRoot, \".txt\")\n\n return json.dumps(sorted(files))",
"def GetInputs(file_path):\r\n ajson = open(file_path,'r')\r\n input_json = json.load(ajson)\r\n start_url = input_json['start']\r\n end_url = input_json['end']\r\n start_title = GetTitleOfLink(start_url)\r\n end_title = GetTitleOfLink(end_url)\r\n ajson.close()\r\n return start_title,end_title",
"def inputFiles(self):\n pass",
"def read_json_lines_files(self) -> Dict[str, pd.DataFrame]:\n return dict(zip(\n self.file_names,\n (pd.DataFrame(self._read_json_lines_file(file)) for file in self.file_paths),\n ))",
"def extract_json_from_file_or_input(self):\n file_fs = self.extract_argument(self.FILE_ARGUMENT)\n input_json = self.extract_argument(self.INPUT_ARGUMENT)\n if input_json:\n try:\n data = json.loads(input_json)\n except json.JSONDecodeError:\n raise CLICommandFailedException(\n \"Invalid Json file\")\n elif file_fs:\n try:\n data = json.load(file_fs)\n except json.JSONDecodeError:\n raise CLICommandFailedException(\n \"Invalid Json file\")\n else:\n raise CLICommandFailedException(\n \"Please provide input json using \"\n \"-f/--file or -i/--input\")\n return data",
"def readin(self):\n \n if self.filename.endswith('.fits'):\n # Assumes Science Verification data\n self.read_SV_fits()\n elif self.filename.endswith('.npz'): \n # Assumes DES Y3 Gold data\n self.read_Y3_2_2_npz()\n else: \n print('Unrecognized file type: ' + self.filename)",
"def asDict(self):\r\n personalityAsDict = { \"personality\": [] }\r\n \r\n for file in self.files:\r\n personalityAsDict['personality'].append(file.asDict) \r\n return personalityAsDict"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run checks on a SAM input json config.
|
def check(cls, config):
c = cls(config)
c._run_checks()
|
[
"def test_parse_config(self):\n with open(self.config_file_json, 'w+',\n encoding=\"utf-8\", errors=\"ignore\") as config_f:\n json.dump({\n 'analyzer': ['--analyzers', 'clangsa'],\n 'parse': ['--trim-path-prefix', '/workspace']},\n config_f)\n\n out, returncode = self.__run_analyze(self.config_file_json)\n\n self.assertEqual(returncode, 0)\n self.assertIn(\"clangsa analyzed simple.cpp\", out)\n self.assertNotIn(\"clang-tidy analyzed simple.cpp\", out)\n\n out, returncode = self.__run_parse(self.config_file_json)\n print(out)\n self.assertEqual(returncode, 2)",
"def train(self, config: Dict[str, Any]) -> None:\n try:\n _ = self.validator_train(config)\n except JsonSchemaException as ex:\n raise PipelineConfigError(ex.message) from ex",
"def config_file_check(config_json, no_ORCID, no_GoogleScholar, no_Crossref, no_PubMed):\n \n schema = copy.deepcopy(tracker_schema.config_schema)\n if no_ORCID:\n del schema[\"properties\"][\"ORCID_search\"]\n schema[\"required\"].remove(\"ORCID_search\")\n if no_Crossref and no_GoogleScholar:\n del schema[\"properties\"][\"Crossref_search\"]\n schema[\"required\"].remove(\"Crossref_search\")\n if no_PubMed:\n del schema[\"properties\"][\"PubMed_search\"]\n schema[\"required\"].remove(\"PubMed_search\")\n \n pattern_messages = {\"ORCID\":\" is not a valid ORCID. It must match the regex \\d{4}-\\d{4}-\\d{4}-\\d{3}[0,1,2,3,4,5,6,7,8,9,X]\"}\n tracker_validate(instance=config_json, schema=schema, pattern_messages=pattern_messages, format_checker=jsonschema.FormatChecker())",
"def main(argv):\n if len(argv) < 2:\n \"print(<Usage: python skeleton_json_parser.py <path to json files>>, sys.stderr)\" \n sys.exit(1)\n # loops over all .json files in the argument\n for f in argv[1:]:\n if isJson(f):\n parseJson(f)\n print(f\"Success parsing {f}\")",
"def __check_config__(self, config):\n self.__check_failed__(\"Not implementation for __check_config__\")",
"def test_cmd_multiple_checker_config_resolution(self):\n\n with open(self.config_file_json, 'w+',\n encoding=\"utf-8\", errors=\"ignore\") as config_f:\n config_f.write(\"\")\n\n out, returncode = self.__run_analyze(self.config_file_json,\n [\"--checker-config\",\n \"clangsa:\"\n \"core.CallAndMessage:\"\n \"CXXDeallocationArg=true\",\n \"--checker-config\",\n \"clangsa:\"\n \"core.CallAndMessage:\"\n \"ParameterCount=true\",\n \"--verbose\", \"debug_analyzer\"])\n\n self.assertNotEqual(returncode, 1)\n self.assertIn(\"core.CallAndMessage:CXXDeallocationArg=true\", out)\n self.assertIn(\"core.CallAndMessage:ParameterCount=true\", out)",
"def _validate_input(self) -> None:\n\n if self.config[\"input\"][\"data_type\"] == \"sftp\":\n sftp_config_keys = [\n \"sftp_host\",\n \"sftp_username\",\n \"sftp_source_path\",\n \"sftp_private_key\",\n ]\n for key in sftp_config_keys:\n if key not in self.config[\"input\"][\"config\"]:\n raise ValueError(f\"Key not present in the config: {key}\")\n\n elif self.config[\"input\"][\"data_type\"] == \"local\":\n if \"source_path\" not in self.config[\"input\"][\"config\"]:\n raise ValueError(\"Key not present in the config: source_path\")\n else:\n # Check if local_directory is absolute path. If not, then set it.\n local_directory = self.config[\"input\"][\"config\"][\"source_path\"]\n if isinstance(local_directory, list):\n local_directory = [\n self._get_absolute_path(local_path)\n for local_path in local_directory]\n for local_path in local_directory:\n if not os.path.exists(local_path):\n raise ValueError(f\"Path does not exist: {local_path}\")\n else:\n local_directory = utils.get_absolute_path(local_directory)\n if not os.path.exists(local_directory):\n raise ValueError(f\"Path does not exist: {local_directory}\")\n self.config[\"input\"][\"local_directory\"] = local_directory\n\n # Raise error if data_format it not valid input formats\n if self.config[\"input\"][\"data_format\"] not in self.valid_input_formats:\n raise ValueError(\"Invalid value for key in input: data_format\")\n\n elif self.config[\"input\"][\"data_type\"] == \"mock\":\n self._update_mock_data()\n\n else:\n raise ValueError(\"Invalid value for the key: data_location\")",
"def test_check_config(self):\n # We assume that the source file path is at least 2 levels deep. This\n # is true, since the test workspace directory is under the repo root\n # and it also contains some sub-directories.\n split_path = self.source_file.split(os.sep)\n path_prefix = os.path.join(os.sep, *split_path[:3])\n trimmed_file_path = os.path.join(*split_path[3:])\n\n with open(self.config_file_json, 'w+',\n encoding=\"utf-8\", errors=\"ignore\") as config_f:\n json.dump({\n 'analyzer': ['--analyzers', 'clangsa'],\n 'parse': ['--trim-path-prefix', path_prefix]},\n config_f)\n\n check_cmd = [self._codechecker_cmd, \"check\",\n \"-l\", self.build_json,\n \"-o\", self.reports_dir,\n \"--config\", self.config_file_json]\n\n # Run analyze.\n process = subprocess.Popen(\n check_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n encoding=\"utf-8\",\n errors=\"ignore\")\n out, _ = process.communicate()\n\n print(out)\n self.assertEqual(process.returncode, 2)\n self.assertIn(\"clangsa analyzed simple.cpp\", out)\n self.assertNotIn(\"clang-tidy analyzed simple.cpp\", out)\n\n self.assertNotIn(self.source_file, out)\n\n self.assertIn(trimmed_file_path, out)",
"def check_config(config):\n parameters = [\n \"output_dir\",\n \"timestamp\",\n \"name\",\n \"bbox\",\n \"epsg\",\n \"cloud_coverage\",\n \"ndvi_year\",\n \"output_dir\",\n ]\n for par in parameters:\n assert par in config.keys(), f\"Parameter '{par}' missing in config file.\"",
"def main():\n yaml = YAML()\n for data in list(yaml.load_all(sys.stdin)):\n if data is not None:\n # policy 1: validate_required_for_container.\n required = ['name', 'image', 'resources']\n if not validate_required_for_container(data=data, c_req=required):\n # policy 1: failed.\n sys.exit(2)",
"def alm_validate_configurations(self):\n pass",
"def verifyConfiguration(self):",
"def test_check_configs(configs):\n # GIVEN a config file\n # WHEN checking the configs\n config.check_configs(configs)\n # THEN assert that the function exits without exceptions\n assert True",
"def test_parsing_ok(self):\n\n config_dict = {\n \"source\": {\n \"type\": \"mssql\",\n \"serviceName\": \"test_mssql\",\n \"serviceConnection\": {\n \"config\": {\n \"type\": \"Mssql\",\n \"database\": \"master\",\n \"username\": \"sa\",\n \"password\": \"MY%password\",\n \"hostPort\": \"random:1433\",\n }\n },\n \"sourceConfig\": {\"config\": {\"type\": \"DatabaseMetadata\"}},\n },\n \"sink\": {\"type\": \"metadata-rest\", \"config\": {}},\n \"workflowConfig\": {\n \"loggerLevel\": \"WARN\",\n \"openMetadataServerConfig\": {\n \"hostPort\": \"http://localhost:8585/api\",\n \"authProvider\": \"no-auth\",\n },\n },\n }\n\n self.assertIsNotNone(parse_workflow_config_gracefully(config_dict))",
"def _read_config(self, jsonconfig):\n \n space = jsonconfig['hyperparameters']['space']\n defaults = [(x['config']['name'], x['config']['default']) for x in space]\n values = []\n for x in space:\n if x['class_name'] == 'Boolean':\n values.append([True, False])\n elif x['class_name'] == 'Int':\n min_value = x['config']['min_value']\n max_value = x['config']['max_value']\n step = x['config']['step']\n v = [x for x in range(min_value, max_value+step, step)]\n values.append(v)\n else:\n values.append(x['config']['values'])\n \n conditions = [x['config']['conditions'] if 'conditions' in x['config'].keys() else [] for x in space]\n self._config = [(x[0], x[1], y, z) for x, y, z in zip(defaults, values, conditions)]",
"def main():\n module = AnsibleModule(\n argument_spec=dict(\n var1=dict(type='dict', required=True)\n ),\n supports_check_mode=True\n )\n\n dic_temp = module.params['var1']\n for i in dic_temp.keys():\n in_new = module.params['var1'][i]['in_new']\n in_old = module.params['var1'][i]['in_old']\n dic_temp[i]['in_old'] = in_new\n dic_temp[i]['in_new'] = in_old\n out_new = module.params['var1'][i]['out_new']\n out_old = module.params['var1'][i]['out_old']\n dic_temp[i]['out_old'] = out_new\n dic_temp[i]['out_new'] = out_old\n\n\n if module.check_mode:\n module.exit_json(changed=False)\n\n module.exit_json(meta=dic_temp)",
"def validate(self):\n\n # load schema and validate it via jsonschema\n schema_path = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), NetworkerRunner.SCHEMA_FILE)\n schema = json.load(open(schema_path))\n jsonschema.validate(self.config, schema)",
"def _do_validate_config(self, cfg, cfg_block):\n return True",
"def check_config( self ) :\n\n self._logger.info( 'exercising execution engine...' )\n\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print braille data visually. (We don't use it in the final solution)
|
def print_braille_row(data):
    assert len(data) == NUM_COLS
    rows = ["", "", "", ""]
    for byte in data:
        byte = ord(byte)
        # Left column of the cell: dots 1, 2, 3, 7
        rows[0] += "O" if byte & (1 << 0) else "."
        rows[1] += "O" if byte & (1 << 1) else "."
        rows[2] += "O" if byte & (1 << 2) else "."
        rows[3] += "O" if byte & (1 << 6) else "."
        # Right column of the cell: dots 4, 5, 6, 8
        rows[0] += "O" if byte & (1 << 3) else "."
        rows[1] += "O" if byte & (1 << 4) else "."
        rows[2] += "O" if byte & (1 << 5) else "."
        rows[3] += "O" if byte & (1 << 7) else "."
        # Gap between cells
        rows[0] += " "
        rows[1] += " "
        rows[2] += " "
        rows[3] += " "
    # Print all the rows
    print(rows[0])
    print(rows[1])
    print(rows[2])
    print(rows[3])
    print("")
|
[
"def printBeskjed():\n print(\"Hvilken kolonne er tallet ditt i? (v/m/h) \") #Printer ut en beskjed.",
"def panda(self):\n print\n print 32 * ' ' + \".;;.\"\n print 31 * ' ' + \"/;;;;\\ ___ .;;. \" + \\\n Fore.GREEN + \" |\\\\\" + Fore.RESET\n print 30 * ' ' + \"|;(;;;-\\\"\\\" `'-.,;;;;;\\\\ \" + \\\n Fore.GREEN + \" +-+\" + Fore.RESET\n print 31 * ' ' + \"\\;'\" + 12 * ' ' + \"';;;);/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 31 * ' ' + \"/\" + 16 * ' ' + \"\\;;' \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 30 * ' ' + \"/ .;. .;. \\\\ \" + \\\n Fore.GREEN + \" |X| ___\" + Fore.RESET\n print 30 * ' ' + \"| ;;o;; ;;o;; | \" + \\\n Fore.GREEN + \" +-+ /MMMMMA.\" + Fore.RESET\n print 30 * ' ' + \"; '\\\"-'` `'-\\\"' | \" + \\\n Fore.GREEN + \" |X| /____ \" + Fore.RESET\n print 30 * ' ' + \"/\\ ._. / \" + \\\n Fore.GREEN + \" |X| / `VMMMA.\" + Fore.RESET\n print 28 * ' ' + \";;;;;_ ,_Y_, _.' \" + \\\n Fore.GREEN + \" |X|/ \" + Fore.RESET\n print 27 * ' ' + \"/;;;;;\\`--.___.--;. \" + \\\n Fore.GREEN + \" +-+\" + Fore.RESET\n print 26 * ' ' + \"/|;;;;;;;.__.;;;. \\\\\\\\ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 25 * ' ' + \"; \\;;;;;;;;;;;;;;\\ ;\\__ .;. \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 25 * ' ' + \"| ';;;;;;;;=;;;;' |-__;;;;/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 25 * ' ' + \"| `\\\"\\\"` .---._ /;/;;\\;;/ \" + \\\n Fore.GREEN + \" +-+\" + Fore.RESET\n print 24 * ' ' + \"/ ; /;;;;;;;-;/;;/|;/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 24 * ' ' + \"\\_,\\ |;;;;;;;;;;;;| | \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 28 * ' ' + \"'-...--';;;;;;;;;;;;\\/ \" + \\\n Fore.GREEN + \" |X|\" + Fore.RESET\n print 37 * ' ' + \"`\\\"\\\"\\\"` `\\\"` \"\n print\n print 30 * ' ' + \"Thanks for flying\" \n print Fore.GREEN + 27 * ' ' + \"B O N A P A R T I C L E\" + Fore.RESET\n print Fore.YELLOW + 27 * ' ' + \"-----------------------\" + Fore.RESET\n print",
"def show_in_console(self):\n print(' '+'_' * self.height * 2)\n for i in range(self.width):\n if i == 0:\n print(' .'+''.join(self.laby[i]))\n else:\n print(' '+'|'+''.join(self.laby[i]))",
"def show(self):\n for c in self.cards:\n print(c)",
"def print_cards(self):\n print(self, '\\b:\\t', end='')\n print('Cards : {}\\n'.format(self._cards))",
"def output(blocked):\n print('''digraph blockers {\n layout=neato;\n overlap=false;\n sep=\"+1\";''')\n for issue, blocked_info in blocked.iteritems():\n special = not blocked_info['is-blocked-by']\n if special:\n print('{}'.format(node(issue, special)))\n for out_issue in blocked_info[\"blocks\"]:\n print('{}'.format(node(out_issue)))\n print('{} -> {};'.format(node(issue), node(out_issue)))\n# for in_issue in blocked_info[\"is-blocked-by\"]:\n# print '\"{}\" -> \"{}\";'.format(in_issue, issue)\n print('}')",
"def printballots(self):\n\n print len(self.ballots), 'ballots:'\n for ballot in self.ballots:\n for ls in ballot:\n if (len(ls) == 1):\n print ls[0],\n else:\n print '(' + '/'.join(ls) + ')',\n print\n print",
"def dump(self):\n print(\"ROB\".ljust(48, '=').rjust(80,'='))\n temp = []\n for i in range(len(self.q)):\n if self.head == i and self.tail == i:\n prefix = \"head/tail->\"\n elif self.head == i:\n prefix = \"head------>\"\n elif self.tail == i:\n prefix = \"tail------>\"\n else:\n prefix = \" \"\n temp.append(f\"{prefix} ROB{i} {self.q[i]}\")\n if 0 != (len(temp) % 2):\n temp.append(\"\".ljust(20,' '))\n\n for i in range(0, len(temp), 2):\n print(temp[i].ljust(40,' ') ,end='')\n print(temp[i+1].ljust(40,' '))\n print()",
"def print(self):\n for card in self.deck:\n print(card)",
"def PrintDNA(self):\n print(\"\\nDNA strings:\")\n for i in self.data:\n print(\"{}\".format(i))",
"def parse_braille_row(data):\n assert len(data) == NUM_COLS\n text = \"\"\n\n for byte in data:\n byte = ord(byte)\n brl_chr = 0\n brl_chr |= BRL_DOT1 if byte & (1 << 0) else 0\n brl_chr |= BRL_DOT2 if byte & (1 << 1) else 0\n brl_chr |= BRL_DOT3 if byte & (1 << 2) else 0\n brl_chr |= BRL_DOT4 if byte & (1 << 3) else 0\n brl_chr |= BRL_DOT5 if byte & (1 << 4) else 0\n brl_chr |= BRL_DOT6 if byte & (1 << 5) else 0\n brl_chr |= BRL_DOT7 if byte & (1 << 6) else 0\n brl_chr |= BRL_DOT8 if byte & (1 << 7) else 0\n\n try:\n text += chr(brl_encoding[brl_chr])\n except:\n text += \"?\"\n\n print text",
"def phits_print(self):\n\t\tx = \" \".join(str(i) for i in self.x)\n\t\ty = \" \".join(str(i) for i in self.y)\n\t\tz = \" \".join(str(i) for i in self.z)\n\t\ttxt = \\\n\t\t\tf\" {self.sn} {self.trn} \" + \\\n\t\t\tf\"{self.symbol} {x} {y} {z}\" + \\\n\t\t\tf\" $ name: '{self.name}' \" + \\\n\t\t\t\"(Rectangular solid) [x_min x_max] [y_min y_max] [z_min z_max]\"\n\n\t\tif self.trn != \"\":\n\t\t\ttxt += f\" with tr{self.trn}\"\n\t\treturn txt",
"def show(self):\n\n #finds every element and stores it in order\n elements = [[0 for i in range(self.n)] for j in range(self.n)]\n for i in range(self.n * self.n):\n elements[self.array[0,i]][self.array[1,i]] = self.array[2,i]\n\n #prints the table\n for i in range(self.n):\n line = \"\"\n for j in range(self.n):\n line += str(elements[i][j])\n if j != self.n - 1:\n line += \"|\"\n print(line)\n print()",
"def showdata(self):\n print self.data",
"def print(self):\n print('(', end='')\n self.printBST()\n print(')', end=' ')",
"def print_data(x,y,results,A,ibrav,ylabel=\"E\"):\n if ibrav in (1,2,3): # cubic systems (a,a,a)\n print (\"a or V\",\"\\t\\t\\t\",ylabel,\"\\t\\t\\t\",ylabel+\"fit\",\"\\t\\t\\t\",ylabel+\"-\"+ylabel+\"fit\")\n for i in range(0,len(y)):\n s=sum(results[0]*A[i])\n print (\"{:.10e}\".format(x[i,0]),\"\\t\", \n \"{:.10e}\".format(y[i]),\"\\t\", \"{:.10e}\".format(s),\"\\t\", \"{:.10e}\".format(y[i]-s))\n elif ibrav in (4,6,7): # hexagonal or tetragonal systems (a,a,c)\n print (\"a\",\"\\t\\t\\t\",\"c\",\"\\t\\t\\t\",ylabel,\"\\t\\t\\t\",ylabel+\"fit\",\"\\t\\t\\t\",ylabel+\"-\"+ylabel+\"fit\")\n for i in range(0,len(y)):\n s=sum(results[0]*A[i])\n print (\"{:.10e}\".format(x[i,0]),\"\\t\", \"{:.10e}\".format(x[i,2]),\"\\t\", \n \"{:.10e}\".format(y[i]),\"\\t\", \"{:.10e}\".format(s),\"\\t\", \"{:.10e}\".format(y[i]-s))\n elif ibrav in (8,9,10,11): # orthorombic systems (a,b,c)\n print (\"a\",\"\\t\\t\\t\",\"b\",\"\\t\\t\\t\",\"c\",\"\\t\\t\\t\",ylabel,\"\\t\\t\\t\",ylabel+\"fit\",\"\\t\\t\\t\",ylabel+\"-\"+ylabel+\"fit\")\n for i in range(0,len(y)):\n s=sum(results[0]*A[i])\n print (\"{:.10e}\".format(x[i,0]),\"\\t\", \"{:.10e}\".format(x[i,1]),\"\\t\", \"{:.10e}\".format(x[i,2]),\"\\t\", \n \"{:.10e}\".format(y[i]),\"\\t\", \"{:.10e}\".format(s),\"\\t\", \"{:.10e}\".format(y[i]-s))\n else:\n print (\"ibrav not implememnted yet\")",
"def print_board():\n \n print \"\"\n print \" | | \"\n print \" \" + grid_status[(1,1)] + \" | \" + grid_status[(1,2)] + \" | \" + grid_status[(1,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(2,1)] + \" | \" + grid_status[(2,2)] + \" | \" + grid_status[(2,3)]\n print \"___|___|___\"\n print \" | | \"\n print \" \" + grid_status[(3,1)] + \" | \" + grid_status[(3,2)] + \" | \" + grid_status[(3,3)]\n print \" | | \"\n print \"\"",
"def print_melon(melon_data):\n\n for melon, info in melon_data.items():\n print \"\"\"%s\n seedless: %s\n price: %d\n flesh_color: %s\n weight: %s\n rind_color: %s\"\"\" % (melon, info[1],\n info[0], info[2],\n info[4], info[3])",
"def print_ballot(self, b, count=3):\n print \"Ballot worth %f\"%(b.get_value())\n num = 0\n for cand in b.list:\n print \"%s\"%(cand._name)\n num+=1\n if num==count:\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
We get braille data and parse it as English, using the 'en_nabcc' encoding from the BRLTTY project.
|
def parse_braille_row(data):
    assert len(data) == NUM_COLS
    text = ""
    for byte in data:
        byte = ord(byte)
        # Map the hardware bit order onto BRLTTY dot constants
        brl_chr = 0
        brl_chr |= BRL_DOT1 if byte & (1 << 0) else 0
        brl_chr |= BRL_DOT2 if byte & (1 << 1) else 0
        brl_chr |= BRL_DOT3 if byte & (1 << 2) else 0
        brl_chr |= BRL_DOT4 if byte & (1 << 3) else 0
        brl_chr |= BRL_DOT5 if byte & (1 << 4) else 0
        brl_chr |= BRL_DOT6 if byte & (1 << 5) else 0
        brl_chr |= BRL_DOT7 if byte & (1 << 6) else 0
        brl_chr |= BRL_DOT8 if byte & (1 << 7) else 0
        # Look up the dot pattern in the en_nabcc table
        try:
            text += chr(brl_encoding[brl_chr])
        except KeyError:
            text += "?"
    print(text)
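
A miniature of the table lookup with made-up dot constants and a two-entry encoding table instead of the real en_nabcc data, just to show how a dot pattern maps to a character.

BRL_DOT1, BRL_DOT2 = 0x01, 0x02                    # hypothetical constants

brl_encoding = {BRL_DOT1: ord('a'),                # dot 1 -> 'a'
                BRL_DOT1 | BRL_DOT2: ord('b')}     # dots 1 and 2 -> 'b'

pattern = BRL_DOT1 | BRL_DOT2
print(chr(brl_encoding.get(pattern, ord('?'))))    # prints 'b'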
|
[
"def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ':\n self.trans[char] = 'A'\n for char in 'ȀǞ':\n self.trans[char] = 'Ä'\n self.trans['Ǻ'] = 'Å'\n self.trans['Ä'] = 'Ae'\n self.trans['Å'] = 'Aa'\n for char in 'àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ':\n self.trans[char] = 'a'\n for char in 'ȁǟ':\n self.trans[char] = 'ä'\n self.trans['ǻ'] = 'å'\n self.trans['ä'] = 'ae'\n self.trans['å'] = 'aa'\n for char in 'ḂḄḆƁƂ':\n self.trans[char] = 'B'\n for char in 'ḃḅḇƀɓƃ':\n self.trans[char] = 'b'\n for char in 'ĆĈĊÇČƇ':\n self.trans[char] = 'C'\n for char in 'ćĉċçčƈȼ':\n self.trans[char] = 'c'\n self.trans['Ḉ'] = 'Ç'\n self.trans['ḉ'] = 'ç'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ĎḊḌḎḐḒĐƉƊƋ':\n self.trans[char] = 'D'\n for char in 'ďḋḍḏḑḓđɖɗƌ':\n self.trans[char] = 'd'\n for char in 'ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ':\n self.trans[char] = 'E'\n for char in 'ỀẾỄỆỂ':\n self.trans[char] = 'Ê'\n for char in 'èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ':\n self.trans[char] = 'e'\n for char in 'ềếễệể':\n self.trans[char] = 'ê'\n for char in 'ḞƑ':\n self.trans[char] = 'F'\n for char in 'ḟƒ':\n self.trans[char] = 'f'\n for char in 'ǴḠĞĠĢǦǤƓ':\n self.trans[char] = 'G'\n for char in 'ǵḡğġģǧǥɠ':\n self.trans[char] = 'g'\n self.trans['Ĝ'] = 'Gx'\n self.trans['ĝ'] = 'gx'\n for char in 'ḢḤḦȞḨḪH̱ĦǶ':\n self.trans[char] = 'H'\n for char in 'ḣḥḧȟḩḫ̱ẖħƕ':\n self.trans[char] = 'h'\n for char in 'IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ':\n self.trans[char] = 'I'\n for char in 'ıìȉíîĩḭïḯīĭȋįǐiịỉɨ':\n self.trans[char] = 'i'\n for char in 'ĴJ':\n self.trans[char] = 'J'\n for char in 'ɟĵ̌ǰ':\n self.trans[char] = 'j'\n for char in 'ḰǨĶḲḴƘ':\n self.trans[char] = 'K'\n for char in 'ḱǩķḳḵƙ':\n self.trans[char] = 'k'\n for char in 'ĹĻĽḶḸḺḼȽŁ':\n self.trans[char] = 'L'\n for char in 'ĺļľḷḹḻḽƚłɫ':\n self.trans[char] = 'l'\n for char in 'ḾṀṂ':\n self.trans[char] = 'M'\n for char in 'ḿṁṃɱ':\n self.trans[char] = 'm'\n for char in 'ǸŃÑŅŇṄṆṈṊŊƝɲȠ':\n self.trans[char] = 'N'\n for char in 'ǹńñņňṅṇṉṋŋɲƞ':\n self.trans[char] = 'n'\n for char in 'ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ':\n self.trans[char] = 'O'\n for char in 'òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ':\n self.trans[char] = 'o'\n for char in 'ȌŐȪ':\n self.trans[char] = 'Ö'\n for char in 'ȍőȫ':\n self.trans[char] = 'ö'\n for char in 'ỒỐỖỘỔȎ':\n self.trans[char] = 'Ô'\n for char in 'ồốỗộổȏ':\n self.trans[char] = 'ô'\n for char in 'ṔṖƤ':\n self.trans[char] = 'P'\n for char in 'ṕṗƥ':\n self.trans[char] = 'p'\n self.trans['ᵽ'] = 'q'\n for char in 'ȐŔŖŘȒṘṚṜṞ':\n self.trans[char] = 'R'\n for char in 'ȑŕŗřȓṙṛṝṟɽ':\n self.trans[char] = 'r'\n for char in 'ŚṤŞȘŠṦṠṢṨ':\n self.trans[char] = 'S'\n for char in 'śṥşșšṧṡṣṩȿ':\n self.trans[char] = 's'\n self.trans['Ŝ'] = 'Sx'\n self.trans['ŝ'] = 'sx'\n for char in 'ŢȚŤṪṬṮṰŦƬƮ':\n self.trans[char] = 'T'\n for char in 'ţțťṫṭṯṱŧȾƭʈ':\n self.trans[char] = 't'\n for char in 'ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ':\n self.trans[char] = 'U'\n for char in 'ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ':\n self.trans[char] = 'u'\n for char in 'ȔŰǛǗǕǙ':\n self.trans[char] = 'Ü'\n for char in 'ȕűǜǘǖǚ':\n self.trans[char] = 'ü'\n self.trans['Û'] = 'Ux'\n self.trans['û'] = 'ux'\n self.trans['Ȗ'] = 'Û'\n self.trans['ȗ'] = 'û'\n self.trans['Ừ'] = 'Ù'\n self.trans['ừ'] = 'ù'\n self.trans['Ứ'] = 'Ú'\n self.trans['ứ'] = 'ú'\n for char in 'ṼṾ':\n self.trans[char] = 'V'\n for char in 'ṽṿ':\n self.trans[char] = 'v'\n for char in 'ẀẂŴẄẆẈ':\n self.trans[char] = 'W'\n for char in 'ẁẃŵẅẇẉ':\n self.trans[char] = 'w'\n for char in 'ẊẌ':\n self.trans[char] = 'X'\n for char in 'ẋẍ':\n self.trans[char] = 'x'\n for char in 
'ỲÝŶŸỸȲẎỴỶƳ':\n self.trans[char] = 'Y'\n for char in 'ỳýŷÿỹȳẏỵỷƴ':\n self.trans[char] = 'y'\n for char in 'ŹẐŻẒŽẔƵȤ':\n self.trans[char] = 'Z'\n for char in 'źẑżẓžẕƶȥ':\n self.trans[char] = 'z'\n self.trans['ɀ'] = 'zv'\n\n # Latin: extended Latin alphabet\n self.trans['ɑ'] = 'a'\n for char in 'ÆǼǢ':\n self.trans[char] = 'AE'\n for char in 'æǽǣ':\n self.trans[char] = 'ae'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ƎƏƐ':\n self.trans[char] = 'E'\n for char in 'ǝəɛ':\n self.trans[char] = 'e'\n for char in 'ƔƢ':\n self.trans[char] = 'G'\n for char in 'ᵷɣƣᵹ':\n self.trans[char] = 'g'\n self.trans['Ƅ'] = 'H'\n self.trans['ƅ'] = 'h'\n self.trans['Ƕ'] = 'Wh'\n self.trans['ƕ'] = 'wh'\n self.trans['Ɩ'] = 'I'\n self.trans['ɩ'] = 'i'\n self.trans['Ŋ'] = 'Ng'\n self.trans['ŋ'] = 'ng'\n self.trans['Œ'] = 'OE'\n self.trans['œ'] = 'oe'\n self.trans['Ɔ'] = 'O'\n self.trans['ɔ'] = 'o'\n self.trans['Ȣ'] = 'Ou'\n self.trans['ȣ'] = 'ou'\n self.trans['Ƽ'] = 'Q'\n for char in 'ĸƽ':\n self.trans[char] = 'q'\n self.trans['ȹ'] = 'qp'\n self.trans[''] = 'r'\n self.trans['ſ'] = 's'\n self.trans['ß'] = 'ss'\n self.trans['Ʃ'] = 'Sh'\n for char in 'ʃᶋ':\n self.trans[char] = 'sh'\n self.trans['Ʉ'] = 'U'\n self.trans['ʉ'] = 'u'\n self.trans['Ʌ'] = 'V'\n self.trans['ʌ'] = 'v'\n for char in 'ƜǷ':\n self.trans[char] = 'W'\n for char in 'ɯƿ':\n self.trans[char] = 'w'\n self.trans['Ȝ'] = 'Y'\n self.trans['ȝ'] = 'y'\n self.trans['IJ'] = 'IJ'\n self.trans['ij'] = 'ij'\n self.trans['Ƨ'] = 'Z'\n for char in 'ʮƨ':\n self.trans[char] = 'z'\n self.trans['Ʒ'] = 'Zh'\n self.trans['ʒ'] = 'zh'\n self.trans['Ǯ'] = 'Dzh'\n self.trans['ǯ'] = 'dzh'\n for char in 'ƸƹʔˀɁɂ':\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in 'Cʗǃ':\n self.trans[char] = '!'\n\n # Punctuation and typography\n for char in '«»“”„¨':\n self.trans[char] = u'\"'\n for char in '‘’′':\n self.trans[char] = u\"'\"\n self.trans['•'] = '*'\n self.trans['@'] = '(at)'\n self.trans['¤'] = '$'\n self.trans['¢'] = 'c'\n self.trans['€'] = 'E'\n self.trans['£'] = 'L'\n self.trans['¥'] = 'yen'\n self.trans['†'] = '+'\n self.trans['‡'] = '++'\n self.trans['°'] = ':'\n self.trans['¡'] = '!'\n self.trans['¿'] = '?'\n self.trans['‰'] = 'o/oo'\n self.trans['‱'] = 'o/ooo'\n for char in '¶§':\n self.trans[char] = '>'\n self.trans['…'] = '...'\n for char in '‒–—―':\n self.trans[char] = '-'\n self.trans['·'] = ' '\n self.trans['¦'] = '|'\n self.trans['⁂'] = '***'\n self.trans['◊'] = '<>'\n self.trans['‽'] = '?!'\n self.trans['؟'] = ';-)'\n self.trans['¹'] = '1'\n self.trans['²'] = '2'\n self.trans['³'] = '3'\n\n # Cyrillic\n self.trans.update({'А': 'A', 'а': 'a', 'Б': 'B', 'б': 'b',\n 'В': 'V', 'в': 'v', 'Г': 'G', 'г': 'g',\n 'Д': 'D', 'д': 'd', 'Е': 'E', 'е': 'e',\n 'Ж': 'Zh', 'ж': 'zh', 'З': 'Z', 'з': 'z',\n 'И': 'I', 'и': 'i', 'Й': 'J', 'й': 'j',\n 'К': 'K', 'к': 'k', 'Л': 'L', 'л': 'l',\n 'М': 'M', 'м': 'm', 'Н': 'N', 'н': 'n',\n 'О': 'O', 'о': 'o', 'П': 'P', 'п': 'p',\n 'Р': 'R', 'р': 'r', 'С': 'S', 'с': 's',\n 'Т': 'T', 'т': 't', 'У': 'U', 'у': 'u',\n 'Ф': 'F', 'ф': 'f', 'х': 'kh', 'Ц': 'C',\n 'ц': 'c', 'Ч': 'Ch', 'ч': 'ch', 'Ш': 'Sh',\n 'ш': 'sh', 'Щ': 'Shch', 'щ': 'shch', 'Ь': \"'\",\n 'ь': \"'\", 'Ъ': '\"', 'ъ': '\"', 'Ю': 'Yu',\n 'ю': 'yu', 'Я': 'Ya', 'я': 'ya', 'Х': 'Kh',\n 'Χ': 'Kh'})\n\n # Additional Cyrillic letters, most occuring in only a few languages\n self.trans.update({\n 'Ы': 'Y', 'ы': 'y', 'Ё': 'Ë', 'ё': 'ë',\n 'Э': 'È', 'Ѐ': 'È', 'э': 'è', 'ѐ': 'è',\n 'І': 'I', 'і': 'i', 'Ї': 'Ji', 'ї': 'ji',\n 'Є': 'Je', 
'є': 'je', 'Ґ': 'G', 'Ҝ': 'G',\n 'ґ': 'g', 'ҝ': 'g', 'Ђ': 'Dj', 'ђ': 'dj',\n 'Љ': 'Lj', 'љ': 'lj',\n 'Њ': 'Nj', 'њ': 'nj', 'Ћ': 'Cj', 'ћ': 'cj',\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n 'Ќ': 'Kj', 'ќ': 'kj', 'Ӣ': 'Ii', 'ӣ': 'ii',\n 'Ҳ': 'H', 'ҳ': 'h',\n 'Ҷ': 'Dz', 'ҷ': 'dz', 'Ө': 'Ô', 'Ӫ': 'Ô',\n 'ө': 'ô', 'ӫ': 'ô', 'Ү': 'Y', 'ү': 'y', 'Һ': 'H',\n 'һ': 'h', 'Ә': 'AE', 'Ӕ': 'AE', 'ә': 'ae',\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n 'ѝ': 'ì', 'Ѝ': 'Ì', 'Ӑ': 'A', 'ă': 'a', 'Ӓ': 'Ä',\n 'Ҽ': 'Ts', 'Ҿ': 'Ts', 'ҽ': 'ts', 'ҿ': 'ts',\n 'Ҙ': 'Dh', 'ҙ': 'dh', 'Ӏ': '', 'ӏ': '', 'Ӆ': 'L',\n 'ӆ': 'l', 'Ӎ': 'M', 'ӎ': 'm', 'Ӧ': 'Ö', 'ӧ': 'ö',\n 'Ҩ': 'u', 'ҩ': 'u', 'Ҧ': 'Ph', 'ҧ': 'ph', 'Ҏ': 'R',\n 'ҏ': 'r', 'Ҫ': 'Th', 'ҫ': 'th', 'Ҭ': 'T', 'ҭ': 't',\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n 'ӹ': 'u', 'Ҵ': 'Tts', 'ҵ': 'tts', 'Ӵ': 'Ch', 'ӵ': 'ch'})\n\n for char in 'ЈӤҊ':\n self.trans[char] = 'J'\n for char in 'јӥҋ':\n self.trans[char] = 'j'\n for char in 'ЏӁӜҶ':\n self.trans[char] = 'Dzh'\n for char in 'џӂӝҷ':\n self.trans[char] = 'dzh'\n for char in 'ЅӞӠӋҸ':\n self.trans[char] = 'Dz'\n for char in 'ѕӟӡӌҹ':\n self.trans[char] = 'dz'\n for char in 'ҒӶҔ':\n self.trans[char] = 'G'\n for char in 'ғӷҕ':\n self.trans[char] = 'g'\n for char in 'ҚҞҠӃ':\n self.trans[char] = 'Q'\n for char in 'қҟҡӄ':\n self.trans[char] = 'q'\n for char in 'ҢҤӉӇ':\n self.trans[char] = 'Ng'\n for char in 'ңҥӊӈ':\n self.trans[char] = 'ng'\n for char in 'ӖѢҌ':\n self.trans[char] = 'E'\n for char in 'ӗѣҍ':\n self.trans[char] = 'e'\n for char in 'ӲӰҮ':\n self.trans[char] = 'Ü'\n for char in 'ӳӱү':\n self.trans[char] = 'ü'\n\n # Archaic Cyrillic letters\n self.trans.update({\n 'Ѹ': 'Ou', 'ѹ': 'ou', 'Ѡ': 'O', 'Ѻ': 'O', 'ѡ': 'o',\n 'ѻ': 'o', 'Ѿ': 'Ot', 'ѿ': 'ot', 'Ѣ': 'E', 'ѣ': 'e',\n 'Ѥ': 'Ei', 'Ѧ': 'Ei', 'ѥ': 'ei', 'ѧ': 'ei', 'Ѫ': 'Ai',\n 'ѫ': 'ai', 'Ѯ': 'X', 'ѯ': 'x', 'Ѱ': 'Ps', 'ѱ': 'ps',\n 'Ѳ': 'Th', 'ѳ': 'th', 'Ѵ': 'Ü', 'Ѷ': 'Ü', 'ѵ': 'ü'})\n\n # Hebrew alphabet\n for char in 'אע':\n self.trans[char] = u\"'\"\n self.trans['ב'] = 'b'\n self.trans['ג'] = 'g'\n self.trans['ד'] = 'd'\n self.trans['ה'] = 'h'\n self.trans['ו'] = 'v'\n self.trans['ז'] = 'z'\n self.trans['ח'] = 'kh'\n self.trans['ט'] = 't'\n self.trans['י'] = 'y'\n for char in 'ךכ':\n self.trans[char] = 'k'\n self.trans['ל'] = 'l'\n for char in 'םמ':\n self.trans[char] = 'm'\n for char in 'ןנ':\n self.trans[char] = 'n'\n self.trans['ס'] = 's'\n for char in 'ףפ':\n self.trans[char] = 'ph'\n for char in 'ץצ':\n self.trans[char] = 'ts'\n self.trans['ק'] = 'q'\n self.trans['ר'] = 'r'\n self.trans['ש'] = 'sh'\n self.trans['ת'] = 'th'\n\n # Arab alphabet\n for char in 'اﺍﺎ':\n self.trans[char] = 'a'\n for char in 'بﺏﺐﺒﺑ':\n self.trans[char] = 'b'\n for char in 'تﺕﺖﺘﺗ':\n self.trans[char] = 't'\n for char in 'ثﺙﺚﺜﺛ':\n self.trans[char] = 'th'\n for char in 'جﺝﺞﺠﺟ':\n self.trans[char] = 'g'\n for char in 'حﺡﺢﺤﺣ':\n self.trans[char] = 'h'\n for char in 'خﺥﺦﺨﺧ':\n self.trans[char] = 'kh'\n for char in 'دﺩﺪ':\n self.trans[char] = 'd'\n for char in 'ذﺫﺬ':\n self.trans[char] = 'dh'\n for char in 'رﺭﺮ':\n self.trans[char] = 'r'\n for char in 'زﺯﺰ':\n self.trans[char] = 'z'\n for char in 'سﺱﺲﺴﺳ':\n self.trans[char] = 's'\n for char in 'شﺵﺶﺸﺷ':\n self.trans[char] = 'sh'\n for char in 'صﺹﺺﺼﺻ':\n self.trans[char] = 's'\n for char in 'ضﺽﺾﻀﺿ':\n self.trans[char] = 'd'\n for char in 'طﻁﻂﻄﻃ':\n self.trans[char] = 't'\n for char in 'ظﻅﻆﻈﻇ':\n self.trans[char] = 'z'\n for char in 'عﻉﻊﻌﻋ':\n self.trans[char] = u\"'\"\n for char in 'غﻍﻎﻐﻏ':\n self.trans[char] 
= 'gh'\n for char in 'فﻑﻒﻔﻓ':\n self.trans[char] = 'f'\n for char in 'قﻕﻖﻘﻗ':\n self.trans[char] = 'q'\n for char in 'كﻙﻚﻜﻛک':\n self.trans[char] = 'k'\n for char in 'لﻝﻞﻠﻟ':\n self.trans[char] = 'l'\n for char in 'مﻡﻢﻤﻣ':\n self.trans[char] = 'm'\n for char in 'نﻥﻦﻨﻧ':\n self.trans[char] = 'n'\n for char in 'هﻩﻪﻬﻫ':\n self.trans[char] = 'h'\n for char in 'وﻭﻮ':\n self.trans[char] = 'w'\n for char in 'یيﻱﻲﻴﻳ':\n self.trans[char] = 'y'\n # Arabic - additional letters, modified letters and ligatures\n self.trans['ﺀ'] = \"'\"\n for char in 'آﺁﺂ':\n self.trans[char] = u\"'a\"\n for char in 'ةﺓﺔ':\n self.trans[char] = 'th'\n for char in 'ىﻯﻰ':\n self.trans[char] = 'á'\n for char in 'یﯼﯽﯿﯾ':\n self.trans[char] = 'y'\n self.trans['؟'] = '?'\n # Arabic - ligatures\n for char in 'ﻻﻼ':\n self.trans[char] = 'la'\n self.trans['ﷲ'] = 'llah'\n for char in 'إأ':\n self.trans[char] = u\"a'\"\n self.trans['ؤ'] = \"w'\"\n self.trans['ئ'] = \"y'\"\n for char in '◌◌':\n self.trans[char] = \"\" # indicates absence of vowels\n # Arabic vowels\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'i'\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'iy'\n # Arab numerals\n for char in '٠۰':\n self.trans[char] = '0'\n for char in '١۱':\n self.trans[char] = '1'\n for char in '٢۲':\n self.trans[char] = '2'\n for char in '٣۳':\n self.trans[char] = '3'\n for char in '٤۴':\n self.trans[char] = '4'\n for char in '٥۵':\n self.trans[char] = '5'\n for char in '٦۶':\n self.trans[char] = '6'\n for char in '٧۷':\n self.trans[char] = '7'\n for char in '٨۸':\n self.trans[char] = '8'\n for char in '٩۹':\n self.trans[char] = '9'\n # Perso-Arabic\n for char in 'پﭙﭙپ':\n self.trans[char] = 'p'\n for char in 'چچچچ':\n self.trans[char] = 'ch'\n for char in 'ژژ':\n self.trans[char] = 'zh'\n for char in 'گﮔﮕﮓ':\n self.trans[char] = 'g'\n\n # Greek\n self.trans.update({\n 'Α': 'A', 'α': 'a', 'Β': 'B', 'β': 'b', 'Γ': 'G',\n 'γ': 'g', 'Δ': 'D', 'δ': 'd', 'Ε': 'E', 'ε': 'e',\n 'Ζ': 'Z', 'ζ': 'z', 'Η': 'I', 'η': 'i', 'θ': 'th',\n 'Θ': 'Th', 'Ι': 'I', 'ι': 'i', 'Κ': 'K', 'κ': 'k',\n 'Λ': 'L', 'λ': 'l', 'Μ': 'M', 'μ': 'm', 'Ν': 'N',\n 'ν': 'n', 'Ξ': 'X', 'ξ': 'x', 'Ο': 'O', 'ο': 'o',\n 'Π': 'P', 'π': 'p', 'Ρ': 'R', 'ρ': 'r', 'Σ': 'S',\n 'σ': 's', 'ς': 's', 'Τ': 'T', 'τ': 't', 'Υ': 'Y',\n 'υ': 'y', 'Φ': 'F', 'φ': 'f', 'Ψ': 'Ps', 'ψ': 'ps',\n 'Ω': 'O', 'ω': 'o', 'ϗ': '&', 'Ϛ': 'St', 'ϛ': 'st',\n 'Ϙ': 'Q', 'Ϟ': 'Q', 'ϙ': 'q', 'ϟ': 'q', 'Ϻ': 'S',\n 'ϻ': 's', 'Ϡ': 'Ss', 'ϡ': 'ss', 'Ϸ': 'Sh', 'ϸ': 'sh',\n '·': ':', 'Ά': 'Á', 'ά': 'á', 'Έ': 'É', 'Ή': 'É',\n 'έ': 'é', 'ή': 'é', 'Ί': 'Í', 'ί': 'í', 'Ϊ': 'Ï',\n 'ϊ': 'ï', 'ΐ': 'ï', 'Ό': 'Ó', 'ό': 'ó', 'Ύ': 'Ý',\n 'ύ': 'ý', 'Ϋ': 'Y', 'ϋ': 'ÿ', 'ΰ': 'ÿ', 'Ώ': 'Ó',\n 'ώ': 'ó'})\n\n # Japanese (katakana and hiragana)\n for char in 'アァあ':\n self.trans[char] = 'a'\n for char in 'イィい':\n self.trans[char] = 'i'\n for char in 'ウう':\n self.trans[char] = 'u'\n for char in 'エェえ':\n self.trans[char] = 'e'\n for char in 'オォお':\n self.trans[char] = 'o'\n for char in 'ャや':\n self.trans[char] = 'ya'\n for char in 'ュゆ':\n self.trans[char] = 'yu'\n for char in 'ョよ':\n self.trans[char] = 'yo'\n for char in 'カか':\n self.trans[char] = 'ka'\n for char in 'キき':\n self.trans[char] = 'ki'\n for char in 'クく':\n self.trans[char] = 'ku'\n for char in 'ケけ':\n self.trans[char] = 'ke'\n for char in 'コこ':\n self.trans[char] = 'ko'\n for char in 'サさ':\n self.trans[char] = 'sa'\n for char in 'シし':\n self.trans[char] = 'shi'\n for char in 'スす':\n 
self.trans[char] = 'su'\n for char in 'セせ':\n self.trans[char] = 'se'\n for char in 'ソそ':\n self.trans[char] = 'so'\n for char in 'タた':\n self.trans[char] = 'ta'\n for char in 'チち':\n self.trans[char] = 'chi'\n for char in 'ツつ':\n self.trans[char] = 'tsu'\n for char in 'テて':\n self.trans[char] = 'te'\n for char in 'トと':\n self.trans[char] = 'to'\n for char in 'ナな':\n self.trans[char] = 'na'\n for char in 'ニに':\n self.trans[char] = 'ni'\n for char in 'ヌぬ':\n self.trans[char] = 'nu'\n for char in 'ネね':\n self.trans[char] = 'ne'\n for char in 'ノの':\n self.trans[char] = 'no'\n for char in 'ハは':\n self.trans[char] = 'ha'\n for char in 'ヒひ':\n self.trans[char] = 'hi'\n for char in 'フふ':\n self.trans[char] = 'fu'\n for char in 'ヘへ':\n self.trans[char] = 'he'\n for char in 'ホほ':\n self.trans[char] = 'ho'\n for char in 'マま':\n self.trans[char] = 'ma'\n for char in 'ミみ':\n self.trans[char] = 'mi'\n for char in 'ムむ':\n self.trans[char] = 'mu'\n for char in 'メめ':\n self.trans[char] = 'me'\n for char in 'モも':\n self.trans[char] = 'mo'\n for char in 'ラら':\n self.trans[char] = 'ra'\n for char in 'リり':\n self.trans[char] = 'ri'\n for char in 'ルる':\n self.trans[char] = 'ru'\n for char in 'レれ':\n self.trans[char] = 're'\n for char in 'ロろ':\n self.trans[char] = 'ro'\n for char in 'ワわ':\n self.trans[char] = 'wa'\n for char in 'ヰゐ':\n self.trans[char] = 'wi'\n for char in 'ヱゑ':\n self.trans[char] = 'we'\n for char in 'ヲを':\n self.trans[char] = 'wo'\n for char in 'ンん':\n self.trans[char] = 'n'\n for char in 'ガが':\n self.trans[char] = 'ga'\n for char in 'ギぎ':\n self.trans[char] = 'gi'\n for char in 'グぐ':\n self.trans[char] = 'gu'\n for char in 'ゲげ':\n self.trans[char] = 'ge'\n for char in 'ゴご':\n self.trans[char] = 'go'\n for char in 'ザざ':\n self.trans[char] = 'za'\n for char in 'ジじ':\n self.trans[char] = 'ji'\n for char in 'ズず':\n self.trans[char] = 'zu'\n for char in 'ゼぜ':\n self.trans[char] = 'ze'\n for char in 'ゾぞ':\n self.trans[char] = 'zo'\n for char in 'ダだ':\n self.trans[char] = 'da'\n for char in 'ヂぢ':\n self.trans[char] = 'dji'\n for char in 'ヅづ':\n self.trans[char] = 'dzu'\n for char in 'デで':\n self.trans[char] = 'de'\n for char in 'ドど':\n self.trans[char] = 'do'\n for char in 'バば':\n self.trans[char] = 'ba'\n for char in 'ビび':\n self.trans[char] = 'bi'\n for char in 'ブぶ':\n self.trans[char] = 'bu'\n for char in 'ベべ':\n self.trans[char] = 'be'\n for char in 'ボぼ':\n self.trans[char] = 'bo'\n for char in 'パぱ':\n self.trans[char] = 'pa'\n for char in 'ピぴ':\n self.trans[char] = 'pi'\n for char in 'プぷ':\n self.trans[char] = 'pu'\n for char in 'ペぺ':\n self.trans[char] = 'pe'\n for char in 'ポぽ':\n self.trans[char] = 'po'\n for char in 'ヴゔ':\n self.trans[char] = 'vu'\n self.trans['ヷ'] = 'va'\n self.trans['ヸ'] = 'vi'\n self.trans['ヹ'] = 've'\n self.trans['ヺ'] = 'vo'\n\n # Japanese and Chinese punctuation and typography\n for char in '・·':\n self.trans[char] = ' '\n for char in '〃『』《》':\n self.trans[char] = u'\"'\n for char in '「」〈〉〘〙〚〛':\n self.trans[char] = u\"'\"\n for char in '(〔':\n self.trans[char] = '('\n for char in ')〕':\n self.trans[char] = ')'\n for char in '[【〖':\n self.trans[char] = '['\n for char in ']】〗':\n self.trans[char] = ']'\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. 
'\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in '•◦':\n self.trans[char] = '_'\n for char in '※*':\n self.trans[char] = '*'\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in ',、':\n self.trans[char] = ','\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in 'ეჱ':\n self.trans[char] = 'e'\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in 'ყ':\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in 'წ':\n self.trans[char] = u\"ts'\"\n for char in 'ჭ':\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in 'पप':\n self.trans[char] = 'p'\n self.trans['अ'] = 'a'\n for char in 'आा':\n self.trans[char] = 'aa'\n self.trans['प'] = 'pa'\n for char in 'इि':\n self.trans[char] = 'i'\n for char in 'ईी':\n self.trans[char] = 'ii'\n for char in 'उु':\n self.trans[char] = 'u'\n for char in 'ऊू':\n self.trans[char] = 'uu'\n for char in 'एे':\n self.trans[char] = 'e'\n for char in 'ऐै':\n self.trans[char] = 'ai'\n for char in 'ओो':\n self.trans[char] = 'o'\n for char in 'औौ':\n self.trans[char] = 'au'\n for char in 'ऋृर':\n self.trans[char] = 'r'\n for char in 'ॠॄ':\n self.trans[char] = 'rr'\n for char in 'ऌॢल':\n self.trans[char] = 'l'\n for char in 'ॡॣ':\n self.trans[char] = 'll'\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in 'टत':\n self.trans[char] = 't'\n for char in 'ठथ':\n self.trans[char] = 'th'\n for char in 'डद':\n self.trans[char] = 'd'\n for char in 'ढध':\n self.trans[char] = 'dh'\n for char in 'णन':\n self.trans[char] = 'n'\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in 'षस':\n self.trans[char] = 's'\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in 'क़':\n self.trans[char] = 'q'\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in 'डढ':\n self.trans[char] = 'r'\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in 'ख्':\n self.trans[char] = 'khn'\n self.trans['त'] = 'tn'\n for char in 'द्':\n self.trans[char] = 'dn'\n self.trans['श'] = 'cn'\n for char in 'ह्':\n self.trans[char] = 'fn'\n for char in 'अँ':\n self.trans[char] = 'm'\n for char in '॒॑':\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n 
self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in 'Տ':\n self.trans[char] = u\"T'\"\n for char in 'տ':\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in 'க்':\n self.trans[char] = 'k'\n for char in 'ஙண்ந்ன்':\n self.trans[char] = 'n'\n self.trans['ச'] = 'c'\n for char in 'ஞ்':\n self.trans[char] = 'ñ'\n for char in 'ட்':\n self.trans[char] = 'th'\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in 'ம்':\n self.trans[char] = 'm'\n for char in 'ய்':\n self.trans[char] = 'y'\n for char in 'ர்ழ்ற':\n self.trans[char] = 'r'\n for char in 'ல்ள':\n self.trans[char] = 'l'\n for char in 'வ்':\n self.trans[char] = 'v'\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in 'க்ஷ':\n self.trans[char] = 'x'\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in 'আা':\n self.trans[char] = 'a'\n for char in 'ইিঈী':\n self.trans[char] = 'i'\n for char in 'উুঊূ':\n self.trans[char] = 'u'\n for char in 'ঋৃ':\n self.trans[char] = 'ri'\n for char in 'এেয়':\n self.trans[char] = 'e'\n for char in 'ঐৈ':\n self.trans[char] = 'oi'\n for char in 'ওো':\n self.trans[char] = 'o'\n for 
char in 'ঔৌ':\n self.trans[char] = 'ou'\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in 'টত':\n self.trans[char] = 't'\n for char in 'ঠথ':\n self.trans[char] = 'th'\n for char in 'ডদ':\n self.trans[char] = 'd'\n for char in 'ঢধ':\n self.trans[char] = 'dh'\n for char in 'ণন':\n self.trans[char] = 'n'\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in 'য়':\n self.trans[char] = '-'\n for char in 'ড়':\n self.trans[char] = 'r'\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in 'ขฃคฅฆ':\n self.trans[char] = 'kh'\n self.trans['ง'] = 'ng'\n for char in 'จฉชฌ':\n self.trans[char] = 'ch'\n for char in 'ซศษส':\n self.trans[char] = 's'\n for char in 'ญย':\n self.trans[char] = 'y'\n for char in 'ฎด':\n self.trans[char] = 'd'\n for char in 'ฏต':\n self.trans[char] = 't'\n for char in 'ฐฑฒถทธ':\n self.trans[char] = 'th'\n for char in 'ณน':\n self.trans[char] = 'n'\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in 'ผพภ':\n self.trans[char] = 'ph'\n for char in 'ฝฟ':\n self.trans[char] = 'f'\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in 'ลฬ':\n self.trans[char] = 'l'\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in 'หฮ':\n self.trans[char] = 'h'\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in 'อวโิ':\n self.trans[char] = 'o'\n for char in 'ะัา':\n self.trans[char] = 'a'\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in 'เ็':\n self.trans[char] = 'e'\n self.trans['แ'] = 'ae'\n for char in 'ใไ':\n self.trans[char] = 'ai'\n for char in '่้๊๋็์':\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 
'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans['ಅ'] = 'a'\n for char in 'ಆಾ':\n self.trans[char] = 'aa'\n for char in 'ಇಿ':\n self.trans[char] = 'i'\n for char in 'ಈೀ':\n self.trans[char] = 'ii'\n for char in 'ಉು':\n self.trans[char] = 'u'\n for char in 'ಊೂ':\n self.trans[char] = 'uu'\n for char in 'ಋೂ':\n self.trans[char] = u\"r'\"\n for char in 'ಎೆ':\n self.trans[char] = 'e'\n for char in 'ಏೇ':\n self.trans[char] = 'ee'\n for char in 'ಐೈ':\n self.trans[char] = 'ai'\n for char in 'ಒೊ':\n self.trans[char] = 'o'\n for char in 'ಓೋ':\n self.trans[char] = 'oo'\n for char in 'ಔೌ':\n self.trans[char] = 'au'\n self.trans['ಂ'] = \"m'\"\n self.trans['ಃ'] = \"h'\"\n self.trans['ಕ'] = 'k'\n self.trans['ಖ'] = 'kh'\n self.trans['ಗ'] = 'g'\n self.trans['ಘ'] = 'gh'\n self.trans['ಙ'] = 'ng'\n self.trans['ಚ'] = 'c'\n self.trans['ಛ'] = 'ch'\n self.trans['ಜ'] = 'j'\n self.trans['ಝ'] = 'ny'\n self.trans['ಟ'] = 'tt'\n self.trans['ಠ'] = 'tth'\n self.trans['ಡ'] = 'dd'\n self.trans['ಢ'] = 'ddh'\n self.trans['ಣ'] = 'nn'\n self.trans['ತ'] = 't'\n self.trans['ಥ'] = 'th'\n self.trans['ದ'] = 'd'\n self.trans['ಧ'] = 'dh'\n self.trans['ನ'] = 'n'\n self.trans['ಪ'] = 'p'\n self.trans['ಫ'] = 'ph'\n self.trans['ಬ'] = 'b'\n self.trans['ಭ'] = 'bh'\n self.trans['ಮ'] = 'm'\n self.trans['ಯ'] = 'y'\n self.trans['ರ'] = 'r'\n self.trans['ಲ'] = 'l'\n self.trans['ವ'] = 'v'\n self.trans['ಶ'] = 'sh'\n self.trans['ಷ'] = 'ss'\n self.trans['ಸ'] = 's'\n self.trans['ಹ'] = 'h'\n self.trans['ಳ'] = 'll'\n self.trans['೦'] = '0'\n self.trans['೧'] = '1'\n self.trans['೨'] = '2'\n self.trans['೩'] = '3'\n self.trans['೪'] = '4'\n self.trans['೫'] = '5'\n self.trans['೬'] = '6'\n self.trans['೭'] = '7'\n self.trans['೮'] = '8'\n self.trans['೯'] = '9'\n # Telugu\n self.trans['అ'] = 'a'\n for char in 'ఆా':\n self.trans[char] = 'aa'\n for char in 'ఇి':\n self.trans[char] = 'i'\n for char in 'ఈీ':\n self.trans[char] = 'ii'\n for char in 'ఉు':\n self.trans[char] = 'u'\n for char in 'ఊూ':\n self.trans[char] = 'uu'\n for char in 'ఋృ':\n self.trans[char] = \"r'\"\n for char in 'ౠౄ':\n self.trans[char] = 'r\"'\n self.trans['ఌ'] = \"l'\"\n self.trans['ౡ'] = 'l\"'\n for char in 'ఎె':\n self.trans[char] = 'e'\n for char in 'ఏే':\n self.trans[char] = 'ee'\n for char in 'ఐై':\n self.trans[char] = 'ai'\n for char in 'ఒొ':\n self.trans[char] = 'o'\n for char in 'ఓో':\n self.trans[char] = 'oo'\n for char in 'ఔౌ':\n self.trans[char] = 'au'\n self.trans['ం'] = \"'\"\n self.trans['ః'] = '\"'\n self.trans['క'] = 'k'\n self.trans['ఖ'] = 'kh'\n self.trans['గ'] = 'g'\n self.trans['ఘ'] = 'gh'\n self.trans['ఙ'] = 'ng'\n self.trans['చ'] = 'ts'\n self.trans['ఛ'] = 'tsh'\n self.trans['జ'] = 'j'\n self.trans['ఝ'] = 'jh'\n self.trans['ఞ'] = 'ñ'\n for char in 'టత':\n self.trans[char] = 't'\n for char in 'ఠథ':\n self.trans[char] = 'th'\n for char in 'డద':\n self.trans[char] = 'd'\n for char in 'ఢధ':\n self.trans[char] = 'dh'\n for char in 'ణన':\n self.trans[char] = 'n'\n self.trans['ప'] = 'p'\n self.trans['ఫ'] = 'ph'\n self.trans['బ'] = 'b'\n self.trans['భ'] = 'bh'\n self.trans['మ'] = 'm'\n self.trans['య'] = 'y'\n for char in 'రఱ':\n self.trans[char] = 'r'\n for char in 'లళ':\n self.trans[char] = 'l'\n self.trans['వ'] = 'v'\n self.trans['శ'] = 'sh'\n for char in 'షస':\n 
self.trans[char] = 's'\n self.trans['హ'] = 'h'\n self.trans['్'] = \"\"\n for char in 'ంఁ':\n self.trans[char] = '^'\n self.trans['ః'] = '-'\n self.trans['౦'] = '0'\n self.trans['౧'] = '1'\n self.trans['౨'] = '2'\n self.trans['౩'] = '3'\n self.trans['౪'] = '4'\n self.trans['౫'] = '5'\n self.trans['౬'] = '6'\n self.trans['౭'] = '7'\n self.trans['౮'] = '8'\n self.trans['౯'] = '9'\n self.trans['౹'] = '1/4'\n self.trans['౺'] = '1/2'\n self.trans['౻'] = '3/4'\n self.trans['౼'] = '1/16'\n self.trans['౽'] = '1/8'\n self.trans['౾'] = '3/16'\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans['ກ'] = 'k'\n for char in 'ຂຄ':\n self.trans[char] = 'kh'\n self.trans['ງ'] = 'ng'\n self.trans['ຈ'] = 'ch'\n for char in 'ສຊ':\n self.trans[char] = 's'\n self.trans['ຍ'] = 'ny'\n self.trans['ດ'] = 'd'\n self.trans['ຕ'] = 't'\n for char in 'ຖທ':\n self.trans[char] = 'th'\n self.trans['ນ'] = 'n'\n self.trans['ບ'] = 'b'\n self.trans['ປ'] = 'p'\n for char in 'ຜພ':\n self.trans[char] = 'ph'\n for char in 'ຝຟ':\n self.trans[char] = 'f'\n for char in 'ມໝ':\n self.trans[char] = 'm'\n self.trans['ຢ'] = 'y'\n for char in 'ຣຼ':\n self.trans[char] = 'r'\n for char in 'ລຼ':\n self.trans[char] = 'l'\n self.trans['ວ'] = 'v'\n self.trans['ຮ'] = 'h'\n self.trans['ອ'] = \"'\"\n for char in 'ະັ':\n self.trans[char] = 'a'\n self.trans['ິ'] = 'i'\n self.trans['ຶ'] = 'ue'\n self.trans['ຸ'] = 'u'\n self.trans['ເ'] = 'é'\n self.trans['ແ'] = 'è'\n for char in 'ໂົາໍ':\n self.trans[char] = 'o'\n self.trans['ຽ'] = 'ia'\n self.trans['ເຶ'] = 'uea'\n self.trans['ຍ'] = 'i'\n for char in 'ໄໃ':\n self.trans[char] = 'ai'\n self.trans['ຳ'] = 'am'\n self.trans['າ'] = 'aa'\n self.trans['ີ'] = 'ii'\n self.trans['ື'] = 'yy'\n self.trans['ູ'] = 'uu'\n self.trans['ເ'] = 'e'\n self.trans['ແ'] = 'ei'\n self.trans['໐'] = '0'\n self.trans['໑'] = '1'\n self.trans['໒'] = '2'\n self.trans['໓'] = '3'\n self.trans['໔'] = '4'\n self.trans['໕'] = '5'\n self.trans['໖'] = '6'\n self.trans['໗'] = '7'\n self.trans['໘'] = '8'\n self.trans['໙'] = '9'\n # Chinese -- note: incomplete\n for char in '埃挨哎唉哀皑癌蔼矮艾碍爱隘':\n self.trans[char] = 'ai'\n for char in '鞍氨安俺按暗岸胺案':\n self.trans[char] = 'an'\n for char in '肮昂盎':\n self.trans[char] = 'ang'\n for char in '凹敖熬翱袄傲奥懊澳':\n self.trans[char] = 'ao'\n for char in '芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸':\n self.trans[char] = 'ba'\n for char in '白柏百摆佰败拜稗':\n self.trans[char] = 'bai'\n for char in '斑班搬扳般颁板版扮拌伴瓣半办绊':\n self.trans[char] = 'ban'\n for char in '邦帮梆榜膀绑棒磅蚌镑傍谤':\n self.trans[char] = 'bang'\n for char in '苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆':\n self.trans[char] = 'bao'\n for char in '杯碑悲卑北辈背贝钡倍狈备惫焙被':\n self.trans[char] = 'bei'\n for char in '奔苯本笨':\n self.trans[char] = 'ben'\n for char in '崩绷甭泵蹦迸':\n self.trans[char] = 'beng'\n for char in '逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛':\n self.trans[char] = 'bi'\n for char in '鞭边编贬扁便变卞辨辩辫遍':\n self.trans[char] = 'bian'\n for char in '标彪膘表':\n self.trans[char] = 'biao'\n for char in '鳖憋别瘪':\n self.trans[char] = 'bie'\n for char in '彬斌濒滨宾摈':\n self.trans[char] = 'bin'\n for char in '兵冰柄丙秉饼炳病并':\n self.trans[char] = 'bing'\n for char in '玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳':\n self.trans[char] = 'bo'\n for char in '哺补埠不布步簿部怖':\n self.trans[char] = 'bu'\n for char in '猜裁材才财睬踩采彩菜蔡':\n self.trans[char] = 'cai'\n for char in '餐参蚕残惭惨灿':\n self.trans[char] = 'can'\n for char in '苍舱仓沧藏':\n self.trans[char] = 'cang'\n for char in '操糙槽曹草':\n self.trans[char] = 'cao'\n for char in '厕策侧册测':\n self.trans[char] = 'ce'\n for char in '层蹭':\n self.trans[char] = 'ceng'\n for char in 
'插叉茬茶查碴搽察岔差诧':\n self.trans[char] = 'cha'\n for char in '拆柴豺':\n self.trans[char] = 'chai'\n for char in '搀掺蝉馋谗缠铲产阐颤':\n self.trans[char] = 'chan'\n for char in '昌猖场尝常长偿肠厂敞畅唱倡':\n self.trans[char] = 'chang'\n for char in '超抄钞朝嘲潮巢吵炒':\n self.trans[char] = 'chao'\n for char in '车扯撤掣彻澈':\n self.trans[char] = 'che'\n for char in '郴臣辰尘晨忱沉陈趁衬':\n self.trans[char] = 'chen'\n for char in '撑称城橙成呈乘程惩澄诚承逞骋秤':\n self.trans[char] = 'cheng'\n for char in '吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽':\n self.trans[char] = 'chi'\n for char in '充冲虫崇宠':\n self.trans[char] = 'chong'\n for char in '抽酬畴踌稠愁筹仇绸瞅丑臭':\n self.trans[char] = 'chou'\n for char in '初出橱厨躇锄雏滁除楚储矗搐触处':\n self.trans[char] = 'chu'\n self.trans['揣'] = 'chuai'\n for char in '川穿椽传船喘串':\n self.trans[char] = 'chuan'\n for char in '疮窗幢床闯创':\n self.trans[char] = 'chuang'\n for char in '吹炊捶锤垂':\n self.trans[char] = 'chui'\n for char in '春椿醇唇淳纯蠢':\n self.trans[char] = 'chun'\n for char in '戳绰':\n self.trans[char] = 'chuo'\n for char in '疵茨磁雌辞慈瓷词此刺赐次':\n self.trans[char] = 'ci'\n for char in '聪葱囱匆从丛':\n self.trans[char] = 'cong'\n self.trans['凑'] = 'cou'\n for char in '粗醋簇促':\n self.trans[char] = 'cu'\n for char in '蹿篡窜':\n self.trans[char] = 'cuan'\n for char in '摧崔催脆瘁粹淬翠':\n self.trans[char] = 'cui'\n for char in '村存寸':\n self.trans[char] = 'cun'\n for char in '磋撮搓措挫错':\n self.trans[char] = 'cuo'\n for char in '搭达答瘩打大':\n self.trans[char] = 'da'\n for char in '呆歹傣戴带殆代贷袋待逮怠':\n self.trans[char] = 'dai'\n for char in '耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋':\n self.trans[char] = 'dan'\n for char in '当挡党荡档':\n self.trans[char] = 'dang'\n for char in '刀捣蹈倒岛祷导到稻悼道盗':\n self.trans[char] = 'dao'\n for char in '德得的':\n self.trans[char] = 'de'\n for char in '蹬灯登等瞪凳邓':\n self.trans[char] = 'deng'\n for char in '堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔':\n self.trans[char] = 'di'\n for char in '颠掂滇碘点典靛垫电佃甸店惦奠淀殿':\n self.trans[char] = 'dian'\n for char in '碉叼雕凋刁掉吊钓调':\n self.trans[char] = 'diao'\n for char in '跌爹碟蝶迭谍叠':\n self.trans[char] = 'die'\n for char in '丁盯叮钉顶鼎锭定订':\n self.trans[char] = 'ding'\n self.trans['丢'] = 'diu'\n for char in '东冬董懂动栋侗恫冻洞':\n self.trans[char] = 'dong'\n for char in '兜抖斗陡豆逗痘':\n self.trans[char] = 'dou'\n for char in '都督毒犊独读堵睹赌杜镀肚度渡妒':\n self.trans[char] = 'du'\n for char in '端短锻段断缎':\n self.trans[char] = 'duan'\n for char in '堆兑队对':\n self.trans[char] = 'dui'\n for char in '墩吨蹲敦顿囤钝盾遁':\n self.trans[char] = 'dun'\n for char in '掇哆多夺垛躲朵跺舵剁惰堕':\n self.trans[char] = 'duo'\n for char in '蛾峨鹅俄额讹娥恶厄扼遏鄂饿':\n self.trans[char] = 'e'\n for char in '恩嗯':\n self.trans[char] = 'en'\n for char in '而儿耳尔饵洱二贰':\n self.trans[char] = 'er'\n for char in '发罚筏伐乏阀法珐':\n self.trans[char] = 'fa'\n for char in '藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛':\n self.trans[char] = 'fan'\n for char in '坊芳方肪房防妨仿访纺放':\n self.trans[char] = 'fang'\n for char in '菲非啡飞肥匪诽吠肺废沸费':\n self.trans[char] = 'fei'\n for char in '芬酚吩氛分纷坟焚汾粉奋份忿愤粪':\n self.trans[char] = 'fen'\n for char in '丰封枫蜂峰锋风疯烽逢冯缝讽奉凤':\n self.trans[char] = 'feng'\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in ('夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋'\n '复傅付阜父腹负富讣附妇缚咐'):\n self.trans[char] = 'fu'\n for char in '噶嘎':\n self.trans[char] = 'ga'\n for char in '该改概钙盖溉':\n self.trans[char] = 'gai'\n for char in '干甘杆柑竿肝赶感秆敢赣':\n self.trans[char] = 'gan'\n for char in '冈刚钢缸肛纲岗港杠':\n self.trans[char] = 'gang'\n for char in '篙皋高膏羔糕搞镐稿告':\n self.trans[char] = 'gao'\n for char in '哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各':\n self.trans[char] = 'ge'\n self.trans['给'] = 'gei'\n for char in '根跟':\n self.trans[char] = 'gen'\n for char in '耕更庚羹埂耿梗':\n self.trans[char] = 'geng'\n for char in '工攻功恭龚供躬公宫弓巩汞拱贡共':\n self.trans[char] = 
'gong'\n for char in '钩勾沟苟狗垢构购够':\n self.trans[char] = 'gou'\n for char in '辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇':\n self.trans[char] = 'gu'\n for char in '刮瓜剐寡挂褂':\n self.trans[char] = 'gua'\n for char in '乖拐怪':\n self.trans[char] = 'guai'\n for char in '棺关官冠观管馆罐惯灌贯':\n self.trans[char] = 'guan'\n for char in '光广逛':\n self.trans[char] = 'guang'\n for char in '瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽':\n self.trans[char] = 'gui'\n for char in '辊滚棍':\n self.trans[char] = 'gun'\n for char in '锅郭国果裹过':\n self.trans[char] = 'guo'\n self.trans['哈'] = 'ha'\n for char in '骸孩海氦亥害骇':\n self.trans[char] = 'hai'\n for char in '酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉':\n self.trans[char] = 'han'\n for char in '夯杭航':\n self.trans[char] = 'hang'\n for char in '壕嚎豪毫郝好耗号浩':\n self.trans[char] = 'hao'\n for char in '呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺':\n self.trans[char] = 'he'\n for char in '嘿黑':\n self.trans[char] = 'hei'\n for char in '痕很狠恨':\n self.trans[char] = 'hen'\n for char in '哼亨横衡恒':\n self.trans[char] = 'heng'\n for char in '轰哄烘虹鸿洪宏弘红':\n self.trans[char] = 'hong'\n for char in '喉侯猴吼厚候后':\n self.trans[char] = 'hou'\n for char in '呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户':\n self.trans[char] = 'hu'\n for char in '花哗华猾滑画划化话':\n self.trans[char] = 'hua'\n for char in '槐徊怀淮坏':\n self.trans[char] = 'huai'\n for char in '欢环桓还缓换患唤痪豢焕涣宦幻':\n self.trans[char] = 'huan'\n for char in '荒慌黄磺蝗簧皇凰惶煌晃幌恍谎':\n self.trans[char] = 'huang'\n for char in '灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘':\n self.trans[char] = 'hui'\n for char in '荤昏婚魂浑混':\n self.trans[char] = 'hun'\n for char in '豁活伙火获或惑霍货祸':\n self.trans[char] = 'huo'\n for char in ('击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几'\n '脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪'):\n self.trans[char] = 'ji'\n for char in '嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁':\n self.trans[char] = 'jia'\n for char in ('歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健'\n '舰剑饯渐溅涧建'):\n self.trans[char] = 'jian'\n for char in '僵姜将浆江疆蒋桨奖讲匠酱降':\n self.trans[char] = 'jiang'\n for char in '蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖':\n self.trans[char] = 'jiao'\n for char in '揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届':\n self.trans[char] = 'jie'\n for char in '巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲':\n self.trans[char] = 'jin'\n for char in '荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净':\n self.trans[char] = 'jing'\n for char in '囧炯窘':\n self.trans[char] = 'jiong'\n for char in '揪究纠玖韭久灸九酒厩救旧臼舅咎就疚':\n self.trans[char] = 'jiu'\n for char in '鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧':\n self.trans[char] = 'ju'\n for char in '捐鹃娟倦眷卷绢':\n self.trans[char] = 'juan'\n for char in '撅攫抉掘倔爵觉决诀绝':\n self.trans[char] = 'jue'\n for char in '均菌钧军君峻俊竣浚郡骏':\n self.trans[char] = 'jun'\n for char in '喀咖卡咯':\n self.trans[char] = 'ka'\n for char in '开揩楷凯慨':\n self.trans[char] = 'kai'\n for char in '刊堪勘坎砍看':\n self.trans[char] = 'kan'\n for char in '康慷糠扛抗亢炕':\n self.trans[char] = 'kang'\n for char in '考拷烤靠':\n self.trans[char] = 'kao'\n for char in '坷苛柯棵磕颗科壳咳可渴克刻客课':\n self.trans[char] = 'ke'\n for char in '肯啃垦恳':\n self.trans[char] = 'ken'\n for char in '坑吭':\n self.trans[char] = 'keng'\n for char in '空恐孔控':\n self.trans[char] = 'kong'\n for char in '抠口扣寇':\n self.trans[char] = 'kou'\n for char in '枯哭窟苦酷库裤':\n self.trans[char] = 'ku'\n for char in '夸垮挎跨胯':\n self.trans[char] = 'kua'\n for char in '块筷侩快':\n self.trans[char] = 'kuai'\n for char in '宽款':\n self.trans[char] = 'kuan'\n for char in '匡筐狂框矿眶旷况':\n self.trans[char] = 'kuang'\n for char in '亏盔岿窥葵奎魁傀馈愧溃':\n self.trans[char] = 'kui'\n for char in '坤昆捆困':\n self.trans[char] = 'kun'\n for char in '括扩廓阔':\n self.trans[char] = 'kuo'\n for char in '垃拉喇蜡腊辣啦':\n self.trans[char] = 'la'\n for char in '莱来赖':\n self.trans[char] = 'lai'\n for char in '蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥':\n self.trans[char] = 'lan'\n for char in 
'琅榔狼廊郎朗浪':\n self.trans[char] = 'lang'\n for char in '捞劳牢老佬姥酪烙涝':\n self.trans[char] = 'lao'\n for char in '勒乐':\n self.trans[char] = 'le'\n for char in '雷镭蕾磊累儡垒擂肋类泪':\n self.trans[char] = 'lei'\n for char in '棱楞冷':\n self.trans[char] = 'leng'\n for char in ('厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力'\n '璃哩'):\n self.trans[char] = 'li'\n self.trans['俩'] = 'lia'\n for char in '联莲连镰廉怜涟帘敛脸链恋炼练':\n self.trans[char] = 'lian'\n for char in '粮凉梁粱良两辆量晾亮谅':\n self.trans[char] = 'liang'\n for char in '撩聊僚疗燎寥辽潦了撂镣廖料':\n self.trans[char] = 'liao'\n for char in '列裂烈劣猎':\n self.trans[char] = 'lie'\n for char in '琳林磷霖临邻鳞淋凛赁吝拎':\n self.trans[char] = 'lin'\n for char in '玲菱零龄铃伶羚凌灵陵岭领另令':\n self.trans[char] = 'ling'\n for char in '溜琉榴硫馏留刘瘤流柳六':\n self.trans[char] = 'liu'\n for char in '龙聋咙笼窿隆垄拢陇':\n self.trans[char] = 'long'\n for char in '楼娄搂篓漏陋':\n self.trans[char] = 'lou'\n for char in '芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸':\n self.trans[char] = 'lu'\n for char in '峦挛孪滦卵乱':\n self.trans[char] = 'luan'\n for char in '掠略':\n self.trans[char] = 'lue'\n for char in '抡轮伦仑沦纶论':\n self.trans[char] = 'lun'\n for char in '萝螺罗逻锣箩骡裸落洛骆络漯':\n self.trans[char] = 'luo'\n for char in '驴吕铝侣旅履屡缕虑氯律率滤绿':\n self.trans[char] = 'lv'\n for char in '妈麻玛码蚂马骂嘛吗':\n self.trans[char] = 'ma'\n for char in '埋买麦卖迈脉':\n self.trans[char] = 'mai'\n for char in '瞒馒蛮满蔓曼慢漫谩':\n self.trans[char] = 'man'\n for char in '芒茫盲氓忙莽':\n self.trans[char] = 'mang'\n for char in '猫茅锚毛矛铆卯茂冒帽貌贸':\n self.trans[char] = 'mao'\n self.trans['么'] = 'me'\n for char in '玫枚梅酶霉煤没眉媒镁每美昧寐妹媚':\n self.trans[char] = 'mei'\n for char in '门闷们':\n self.trans[char] = 'men'\n for char in '萌蒙檬盟锰猛梦孟':\n self.trans[char] = 'meng'\n for char in '眯醚靡糜迷谜弥米秘觅泌蜜密幂':\n self.trans[char] = 'mi'\n for char in '棉眠绵冕免勉娩缅面':\n self.trans[char] = 'mian'\n for char in '苗描瞄藐秒渺庙妙':\n self.trans[char] = 'miao'\n for char in '蔑灭':\n self.trans[char] = 'mie'\n for char in '民抿皿敏悯闽':\n self.trans[char] = 'min'\n for char in '明螟鸣铭名命':\n self.trans[char] = 'ming'\n self.trans['谬'] = 'miu'\n for char in '摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌':\n self.trans[char] = 'mo'\n for char in '谋牟某':\n self.trans[char] = 'mou'\n for char in '拇牡亩姆母墓暮幕募慕木目睦牧穆':\n self.trans[char] = 'mu'\n for char in '拿哪呐钠那娜纳':\n self.trans[char] = 'na'\n for char in '氖乃奶耐奈':\n self.trans[char] = 'nai'\n for char in '南男难':\n self.trans[char] = 'nan'\n self.trans['囊'] = 'nang'\n for char in '挠脑恼闹淖':\n self.trans[char] = 'nao'\n self.trans['呢'] = 'ne'\n for char in '馁内':\n self.trans[char] = 'nei'\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in '妮霓倪泥尼拟你匿腻逆溺':\n self.trans[char] = 'ni'\n for char in '蔫拈年碾撵捻念':\n self.trans[char] = 'nian'\n for char in '娘酿':\n self.trans[char] = 'niang'\n for char in '鸟尿':\n self.trans[char] = 'niao'\n for char in '捏聂孽啮镊镍涅':\n self.trans[char] = 'nie'\n self.trans['您'] = 'nin'\n for char in '柠狞凝宁拧泞':\n self.trans[char] = 'ning'\n for char in '牛扭钮纽':\n self.trans[char] = 'niu'\n for char in '脓浓农弄':\n self.trans[char] = 'nong'\n for char in '奴努怒':\n self.trans[char] = 'nu'\n self.trans['暖'] = 'nuan'\n for char in '虐疟':\n self.trans[char] = 'nue'\n for char in '挪懦糯诺':\n self.trans[char] = 'nuo'\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in '欧鸥殴藕呕偶沤':\n self.trans[char] = 'ou'\n for char in '啪趴爬帕怕琶':\n self.trans[char] = 'pa'\n for char in '拍排牌徘湃派':\n self.trans[char] = 'pai'\n for char in '攀潘盘磐盼畔判叛':\n self.trans[char] = 'pan'\n for char in '乓庞旁耪胖':\n self.trans[char] = 'pang'\n for char in '抛咆刨炮袍跑泡':\n self.trans[char] = 'pao'\n for char in '呸胚培裴赔陪配佩沛':\n self.trans[char] = 'pei'\n for char in '喷盆':\n 
self.trans[char] = 'pen'\n for char in '砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰':\n self.trans[char] = 'peng'\n for char in '坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬':\n self.trans[char] = 'pi'\n for char in '篇偏片骗':\n self.trans[char] = 'pian'\n for char in '飘漂瓢票':\n self.trans[char] = 'piao'\n for char in '撇瞥':\n self.trans[char] = 'pie'\n for char in '拼频贫品聘':\n self.trans[char] = 'pin'\n for char in '乒坪苹萍平凭瓶评屏':\n self.trans[char] = 'ping'\n for char in '坡泼颇婆破魄迫粕剖':\n self.trans[char] = 'po'\n for char in '扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮':\n self.trans[char] = 'pu'\n for char in ('期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄'\n '弃汽泣讫'):\n self.trans[char] = 'qi'\n for char in '掐恰洽':\n self.trans[char] = 'qia'\n for char in '牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉':\n self.trans[char] = 'qian'\n for char in '枪呛腔羌墙蔷强抢':\n self.trans[char] = 'qiang'\n for char in '橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍':\n self.trans[char] = 'qiao'\n for char in '切茄且怯窃':\n self.trans[char] = 'qie'\n for char in '钦侵亲秦琴勤芹擒禽寝沁':\n self.trans[char] = 'qin'\n for char in '青轻氢倾卿清擎晴氰情顷请庆':\n self.trans[char] = 'qing'\n for char in '琼穷':\n self.trans[char] = 'qiong'\n for char in '秋丘邱球求囚酋泅':\n self.trans[char] = 'qiu'\n for char in '趋区蛆曲躯屈驱渠取娶龋趣去':\n self.trans[char] = 'qu'\n for char in '圈颧权醛泉全痊拳犬券劝':\n self.trans[char] = 'quan'\n for char in '缺炔瘸却鹊榷确雀':\n self.trans[char] = 'que'\n for char in '裙群':\n self.trans[char] = 'qun'\n for char in '然燃冉染':\n self.trans[char] = 'ran'\n for char in '瓤壤攘嚷让':\n self.trans[char] = 'rang'\n for char in '饶扰绕':\n self.trans[char] = 'rao'\n for char in '惹热':\n self.trans[char] = 're'\n for char in '壬仁人忍韧任认刃妊纫':\n self.trans[char] = 'ren'\n for char in '扔仍':\n self.trans[char] = 'reng'\n self.trans['日'] = 'ri'\n for char in '戎茸蓉荣融熔溶容绒冗':\n self.trans[char] = 'rong'\n for char in '揉柔肉':\n self.trans[char] = 'rou'\n for char in '茹蠕儒孺如辱乳汝入褥':\n self.trans[char] = 'ru'\n for char in '软阮':\n self.trans[char] = 'ruan'\n for char in '蕊瑞锐':\n self.trans[char] = 'rui'\n for char in '闰润':\n self.trans[char] = 'run'\n for char in '若弱':\n self.trans[char] = 'ruo'\n for char in '撒洒萨':\n self.trans[char] = 'sa'\n for char in '腮鳃塞赛':\n self.trans[char] = 'sai'\n for char in '三叁伞散':\n self.trans[char] = 'san'\n for char in '桑嗓丧':\n self.trans[char] = 'sang'\n for char in '搔骚扫嫂':\n self.trans[char] = 'sao'\n for char in '瑟色涩':\n self.trans[char] = 'se'\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in '莎砂杀刹沙纱傻啥煞':\n self.trans[char] = 'sha'\n for char in '筛晒':\n self.trans[char] = 'shai'\n for char in '珊苫杉山删煽衫闪陕擅赡膳善汕扇缮':\n self.trans[char] = 'shan'\n for char in '墒伤商赏晌上尚裳':\n self.trans[char] = 'shang'\n for char in '梢捎稍烧芍勺韶少哨邵绍':\n self.trans[char] = 'shao'\n for char in '奢赊蛇舌舍赦摄射慑涉社设':\n self.trans[char] = 'she'\n for char in '砷申呻伸身深娠绅神沈审婶甚肾慎渗':\n self.trans[char] = 'shen'\n for char in '声生甥牲升绳省盛剩胜圣':\n self.trans[char] = 'sheng'\n for char in ('师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝'\n '势是嗜噬适仕侍释饰氏市恃室视试'):\n self.trans[char] = 'shi'\n for char in '收手首守寿授售受瘦兽':\n self.trans[char] = 'shou'\n for char in (\n '蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕'):\n self.trans[char] = 'shu'\n for char in '刷耍':\n self.trans[char] = 'shua'\n for char in '摔衰甩帅':\n self.trans[char] = 'shuai'\n for char in '栓拴':\n self.trans[char] = 'shuan'\n for char in '霜双爽':\n self.trans[char] = 'shuang'\n for char in '谁水睡税':\n self.trans[char] = 'shui'\n for char in '吮瞬顺舜':\n self.trans[char] = 'shun'\n for char in '说硕朔烁':\n self.trans[char] = 'shuo'\n for char in '斯撕嘶思私司丝死肆寺嗣四伺似饲巳':\n self.trans[char] = 'si'\n for char in '松耸怂颂送宋讼诵':\n self.trans[char] = 'song'\n for char in '搜艘擞':\n self.trans[char] = 'sou'\n for char in 
'嗽苏酥俗素速粟僳塑溯宿诉肃':\n self.trans[char] = 'su'\n for char in '酸蒜算':\n self.trans[char] = 'suan'\n for char in '虽隋随绥髓碎岁穗遂隧祟':\n self.trans[char] = 'sui'\n for char in '孙损笋':\n self.trans[char] = 'sun'\n for char in '蓑梭唆缩琐索锁所':\n self.trans[char] = 'suo'\n for char in '塌他它她塔獭挞蹋踏':\n self.trans[char] = 'ta'\n for char in '胎苔抬台泰酞太态汰':\n self.trans[char] = 'tai'\n for char in '坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭':\n self.trans[char] = 'tan'\n for char in '汤塘搪堂棠膛唐糖倘躺淌趟烫':\n self.trans[char] = 'tang'\n for char in '掏涛滔绦萄桃逃淘陶讨套':\n self.trans[char] = 'tao'\n self.trans['特'] = 'te'\n for char in '藤腾疼誊':\n self.trans[char] = 'teng'\n for char in '梯剔踢锑提题蹄啼体替嚏惕涕剃屉':\n self.trans[char] = 'ti'\n for char in '兲天添填田甜恬舔腆':\n self.trans[char] = 'tian'\n for char in '挑条迢眺跳':\n self.trans[char] = 'tiao'\n for char in '贴铁帖':\n self.trans[char] = 'tie'\n for char in '厅听烃汀廷停亭庭挺艇':\n self.trans[char] = 'ting'\n for char in '通桐酮瞳同铜彤童桶捅筒统痛':\n self.trans[char] = 'tong'\n for char in '偷投头透':\n self.trans[char] = 'tou'\n for char in '凸秃突图徒途涂屠土吐兔':\n self.trans[char] = 'tu'\n for char in '湍团':\n self.trans[char] = 'tuan'\n for char in '推颓腿蜕褪退':\n self.trans[char] = 'tui'\n for char in '吞屯臀':\n self.trans[char] = 'tun'\n for char in '拖托脱鸵陀驮驼椭妥拓唾':\n self.trans[char] = 'tuo'\n for char in '挖哇蛙洼娃瓦袜':\n self.trans[char] = 'wa'\n for char in '歪外':\n self.trans[char] = 'wai'\n for char in '豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞':\n self.trans[char] = 'wan'\n for char in '汪王亡枉网往旺望忘妄':\n self.trans[char] = 'wang'\n for char in '威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫':\n self.trans[char] = 'wei'\n for char in '瘟温蚊文闻纹吻稳紊问':\n self.trans[char] = 'wen'\n for char in '嗡翁瓮':\n self.trans[char] = 'weng'\n for char in '挝蜗涡窝我斡卧握沃':\n self.trans[char] = 'wo'\n for char in '巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误':\n self.trans[char] = 'wu'\n for char in ('昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系'\n '隙戏细'):\n self.trans[char] = 'xi'\n for char in '瞎虾匣霞辖暇峡侠狭下厦夏吓':\n self.trans[char] = 'xia'\n for char in '掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线':\n self.trans[char] = 'xian'\n for char in '相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象':\n self.trans[char] = 'xiang'\n for char in '萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效':\n self.trans[char] = 'xiao'\n for char in '楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑':\n self.trans[char] = 'xie'\n for char in '薪芯锌欣辛新忻心信衅':\n self.trans[char] = 'xin'\n for char in '星腥猩惺兴刑型形邢行醒幸杏性姓':\n self.trans[char] = 'xing'\n for char in '兄凶胸匈汹雄熊':\n self.trans[char] = 'xiong'\n for char in '休修羞朽嗅锈秀袖绣':\n self.trans[char] = 'xiu'\n for char in '墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续':\n self.trans[char] = 'xu'\n for char in '轩喧宣悬旋玄选癣眩绚':\n self.trans[char] = 'xuan'\n for char in '靴薛学穴雪血':\n self.trans[char] = 'xue'\n for char in '勋熏循旬询寻驯巡殉汛训讯逊迅':\n self.trans[char] = 'xun'\n for char in '压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶':\n self.trans[char] = 'ya'\n for char in '焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验':\n self.trans[char] = 'yan'\n for char in '殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾':\n self.trans[char] = 'yang'\n for char in '邀腰妖瑶摇尧遥窑谣姚咬舀药要耀':\n self.trans[char] = 'yao'\n for char in '椰噎耶爷野冶也页掖业叶曳腋夜液':\n self.trans[char] = 'ye'\n for char in ('一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿'\n '役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎'):\n self.trans[char] = 'yi'\n for char in '茵荫因殷音阴姻吟银淫寅饮尹引隐印':\n self.trans[char] = 'yin'\n for char in '英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映':\n self.trans[char] = 'ying'\n self.trans['哟'] = 'yo'\n for char in '拥佣臃痈庸雍踊蛹咏泳涌永恿勇用':\n self.trans[char] = 'yong'\n for char in '幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂':\n self.trans[char] = 'you'\n for char in ('淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻'\n '峪御愈欲狱育誉浴寓裕预豫驭'):\n self.trans[char] = 'yu'\n for char in '鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院':\n self.trans[char] = 'yuan'\n for char in '曰约越跃钥岳粤月悦阅':\n self.trans[char] = 'yue'\n 
for char in '耘云郧匀陨允运蕴酝晕韵孕':\n self.trans[char] = 'yun'\n for char in '匝砸杂':\n self.trans[char] = 'za'\n for char in '栽哉灾宰载再在':\n self.trans[char] = 'zai'\n for char in '咱攒暂赞':\n self.trans[char] = 'zan'\n for char in '赃脏葬':\n self.trans[char] = 'zang'\n for char in '遭糟凿藻枣早澡蚤躁噪造皂灶燥':\n self.trans[char] = 'zao'\n for char in '责择则泽':\n self.trans[char] = 'ze'\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in '增憎曾赠':\n self.trans[char] = 'zeng'\n for char in '扎喳渣札轧铡闸眨栅榨咋乍炸诈':\n self.trans[char] = 'zha'\n for char in '摘斋宅窄债寨':\n self.trans[char] = 'zhai'\n for char in '瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽':\n self.trans[char] = 'zhan'\n for char in '樟章彰漳张掌涨杖丈帐账仗胀瘴障':\n self.trans[char] = 'zhang'\n for char in '招昭找沼赵照罩兆肇召':\n self.trans[char] = 'zhao'\n for char in '遮折哲蛰辙者锗蔗这浙':\n self.trans[char] = 'zhe'\n for char in '珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳':\n self.trans[char] = 'zhen'\n for char in '蒸挣睁征狰争怔整拯正政帧症郑证':\n self.trans[char] = 'zheng'\n for char in ('芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置'\n '帜峙制智秩稚质炙痔滞治窒'):\n self.trans[char] = 'zhi'\n for char in '中盅忠钟衷终种肿重仲众':\n self.trans[char] = 'zhong'\n for char in '舟周州洲诌粥轴肘帚咒皱宙昼骤':\n self.trans[char] = 'zhou'\n for char in '珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻':\n self.trans[char] = 'zhu'\n for char in '抓爪':\n self.trans[char] = 'zhua'\n self.trans['拽'] = 'zhuai'\n for char in '专砖转撰赚篆':\n self.trans[char] = 'zhuan'\n for char in '桩庄装妆撞壮状':\n self.trans[char] = 'zhuang'\n for char in '椎锥追赘坠缀':\n self.trans[char] = 'zhui'\n for char in '谆准':\n self.trans[char] = 'zhun'\n for char in '捉拙卓桌琢茁酌啄着灼浊':\n self.trans[char] = 'zhuo'\n for char in '兹咨资姿滋淄孜紫仔籽滓子自渍字':\n self.trans[char] = 'zi'\n for char in '鬃棕踪宗综总纵':\n self.trans[char] = 'zong'\n for char in '邹走奏揍':\n self.trans[char] = 'zou'\n for char in '租足卒族祖诅阻组':\n self.trans[char] = 'zu'\n for char in '钻纂':\n self.trans[char] = 'zuan'\n for char in '嘴醉最罪':\n self.trans[char] = 'zui'\n for char in '尊遵':\n self.trans[char] = 'zun'\n for char in '昨左佐柞做作坐座':\n self.trans[char] = 'zuo'\n # from:\n # https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans['ଂ'] = 'anusvara'\n self.trans['ઇ'] = 'i'\n self.trans['എ'] = 'e'\n self.trans['ગ'] = 'ga'\n self.trans['ਜ'] = 'ja'\n self.trans['ഞ'] = 'nya'\n self.trans['ଢ'] = 'ddha'\n self.trans['ધ'] = 'dha'\n self.trans['ਬ'] = 'ba'\n self.trans['മ'] = 'ma'\n self.trans['ଲ'] = 'la'\n self.trans['ષ'] = 'ssa'\n self.trans['਼'] = 'nukta'\n self.trans['ാ'] = 'aa'\n self.trans['ୂ'] = 'uu'\n self.trans['ે'] = 'e'\n self.trans['ੌ'] = 'au'\n self.trans['ൎ'] = 'reph'\n self.trans['ੜ'] = 'rra'\n self.trans['՞'] = '?'\n self.trans['ୢ'] = 'l'\n self.trans['૧'] = '1'\n self.trans['੬'] = '6'\n self.trans['൮'] = '8'\n self.trans['୲'] = 'quarter'\n self.trans['ൾ'] = 'll'\n self.trans['ਇ'] = 'i'\n self.trans['ഉ'] = 'u'\n self.trans['ઌ'] = 'l'\n self.trans['ਗ'] = 'ga'\n self.trans['ങ'] = 'nga'\n self.trans['ଝ'] = 'jha'\n self.trans['જ'] = 'ja'\n self.trans['؟'] = '?'\n self.trans['ਧ'] = 'dha'\n self.trans['ഩ'] = 'nnna'\n self.trans['ଭ'] = 'bha'\n self.trans['બ'] = 'ba'\n self.trans['ഹ'] = 'ha'\n self.trans['ଽ'] = 'avagraha'\n self.trans['઼'] = 'nukta'\n self.trans['ੇ'] = 'ee'\n self.trans['୍'] = 'virama'\n self.trans['ૌ'] = 'au'\n self.trans['੧'] = '1'\n self.trans['൩'] = '3'\n self.trans['୭'] = '7'\n self.trans['૬'] = '6'\n self.trans['൹'] = 'mark'\n self.trans['ਖ਼'] = 'khha'\n self.trans['ਂ'] = 'bindi'\n self.trans['ഈ'] = 'ii'\n self.trans['ઍ'] = 'e'\n self.trans['ଌ'] = 'l'\n self.trans['ഘ'] = 'gha'\n self.trans['ઝ'] = 'jha'\n self.trans['ଡ଼'] = 'rra'\n self.trans['ਢ'] = 'ddha'\n 
self.trans['ന'] = 'na'\n self.trans['ભ'] = 'bha'\n self.trans['ବ'] = 'ba'\n self.trans['ਲ'] = 'la'\n self.trans['സ'] = 'sa'\n self.trans['ઽ'] = 'avagraha'\n self.trans['଼'] = 'nukta'\n self.trans['ੂ'] = 'uu'\n self.trans['ൈ'] = 'ai'\n self.trans['્'] = 'virama'\n self.trans['ୌ'] = 'au'\n self.trans['൨'] = '2'\n self.trans['૭'] = '7'\n self.trans['୬'] = '6'\n self.trans['ੲ'] = 'iri'\n self.trans['ഃ'] = 'visarga'\n self.trans['ં'] = 'anusvara'\n self.trans['ଇ'] = 'i'\n self.trans['ഓ'] = 'oo'\n self.trans['ଗ'] = 'ga'\n self.trans['ਝ'] = 'jha'\n self.trans['?'] = '?'\n self.trans['ണ'] = 'nna'\n self.trans['ઢ'] = 'ddha'\n self.trans['ଧ'] = 'dha'\n self.trans['ਭ'] = 'bha'\n self.trans['ള'] = 'lla'\n self.trans['લ'] = 'la'\n self.trans['ଷ'] = 'ssa'\n self.trans['ൃ'] = 'r'\n self.trans['ૂ'] = 'uu'\n self.trans['େ'] = 'e'\n self.trans['੍'] = 'virama'\n self.trans['ୗ'] = 'mark'\n self.trans['ൣ'] = 'll'\n self.trans['ૢ'] = 'l'\n self.trans['୧'] = '1'\n self.trans['੭'] = '7'\n self.trans['൳'] = '1/4'\n self.trans['୷'] = 'sixteenths'\n self.trans['ଆ'] = 'aa'\n self.trans['ઋ'] = 'r'\n self.trans['ഊ'] = 'uu'\n self.trans['ਐ'] = 'ai'\n self.trans['ଖ'] = 'kha'\n self.trans['છ'] = 'cha'\n self.trans['ച'] = 'ca'\n self.trans['ਠ'] = 'ttha'\n self.trans['ଦ'] = 'da'\n self.trans['ફ'] = 'pha'\n self.trans['പ'] = 'pa'\n self.trans['ਰ'] = 'ra'\n self.trans['ଶ'] = 'sha'\n self.trans['ഺ'] = 'ttta'\n self.trans['ੀ'] = 'ii'\n self.trans['ો'] = 'o'\n self.trans['ൊ'] = 'o'\n self.trans['ୖ'] = 'mark'\n self.trans['୦'] = '0'\n self.trans['૫'] = '5'\n self.trans['൪'] = '4'\n self.trans['ੰ'] = 'tippi'\n self.trans['୶'] = 'eighth'\n self.trans['ൺ'] = 'nn'\n self.trans['ଁ'] = 'candrabindu'\n self.trans['അ'] = 'a'\n self.trans['ઐ'] = 'ai'\n self.trans['ക'] = 'ka'\n self.trans['ਸ਼'] = 'sha'\n self.trans['ਛ'] = 'cha'\n self.trans['ଡ'] = 'dda'\n self.trans['ઠ'] = 'ttha'\n self.trans['ഥ'] = 'tha'\n self.trans['ਫ'] = 'pha'\n self.trans['ર'] = 'ra'\n self.trans['വ'] = 'va'\n self.trans['ୁ'] = 'u'\n self.trans['ી'] = 'ii'\n self.trans['ੋ'] = 'oo'\n self.trans['ૐ'] = 'om'\n self.trans['ୡ'] = 'll'\n self.trans['ૠ'] = 'rr'\n self.trans['੫'] = '5'\n self.trans['ୱ'] = 'wa'\n self.trans['૰'] = 'sign'\n self.trans['൵'] = 'quarters'\n self.trans['ਫ਼'] = 'fa'\n self.trans['ઁ'] = 'candrabindu'\n self.trans['ਆ'] = 'aa'\n self.trans['ઑ'] = 'o'\n self.trans['ଐ'] = 'ai'\n self.trans['ഔ'] = 'au'\n self.trans['ਖ'] = 'kha'\n self.trans['ડ'] = 'dda'\n self.trans['ଠ'] = 'ttha'\n self.trans['ത'] = 'ta'\n self.trans['ਦ'] = 'da'\n self.trans['ର'] = 'ra'\n self.trans['ഴ'] = 'llla'\n self.trans['ુ'] = 'u'\n self.trans['ୀ'] = 'ii'\n self.trans['ൄ'] = 'rr'\n self.trans['ૡ'] = 'll'\n self.trans['ୠ'] = 'rr'\n self.trans['੦'] = '0'\n self.trans['૱'] = 'sign'\n self.trans['୰'] = 'isshar'\n self.trans['൴'] = '1/2'\n self.trans['ਁ'] = 'bindi'\n self.trans['આ'] = 'aa'\n self.trans['ଋ'] = 'r'\n self.trans['ഏ'] = 'ee'\n self.trans['ખ'] = 'kha'\n self.trans['ଛ'] = 'cha'\n self.trans['ട'] = 'tta'\n self.trans['ਡ'] = 'dda'\n self.trans['દ'] = 'da'\n self.trans['ଫ'] = 'pha'\n self.trans['യ'] = 'ya'\n self.trans['શ'] = 'sha'\n self.trans['ി'] = 'i'\n self.trans['ੁ'] = 'u'\n self.trans['ୋ'] = 'o'\n self.trans['ੑ'] = 'udaat'\n self.trans['૦'] = '0'\n self.trans['୫'] = '5'\n self.trans['൯'] = '9'\n self.trans['ੱ'] = 'addak'\n self.trans['ൿ'] = 'k'\n self.trans['ആ'] = 'aa'\n self.trans['ଊ'] = 'uu'\n self.trans['એ'] = 'e'\n self.trans['ਔ'] = 'au'\n self.trans['ഖ'] = 'kha'\n self.trans['ଚ'] = 'ca'\n self.trans['ટ'] = 'tta'\n self.trans['ਤ'] = 'ta'\n self.trans['ദ'] = 'da'\n 
self.trans['ପ'] = 'pa'\n self.trans['ય'] = 'ya'\n self.trans['ശ'] = 'sha'\n self.trans['િ'] = 'i'\n self.trans['െ'] = 'e'\n self.trans['൦'] = '0'\n self.trans['୪'] = '4'\n self.trans['૯'] = '9'\n self.trans['ੴ'] = 'onkar'\n self.trans['ଅ'] = 'a'\n self.trans['ਏ'] = 'ee'\n self.trans['କ'] = 'ka'\n self.trans['ઔ'] = 'au'\n self.trans['ਟ'] = 'tta'\n self.trans['ഡ'] = 'dda'\n self.trans['ଥ'] = 'tha'\n self.trans['ત'] = 'ta'\n self.trans['ਯ'] = 'ya'\n self.trans['റ'] = 'rra'\n self.trans['ଵ'] = 'va'\n self.trans['ਿ'] = 'i'\n self.trans['ു'] = 'u'\n self.trans['ૄ'] = 'rr'\n self.trans['ൡ'] = 'll'\n self.trans['੯'] = '9'\n self.trans['൱'] = '100'\n self.trans['୵'] = 'sixteenth'\n self.trans['અ'] = 'a'\n self.trans['ਊ'] = 'uu'\n self.trans['ഐ'] = 'ai'\n self.trans['ક'] = 'ka'\n self.trans['ଔ'] = 'au'\n self.trans['ਚ'] = 'ca'\n self.trans['ഠ'] = 'ttha'\n self.trans['થ'] = 'tha'\n self.trans['ତ'] = 'ta'\n self.trans['ਪ'] = 'pa'\n self.trans['ര'] = 'ra'\n self.trans['વ'] = 'va'\n self.trans['ീ'] = 'ii'\n self.trans['ૅ'] = 'e'\n self.trans['ୄ'] = 'rr'\n self.trans['ൠ'] = 'rr'\n self.trans['ਜ਼'] = 'za'\n self.trans['੪'] = '4'\n self.trans['൰'] = '10'\n self.trans['୴'] = 'quarters'\n self.trans['ਅ'] = 'a'\n self.trans['ഋ'] = 'r'\n self.trans['ઊ'] = 'uu'\n self.trans['ଏ'] = 'e'\n self.trans['ਕ'] = 'ka'\n self.trans['ഛ'] = 'cha'\n self.trans['ચ'] = 'ca'\n self.trans['ଟ'] = 'tta'\n self.trans['ਥ'] = 'tha'\n self.trans['ഫ'] = 'pha'\n self.trans['પ'] = 'pa'\n self.trans['ଯ'] = 'ya'\n self.trans['ਵ'] = 'va'\n self.trans['ି'] = 'i'\n self.trans['ോ'] = 'oo'\n self.trans['ୟ'] = 'yya'\n self.trans['൫'] = '5'\n self.trans['૪'] = '4'\n self.trans['୯'] = '9'\n self.trans['ੵ'] = 'yakash'\n self.trans['ൻ'] = 'n'\n self.trans['ઃ'] = 'visarga'\n self.trans['ം'] = 'anusvara'\n self.trans['ਈ'] = 'ii'\n self.trans['ઓ'] = 'o'\n self.trans['ഒ'] = 'o'\n self.trans['ਘ'] = 'gha'\n self.trans['ଞ'] = 'nya'\n self.trans['ણ'] = 'nna'\n self.trans['ഢ'] = 'ddha'\n self.trans['ਲ਼'] = 'lla'\n self.trans['ਨ'] = 'na'\n self.trans['ମ'] = 'ma'\n self.trans['ળ'] = 'lla'\n self.trans['ല'] = 'la'\n self.trans['ਸ'] = 'sa'\n self.trans['¿'] = '?'\n self.trans['ା'] = 'aa'\n self.trans['ૃ'] = 'r'\n self.trans['ൂ'] = 'uu'\n self.trans['ੈ'] = 'ai'\n self.trans['ૣ'] = 'll'\n self.trans['ൢ'] = 'l'\n self.trans['੨'] = '2'\n self.trans['୮'] = '8'\n self.trans['൲'] = '1000'\n self.trans['ਃ'] = 'visarga'\n self.trans['ଉ'] = 'u'\n self.trans['ઈ'] = 'ii'\n self.trans['ਓ'] = 'oo'\n self.trans['ଙ'] = 'nga'\n self.trans['ઘ'] = 'gha'\n self.trans['ഝ'] = 'jha'\n self.trans['ਣ'] = 'nna'\n self.trans['ન'] = 'na'\n self.trans['ഭ'] = 'bha'\n self.trans['ଜ'] = 'ja'\n self.trans['ହ'] = 'ha'\n self.trans['સ'] = 'sa'\n self.trans['ഽ'] = 'avagraha'\n self.trans['ૈ'] = 'ai'\n self.trans['്'] = 'virama'\n self.trans['୩'] = '3'\n self.trans['૨'] = '2'\n self.trans['൭'] = '7'\n self.trans['ੳ'] = 'ura'\n self.trans['ൽ'] = 'l'\n self.trans['ઉ'] = 'u'\n self.trans['ଈ'] = 'ii'\n self.trans['ഌ'] = 'l'\n self.trans['ઙ'] = 'nga'\n self.trans['ଘ'] = 'gha'\n self.trans['ജ'] = 'ja'\n self.trans['ਞ'] = 'nya'\n self.trans['ନ'] = 'na'\n self.trans['ബ'] = 'ba'\n self.trans['ਮ'] = 'ma'\n self.trans['હ'] = 'ha'\n self.trans['ସ'] = 'sa'\n self.trans['ਾ'] = 'aa'\n self.trans['ૉ'] = 'o'\n self.trans['ୈ'] = 'ai'\n self.trans['ൌ'] = 'au'\n self.trans['૩'] = '3'\n self.trans['୨'] = '2'\n self.trans['൬'] = '6'\n self.trans['੮'] = '8'\n self.trans['ർ'] = 'rr'\n self.trans['ଃ'] = 'visarga'\n self.trans['ഇ'] = 'i'\n self.trans['ਉ'] = 'u'\n self.trans['ଓ'] = 'o'\n self.trans['ഗ'] = 'ga'\n 
self.trans['ਙ'] = 'nga'\n self.trans['ઞ'] = 'nya'\n self.trans['ଣ'] = 'nna'\n self.trans['ധ'] = 'dha'\n self.trans['મ'] = 'ma'\n self.trans['ଳ'] = 'lla'\n self.trans['ഷ'] = 'ssa'\n self.trans['ਹ'] = 'ha'\n self.trans['ਗ਼'] = 'ghha'\n self.trans['ા'] = 'aa'\n self.trans['ୃ'] = 'r'\n self.trans['േ'] = 'ee'\n self.trans['ൗ'] = 'mark'\n self.trans['ଢ଼'] = 'rha'\n self.trans['ୣ'] = 'll'\n self.trans['൧'] = '1'\n self.trans['੩'] = '3'\n self.trans['૮'] = '8'\n self.trans['୳'] = 'half'\n for char in self.trans:\n value = self.trans[char]\n if value == '?':\n continue\n while (value.encode(encoding, 'replace').decode(encoding) == '?'\n and value in self.trans):\n assert value != self.trans[value], \\\n '{!r} == self.trans[{!r}]!'.format(value, value)\n value = self.trans[value]\n self.trans[char] = value",
"def convert(language='c'):",
"def get_data_encoding():",
"def get_babel_pages():\n BABEL = \"http://www.omniglot.com/babel/\"\n babel = urllib2.urlopen(MULTILING_URLS['babel']).read()\n return [(unicode(lang.text), BABEL+lang.get('href')) for lang in \\\n bs(unicode(bs(babel).findAll('ol')[0])).findAll('a')]",
"def parse_baxter(reading):\n\n initial = ''\n medial = ''\n final = ''\n tone = ''\n\n # determine environments\n inienv = True\n medienv = False\n finenv = False\n tonenv = False\n\n inichars = \"pbmrtdnkgnsyhzl'x\"\n\n\n chars = list(reading)\n for char in chars:\n\n # switch environments\n if char in 'jw' and not finenv:\n inienv,medienv,finenv,tonenv = False,True,False,False\n elif char not in inichars or finenv:\n if char in 'XH':\n inienv,medienv,finenv,tonenv = False,False,False,True\n else:\n inienv,medienv,finenv,tonenv = False,False,True,False\n\n # fill in slots\n if inienv:\n initial += char\n\n if medienv:\n medial += char\n\n if finenv:\n final += char\n\n if tonenv:\n tone += char\n\n # post-parse tone\n if not tone and final[-1] in 'ptk':\n tone = 'R'\n elif not tone:\n tone = 'P'\n\n # post-parse medial\n if 'j' not in medial and 'y' in initial:\n medial += 'j'\n\n # post-parse labial\n if final[0] in 'u' and 'w' not in medial:\n medial = 'w' + medial\n\n return initial,medial,final,tone",
"def test_parse_language(self):\n book_data = 'zotero/test_data/Chapter Test 8-9-16.rdf'\n for entry in ZoteroIngest(book_data):\n if entry.get('type_controlled')[0].lower() == 'booksection':\n self.assertIn('language', entry)",
"def parse_chinese_morphemes(seq, context=False):\n\n # get the tokens\n if isinstance(seq, list):\n tokens = [s for s in seq]\n else:\n tokens = lingpy.ipa2tokens(seq, merge_vowels=False)\n\n # get the sound classes according to the art-model\n arts = [int(x) for x in lingpy.tokens2class(tokens, _art, cldf=True)]\n\n # get the pro-string\n prostring = lingpy.prosodic_string(arts)\n\n # parse the zip of tokens and arts\n I,M,N,C,T = '','','','',''\n\n ini = False\n med = False\n nuc = False\n cod = False\n ton = False\n\n triples = [('?','?','?')]+list(zip(\n tokens,arts,prostring))+[('?','?','?')]\n\n for i in range(1,len(triples)-1): #enumerate(triples[1:-1]): #zip(tokens,arts,prostring):\n\n t,c,p = triples[i]\n _t,_c,_p = triples[i-1]\n t_,c_,p_ = triples[i+1]\n\n # check for initial entry first\n if p == 'A' and _t == '?':\n\n # now, if we have a j-sound and a vowel follows, we go directly to\n # medial environment\n if t[0] in 'jɥw':\n med = True\n ini,nuc,cod,ton = False,False,False,False\n else:\n ini = True\n med,nuc,doc,ton = False,False,False,False\n\n # check for initial vowel\n elif p == 'X' and _t == '?':\n if t[0] in 'iuy' and c_ == '7':\n med = True\n ini,nuc,cod,ton = False,False,False,False\n else:\n nuc = True\n ini,med,cod,ton = False,False,False,False\n\n # check for medial after initial\n elif p == 'C':\n med = True\n ini,nuc,cod,ton = False,False,False,False\n\n # check for vowel medial\n elif p == 'X' and p_ == 'Y':\n\n # if we have a medial vowel, we classify it as medial\n if t in 'iyu':\n med = True\n ini,nuc,cod,ton = False,False,False,False\n else:\n nuc = True\n ini,med,cod,ton = False,False,False,False\n\n # check for vowel without medial\n elif p == 'X' or p == 'Y':\n if p_ in 'LTY' or p_ == '?':\n nuc = True\n ini,med,cod,ton = False,False,False,False\n elif p == 'Y':\n nuc = True\n ini,med,cod,ton = 4 * [False]\n else:\n cod = True\n ini,med,nuc,ton = 4 * [False]\n\n # check for consonant\n elif p == 'L':\n cod = True\n ini,med,nuc,ton = 4 * [False]\n\n # check for tone\n elif p == 'T':\n ton = True\n ini,med,nuc,cod = 4 * [False]\n\n if ini:\n I += t\n elif med:\n M += t\n elif nuc:\n N += t\n elif cod:\n C += t\n else:\n T += t\n\n # bad conversion for output, but makes what it is supposed to do\n out = [I,M,N,C,T]\n tf = lambda x: x if x else '-'\n out = [tf(x) for x in out]\n\n # transform tones to normal letters\n tones = dict(zip('¹²³⁴⁵⁶⁷⁸⁹⁰₁₂₃₄₅₆₇₈₉₀','1234567890123456789'))\n\n # now, if context is wanted, we'll yield that\n ic = '1' if [x for x in I if x in 'bdgmnŋȵɳɴ'] else '0'\n mc = '1' if [m for m in M+N if m in 'ijyɥ'] else '0'\n cc = '1' if C in 'ptkʔ' else '0'\n tc = ''.join([tones.get(x, x) for x in T])\n\n IC = '/'.join(['I',ic,mc,cc,tc]) if I else ''\n MC = '/'.join(['M',ic,mc,cc,tc]) if M else ''\n NC = '/'.join(['N',ic,mc,cc,tc]) if N else ''\n CC = '/'.join(['C',ic,mc,cc,tc]) if C else ''\n TC = '/'.join(['T',ic,mc,cc,tc]) if T else ''\n\n if context:\n return out, [x for x in [IC,MC,NC,CC,TC] if x]",
"def _parse_bass(self, string):\n if not string:\n return None\n return self._NE.create_note(string)",
"def decode(self, frame):\n try: pos = frame.index(\"LAME\")\n except: return\n\n # check the info tag crc. if it's not valid, no point parsing much more.\n lamecrc = bin2dec(bytes2bin(frame[190:192]))\n if self._crc16(frame[:190]) != lamecrc:\n #TRACE_MSG('Lame tag CRC check failed')\n # read version string from the first 30 bytes, up to any\n # non-ascii chars, then strip padding chars.\n #\n # XXX (How many bytes is proper to read? madplay reads 20, but I've\n # got files with longer version strings)\n lamever = []\n for c in frame[pos:pos + 30]:\n if ord(c) not in range(32, 127):\n break\n lamever.append(c)\n self['encoder_version'] = ''.join(lamever).rstrip('\\x55')\n TRACE_MSG('Lame Encoder Version: %s' % self['encoder_version'])\n return\n\n TRACE_MSG('Lame info tag found at position %d' % pos)\n\n # Encoder short VersionString, 9 bytes\n self['encoder_version'] = lamever = frame[pos:pos + 9].rstrip()\n TRACE_MSG('Lame Encoder Version: %s' % self['encoder_version'])\n pos += 9\n\n # Info Tag revision + VBR method, 1 byte\n self['tag_revision'] = bin2dec(bytes2bin(frame[pos:pos + 1])[:5])\n vbr_method = bin2dec(bytes2bin(frame[pos:pos + 1])[5:])\n self['vbr_method'] = self.VBR_METHODS.get(vbr_method, 'Unknown')\n TRACE_MSG('Lame info tag version: %s' % self['tag_revision'])\n TRACE_MSG('Lame VBR method: %s' % self['vbr_method'])\n pos += 1\n\n # Lowpass filter value, 1 byte\n self['lowpass_filter'] = bin2dec(bytes2bin(frame[pos:pos + 1])) * 100\n TRACE_MSG('Lame Lowpass filter value: %s Hz' % self['lowpass_filter'])\n pos += 1\n\n # Replay Gain, 8 bytes total\n replaygain = {}\n\n # Peak signal amplitude, 4 bytes\n peak = bin2dec(bytes2bin(frame[pos:pos + 4])) << 5\n if peak > 0:\n peak /= float(1 << 28)\n db = 20 * log10(peak)\n replaygain['peak_amplitude'] = peak\n TRACE_MSG('Lame Peak signal amplitude: %.8f (%+.1f dB)' % (peak, db))\n pos += 4\n\n # Radio and Audiofile Gain, AKA track and album, 2 bytes each\n for gaintype in ['radio', 'audiofile']:\n name = bin2dec(bytes2bin(frame[pos:pos + 2])[:3])\n orig = bin2dec(bytes2bin(frame[pos:pos + 2])[3:6])\n sign = bin2dec(bytes2bin(frame[pos:pos + 2])[6:7])\n adj = bin2dec(bytes2bin(frame[pos:pos + 2])[7:]) / 10.0\n if sign:\n adj *= -1\n # XXX Lame 3.95.1 and above use 89dB as a reference instead of 83dB\n # as defined by the Replay Gain spec. 
Should this be compensated for?\n #if lamever[:4] == 'LAME' and lamevercmp(lamever[4:], '3.95') > 0:\n # adj -= 6\n if orig:\n name = self.REPLAYGAIN_NAME.get(name, 'Unknown')\n orig = self.REPLAYGAIN_ORIGINATOR.get(orig, 'Unknown')\n replaygain[gaintype] = {'name': name, 'adjustment': adj,\n 'originator': orig}\n TRACE_MSG('Lame %s Replay Gain: %s dB (%s)' % (name, adj, orig))\n pos += 2\n if replaygain:\n self['replaygain'] = replaygain\n\n # Encoding flags + ATH Type, 1 byte\n encflags = bin2dec(bytes2bin(frame[pos:pos + 1])[:4])\n self['encoding_flags'], self['nogap'] = self._parse_encflags(encflags)\n self['ath_type'] = bin2dec(bytes2bin(frame[pos:pos + 1])[4:])\n TRACE_MSG('Lame Encoding flags: %s' % ' '.join(self['encoding_flags']))\n if self['nogap']:\n TRACE_MSG('Lame No gap: %s' % ' and '.join(self['nogap']))\n TRACE_MSG('Lame ATH type: %s' % self['ath_type'])\n pos += 1\n\n # if ABR {specified bitrate} else {minimal bitrate}, 1 byte\n btype = 'Constant'\n if 'Average' in self['vbr_method']:\n btype = 'Target'\n elif 'Variable' in self['vbr_method']:\n btype = 'Minimum'\n # bitrate may be modified below after preset is read\n self['bitrate'] = (bin2dec(bytes2bin(frame[pos:pos + 1])), btype)\n TRACE_MSG('Lame Bitrate (%s): %s' % (btype, self['bitrate'][0]))\n pos += 1\n\n # Encoder delays, 3 bytes\n self['encoder_delay'] = bin2dec(bytes2bin(frame[pos:pos + 3])[:12])\n self['encoder_padding'] = bin2dec(bytes2bin(frame[pos:pos + 3])[12:])\n TRACE_MSG('Lame Encoder delay: %s samples' % self['encoder_delay'])\n TRACE_MSG('Lame Encoder padding: %s samples' % self['encoder_padding'])\n pos += 3\n\n # Misc, 1 byte\n sample_freq = bin2dec(bytes2bin(frame[pos:pos + 1])[:2])\n unwise_settings = bin2dec(bytes2bin(frame[pos:pos + 1])[2:3])\n stereo_mode = bin2dec(bytes2bin(frame[pos:pos + 1])[3:6])\n self['noise_shaping'] = bin2dec(bytes2bin(frame[pos:pos + 1])[6:])\n self['sample_freq'] = self.SAMPLE_FREQUENCIES.get(sample_freq, 'Unknown')\n self['unwise_settings'] = bool(unwise_settings)\n self['stereo_mode'] = self.STEREO_MODES.get(stereo_mode, 'Unknown')\n TRACE_MSG('Lame Source Sample Frequency: %s' % self['sample_freq'])\n TRACE_MSG('Lame Unwise settings used: %s' % self['unwise_settings'])\n TRACE_MSG('Lame Stereo mode: %s' % self['stereo_mode'])\n TRACE_MSG('Lame Noise Shaping: %s' % self['noise_shaping'])\n pos += 1\n\n # MP3 Gain, 1 byte\n sign = bytes2bin(frame[pos:pos + 1])[0]\n gain = bin2dec(bytes2bin(frame[pos:pos + 1])[1:])\n if sign:\n gain *= -1\n self['mp3_gain'] = gain\n db = gain * 1.5\n TRACE_MSG('Lame MP3 Gain: %s (%+.1f dB)' % (self['mp3_gain'], db))\n pos += 1\n\n # Preset and surround info, 2 bytes\n surround = bin2dec(bytes2bin(frame[pos:pos + 2])[2:5])\n preset = bin2dec(bytes2bin(frame[pos:pos + 2])[5:])\n if preset in range(8, 321):\n if self['bitrate'] >= 255:\n # the value from preset is better in this case\n self['bitrate'] = (preset, btype)\n TRACE_MSG('Lame Bitrate (%s): %s' % (btype, self['bitrate'][0]))\n if 'Average' in self['vbr_method']:\n preset = 'ABR %s' % preset\n else:\n preset = 'CBR %s' % preset\n else:\n preset = self.PRESETS.get(preset, preset)\n self['surround_info'] = self.SURROUND_INFO.get(surround, surround)\n self['preset'] = preset\n TRACE_MSG('Lame Surround Info: %s' % self['surround_info'])\n TRACE_MSG('Lame Preset: %s' % self['preset'])\n pos += 2\n\n # MusicLength, 4 bytes\n self['music_length'] = bin2dec(bytes2bin(frame[pos:pos + 4]))\n TRACE_MSG('Lame Music Length: %s bytes' % self['music_length'])\n pos += 4\n\n # MusicCRC, 2 
bytes\n self['music_crc'] = bin2dec(bytes2bin(frame[pos:pos + 2]))\n TRACE_MSG('Lame Music CRC: %04X' % self['music_crc'])\n pos += 2\n\n # CRC-16 of Info Tag, 2 bytes\n self['infotag_crc'] = lamecrc # we read this earlier\n TRACE_MSG('Lame Info Tag CRC: %04X' % self['infotag_crc'])\n pos += 2",
"def translitArabic(text):\r\n buckFile = './BuckwalterSimplified.txt'\r\n buckFile = open(buckFile, 'r', encoding='utf-8').readlines()\r\n for translitPair in buckFile:\r\n translit = re.search('(.)\\t(.)', translitPair)\r\n arLetter = translit.group(1)\r\n latEquiv = translit.group(2)\r\n text = re.sub(arLetter, latEquiv, text)\r\n return(text)",
"def convertToYNAB(row):\n kartennummer = row[3]\n buchungstag = row[1]\n valutadatum = row[0]\n verwendungszweck = row[2]\n betrag = 0\n if row[5][-1] == '-':\n betrag = -float(row[5][0:-1].replace(\".\", \"\").replace(',', '.'))\n else:\n betrag = float(row[5][0:-1].replace(\".\", \"\").replace(',', '.'))\n\n ynab_date = valutadatum.replace('.', '/')\n #ynab_payee = beguenstigter_zahlungspflichtiger\n ynab_payee = str(verwendungszweck)\n ynab_category = categorizeRow(buchungstag, valutadatum, verwendungszweck, kartennummer, betrag)\n ynab_memo = \"\"\n ynab_outflow = \"\"\n ynab_inflow = \"\"\n if betrag < 0:\n ynab_outflow = -betrag\n else:\n ynab_inflow = betrag\n\n return [ynab_date, ynab_payee, ynab_category, ynab_memo, ynab_outflow, ynab_inflow, betrag]",
"def parse(self):\n nac = [None, [], None] # name, address, country\n\n self.translate_to_los()\n\n if self.check_simple_org_format():\n org_name = self.parse_arin_simple_org()\n nac[ORGNAME] = org_name\n else:\n ref_ser = self.find_referral_server()\n if ref_ser:\n server_name, port_number = ref_ser\n # raw_whois = self.receive_raw_whois(ip_address, server_name, port_number)\n whois_parser = self._manager.create_parser(self._ip_address, server_name, port_number)\n whois_parser.receive_raw_whois()\n nac = whois_parser.parse()\n else:\n self.parse_arin_org(nac)\n return nac",
"def test_po_parser_pt_BR_with_warning_messages(self):\r\n handler = POHandler('%s/pt_BR.po' % os.path.split(__file__)[0])\r\n handler.set_language(self.language_ar)\r\n handler.parse_file()\r\n self.assertTrue('nplural' in handler.warning_messages.keys())",
"def _extract_currency(description):\n try:\n pattern = '<br>Currency: '\n start_idx = description.index(pattern)+len(pattern)\n return description[start_idx : start_idx+3] #The currency string has always langht 3\n except:\n return \"unkown\"",
"def __init__(self, data, charset=None):\n self._validate_charset(data, charset)\n\n if charset in ('A', 'B'):\n charset *= len(data)\n elif charset in ('C',):\n charset *= (len(data) // 2)\n if len(data) % 2 == 1:\n # If there are an odd number of characters for charset C, encode the last character with charset B.\n charset += 'B'\n\n self.data = data\n self.symbol_values = self._encode(data, charset)",
"def test_qamats_gadol_accent():\n word = r\"נָ֤ע\" # na (Genesis 4:14)\n parts = [\"nun\", \"qamats-gadol\", \"ayin\"]\n assert parts == Parser().parse(word).flat()",
"def RuCoref2CoNLL(path, out_path, language='russian'):\n data = {\"doc_id\": [],\n \"part_id\": [],\n \"word_number\": [],\n \"word\": [],\n \"part_of_speech\": [],\n \"parse_bit\": [],\n \"lemma\": [],\n \"sense\": [],\n \"speaker\": [],\n \"entiti\": [],\n \"predict\": [],\n \"coref\": []}\n\n part_id = '0'\n speaker = 'spk1'\n sense = '-'\n entiti = '-'\n predict = '-'\n\n tokens_ext = \"txt\"\n groups_ext = \"txt\"\n tokens_fname = \"Tokens\"\n groups_fname = \"Groups\"\n\n tokens_path = os.path.join(path, \".\".join([tokens_fname, tokens_ext]))\n groups_path = os.path.join(path, \".\".join([groups_fname, groups_ext]))\n print('Convert rucoref corpus into conll format ...')\n start = time.time()\n coref_dict = {}\n with open(groups_path, \"r\") as groups_file:\n for line in groups_file:\n doc_id, variant, group_id, chain_id, link, shift, lens, content, tk_shifts, attributes, head, hd_shifts = line[\n :-1].split('\\t')\n\n if doc_id not in coref_dict:\n coref_dict[doc_id] = {'unos': defaultdict(list), 'starts': defaultdict(list), 'ends': defaultdict(list)}\n\n if len(tk_shifts.split(',')) == 1:\n coref_dict[doc_id]['unos'][shift].append(chain_id)\n else:\n tk = tk_shifts.split(',')\n coref_dict[doc_id]['starts'][tk[0]].append(chain_id)\n coref_dict[doc_id]['ends'][tk[-1]].append(chain_id)\n groups_file.close()\n\n # Write conll structure\n with open(tokens_path, \"r\") as tokens_file:\n k = 0\n doc_name = '0'\n for line in tokens_file:\n doc_id, shift, length, token, lemma, gram = line[:-1].split('\\t')\n \n if doc_id == 'doc_id':\n continue\n \n if doc_id != doc_name:\n doc_name = doc_id\n w = watcher()\n k = 0\n \n data['word'].append(token) \n data['doc_id'].append(doc_id)\n data['part_id'].append(part_id)\n data['lemma'].append(lemma)\n data['sense'].append(sense)\n data['speaker'].append(speaker)\n data['entiti'].append(entiti)\n data['predict'].append(predict)\n data['parse_bit'].append('-')\n\n opens = coref_dict[doc_id]['starts'][shift] if shift in coref_dict[doc_id]['starts'] else []\n ends = coref_dict[doc_id]['ends'][shift] if shift in coref_dict[doc_id]['ends'] else []\n unos = coref_dict[doc_id]['unos'][shift] if shift in coref_dict[doc_id]['unos'] else []\n s = []\n s += ['({})'.format(el) for el in unos]\n s += ['({}'.format(el) for el in opens]\n s += ['{})'.format(el) for el in ends]\n s = '|'.join(s)\n if len(s) == 0:\n s = '-'\n data['coref'].append(s)\n else:\n data['coref'].append(s)\n \n closed = w.mentions_closed(s)\n if gram == 'SENT' and not closed:\n data['part_of_speech'].append('.')\n data['word_number'].append(k)\n k += 1 \n \n elif gram == 'SENT' and closed:\n data['part_of_speech'].append(gram)\n data['word_number'].append(k)\n k = 0\n else:\n data['part_of_speech'].append(gram)\n data['word_number'].append(k)\n k += 1\n \n tokens_file.close()\n \n \n # Write conll structure in file\n conll = os.path.join(out_path, \".\".join([language, 'v4_conll']))\n with open(conll, 'w') as CoNLL:\n for i in tqdm(range(len(data['doc_id']))):\n if i == 0:\n CoNLL.write('#begin document ({}); part {}\\n'.format(data['doc_id'][i], data[\"part_id\"][i]))\n CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n elif i == len(data['doc_id']) - 1:\n 
CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n CoNLL.write('\\n')\n CoNLL.write('#end document\\n')\n else:\n if data['doc_id'][i] == data['doc_id'][i + 1]:\n CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n if data[\"word_number\"][i + 1] == 0:\n CoNLL.write('\\n')\n else:\n CoNLL.write(u'{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(data['doc_id'][i],\n data[\"part_id\"][i],\n data[\"word_number\"][i],\n data[\"word\"][i],\n data[\"part_of_speech\"][i],\n data[\"parse_bit\"][i],\n data[\"lemma\"][i],\n data[\"sense\"][i],\n data[\"speaker\"][i],\n data[\"entiti\"][i],\n data[\"predict\"][i],\n data[\"coref\"][i]))\n CoNLL.write('\\n')\n CoNLL.write('#end document\\n')\n CoNLL.write('#begin document ({}); part {}\\n'.format(data['doc_id'][i + 1], data[\"part_id\"][i + 1]))\n\n print('End of convertion. Time - {}'.format(time.time() - start))\n return None",
"def parse_block(block: str) -> str:\n try:\n match = pattern.search(block)\n charset, encoding, raw_text = match.groups()\n except AttributeError:\n # match is None so .groups fails\n raise ValueError(f\"Could not recognise format of: {block}\") from None\n\n if str.lower(encoding) == 'b':\n text = b64decode(raw_text)\n elif str.lower(encoding) == 'q':\n text = quopri.decodestring(raw_text)\n else:\n raise ValueError(f\"Unknown encoding '{encoding}'\") from None\n exit(1)\n\n decoded = text.decode(charset)\n return decoded",
"def getLanguageCodes(self): #$NON-NLS-1$\r"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse URB_INTERRUPT packet with the EuroBraille device. Adapted from the 'handleSystemInformation' function.
|
def parse_interrupt(data):
# Extract the data
seqnum = struct.unpack("<B", data[0])[0]
STX_index = 1
if data[1] == PAD:
STX_index = 2
assert data[STX_index] == STX
data_length = struct.unpack(">H",data[STX_index+1:STX_index+3])[0]
assert data[data_length+STX_index+1] == ETX
packet_data = data[STX_index+3:data_length+STX_index+1]
assert len(packet_data) == data_length - 2
# Maybe force-write
if packet_data[0] == 'R' and packet_data[1] == 'P':
print "seq {} - force-write".format(seqnum)
return
# Make sure it is system information
assert packet_data[0] == 'S'
subtype = packet_data[1]
# Print its meaning
if subtype == 'H':
print "seq {} - short name: {}".format(seqnum, packet_data[2:])
elif subtype == 'I':
print "seq {} - End".format(seqnum)
elif subtype == 'G':
cols = struct.unpack("<B", packet_data[2])[0]
print "seq {} - text columns = {}".format(seqnum, cols)
elif subtype == 'S':
print "seq {} - string".format(seqnum)
elif subtype == 'T':
identifier = struct.unpack("<B", packet_data[2])[0]
print "seq {} - identifier {}".format(seqnum, identifier)
if 0x0c == identifier:
print "model info:"
print """ { .modelIdentifier = EU_ESYS_80,
.modelName = "Esys 80",
.cellCount = 80,
.hasBrailleKeyboard = 1,
.isEsys = 1,
.keyTable = &KEY_TABLE_DEFINITION(esys_large)
},"""
else:
print "seq {} - unknown subtype {}".format(seqnum, subtype)
|
[
"def handle_ip(self, byte):\n self.log.debug('IAC IP: Interrupt Process')",
"def handle_abort(self, byte):\n self.log.debug('IAC ABORT: Abort')",
"def read_interrupt():\n#\n#--- read data\n#\n file = house_keeping + '/all_data'\n f = open(file, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n data.reverse()\n\n period = []\n interrupt_start = []\n interrupt_stop = []\n#\n#--- convert the date in yyyy:mm:hh:ss to seconds from 1998.1.1\n#\n for ent in data:\n atemp = re.split('\\s+', ent)\n btemp = re.split(':', atemp[1])\n start = tcnv.convertDateToTime2(int(btemp[0]), int(btemp[1]), int(btemp[2]), int(btemp[3]), int(btemp[4]), 0)\n btemp = re.split(':', atemp[2])\n stop = tcnv.convertDateToTime2(int(btemp[0]), int(btemp[1]), int(btemp[2]), int(btemp[3]), int(btemp[4]), 0)\n#\n#--- save the list again\n#\n period.append(atemp[0])\n interrupt_start.append(float(start))\n interrupt_stop.append(float(stop))\n\n return [period, interrupt_start, interrupt_stop]",
"def RPC__Engine__interrupt(self, uuid, cellid=None):\n self.call('interrupt', uuid, Args(cellid=cellid))",
"def extract_interruption(text, electoral_term, session, identity, text_position, frame):\n\n # Creates the Pattern modularly\n interruption_Pattern = (\n start_contributions_opening_bracket_Pattern.format(\n \"\" # Nothing to extend, so .format(\"\")\n )\n + base_interruption_Pattern\n + start_contributions_closing_bracket_Pattern.format(\n \"\" # Nothing to extend, so .format(\"\")\n )\n )\n\n # Find matches\n matches = list(regex.finditer(interruption_Pattern, text))\n\n # Iterate over matches\n for match in matches:\n # replace everything except the delimeters\n text = text.replace(match.group(\"delete\"), \" \")\n # Add entry to the frame\n frame = add_entry(\n frame,\n identity,\n \"Unterbrechung\",\n \"\",\n \"\",\n \"\",\n match.group(\"delete\"),\n text_position,\n )\n\n return frame, text",
"def _get_interrupts(tcl_name):\n result = _InterruptMap(tcl_name)\n return result.intc_parent, result.intc_pins",
"def handle_INT(self):\r\n self.lineBuffer = []\r\n self.lineBufferIndex = 0\r\n\r\n self.terminal.nextLine()\r\n self.terminal.write(\"KeyboardInterrupt\")\r\n self.terminal.nextLine()",
"def dome_interrupt(self):\n j = '\\n'\n self.msg2ars(msg=j, msgname='INTERRUPT')\n return",
"def decode(self, pdu):\n if _debug: BVLCI._debug(\"decode %s\", str(pdu))\n\n # copy the basics\n PCI.update(self, pdu)\n\n self.bvlciType = pdu.get()\n if self.bvlciType != 0x81:\n raise DecodingError(\"invalid BVLCI type\")\n\n self.bvlciFunction = pdu.get()\n self.bvlciLength = pdu.get_short()\n\n if (self.bvlciLength != len(pdu.pduData) + 4):\n raise DecodingError(\"invalid BVLCI length\")",
"def get_interrupts(start_time,end_time):\n attributes = {'start':start_time,'stop':end_time}\n req = requests.get(_url('/irqsummary'),params=attributes,\n headers={'content-type':'application/json','accept':'application/json'})\n #print(req.text)\n if 'irq_data' in req.json():\n irqdata = req.json()['irq_data']\n items = sorted(irqdata.items())\n print('{0:5} {1:>8} {2:>8}'.format('IRQ','CPU0','CPU1'))\n print('{0:5} {1:>8} {2:>8}'.format('-----','--------','--------'))\n for k,v in sorted(items):\n #print(k,v[0],v[1])\n print('{0:5} {1:8} {2:8}'.format(k,v[0],v[1]))\n #print(\"%s %s\" % [k,items[k]])\n if 'message' in req.json():\n print(req.json()['message'])",
"def _fetch_pending_interrupt(self):\n with self.interrupt_queue_mutex:\n if not self.interrupt_queue:\n return None\n\n # In disable-interrupts state we can process only non-maskable interrupts\n # (faults).\n if self.cr[self.CREG_INT_CONTROL] & 1 == 0:\n # Maskable interrupts disabled. Find a non-maskable one.\n for i, interrupt in enumerate(self.interrupt_queue):\n if interrupt not in self.MASKABLE_INTS:\n return self.interrupt_queue.pop(i)\n\n # No non-maskable interrupts found.\n return None\n\n # Return the first interrupt available.\n return self.interrupt_queue.pop()",
"def issue_frucontrol_diagnostic_interrupt(self, fruid=0):\n fruid = int(fruid)\n self._ipmi.fru_control_diagnostic_interrupt(fruid)",
"def __signal_handler(signal, frame):\n global INTERRUPTED\n INTERRUPTED = True",
"def __process_buffer__(self):\n index = 0\n buff = self.buffer[self.FIXED_DATA_LENGTH:]\n while index < len(buff):\n if not len(buff[index:]) >= self.TLV_ID_LENGTH + self.TLV_SIZE_LENGTH:\n raise InvalidP2PInformationElement(\"TLV invalid data.\")\n tlv_id = struct.unpack(\"B\", buff[index:index+self.TLV_ID_LENGTH])[0]\n index += self.TLV_ID_LENGTH\n tlv_size = struct.unpack(\"H\", buff[index:index + self.TLV_SIZE_LENGTH])[0]\n index += self.TLV_SIZE_LENGTH\n tlv_name = P2PElements.get_element_key(tlv_id)\n if tlv_name:\n self.__elements__[tlv_name] = buff[index:index + tlv_size]\n index += tlv_size",
"def OnKeyboardInterrupt(handler):\n\n signal.signal(signal.SIGINT, handler)\n yield",
"def INT(self, address):\n interruptNum = self.reg[address]\n self.pc = interruptNum - 2 # cause operands\n\n #self.reg[self.IS] = \"\"\n #pass",
"def irDecode(self, pinNum):\r\n if self.mcuType == 0:\r\n self.mcuserial.write('z' + chr(pinNum) + chr(0))\r\n pl = self.mcuserial.read(2)\r\n pl = int('0x' + pl, 0)\r\n return int(pl)\r\n else:\r\n sys.stderr.write('Your current pyMCU board does not support this feature.\\n')",
"def siginterrupt(space, signum, flag):\n check_signum_in_range(space, signum)\n if rffi.cast(lltype.Signed, c_siginterrupt(signum, flag)) < 0:\n errno = rposix.get_saved_errno()\n raise OperationError(space.w_RuntimeError, space.newint(errno))",
"def parse_enumeration(enumeration_bytes):\n # If subunit v2 is available, use it.\n if bytestream_to_streamresult is not None:\n return _v2(enumeration_bytes)\n else:\n return _v1(enumeration_bytes)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If the game is over, it returns the player who won; if it is a draw, it returns 'remi'; if the game is not over yet, it returns False.
|
def konec_igre(plosca):
    # check the rows
vrstice_konec = preveri_vrstice(plosca)
if vrstice_konec: return vrstice_konec
    # check the columns
    stolpci_konec = preveri_vrstice(list(zip(*plosca))) # check the rows of the transposed board
if stolpci_konec: return stolpci_konec
    # check the diagonals
diagonala_konec = preveri_diagonali(plosca)
if diagonala_konec: return diagonala_konec
if plosca_je_polna(plosca): return 'remi'
return False
|
[
"def __zavrsi (zadnji):\n\n self.__zavrsena = True\n\n if zadnji is not None:\n # \"Pocisti\" stol.\n while self.__stol:\n karta = self.__stol.pop()\n self.__igraci[zadnji]['skupljeno'] |= {karta}\n\n # Pronadi igraca sa strogo najvise skupljenih karata ako postoji.\n I = [0]\n self.__igraci[0]['max'] = (False, len(self.__igraci[0]['skupljeno']))\n for j in range(1, len(self.__igraci)):\n self.__igraci[j]['max'] = (False, len(self.__igraci[j]['skupljeno']))\n if len(self.__igraci[j]['skupljeno']) > len(self.__igraci[I[0]]['skupljeno']):\n I = [j]\n elif len(self.__igraci[j]['skupljeno']) == len(self.__igraci[I[0]]['skupljeno']):\n I.append(j)\n\n if len(I) == 1:\n # Evaluiraj strogo najveci broja skupljenih karata.\n self.__igraci[I[0]]['max'] = (True, len(self.__igraci[I[0]]['skupljeno']))",
"def __legalniPotez (i, karta, skupljeno, razlog = False):\n\n greske = list()\n\n # Provjeri je li karta u ruci.\n if not karta in self.__igraci[i]['ruka']:\n greske.append(karta)\n\n # Provjeri je li skupljeno na stolu.\n if not skupljeno <= self.__stol:\n greske.append(skupljeno)\n\n # Ako se skuplja sa stola, provjeri sume.\n if skupljeno:\n M = Tablic.moguciPotezi(skupljeno)\n if not karta.znak in M:\n greske.append(karta.znak)\n else:\n if not frozenset(skupljeno) in unijeDisjunktnih(M[karta.znak]):\n greske.append(False)\n\n # Ako treba, vrati ilegalnost poteza i greske.\n if razlog and greske:\n return (False, tuple(greske))\n\n # Vrati legalnost poteza.\n return not greske",
"def idudeddvivacanampragrhyam(self):\n self.Pragrhya = False\n # PMS: 1.1.11. IdUdeddvivacanam pragfhyam\n if self.External and self.Pada1 in pragrhya_set:\n self.Pragrhya = True",
"def crtaj_vrata (X1,Y1,Z1,koja_vrata=195 , rel_smjer = 0):\n #gdje sam\n radnaPozicija = mc.player.getPos()\t\t\n #kamo gledam\n smjerRada = mc.player.getDirection ()\t\t\t#uzmem kamo gledam\n\n #smjer gledanja radi preglednosti spremimo u \"vektor\"\"\n Vx=0\t\t\t\t\t\t\t\t\t\t\t\t#pocetne vrijednosti su nule\n Vz=0\n if abs (smjerRada.x) > abs (smjerRada.z): \t\t#nadje se dominanti smjer i spremi u vektor\n Vx=round(smjerRada.x)\n else:\n Vz=round(smjerRada.z)\n\n # rel_smjer == 0 naprijed 1 lijevo 2 desno 3 iza \n \n if Vx == 1 :\n pass \n if Vx == -1 : \n rel_smjer += 2\n if rel_smjer > 3 :\n rel_smjer -= 4\n \n \n if Vz == -1 : \n rel_smjer += 1\n if rel_smjer > 3 :\n rel_smjer -= 4 \n if Vz == 1 : \n rel_smjer += 3\n if rel_smjer > 3 :\n rel_smjer -= 4 \n \n if Vz != 0 :\n if rel_smjer == 1 :\n buffer = 3\n if rel_smjer == 3 :\n buffer = 1\n if ( rel_smjer == 1 ) or ( rel_smjer == 3 ) :\n rel_smjer = buffer\n\n \n\n \n #crtanje\n \n \n \n if abs ( Vx ) != abs ( Vz ) :\t\t# ne pod 45\n\n gdjeX1=radnaPozicija.x + Vx*X1 + Vz*Z1 # modificiraj pocetnu koordinatu\n gdjeY1=radnaPozicija.y + Y1\n gdjeZ1=radnaPozicija.z + Vx*Z1 + Vz*X1\n mc.setBlock ( gdjeX1 , gdjeY1 , gdjeZ1 , koja_vrata , 0 + rel_smjer ) # doljnji dio vrata\n gdjeY1=radnaPozicija.y + 1\n mc.setBlock ( gdjeX1 , gdjeY1 , gdjeZ1 , koja_vrata , 8 + rel_smjer ) # gornji dio vrata\n return 1",
"def es_mayuscula(self):\n\t\tif not self.__cadena.isalpha():\n\t\t\treturn False\n\t\treturn self.__cadena.upper() == self.__cadena",
"def naschavyaprasan(self):\n if (self.Linary[self.Index - 1] == sktn and (set_memberP(self.Linary[self.Index + 1], Chav)) and not (self.Pada1 == \"praSAn\")):\n self.Linary[self.Index - 1] = ru\n # PMS: 8.3.4. anunAsikAt paro \"nusvAraH\n self.insertary(sktanusvara, self.Index - 1)\n self.Index = self.Index + 1",
"def inicijalniBrojKarata_ruka (cls):\n\n return 6",
"def __pokreni ():\n\n self.__pokrenuta = True\n\n for i in range(Tablic.inicijalniBrojKarata_stol()):\n self.__stol |= {self.__spil.get()}",
"def kupvohXkXpau(self):\n # PMS: by 8.3.15, kharavasAnayorvisarjanIyaH, visarga occurs\n # before avasAna too. but Xk && Xp don't.\n if (self.Linary[self.Index - 1] == sktvisarga) and (set_memberP(self.Linary[self.Index + 1], Khar)):\n # PMS: Hence, khari is understood here too\n Apavada = False\n if (self.Index + 2 < self.linmax):\n if set_memberP(self.Linary[self.Index + 2], Schar):\n Apavada = True # PMS: 8.3.35. Sarpare visarjanIyaH.\n if not (Apavada):\n if set_memberP(self.Linary[self.Index + 1], Ku):\n self.Linary[self.Index - 1] = sktjihvamuliya\n elif set_memberP(self.Linary[self.Index + 1], Pu):\n self.Linary[self.Index - 1] = sktupadhmaniya",
"def atororhasica(self):\n if self.Index > 2:\n if (self.Linary[self.Index - 2] == skta) and (self.Linary[self.Index - 1] == ru):\n if set_memberP(self.Linary[self.Index + 1], Hasch_and_skta):\n self.Linary[self.Index - 2] = skto # PMS: Linary[Index-1=sktu; adguna\n self.deletary(self.Index - 1)\n self.Index = self.Index - 1",
"def atahkrkamikamsa(self):\n if self.Compound:\n if (self.Linary[self.Index - 1] == sktvisarga) and (set_memberP(self.Linary[self.Index + 1], Ku_and_Pu)) and (self.Index > 2):\n if (self.Linary[self.Index - 2] == skta):\n if self.Pada2 in [\"kAra\",\"kAma\",\"kaMsa\",\"kumBa\",\"kumBI\",\"pAtra\",\"kuSA\",\"karRI\"]:\n if self.Pada1 not in [\"svar\",\"antar\",\"prAtar\",\"punar\",\"sanutar\",\"hyas\",\"Svas\",\"avas\",\"aDas\"]:\n if not self.Uttarapada:\n self.Linary[self.Index - 1] = skts\n # PMS: miTas, namas, (tiraskAra by 8.3.42. avaskara, namaskAra?)\n # krtvasuc, suc, i.e. not avyaya",
"def est_premier(nombre):\n # si le nombre est inférieur à un, il ne peut pas être premier donc on retourne false\n if nombre <= 1:\n return False\n # si le nombre est 2 ou 3, on sait qu'il est premier donc on retourne true\n if nombre <= 3:\n return True\n # si le nombre est modulo 2 ou 3, on sait qu'il n'est pas premier puisqu'on a déjà exclu 2 et 3 précédement\n if nombre % 2 == 0 or nombre % 3 == 0:\n return False\n # on\n i = 5\n while i * i <= nombre:\n if nombre % i == 0 or nombre % (i + 2) == 0:\n return False\n i = i + 6\n return True",
"def rori(self):\n self.Dhralopa = False\n if (set_memberP(self.Linary[self.Index - 1], sktr_and_ru)) and (self.Linary[self.Index + 1] == sktr):\n self.deletary(self.Index - 1)\n self.Index = self.Index - 1\n self.Dhralopa = True",
"def rechercheHorizontale(mot):\n for i in range(len(matrice)):\n motAConstruire = \"\"\n for j in range(len(matrice[i])):\n motAConstruire += matrice[i][j]\n if mot.upper() in motAConstruire:\n return True\n return False",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageRGBUC3___nonzero__(self)",
"def minimax(self,maksimiziramo):\n stanje = self.igra.stanje\n if stanje == self.igra.ZMAGA:\n if self.igra.zmagovalec == self.jaz:\n return None,self.Zmaga\n else:\n return None,-self.Zmaga\n elif stanje == self.igra.NIHCENEZMAGA:\n return None,0\n else:\n if maksimiziramo:\n moznepoteze = self.igra.moznePoteze()\n random.shuffle(moznepoteze)\n top_poteza = None\n top_vred = -self.Neskonco\n for px,py in moznepoteze:\n self.igra.igrajPotezo(px,py)\n vrednost = self.minimax(not maksimiziramo)[1]\n self.igra.razveljavi()\n if vrednost > top_vred:\n top_vred = vrednost\n top_poteza = (px,py)\n else:\n moznepoteze = self.igra.moznePoteze()\n random.shuffle(moznepoteze)\n top_poteza = None\n top_vred = self.Neskonco\n for px,py in moznepoteze:\n self.igra.igrajPotezo(px,py)\n vrednost = self.minimax(not maksimiziramo)[1]\n self.igra.razveljavi()\n if vrednost < top_vred:\n top_vred = vrednost\n top_poteza = (px,py)\n return top_poteza,top_vred",
"def est_masculin(self):\n if self.race is not None:\n return self.race.genres[self.genre] == \"masculin\" or \\\n self.genre == \"aucun\"\n else:\n return self.genre == \"masculin\" or self.genre == \"aucun\"",
"def omtrek_cirkel(r):\n # Deze functie kan je gebruiken om het volume van de donut te berekenen.\n return 0",
"def __dohvatiPotez (i):\n\n # Inicijaliziraj ponovi i razlog na False, None respektivno.\n ponovi = False\n razlog = None\n\n while True:\n # Dohvati potez od igraca i provjeri njegovu legalnost.\n karta, skupljeno = self.__igraci[i]['igrac'].odigraj(copy.deepcopy(self.__igraci[i]['ruka']), copy.deepcopy(self.__stol), ponovi)\n legalno = __legalniPotez(i, karta, skupljeno, self.__igraci[i]['igrac'].hocuRazlog())\n if isinstance(legalno, tuple):\n legalno, razlog = legalno\n\n # Ako je potez legalan, prekini petlju. Inace postavi\n # vrijednost ponovi i udi u sljedecu iteraciju petlje.\n if legalno:\n break\n elif razlog is None:\n ponovi = True\n else:\n ponovi = (True, razlog)\n\n # Vrati dohvaceni potez.\n return (karta, skupljeno)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|