Dataset columns:
    query: string (lengths 9 to 9.05k)
    document: string (lengths 10 to 222k)
    negatives: list (19 to 20 items)
    metadata: dict
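Each row below follows this schema: a natural-language query, one matching code document, a list of hard-negative code snippets, and a metadata dict naming the training objective. A minimal sketch of iterating such rows, assuming a hypothetical JSON-lines export of this data (the file name and on-disk format are illustrative, not part of this dump):

import json

DATASET_PATH = "code_retrieval_triplets.jsonl"  # hypothetical local export, one JSON object per line

with open(DATASET_PATH, "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        query = row["query"]          # natural-language description of the code
        document = row["document"]    # the matching (positive) code snippet
        negatives = row["negatives"]  # 19-20 non-matching code snippets
        objective = row["metadata"]["objective"]["triplet"]
        print("%s... -> 1 positive, %d negatives, objective %s" % (query[:40], len(negatives), objective))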
construct a string summarizing the episode using its metadata, or just return the episode's description if needed.
def makeEpisodeSummary(episode):
    # using inverted pyramid strategy; more detail at bottom of description
    summary = episode['description'] + "\n\n"
    if episode['publisher'] != '':
        summary = "%sPublisher: %s\n" % (summary, episode['publisher'])
    if episode['season'] != '':
        summary = "%sSeason: %s\n" % (summary, episode['season'])
    if episode['keywords'] != '':
        summary = "%sKeywords: %s\n" % (summary, episode['keywords'])
    if summary != '':
        summary = "%s\n%s" % (summary, episode['description'])
    #Log.Debug(summary)
    return summary
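Since makeEpisodeSummary only reads plain dict keys, it can be exercised outside the Plex plugin. A small sketch with a hypothetical episode record (all field values invented for illustration):

sample_episode = {  # hypothetical values; the keys match those read above
    'description': 'Ichigo meets Rukia and becomes a substitute Soul Reaper.',
    'publisher': 'TV Tokyo',
    'season': 'One',
    'keywords': 'action, shinigami',
}

print(makeEpisodeSummary(sample_episode))
# Expected shape: the description first, then Publisher/Season/Keywords lines,
# then the description repeated at the bottom (the "inverted pyramid" detail).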
[ "def getEpisodeDescription(self, seasonnum, episodenum):\r\n if (type(seasonnum) is not int) and (type(episodenum) is not int):\r\n return('Invalid input, season number and episode number must be integers.')\r\n try:\r\n episodename = showInformation.getEpisodeName(self, seasonnum, episodenum)\r\n return self.runtimedescriptionofepisodes[episodename][1]\r\n except IndexError:\r\n return('N/A (Description not found)')\r\n except KeyError:\r\n return('N/A (Description not found)')", "def summary(self):\n res = self._tvdb._make_request('/series/' + str(self._id) + '/episodes/summary', {})\n return models.SeriesEpisodesSummary(**res['data'])", "def _description_string(self) -> str:", "def description(self):\n info = self.info\n\n # If no info available, return simple description\n if not info: # pragma: no cover\n return \"DicomSeries containing %i images\" % len(self)\n\n fields = []\n # Give patient name\n if \"PatientName\" in info:\n fields.append(\"\" + info[\"PatientName\"])\n # Also add dimensions\n if self.shape:\n tmp = [str(d) for d in self.shape]\n fields.append(\"x\".join(tmp))\n # Try adding more fields\n if \"SeriesDescription\" in info:\n fields.append(\"'\" + info[\"SeriesDescription\"] + \"'\")\n if \"ImageComments\" in info:\n fields.append(\"'\" + info[\"ImageComments\"] + \"'\")\n\n # Combine\n return \" \".join(fields)", "def description(event_id):\n return DESCRIPTIONS.get(event_id, event_id)", "def _description(direction, required_metadata):\r\n return _direction(direction) + '_' + str(required_metadata)", "def full_str(self):\n outstr = self._title + \": \"\n outstr = outstr + str(self._date) + \"; \"\n outstr = outstr + str(self._time)\n return outstr", "def make_colored_descr(self):\n title = self.title or \"unknown\"\n title = \"{: <12}\".format(title)\n\n descr = self.descr or \"- no description -\"\n\n return \"\".join(p for p in [\n self._COLORED_DESCR_PARTS[0],\n title,\n self._COLORED_DESCR_PARTS[1],\n descr,\n self._COLORED_DESCR_PARTS[2],\n self.filename,\n self._COLORED_DESCR_PARTS[3]\n ])", "def get_info():\n title, speaker, publish_year, time, language, price = \\\n input(\"*Enter* title| speaker| publish_year| time| language| price : \\n\").split('|')\n media = PodcastEpisode(title, speaker, publish_year, int(time), language, price)\n return media", "def mkDescription(self):\n self.desc = \"----[Reported by %s]----\\n\" %(self.reporter)\n self.desc += self.summary\n for cmt in self.comments:\n self.desc += \"\\n\\n----[Update by %s on %s]----\\n%s\\n\" %(\n cmt.reporter, cmt.date, cmt.report)", "def _convert_trakt_episode_metadata(show_metadata, episode, banners=True):\n info = copy.deepcopy(show_metadata)\n info['episode'] = episode.get('number')\n info['title'] = episode.get('title', '')\n info['aired'] = episode.get('first_aired', '')\n info['premiered'] = episode.get('first_aired', '')\n info['rating'] = episode.get('rating', '')\n info['plot'] = episode.get('overview', '')\n info['plotoutline'] = episode.get('overview', '')\n info['votes'] = episode.get('votes', '')\n if banners:\n info['poster'] = \"\"\n return info", "def get_next_episode_text(self, series_name):\n series_episodes = self.get_series_episodes(series_name)\n series_info = self.get_series_info(series_name)\n\n try:\n series_title = series_info['seriesName']\n except:\n series_title = '<series title unknown>'\n\n next_episode = self._find_next_episode(series_episodes)\n if next_episode is not None:\n text = '{series_title}: Episode {season}x{episode} \"{episode_title}\" will air on 
{date}'.format(\n series_title=series_title,\n season=next_episode.get('airedSeason', '<season unknown>'),\n episode=next_episode.get('airedEpisodeNumber', '<number unknown>'),\n episode_title=next_episode.get('episodeName', '<name unknown>'),\n date=next_episode.get('firstAired', '<no date>'))\n else:\n text = '{series_title}: No next episode found'.format(series_title=series_title)\n\n return text", "def get_episode_info(filename): \n episode_tag, season, episode = None, None, None\n episode_tag = find_episode_pattern(filename)\n if episode_tag is not None:\n pattern = episode_tag.lower().replace(\"s\",\" \").replace(\"e\",\" \")\n pattern_array = pattern.split()\n season = int(pattern_array[0])\n episode = int(pattern_array[1])\n season = \"{:0>2}\".format(season)\n episode = \"{:0>2}\".format(episode)\n return episode_tag, season, episode", "def get_entry_meta_description(entry, request):\n try:\n seoaddon = EntrySEOAddon.objects.get(entry=entry).seoaddon\n except EntrySEOAddon.DoesNotExist:\n pass\n else:\n return seoaddon.get_meta_description()\n\n # If there is no seo addon found, take the info from the excerpt\n placeholder = entry.placeholders.get(slot='excerpt')\n\n context = Context({'request': request})\n html = render_placeholder(placeholder, context)\n\n # we need to replace \" with ' otherwise the html markup would break when\n # the text contains \". E.g.: <meta content=\"This \"Test\" would fail.\">\n text = re.sub('<.*?>', '', html).replace('\"', '&quot;')\n\n if len(text) > 160:\n return '{}...'.format(text[:160])\n return text", "def getFriendlyDescription(self):\n if not self.description:\n return ''\n if len(self.title) > 65:\n return self.description[:120] + '...'\n return self.description[:200] + '...' if len(self.description) > 200 else self.description", "def summary(self):\n\n name='name:%s'%self.getName()\n damage='dégat:%s'%self.getDamage()\n ammos='munitions:%s'%self.getAmmos()\n return '\\n'.join([name, damage, ammos])", "def _convert_tvdb_episode_metadata(imdb_id , season_metadata, episode, banners=True):\n info = copy.deepcopy(season_metadata)\n info['episode'] = episode.get('episodenumber')\n info['title'] = episode.get('episodename', '')\n info['aired'] = episode.get('firstaired', '')\n info['premiered'] = episode.get('firstaired', '')\n info['rating'] = episode.get('rating', '')\n info['plot'] = episode.get('overview', '')\n info['plotoutline'] = episode.get('overview', '')\n info['votes'] = episode.get('ratingcount', '')\n info['imdb_id'] = imdb_id\n if banners:\n info['poster'] = episode['filename']\n return info", "def get_room_description (self, persons = []):\n # Very basic right now, but will eventually include adjoining rooms\n # and furniture.\n article = \"the \"\n if len(self.owners) > 0:\n article = \"\"\n desc = \"You are standing %s %s%s.\\n\\n\" % (self.prep, article, self.name)\n desc += \"It is part of the manor's %s area.\\n\\n\" % self.section\n\n if len(self.furniture) == 0:\n desc += \"It is completely unfurnished.\\n\"\n else:\n desc += \"You see here %s.\\n\" % join_strings(self.furniture)\n desc += \"%s\\n\\n\" % self.describe_windows()\n\n desc += self.describe_exits()\n\n if self.description != \"\":\n desc += \"\\n\\n%s\" % self.description\n\n if len(persons) > 0:\n if len(persons) == 1:\n verb = \"is\"\n else:\n verb = \"are\"\n desc += \"\\n\\n%s also %s here.\" % (join_strings(persons), verb)\n\n return desc", "def get_launch_description(self, idx):\n return self.results[idx][\"mission\"][\"description\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
construct a directory item for a series existing in user's queue. Selecting this item leads to more details about the series, and the ability to remove it from the queue.
def makeQueueItem(queueInfo):
    Log.Debug("queueinfo: %s" % queueInfo)
    s = Dict['series']
    sId = str(queueInfo['seriesId'])
    thumb = (s[sId]['thumb'] if (sId in s and s[sId]['thumb'] is not None) else R(CRUNCHYROLL_ICON))
    art = (s[sId]['art'] if (sId in s and s[sId]['art'] is not None) else R(CRUNCHYROLL_ART))
    queueItem = Function(DirectoryItem(
            QueueItemMenu,
            title=queueInfo['title'],
            summary=queueInfo['nextUpText'] + queueInfo['episodeDescription'],
            thumb=Function(GetThumb, url=thumb),
            art=Function(GetArt, url=art)
        ),
        queueInfo=queueInfo)
    return queueItem
[ "def QueueChangePopupMenu(sender, seriesId):\n\tlogin()\n\tdir = MediaContainer(title1=\"Queue\",title2=sender.itemTitle,disabledViewModes=[\"Coverflow\"])\n\tif isRegistered():\n\t\tqueueList = getQueueList()\n\t\tinQ = False\n\t\tfor item in queueList:\n\t\t\tif item['seriesId'] == seriesId:\n\t\t\t\tinQ = True\n\t\t\tbreak\n\t\t\n\t\tif inQ:\n\t\t\tdir.Append(\n\t\t\t\tFunction(DirectoryItem(RemoveFromQueue, title=\"Remove From Queue\", summary=\"Remove this series from your queue\"), seriesId=seriesId)\n\t\t\t)\n\t\telse:\n\t\t\tdir.Append(\n\t\t\t\tFunction(DirectoryItem(AddToQueue, title=\"Add To Queue\", summary=\"Add this series to your queue\" ), seriesId=seriesId)\n\t\t\t)\n\treturn dir", "def _additem(self):\n\n self.queue.put(self._genitem())", "def AddToQueue(sender,seriesId,url=None):\n\tlogin()\n\tresult = addToQueue(seriesId)\n\t\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Added to Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not add to Queue.')", "def __init__(self, json_node, series=None, build_full_object=False):\n self.series_id = 0\n self.series_name = None\n self.anidb_aid = 0\n self.anidb_eid = 0\n self.actors = []\n self.url = None\n self.item_type = 'episode'\n if series is not None:\n self.series_id = series.id\n self.series_name = series.name\n self.actors = series.actors\n self.anidb_aid = series.anidb_aid\n if series.is_movie:\n self.item_type = 'movie'\n\n Directory.__init__(self, json_node, True)\n # don't redownload info on an okay object\n if build_full_object and self.size < 0:\n json_node = self.get_full_object()\n Directory.__init__(self, json_node)\n # check again, as we might have replaced it above\n if isinstance(json_node, int) or pyproxy.is_unicode_or_string(json_node):\n eh.spam(self)\n return\n\n self.episode_number = pyproxy.safe_int(json_node.get('epnumber', ''))\n self.episode_type = json_node.get('eptype', 'Other')\n if self.anidb_aid == 0:\n self.anidb_aid = pyproxy.safe_int(json_node.get('aid', 0))\n self.anidb_eid = pyproxy.safe_int(json_node.get('eid', 0))\n self.date = model_utils.get_airdate(json_node)\n self.tvdb_episode = json_node.get('season', '0x0')\n self.update_date = None\n self.hash_content = None\n\n self.process_children(json_node)\n\n if self.name is None:\n self.name = 'Episode ' + str(self.episode_number)\n self.alternate_name = model_utils.get_title(json_node, 'x-jat', 'main')\n\n self.watched = pyproxy.safe_int(json_node.get('view', 0)) != 0\n self.watched_date = str(json_node.get('view_date', ''))\n self.year = pyproxy.safe_int(json_node.get('year', ''))\n\n self.rating = float(str(json_node.get('rating', '0')).replace(',', '.'))\n self.user_rating = float(str(json_node.get('userrating', '0')).replace(',', '.'))\n self.overview = model_utils.make_text_nice(pyproxy.decode(json_node.get('summary', '')))\n self.votes = pyproxy.safe_int(json_node.get('votes', ''))\n self.outline = \" \".join(self.overview.split(\".\", 3)[:2]) # first 3 sentence\n self.tags = model_utils.get_tags(json_node.get('tags', {}))\n\n if self.episode_type != 'Special':\n season = str(json_node.get('season', '1'))\n if 'x' in season:\n season = season.split('x')[0]\n else:\n season = '0'\n self.season = pyproxy.safe_int(season)\n\n eh.spam(self)", "def makeSeasonItem(season):\n\tart = R(CRUNCHYROLL_ART)\n\tif Dict['series'][str(season['seriesId'])]['tvdbId'] is not None:\n\t\tartUrl = getSeasonThumb(Dict['series'][str(season['seriesId'])]['tvdbId'], season['seasonnum'])\n\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\tif 
artUrl is not None:\n\t\t\tart = Function(GetArt,url=artUrl)\n\tseasonItem = Function(\n\n\t\tDirectoryItem(\n\t\t\tSeasonMenu,\n\t\t\tseason['title'],\n\t\t\tsummary=season['description'].encode(\"utf-8\"),\n\t\t\t#thumb=Function(getThumb,url=season['thumb']),\n\t\t\tart=art\n\t\t),\n\t\tseriesId=season['seriesId'],\n\t\tseason=season['seasonnum']\n\t)\n\treturn seasonItem", "def add_queues_from_dir(self, dirname):\n return self.cluster_queue_manager.add_objects_from_dir(dirname)", "def create(self):\n data = {\n \"description\": self._description,\n \"locked\": self._locked\n }\n if self._tags is not None:\n data[\"tags\"] = self._tags\n response = self._connection._post(get_url('disk folder'), json=data)\n if response.status_code == 403:\n raise MaxDiskException(response.json()['message'])\n else:\n raise_on_error(response)\n\n self._uuid = response.json()['uuid']\n self.update()", "def mk_queues_dir(self, dirname):\n self.cluster_queue_manager.mk_object_dir(dirname)", "def create_queue(self, queue):", "def queueSong(self):\n queueWindow = curses.newwin(5, 40, 5, 50)\n queueWindow.border()\n queueWindow.addstr(0,0, \"What is the file path?\", curses.A_REVERSE)\n self.stdscr.refresh()\n curses.echo()\n path = queueWindow.getstr(1,1, 30)\n curses.noecho()\n del queueWindow\n self.stdscr.touchwin()\n self.stdscr.refresh()\n \n try:\n self.library.add_tracks(path.decode(encoding=\"utf-8\"))\n except CLI_Exception.CLI_Audio_File_Exception:\n self.printError('Error queueing file or folder')", "async def _queue(self, ctx, *link : str):\n if link == ():\n queue_list = await self.queue_titles()\n await self.bot.say(\"Videos in queue: \\n\" + queue_list + \"\\n\\nType queue <link> to add a link or search terms to the queue.\")\n elif await self.check_voice(ctx.message.author, ctx.message):\n if not self.playlist:\n link = \" \".join(link)\n if \"http\" not in link or \".\" not in link:\n link = \"[SEARCH:]\" + link\n else:\n if not self.is_playlist_valid([link]):\n await self.bot.say(\"Invalid link.\")\n return\n self.queue.append(link)\n msg = ctx.message\n result = await self.get_song_metadata(link)\n try: # In case of invalid SOUNDCLOUD ID\n if result[\"title\"] != []:\n await self.bot.say(\"{} has been put into the queue by {}.\".format(result[\"title\"], msg.author))\n else:\n await self.bot.say(\"The song has been put into the queue by {}, however it may error.\".format(msg.author))\n except:\n await self.bot.say(\"A song has been put into the queue by {}.\".format(msg.author))\n\n else:\n await self.bot.say(\"I'm already playing a playlist.\")", "def construct_channel(self, *args, **kwargs):\n channel = self.get_channel(*args, **kwargs) # Create ChannelNode from data in self.channel_info\n\n scrape_directory(channel, FOLDER)\n\n raise_for_invalid_channel(channel) # Check for errors in channel construction\n\n return channel", "def add_to_playlist(self, path):\n # This is a little hack ...\n # XBMC wants to know if the item added is a file or a directory\n # so we try to add the item as a file and if this fails try adding\n # it as a directory\n try:\n self.call.AudioPlaylist.Add({'file': path})\n except jsonrpc.common.RPCError as e:\n if e.code != -32602:\n raise\n\n self.call.AudioPlaylist.Add({'directory': path})\n finally:\n self.playlist.update()", "def RemoveFromQueue(sender,seriesId):\n\tlogin()\n\tresult = removeFromQueue(seriesId)\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Removed from Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not remove from 
Queue.')", "def add_to_queue(self):\n self.manager.client.song_q.put(self.get_text(None))", "def queue(position):\n global _playlist\n collection = get_collection()\n _playlist.append(collection[position])\n log.info(\"Adding : %s\" % collection[position])\n start_player()", "def queue_callback(self):\n selected_song, index = self._player.selected_song()\n response = requests.get('http://localhost:5000/song/' + selected_song)\n song_object = response.json()\n media_file = song_object['pathname']\n media_file = media_file.replace('/', '\\\\')\n song_name = song_object['title']\n self.queue_path.append(media_file)\n self.queue_name.append(song_name)\n self._player.list_songs_queue(self.queue_name)", "def _new_batch_item():\n item = Batch_Item()\n item.batch_start_time = log_time\n item.batch_status = BATCH_STATUS[1] #'Running'\n item.vessel_no = self.vessel_no\n self.batch_items.append(item)", "def _build_item_path(self, item: Item) -> Path:\n return self.sync_dir.joinpath(item.uuid)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show menu for browsing content of type=ANIME_TYPE or DRAMA_TYPE
def BrowseMenu(sender, type=None):
    if type == ANIME_TYPE:
        all_icon = ANIME_ICON
    elif type == DRAMA_TYPE:
        all_icon = DRAMA_ICON
    dir = MediaContainer(disabledViewModes=["coverflow"], title1="Browse %s" % type)
    dir.Append(Function(DirectoryItem(AlphaListMenu, "All", title1="All", thumb=R(all_icon)), type=type))
    dir.Append(Function(DirectoryItem(RecentListMenu, "Recent", title1="Recent", thumb=R(all_icon)), type=type))
    if type == ANIME_TYPE:
        dir.Append(Function(DirectoryItem(PopularListMenu, "Popular", title1="Popular", thumb=R(all_icon)), type=type))
    dir.Append(Function(DirectoryItem(GenreListMenu, "by Genre", title1="by Genre", thumb=R(CRUNCHYROLL_ICON)), type=type))
    #dir.noCache = 1
    return dir
[ "def showTypeMenu(self, menu):\n index = self.selectionModel().currentIndex()\n self.scrollTo(index)\n rect = self.visualRect(index)\n pt = self.mapToGlobal(QtCore.QPoint(rect.center().x(), rect.bottom()))\n menu.popup(pt)", "def loadTypeSubMenu(self):\n selectTypes = {node.formatName for node in\n self.currentSelectionModel().selectedNodes()}\n typeNames = self.model.formats.typeNames()\n self.typeSubMenu.clear()\n usedShortcuts = []\n for name in typeNames:\n shortcutPos = 0\n try:\n while [shortcutPos] in usedShortcuts:\n shortcutPos += 1\n usedShortcuts.append(name[shortcutPos])\n text = '{0}&{1}'.format(name[:shortcutPos], name[shortcutPos:])\n except IndexError:\n text = name\n action = self.typeSubMenu.addAction(text)\n action.setCheckable(True)\n if name in selectTypes:\n action.setChecked(True)", "def main_menu():\n print(\"This is pyfilter {version}\".format(version=VERSION))\n print(\"Pick the type of filter: \")\n filter_selector()\n return True", "def buildBookmarkMenu(type=\"string\", editor=\"string\"):\n pass", "def menu(self):\n variables = dict(**self.constants,**self.variables, **{'menu': True})\n return self._render(variables, md_to_html=False)", "def menuFormat(self):\n \n pass", "def show_filter_menu(self, ):\n pass", "def accessoriesMenu():\n pref = QtGui.QAction(mw)\n pref.setText(\"Command panel\")\n pref.setObjectName(\"CommandPanel\")\n pref.triggered.connect(onPreferences)\n try:\n import AccessoriesMenu\n AccessoriesMenu.addItem(\"CommandPanel\")\n except ImportError:\n a = mw.findChild(QtGui.QAction, \"AccessoriesMenu\")\n if a:\n a.menu().addAction(pref)\n else:\n mb = mw.menuBar()\n action = QtGui.QAction(mw)\n action.setObjectName(\"AccessoriesMenu\")\n action.setIconText(\"Accessories\")\n menu = QtGui.QMenu()\n action.setMenu(menu)\n menu.addAction(pref)\n\n def addMenu():\n \"\"\"Add accessories menu to the menu bar.\"\"\"\n mb.addAction(action)\n action.setVisible(True)\n\n addMenu()\n mw.workbenchActivated.connect(addMenu)", "def _main_menu_kb(self):\n custom_keyboard = [\n [ self._mcomm['explore_actvivities'] ],\n [ self._mcomm['add_activity'] ]\n ]\n return ReplyKeyboardMarkup(custom_keyboard)", "def topic_menu(self):\n print u'\\n░▒▓█ ■ Select a topic-viewing method.\\n'\n print u'░▒▓█ (1) top n lists - for each topic, display top n most probable words'\n print u'░▒▓█ (2) pyLDAvis interactive visualization'\n print u'░▒▓█ (3) ■ back to main menu ■\\n'\n pick_vis = get_input('> ', int, 1, 3)\n if pick_vis is 1:\n print u'\\n░░▒▒▓█ ■ How many words to display per topic (recommended: 5-15)?\\n'\n n = get_input('> ', int, 1, 200)\n self.driver.print_topics(n)\n self.topic_menu()\n if pick_vis is 2:\n print u'\\n░░▒▒▓█ ■ This will open in a browser window...\\n'\n self.driver.vis_ldavis()\n self.topic_menu()\n if pick_vis is 3:\n self.main_menu()", "def main():\n article = None\n word = search(run_dmenu())\n if word:\n article = run_dmenu(word)\n print(article)\n if article:\n display(article)", "def menus(context, kind='header', menu_type='dropdown'):\n t = get_template(\"menu/tags/%s.html\" % menu_type)\n\n footer = (kind == 'footer')\n menus = Menu.objects.filter(active=True, footer=footer)\n\n # path = context['request'].path\n # highlighted = any([path.startswith(e.destination) for e in menu.entries.all()])\n highlighted = False\n return t.render(template.Context({\n # 'menu': menu,\n 'menus': menus,\n 'highlighted': highlighted,\n 'request': context['request'],\n }))", "def menu(self):\n variables = dict(**self.variables, **{'menu': True})\n 
return self._render(variables)", "def _vendor_menu_contents(request):\n active = request.resolver_match.view_name\n menu_item = namedtuple(\"MenuItem\", \"name url active sub_items\")\n fill = lambda name, func, sub=None: menu_item(name, url.reverse(func) if func else None, func == active, sub)\n\n items = [\n fill(_(u\"Home\"), \"kirppu:vendor_view\"),\n fill(_(u\"Item list\"), \"kirppu:page\"),\n fill(_(u\"Box list\"), \"kirppu:vendor_boxes\"),\n ]\n\n manage_sub = []\n if request.user.is_staff or UserAdapter.is_clerk(request.user):\n manage_sub.append(fill(_(u\"Checkout commands\"), \"kirppu:commands\"))\n if request.user.is_staff:\n manage_sub.append(fill(_(u\"Clerk codes\"), \"kirppu:clerks\"))\n manage_sub.append(fill(_(u\"Lost and Found\"), \"kirppu:lost_and_found\"))\n manage_sub.append(fill(_(u\"Statistics\"), \"kirppu:stats_view\"))\n\n if manage_sub:\n items.append(fill(_(u\"Management\"), \"\", manage_sub))\n return items", "def gedit2_menu(xml):\n return MENU_UI.format(xml) # Splice in the examples menu", "def show_menu(menu=None):\n if menu==None: menu=menu_data\n for item in menu:\n\tlabel, title, fn = item\n label = str(label)\n print(\"%s. %s \" %(label, title))", "def search_menu(self):\n clr_screen() \n \n print (misc.SEARCH_MENU)\n\n for key in sorted(misc.search_menu):\n print (misc.search_menu[key])\n\n print('\\n')\n choice = input(\"Please select:\")\n\n if choice == '1':\n self.search_by_range_date()\n self.main_menu()\n elif choice == '2': \n self.find_by_time()\n self.main_menu()\n elif choice == '3':\n self.find_by_string()\n self.main_menu()\n elif choice == '4': \n self.find_by_pattern()\n self.main_menu()\n elif choice == '5': \n print (\"return to main menu\")\n self.main_menu()\n else: \n misc.option_error()\n self.main_menu()", "def side_menu():\n\n st.sidebar.markdown(\"**Configuration Panel**\")\n\n #### SCRAPER CONFIGURATION ####\n\n menu_scraper()\n\n #### GRAPH GENERATION ####\n\n menu_graph_generator()\n\n #### BOT SELECTION ####\n menu_bot_selection()\n \n #### SIMULATION WITH SOIL ####\n # TODO: test diretti e indiretti\n menu_soil_simulation_subroutine()\n\n #### PLOT GENERATIONS ####\n menu_plot_generations()\n\n #### COUNT STATISTICS ####\n count_statistics()", "def menu():\n print(\"lines - counting lines \")\n print(\"words - number of words \")\n print(\"letters - amout of letters \")\n print(\"word_frequency - 7 most frequent words \")\n print(\"letter_frequency - 7 most used letters \")\n print(\"all - show all menu choices \")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display a menu showing episodes available in a particular season.
def SeasonMenu(sender, seriesId=None, season=None):
    dir = MediaContainer(disabledViewModes=["Coverflow"], title1=sender.title1, title2="Series")
    epList = getSeasonEpisodeListFromFeed(seriesId, season)
    for episode in epList:
        dir.Append(makeEpisodeItem(episode))
    return dir
[ "def load_episodes(self):\n self.episode_menu.clear_items()\n for episode in get_episode_list(self.show['pk']):\n self.episode_menu.add_item(MenuItem(\n \"%d x %d : %s \" % (\n episode['season_number'],\n episode['episode_number'],\n episode['name'],\n ), \n episode))\n self.episode_menu.select_item(0)", "def episode_menu(self, podcast): \r\n feed = feedparser.parse(podcast.url) \r\n titles = []\r\n\r\n for index, entry in enumerate(feed.entries):\r\n titles.append(entry['title'])\r\n \r\n titles.append(\"Back\")\r\n titles.append(\"Quit\")\r\n\r\n questions = [\r\n {\r\n 'type': 'list',\r\n 'name': 'episode',\r\n 'message': 'What episode do you want to listen to?',\r\n 'choices': titles\r\n }\r\n ]\r\n\r\n answers = prompt(questions)\r\n self.handle_choice(answers, 'episode')\r\n\r\n choice = titles.index(answers[\"episode\"])\r\n entry = feed.entries[choice]\r\n url = self.get_episode_media_url(entry)\r\n\r\n if type(url) is str:\r\n self.play_podcast(url)\r\n self.episode_menu(podcast)", "def seasons(self):\n return list(self.tv_show['episodes'].keys())", "def getEpisodesInSeason(self, seasonnum):\r\n if type(seasonnum) is not int:\r\n return('Invalid Input, must be integer.')\r\n try:\r\n return self.seasonsepisodedict[seasonnum]\r\n except KeyError:\r\n return('N/A (Does not exist)')", "def get_episode_links_from_season_link(self, season_link):\n\n self.browser.get(season_link)\n soup = BeautifulSoup(self.browser.page_source, \"html.parser\")\n\n elem_list = soup.find_all(\"a\", {\"class\": \"sonra\"})\n\n episode_links = {(link.getText(), link['href']) for link in elem_list}\n\n return episode_links", "def get_episode_links(self, show_name):\n\n self.search_function(show_name)\n\n print (\"Obtaining all season links...\", end =\"\")\n season_links = self.get_season_list()\n print (\"DONE\")\n\n links = []\n\n for title, season in season_links:\n\n print (\"Obtaining episode links for season: \" + title, end =\"...\")\n episode_links = self.get_episode_links_from_season_link(season)\n #for now ignore the link title\n links = links + [link for episode_title, link in episode_links]\n\n print (\"DONE\")\n\n return links", "def get_season(self, season: int) -> Season:\n if self.lang == \"de\":\n url = f\"https://www.southpark.de/feeds/carousel/video/e3748950-6c2a-4201-8e45-89e255c06df1/30/1/json/!airdate/season-{season}\"\n elif self.lang == \"se\" and season < 23: # SE doesn't have the 23rd season.\n url = f\"https://www.southparkstudios.nu/feeds/carousel/video/9bbbbea3-a853-4f1c-b5cf-dc6edb9d4c00/30/1/json/!airdate/season-{season}\"\n elif self.lang == \"uk\":\n url = f\"https://www.southparkstudios.co.uk/feeds/carousel/video/02ea1fb4-2e7c-45e2-ad42-ec8a04778e64/30/1/json/!airdate/season-{season}\"\n # cc.com is the ony one with jsons so descriptions will be in english\n else:\n url = f\"https://southpark.cc.com/feeds/carousel/video/06bb4aa7-9917-4b6a-ae93-5ed7be79556a/30/1/json/!airdate/season-{season}?lang={self.lang}\"\n\n season_data = json.loads(http_get(url))\n\n episodes = []\n for e in season_data[\"results\"]:\n episodes.append(Episode(\n id=e.get(\"itemId\").strip(),\n title=e.get(\"title\").strip(),\n description=e.get(\"description\").strip(),\n short_description=e.get(\"shortDescription\").strip(),\n thumbnail=e.get(\"images\").strip(),\n date=int(e.get(\"originalAirDate\", 0).strip()),\n episode_number=e.get(\"episodeNumber\").strip(),\n episode_number_in_season=e.get(\"episodeNumber\", \"0\")[-2:].strip(),\n season=e.get(\"episodeNumber\", \"0\")[:2].strip(),\n 
_lang=self.lang\n ))\n\n return Season(season, episodes)", "def test_season_with_episodes(self):\n season = Season(season_id=1, with_episodes=True)\n for episode in season.episodes:\n self.assertEqual(1, episode.season)", "def get_season_list(self):\n\n soup = BeautifulSoup(self.browser.page_source, \"html.parser\")\n elem_list = soup.find_all(\"a\", href=lambda value: value and value.startswith(SEASON_URL_PREFIX), rel=\"bookmark\")\n season_links = {(elem.getText(), elem['href']) for elem in elem_list}\n\n return season_links", "def show_agenda_items():", "def remove_season(self, title, season):\n title = re.sub(r'[?|$|!|:|#]', r'', title.encode('utf-8'))\n season = int(season)\n season_list = []\n episodes_list = []\n show_meta = '%s' % (title)\n for season_entry in self.db[self.series_label][show_meta]['seasons']:\n if season_entry != season:\n season_list.append(season_entry)\n self.db[self.series_label][show_meta]['seasons'] = season_list\n alt_title = self.db[self.series_label][show_meta]['alt_title']\n show_dir = self.nx_common.check_folder_path(\n path=os.path.join(self.tvshow_path, alt_title))\n if xbmcvfs.exists(show_dir):\n show_files = [f for f in xbmcvfs.listdir(show_dir) if xbmcvfs.exists(os.path.join(show_dir, f))]\n for filename in show_files:\n if 'S%02dE' % (season) in filename:\n xbmcvfs.delete(os.path.join(show_dir, filename))\n else:\n episodes_list.append(filename.replace('.strm', ''))\n self.db[self.series_label][show_meta]['episodes'] = episodes_list\n self._update_local_db(filename=self.db_filepath, db=self.db)\n return True", "def season_exists(self, title, season):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n if self.show_exists(title) is False:\n return False\n show_entry = self.db[self.series_label][title]\n return season in show_entry['seasons']", "def search_episode(self, show, season, episode):\n url = 'home/searchEpisode'\n params = {\n 'indexername': 'tvdb',\n 'seriesid': show['id']['tvdb'],\n 'season': season,\n 'episode': episode\n }\n return self.medusa.web_request(url=url, params=params)", "def season_display(self):\n return ', '.join(\n str(season.season_year) for season in self.season.all())", "def get_all_seasons():\n return session.query(Seasons).all()", "def main_menu(self):\r\n podcasts = PodcastDatabase.select()\r\n \r\n if podcasts.count() == 0:\r\n print(\"There are no podcast feeds found.\")\r\n print(\"To add a podcast use the 'add' parameter:\\n\")\r\n print(\"podcast add http://www.mypodcast.com/feed.rss\\n\")\r\n sys.exit(0)\r\n \r\n podcast_names = []\r\n\r\n for index, podcast in enumerate(podcasts):\r\n podcast_names.append(podcast.name)\r\n\r\n podcast_names.append(\"Quit\")\r\n\r\n questions = [\r\n {\r\n 'type': 'list',\r\n 'name': 'podcast',\r\n 'message': 'What podcast do you want to listen to?',\r\n 'choices': podcast_names\r\n }\r\n ]\r\n\r\n answers = prompt(questions)\r\n self.handle_choice(answers,'podcast')\r\n\r\n podcast = PodcastDatabase.select(PodcastDatabase.q.name == answers[\"podcast\"])\r\n self.episode_menu(list(podcast)[0])", "def show_episodes(self, show_id, limit=50, offset=0, market=None):\n\n trid = self._get_id(\"show\", show_id)\n return self._get(\n \"shows/\" + trid + \"/episodes/\", limit=limit, offset=offset, market=market\n )", "def makeSeasonItem(season):\n\tart = R(CRUNCHYROLL_ART)\n\tif Dict['series'][str(season['seriesId'])]['tvdbId'] is not None:\n\t\tartUrl = getSeasonThumb(Dict['series'][str(season['seriesId'])]['tvdbId'], season['seasonnum'])\n\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\tif 
artUrl is not None:\n\t\t\tart = Function(GetArt,url=artUrl)\n\tseasonItem = Function(\n\n\t\tDirectoryItem(\n\t\t\tSeasonMenu,\n\t\t\tseason['title'],\n\t\t\tsummary=season['description'].encode(\"utf-8\"),\n\t\t\t#thumb=Function(getThumb,url=season['thumb']),\n\t\t\tart=art\n\t\t),\n\t\tseriesId=season['seriesId'],\n\t\tseason=season['seasonnum']\n\t)\n\treturn seasonItem", "def episode_exists(self, title, season, episode):\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n if self.show_exists(title) is False:\n return False\n show_entry = self.db[self.series_label][title]\n episode_entry = 'S%02dE%02d' % (season, episode)\n return episode_entry in show_entry['episodes']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
add some video tests to a MediaContainer
def addMediaTests(dir):
    if ENABLE_DEBUG_MENUS:
        testEpisodes = [
            {'title': 'Bleach Episode 1',
             'season': 'One',
             'summary': "480p Boxee feed. This needs a premium account. No ads should show! Plex client should show a resolution of 853x480. (I do not know the 480p url, or if there is one, so it'll probably display at 720p). It must not have any black edges on top or bottom. Play, pause, and seeking should work.",
             'link': 'http://www.crunchyroll.com/boxee_showmedia/543611&amp;bx-ourl=http://www.crunchyroll.com/bleach/543611',
             'mediaId': '543611',
            },
            {'title': 'Gintama 187',
             'season': 'None',
             'summary': "720p Boxee feed. This needs a premium account. No ads should show! Plex client should show a resolution of 1280x720, must not have any black edges on top or bottom. Play, pause, and seeking should work.",
             'link': 'http://www.crunchyroll.com/boxee_showmedia/537056&amp;bx-ourl=http://www.crunchyroll.com/gintama/537056',
             'mediaId': '537056',
            },
            {'title': 'Bleach Episode 357',
             'season': 'None',
             'summary': "1080p Boxee feed. This needs a premium account. No ads should show! Plex client should show a resolution of exactly 1920x1080, must not have any black edges on top or bottom. Play, pause, and seeking should work.",
             'link': 'http://www.crunchyroll.com/boxee_showmedia/588328&amp;bx-ourl=http://www.crunchyroll.com/bleach/588328',
             'mediaId': '588328',
            },
            {'title': 'Blue Exorcist Trailer',
             'season': 'None',
             'summary': '480p web page version. This needs a premium account. No ads should show! Should crop badly, as it is not a direct stream (we go direct with premium accounts).',
             'link': 'http://www.crunchyroll.com/blue-exorcist/-blue-exorcist-blue-exorcist-official-trailer-577928?p480=1&small=0&wide=0',
             'mediaId': "577928"
            },
            {'title': 'Blue Exorcist Episode 1',
             'season': 'None',
             'summary': '360p web page version. You really should log out to test this. You should get ads. Plex client should show resolution of 619x348',
             'link': 'http://www.crunchyroll.com/blue-exorcist/episode-1-the-devil-resides-in-human-souls-573636?p360=1&small=0&wide=0',
             'mediaId': "577928"
            },
            {'title': 'Shugo Chara Episode 1',
             'season': "One",
             'summary': "360p default web page version, freebie. Should show resolution of 619x348. Should look borked if you're logged in.",
             'link': 'http://www.crunchyroll.com/shugo-chara/episode-1-a-guardian-character-is-born-509988?p360',
             'mediaId': '509988'
            },
            {'title': "Bleach 274 1080p",
             'season': 'None',
             'summary': "1080p direct stream. You need to log in and have your preference at CR.com set to view 1080p. No ads should show. Plex should report a resolution of 1920x1080. There MIGHT be small black bars at top and bottom due to ratio difference (but really shouldn't happen). Seek, play and pause should work.",
             'link': "http://www.crunchyroll.com/swf/vidplayer.swf?config_url=http%3A%2F%2Fwww.crunchyroll.com%2Fxml%2F%3Freq%3DRpcApiVideoPlayer_GetStandardConfig%26media_id%3D542596%26video_format%3D0%26video_quality%3D0%26auto_play%3D1%26click_through%3D1&__qual=1080",
             'mediaId': '542596'
            },
            {'title': "Puffy AmiYumi Interview",
             'season': 'None',
             'summary': "Freebie web content with standard URL. You need to be logged out to view this without nasty cropping. LIKES TO CRASH PMS with BAD_ACCESS",
             #'link': "http://www.crunchyroll.com/media-565187?p360=1&t=0&small=0&wide=0",
             'link': "http://www.crunchyroll.com/puffy-amiyumi/-puffy-amiyumi-puffy-amiyumi-interview-565187?p360=1&t=0&small=0&wide=0",
             'mediaId': '565187'
            },
            {'title': "Puffy AmiYumi Interview Redirected",
             'season': 'None',
             'summary': "Freebie web content with standard URL. This URL redirects at CrunchyRoll.com, and will probably crash PMS with BAD_ACCESS.",
             'link': "http://www.crunchyroll.com/media-565187?p360=1&t=0&small=0&wide=0",
             #'link': "http://www.crunchyroll.com/puffy-amiyumi/-puffy-amiyumi-puffy-amiyumi-interview-565187?p360=1&t=0&small=0&wide=0",
             'mediaId': '565187'
            }
        ]
        for episode in testEpisodes:
            dir.Append(ConstructTestVideo(episode))

        vid = VideoClipObject(
            url="http://www.crunchyroll.com/another/episode-1-rough-sketch-589572",
            title="Another episode 1, services",
            summary="This video will be fetched through services. It may just bug out. Who knows."
        )
        # this actually borks consistently. I actually don't understand the point of VideoClipObject.
        #dir.Append(VideoItem("http://www.crunchyroll.com/another/episode-1-rough-sketch-589572", title="Test services", summary="This is a test of url services. It should play."))
[ "def test_api_videos_post(self):\n pass", "def test_api_videos_get(self):\n pass", "def addMedia(self, m):", "def test_video_metadata(self):\n with self.subTest(\"Test mkv video\"):\n self.mock_metadata.has.return_value = False\n self.mock_metadata._MultipleMetadata__groups._key_list = [\"video meta\", \"audio meta\"]\n video_metadata = self.file_media.video_metadata\n self.assertEqual(self.mock_metadata._MultipleMetadata__groups.__getitem__.return_value, video_metadata)\n self.mock_metadata._MultipleMetadata__groups.__getitem__.assert_called_once_with(\"video meta\")\n with self.subTest(\"Test other formats\"):\n del self.mock_metadata._MultipleMetadata__groups\n del self.file_media.__dict__[\"video_metadata\"] # clear cache\n self.assertEqual(self.mock_metadata, self.file_media.video_metadata)", "def test_add_media_repo(self):\n media_repos = self.dwrap.vmedia_repos\n\n self.assertEqual(1, len(media_repos))\n\n vmedia_repo = stor.VMediaRepos.bld(None, 'repo', 10.12345)\n self.assertIsNotNone(vmedia_repo)\n\n media_repos.append(vmedia_repo)\n self.dwrap.vmedia_repos = media_repos\n\n self.assertEqual(2, len(self.dwrap.vmedia_repos))\n\n # Make sure that the second media repo matches\n repo = self.dwrap.vmedia_repos[1]\n self.assertEqual('repo', repo.name)\n self.assertEqual(10.12345, repo.size)\n self.assertEqual(0, len(repo.optical_media))", "def test_add_video(self):\n query_string = [('url', 'url_example')]\n response = self.client.open(\n '/api/video',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_post_media(self):\n pass", "def video(obj):\n return match(obj, video_matchers)", "def test_video(self):\n\t\t_task, _prog, _file = mock_handler_request(self.dir, 'theshadowmoose.tumblr.com/post/184562318724/another-test-post-with-video')\n\t\tres = tumblr.handle(_task, _prog)\n\t\tself.assertTrue(res, \"Tumblr video download failed!\")\n\t\tself.assertTrue(_file.exists(), \"Tumblr video was not downloaded! 
%s\" % res.failure_reason)\n\t\tself.assertTrue(_file.relative().endswith('.mp4'), 'Failed to use .mp4 extension for video file!')", "def test__add_media_shared_fields(media_data):\n embed = Embed()\n add_media_shared_fields(embed, media_data)\n return embed", "def test_content_items_created(self):\n audio_found = Audio.objects.get(title='audio 1')\n video_found = Video.objects.get(title='video 1')\n text_found = Text.objects.get(title='text 1')\n\n self.assertEqual(audio_found.bitrate, 100)\n self.assertEqual(video_found.video_file, 'video1.mkv')\n self.assertEqual(text_found.content, 'Long text content')", "def test_video_wmv_should_return_true(self):\n\n video_name : str = \"video.wmv\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_write(self):\n\n # Dimensions, for testing purposes\n H = 480\n W = 640\n writer = cv2.VideoWriter(self.path_vid_out, cv2.VideoWriter_fourcc(*\"MJPG\"), 30, (W, H))\n for frame in tqdm.tqdm(range(400)):\n this_frame = np.random.randint(0, 255, (H, W, 3)).astype('uint8')\n writer.write(this_frame)\n writer.release()\n logging.debug(\"Wrote test video to {}\".format(self.path_vid_out))", "def test_check_new_videos(self, mock_resp):\n mock_resp.return_value = {\n 'media': [{\n 'device_name': 'foo',\n 'media': '/foo/bar.mp4',\n 'created_at': '1970-01-01T00:00:00+0:00'\n }]\n }\n sync_module = self.blink.sync['test']\n sync_module.cameras = {'foo': None}\n self.assertEqual(sync_module.motion, {})\n self.assertTrue(sync_module.check_new_videos())\n self.assertEqual(sync_module.last_record['foo'],\n {'clip': '/foo/bar.mp4',\n 'time': '1970-01-01T00:00:00+0:00'})\n self.assertEqual(sync_module.motion, {'foo': True})\n mock_resp.return_value = {'media': []}\n self.assertTrue(sync_module.check_new_videos())\n self.assertEqual(sync_module.motion, {'foo': False})\n self.assertEqual(sync_module.last_record['foo'],\n {'clip': '/foo/bar.mp4',\n 'time': '1970-01-01T00:00:00+0:00'})", "async def test_media_player_playback(\n hass, setup_plex_server, requests_mock, playqueue_created, player_plexweb_resources\n):\n requests_mock.get(\"http://1.2.3.5:32400/resources\", text=player_plexweb_resources)\n\n await setup_plex_server()\n\n media_player = \"media_player.plex_plex_web_chrome\"\n requests_mock.post(\"/playqueues\", text=playqueue_created)\n requests_mock.get(\"/player/playback/playMedia\", status_code=200)\n\n # Test movie success\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\", \"title\": \"Movie 1\" }',\n },\n True,\n )\n\n # Test movie incomplete dict\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\"}',\n },\n True,\n )\n\n # Test movie failure with options\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\", \"title\": \"Does not exist\" }',\n },\n True,\n )\n\n # Test movie failure with nothing found\n with patch(\"plexapi.library.LibrarySection.search\", return_value=None):\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: 
MEDIA_TYPE_MOVIE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Movies\", \"title\": \"Does not exist\" }',\n },\n True,\n )\n\n # Test movie success with dict\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Music\", \"artist_name\": \"Artist\", \"album_name\": \"Album\"}',\n },\n True,\n )\n\n # Test TV show episoe lookup failure\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_EPISODE,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"TV Shows\", \"show_name\": \"TV Show\", \"season_number\": 1, \"episode_number\": 99}',\n },\n True,\n )\n\n # Test track name lookup failure\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: '{\"library_name\": \"Music\", \"artist_name\": \"Artist\", \"album_name\": \"Album\", \"track_name\": \"Not a track\"}',\n },\n True,\n )\n\n # Test media lookup failure by key\n requests_mock.get(\"/library/metadata/999\", status_code=404)\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: \"999\",\n },\n True,\n )\n\n # Test invalid Plex server requested\n assert await hass.services.async_call(\n MP_DOMAIN,\n SERVICE_PLAY_MEDIA,\n {\n ATTR_ENTITY_ID: media_player,\n ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,\n ATTR_MEDIA_CONTENT_ID: '{\"plex_server\": \"unknown_plex_server\", \"library_name\": \"Music\", \"artist_name\": \"Artist\", \"album_name\": \"Album\"}',\n },\n True,\n )", "def test_video_m4v_should_return_true(self):\n\n video_name : str = \"video.m4v\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_video_mov_should_return_true(self):\n\n video_name : str = \"video.mov\"\n\n is_video : bool = script.is_video(video_name, debug_function = True)\n\n self.assertTrue(is_video)\n ...", "def test_put_current_tan_media(self):\n pass", "def test_get_video_info_is_working_properly(self):\n result = self.test_media_manager.get_video_info(\n self.test_video_path_mp4\n )\n self.assertEqual(\n result,\n {\n 'video_info': {\n 'TAG:encoder': 'Lavf55.19.104',\n 'nb_streams': '1',\n 'start_time': '0.000000',\n 'format_long_name':\n 'QuickTime/MPEG-4/Motion JPEG 2000 format',\n 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2',\n 'filename': self.test_video_path_mp4,\n 'TAG:compatible_brands': 'isomiso2avc1mp41',\n 'bit_rate': '2163440.000000',\n 'TAG:major_brand': 'isom',\n 'duration': '0.400000',\n 'TAG:minor_version': '512',\n 'size': '108172.000000'\n },\n 'stream_info': [\n {\n 'pix_fmt': 'yuv444p',\n 'index': '0',\n 'codec_tag': '0x31637661',\n 'level': '30',\n 'r_frame_rate': '25/1',\n 'start_time': '0.000000',\n 'time_base': '1/12800',\n 'codec_tag_string': 'avc1',\n 'codec_type': 'video',\n 'has_b_frames': '2',\n 'width': '640',\n 'codec_name': 'h264',\n 'codec_long_name':\n 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10',\n 'display_aspect_ratio': '8:5',\n 'sample_aspect_ratio': '1:1',\n 'TAG:language': 'und',\n 'height': '400',\n 'nb_frames': '10',\n 'codec_time_base': '1/50',\n 'duration': '0.320000',\n 'avg_frame_rate': '125/4'\n }\n ]\n }\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove seriesID from queue
def RemoveFromQueue(sender, seriesId):
    login()
    result = removeFromQueue(seriesId)
    if result:
        return MessageContainer("Success", 'Removed from Queue')
    else:
        return MessageContainer("Failure", 'Could not remove from Queue.')
[ "def remove(self):\r\n self.queue.pop(0)", "def remove(self, node_id):\n for x,(y,z) in enumerate(self.queue):\n if z[1][-1] == node_id:\n del self.queue[x]\n return self.queue\n raise NotImplementedError", "def remove_from_queue(self, index):\r\n # TODO: what do these parameters actually do?\r\n updid = '0'\r\n objid = 'Q:0/' + str(index + 1)\r\n self.avTransport.RemoveTrackFromQueue([\r\n ('InstanceID', 0),\r\n ('ObjectID', objid),\r\n ('UpdateID', updid),\r\n ])", "def test_queues_remove_item_from_queue_v1(self):\n pass", "def pop_queue(self, container, obj, q_ts, q_record):\n q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)\n x_timestamp = slightly_later_timestamp(max(q_record, q_ts))\n self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',\n q_path, q_ts, x_timestamp)\n headers = {'X-Timestamp': x_timestamp}\n direct_delete_container_entry(\n self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,\n container, obj, headers=headers)", "def remove_song_from_queue(self, nr=0):\n q = self.get_queue()\n self.queue = q[:nr] + q[nr + 1 :]\n return q[nr]", "def deleteSeries(self, series_id):\n cursor = self.connection.cursor()\n series_id = (series_id,)\n\n cursor.execute('''DELETE FROM info\n WHERE series_id = ?''', series_id)\n\n self.connection.commit()\n cursor.close()", "def remove_job(self, job_id):", "def remove(self):\n return(self.queue.pop(0))", "def wipeQueue():\n\tq.clear()", "def cancel(self, event):\r\n self.queue.remove(event)\r\n heapq.heapify(self.queue)", "def test_queue_remove(self):\n q1 = self.party.enqueue_song(self.user, 't123')\n q2 = self.party.enqueue_song(self.user, 't456')\n q2.upvote(self.user2)\n next_entry = self.party.dequeue_next_song()\n self.assertEquals(next_entry, q2)\n self.party.save(self.redis)\n p = Party.get(self.redis, self.party.id)\n self.assertEquals(p.queue[0].id, q1.id)", "def clear_queue(self):\r\n self.avTransport.RemoveAllTracksFromQueue([\r\n ('InstanceID', 0),\r\n ])", "def removeTimerSensor(self, s: 'SoTimerQueueSensor') -> \"void\":\n return _coin.SoSensorManager_removeTimerSensor(self, s)", "def deadline_remover(server_Q, scheduler_Q, task_id):\n global number_of_deadlined_tasks,number_of_deadlined_tasks_class1,number_of_deadlined_tasks_class2\n type = 0\n for i in range(len(scheduler_Q)):\n if str(id(scheduler_Q[i])) == task_id:\n type = scheduler_Q[i].type\n del scheduler_Q[i]\n return\n for j in range(len(server_Q)):\n for i in range(len(server_Q[j])):\n if str(id(server_Q[j][i])) == task_id:\n type = server_Q[j][i].type\n del server_Q[j][i]\n return\n if number_of_departed_tasks > 5000 :\n number_of_deadlined_tasks += 1\n if(type == 1):\n number_of_deadlined_tasks_class1 += 1\n else:\n number_of_deadlined_tasks_class2 += 1", "def remove_bar(self):\n self.space.remove(self.pusher_body, self.pusher_shape)", "def removeIndex(self, index):\r\n item = self.queue.pop(index)\r\n return item", "def clear(self):\r\n self.queue = []", "def remove_from_client_queue(client_id, command_socket_tuple):\n\n if command_socket_tuple in RPCS.Northbound_Queue[client_id]:\n RPCS.Northbound_Queue[client_id].remove(command_socket_tuple)\n # Check if client entry can be removed form Northbound Queue\n if not RPCS.Northbound_Queue[client_id]:\n RPCS.Northbound_Queue.pop(client_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add seriesId to the queue.
def AddToQueue(sender, seriesId, url=None):
    login()
    result = addToQueue(seriesId)
    if result:
        return MessageContainer("Success", 'Added to Queue')
    else:
        return MessageContainer("Failure", 'Could not add to Queue.')
[ "def add_to_queue(self, video_id):\n self.start_session_if_none()\n self._session.add_to_queue(video_id)", "def add_to_queue(self, sid, data):\n self.activation_queue.put((sid, data))", "def __add_to_queue(self, _id, url):\n payload = dumps(dict(\n id=str(_id),\n url=url\n ))\n self.chan.basic_publish(\n exchange='',\n routing_key=cfg.settings.mq.queue_name,\n body=payload,\n properties=pika.BasicProperties(\n delivery_mode=2\n )\n )", "def add_to_queue(self):\n self.manager.client.song_q.put(self.get_text(None))", "def queue_id(self, queue_id):\n\n self._queue_id = queue_id", "def push_to_queue(self):\n redis = self.redis_pool.get_connection()\n redis.publish(self.collection_name, self.worker_id)", "def add(self, process):\r\n self.queue.append(process)", "def add_job(self, job_id):\n\n self._count += 1\n self._pending_job_ids.append(job_id)", "def record(self, job_id: int) -> None:\n if self.current_size >= self.max_size:\n self._linked_list.popleft()\n self.current_size -= 1\n\n self._linked_list.append(job_id)\n self.current_size += 1", "def added_to_queue(self, link):", "def RemoveFromQueue(sender,seriesId):\n\tlogin()\n\tresult = removeFromQueue(seriesId)\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Removed from Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not remove from Queue.')", "def add(self, frame_id, frame, quality = None):\n self._frames.append((str(frame_id), frame, quality))", "def add_to_queue(self, data):\n self.registration_queue.put(data)", "def add_to_cluster(self, doc_id):\n self.docs_ids.append(doc_id)", "def add(self, n):\n self.queue.append(n)", "def add_to_queue(self, uri, device_id=None):\n\n uri = self._get_uri(\"track\", uri)\n\n endpoint = \"me/player/queue?uri=%s\" % uri\n\n if device_id is not None:\n endpoint += \"&device_id=%s\" % device_id\n\n return self._post(endpoint)", "def _add_queue(self, name, register_event):\n q = queue.Queue()\n register_event(q.put)\n self._queues[name] = q", "def _additem(self):\n\n self.queue.put(self._genitem())", "def addLogRecord(self, items):\n self.queue.put(items)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Popup a Menu asking user if she wants to add or remove this series from her queue
def QueueChangePopupMenu(sender, seriesId):
    login()
    dir = MediaContainer(title1="Queue", title2=sender.itemTitle, disabledViewModes=["Coverflow"])
    if isRegistered():
        queueList = getQueueList()
        inQ = False
        for item in queueList:
            if item['seriesId'] == seriesId:
                inQ = True
                break
        if inQ:
            dir.Append(
                Function(DirectoryItem(RemoveFromQueue, title="Remove From Queue", summary="Remove this series from your queue"), seriesId=seriesId)
            )
        else:
            dir.Append(
                Function(DirectoryItem(AddToQueue, title="Add To Queue", summary="Add this series to your queue"), seriesId=seriesId)
            )
    return dir
[ "def show_menu():\n\tchoice = 0\n\tarray_size = int(input(\"Enter size for array as interger value: \"))\n\tqueue_object = Queue(array_size)\n\t\n\twhile choice >= 0 and choice < 4:\n\t\tprint \"1. Insert\"\n\t\tprint \"2. Delete\"\n\t\tprint \"3. Display\"\n\t\tprint \"4. Exit\"\n\t\tchoice = int(input(\"Enter choice: \"))\n\n\t\ttry:\n\t\t\tif choice == 1:\n\t\t\t\tif queue_object.check_is_full():\n\t\t\t\t\traise QueueFullException(\"ERROR: Queue is full\")\n\t\t\t\tdata = int(input(\"Enter integer data: \"))\n\t\t\t\tqueue_object.insert(data)\n\t\t\telif choice == 2:\n\t\t\t\tprint \"Data deleted is: %s\" % queue_object.remove()\n\t\t\telif choice == 3:\n\t\t\t\tqueue_object.display()\n\t\texcept QueueEmptyException as qe:\n\t\t\tprint qe\n\t\texcept QueueFullException as qf:\n\t\t\tprint qf", "def RemoveFromQueue(sender,seriesId):\n\tlogin()\n\tresult = removeFromQueue(seriesId)\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Removed from Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not remove from Queue.')", "async def dark_pool_shorts_menu(self, ctx: discord.ext.commands.Context, arg=\"\"):\n\n if cfg.DEBUG:\n print(\"!stocks.dps\")\n\n text = (\n \"0️⃣ !stocks.dps.shorted <NUM>\\n\"\n \"1️⃣ !stocks.dps.hsi <NUM>\\n\"\n \"2️⃣ !stocks.dps.pos <NUM> <SORT>\\n\"\n \"3️⃣ !stocks.dps.sidtc <NUM> <SORT>\\n\"\n )\n if arg:\n text += (\n f\"4️⃣ !stocks.dps.ftd {arg} <DATE_START> <DATE_END>\\n\"\n f\"5️⃣ !stocks.dps.dpotc {arg}\\n\"\n f\"6️⃣ !stocks.dps.spos {arg}\\n\"\n f\"7️⃣ !stocks.dps.psi {arg}\\n\"\n )\n else:\n text += (\n \"\\nMore commands available when providing a ticker with:\"\n \"\\n!stocks.dps <TICKER>\"\n )\n\n title = \"Dark Pool Shorts (DPS) Menu\"\n embed = discord.Embed(title=title, description=text, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n msg = await ctx.send(embed=embed)\n\n emoji_list = [\"0️⃣\", \"1️⃣\", \"2️⃣\", \"3️⃣\"]\n\n if arg:\n emoji_list += [\"4️⃣\", \"5️⃣\", \"6️⃣\", \"7️⃣\"]\n\n for emoji in emoji_list:\n await msg.add_reaction(emoji)\n\n def check(reaction, user):\n return user == ctx.message.author and str(reaction.emoji) in emoji_list\n\n try:\n reaction, user = await gst_bot.wait_for(\n \"reaction_add\", timeout=cfg.MENU_TIMEOUT, check=check\n )\n if reaction.emoji == \"0️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 0\")\n await shorted_command(ctx)\n elif reaction.emoji == \"1️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 1\")\n await hsi_command(ctx)\n elif reaction.emoji == \"2️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 2\")\n await pos_command(ctx)\n elif reaction.emoji == \"3️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 3\")\n await sidtc_command(ctx)\n elif reaction.emoji == \"4️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 4\")\n await ftd_command(ctx, arg)\n elif reaction.emoji == \"5️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 5\")\n await dpotc_command(ctx, arg)\n elif reaction.emoji == \"6️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 6\")\n await spos_command(ctx, arg)\n elif reaction.emoji == \"7️⃣\":\n if cfg.DEBUG:\n print(\"Reaction selected: 7\")\n await psi_command(ctx, arg)\n\n for emoji in emoji_list:\n await msg.remove_reaction(emoji, ctx.bot.user)\n\n except asyncio.TimeoutError:\n text = text + \"\\n\\nCommand timeout.\"\n embed = discord.Embed(title=title, description=text)\n await msg.edit(embed=embed)\n for emoji in emoji_list:\n await msg.remove_reaction(emoji, ctx.bot.user)", "def help(self, event):\n 
status = self.data.get(device)\n if not status:\n event.msg.reply('The {} isn\\'t being used by anyone yet!'.format(device))\n return\n\n self.data[device]['subs'].append(event.author)\n event.msg.reply(':ok_hand: you\\'ve been added to the queue!')", "def _addHistory(self):\n input = self._entryWidget.get()\n if input == '': \n self.command(input)\n else:\n Pmw.ComboBox._addHistory(*(self,))", "def add_to_queue(self):\n self.manager.client.song_q.put(self.get_text(None))", "def AddToQueue(sender,seriesId,url=None):\n\tlogin()\n\tresult = addToQueue(seriesId)\n\t\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Added to Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not add to Queue.')", "def display_stock():", "def added_to_queue(self, link):", "def onShipSelected(self, item):\n self.frame.mode.destroyTempFrames()\n if item:\n mySystems = self.buildSystemsAvailableData()\n self.populateListbox(self.lstSystems, mySystems)", "def _add_menu(self):\n self.menu_bar.set_menu()\n self.menu_bar.add_menu_action(\"Add\", self._show_create_library_dialog)\n self.menu_bar.add_menu_action(\"Exit\", self.close)", "async def _queue(self, ctx, *link : str):\n if link == ():\n queue_list = await self.queue_titles()\n await self.bot.say(\"Videos in queue: \\n\" + queue_list + \"\\n\\nType queue <link> to add a link or search terms to the queue.\")\n elif await self.check_voice(ctx.message.author, ctx.message):\n if not self.playlist:\n link = \" \".join(link)\n if \"http\" not in link or \".\" not in link:\n link = \"[SEARCH:]\" + link\n else:\n if not self.is_playlist_valid([link]):\n await self.bot.say(\"Invalid link.\")\n return\n self.queue.append(link)\n msg = ctx.message\n result = await self.get_song_metadata(link)\n try: # In case of invalid SOUNDCLOUD ID\n if result[\"title\"] != []:\n await self.bot.say(\"{} has been put into the queue by {}.\".format(result[\"title\"], msg.author))\n else:\n await self.bot.say(\"The song has been put into the queue by {}, however it may error.\".format(msg.author))\n except:\n await self.bot.say(\"A song has been put into the queue by {}.\".format(msg.author))\n\n else:\n await self.bot.say(\"I'm already playing a playlist.\")", "def menu_draw(self, context):\n self.layout.operator(EmbarkNewExportCollection.bl_idname, icon='COLLECTION_NEW')", "def addque(self, qkey, queue, update=False):\n if update or (qkey not in self.kqmap):\n self.kqmap[qkey] = queue", "def queueSong(self):\n queueWindow = curses.newwin(5, 40, 5, 50)\n queueWindow.border()\n queueWindow.addstr(0,0, \"What is the file path?\", curses.A_REVERSE)\n self.stdscr.refresh()\n curses.echo()\n path = queueWindow.getstr(1,1, 30)\n curses.noecho()\n del queueWindow\n self.stdscr.touchwin()\n self.stdscr.refresh()\n \n try:\n self.library.add_tracks(path.decode(encoding=\"utf-8\"))\n except CLI_Exception.CLI_Audio_File_Exception:\n self.printError('Error queueing file or folder')", "def main_menu(message=None):\n clear()\n print(\"WORK LOG\\n========\\n\")\n if message:\n print(message+\"\\n\")\n else:\n print(\"What would you like to do?\\n\")\n print(\"(A)dd a task\")\n if Task.select().count() != 0:\n print(\"(V)iew all tasks\")\n print(\"(S)earch for a task\")\n print(\"(Q)uit\")\n return input(\"> \")", "def listQueue(self):\n\n queue = self.library.list_tracks()\n if len(queue) > 0:\n listWin = curses.newwin(len(queue), 40, 5, 50)\n for i in range(len(queue)):\n listWin.addstr(i, 0, queue[i])\n self.stdscr.refresh()\n curses.echo()\n listWin.getch()\n curses.noecho()\n 
del listWin\n self.stdscr.touchwin()\n self.stdscr.refresh()\n else:\n self.printError('Nothing to list')", "def _enqueue(self):\n\t\tself._dataModel.makeUnit()\n\t\tself._inQueue += 1\n\t\tself._queueLabel.configure(text = \"In Queue: \" + str(self._inQueue))", "def CategorizedPopUpHndlr(self, event, whichList):\r\n menu = QMenu(self)\r\n newCatList = self.listCategories.selectedItems()\r\n if len(newCatList) == 0:\r\n str = 'None'\r\n else:\r\n str = newCatList[0].text()\r\n \r\n self.NewCatAct.setText(str)\r\n menu.addAction(self.NewCatAct)\r\n menu.addAction(self.NoneCatAct)\r\n if whichList.currentItem() == None:\r\n return\r\n selectedEntryStr = whichList.currentItem().text()\r\n self.newCatStr = str\r\n self.selectedEntry = self.cf.find(selectedEntryStr)\r\n #menu.addAction(copyAct)\r\n #menu.addAction(pasteAct)\r\n menu.show()\r\n what = menu.exec_(PyQt5.QtGui.QCursor.pos())\r\n if (what):\r\n what.trigger()\r\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
construct a URL to display at resolution based on videoInfo without checking for coherence to what the site's got or if the resolution is valid
def getVideoUrl(videoInfo, resolution):
    url = videoInfo['baseUrl']+"?p" + str(resolution) + "=1"
    # we always skip adult filtering (it's done in the presentation code before we reach here)
    url = url + "&skip_wall=1"
    url = url + ("&t=0" if Prefs['restart'] == 'Restart' else "")
    url = url + "&small="+("1" if videoInfo['small'] is True else "0")
    url = url + "&wide="+("1" if videoInfo['wide'] is True or JUST_USE_WIDE is True else "0")
    return url
[ "def get_video_url():\n return f'{API_URL}{quote(VIDEO_NAME)}'", "def stream_url(self) -> Optional[str]:\n video_streams = self.video\n if not video_streams:\n return None\n\n for quality in VIDEO_QUALITY_TYPES:\n video_stream_url = video_streams.get(quality)\n if video_stream_url:\n return video_stream_url\n\n return next(iter(video_streams.values()))", "def getStreamURL(self, **kwargs):\n if self.TYPE not in ('movie', 'episode', 'track', 'clip'):\n raise Unsupported(f'Fetching stream URL for {self.TYPE} is unsupported.')\n\n mvb = kwargs.pop('maxVideoBitrate', None)\n vr = kwargs.pop('videoResolution', '')\n protocol = kwargs.pop('protocol', None)\n\n params = {\n 'path': self.key,\n 'mediaIndex': kwargs.pop('mediaIndex', 0),\n 'partIndex': kwargs.pop('mediaIndex', 0),\n 'protocol': protocol,\n 'fastSeek': kwargs.pop('fastSeek', 1),\n 'copyts': kwargs.pop('copyts', 1),\n 'offset': kwargs.pop('offset', 0),\n 'maxVideoBitrate': max(mvb, 64) if mvb else None,\n 'videoResolution': vr if re.match(r'^\\d+x\\d+$', vr) else None,\n 'X-Plex-Platform': kwargs.pop('platform', 'Chrome')\n }\n params.update(kwargs)\n\n # remove None values\n params = {k: v for k, v in params.items() if v is not None}\n streamtype = 'audio' if self.TYPE in ('track', 'album') else 'video'\n ext = 'mpd' if protocol == 'dash' else 'm3u8'\n\n return self._server.url(\n f'/{streamtype}/:/transcode/universal/start.{ext}?{urlencode(params)}',\n includeToken=True\n )", "def show_video(req):\n board = req.user.board\n\n image_link = board.image_link()\n mid = str(board.mid)\n # if mid < 10:\n # mid = '0'+str(mid) \n # print 'mid',mid\n#\timage_link = board.image_link()\n\n return render(req, \"webcam/show_video.html\", {\"image_link\": image_link, \"mid\": mid})", "def video_resolution(self, res):\n self._video_resolution = tuple(res)\n self.process_image()\n self.clear_segments()", "def get_mp4_url(text):\n # type: (str) -> Optional[str]\n mp4 = re.search(r\"(http.*{}\\.mp4)\".format(VIDEO_DEFINITION), text)\n if not mp4:\n logger.debug(\"get_mp4_url no mp4: {}\".format(VIDEO_DEFINITION))\n swap = \"Low\" if VIDEO_DEFINITION == \"High\" else \"High\"\n mp4 = re.search(r\"(http.*{}\\.mp4)\".format(swap), text)\n if not mp4:\n logger.debug(\"get_mp4_url no mp4: {}\".format(swap))\n return\n return mp4.group(1)", "def get_frame_url(frame_number:int):\n return f'{get_video_url()}/frame/{frame_number}'", "def proxy_url(self, maxwidth, url):\n if self.local:\n return url\n else:\n return resize_url(url, maxwidth)", "def get_video_url_from_video_id(video_id):\n # from js\n data = [\"\"] * 256\n for index, _ in enumerate(data):\n t = index\n for i in range(8):\n t = -306674912 ^ unsigned_right_shitf(t, 1) if 1 & t else unsigned_right_shitf(t, 1)\n data[index] = t\n\n def tmp():\n rand_num = random.random()\n path = \"/video/urls/v/1/toutiao/mp4/{video_id}?r={random_num}\".format(video_id=video_id,\n random_num=str(rand_num)[2:])\n e = o = r = -1\n i, a = 0, len(path)\n while i < a:\n e = ord(path[i])\n i += 1\n if e < 128:\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ e)]\n else:\n if e < 2048:\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (192 | e >> 6 & 31))]\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]\n else:\n if 55296 <= e < 57344:\n e = (1023 & e) + 64\n i += 1\n o = 1023 & t.url(i)\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (240 | e >> 8 & 7))]\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 2 & 63))]\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | o >> 6 & 
15 | (3 & e) << 4))]\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & o))]\n else:\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (224 | e >> 12 & 15))]\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | e >> 6 & 63))]\n r = unsigned_right_shitf(r, 8) ^ data[255 & (r ^ (128 | 63 & e))]\n\n return \"https://ib.365yg.com{path}&s={param}\".format(path=path, param=unsigned_right_shitf(r ^ -1, 0))\n\n while 1:\n url = tmp()\n if url.split(\"=\")[-1][0] != \"-\": # 参数s不能为负数\n return url", "def get_url_with_meta(self):\r\n url = self.get_url()\r\n if self.duration is not None:\r\n url += '@'+str(self.duration)\r\n return url", "def __extract_video_url(self):\n\n self.__logger.info('wait for %s seconds', self.__seconds)\n\n time.sleep(self.__seconds)\n\n self.__logger.info('Extract video url from %s', self.__args.url)\n\n try:\n req = requests.post(self.__args.url, data=self.__params)\n html_source = req.text\n except requests.exceptions.RequestException as error:\n self.__logger.error(error)\n sys.exit(1)\n\n self.__logger.debug(html_source)\n\n xpath_script = './/div[@id=\"player_code\"]/script[3]'\n script = lxml.etree.HTML(html_source).find(xpath_script).text\n\n self.__logger.debug(script)\n\n text = script.split(',')\n url = text[2]\n self.__video_url = url[9:-1]\n\n self.__logger.debug(self.__video_url)", "def EpisodeDetail(title, url):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n try:\n html = html_from_url(clean_url(url))\n except Exception as e:\n Log.Critical('* EpisodeDetail Error: %s' %str(e))\n message = 'This media has expired.' if ('HTTP Error' in str(e) and '404' in str(e)) else str(e)\n return MessageContainer('Warning', message)\n\n ptitle = html.xpath(\"//title/text()\")[0].rsplit(\" Streaming\",1)[0].rsplit(\" Download\",1)[0]\n thumb = html.xpath('//img[@id=\"nameimage\"]/@src')\n thumb = (thumb[0] if thumb[0].startswith('http') else clean_url(thumb[0])) if thumb else None\n\n wpm = html.xpath('//iframe[@id=\"wpm\"]/@src')\n if not wpm:\n return MessageContainer('Warning', 'No Video Source Found.')\n\n pass_html = html_from_url(clean_url(wpm[0]))\n video_urls = []\n source_iframe = pass_html.xpath('//iframe/@src')\n if source_iframe:\n part = 0\n if pass_html.xpath('//div[starts-with(@id, \"part\")]'):\n part = 1\n\n try:\n video_urls.append((part, html_from_url(clean_url(source_iframe[0])).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Error('* EpisodeDetail Error: %s' %str(e))\n pass\n\n if part != 0:\n base_iframe = source_iframe[0].split('.php')[0]\n count = 1\n more = True\n while more and (count < 5):\n count += 1\n try:\n video_urls.append((count, html_from_url(clean_url(base_iframe + '%i.php' %count)).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Warn('* EpisodeDetail Warning: %s' %str(e))\n more = False\n\n for p, u in sorted(video_urls):\n if 'prx.proxy' in u:\n u = 'https://docs.google.com/file/' + u.split('/file/')[1]\n oc.add(VideoClipObject(\n title='%i-%s' %(p, ptitle) if p != 0 else ptitle,\n thumb=Callback(get_thumb, url=thumb),\n url=u\n ))\n\n trailpm = html.xpath('//iframe[@id=\"trailpm\"]/@src')\n if trailpm:\n thtml = html_from_url(clean_url(trailpm[0]))\n yttrailer = thtml.xpath('//iframe[@id=\"yttrailer\"]/@src')\n if yttrailer:\n yttrailer_url = yttrailer[0] if yttrailer[0].startswith('http') else 'https:' + yttrailer[0]\n if 'prx.proxy' in yttrailer_url:\n yttrailer_url = 'http://www.youtube.com/embed/' + yttrailer_url.split('/embed/')[1]\n 
oc.add(VideoClipObject(url=yttrailer_url, thumb=R(ICON_SERIES), title=\"Watch Trailer\"))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Media Found')", "def get_movie_page_url(self, title):\n invalid_results = [\"(TV Episode)\", \"(TV Series)\", \"(TV Mini-Series)\", \"(Short)\", \"(Video Game)\"]\n search_page = self.get_search_page(title)\n\n try:\n for index, section in enumerate(search_page.xpath('//*[@id=\"main\"]/div[1]/div')):\n if len(section.xpath('h3/text()')) > 0:\n\n # Find the Div associated with Titles (rather than Characters, etc)\n if section.xpath('h3/text()')[0] == \"Titles\":\n\n # Select first in list which doesn't contain invalid_results\n for index, list_title in enumerate(search_page.xpath('//*[@id=\"main\"]/div[1]/div[2]/table[1]/tr')):\n if not any(x in list_title.text_content() for x in invalid_results):\n endpoint = search_page.xpath('//*[@id=\"main\"]/div[1]/div[2]/table[1]/tr[%i]/td/a' %(index+1))[0].attrib['href']\n return IMDB.BASE_URL + endpoint\n except IndexError:\n return", "def get_cam_name(cam: str) -> str:\n\n if type(cam) is str and cam.isdigit():\n cam = int(cam)\n\n # test video feed\n read_pass = _test_cam(cam)\n\n # if capture fails, try as YouTube Stream\n # https://pypi.org/project/pafy/\n if not read_pass:\n if '/' in cam and 'youtube' in cam: # a full video path was given\n cam = cam.split('/')[-1]\n try:\n video_pafy = pafy.new(cam)\n except Exception:\n raise Exception(\"No video stream found: {}\".format(cam))\n # get most reasonable stream h x w < 350k\n res_limit = 350000\n stream_num = 0\n\n # use pafy to get the url of the stream\n # find stream with resolution within res_limit\n for i, stream in enumerate(video_pafy.streams):\n x, y = np.array(stream.resolution.split('x'), dtype=int)\n if x * y < res_limit:\n stream_num = i\n else:\n break\n stream = video_pafy.streams[stream_num]\n\n # test stream\n read_pass = _test_cam(stream.url)\n\n if read_pass:\n cam = stream.url\n logger.info(\"YouTube Video Stream Detected!\")\n logger.info(\"Video Resolution : {}\".format(stream.resolution))\n\n logger.info(\"Video Test : {}\".format(\"OK\" if read_pass else \"FAIL - check that streamer is publishing\"))\n\n if not read_pass:\n raise Exception(\"Can't acquire video source: {}\".format(cam))\n\n return cam", "def get_vid_from_url(self, url):\n hit = re.search(r'live.qq.com/(\\d+)', url)\n if hit is not None:\n return hit.group(1)\n hit = re.search(r'live.qq.com/directory/match/(\\d+)', url)\n if hit is not None:\n return self.get_room_id_from_url(hit.group(1))\n html = get_content(url)\n room_id = match1(html, r'room_id\\\":(\\d+)')\n if room_id is None:\n log.wtf('Unknown page {}'.format(url))\n return room_id", "def get_movie_url(self, youtube_id):\n\n \n return 'http://www.youtube.com/watch?v=' + youtube_id", "def craftLink(info, seasonID, compare, cid):\n baseLink = \"https://badmintonnederland.toernooi.nl/sport/\"\n link = baseLink + info + \".aspx?id=\" + seasonID + compare + str(cid)\n return link", "def generate_subtitle_url_from_movie_name(self, name):\n base_url = 'http://subsmax.com/api/10/%s'\n filteredName = self.remove_non_ascii_chars(st=name)\n try:\n url = ('-'.join([str(x) for x in string.split(filteredName.lower(),\n \" \")]) + '-en')\n return base_url % self.remove_illegal_search_chars(url)\n except Exception as e:\n self.log.write_to_log(message=e, where=\"generate-url-from-name\")", "def show_video_to_admin(req, mid):\n checkadmin(req)\n board = Board.objects.get(mid=int(mid))\n image_link = 
board.image_link()\n mid = str(board.mid)\n return render(req, \"webcam/show_video.html\", {\"image_link\": image_link, \"mid\": mid})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct media objects from an episode.
def constructMediaObject(episode):
    if True or len(episode['availableResolutions']) == 0:
        episode['availableResolutions'] = getAvailResFromPage(episode['link'])
        # FIXME I guess it's better to have something than nothing? It was giving Key error
        # on episode number
        if str(episode['mediaId']) not in Dict['episodes']:
            Dict['episodes'][str(episode['mediaId'])] = episode
        Dict['episodes'][str(episode['mediaId'])]['availableResolutions'] = episode['availableResolutions']

    videoInfo = getVideoInfo(episode['link'], episode['mediaId'], episode['availableResolutions'])
    videoInfo['small'] = (isPaid() and isPremium(episode.get('category'))) is False

    epsObject = EpisodeObject(
        url = videoInfo['baseUrl'], #dunno if this will work
        title = episode['title'],
        summary = episode['description']
    )

    for q in episode['availableResolutions']:
        dur = episode.get('duration')
        if not (dur and dur > 0):
            dur = 0
        mo = MediaObject(
            duration = dur,
            video_resolution = q,
            protocol = Protocol.WebKit,
            parts = [
                PartObject(
                    key = WebVideoURL(getVideoUrl(videoInfo, q))
                )
            ]
        )
        epsObject.add(mo)

    dir = ObjectContainer( objects = [epsObject])
    return dir
[ "def build_from(lines:[str], number:int=0) -> object:\n have_chapter = any(REG_CHAPTER.fullmatch(line.strip()) for line in lines)\n lines = iter(lines)\n # get title, and waste the next line, that should be empty\n title = next(lines).strip()\n empty = next(lines).strip()\n assert not empty, f\"an empty line should follow any episode title, not '{empty}' !\"\n if have_chapter:\n chapters = Chapter.build_from(lines)\n else: # make a phony chapter, populate it with all text\n chapters = [Chapter(1, '', tuple(Line.build_from(lines)))]\n return Episode(number, title, tuple(chapters))", "def createobjects(files, ep_data):\n #__init__(self,season,episode,minor,name,filename):\n objects=[]\n for f in files:# only create for known files\n ed=ep_data[f]\n objects.append(episodes(ed['Season'],ed['Episode'],ed['Minor'],ed['Name'],f))\n return objects # use create_all method", "def init_parser():\r\n episode_list = [] # episode #'s\r\n title_list = [] # episode titles\r\n episode_date = [] # date of episode's release\r\n podcast_type = [] # Whether it's Hopped-Up Gaming or Super Hopped-Up\r\n duration_list = [] # Episode Length\r\n beer_list = [] # That Episode's Beer\r\n host_list = [] # Hosts in episode\r\n\r\n for url in urls:\r\n podcast_feed = feedparser.parse(urls[url])\r\n\r\n for entry in podcast_feed['entries']:\r\n podcast_type.append(url)\r\n # Parse episode number from title\r\n try:\r\n episode_list.append(int(entry[\"title\"].split(\" \")[1][:-1]))\r\n except ValueError:\r\n episode_list.append(0)\r\n\r\n # Parse episode name from title\r\n try:\r\n title_list.append(entry[\"title\"].split(\": \")[1])\r\n except IndexError:\r\n title_list.append(entry[\"title\"].split(\": \")[0])\r\n\r\n # Pull episode day, month, year\r\n episode_date.append(entry['published'][5:16])\r\n\r\n # Pull episode's duration\r\n duration_list.append(entry['itunes_duration_detail']['value'])\r\n\r\n # Pull episode content, (attempt to) parse hosts and beer\r\n try:\r\n beer_list.append(entry['content'][0]['value'].split(\"of the Week:\")[1].split(\"\\n\")[0])\r\n except IndexError:\r\n beer_list.append(\"Couldn't Parse\")\r\n try:\r\n host_list.append(entry['content'][0]['value'].split(\"Hosts: \")[1].split(\"\\n\")[0])\r\n except IndexError:\r\n host_list.append(\"Couldn't Parse\")\r\n\r\n # Throw results into pandas dataframe\r\n podcast_df = pd.DataFrame({\"Podcast Type\": podcast_type,\r\n \"Episode Number\": episode_list,\r\n \"Episode Title\": title_list,\r\n \"Episode Date\": episode_date,\r\n \"Episode Length\": duration_list,\r\n \"Hosts\": host_list,\r\n \"Episode Beer\": beer_list,\r\n })\r\n\r\n # Sort entries so latest from new podcast first\r\n podcast_df.sort_values(by=['Podcast Type', 'Episode Number'], ascending=False, inplace=True)\r\n # Re-index, convert to csv\r\n podcast_df.reset_index(drop=True, inplace=True)\r\n podcast_df.to_csv('podcast.csv')\r\n\r\n logfile = open(\"logfile\", \"a+\")\r\n logfile.write(str(datetime.datetime.now()) + \": New CSV file created\\n\")\r\n logfile.close()\r\n return", "def _convert_tvdb_episode_metadata(imdb_id , season_metadata, episode, banners=True):\n info = copy.deepcopy(season_metadata)\n info['episode'] = episode.get('episodenumber')\n info['title'] = episode.get('episodename', '')\n info['aired'] = episode.get('firstaired', '')\n info['premiered'] = episode.get('firstaired', '')\n info['rating'] = episode.get('rating', '')\n info['plot'] = episode.get('overview', '')\n info['plotoutline'] = episode.get('overview', '')\n info['votes'] = 
episode.get('ratingcount', '')\n info['imdb_id'] = imdb_id\n if banners:\n info['poster'] = episode['filename']\n return info", "def get_info():\n title, speaker, publish_year, time, language, price = \\\n input(\"*Enter* title| speaker| publish_year| time| language| price : \\n\").split('|')\n media = PodcastEpisode(title, speaker, publish_year, int(time), language, price)\n return media", "def get_episode_media_url(self, podcast_entry):\r\n links = podcast_entry[\"links\"]\r\n\r\n for link in links:\r\n if \"audio\" in link[\"type\"]:\r\n return link[\"href\"]", "def __init__(self, json_node, series=None, build_full_object=False):\n self.series_id = 0\n self.series_name = None\n self.anidb_aid = 0\n self.anidb_eid = 0\n self.actors = []\n self.url = None\n self.item_type = 'episode'\n if series is not None:\n self.series_id = series.id\n self.series_name = series.name\n self.actors = series.actors\n self.anidb_aid = series.anidb_aid\n if series.is_movie:\n self.item_type = 'movie'\n\n Directory.__init__(self, json_node, True)\n # don't redownload info on an okay object\n if build_full_object and self.size < 0:\n json_node = self.get_full_object()\n Directory.__init__(self, json_node)\n # check again, as we might have replaced it above\n if isinstance(json_node, int) or pyproxy.is_unicode_or_string(json_node):\n eh.spam(self)\n return\n\n self.episode_number = pyproxy.safe_int(json_node.get('epnumber', ''))\n self.episode_type = json_node.get('eptype', 'Other')\n if self.anidb_aid == 0:\n self.anidb_aid = pyproxy.safe_int(json_node.get('aid', 0))\n self.anidb_eid = pyproxy.safe_int(json_node.get('eid', 0))\n self.date = model_utils.get_airdate(json_node)\n self.tvdb_episode = json_node.get('season', '0x0')\n self.update_date = None\n self.hash_content = None\n\n self.process_children(json_node)\n\n if self.name is None:\n self.name = 'Episode ' + str(self.episode_number)\n self.alternate_name = model_utils.get_title(json_node, 'x-jat', 'main')\n\n self.watched = pyproxy.safe_int(json_node.get('view', 0)) != 0\n self.watched_date = str(json_node.get('view_date', ''))\n self.year = pyproxy.safe_int(json_node.get('year', ''))\n\n self.rating = float(str(json_node.get('rating', '0')).replace(',', '.'))\n self.user_rating = float(str(json_node.get('userrating', '0')).replace(',', '.'))\n self.overview = model_utils.make_text_nice(pyproxy.decode(json_node.get('summary', '')))\n self.votes = pyproxy.safe_int(json_node.get('votes', ''))\n self.outline = \" \".join(self.overview.split(\".\", 3)[:2]) # first 3 sentence\n self.tags = model_utils.get_tags(json_node.get('tags', {}))\n\n if self.episode_type != 'Special':\n season = str(json_node.get('season', '1'))\n if 'x' in season:\n season = season.split('x')[0]\n else:\n season = '0'\n self.season = pyproxy.safe_int(season)\n\n eh.spam(self)", "def populate(self):\r\n seasons = [0]\r\n season = 0\r\n episodes = [0]\r\n namelist = [[0]]\r\n runtimelist = [[0]]\r\n episodedescriptionlist = [[0]]\r\n data = showInformation.getJson(self.episodesurl)\r\n for dicts in data:\r\n for keys in dicts:\r\n if keys == \"season\" and dicts[keys] not in seasons: \r\n seasons.append(dicts[keys])\r\n season = dicts[keys]\r\n episodes.append(0)\r\n namelist.append([0])\r\n runtimelist.append([0])\r\n episodedescriptionlist.append([0])\r\n if keys == \"number\":\r\n episodes[season] += 1\r\n namelist[season].append(dicts[\"name\"])\r\n runtimelist[season].append(dicts[\"runtime\"])\r\n 
episodedescriptionlist[season].append(self.stringsToRemove(dicts[\"summary\"]))\r\n \r\n for i in range(1, len(seasons)):\r\n self.seasonsepisodedict[seasons[i]] = episodes[i]\r\n\r\n for i in range(len(namelist)):\r\n for j in range(len(namelist[i])):\r\n self.runtimedescriptionofepisodes[namelist[i][j]] = [runtimelist[i][j], episodedescriptionlist[i][j]]\r\n \r\n self.cast = showInformation.populateCast(self)\r\n self.genres = showInformation.populateGenre(self)\r\n self.episodenamelist = namelist", "def parse_line(self, text):\n result = {}\n\n # Using _re_valid_show we will match both the Show and Episode\n show_matches = self._re_valid_show.match(text)\n if show_matches:\n distribution = show_matches.group(1)\n votes = int(show_matches.group(3))\n ratings = float(show_matches.group(4))\n\n show_title = show_matches.group(5)\n show_year = show_matches.group(6)\n\n result = {\n 'type': \"Show\",\n 'show_title': show_title,\n 'year': int(show_year),\n 'ratings': float(ratings),\n 'votes': int(votes),\n 'distribution': distribution\n }\n else:\n # Nothing more to do here\n return {}\n\n # If _re_valid_episode is a match we will add episode information\n episode_matches = self._re_valid_episode.match(text)\n if episode_matches:\n # Change the type from Show to Episode\n result['type'] = \"Episode\"\n\n #episode_details = self.parse_episode(episode_matches.group(1))\n \"\"\"\n The string containing episode details is not nicely formatted by IMDb\n It can be:\n \"episode_title\"\n \"episode_title(#2.3)\"\n \"episode_title(#3)\"\n \"(#2.3)\"\n \"(#3)\"\n \"\"\"\n\n split_results = self._re_episode_season_and_number.split(episode_matches.group(1))\n if len(split_results) == 1:\n # We have only the title\n result['episode_title'] = split_results[0]\n result['season'] = 0\n result['number'] = 0\n elif len(split_results) == 3:\n result[\"episode_title\"] = split_results[0]\n\n dot_split_result = split_results[1].split('.')\n if len(dot_split_result) == 2:\n result['season'] = int(dot_split_result[0])\n result['number'] = int(dot_split_result[1])\n else:\n result['season'] = 1\n result['number'] = int(dot_split_result[0])\n else:\n print(\"parse_episode unexpected split results, original text is: \" + text)\n\n return result", "def _add_episode(self, title, show_dir, season, episode, video_id, build_url):\n season = int(season)\n episode = int(episode)\n title = re.sub(r'[?|$|!|:|#]', r'', title)\n\n self.log('Adding S{}E{} (id={}) of {} (dest={})'\n .format(season, episode, video_id, title.encode('utf-8'),\n show_dir))\n\n # add season\n if self.season_exists(title=title, season=season) is False:\n self.log(\n 'Season {} does not exist, adding entry to internal library.'\n .format(season))\n self.db[self.series_label][title]['seasons'].append(season)\n\n # add episode\n episode_meta = 'S%02dE%02d' % (season, episode)\n episode_exists = self.episode_exists(\n title=title,\n season=season,\n episode=episode)\n if episode_exists is False:\n self.log(\n 'S{}E{} does not exist, adding entry to internal library.'\n .format(season, episode))\n self.db[self.series_label][title]['episodes'].append(episode_meta)\n\n # create strm file\n filename = episode_meta + '.strm'\n filepath = os.path.join(show_dir, filename)\n if xbmcvfs.exists(filepath):\n self.log('strm file {} already exists, not writing it'\n .format(filepath))\n return\n url = build_url({'action': 'play_video', 'video_id': video_id})\n self.write_strm_file(\n path=filepath,\n url=url,\n title_player=title + ' - ' + episode_meta)", "def 
episodes(self):\n res = []\n page = 1\n\n while True:\n resp = self._tvdb._make_request('/series/' + str(self.id) + '/episodes', {'page': page})\n res += self._tvdb._build_list_of_models(models.BasicEpisode, resp['data'])\n if not resp['links']['next']:\n break\n page = resp['links']['next']\n return self.__class__.EpisodesResult(self.id, res, self._tvdb)", "def get_episode_metadata(show_id, season_num, episode_num):\n metadata_provider = ADDON.getSetting(\"tv_metadata_provider\")\n info, created_time = fetch_episode_from_db(show_id,\n str(season_num),\n str(episode_num),\n metadata_provider, LANG)\n if info:\n return info\n elif created_time and float(created_time) <= time.time() + 3600:\n return info\n if info:\n return info\n try:\n if metadata_provider == \"Trakt\":\n headers = {\n 'Content-Type': 'application/json',\n 'trakt-api-version': '2',\n 'trakt-api-key': TRAKT_CLIENT_ID\n }\n url = '{0}/shows/{1}/seasons/{2}/episodes/{3}?extended=full'.format(\n TRAKT_API_ENDPOINT, show_id, season_num, episode_num)\n show_metadata = get_show_metadata(show_id)\n info = requests.get(url, headers=headers, verify=False).json()\n if LANG != \"en\":\n translation_url = TRAKT_API_ENDPOINT + \\\n \"/shows/{1}/seasons/{2}/episodes/{3}/translations/{4}\".format(\n TRAKT_API_ENDPOINT, show_id, season_num,\n episode_num, LANG)\n translation_info = requests.get(\n translation_url, headers=headers, verify=False).json()\n if translation_info:\n translation_info = translation_info[0]\n for key in translation_info.iterkeys():\n info[key] = translation_info[key]\n info = _convert_trakt_episode_metadata(show_metadata, info)\n elif metadata_provider == \"TVDB\":\n tvdb_id = tvdb.search_by_imdb(show_id)\n if tvdb_id:\n show = tvdb[tvdb_id]\n show_metadata = get_show_metadata(show_id)\n season = show[int(season_num)]\n season_metadata = _convert_tvdb_season_metadata(\n show_metadata, season, language=LANG)\n episode_metadata = _convert_tvdb_episode_metadata(\n show_id, season_metadata, season[int(episode_num)])\n info = episode_metadata\n except:\n pass\n save_episode_to_db(show_id, season_num, episode_num, metadata_provider,\n LANG, info)\n return info", "def create_tv_media(filename, extension, destination, remove_title = False):\n tv_file = None\n if find_episode_pattern(filename) is not None:\n episode_tag, season, episode = get_episode_info(filename)\n if episode_tag is None:\n raise Exception(\"[!] 
Failed to process filename as tv show pattern.\")\n tag_start = int(filename.find(episode_tag))\n tag_end = int(tag_start + len(episode_tag)) \n showname = (filename[:tag_start]).strip()\n showname = process_tvshow_name(showname) \n showname, tvshow_year = get_filename_year(showname)\n if tvshow_year is not None:\n showname = \"{0} ({1})\".format(showname, tvshow_year)\n episode_title = (filename[tag_end:(len(filename))]).strip() \n if remove_title == True or len(episode_title) < 1:\n episode_title = None\n tvshow_destination = find_tvshow_path(destination, showname)\n if tvshow_destination is None:\n tvshow_destination = destination\n tv_file = media.TVMedia(showname, season, episode, episode_title, extension, tvshow_destination)\n return tv_file", "def get_season(self, season: int) -> Season:\n if self.lang == \"de\":\n url = f\"https://www.southpark.de/feeds/carousel/video/e3748950-6c2a-4201-8e45-89e255c06df1/30/1/json/!airdate/season-{season}\"\n elif self.lang == \"se\" and season < 23: # SE doesn't have the 23rd season.\n url = f\"https://www.southparkstudios.nu/feeds/carousel/video/9bbbbea3-a853-4f1c-b5cf-dc6edb9d4c00/30/1/json/!airdate/season-{season}\"\n elif self.lang == \"uk\":\n url = f\"https://www.southparkstudios.co.uk/feeds/carousel/video/02ea1fb4-2e7c-45e2-ad42-ec8a04778e64/30/1/json/!airdate/season-{season}\"\n # cc.com is the ony one with jsons so descriptions will be in english\n else:\n url = f\"https://southpark.cc.com/feeds/carousel/video/06bb4aa7-9917-4b6a-ae93-5ed7be79556a/30/1/json/!airdate/season-{season}?lang={self.lang}\"\n\n season_data = json.loads(http_get(url))\n\n episodes = []\n for e in season_data[\"results\"]:\n episodes.append(Episode(\n id=e.get(\"itemId\").strip(),\n title=e.get(\"title\").strip(),\n description=e.get(\"description\").strip(),\n short_description=e.get(\"shortDescription\").strip(),\n thumbnail=e.get(\"images\").strip(),\n date=int(e.get(\"originalAirDate\", 0).strip()),\n episode_number=e.get(\"episodeNumber\").strip(),\n episode_number_in_season=e.get(\"episodeNumber\", \"0\")[-2:].strip(),\n season=e.get(\"episodeNumber\", \"0\")[:2].strip(),\n _lang=self.lang\n ))\n\n return Season(season, episodes)", "def episodes(self, id):\n return Episodes(self, id)", "def test_multi_episodes_seperate():\n ep = EpisodeInfo(\n seriesname = 'Stargate SG-1',\n seasonnumber = 1,\n episodenumbers = [2, 3],\n episodename = [\n 'Children of the Gods (2)',\n 'The Enemy Within'],\n filename = 'stargate.example.file.avi')\n\n assertEquals(\n ep.generateFilename(),\n 'Stargate SG-1 - [01x02-03] - Children of the Gods (2), The Enemy Within.avi')", "def getEpisodeArt(episode):\n\tseriesId = None\n\tfor sk in Dict['series'].keys():\n\t\tif Dict['series'][str(sk)]['title']==episode['seriesTitle']:\n\t\t\tseriesId = int(sk)\n\tif seriesId is not None:\n\t\tartUrl = \"\"\n\t\tif Dict['series'][str(seriesId)]['tvdbId'] is not None and Prefs['fanart'] is True:\n\t\t\tartUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)\n\t\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\t\tif artUrl is not None:\n\t\t\t\tart = Function(getArt,url=artUrl)\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = Dict['series'][str(seriesId)]['art']\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = R(CRUNCHYROLL_ART)\n\telse:\n\t\tartUrl = R(CRUNCHYROLL_ART)\n\tLog.Debug(\"artUrl: %s\"%artUrl)\n\treturn artUrl", "def build_episode_video(episode, segment_file_path_names, path=None):\n\n # Do nothing if final video existed 
or catching URL video error\n if segment_file_path_names == []:\n return None\n\n # Store episode_id to save file\n episode_id = episode.episode_id\n\n # Define path as var {output_path} to save file two cases\n if path is None:\n # If default as None, path identified by the first video segment\n direct_path = os.path.dirname(segment_file_path_names[0])\n output_dir_temp = os.path.join(direct_path, f'{episode_id}.ts')\n abs_path = os.path.abspath(os.path.expanduser(direct_path))\n # In case input path\n else:\n output_dir_temp = os.path.join(TEMP_DIR, f'{episode_id}.ts')\n abs_path = os.path.abspath(os.path.expanduser(path))\n\n # Identify where to download final fideo\n output_final = os.path.join(abs_path, f'{episode_id}.ts')\n\n # In case episode_id is an empty str, get a key to down\n if episode_id == \"\":\n episode_id = episode.key\n\n # Move final video from segment directory to final working dir if *.mp4\n if segment_file_path_names[0].split(\".\")[-1] == 'mp4':\n shutil.move(segment_file_path_names[0],\\\n os.path.join(abs_path, f'{episode_id}.mp4'))\n return segment_file_path_names[0]\n\n # Open each of segments to read byte downloads final video to TEMP_DIR\n with open(output_dir_temp, 'wb') as out_file:\n for segment in segment_file_path_names:\n with open(segment, 'rb') as seg_path:\n out_file.write(seg_path.read())\n\n # Remove video segments whenever the final video is downloaded\n if os.path.exists(output_dir_temp):\n for file_path in segment_file_path_names:\n os.remove(file_path)\n\n # Copy final video from TEMP_DIR to working directory\n if path is not None:\n shutil.copyfile(output_dir_temp, output_final)\n\n return output_final", "def EpisodeDetail(title, url):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n try:\n html = html_from_url(clean_url(url))\n except Exception as e:\n Log.Critical('* EpisodeDetail Error: %s' %str(e))\n message = 'This media has expired.' 
if ('HTTP Error' in str(e) and '404' in str(e)) else str(e)\n return MessageContainer('Warning', message)\n\n ptitle = html.xpath(\"//title/text()\")[0].rsplit(\" Streaming\",1)[0].rsplit(\" Download\",1)[0]\n thumb = html.xpath('//img[@id=\"nameimage\"]/@src')\n thumb = (thumb[0] if thumb[0].startswith('http') else clean_url(thumb[0])) if thumb else None\n\n wpm = html.xpath('//iframe[@id=\"wpm\"]/@src')\n if not wpm:\n return MessageContainer('Warning', 'No Video Source Found.')\n\n pass_html = html_from_url(clean_url(wpm[0]))\n video_urls = []\n source_iframe = pass_html.xpath('//iframe/@src')\n if source_iframe:\n part = 0\n if pass_html.xpath('//div[starts-with(@id, \"part\")]'):\n part = 1\n\n try:\n video_urls.append((part, html_from_url(clean_url(source_iframe[0])).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Error('* EpisodeDetail Error: %s' %str(e))\n pass\n\n if part != 0:\n base_iframe = source_iframe[0].split('.php')[0]\n count = 1\n more = True\n while more and (count < 5):\n count += 1\n try:\n video_urls.append((count, html_from_url(clean_url(base_iframe + '%i.php' %count)).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Warn('* EpisodeDetail Warning: %s' %str(e))\n more = False\n\n for p, u in sorted(video_urls):\n if 'prx.proxy' in u:\n u = 'https://docs.google.com/file/' + u.split('/file/')[1]\n oc.add(VideoClipObject(\n title='%i-%s' %(p, ptitle) if p != 0 else ptitle,\n thumb=Callback(get_thumb, url=thumb),\n url=u\n ))\n\n trailpm = html.xpath('//iframe[@id=\"trailpm\"]/@src')\n if trailpm:\n thtml = html_from_url(clean_url(trailpm[0]))\n yttrailer = thtml.xpath('//iframe[@id=\"yttrailer\"]/@src')\n if yttrailer:\n yttrailer_url = yttrailer[0] if yttrailer[0].startswith('http') else 'https:' + yttrailer[0]\n if 'prx.proxy' in yttrailer_url:\n yttrailer_url = 'http://www.youtube.com/embed/' + yttrailer_url.split('/embed/')[1]\n oc.add(VideoClipObject(url=yttrailer_url, thumb=R(ICON_SERIES), title=\"Watch Trailer\"))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Media Found')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Play a freebie video using the direct method. As long as crunchyroll.com delivers ads through the direct stream (they do as of Feb 14 2012), this is okay IMO. This gets around crashes with redirects/content changes of video page, and sacrifices the ability to use javascript in the site config.
def PlayVideoFreebie2(sender, mediaId):
    episode = getEpisodeDict(mediaId)
    infoUrl = episode['link'] + "?p360=1&skip_wall=1&t=0&small=0&wide=0"
    req = HTTP.Request(infoUrl, immediate=True, cacheTime=10*60*60) #hm, cache time might mess up login/logout
    match = re.match(r'^.*(<link *rel *= *"video_src" *href *= *")(http:[^"]+).*$', repr(req.content), re.MULTILINE)
    if not match: # bad news
        Log.Error("###########Could not find direct swf link, trying hail mary pass...")
        Log.Debug(req.content)
        theUrl = infoUrl
    else:
        theUrl = match.group(2) + "&__qual=360"

    Log.Debug("###pre-redirect URL: %s" % theUrl)

    # try a manual redirect since redirects crash entire PMS
    import urllib2
    req = urllib2.urlopen(theUrl)
    theUrl = req.geturl()
    req.close()
    Log.Debug("####Final URL: %s" % theUrl)

    duration = episode.get('duration')
    if not duration:
        duration = 0

    return Redirect(WebVideoItem(theUrl,
        title = episode['title'],
        duration = duration,
        summary = makeEpisodeSummary(episode)
    ))
[ "def send_video_to_vidly(video):\n notify_url = absolutify(reverse('flicks.videos.notify',\n args=[settings.NOTIFY_KEY]))\n shortlink = addMedia(video.upload_url, notify_url)\n\n if shortlink is None:\n video.state = 'error'\n video.save()\n else:\n video.shortlink = shortlink\n video.state = 'pending'\n video.save()", "def _how_to_use_video():\n if st.button(\"Click here for a short video on HowTo ask a question\"):\n video_file = open(HOWTO_VIDEO_PATH, \"rb\")\n video_bytes = video_file.read()\n st.video(video_bytes)", "def download_video(link, filepath):\n ydl = YoutubeDL({\"outtmpl\": f\"{filepath}\"})\n ydl.download([link])", "async def get_speedrun_video(self, ctx: commands.Context) -> None:\n await ctx.send(choice(LINKS))", "def play_random_video(self) -> None:\n videos = self._filter_flagged_videos(self._library.get_all_videos())\n if not videos:\n print(\"No videos available\")\n return\n\n video = choice(videos)\n self.play_video(video.video_id)", "def Video(self):\n self = self._cam._AcqMode.Video\n self.__call__(start=False)", "def show_video_to_admin(req, mid):\n checkadmin(req)\n board = Board.objects.get(mid=int(mid))\n image_link = board.image_link()\n mid = str(board.mid)\n return render(req, \"webcam/show_video.html\", {\"image_link\": image_link, \"mid\": mid})", "def viewVid(v_dir: str,v_name: str, a_name: str = None) -> None:\n\tvid = os.path.join(v_dir,v_name)\n\tplayer_args = settings['player_args'].split(' ')\n\tif os.path.exists(vid):\n\t\tcmd = [settings['player'],vid]+player_args\n\t\tif a_name:\n\t\t\ta_file = os.path.join(v_dir,a_name)\n\t\t\tif (os.path.exists(a_file)):\n\t\t\t\tcmd.append(settings['player_aud'].format(a_file))\n\t\tPopen(cmd)\n\telse:\n\t\tprint(\"No video file exists.\")", "def ffplay(self):\n assert self.hasfilename() or (self.hasurl() and self.download().hasfilename()) # triggers download if needed\n cmd = 'ffplay \"%s\"' % self.filename()\n print('[vipy.video.play]: Executing \"%s\"' % cmd)\n os.system(cmd)\n return self", "def test_video(self):\n\t\t_task, _prog, _file = mock_handler_request(self.dir, 'theshadowmoose.tumblr.com/post/184562318724/another-test-post-with-video')\n\t\tres = tumblr.handle(_task, _prog)\n\t\tself.assertTrue(res, \"Tumblr video download failed!\")\n\t\tself.assertTrue(_file.exists(), \"Tumblr video was not downloaded! %s\" % res.failure_reason)\n\t\tself.assertTrue(_file.relative().endswith('.mp4'), 'Failed to use .mp4 extension for video file!')", "def ad_removal_before_video(self):\n try:\n # self.driver.switch_to.frame(self.driver.find_element_by_id('player'))\n try:\n WebDriverWait(self.driver, 15).until(EC.element_to_be_clickable(\n (By.XPATH, \"//button[@class='ytp-ad-skip-button ytp-button']\"))).click()\n except TimeoutException as e:\n try:\n self.driver.execute_script(\"return document.getElementById('movie_player').playVideo()\")\n except:\n pass\n except NoSuchFrameException:\n print(\"The script could not find the video player. 
An ad may be playing right now!\")", "def play_movie(self, url):\n self.open_url(url)", "def show_video(req):\n board = req.user.board\n\n image_link = board.image_link()\n mid = str(board.mid)\n # if mid < 10:\n # mid = '0'+str(mid) \n # print 'mid',mid\n#\timage_link = board.image_link()\n\n return render(req, \"webcam/show_video.html\", {\"image_link\": image_link, \"mid\": mid})", "def play(self, verbose=False, notebook=False, ffplay=True, figure='video'):\n \n\n if not self.isdownloaded() and self.hasurl():\n self.download()\n if not self.isloaded() and (iswebp(self.filename()) or isgif(self.filename())):\n self.load()\n \n if notebook:\n # save to temporary video, this video is not cleaned up and may accumulate \n try_import(\"IPython.display\", \"ipython\"); import IPython.display\n if not self.hasfilename() or self.isloaded() or self._isdirty():\n v = self.saveas(tempMP4()) \n warnings.warn('Saving video to temporary file \"%s\" for notebook viewer ... ' % v.filename())\n return IPython.display.Video(v.filename(), embed=True)\n return IPython.display.Video(self.filename(), embed=True)\n elif ffplay and has_ffplay:\n if self.isloaded() or self._isdirty():\n f = tempMP4()\n if verbose:\n warnings.warn('%s - Saving video to temporary file \"%s\" for ffplay ... ' % ('Video loaded into memory' if self.isloaded() else 'Dirty FFMPEG filter chain', f))\n v = self.saveas(f)\n cmd = 'ffplay \"%s\"' % v.filename()\n if verbose:\n print('[vipy.video.play]: Executing \"%s\"' % cmd)\n os.system(cmd)\n if verbose:\n print('[vipy.video.play]: Removing temporary file \"%s\"' % v.filename()) \n os.remove(v.filename()) # cleanup\n elif self.hasfilename() or (self.hasurl() and self.download().hasfilename()): # triggers download\n self.ffplay()\n else:\n raise ValueError('Invalid video file \"%s\" - ffplay requires a video filename' % self.filename())\n return self\n\n else:\n \"\"\"Fallback player. This can visualize videos without ffplay, but it cannot guarantee frame rates. Large videos with complex scenes will slow this down and will render at lower frame rates.\"\"\"\n fps = self.framerate()\n assert fps > 0, \"Invalid display framerate\"\n with Stopwatch() as sw: \n for (k,im) in enumerate(self.load() if self.isloaded() else self.stream()):\n time.sleep(max(0, (1.0/self.framerate())*int(np.ceil((self.framerate()/fps))) - sw.since())) \n im.show(figure=figure)\n if vipy.globals._user_hit_escape():\n break \n vipy.show.close('video')\n return self", "def play_random_video(self):\n\n ids = []\n for video in self._video_library.get_all_videos():\n if not video.flag:\n ids.append(video.video_id)\n if not ids:\n print(\"No videos available\")\n return\n rand = random.randint(0, len(ids)-1)\n self.play_video(ids[rand])", "def get_film_link(): \n while True:\n # Get the film link using 'which_link' given by the user and\n # iterate over until we find a valid page with a link in\n req = s.get(vodlocker_link[int(which_link)], headers=headers) \n bs = BS(req.text, \"lxml\") # create a soup object\n #file_removed = r\"^http?://(*.*.*.*)/([a-zA-Z0-9]\\+\\)(v.mp4)\" \n # TODO it will be better if we can use a\n # regex search to search for a link \n # (e.g., http://177.272.45.91/vhxjhvjhv89dyf9s8eyuf98syfhs89y/v.mp4) \n # instead of .mp4 \n \n # If this is not in the page then we dont want this page\n file_removed = \"v.mp4\" \n if file_removed not in bs:\n # Print no video file\n print(\"\\n[!] 
We could not find a video file.\") \n else:\n break", "def _downloadDirect(self, destination):\n\n self._runDownload(self.item.get('url'), destination)", "def _video_served(self, video):\n pass", "async def play_url(self, url: str) -> None:\n if self.mass.streams.base_url not in url:\n # use base implementation if 3rd party url provided...\n await super().play_url(url)\n return\n\n self.logger.debug(\"play_url: %s\", url)\n if not self.powered:\n await self.power(True)\n\n if self.state in (PlayerState.PLAYING, PlayerState.PAUSED):\n await self.stop()\n self._attr_current_url = url\n # pylint: disable=protected-access\n await self.entity._kodi.play_item({\"file\": url})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
make a crunchyroll.com API request with the passed dictionary. Optionally, specify referer to prevent request from choking.
def makeAPIRequest(valuesDict,referer=None):
    h = API_HEADERS
    if not referer is None:
        h['Referer'] = referer
    h['Cookie']=HTTP.CookiesForURL(BASE_URL)
    req = HTTP.Request("https"+API_URL,values=valuesDict,cacheTime=0,immediate=True, headers=h)
    response = re.sub(r'\n\*/$', '', re.sub(r'^/\*-secure-\n', '', req.content))
    return response
[ "def GET_request(action):\n\n # OAuth token of the user that requests will be made on behalf of\n\n\n # Login of the advertising agency client\n # Required parameter if requests are made on behalf of an advertising agency\n clientLogin = 'marketingdigital@zara.com'\n\n headers = {\n # OAuth token. The word Bearer must be used\n \"Authorization\": 'OAuth AQAAAABDFBfdAAcVB0yqdlcRyEzIu8BBs1TTLuE',\n # Login of the advertising agency client\n \"Client-Login\": clientLogin,\n # Language for response messages\n \"Accept-Language\": \"en\",\n # Mode for report generation\n \"processingMode\": \"auto\"\n # Format for monetary values in the report\n # \"returnMoneyInMicros\": \"false\",\n # Don't include the row with the report name and date range in the report\n # \"skipReportHeader\": \"true\",\n # Don't include the row with column names in the report\n # \"skipColumnHeader\": \"true\",\n # Don't include the row with the number of statistics rows in the report\n # \"skipReportSummary\": \"true\"\n }\n\n\n API_URL = 'https://api.webmaster.yandex.net/v4'\n\n\n\n retry_count = 0\n retry_max = 1\n\n try:\n resp = requests.get(API_URL + action, headers=headers)\n except Exception as message:\n if \"400\" or \"401\" in message:\n logging.error(f\"Could not retrieve html, authentication or token error: {message}\")\n sys.exit(1)\n elif retry_count < retry_max:\n print(f\"Retrying ... (count {retry_count})\")\n # sleep for fifteen minutes\n time.sleep(10)\n\n # increase the counter\n retry_count = retry_count + 1\n\n else:\n logging.error(f\"Could not retrieve response: {message}\")\n raise Exception(str(message))\n\n return resp.json()", "def make_api_request():\n cred_dict = {}\n for section_name in Config.sections():\n for name, value in Config.items(section_name):\n cred_dict[name] = value\n\n term = request.args.get('term')\n location = request.args.get('location')\n # could make validation more extensive\n if term and location:\n url_params = {\"term\": term, \"location\": location}\n bearer_token = cred_dict.get(\"bearer_token\")\n headers = {'Authorization': 'Bearer ' + bearer_token}\n response = requests.request('GET', SEARCH_URL, headers=headers, params=url_params)\n dict_response = response.json()\n json_response = jsonify(dict_response)\n\n return json_response\n else:\n return \"term or location have not been specified\"", "def base_request(state, date):\n session = requests.Session()\n retry = Retry(connect=4, backoff_factor=2)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount(\"http://\", adapter)\n session.mount(\"https://\", adapter)\n url = f\"https://brasil.io/api/dataset/covid19/caso/data/?state={state}&date={date}\"\n request = session.get(url)\n data = request.json()\n return data", "def send_har_request(self, request, followredirects=None, apikey=''):\n params = {'request': request, 'apikey': apikey}\n if followredirects is not None:\n params['followRedirects'] = followredirects\n return (self.zap._request_other(self.zap.base_other + 'exim/other/sendHarRequest/', params))", "def requester(payload):\n\n r = requests.post(url,headers=encabezado,json=payload)\n return r", "def gold_client(application_gold, api_client):\n return api_client(application_gold)", "def generateChargifyAPIURL(page_shortname, resource_id):\n url = 'https://%s.chargify.com/%s/%s.json' % (\n CONFIG['chargify_sub_domain'], page_shortname, resource_id)\n return url", "def WebRequest(self, script_url, **fields):\n\n return apply(WebRequest, (script_url, self.request), fields)", "def download_gen(url, 
referer=None):\n\n if not url:\n print \"Warning: empty URL\"\n return None\n\n # spoof some headers to make us seem ore like a browser\n headers = {\n 'Host': urlparse(url).netloc,\n 'User-Agent': (\n 'Mozilla/5.0 (X11; Linux x86_64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/34.0.1847.14 '\n 'Safari/537.36'\n ),\n 'DNT': '1',\n 'Cache-Control': 'max-age=0',\n 'Accept-Language': 'en-US,en;q=0.8',\n }\n\n if referer:\n headers['Referer'] = referer\n\n r = requests.get(url, headers=headers)\n if r.status_code == 200:\n #print \"download success: %s\" % url\n return r\n else:\n print \"download fail: (status %s) %s\" % (r.status_code, url)\n print \"Response content: %s\" % r.content\n return None", "def request(method, url, data=None, json=None, headers={}, stream=None, timeout=None):\n\t...", "def _build_request(self, req_res):\n u = urlparse.urlparse(req_res.response_url)\n if u.scheme not in ['http', 'https'] or u.netloc == '':\n raise CrawleUnsupportedScheme()\n\n address = socket.gethostbyname(u.hostname), u.port\n encrypted = u.scheme == 'https'\n\n url = urlparse.urlunparse(('', '', u.path, u.params, u.query, ''))\n if req_res.request_headers:\n headers = req_res.request_headers\n else:\n headers = {}\n if 'Accept' not in headers:\n headers['Accept'] = HEADER_DEFAULTS['Accept']\n if 'Accept-Encoding' not in headers:\n headers['Accept-Encoding'] = 'gzip'\n if 'Accept-Languge' not in headers:\n headers['Accept-Language'] = HEADER_DEFAULTS['Accept-Language']\n if 'Host' not in headers:\n if u.port == None:\n headers['Host'] = u.hostname\n else:\n headers['Host'] = '%s:%d' % (u.hostname, u.port)\n if 'User-Agent' not in headers:\n headers['User-Agent'] = HEADER_DEFAULTS['User-Agent']\n return address, encrypted, url, headers", "def __init__(self, key, proxies=None, base_url='https://io.adafruit.com'):\n self.key = key\n self.proxies = proxies\n # Save URL without trailing slash as it will be added later when\n # constructing the path.\n self.base_url = base_url.rstrip('/')", "def __make_request_url(self, teststep_dict, entry_json):\n request_params = utils.convert_list_to_dict(\n entry_json[\"request\"].get(\"queryString\", [])\n )\n\n url = entry_json[\"request\"].get(\"url\")\n if not url:\n logging.exception(\"url missed in request.\")\n sys.exit(1)\n\n parsed_object = urlparse.urlparse(url)\n if request_params:\n parsed_object = parsed_object._replace(query='')\n teststep_dict[\"request\"][\"url\"] = parsed_object.geturl()\n teststep_dict[\"request\"][\"params\"] = request_params\n else:\n teststep_dict[\"request\"][\"url\"] = url\n\n teststep_dict[\"name\"] = parsed_object.path", "def github_api_request(request_kwargs):\n headers = {\n 'Accept': 'application/vnd.github.v3+json',\n }\n request_kwargs.update({\n 'headers': headers,\n 'timeout': 60,\n })\n return request_with_retry(request_kwargs)", "def aci_app_proxy():\n if not g.user.is_authenticated: abort(401, \"Unauthorized\")\n if g.user.role != Roles.FULL_ADMIN: abort(403)\n \n # args can be provided via params or post data. 
If both are provided\n # then post data will be preferred\n is_json = False\n method = request.args.get(\"method\", \"get\").lower()\n url = request.args.get(\"url\", None)\n data = request.args.get(\"data\", {})\n params = request.args.get(\"params\", {})\n try:\n user_json = request.json\n if user_json is not None:\n if \"method\" in user_json: method = user_json[\"method\"]\n if \"url\" in user_json: url = user_json[\"url\"]\n if \"data\" in user_json: data = user_json[\"data\"]\n if \"params\" in user_json: params = user_json[\"params\"]\n except BadRequest as e: pass\n \n # force data from json and back to ensure it's properly formatted \n if data is not None and type(data) is not dict:\n try: data = json.loads(data)\n except Exception as e: abort(400, \"invalid value for 'data'\")\n data = json.dumps(data)\n # leave params as dict as required by requests methods\n if params is not None and type(params) is not dict:\n try: params = json.loads(params)\n except Exception as e: abort(400, \"invalid value for 'params'\")\n\n # validate url and methods\n if type(method) is not str and type(method) is not unicode:\n abort(400, \"invalid value for 'method'\")\n if url is None:\n abort(400, \"missing required attribute 'url'\")\n if type(url) is not str and type(url) is not unicode:\n abort(400, \"invalid value for 'url'\")\n if not re.search(\"^/\", url):\n abort(400, \"invalid value for 'url', must start with / character\") \n\n method = method.lower()\n url = \"%s%s\"%(current_app.config.get(\"PROXY_URL\", \"http://localhost\"),url)\n header = {}\n if \"/api/\" in url: \n header = {\"content-type\":\"application/json\"}\n is_json = True\n if method == \"get\":\n r = requests.get(url, verify=False, data=data, params=params,\n cookies=request.cookies,headers=header)\n elif method == \"post\":\n r = requests.post(url, verify=False, data=data, params=params,\n cookies=request.cookies,headers=header)\n elif method == \"delete\":\n r = requests.delete(url, verify=False, data=data, params=params,\n cookies=request.cookies,headers=header)\n else:\n abort(400, \"invalid value for 'method'\")\n \n if r.status_code != 200:\n # if json was provided in the status code with attribute error, \n # extract it and provide just the error text back to user\n text = r.text\n try: \n js = r.json()\n if \"error\" in js: text = js[\"error\"] \n except Exception as e: pass\n abort(r.status_code, text)\n if is_json:\n try: return jsonify(r.json())\n except Exception as e:\n r1 = re.search(\"https?://[^/]+(?P<clean>.*)\", r.url)\n if r1 is not None: clean = r1.group(\"clean\")\n else:clean = r.url\n abort(500, \"proxy to (%s)%s failed, received non-json reply\" % (\n method, clean))\n else:\n return make_response(r.text)", "def get_request(url, **kwargs):\n # print(\"get_request: received kwargs {}\".format(kwargs))\n # print(\"get_request: received url {}\".format(url))\n try:\n if 'cp_cl_api_key' in kwargs:\n # Cloudant service rest api request\n cp_cl_api_key = kwargs['cp_cl_api_key']\n # prepare payload\n del kwargs['cp_cl_api_key']\n # prepare header\n headers = {'Content-Type': 'application/json', 'cp_api_key': cp_cl_api_key}\n # call get method\n response = requests.get(url=url,headers=headers,params=kwargs)\n elif 'cp_wnlu_api_key' in kwargs:\n # WNLU service request\n cp_wnlu_api_key = kwargs['cp_wnlu_api_key']\n # prepare payload\n params = dict()\n params['text'] = kwargs['text']\n params['version'] = kwargs['version']\n params['features'] = kwargs['features']\n params['return_analyzed_text'] = 
kwargs['return_analyzed_text']\n if 'language' in kwargs:\n params['language'] = kwargs['language']\n # prepare header\n headers = {'Content-Type': 'application/json'}\n response = requests.get(url=url,headers=headers,params=kwargs,\\\n auth=HTTPBasicAuth('apikey',cp_wnlu_api_key))\n else:\n # no service key has been specified\n print(\"neither cp_cl_api_key nor cp_wnlu_api_key has been specified\")\n return {}\n except:\n # if any error occurs print it\n print(\"Network exception occurred with GET request!!!\")\n return {}\n status_code = response.status_code\n print(\"get_request: received response with status code {}\".format(status_code))\n json_data = json.loads(response.text)\n return json_data", "def testRefererInjection(requestURL):\n body = requests.get(requestURL, headers={'referer': fullPayload}).text.lower()\n xssInfo = getXSSInfo(body, requestURL, \"Referer\")\n ctxLog(xssInfo)", "def call(method, key=None, secret=None, **kwargs):\n params = kwargs.copy()\n\n if (key is not None) and (secret is not None):\n params['time'] = int(time.time())\n params['apiKey'] = key\n params['apiSig'] = _generate_api_sig(method, params, secret)\n\n url = urllib.parse.urljoin(CODEFORCES_API_URL, \"%s\" % method)\n print(url)\n print(1)\n res = requests.get(url, params=params)\n print(res)\n\n if res.status_code == 404:\n data = {'status': 'FAILED', 'comment': \"%s: No such method\" % method}\n elif res.status_code in (429, 503):\n time.sleep(1)\n\n return call(method, key, secret, **kwargs)\n else:\n data = json.loads(res.text)\n\n if data['status'] == 'FAILED':\n raise error.CodeforcesAPIError(data['comment'], method, kwargs)\n\n return data['result']", "def make_url(script_name, base_request=None, **fields):\n\n request = apply(WebRequest, (script_name, base_request), fields)\n return request.AsUrl()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
put a default authentication status structure into the global Dict{}. Every datum is least permissions by default.
def resetAuthInfo():
    Dict['Authentication'] = {'loggedInSince':0.0, 'failedLoginCount':0, 'AnimePremium': False, 'DramaPremium': False}
[ "def create_default_values (self):\n\n self.default_values = {\"username\": '',\n \"password\": '',\n \"is_demo\": True,\n \"epic\": 'IX.D.DAX.IMF.IP',\n \"api_key\": '',\n \"proxies\": {\"https\": ''},\n \"account_nb\": '0'\n }\n for key in self.default_values:\n if key not in personal.__dict__:\n personal.__dict__[key] = self.default_values[key]", "def get_default_privileges_dict(self):\n # _DEFAULT tenant is created with two privileges\n return [{'datastore_url': auth_data_const.ALL_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0},\n {'datastore_url': auth_data_const.VM_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0}]", "def set_defaults(self):\n self._config[\"DEFAULT\"] = Config.Default\n\n if \"User\" not in self._config:\n Debug(self, \".set_defaults(): Creating empty User section\")\n self._config[\"User\"] = {}", "def auth(self):\n return dict(page='auth',subtitulo='Autenticacion')", "def load_default_permissions(permissions):\n default_permissions = {\n \"read\": [\n \"Help\",\n \"CustomAttributeDefinition\",\n {\n \"type\": \"CustomAttributeValue\",\n \"terms\": {\n \"list_property\": \"owners\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"contains\"\n },\n {\n \"type\": \"NotificationConfig\",\n \"terms\": {\n \"property_name\": \"person\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"is\"\n },\n ],\n \"create\": [\n {\n \"type\": \"NotificationConfig\",\n \"terms\": {\n \"property_name\": \"person\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"is\"\n },\n ],\n \"update\": [\n {\n \"type\": \"NotificationConfig\",\n \"terms\": {\n \"property_name\": \"person\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"is\"\n },\n ]\n }\n collect_permissions(default_permissions, None, permissions)", "def install_defaults():\n normal = Level.get_severity(Level.normal)\n warning = Level.get_severity(Level.warning)\n error = Level.get_severity(Level.error)\n\n d = Status(name=\"Down\", slug=\"down\", image=\"cross-circle\", severity=error, \\\n description=\"The service is currently down\")\n u = Status(name=\"Up\", slug=\"up\", image=\"tick-circle\", severity=normal, \\\n description=\"The service is up\")\n w = Status(name=\"Warning\", slug=\"warning\", image=\"exclamation\", severity=warning, \\\n description=\"The service is experiencing intermittent problems\")\n\n d.put()\n u.put()\n w.put()\n\n s = Setting(name=\"installed_defaults\")\n s.put()", "def initial_permissions() -> [[str, str]]:\n return {'admin_all': ['user__Admin', 'resource__All'],\n 'guest_all': ['user__Guest', 'resource__All']}", "def add_default_user_values(self, user_values):\n\n defaults = {\n 'active': True,\n 'uses_parent_account': False,\n 'corporate_card_holder': False,\n 'metadata': {},\n 'account_holder_group_token': 'DEFAULT_AHG',\n 'status': 'ACTIVE'\n }\n\n return {**defaults, **user_values}", "def UserDataFirstUpdated(self):\n # appKey can be overwritten by a different thread - we need the original value\n # - that's why it needs to be copied to a local variable\n # connectionURL does not change - so it does not need to be synchronized \n with userDataLock:\n if userData.GetPrinterCount() == 1:\n userData.printers[0].status = userconfig_userStatus\n else:\n for printer in userData.GetPrinters():\n if printer.id in userconfig_printerStatus:\n printer.status = userconfig_printerStatus[printer.id]\n if userconfig_userStatus == 'on':\n UserOnline(self.originalAppKey, config.connectionURL, config.version, 
self.UserDataUpdated)\n else:\n UserOffline(self.originalAppKey, config.connectionURL, config.version, self.UserDataUpdated)", "def _load_defaults(self):\n module = self._do_import(self._defaults_module_path)\n self._defaults = {\n k: v for k, v in module.__dict__.items()\n if k.isupper() # ignore anything that doesn't look like a setting\n }", "def load(self):\n self.update({\n \"roles\": {\n \"default\": [],\n \"admin\": []\n },\n \"files\": {},\n \"general\": {\n \"users_remove_locks\": [],\n \"roles_remove_locks\": [\"default\", \"admin\"],\n \"users_add_locks\": [],\n \"roles_add_locks\": [\"default\", \"admin\"],\n \"users_write\": [],\n \"roles_write\": [\"default\", \"admin\"],\n \"users_grant\": [],\n \"roles_grant\": [\"default\", \"admin\"],\n \"users_modify_roles\": [],\n \"roles_modify_roles\": [\"default\", \"admin\"]\n }\n })\n try:\n self.update(json.load(open(self.__path)))\n except FileNotFoundError:\n # Log that loading the configuration failed\n Logger.info('MEG PERMISSIONS: Could not load permissions file <' + self.__path + '>, using default permissions')", "def addChecker(self, checker):\n if self._usingDefaultAuth:\n self[\"credCheckers\"] = []\n self[\"credInterfaces\"] = {}\n self._usingDefaultAuth = False\n super().addChecker(checker)", "def set_default_permissions( self, trans, **kwd ):\n if trans.user:\n if 'update_roles_button' in kwd:\n p = util.Params( kwd )\n permissions = {}\n for k, v in trans.app.model.Dataset.permitted_actions.items():\n in_roles = p.get( k + '_in', [] )\n if not isinstance( in_roles, list ):\n in_roles = [ in_roles ]\n in_roles = [ trans.sa_session.query( trans.app.model.Role ).get( x ) for x in in_roles ]\n action = trans.app.security_agent.get_action( v.action ).action\n permissions[ action ] = in_roles\n trans.app.security_agent.user_set_default_permissions( trans.user, permissions )\n return trans.show_ok_message( 'Default new history permissions have been changed.' )\n return trans.fill_template( 'user/permissions.mako' )\n else:\n # User not logged in, history group must be only public\n return trans.show_error_message( \"You must be logged in to change your default permitted actions.\" )", "def load_default(self):\n pass", "def get_default_state(self) -> dict:\n return {\n k: {k_: v_[\"default\"] for k_, v_ in v.items()}\n for k, v in self.options.items()\n }", "def add_default_group(data):\n uinfo = data.get('uinfo', {})\n if uinfo.get('groups', None):\n groups = uinfo.get('groups')\n query = \"SELECT uuid FROM actor WHERE uuid = ANY(%s::uuid[])\"\n values = [groups]\n groups_uuid = app.db.fetchall(query, values)\n\n if len(groups_uuid) != len(groups):\n invalid_groups = [group for group in groups if group not in groups_uuid]\n print_error_cli(message=\"Unknown group. core.auth_view.BaseAuth - add_default_group.\\n \"\n \"invalid_groups - %s\" % invalid_groups)\n response = create_response_message(message=_(\"There is no such groups %(invalid_groups)s\",\n invalid_groups=invalid_groups), error=True)\n return response\n\n if groups_uuid:\n # TODO: change from admin on alowed groups when frontend add this flag\n admin_group = get_static_group('ADMIN')\n if not admin_group:\n print_error_cli(message=\"There is no admin group. core.auth_view.BaseAuth - add_default_group.\\n\")\n response = create_response_message(message=_(\"Some error occurred with admin group.\"), error=True)\n return response\n\n if admin_group.get('uuid') in groups:\n print_error_cli(message=\"Default group is admin. 
core.auth_view.BaseAuth - add_default_group.\\n \"\n \"data - %s\" % data)\n response = create_response_message(message=_(\"This group can't be used by default \"\n \"for your service.\"), error=True)\n return response\n\n else:\n default_group = get_default_user_group()\n if default_group:\n uinfo['groups'] = [default_group.get('uuid')]\n else:\n uinfo['groups'] = []\n\n return uinfo", "def create_user_dict(self, username, path = []):\n \n self.log.info(\"Creating user dictionary for user %r.\" % username)\n\n section_data = self.get_section_data(username, path)\n custom_data = self.get_custom_data()\n\n default_user_data = {\"admin\": False,\n \"sections\": [section_data],\n \"custom\": custom_data,\n \"root\": copy.deepcopy(path)}\n\n return default_user_data", "def default_json_values(self):\r\n return {}", "def default_values(self):\n\n if self.production:\n return {\n 'official_status_index': '5',\n 'official_down_status': 'down_status[official_status_index]',\n 'official_up_status': 'up_status[official_status_index]',\n 'official_invalid_status': 'invalid_status[official_status_index]',\n 'adblock': 'False',\n 'auto_continue': 'True',\n 'command_before_end': \"''\",\n 'custom_ip': \"'0.0.0.0'\",\n 'days_between_db_retest': '1',\n 'debug': 'False',\n 'domain': \"''\",\n 'generate_hosts': 'True',\n 'header_printed': 'False',\n 'to_filter': \"''\",\n 'less': 'False',\n 'logs': 'True',\n 'plain_list_domain': 'False',\n 'quiet': 'False',\n 'referer': \"''\",\n 'seconds_before_http_timeout': '3',\n 'share_logs': 'False',\n 'show_execution_time': 'False',\n 'show_percentage': 'True',\n 'split_files': 'False',\n 'travis': 'False',\n 'travis_autosave_minutes': '15',\n 'travis_autosave_commit': '\"PyFunceble - Autosave\"',\n 'travis_autosave_final_commit': '\"PyFunceble - Results\"',\n 'unified_file': 'True',\n 'link_to_repo': \"'https://github.com/funilrys/PyFunceble'\",\n 'iana_server': \"'whois.iana.org'\",\n 'current_datetime': 'strftime(\"%a %d %b %H:%m:%S %Z %Y\")',\n 'number_of_tested': '0',\n 'number_of_up': '0',\n 'number_of_down': '0',\n 'number_of_invalid': '0',\n 'http_code_status': 'True',\n 'http_code': \"''\",\n 'cleaned_done': 'False',\n 'no_files': 'False',\n 'current_dir': \"'%%current_dir%%'\"}\n return {\n 'current_dir': \"'\" + repr(self.path).strip(\"'\") + \"'\"\n }", "def _set_global_defaults(self, vm):\n raise NotImplementedError(\"Subclasses need to implement this\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
does the user own a paid account of any type?
def hasPaid():
    login()
    if not Dict['Authentication']:
        resetAuthInfo()
    authInfo = Dict['Authentication']
    if (time.time() - authInfo['loggedInSince']) < LOGIN_GRACE:
        if authInfo['AnimePremium'] is True or authInfo['DramaPremium'] is True:
            return True
    return False
[ "def is_pro_account():\n try:\n windscribe.login(username, password)\n return \"Free\" in windscribe.account().plan\n except:\n return False", "def is_paid_via_app(self):\n return self.channel == SaleTrade.WX or self.channel == SaleTrade.ALIPAY or self.channel == SaleTrade.BUDGET", "def existsAccount(self, user: str) -> bool:\n pass", "def active_account_exists(public_address):\n return app.kin_sdk.check_account_exists(public_address) and app.kin_sdk.check_account_activated(public_address)", "def can_use_pricing(event, user, pricing):\n pricings = get_available_pricings(event, user)\n return pricings.filter(pk=pricing.pk).exists()", "def oaallowsauthorpaid(self):\n return self._entry.get('oaAllowsAuthorPaid')", "def isFormerAccount(account):\n return user_model.User.all().filter('former_accounts', account).count() > 0", "def return_payment_type_from_db(self):\n with sqlite3.connect('bangazon.db') as dbget:\n c = dbget.cursor()\n command = \"\"\"\n SELECT account_number\n FROM PaymentOption\n WHERE account_number = {}\n \"\"\".format(self.account_number)\n\n try:\n c.execute(command)\n except:\n return False\n\n account_info = c.fetchall()\n\n return True", "def check_permission(self):\n if self.committee and self.committee.premium:\n if self.premium_but_free():\n return True\n\n # must be authenticated\n if not current_user.is_authenticated:\n return False\n\n if not current_user.is_confirmed():\n return False\n\n # check subscription\n return current_user.subscribed_to_committee(self.committee)\n\n return True", "def isOwner(self, account):\n return self.email == account.email", "def credit(self, account):\n #stefan\n if self.account >= \"500\": # initialize self.account\n return True\n else:\n return False", "def is_pre_paid(self):\n return self._is_pre_paid", "def signup_eligible(user):\n req = Request.query.filter_by(user_id=user.id).order_by(Request.id.desc()).first()\n if req is not None:\n current_month = datetime.datetime.utcnow().month\n current_year = datetime.datetime.utcnow().year\n if req.request_date.month == current_month and req.request_date.year == current_year:\n return False\n else:\n return True\n else:\n #capture_message('checking eligibility failure for user /u/{}'.format(user.username))\n return True", "def _intermediary_account_exists(self):\n party_details = get_counterpartys_intermediary_details(self.acm_obj)\n if party_details.get('NAME'):\n return True\n return False", "def is_purchased(self):\n if self.purchase_result:\n return self.purchase_result.success\n return False", "async def paid(ctx):\n\n role = discord.utils.get(ctx.guild.roles, name='Paid')\n\n if role:\n await ctx.message.author.add_roles(role)\n await ctx.message.add_reaction('\\U00002705')\n\n else:\n await ctx.channel.send(\"```Paid Role does not exist in this guild```\")", "def has_bought_quiz(quiz: Quiz, user: User):\n return quiz.is_free or user.bought_quizzes.filter(pk=quiz.pk).exists()", "def has_account_id(self):\n if self.get_account_id():\n return True\n return False", "def owner_profile_account(self) -> bool:\n return pulumi.get(self, \"owner_profile_account\")", "def test_account_types_limited_to_user(self):\n user2 = get_user_model().objects.create_user(\n 'whoami@gmail.com',\n 'superpassword'\n )\n AccountType.objects.create(user=user2, name='Bank Account')\n account_type = AccountType.objects.create(\n user=self.user,\n name='Investments Account'\n )\n\n res = self.client.get(ACCOUNT_TYPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n 
self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], account_type.name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
change the preferred resolution serverside to integer res
def setPrefResolution(res):
    if hasPaid():
        res2enum = {360:'12', 480:'20', 720:'21', 1080:'23'}
        response = jsonRequest( { 'req': "RpcApiUser_UpdateDefaultVideoQuality", 'value': res2enum[res] } )
        if response.get('result_code') == 1:
            return True
        else:
            return False
    return False
[ "def s_resolution(self):\n return self.get('s_resolution') * u.arcsec", "def get_resolution():\n\treturn user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)", "def s_resolution_max(self):\n rmax = self.get('s_resolution_max', default=None)\n return rmax if not rmax else rmax * u.arcsec", "def __send_resolution(self):\n resolution_bytes = self.width.to_bytes(4, self.endianness) + self.height.to_bytes(4, self.endianness)\n self.socket.send(resolution_bytes)", "def _set_pixel_size(self) -> None:\n # Using az resolution\n if self.sensor_mode == CapellaSensorMode.SP:\n def_pixel_size = 0.35\n def_res = 0.5\n elif self.sensor_mode == CapellaSensorMode.SM:\n def_pixel_size = 0.6\n def_res = 1.0\n elif self.sensor_mode == CapellaSensorMode.SS:\n def_pixel_size = 0.8\n def_res = 1.2\n else:\n raise InvalidProductError(f\"Unknown sensor mode: {self.sensor_mode}\")\n self.pixel_size = def_pixel_size\n self.resolution = def_res", "def test_deviceResolution(ps):\n ps.setResolution(\"12\")\n assert ps.resolution == 1, \"Picoscope variable was not set.\"\n assert ps.getResolution() == \"12\", \"Picoscope resolution is wrong.\"\n assert ps.MIN_VALUE == -32736, \"Minimum adc value is wrong.\"\n assert ps.MAX_VALUE == 32736, \"Maximum adc value is wrong.\"\n print(\"Device resolution test passed.\")", "def set_resolution(self, resolution):\n self.resolution = resolution\n if self.fullscreen:\n self.display = pygame.display.set_mode(self.resolution, SCREEN_FLAGS.FULLSCREEN)\n else:\n self.display = pygame.display.set_mode(self.resolution, SCREEN_FLAGS.WINDOWED)\n ResizedSurface.clear_lut() #Clear the lut of resized surfaces\n for screen in self.screens:\n screen.set_resolution(resolution)\n for popup in self.popups.sprites():\n popup.set_canvas_size(resolution)\n LOG.log('DEBUG', \"Changed resolution to \", resolution)", "def set_resolution(width, height):\n\tdd = get_primary_device()\n\tdm = DEVMODE()\n\tdm.dmSize = ctypes.sizeof(dm)\n\tif not user32.EnumDisplaySettingsA(dd.DeviceName, ENUM_CURRENT_SETTINGS, ctypes.pointer(dm)):\n\t\traise Exception(\"Failed to get display settings.\")\n\n\tdm.dmPelsWidth = width\n\tdm.dmPelsHeight = height\n\tdm.dmFields = (DM_PELSWIDTH | DM_PELSHEIGHT)\n\tif user32.ChangeDisplaySettingsA(ctypes.byref(dm), CDS_TEST)!=DISP_CHANGE_SUCCESSFUL:\n\t\traise Exception(\"Graphics mode not supported.\")\n\n\treturn user32.ChangeDisplaySettingsA(ctypes.byref(dm), 0)==DISP_CHANGE_SUCCESSFUL", "def s_resolution_min(self):\n rmin = self.get('s_resolution_min', default=None)\n return rmin if not rmin else rmin * u.arcsec", "def setResolution(res, mask=defaultDetMask):\n\t\tglobal _edsResolution\n\t\tif _pt:\n\t\t\t_pt.setResolutionMode(_pt.ResolutionMode.valueOf(res), mask)\n\t\t\t_edsResolution = res\n\t\t\tupdateCalibration(False)", "def update_screen_resolution(settings):\n pygame.display.set_mode(settings.resolutions[settings.respointer])\n settings.arena_x = ((settings.resolution()[0]\n - settings.resolution()[1]) // 2)\n settings.arena_dimension = settings.resolution()[1]\n settings.fullscreen = False", "def test_set_resolution_02(fx_asset):\n with Image(filename=str(fx_asset.join('mona-lisa.jpg'))) as img:\n img.resolution = 100\n assert img.resolution == (100, 100)", "def get_resolution(self):\n return self._EXPECTED_RESOLUTION", "def output_resolutions(self):\n raise NotImplementedError", "def thumb_scale(self):\n if getattr(self.data, \"no_thumbs\", False):\n # Individual setting overrides ...\n return None\n thsize = getattr(self.data, \"thumb_scale\", \"\")\n if 
thsize:\n return thsize\n registry = getUtility(IRegistry)\n settings = registry.forInterface(ISiteSchema, prefix=\"plone\", check=False)\n thumb_scale_portlet = settings.thumb_scale_portlet\n return thumb_scale_portlet", "def video_resolution(self, res):\n self._video_resolution = tuple(res)\n self.process_image()\n self.clear_segments()", "def setDPI(self, dpi):\n self.dpi = dpi\n self.scanner.SetCapability(\n twain.ICAP_XRESOLUTION, twain.TWTY_FIX32, float(self.dpi))\n self.scanner.SetCapability(\n twain.ICAP_YRESOLUTION, twain.TWTY_FIX32, float(self.dpi))", "def find_screen_resolution(settings):\n monitor_w = pygame.display.Info().current_w\n monitor_h = pygame.display.Info().current_h\n for i in range(len(settings.resolutions)):\n # This checks if the monitors resolution matches any of the\n # avaliable ones.\n if settings.resolutions[i][0] == monitor_w and \\\n settings.resolutions[i][1] == monitor_h:\n settings.respointer = i\n\n if settings.respointer is None:\n # If a match resolutoin can't be found it will try to find one with\n # the same aspect ratio.\n settings.respointer = 1\n for i in range(len(settings.resolutions)):\n if (monitor_w // monitor_h ==\n settings.resolutions[i][0] // settings.resolutions[i][1]):\n respointer = i", "def human_readable_resolution(r_ms: int) -> str:\n switcher = {\n 60000: '1 Minute',\n 180000: '3 Minutes',\n 300000: '5 Minutes',\n 600000: '10 Minutes',\n 900000: '15 Minutes',\n 1800000: '30 Minutes',\n 2700000: '45 Minutes',\n 3600000: '1 Hour',\n 7200000: '2 Hours',\n 10800000: '3 Hours',\n 14400000: '4 Hours',\n 21600000: '6 Hours',\n 43200000: '12 Hour',\n 86400000: '1 Day'\n }\n\n return switcher.get(r_ms, f\"Undetectable resolution: {r_ms}\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove seriesID from queue
def removeFromQueue(seriesId):
    login()
    if not isRegistered():
        return False
    response = makeAPIRequest2("req=RpcApiUserQueue_Delete&group_id=%s"%seriesId)
    #FIXME response should have meaning; do something here?
    Log.Debug("remove response: %s"%response)
    return True
[ "def RemoveFromQueue(sender,seriesId):\n\tlogin()\n\tresult = removeFromQueue(seriesId)\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Removed from Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not remove from Queue.')", "def remove(self):\r\n self.queue.pop(0)", "def remove(self, node_id):\n for x,(y,z) in enumerate(self.queue):\n if z[1][-1] == node_id:\n del self.queue[x]\n return self.queue\n raise NotImplementedError", "def remove_from_queue(self, index):\r\n # TODO: what do these parameters actually do?\r\n updid = '0'\r\n objid = 'Q:0/' + str(index + 1)\r\n self.avTransport.RemoveTrackFromQueue([\r\n ('InstanceID', 0),\r\n ('ObjectID', objid),\r\n ('UpdateID', updid),\r\n ])", "def test_queues_remove_item_from_queue_v1(self):\n pass", "def pop_queue(self, container, obj, q_ts, q_record):\n q_path = '/%s/%s/%s' % (MISPLACED_OBJECTS_ACCOUNT, container, obj)\n x_timestamp = slightly_later_timestamp(max(q_record, q_ts))\n self.stats_log('pop_queue', 'remove %r (%f) from the queue (%s)',\n q_path, q_ts, x_timestamp)\n headers = {'X-Timestamp': x_timestamp}\n direct_delete_container_entry(\n self.swift.container_ring, MISPLACED_OBJECTS_ACCOUNT,\n container, obj, headers=headers)", "def remove_song_from_queue(self, nr=0):\n q = self.get_queue()\n self.queue = q[:nr] + q[nr + 1 :]\n return q[nr]", "def deleteSeries(self, series_id):\n cursor = self.connection.cursor()\n series_id = (series_id,)\n\n cursor.execute('''DELETE FROM info\n WHERE series_id = ?''', series_id)\n\n self.connection.commit()\n cursor.close()", "def remove_job(self, job_id):", "def remove(self):\n return(self.queue.pop(0))", "def wipeQueue():\n\tq.clear()", "def cancel(self, event):\r\n self.queue.remove(event)\r\n heapq.heapify(self.queue)", "def test_queue_remove(self):\n q1 = self.party.enqueue_song(self.user, 't123')\n q2 = self.party.enqueue_song(self.user, 't456')\n q2.upvote(self.user2)\n next_entry = self.party.dequeue_next_song()\n self.assertEquals(next_entry, q2)\n self.party.save(self.redis)\n p = Party.get(self.redis, self.party.id)\n self.assertEquals(p.queue[0].id, q1.id)", "def clear_queue(self):\r\n self.avTransport.RemoveAllTracksFromQueue([\r\n ('InstanceID', 0),\r\n ])", "def removeTimerSensor(self, s: 'SoTimerQueueSensor') -> \"void\":\n return _coin.SoSensorManager_removeTimerSensor(self, s)", "def deadline_remover(server_Q, scheduler_Q, task_id):\n global number_of_deadlined_tasks,number_of_deadlined_tasks_class1,number_of_deadlined_tasks_class2\n type = 0\n for i in range(len(scheduler_Q)):\n if str(id(scheduler_Q[i])) == task_id:\n type = scheduler_Q[i].type\n del scheduler_Q[i]\n return\n for j in range(len(server_Q)):\n for i in range(len(server_Q[j])):\n if str(id(server_Q[j][i])) == task_id:\n type = server_Q[j][i].type\n del server_Q[j][i]\n return\n if number_of_departed_tasks > 5000 :\n number_of_deadlined_tasks += 1\n if(type == 1):\n number_of_deadlined_tasks_class1 += 1\n else:\n number_of_deadlined_tasks_class2 += 1", "def remove_bar(self):\n self.space.remove(self.pusher_body, self.pusher_shape)", "def removeIndex(self, index):\r\n item = self.queue.pop(index)\r\n return item", "def clear(self):\r\n self.queue = []", "def remove_from_client_queue(client_id, command_socket_tuple):\n\n if command_socket_tuple in RPCS.Northbound_Queue[client_id]:\n RPCS.Northbound_Queue[client_id].remove(command_socket_tuple)\n # Check if client entry can be removed form Northbound Queue\n if not RPCS.Northbound_Queue[client_id]:\n RPCS.Northbound_Queue.pop(client_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add seriesId to the queue.
def addToQueue(seriesId):
    login()
    if not isRegistered():
        return False
    Log.Debug("add mediaid: %s"%seriesId)
    response = makeAPIRequest2("req=RpcApiUserQueue_Add&group_id=%s"%seriesId)
    Log.Debug("add response: %s"%response)
    return True
[ "def AddToQueue(sender,seriesId,url=None):\n\tlogin()\n\tresult = addToQueue(seriesId)\n\t\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Added to Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not add to Queue.')", "def add_to_queue(self, video_id):\n self.start_session_if_none()\n self._session.add_to_queue(video_id)", "def add_to_queue(self, sid, data):\n self.activation_queue.put((sid, data))", "def __add_to_queue(self, _id, url):\n payload = dumps(dict(\n id=str(_id),\n url=url\n ))\n self.chan.basic_publish(\n exchange='',\n routing_key=cfg.settings.mq.queue_name,\n body=payload,\n properties=pika.BasicProperties(\n delivery_mode=2\n )\n )", "def add_to_queue(self):\n self.manager.client.song_q.put(self.get_text(None))", "def queue_id(self, queue_id):\n\n self._queue_id = queue_id", "def push_to_queue(self):\n redis = self.redis_pool.get_connection()\n redis.publish(self.collection_name, self.worker_id)", "def add(self, process):\r\n self.queue.append(process)", "def add_job(self, job_id):\n\n self._count += 1\n self._pending_job_ids.append(job_id)", "def record(self, job_id: int) -> None:\n if self.current_size >= self.max_size:\n self._linked_list.popleft()\n self.current_size -= 1\n\n self._linked_list.append(job_id)\n self.current_size += 1", "def added_to_queue(self, link):", "def RemoveFromQueue(sender,seriesId):\n\tlogin()\n\tresult = removeFromQueue(seriesId)\n\tif result:\n\t\treturn MessageContainer(\"Success\",'Removed from Queue')\n\telse:\n\t\treturn MessageContainer(\"Failure\", 'Could not remove from Queue.')", "def add(self, frame_id, frame, quality = None):\n self._frames.append((str(frame_id), frame, quality))", "def add_to_queue(self, data):\n self.registration_queue.put(data)", "def add_to_cluster(self, doc_id):\n self.docs_ids.append(doc_id)", "def add(self, n):\n self.queue.append(n)", "def add_to_queue(self, uri, device_id=None):\n\n uri = self._get_uri(\"track\", uri)\n\n endpoint = \"me/player/queue?uri=%s\" % uri\n\n if device_id is not None:\n endpoint += \"&device_id=%s\" % device_id\n\n return self._post(endpoint)", "def _add_queue(self, name, register_event):\n q = queue.Queue()\n register_event(q.put)\n self._queues[name] = q", "def _additem(self):\n\n self.queue.put(self._genitem())", "def addLogRecord(self, items):\n self.queue.put(items)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return an episode dict object identified by mediaId. If you know the mediaId, it SHOULD be in the cache already. If not, you could get None if recovery doesn't work. This might happen with mediaId's that come from the great beyond (queue items on server, e.g.) and are in series with a lot of episodes. Sry bout that.
def getEpisodeDict(mediaId):
    if str(mediaId) not in Dict['episodes']:
        # get brutal
        recoverEpisodeDict(mediaId)
    return Dict['episodes'].get(str(mediaId))
[ "def __getitem__(self, media_id):\n for media in self:\n if media.id == media_id:\n return media\n\n raise KeyError('No media with id {}'.format(media_id))", "def constructMediaObject(episode):\n\tif True or len(episode['availableResolutions']) == 0:\n\t\tepisode['availableResolutions'] = getAvailResFromPage(episode['link'])\n\n\t\t# FIXME I guess it's better to have something than nothing? It was giving Key error\n\t\t# on episode number\n\t\tif str(episode['mediaId']) not in Dict['episodes']:\n\t\t\tDict['episodes'][str(episode['mediaId'])] = episode\n\t\n\t\tDict['episodes'][str(episode['mediaId'])]['availableResolutions'] = episode['availableResolutions']\n\t\n\tvideoInfo = getVideoInfo(episode['link'], episode['mediaId'], episode['availableResolutions'])\n\tvideoInfo['small'] = (isPaid() and isPremium(episode.get('category'))) is False\n\t\n\tepsObject = EpisodeObject(\n\t\turl = videoInfo['baseUrl'], #dunno if this will work\n\t\ttitle = episode['title'],\n\t\tsummary = episode['description']\n\t)\n\n\tfor q in episode['availableResolutions']:\n\t\tdur = episode.get('duration')\n\t\tif not (dur and dur > 0):\n\t\t\tdur = 0\n\t\t\t\n\t\tmo = MediaObject(\n\t\t\t\tduration = dur,\n\t\t\t\tvideo_resolution = q,\n\t\t\t\tprotocol = Protocol.WebKit,\n\t\t\t\tparts = [\n\t\t\t\t\tPartObject(\t\t\t\t\n\t\t\t\t\t\tkey = WebVideoURL(getVideoUrl(videoInfo, q))\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\tepsObject.add(mo)\n\tdir = ObjectContainer( objects = [epsObject])\n\treturn dir", "def episode(self, episode_id, market=None):\n\n trid = self._get_id(\"episode\", episode_id)\n return self._get(\"episodes/\" + trid, market=market)", "def get_episode(self, episode_id):\n\n raw_response = requests_util.run_request('get', self.API_BASE_URL + '/episodes/%d' % episode_id,\n headers=self.__get_header_with_auth())\n\n return self.parse_raw_response(raw_response)", "def __getitem__(self, id):\n # First, check to see if enough time has passed since my\n # last query.\n self.limiter.wait()\n \n try:\n handle = NCBI.efetch(\n db=\"pubmed\", id=id, retmode='text', rettype='medlars')\n except IOError, x:\n # raise a KeyError instead of an IOError\n # XXX I really should distinguish between a real IOError and\n # if the id is not in the database.\n raise KeyError, x\n if self.parser is not None:\n return self.parser.parse(handle)\n return handle.read()", "def get_episode_metadata(show_id, season_num, episode_num):\n metadata_provider = ADDON.getSetting(\"tv_metadata_provider\")\n info, created_time = fetch_episode_from_db(show_id,\n str(season_num),\n str(episode_num),\n metadata_provider, LANG)\n if info:\n return info\n elif created_time and float(created_time) <= time.time() + 3600:\n return info\n if info:\n return info\n try:\n if metadata_provider == \"Trakt\":\n headers = {\n 'Content-Type': 'application/json',\n 'trakt-api-version': '2',\n 'trakt-api-key': TRAKT_CLIENT_ID\n }\n url = '{0}/shows/{1}/seasons/{2}/episodes/{3}?extended=full'.format(\n TRAKT_API_ENDPOINT, show_id, season_num, episode_num)\n show_metadata = get_show_metadata(show_id)\n info = requests.get(url, headers=headers, verify=False).json()\n if LANG != \"en\":\n translation_url = TRAKT_API_ENDPOINT + \\\n \"/shows/{1}/seasons/{2}/episodes/{3}/translations/{4}\".format(\n TRAKT_API_ENDPOINT, show_id, season_num,\n episode_num, LANG)\n translation_info = requests.get(\n translation_url, headers=headers, verify=False).json()\n if translation_info:\n translation_info = translation_info[0]\n for key in translation_info.iterkeys():\n 
info[key] = translation_info[key]\n info = _convert_trakt_episode_metadata(show_metadata, info)\n elif metadata_provider == \"TVDB\":\n tvdb_id = tvdb.search_by_imdb(show_id)\n if tvdb_id:\n show = tvdb[tvdb_id]\n show_metadata = get_show_metadata(show_id)\n season = show[int(season_num)]\n season_metadata = _convert_tvdb_season_metadata(\n show_metadata, season, language=LANG)\n episode_metadata = _convert_tvdb_episode_metadata(\n show_id, season_metadata, season[int(episode_num)])\n info = episode_metadata\n except:\n pass\n save_episode_to_db(show_id, season_num, episode_num, metadata_provider,\n LANG, info)\n return info", "def retrieve(request, pk):\n queryset = request.user.wantToWatchMediaItem.all()\n mediaItem = queryset.filter(mdbID=pk)\n # Retrieves only if it exists.\n obj = get_object_or_404(mediaItem)\n return Response(WantToWatchMediaItemSerializer(obj).data)", "def lookup_episode(self, seriesName, episodeName, id = 0):\n\n episodeName = episodeName.strip()\n\n if seriesName is None or episodeName is None:\n return (None,None)\n\n if seriesName != self.last_series_name:\n logging.info(\" - Searching thetvdb for:{}\".format(seriesName))\n search = tvdb.Search()\n response = search.series(seriesName)\n s = search.series\n\n if id > 0:\n for s in response:\n if s['id'] == id:\n show = tvdb.Series(id)\n break\n elif id == 0:\n show = tvdb.Series(s[0]['id'])\n else:\n logging.info(' - Unable to find series id:{} - terminating'.format(id))\n return (None,None)\n\n self.last_series_name = seriesName\n self.last_series_obj = show\n\n show = self.last_series_obj\n\n episodes = show.Episodes.all()\n logging.info(\" - Found {} episodes\".format(len(episodes)))\n if (len(episodes)==0):\n return (\"error\", \"no episodes found\")\n\n if episodes == []:\n return (None, None)\n\n for i, e in enumerate(episodes[::-1]):\n ep_name = e['episodeName']\n\n if ep_name is not None:\n n = Levenshtein.distance(episodeName, ep_name)\n\n if n <= MATCH_TOLERANCE:\n e_id = e['airedEpisodeNumber']\n s_id = e['airedSeason']\n logging.info(\" - Matched [{0}] to episode name: [{1}]\".format(episodeName, ep_name))\n return (str(s_id), str(e_id))\n\n logging.info(\" - UNABLE TO MATCH: {}\".format(episodeName))\n return (\"error\", \"expected series not found\")", "def find_episode_guid(self, guid: PlexGuid, lookup: TraktLookup):\n te = lookup.from_guid(guid)\n if te:\n return te\n\n logger.debug(f\"Retry using search for specific Plex Episode {guid.guid}\")\n if not guid.is_episode:\n return self.find_by_guid(guid)\n return None", "def determine_next_episode(\n\t\tself):\n\n\t\tresult = dict()\n\n\t\tsjmanager.log.log('Trying to determine next show to watch')\n\n\t\t# First up, check which season and which episode is in the watch cache.\n\t\trow = self.sql.execute(\"\"\"SELECT \n\t\t\tseason_title,\n\t\t\tepisode_title,\n\t\t\tfinished \n\t\t\tFROM last_watched \n\t\t\tWHERE show_url = ?\"\"\",\n\t\t\t(self.url,)).fetchone()\n\n\t\tsjmanager.log.log(\"Fetched the following row: {}, {}, {}, {}\".format(row['season_title'],row['episode_title'],row['finished'],row['finished'] == str(0)))\n\n\t\t# If it's not finished, this means there's a cache file lying around, so\n\t\t# return episode and season title so we can find it.\n\t\tif str(row['finished']) == '0':\n\t\t\tsjmanager.log.log(\"Previous show isn't finished, so taking that as new show\")\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = row['episode_title']\n\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t'Ok, 
season title is {}, episode title is {}'.format(\n\t\t\t\trow['season_title'],\n\t\t\t\trow['episode_title']))\n\n\t\t# Otherwise, if the episode title isn't numeric, there's no chance to know\n\t\t# which episode (or even season) is next. So we return nothing\n\t\tif not row['episode_title'].isnumeric():\n\t\t\tsjmanager.log.log('The episode title is not numeric, so returning nothing')\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\t# If the episode title _is_ numeric, there's two cases that can happen:\n\t\t#\n\t\t# 1. There's an episode in the current season with a number one higher than\n\t\t# the current episode\n\t\t# 2. No episode with a higher number exists. In that case, maybe we have\n\t\t# another season to continue to\n\t\tsjmanager.log.log('Cool, the episode title is numeric')\n\n\t\tseasons = self.seasons(\n\t\t\trow['season_title'])\n\n\t\t# Get all the mangled episode titles in the season\n\t\tepisode_titles = set()\n\t\tfor season in seasons:\n\t\t\tepisode_titles = episode_titles.union(\n\t\t\t\tseason.episode_titles())\n\n\t\tif str(int(row['episode_title']) + 1) in episode_titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Cool, we've got an episode called {}, continuing with that\".format(\n\t\t\t\t\tint(row['episode_title']) + 1))\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = str(int(row['episode_title']) + 1)\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"No higher episode found, checking if season is numeric\")\n\n\t\tif not row['season_title'].isnumeric():\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Season is not numeric, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"Season is numeric, checking if a higher season exists\")\n\n\t\ttitles = self.season_titles()\n\n\t\tif not str(int(row['season_title'])+1) in titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"No higher season exists, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"A higher season exists, returning this season but no episode\")\n\t\tresult['season_title'] = str(int(row['season_title'])+1)\n\t\tresult['episode_title'] = None\n\t\treturn result", "def get_episode_media_url(self, podcast_entry):\r\n links = podcast_entry[\"links\"]\r\n\r\n for link in links:\r\n if \"audio\" in link[\"type\"]:\r\n return link[\"href\"]", "def get_video_metadata(self,vid,url):\n try:\n self._logger.info('Ok!...Lets try to retrieve some metada from DailyMotion')\n id=_get_video_id(url)\n if id!='':\n srv = DailyMotionService(self._logger)\n item_meta=self._parse_entry(srv.get_video_entry(id))\n item_meta['video-id']=str(vid)\n return item_meta\n else:\n self._logger.error('Ouch!...An illegal url was provided. 
It was impossible to get the video id.')\n return None\n except:\n self._logger.exception('Dammit!...An error ocurred while retrieving metadata from DailyMotion...')\n return None\n else:\n self._logger.info('Great!...The DailyMotion search was succesfull...')", "def getEpisodeArt(episode):\n\tseriesId = None\n\tfor sk in Dict['series'].keys():\n\t\tif Dict['series'][str(sk)]['title']==episode['seriesTitle']:\n\t\t\tseriesId = int(sk)\n\tif seriesId is not None:\n\t\tartUrl = \"\"\n\t\tif Dict['series'][str(seriesId)]['tvdbId'] is not None and Prefs['fanart'] is True:\n\t\t\tartUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)\n\t\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\t\tif artUrl is not None:\n\t\t\t\tart = Function(getArt,url=artUrl)\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = Dict['series'][str(seriesId)]['art']\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = R(CRUNCHYROLL_ART)\n\telse:\n\t\tartUrl = R(CRUNCHYROLL_ART)\n\tLog.Debug(\"artUrl: %s\"%artUrl)\n\treturn artUrl", "def get_program_media_object(program_id, media_id):\n\n\t#Build request for playout-endpoint\n\tpayload = {'app_id': app.config['YLE_APP_ID'], 'app_key': app.config['YLE_APP_KEY'], 'program_id': program_id, 'media_id': media_id, 'protocol': media_protocol}\n\n\treq = requests.get(api_urls['playouts'], params=payload)\n\t\n\ttry:\n\t\tjson = req.json()\n\t#TODO: Improve error handling\n\texcept (ValueError, RuntimeError, TypeError, NameError):\n\t\treturn 'JSON error'\n\n\t#YLE API might return multiple media objects for program, so pick up the one with the best bitrate\n\tvideos = json['data']\n\tvideo = {'bitrate': 0, 'width': 0, 'height': 0, 'url': ''}\n\n\tfor v in videos:\n\t\tif v['videoBitrateKbps'] > video['bitrate']:\n\t\t\tvideo['bitrate'] = v['videoBitrateKbps']\n\t\t\tvideo['width'] = v['width']\n\t\t\tvideo['height'] = v['height']\n\t\t\tvideo['url'] = v['url']\n\n\tvideo['url'] = decrypt_media_url(video['url'])\n\n\treturn video", "def get_artikel_by_id(self, id):\n with ArtikelMapper() as mapper:\n return mapper.find_by_id(id)", "def __delitem__(self, media_id):\n\n for idx, record in enumerate(self._data):\n if record['id'] == media_id:\n self._data.pop(idx)\n return\n\n raise KeyError('No media with id{}'.format(media_id))", "def get_movie_metadata(movie_id):\n metadata_provider = ADDON.getSetting(\"movie_metadata_provider\")\n info, created_time = fetch_from_db(movie_id, metadata_provider, LANG)\n if info:\n return info\n elif created_time and float(created_time) <= time.time() + 3600:\n return info\n try:\n if metadata_provider == \"Trakt\":\n headers = {\n 'Content-Type': 'application/json',\n 'trakt-api-version': '2',\n 'trakt-api-key': TRAKT_CLIENT_ID\n }\n url = TRAKT_API_ENDPOINT + \"/movies/\" + movie_id + '?extended=full'\n info = requests.get(url, headers=headers, verify=False).json()\n xbmc.log(\"info:\" + repr(info), xbmc.LOGNOTICE)\n if LANG != \"en\":\n translation_url = TRAKT_API_ENDPOINT + \\\n \"/movies/\" + movie_id + \"/translations/\" + LANG\n translation_info = requests.get(\n translation_url, headers=headers, verify=False).json()\n if translation_info:\n translation_info = translation_info[0]\n for key in translation_info.iterkeys():\n info[key] = translation_info[key]\n genres_dict = trakt_genres(\"movies\")\n info = __convert_trakt_movie_metadata(info, genres_dict)\n elif metadata_provider == \"TMDB\":\n genres_dict = tmdb_movie_genres(LANG)\n info = tmdbsimple.Find(movie_id).info(\n 
external_source=\"imdb_id\", language=LANG)[\"movie_results\"][0]\n if (not info or (info.get(\"overview\", None) is None and info.get('plot', None) is None) and\n LANG != \"en\"):\n info, created_time = fetch_from_db(movie_id, metadata_provider, \"en\")\n if info:\n return info\n else:\n info = tmdbsimple.Find(movie_id).info(\n external_source=\"imdb_id\",\n language=\"en\")[\"movie_results\"][0]\n info = _convert_tmdb_movie_metadata(\n info, movie_id, genres_dict)\n save_to_db(movie_id, metadata_provider, \"en\", info)\n info = _convert_tmdb_movie_metadata(info, movie_id, genres_dict)\n except:\n pass\n save_to_db(movie_id, metadata_provider, LANG, info)\n return info", "def MediaInfoLookup(url):\n\t\n\t# Get clean copy of URL user has played.\n\tdecoded_url = String.Decode(str(url))\n\t#Log(decoded_url)\n\t\n\t# See if the URL being played is on our recently browsed list.\n\titem = cerealizer.loads(Data.Load(BROWSED_ITEMS_KEY)).getByURL(decoded_url)\n\n\tif (item is None):\n\t\tLog(\"****** ERROR: Watching Item which hasn't been browsed to\")\n\t\treturn \"\"\n\t\n\t# Return the media info that was stored in the recently browsed item.\n\treturn demjson.encode(item[0])", "def EpisodeDetail(title, url):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n try:\n html = html_from_url(clean_url(url))\n except Exception as e:\n Log.Critical('* EpisodeDetail Error: %s' %str(e))\n message = 'This media has expired.' if ('HTTP Error' in str(e) and '404' in str(e)) else str(e)\n return MessageContainer('Warning', message)\n\n ptitle = html.xpath(\"//title/text()\")[0].rsplit(\" Streaming\",1)[0].rsplit(\" Download\",1)[0]\n thumb = html.xpath('//img[@id=\"nameimage\"]/@src')\n thumb = (thumb[0] if thumb[0].startswith('http') else clean_url(thumb[0])) if thumb else None\n\n wpm = html.xpath('//iframe[@id=\"wpm\"]/@src')\n if not wpm:\n return MessageContainer('Warning', 'No Video Source Found.')\n\n pass_html = html_from_url(clean_url(wpm[0]))\n video_urls = []\n source_iframe = pass_html.xpath('//iframe/@src')\n if source_iframe:\n part = 0\n if pass_html.xpath('//div[starts-with(@id, \"part\")]'):\n part = 1\n\n try:\n video_urls.append((part, html_from_url(clean_url(source_iframe[0])).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Error('* EpisodeDetail Error: %s' %str(e))\n pass\n\n if part != 0:\n base_iframe = source_iframe[0].split('.php')[0]\n count = 1\n more = True\n while more and (count < 5):\n count += 1\n try:\n video_urls.append((count, html_from_url(clean_url(base_iframe + '%i.php' %count)).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Warn('* EpisodeDetail Warning: %s' %str(e))\n more = False\n\n for p, u in sorted(video_urls):\n if 'prx.proxy' in u:\n u = 'https://docs.google.com/file/' + u.split('/file/')[1]\n oc.add(VideoClipObject(\n title='%i-%s' %(p, ptitle) if p != 0 else ptitle,\n thumb=Callback(get_thumb, url=thumb),\n url=u\n ))\n\n trailpm = html.xpath('//iframe[@id=\"trailpm\"]/@src')\n if trailpm:\n thtml = html_from_url(clean_url(trailpm[0]))\n yttrailer = thtml.xpath('//iframe[@id=\"yttrailer\"]/@src')\n if yttrailer:\n yttrailer_url = yttrailer[0] if yttrailer[0].startswith('http') else 'https:' + yttrailer[0]\n if 'prx.proxy' in yttrailer_url:\n yttrailer_url = 'http://www.youtube.com/embed/' + yttrailer_url.split('/embed/')[1]\n oc.add(VideoClipObject(url=yttrailer_url, thumb=R(ICON_SERIES), title=\"Watch Trailer\"))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No 
Media Found')", "def _get_details_by_evid(self, evid):\n event_data = self._router_request(\n self._make_request_data(\n 'detail',\n dict(\n evid=evid,\n )\n )\n )\n\n return event_data['event'][0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
try everything possible to recover the episode info for mediaId and save it in Dict{}. If it fails, return none.
def recoverEpisodeDict(mediaId):
    Log.Debug("#######recovering episode dictionary for mediaID %s" % str(mediaId))
    # get a link with title in it.
    #import urllib2
    req = urllib2.urlopen(BASE_URL+"/media-" + str(mediaId) + "?pskip_wall=1")
    redirectedUrl = req.geturl()
    req.close
    redirectedUrl = redirectedUrl.replace("?pskip_wall=1", "")
    seriesName = redirectedUrl.split(".com/")[1].split("/")[0]
    seriesUrl = seriesTitleToUrl(seriesName)
    getEpisodeListFromFeed(seriesUrl) # for side-effect of caching episode
    if str(mediaId) in Dict['episodes']:
        return Dict['episodes'][str(mediaId)]
    # FIXME
    # not good so far, we need a feed that provides full episodes. Yikes.
    # try grabbing from boxee_feeds
    # need seriesID as in boxee_feeds/showseries/384855
    # which can be retrieved from the seriesUrl contents, whew...
    # alternatively, use http://www.crunchyroll.com/series-name/episodes
    # which gives full episodes, but, well, is HTML and has less media info
    return None
[ "def get_episode_metadata(show_id, season_num, episode_num):\n metadata_provider = ADDON.getSetting(\"tv_metadata_provider\")\n info, created_time = fetch_episode_from_db(show_id,\n str(season_num),\n str(episode_num),\n metadata_provider, LANG)\n if info:\n return info\n elif created_time and float(created_time) <= time.time() + 3600:\n return info\n if info:\n return info\n try:\n if metadata_provider == \"Trakt\":\n headers = {\n 'Content-Type': 'application/json',\n 'trakt-api-version': '2',\n 'trakt-api-key': TRAKT_CLIENT_ID\n }\n url = '{0}/shows/{1}/seasons/{2}/episodes/{3}?extended=full'.format(\n TRAKT_API_ENDPOINT, show_id, season_num, episode_num)\n show_metadata = get_show_metadata(show_id)\n info = requests.get(url, headers=headers, verify=False).json()\n if LANG != \"en\":\n translation_url = TRAKT_API_ENDPOINT + \\\n \"/shows/{1}/seasons/{2}/episodes/{3}/translations/{4}\".format(\n TRAKT_API_ENDPOINT, show_id, season_num,\n episode_num, LANG)\n translation_info = requests.get(\n translation_url, headers=headers, verify=False).json()\n if translation_info:\n translation_info = translation_info[0]\n for key in translation_info.iterkeys():\n info[key] = translation_info[key]\n info = _convert_trakt_episode_metadata(show_metadata, info)\n elif metadata_provider == \"TVDB\":\n tvdb_id = tvdb.search_by_imdb(show_id)\n if tvdb_id:\n show = tvdb[tvdb_id]\n show_metadata = get_show_metadata(show_id)\n season = show[int(season_num)]\n season_metadata = _convert_tvdb_season_metadata(\n show_metadata, season, language=LANG)\n episode_metadata = _convert_tvdb_episode_metadata(\n show_id, season_metadata, season[int(episode_num)])\n info = episode_metadata\n except:\n pass\n save_episode_to_db(show_id, season_num, episode_num, metadata_provider,\n LANG, info)\n return info", "def constructMediaObject(episode):\n\tif True or len(episode['availableResolutions']) == 0:\n\t\tepisode['availableResolutions'] = getAvailResFromPage(episode['link'])\n\n\t\t# FIXME I guess it's better to have something than nothing? 
It was giving Key error\n\t\t# on episode number\n\t\tif str(episode['mediaId']) not in Dict['episodes']:\n\t\t\tDict['episodes'][str(episode['mediaId'])] = episode\n\t\n\t\tDict['episodes'][str(episode['mediaId'])]['availableResolutions'] = episode['availableResolutions']\n\t\n\tvideoInfo = getVideoInfo(episode['link'], episode['mediaId'], episode['availableResolutions'])\n\tvideoInfo['small'] = (isPaid() and isPremium(episode.get('category'))) is False\n\t\n\tepsObject = EpisodeObject(\n\t\turl = videoInfo['baseUrl'], #dunno if this will work\n\t\ttitle = episode['title'],\n\t\tsummary = episode['description']\n\t)\n\n\tfor q in episode['availableResolutions']:\n\t\tdur = episode.get('duration')\n\t\tif not (dur and dur > 0):\n\t\t\tdur = 0\n\t\t\t\n\t\tmo = MediaObject(\n\t\t\t\tduration = dur,\n\t\t\t\tvideo_resolution = q,\n\t\t\t\tprotocol = Protocol.WebKit,\n\t\t\t\tparts = [\n\t\t\t\t\tPartObject(\t\t\t\t\n\t\t\t\t\t\tkey = WebVideoURL(getVideoUrl(videoInfo, q))\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\tepsObject.add(mo)\n\tdir = ObjectContainer( objects = [epsObject])\n\treturn dir", "def determine_next_episode(\n\t\tself):\n\n\t\tresult = dict()\n\n\t\tsjmanager.log.log('Trying to determine next show to watch')\n\n\t\t# First up, check which season and which episode is in the watch cache.\n\t\trow = self.sql.execute(\"\"\"SELECT \n\t\t\tseason_title,\n\t\t\tepisode_title,\n\t\t\tfinished \n\t\t\tFROM last_watched \n\t\t\tWHERE show_url = ?\"\"\",\n\t\t\t(self.url,)).fetchone()\n\n\t\tsjmanager.log.log(\"Fetched the following row: {}, {}, {}, {}\".format(row['season_title'],row['episode_title'],row['finished'],row['finished'] == str(0)))\n\n\t\t# If it's not finished, this means there's a cache file lying around, so\n\t\t# return episode and season title so we can find it.\n\t\tif str(row['finished']) == '0':\n\t\t\tsjmanager.log.log(\"Previous show isn't finished, so taking that as new show\")\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = row['episode_title']\n\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t'Ok, season title is {}, episode title is {}'.format(\n\t\t\t\trow['season_title'],\n\t\t\t\trow['episode_title']))\n\n\t\t# Otherwise, if the episode title isn't numeric, there's no chance to know\n\t\t# which episode (or even season) is next. So we return nothing\n\t\tif not row['episode_title'].isnumeric():\n\t\t\tsjmanager.log.log('The episode title is not numeric, so returning nothing')\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\t# If the episode title _is_ numeric, there's two cases that can happen:\n\t\t#\n\t\t# 1. There's an episode in the current season with a number one higher than\n\t\t# the current episode\n\t\t# 2. No episode with a higher number exists. 
In that case, maybe we have\n\t\t# another season to continue to\n\t\tsjmanager.log.log('Cool, the episode title is numeric')\n\n\t\tseasons = self.seasons(\n\t\t\trow['season_title'])\n\n\t\t# Get all the mangled episode titles in the season\n\t\tepisode_titles = set()\n\t\tfor season in seasons:\n\t\t\tepisode_titles = episode_titles.union(\n\t\t\t\tseason.episode_titles())\n\n\t\tif str(int(row['episode_title']) + 1) in episode_titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Cool, we've got an episode called {}, continuing with that\".format(\n\t\t\t\t\tint(row['episode_title']) + 1))\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = str(int(row['episode_title']) + 1)\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"No higher episode found, checking if season is numeric\")\n\n\t\tif not row['season_title'].isnumeric():\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Season is not numeric, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"Season is numeric, checking if a higher season exists\")\n\n\t\ttitles = self.season_titles()\n\n\t\tif not str(int(row['season_title'])+1) in titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"No higher season exists, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"A higher season exists, returning this season but no episode\")\n\t\tresult['season_title'] = str(int(row['season_title'])+1)\n\t\tresult['episode_title'] = None\n\t\treturn result", "def _convert_tvdb_episode_metadata(imdb_id , season_metadata, episode, banners=True):\n info = copy.deepcopy(season_metadata)\n info['episode'] = episode.get('episodenumber')\n info['title'] = episode.get('episodename', '')\n info['aired'] = episode.get('firstaired', '')\n info['premiered'] = episode.get('firstaired', '')\n info['rating'] = episode.get('rating', '')\n info['plot'] = episode.get('overview', '')\n info['plotoutline'] = episode.get('overview', '')\n info['votes'] = episode.get('ratingcount', '')\n info['imdb_id'] = imdb_id\n if banners:\n info['poster'] = episode['filename']\n return info", "def get_info():\n title, speaker, publish_year, time, language, price = \\\n input(\"*Enter* title| speaker| publish_year| time| language| price : \\n\").split('|')\n media = PodcastEpisode(title, speaker, publish_year, int(time), language, price)\n return media", "def EpisodeDetail(title, url):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n try:\n html = html_from_url(clean_url(url))\n except Exception as e:\n Log.Critical('* EpisodeDetail Error: %s' %str(e))\n message = 'This media has expired.' 
if ('HTTP Error' in str(e) and '404' in str(e)) else str(e)\n return MessageContainer('Warning', message)\n\n ptitle = html.xpath(\"//title/text()\")[0].rsplit(\" Streaming\",1)[0].rsplit(\" Download\",1)[0]\n thumb = html.xpath('//img[@id=\"nameimage\"]/@src')\n thumb = (thumb[0] if thumb[0].startswith('http') else clean_url(thumb[0])) if thumb else None\n\n wpm = html.xpath('//iframe[@id=\"wpm\"]/@src')\n if not wpm:\n return MessageContainer('Warning', 'No Video Source Found.')\n\n pass_html = html_from_url(clean_url(wpm[0]))\n video_urls = []\n source_iframe = pass_html.xpath('//iframe/@src')\n if source_iframe:\n part = 0\n if pass_html.xpath('//div[starts-with(@id, \"part\")]'):\n part = 1\n\n try:\n video_urls.append((part, html_from_url(clean_url(source_iframe[0])).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Error('* EpisodeDetail Error: %s' %str(e))\n pass\n\n if part != 0:\n base_iframe = source_iframe[0].split('.php')[0]\n count = 1\n more = True\n while more and (count < 5):\n count += 1\n try:\n video_urls.append((count, html_from_url(clean_url(base_iframe + '%i.php' %count)).xpath('//iframe/@src')[0]))\n except Exception as e:\n Log.Warn('* EpisodeDetail Warning: %s' %str(e))\n more = False\n\n for p, u in sorted(video_urls):\n if 'prx.proxy' in u:\n u = 'https://docs.google.com/file/' + u.split('/file/')[1]\n oc.add(VideoClipObject(\n title='%i-%s' %(p, ptitle) if p != 0 else ptitle,\n thumb=Callback(get_thumb, url=thumb),\n url=u\n ))\n\n trailpm = html.xpath('//iframe[@id=\"trailpm\"]/@src')\n if trailpm:\n thtml = html_from_url(clean_url(trailpm[0]))\n yttrailer = thtml.xpath('//iframe[@id=\"yttrailer\"]/@src')\n if yttrailer:\n yttrailer_url = yttrailer[0] if yttrailer[0].startswith('http') else 'https:' + yttrailer[0]\n if 'prx.proxy' in yttrailer_url:\n yttrailer_url = 'http://www.youtube.com/embed/' + yttrailer_url.split('/embed/')[1]\n oc.add(VideoClipObject(url=yttrailer_url, thumb=R(ICON_SERIES), title=\"Watch Trailer\"))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Media Found')", "def parse_line(self, text):\n result = {}\n\n # Using _re_valid_show we will match both the Show and Episode\n show_matches = self._re_valid_show.match(text)\n if show_matches:\n distribution = show_matches.group(1)\n votes = int(show_matches.group(3))\n ratings = float(show_matches.group(4))\n\n show_title = show_matches.group(5)\n show_year = show_matches.group(6)\n\n result = {\n 'type': \"Show\",\n 'show_title': show_title,\n 'year': int(show_year),\n 'ratings': float(ratings),\n 'votes': int(votes),\n 'distribution': distribution\n }\n else:\n # Nothing more to do here\n return {}\n\n # If _re_valid_episode is a match we will add episode information\n episode_matches = self._re_valid_episode.match(text)\n if episode_matches:\n # Change the type from Show to Episode\n result['type'] = \"Episode\"\n\n #episode_details = self.parse_episode(episode_matches.group(1))\n \"\"\"\n The string containing episode details is not nicely formatted by IMDb\n It can be:\n \"episode_title\"\n \"episode_title(#2.3)\"\n \"episode_title(#3)\"\n \"(#2.3)\"\n \"(#3)\"\n \"\"\"\n\n split_results = self._re_episode_season_and_number.split(episode_matches.group(1))\n if len(split_results) == 1:\n # We have only the title\n result['episode_title'] = split_results[0]\n result['season'] = 0\n result['number'] = 0\n elif len(split_results) == 3:\n result[\"episode_title\"] = split_results[0]\n\n dot_split_result = split_results[1].split('.')\n if 
len(dot_split_result) == 2:\n result['season'] = int(dot_split_result[0])\n result['number'] = int(dot_split_result[1])\n else:\n result['season'] = 1\n result['number'] = int(dot_split_result[0])\n else:\n print(\"parse_episode unexpected split results, original text is: \" + text)\n\n return result", "def populate(self):\r\n seasons = [0]\r\n season = 0\r\n episodes = [0]\r\n namelist = [[0]]\r\n runtimelist = [[0]]\r\n episodedescriptionlist = [[0]]\r\n data = showInformation.getJson(self.episodesurl)\r\n for dicts in data:\r\n for keys in dicts:\r\n if keys == \"season\" and dicts[keys] not in seasons: \r\n seasons.append(dicts[keys])\r\n season = dicts[keys]\r\n episodes.append(0)\r\n namelist.append([0])\r\n runtimelist.append([0])\r\n episodedescriptionlist.append([0])\r\n if keys == \"number\":\r\n episodes[season] += 1\r\n namelist[season].append(dicts[\"name\"])\r\n runtimelist[season].append(dicts[\"runtime\"])\r\n episodedescriptionlist[season].append(self.stringsToRemove(dicts[\"summary\"]))\r\n \r\n for i in range(1, len(seasons)):\r\n self.seasonsepisodedict[seasons[i]] = episodes[i]\r\n\r\n for i in range(len(namelist)):\r\n for j in range(len(namelist[i])):\r\n self.runtimedescriptionofepisodes[namelist[i][j]] = [runtimelist[i][j], episodedescriptionlist[i][j]]\r\n \r\n self.cast = showInformation.populateCast(self)\r\n self.genres = showInformation.populateGenre(self)\r\n self.episodenamelist = namelist", "def get_video_metadata(self,vid,url):\n try:\n self._logger.info('Ok!...Lets try to retrieve some metada from DailyMotion')\n id=_get_video_id(url)\n if id!='':\n srv = DailyMotionService(self._logger)\n item_meta=self._parse_entry(srv.get_video_entry(id))\n item_meta['video-id']=str(vid)\n return item_meta\n else:\n self._logger.error('Ouch!...An illegal url was provided. 
It was impossible to get the video id.')\n return None\n except:\n self._logger.exception('Dammit!...An error ocurred while retrieving metadata from DailyMotion...')\n return None\n else:\n self._logger.info('Great!...The DailyMotion search was succesfull...')", "def get_movie_metadata(movie_id):\n metadata_provider = ADDON.getSetting(\"movie_metadata_provider\")\n info, created_time = fetch_from_db(movie_id, metadata_provider, LANG)\n if info:\n return info\n elif created_time and float(created_time) <= time.time() + 3600:\n return info\n try:\n if metadata_provider == \"Trakt\":\n headers = {\n 'Content-Type': 'application/json',\n 'trakt-api-version': '2',\n 'trakt-api-key': TRAKT_CLIENT_ID\n }\n url = TRAKT_API_ENDPOINT + \"/movies/\" + movie_id + '?extended=full'\n info = requests.get(url, headers=headers, verify=False).json()\n xbmc.log(\"info:\" + repr(info), xbmc.LOGNOTICE)\n if LANG != \"en\":\n translation_url = TRAKT_API_ENDPOINT + \\\n \"/movies/\" + movie_id + \"/translations/\" + LANG\n translation_info = requests.get(\n translation_url, headers=headers, verify=False).json()\n if translation_info:\n translation_info = translation_info[0]\n for key in translation_info.iterkeys():\n info[key] = translation_info[key]\n genres_dict = trakt_genres(\"movies\")\n info = __convert_trakt_movie_metadata(info, genres_dict)\n elif metadata_provider == \"TMDB\":\n genres_dict = tmdb_movie_genres(LANG)\n info = tmdbsimple.Find(movie_id).info(\n external_source=\"imdb_id\", language=LANG)[\"movie_results\"][0]\n if (not info or (info.get(\"overview\", None) is None and info.get('plot', None) is None) and\n LANG != \"en\"):\n info, created_time = fetch_from_db(movie_id, metadata_provider, \"en\")\n if info:\n return info\n else:\n info = tmdbsimple.Find(movie_id).info(\n external_source=\"imdb_id\",\n language=\"en\")[\"movie_results\"][0]\n info = _convert_tmdb_movie_metadata(\n info, movie_id, genres_dict)\n save_to_db(movie_id, metadata_provider, \"en\", info)\n info = _convert_tmdb_movie_metadata(info, movie_id, genres_dict)\n except:\n pass\n save_to_db(movie_id, metadata_provider, LANG, info)\n return info", "def save_episode_to_db(identifier, season, episode, provider, lang, meta):\n import time\n import koding\n\n koding.Remove_From_Table(\"episode_meta\", {\n \"identifier\": identifier,\n \"provider\": provider,\n \"season\": season,\n \"episode\": episode,\n \"lang\": lang\n })\n\n koding.Add_To_Table(\"episode_meta\", {\n \"identifier\": identifier,\n \"provider\": provider,\n \"season\": season,\n \"episode\": episode,\n \"lang\": lang,\n \"meta\": pickle.dumps(meta).replace(\"\\\"\", \"'\"),\n \"created\": time.time()\n })", "def get_track_info(self): # {\n self._prep_media_controller()\n track_info = \"\"\n if self.state == 'PLAYING' or self.state == 'PAUSED':\n try:\n self.mc.update_status()\n except (pychromecast.error.UnsupportedNamespace, \n pychromecast.error.NotConnected,\n pychromecast.error.ControllerNotRegistered) as error:\n logger.warning(\"Handled exception from: self.mc.update_status()!: %d\" % self.consecutive_update_status_exceptions)\n logger.warning(\" %s\" % error)\n track_info = (\"\", \"\", \"\", \"\", \"\") # artist, title, album, cur_time, duration\n if self.consecutive_update_status_exceptions == 0:\n self.update_status_exceptions_start_time = datetime.datetime.now()\n else:\n elapsed = datetime.datetime.now() - self.update_status_exceptions_start_time\n MAX_DURATION_EXCEPTIONS = 4\n if elapsed.seconds >= MAX_DURATION_EXCEPTIONS:\n logger.error(\"Got %d 
consecutive update status exceptions over %d seconds, disconnecting..\"\n % (self.consecutive_update_status_exceptions, elapsed.seconds))\n self.state = 'IDLE'\n return None\n self.consecutive_update_status_exceptions += 1\n else:\n artist = self.mc.status.artist\n artist = \"\" if artist is None else artist\n title = self.mc.status.title\n title = \"\" if title is None else title\n album = self.mc.status.album_name\n album = \"\" if album is None else album\n track_info = (artist, title, album,\n to_min_sec(self.mc.status.current_time),\n to_min_sec(self.mc.status.duration))\n self.consecutive_update_status_exceptions = 0\n return track_info", "def get_episode_info(p):\n season, episode = None, None\n\n _, name = os.path.split(p)\n\n for fmt in EPISODE_FMTS:\n match = re.search(fmt, name)\n\n if match:\n season = int(match.group(1))\n episode = int(match.group(2))\n break\n\n if not episode:\n raise ValueError(f'could not parse episode: {p}')\n\n return season, episode", "def save_fetched_info(self):\n res = None\n\n if self.response:\n\n res = self.response.json()['d'][0]\n self.movie_info = dict()\n\n try:\n self.movie_info['name'] = res['l']\n\n if self.movie_name.lower() not in self.movie_info['name'].lower():\n raise NameError()\n\n self.movie_info['image_url'] = res['i']['imageUrl']\n self.movie_info['rank'] = res['rank']\n self.movie_info['cast'] = res['s']\n self.movie_info['year'] = res['y']\n\n return 200\n\n except:\n print('NameErrror : Check movie name again...')\n return 404", "def get_episode_info(filename): \n episode_tag, season, episode = None, None, None\n episode_tag = find_episode_pattern(filename)\n if episode_tag is not None:\n pattern = episode_tag.lower().replace(\"s\",\" \").replace(\"e\",\" \")\n pattern_array = pattern.split()\n season = int(pattern_array[0])\n episode = int(pattern_array[1])\n season = \"{:0>2}\".format(season)\n episode = \"{:0>2}\".format(episode)\n return episode_tag, season, episode", "def get_episode_media_url(self, podcast_entry):\r\n links = podcast_entry[\"links\"]\r\n\r\n for link in links:\r\n if \"audio\" in link[\"type\"]:\r\n return link[\"href\"]", "def MediaInfoLookup(url):\n\t\n\t# Get clean copy of URL user has played.\n\tdecoded_url = String.Decode(str(url))\n\t#Log(decoded_url)\n\t\n\t# See if the URL being played is on our recently browsed list.\n\titem = cerealizer.loads(Data.Load(BROWSED_ITEMS_KEY)).getByURL(decoded_url)\n\n\tif (item is None):\n\t\tLog(\"****** ERROR: Watching Item which hasn't been browsed to\")\n\t\treturn \"\"\n\t\n\t# Return the media info that was stored in the recently browsed item.\n\treturn demjson.encode(item[0])", "def getEpisodeArt(episode):\n\tseriesId = None\n\tfor sk in Dict['series'].keys():\n\t\tif Dict['series'][str(sk)]['title']==episode['seriesTitle']:\n\t\t\tseriesId = int(sk)\n\tif seriesId is not None:\n\t\tartUrl = \"\"\n\t\tif Dict['series'][str(seriesId)]['tvdbId'] is not None and Prefs['fanart'] is True:\n\t\t\tartUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)\n\t\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\t\tif artUrl is not None:\n\t\t\t\tart = Function(getArt,url=artUrl)\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = Dict['series'][str(seriesId)]['art']\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = R(CRUNCHYROLL_ART)\n\telse:\n\t\tartUrl = R(CRUNCHYROLL_ART)\n\tLog.Debug(\"artUrl: %s\"%artUrl)\n\treturn artUrl", "def lookup_episode(self, seriesName, episodeName, id = 0):\n\n episodeName = 
episodeName.strip()\n\n if seriesName is None or episodeName is None:\n return (None,None)\n\n if seriesName != self.last_series_name:\n logging.info(\" - Searching thetvdb for:{}\".format(seriesName))\n search = tvdb.Search()\n response = search.series(seriesName)\n s = search.series\n\n if id > 0:\n for s in response:\n if s['id'] == id:\n show = tvdb.Series(id)\n break\n elif id == 0:\n show = tvdb.Series(s[0]['id'])\n else:\n logging.info(' - Unable to find series id:{} - terminating'.format(id))\n return (None,None)\n\n self.last_series_name = seriesName\n self.last_series_obj = show\n\n show = self.last_series_obj\n\n episodes = show.Episodes.all()\n logging.info(\" - Found {} episodes\".format(len(episodes)))\n if (len(episodes)==0):\n return (\"error\", \"no episodes found\")\n\n if episodes == []:\n return (None, None)\n\n for i, e in enumerate(episodes[::-1]):\n ep_name = e['episodeName']\n\n if ep_name is not None:\n n = Levenshtein.distance(episodeName, ep_name)\n\n if n <= MATCH_TOLERANCE:\n e_id = e['airedEpisodeNumber']\n s_id = e['airedSeason']\n logging.info(\" - Matched [{0}] to episode name: [{1}]\".format(episodeName, ep_name))\n return (str(s_id), str(e_id))\n\n logging.info(\" - UNABLE TO MATCH: {}\".format(episodeName))\n return (\"error\", \"expected series not found\")", "def test_get_video_info_is_working_properly(self):\n result = self.test_media_manager.get_video_info(\n self.test_video_path_mp4\n )\n self.assertEqual(\n result,\n {\n 'video_info': {\n 'TAG:encoder': 'Lavf55.19.104',\n 'nb_streams': '1',\n 'start_time': '0.000000',\n 'format_long_name':\n 'QuickTime/MPEG-4/Motion JPEG 2000 format',\n 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2',\n 'filename': self.test_video_path_mp4,\n 'TAG:compatible_brands': 'isomiso2avc1mp41',\n 'bit_rate': '2163440.000000',\n 'TAG:major_brand': 'isom',\n 'duration': '0.400000',\n 'TAG:minor_version': '512',\n 'size': '108172.000000'\n },\n 'stream_info': [\n {\n 'pix_fmt': 'yuv444p',\n 'index': '0',\n 'codec_tag': '0x31637661',\n 'level': '30',\n 'r_frame_rate': '25/1',\n 'start_time': '0.000000',\n 'time_base': '1/12800',\n 'codec_tag_string': 'avc1',\n 'codec_type': 'video',\n 'has_b_frames': '2',\n 'width': '640',\n 'codec_name': 'h264',\n 'codec_long_name':\n 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10',\n 'display_aspect_ratio': '8:5',\n 'sample_aspect_ratio': '1:1',\n 'TAG:language': 'und',\n 'height': '400',\n 'nb_frames': '10',\n 'codec_time_base': '1/50',\n 'duration': '0.320000',\n 'avg_frame_rate': '125/4'\n }\n ]\n }\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
sort a list of dicts by the key 'title' and return the result
def titleSort(dictList): res = sorted(dictList, key=lambda k: getSortTitle(k)) return res
[ "def sortByTitle(self,keys_list=None):\n\t\tif not keys_list:\n\t\t\tkeys_list = self.getEntryList()\n\t\t\t\n\t\tr_list = self.searchObjTypeDerive(keys_list,query_objType=\".obj.pub\")\n\t\tr_list.sort(key = lambda x: self.entries[x].title )\n\t\treturn r_list", "def arrange(l: Dict[str, List[str]]) -> None:\n for key in l:\n l[key].sort()", "def title_insertion_sort(unsorted_object):\n\n all_videos = unsorted_object\n\n for i in range(1, len(all_videos)):\n key = all_videos[i]\n j = i - 1\n\n while j >= 0 and str(all_videos[j].title) > str(key.title):\n # Shift elements upwards\n all_videos[j+1] = all_videos[j]\n j -= 1\n\n # Insert key into position\n all_videos[j + 1] = key\n\n return all_videos", "def dictsort(dict_list, sort_by_me):\r\n decorated = [(resolve_variable('var.' + sort_by_me, {'var' : item}), item) for item in dict_list]\r\n decorated.sort()\r\n return [item[1] for item in decorated]", "def test_query_order_by_related_titled(self):\n data_title = {\n \"object_name\": \"Audit\",\n \"order_by\": [{\"name\": \"program\"}, {\"name\": \"id\"}],\n \"filters\": {\"expression\": {}},\n }\n audits_title = self._get_first_result_set(data_title,\n \"Audit\", \"values\")\n\n data_unsorted = {\n \"object_name\": \"Audit\",\n \"filters\": {\"expression\": {}},\n }\n audits_unsorted = self._get_first_result_set(data_unsorted,\n \"Audit\", \"values\")\n\n # get titles from programs to check ordering\n data_program = {\n \"object_name\": \"Program\",\n \"filters\": {\"expression\": {}},\n }\n programs = self._get_first_result_set(data_program,\n \"Program\", \"values\")\n program_id_title = {program[\"id\"]: program[\"title\"]\n for program in programs}\n\n self.assertListEqual(\n audits_title,\n sorted(sorted(audits_unsorted, key=itemgetter(\"id\")),\n key=lambda a: program_id_title[a[\"program\"][\"id\"]]),\n )", "def sortfunc(pt1, pt2):\n return cmp(pt1.title, pt2.title)", "def arrange(json_list):\n j_list = []\n for key in json_list.keys():\n tupl = (json_list[key]['time_added'], json_list[key])\n j_list.append(tupl)\n sorted_list = sorted(j_list, key=lambda x: x[0], reverse=True)\n json_list = [x[1] for x in sorted_list]\n return json_list", "def sorted_stories_list(hnList):\r\n return sorted(hnList,key=lambda x:x['votes'],reverse=True)", "def get_movies_by_title(str):\r\n\r\n sort_by = \"popularity.desc\"\r\n\r\n url = f\"https://api.themoviedb.org/3/search/movie?api_key={TMDB_API_KEY}&language=en-US&sort_by={sort_by}&page=1&include_adult=false&query='{str}'\"\r\n\r\n response = requests.get(url)\r\n\r\n search_results = json.loads(response.text)\r\n\r\n recommendations = dict()\r\n count = 1\r\n \r\n for index, title in enumerate(search_results['results']):\r\n recommendations[(index + 1)] = title\r\n\r\n return recommendations", "def sort_species(species_dict):\n # first sort by Genus name using the \"sort\" attribute\n sorted_list = list(species_dict.items())\n sorted_list.sort()\n return sorted_list", "def sort_by_rating(payloads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n return sorted(payloads, key=lambda k: int(k[\"rating\"]), reverse=True)", "def canonsort_items(dict1, canonical_order=None):\r\n return [(k, dict1[k]) for \\\r\n k in canonsort_keys(dict1.keys(), canonical_order)]", "def __sort(self, subtitles):\n return sorted(subtitles, key=lambda subtitle: (subtitle.get_rating(), subtitle.get_hits()), reverse=True)", "def get_similar_titles(title: str) -> list:\n light_novel_results = BakaUpdates.get_similar_titles(title)\n visual_novel_results = 
VisualNovelDatabase.get_similar_titles(title)\n anime_results = MyAnimeList.get_similar_titles(title)\n\n results = []\n passed_titles = []\n\n for result_list in (light_novel_results, visual_novel_results, anime_results):\n for result in result_list:\n if result['title'] in passed_titles:\n results[passed_titles.index(result['title'])]['links'].append(result['link'])\n else:\n results.append({\n 'title': result['title'],\n 'links': [result['link']],\n 'similarity': result['similarity']\n })\n passed_titles.append(result['title'])\n\n results.sort(key=lambda item: item['similarity'], reverse=True)\n return results", "def sort(li):\n #first sort on document id\n li = sorted(li,key=lambda x: x[0])\n \n #then sort on document ranking\n li = sorted(li,key=lambda x: x[1], reverse=True)\n \n #sort on window length\n # li = sorted(li,key=lambda x: x[3])\n \n #then sort on number of present words\n # li = sorted(li,key=lambda x: x[2], reverse=True)\n return li", "def _sorted(items):\n sorted_items = items\n if len(items) > 1:\n if isinstance(items[0], str):\n sorted_items = sorted(items)\n elif isinstance(items[0], dict):\n sort_key = _sort_key(items[0])\n if sort_key is not None:\n sorted_items = sorted(items, key=lambda x: x[sort_key])\n return sorted_items", "def sort_songs(all_songs: list) -> List[dict]:\n # Get duplicate of the data\n # not to change the real order of the songs.\n all_songs = all_songs[:] \n\n all_songs.sort(key=lambda song: len(song.get('voted_users')), reverse=True)\n\n return all_songs", "def _get_titles(videos):\n return [_get_title(video)\n for video in videos.values()\n if _get_title(video)]", "def sort(self, movie_attribute, reverse = False):\r\n self.movies.sort(key=attrgetter(movie_attribute, \"title\"), reverse=reverse)", "def history_sort_key(history_item_dict):\n second_order = 0\n if \"prop_changed\" in history_item_dict:\n changed_property = history_item_dict[\"prop_changed\"]\n if changed_property == \"name\" or changed_property == \"what\":\n second_order = 1\n\n return history_item_dict[\"time\"], second_order" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return the best background art URL for the passed episode.
def getEpisodeArt(episode): seriesId = None for sk in Dict['series'].keys(): if Dict['series'][str(sk)]['title']==episode['seriesTitle']: seriesId = int(sk) if seriesId is not None: artUrl = "" if Dict['series'][str(seriesId)]['tvdbId'] is not None and Prefs['fanart'] is True: artUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False) #Log.Debug("arturl: %s"%artUrl) if artUrl is not None: art = Function(getArt,url=artUrl) if artUrl == "" or artUrl is None: artUrl = Dict['series'][str(seriesId)]['art'] if artUrl == "" or artUrl is None: artUrl = R(CRUNCHYROLL_ART) else: artUrl = R(CRUNCHYROLL_ART) Log.Debug("artUrl: %s"%artUrl) return artUrl
[ "def get_episode_media_url(self, podcast_entry):\r\n links = podcast_entry[\"links\"]\r\n\r\n for link in links:\r\n if \"audio\" in link[\"type\"]:\r\n return link[\"href\"]", "def get_background_art_urls():\n server = get_plex_server('XXXXXXXXX', 'XXXXXXXXX', 'XXXXXXXXX')\n\n log.info(\"Querying server for all media items\")\n # all_media_items = [library_section.all() for library_section in server.library.sections()]\n log.info(\"Parsing media items for background art urls\")\n all_arts_urls = [get_arts_url(server._baseurl, x.ratingKey, server._token) for x in media_items(server)]\n log.debug(f\"{len(all_arts_urls)} media items.\")\n log.info(\"Querying server for background art urls\")\n all_xml_results = [query_xml_endpoint(x) for x in all_arts_urls]\n log.info(\"Parsing XML response for background art urls\")\n all_photo_elements = [tree_item.iter('Photo') for tree_item in all_xml_results]\n all_photo_urls = [x.attrib['key'] for x in flatten(all_photo_elements)]\n clean_library_urls(all_photo_urls, server)\n return all_photo_urls", "def determine_next_episode(\n\t\tself):\n\n\t\tresult = dict()\n\n\t\tsjmanager.log.log('Trying to determine next show to watch')\n\n\t\t# First up, check which season and which episode is in the watch cache.\n\t\trow = self.sql.execute(\"\"\"SELECT \n\t\t\tseason_title,\n\t\t\tepisode_title,\n\t\t\tfinished \n\t\t\tFROM last_watched \n\t\t\tWHERE show_url = ?\"\"\",\n\t\t\t(self.url,)).fetchone()\n\n\t\tsjmanager.log.log(\"Fetched the following row: {}, {}, {}, {}\".format(row['season_title'],row['episode_title'],row['finished'],row['finished'] == str(0)))\n\n\t\t# If it's not finished, this means there's a cache file lying around, so\n\t\t# return episode and season title so we can find it.\n\t\tif str(row['finished']) == '0':\n\t\t\tsjmanager.log.log(\"Previous show isn't finished, so taking that as new show\")\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = row['episode_title']\n\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t'Ok, season title is {}, episode title is {}'.format(\n\t\t\t\trow['season_title'],\n\t\t\t\trow['episode_title']))\n\n\t\t# Otherwise, if the episode title isn't numeric, there's no chance to know\n\t\t# which episode (or even season) is next. So we return nothing\n\t\tif not row['episode_title'].isnumeric():\n\t\t\tsjmanager.log.log('The episode title is not numeric, so returning nothing')\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\t# If the episode title _is_ numeric, there's two cases that can happen:\n\t\t#\n\t\t# 1. There's an episode in the current season with a number one higher than\n\t\t# the current episode\n\t\t# 2. No episode with a higher number exists. 
In that case, maybe we have\n\t\t# another season to continue to\n\t\tsjmanager.log.log('Cool, the episode title is numeric')\n\n\t\tseasons = self.seasons(\n\t\t\trow['season_title'])\n\n\t\t# Get all the mangled episode titles in the season\n\t\tepisode_titles = set()\n\t\tfor season in seasons:\n\t\t\tepisode_titles = episode_titles.union(\n\t\t\t\tseason.episode_titles())\n\n\t\tif str(int(row['episode_title']) + 1) in episode_titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Cool, we've got an episode called {}, continuing with that\".format(\n\t\t\t\t\tint(row['episode_title']) + 1))\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = str(int(row['episode_title']) + 1)\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"No higher episode found, checking if season is numeric\")\n\n\t\tif not row['season_title'].isnumeric():\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Season is not numeric, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"Season is numeric, checking if a higher season exists\")\n\n\t\ttitles = self.season_titles()\n\n\t\tif not str(int(row['season_title'])+1) in titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"No higher season exists, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"A higher season exists, returning this season but no episode\")\n\t\tresult['season_title'] = str(int(row['season_title'])+1)\n\t\tresult['episode_title'] = None\n\t\treturn result", "def thumb_url(self, episode, timestamp):\n return u'{base}/img/{episode}/{timestamp}/small.jpg'.format(base=self.base, episode=episode,\n timestamp=timestamp)", "def episode(self):\n return self.global_episode", "def episode(self,title):\n\t\tassert isinstance(title,str)\n\n\t\tfor episode in self.episodes:\n\t\t\tsjmanager.log.log('Episode title is {}'.format(episode.title))\n\t\t\tif episode.title == title:\n\t\t\t\treturn episode\n\n\t\treturn None", "def last_episode():\n highest_episode = 125 # The one before the first regular video episode available online\n highest_date = datetime.date(2014, 11, 3)\n\n for filename in os.listdir(HARMONTOWN_DIRECTORY):\n matches = re.match('Harmontown - S01E(\\d+) - (\\d+)-(\\d+)-(\\d+)\\.mp4', filename)\n if matches and int(matches.group(1)) > highest_episode:\n highest_episode = int(matches.group(1))\n highest_date = datetime.date(\n int(matches.group(2)),\n int(matches.group(3)),\n int(matches.group(4))\n )\n\n return highest_episode, highest_date", "def featured_asset_thumbnail_url(self):\n try:\n featured_asset = self.featured_assets.select_subclasses()[0]\n return featured_asset.get_thumbnail_url(include_host=True)\n except IndexError:\n # No featured assets\n return None", "def get_entry_thumbnail_url(entry):\n enclosures = get_accepted_enclosures(entry)\n\n # Try the default enclosure's thumbnail.\n default_enclosure = get_default_enclosure(enclosures)\n if default_enclosure is not None:\n try:\n return get_item_thumbnail_url(default_enclosure)\n except KeyError:\n pass\n\n # Try to get any enclosure thumbnail\n for enclosure in enclosures:\n if enclosure is not default_enclosure:\n try:\n return get_item_thumbnail_url(enclosure)\n except KeyError:\n pass\n\n # Try to get the general thumbnail for the entry\n try:\n return get_item_thumbnail_url(entry)\n except KeyError:\n pass\n\n # Check the content\n if entry.get('link', 
'').find(u'youtube.com') != -1:\n if 'content' in entry:\n content = entry['content'][0]['value']\n elif 'summary' in entry:\n content = entry['summary']\n else:\n return None\n match = re.search(r'<img alt=\"\" src=\"([^\"]+)\" />',\n content)\n if match:\n return match.group(1)\n\n return None", "def fetch_episode_html_page(episode):\n return read_url(episode.page_url)", "def _find_next_episode(self, episodes):\n today = date.today()\n rw = None\n timespan = None\n\n # Search for the episode which airs next (air date is the closest to now)\n for episode in episodes:\n try:\n airdate = datetime.strptime(episode['firstAired'], '%Y-%m-%d')\n airdate = airdate.date()\n if airdate >= today:\n ctimespan = airdate - today\n if timespan is None or ctimespan < timespan:\n rw = episode\n timespan = ctimespan\n except:\n continue\n return rw", "def getPosterImageURL(movie):\n\n valid = \"image/movies/\" + movie.id + \"/poster.png\"\n invalid = \"image/movies/0000 Unknown/poster.png\"\n\n return valid if os.path.isfile(valid) else invalid", "def getMovieBoxartImageURL(movie):\n\n valid = \"image/movies/\" + movie.id + \"/boxart.png\"\n invalid = \"image/movies/0000 Unknown/boxart.png\"\n\n return valid if os.path.isfile(valid) else invalid", "def _fetch_comic_url_latest(comic_url_homepage):\n response = requests.get(comic_url_homepage)\n soup = BeautifulSoup(response.content, 'html.parser')\n latest_comic = soup.find('figure')\n # Grab the link for the individual page of the comic as we can start crawling from that\n comic_uri_part = latest_comic.find('meta', {'itemprop': 'contentUrl'})['content']\n comic_url = f\"{URL_BASE}{comic_uri_part}\"\n return comic_url", "def get_series_page_url(self, title):\n valid_results = [\"(TV Series)\", \"(TV Mini-Series)\"]\n search_page = self.get_search_page(title)\n\n try:\n for index, section in enumerate(search_page.xpath('//*[@id=\"main\"]/div[1]/div')):\n if len(section.xpath('h3/text()')) > 0:\n\n # Find the Div associated with Titles (rather than Characters, etc)\n if section.xpath('h3/text()')[0] == \"Titles\":\n\n # Select first in list which doesn't contain invalid_results\n for index, list_title in enumerate(search_page.xpath('//*[@id=\"main\"]/div[1]/div[2]/table[1]/tr')):\n if any(x in list_title.text_content() for x in valid_results):\n\n # Some items listed as \"TV Episode\" also contain a link with the term \"TV Series\" below\n if \"(TV Episode)\" not in list_title.text_content():\n endpoint = search_page.xpath('//*[@id=\"main\"]/div[1]/div[2]/table[1]/tr[%i]/td/a' %(index+1))[0].attrib['href']\n return IMDB.BASE_URL + endpoint\n except IndexError:\n return None", "def get_movie_page_url(self, title):\n invalid_results = [\"(TV Episode)\", \"(TV Series)\", \"(TV Mini-Series)\", \"(Short)\", \"(Video Game)\"]\n search_page = self.get_search_page(title)\n\n try:\n for index, section in enumerate(search_page.xpath('//*[@id=\"main\"]/div[1]/div')):\n if len(section.xpath('h3/text()')) > 0:\n\n # Find the Div associated with Titles (rather than Characters, etc)\n if section.xpath('h3/text()')[0] == \"Titles\":\n\n # Select first in list which doesn't contain invalid_results\n for index, list_title in enumerate(search_page.xpath('//*[@id=\"main\"]/div[1]/div[2]/table[1]/tr')):\n if not any(x in list_title.text_content() for x in invalid_results):\n endpoint = search_page.xpath('//*[@id=\"main\"]/div[1]/div[2]/table[1]/tr[%i]/td/a' %(index+1))[0].attrib['href']\n return IMDB.BASE_URL + endpoint\n except IndexError:\n return", "def 
get_episode_dir(project_name, episode):\n\n root = avalon.api.Session[\"AVALON_PROJECTS\"]\n return \"{}/{}/shots/{}\".format(root, project_name, episode)", "def episode(self, episode_id, market=None):\n\n trid = self._get_id(\"episode\", episode_id)\n return self._get(\"episodes/\" + trid, market=market)", "def get_wallpaper():\n\t# Put together gsettings call\n\tcmd = [\n\t\t'gsettings', \n\t\t'get', \n\t\t'org.gnome.desktop.background', \n\t\t'picture-uri'\n\t]\n\t# The call will return a string of the form \"'file://PATH'\\n\", so we need\n\t# to clean it up\n\turi = subprocess.check_output(cmd).strip().strip(\"'\")\n\t# Get rid of the 'file://' prefix\n\tpath = uri[len('file://'):]\n\treturn path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to find a better thumb than the one provided via URL. The thumb data returned is either a URL or the image data itself.
def getThumb(url,tvdbId=None): ret = None if (tvdbId is not None and Prefs['fanart'] is True): thumb = fanartScrapper.getRandImageOfTypes(tvdbId,['tvthumbs']) if thumb is None: thumb = url url=thumb if url==R(CRUNCHYROLL_ICON): ret = url else: if url is not None: try: data = HTTP.Request(url, cacheTime=CACHE_1WEEK).content if url.endswith(".jpg"): ret = DataObject(data, 'image/jpeg') elif url.endswith(".png"): ret = DataObject(data, 'image/png') except Exception, arg: Log.Error("#####Thumbnail couldn't be retrieved:") Log.Error("#####" + repr(Exception) + repr(arg) + url) ret = None if ret is None: return R(CRUNCHYROLL_ICON) else: return ret
[ "def parsethumbfromdescription(descriptionelement):\n soup = bs4.BeautifulSoup(descriptionelement.text,'html.parser')\n img = soup.find('img')\n if not img: return None\n return img.attrs['src']", "def _findBestImage(self, url):\n largeUrls = [\n url.replace('100x100', '600x600'),\n url.replace('100x100', '400x400'),\n url.replace('100x100', '200x200'),\n ]\n\n for largeUrl in largeUrls:\n try:\n self.countLookupCall('image')\n urllib2.urlopen(largeUrl)\n return largeUrl\n except urllib2.HTTPError:\n pass\n except LookupRequiredError:\n return url\n return url", "def _get_best_streamer_large_picture(streamer):\n\tif streamer.image_url_large:\n\t\treturn _resize_large_picture(streamer, 150)", "def get_image_url(text, catalogue_number):\n # type: (str, int) -> Optional[str]\n image = re.search(r\"image: \\\"(http.*)\\\"\", text)\n url = image.group(1) if image else None\n thumb = EAFA_LOW_RES_THUMB_TEMPLATE.format(catalogue_number)\n if not url and catalogue_number:\n return thumb\n with Cache() as c:\n cached = c.get(url)\n if cached and (cached[\"fresh\"] or cached[\"immutable\"]):\n return cached[\"blob\"]\n r = requests.head(url)\n if 200 == r.status_code:\n c.set(url, url, r.headers)\n return url\n c.set(url, thumb, None)\n return thumb", "def _get_image_thumbnail(self, geometry: str, options: dict) -> str:\n FALLBACK = static(\"images/thumb.400x300.png\")\n\n # own albums -> files are not deleted\n if self.photo_id:\n return get_thumbnail(self.photo.image, geometry, **options).url\n\n if self.image_gone or not self.photo_url:\n return FALLBACK\n\n # try to obtain a thumbnail from the remote\n thumbnail = get_thumbnail(self.photo_url, geometry, **options)\n if thumbnail.exists():\n return thumbnail.url\n\n # at this point, we couldn't write a thumbnail - mark the photo URL as broken\n # and fall back instead\n self.image_gone = True\n self.save()\n\n return FALLBACK", "def _get_best_streamer_small_picture(streamer):\n\tif streamer.image_url_small:\n\t\treturn streamer.image_url_small\n\telif streamer.image_url_large:\n\t\treturn _resize_large_picture(streamer, 28)", "def thumbnail(self, item):\n try:\n scales = item.restrictedTraverse(\"@@images\")\n return scales.scale(\"image\", \"tile\")\n except AttributeError:\n return None", "def get_thumbnail(self):\r\n thumbnail_file = self.thumbnail\r\n if thumbnail_file:\r\n thumbnail_url_path = 'content/items/' + self.itemid + '/info/' + thumbnail_file\r\n if thumbnail_url_path:\r\n return self._portal.con.get(thumbnail_url_path, try_json=False, force_bytes=True)", "def get_entry_thumbnail_url(entry):\n enclosures = get_accepted_enclosures(entry)\n\n # Try the default enclosure's thumbnail.\n default_enclosure = get_default_enclosure(enclosures)\n if default_enclosure is not None:\n try:\n return get_item_thumbnail_url(default_enclosure)\n except KeyError:\n pass\n\n # Try to get any enclosure thumbnail\n for enclosure in enclosures:\n if enclosure is not default_enclosure:\n try:\n return get_item_thumbnail_url(enclosure)\n except KeyError:\n pass\n\n # Try to get the general thumbnail for the entry\n try:\n return get_item_thumbnail_url(entry)\n except KeyError:\n pass\n\n # Check the content\n if entry.get('link', '').find(u'youtube.com') != -1:\n if 'content' in entry:\n content = entry['content'][0]['value']\n elif 'summary' in entry:\n content = entry['summary']\n else:\n return None\n match = re.search(r'<img alt=\"\" src=\"([^\"]+)\" />',\n content)\n if match:\n return match.group(1)\n\n return None", "def get_thumbnail(url, 
filename):\n\n if url == \"\":\n print('error: url cannot be an empty string.' % (filename, thumb_dir))\n else:\n try:\n urlretrieve(url, os.path.join(thumb_dir, filename))\n print('success : %s downloaded to %s directory.' % (filename, thumb_dir))\n except AttributeError:\n print('error : %s could not be downloaded to %s directory.' % (filename, thumb_dir))\n return url", "def get_thumbnail(self, instance):\n return instance.link_meta.thumbnail if instance.link_meta is not None else None", "def thumbnail_url(self, url):\n if not url or '/storage/' not in url:\n return url\n\n image_id = url.split('/storage/')[-1]\n\n # image file ids are generated from the random_token function\n if len(image_id) == RANDOM_TOKEN_LENGTH:\n return self.request.class_link(\n ImageFile, {'id': image_id}, name='thumbnail')\n else:\n return url", "def thumb_url(self, episode, timestamp):\n return u'{base}/img/{episode}/{timestamp}/small.jpg'.format(base=self.base, episode=episode,\n timestamp=timestamp)", "def thumbnail_url(self):\n return self.image.thumbnail_url", "def thumbnail(self):\n # get thumbnail\n tag_root = 'root.ImageList.0'\n tn_size = int( self.tags[\"%s.ImageData.Data.Size\" % tag_root] )\n tn_offset = int( self.tags[\"%s.ImageData.Data.Offset\" % tag_root] )\n tn_width = int( self.tags[\"%s.ImageData.Dimensions.0\" % tag_root] )\n tn_height = int( self.tags[\"%s.ImageData.Dimensions.1\" % tag_root] )\n\n if self.debug > 0:\n print \"Notice: tn data in %s starts at %s\" % (\n os.path.split(self._filename)[1], hex(tn_offset)\n )\n print \"Notice: tn size: %sx%s px\" % (tn_width, tn_height)\n\n if (tn_width*tn_height*4) != tn_size:\n raise Exception(\"Cannot extract thumbnail from %s\" \n % os.path.split(self._filename)[1])\n else:\n self._f.seek( tn_offset )\n rawdata = self._f.read(tn_size)\n # - read as 16-bit LE unsigned integer\n tn = Image.frombytes( 'F', (tn_width, tn_height), rawdata, \n 'raw', 'F;32' )\n # - rescale and convert px data\n tn = tn.point(lambda x: x * (1./65536) + 0)\n tn = tn.convert('L')\n # - return image\n return tn", "def get_thumbnail(url, geometry_string, **kwargs):\n from thummer.models import WebpageSnapshot\n try:\n webpage_snapshot = WebpageSnapshot.objects.filter(\n url=url).valid().latest()\n except WebpageSnapshot.DoesNotExist:\n webpage_snapshot = WebpageSnapshot(url=url)\n webpage_snapshot.save()\n return webpage_snapshot.get_thumbnail(geometry_string, **kwargs)", "def get_thumbnail(self):\r\n thumbnail_file = self.thumbnail\r\n if thumbnail_file:\r\n thumbnail_url_path = 'community/users/' + self._user_id + '/info/' + thumbnail_file\r\n if thumbnail_url_path:\r\n return self._portal.con.get(thumbnail_url_path, try_json=False, force_bytes=True)", "def get_thumbnail_path(self):\n\n thumb_attr = artellapipe.ShotsMgr().config.get('data', 'thumb_attribute')\n thumb_path = self._shot_data.get(thumb_attr, None)\n\n return thumb_path", "def _fetch_image(url: str) -> Optional[Union[PNGSongImage, JPEGSongImage]]:\n response = requests.get(url)\n if response.headers['Content-Type'] == 'image/png':\n return PNGSongImage(response.content)\n elif response.headers['Content-Type'] == 'image/jpeg':\n return JPEGSongImage(response.content)\n\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
given the URL of a page where a video is watched, return a list of integers of the available heights. If the user is a guest, just return 360, which is all they get ;)
def getAvailResFromPage(url): if not Prefs['username'] or not Prefs['password']: return [360] login() availRes = [360] link = url.replace(BASE_URL, "") req = HTTP.Request(url=url, immediate=True, cacheTime=3600*24) html = HTML.ElementFromString(req) try: small = not isPremium() except: small = False if small is False: try: if len(html.xpath("//a[@token='showmedia.480p']")) > 0: availRes.append(480) if len(html.xpath("//a[@token='showmedia.720p']")) > 0: availRes.append(720) if len(html.xpath("//a[@token='showmedia.1080p']")) > 0: availRes.append(1080) except Exception,arg: Log.Error("####getAvalResFromPage() we got ourselves an exception:") Log.Error(repr(Exception) + repr(arg)) return availRes
[ "def get_video_information():\n url = get_video_url()\n response = requests.get(url)\n info =response.json()\n number_frames = info['frames']\n return number_frames", "def get_pages(url):\n return url.json()['size'] // 10", "def get_height():\n while True:\n val = get_int(\"Height: \")\n if 0 <= val <= 23:\n return val", "def get_dimensions(input_file):\n deets = get_video_details(input_file)\n dimensions = deets['width'],deets['height']\n width = int(dimensions[0])\n height = int(dimensions[1])\n return width, height", "def _get_viewers(self):\n total_viewers = 0\n for stream in self.result_set:\n total_viewers += stream.viewers\n return total_viewers", "def resolution_of_videofile(self):\n p = self.probe()\n assert 'streams' in p and len(['streams']) > 0\n (H,W) = (p['streams'][0]['height'], p['streams'][0]['width']) # (height, width) in pixels\n return (W,H) if ('tags' in p['streams'][0] and 'rotate' in p['streams'][0]['tags'] and p['streams'][0]['tags']['rotate'] in ['90','270']) else (H,W)", "def heights(self):\n if not self.__landscaped:\n self.landscape()\n if not self.__expounded:\n self.expound()\n return self.__landscape_data.heights", "def get_ads_sizes():\n\n sizes = set()\n\n for advert in filter(lambda adv: adv.state.is_running, Advert.objects.all()):\n sizes.add((advert.width, advert.height))\n\n return [{'width': w, 'height': h} for w, h in sizes]", "def get_video_size(self):\n\n self.logger.info(f'Getting video size for {self.in_path}')\n probe = ffmpeg.probe(self.in_path)\n video_info = next(\n s for s in probe['streams'] if s['codec_type'] == 'video')\n width = int(video_info['width'])\n height = int(video_info['height'])\n return width, height", "def getFrameHeight(self):\n hebs = self.getGitesForProprio()\n hebCount = len(hebs)\n height = 135\n if hebCount > 1:\n baseHeight = 100\n height = baseHeight + (hebCount * 55)\n return height", "def _get_page_size(self):\n width_entry = (self.document_tree.xpath(\"//body/doc/page/@width\"))\n height_entry = (self.document_tree.xpath(\"//body/doc/page/@height\"))\n try:\n width, height = float(width_entry[0]), float(height_entry[0])\n except KeyError:\n width, height = -1, -1\n return width, height", "def get_my_latest_height(self, user):\n body_physique = self.get_user_latest_body_physique(self.request.user)\n if body_physique:\n return body_physique.height_in_centimeters\n return None", "def fetchLatLonHeight(self):\n if 'Config' not in self.tables:\n print('Cannot access Config table to retrieve site parameters; using sims.utils.Site instead.')\n site = Site(name='LSST')\n lat = site.latitude_rad\n lon = site.longitude_rad\n height = site.elev\n else:\n table = self.tables['Config']\n lat = table.query_columns_Array(colnames=['paramValue'],\n constraint=\"paramName = 'latitude'\")\n lat = float(lat['paramValue'][0])\n lon = table.query_columns_Array(colnames=['paramValue'],\n constraint=\"paramName = 'longitude'\")\n lon = float(lon['paramValue'][0])\n height = table.query_columns_Array(colnames=['paramValue'],\n constraint=\"paramName = 'height'\")\n height = float(height['paramValue'][0])\n return lat, lon, height", "def screen_metrics():\r\n ctypes.windll.user32.SetProcessDPIAware()\r\n user32 = ctypes.windll.user32\r\n width = int(user32.GetSystemMetrics(0))\r\n height = int(user32.GetSystemMetrics(1))\r\n wto1920 = width/1920\r\n hto1080 = height/1080\r\n metrics = [width, height, wto1920, hto1080]\r\n return metrics", "def requested_url_scaled_calculation(self):\n\n totalLinks = 0\n externalLinks = 0\n\n m = []\n\n 
for p in self.soup.find_all(\"img\"):\n if p.has_attr(\"src\") and \"http\" in p.get(\"src\")[:4]:\n m.append(p.get('src'))\n\n for p in self.soup.find_all(\"video\"):\n for q in p.find_all(\"source\"):\n if q.has_attr(\"src\") and \"http\" in q.get(\"src\")[:4]:\n m.append(q.get('src'))\n\n for p in self.soup.find_all(\"audio\"):\n for q in p.find_all(\"source\"):\n if q.has_attr(\"src\") and \"http\" in q.get(\"src\")[:4]:\n m.append(q.get('src'))\n\n for link in m:\n if self.domain not in link:\n externalLinks += 1\n totalLinks += 1\n\n if totalLinks != 0:\n percentage = externalLinks / totalLinks\n\n self.requestedScaledWeight = percentage\n else:\n self.requestedScaledWeight = 0", "def video_count(self) ->int:\n return int(self._statistics.get('videoCount'))", "def get_talks_gt_one_hour(videos):\n return [video for video in videos if \"H\" in video.duration]", "def fetch_goalies(link):\n url = '{0}{1}'.format(NHL_API_URL_BASE, link)\n response = requests.get(url)\n stuff = response.json()\n try:\n home_goalie = stuff['liveData']['boxscore']['teams']['home']['goalies']\n away_goalie = stuff['liveData']['boxscore']['teams']['away']['goalies']\n except requests.exceptions.RequestException:\n print(\"Error encountered getting live stats\")\n return home_goalie, away_goalie", "def maxHeightOfGround(self, vertices=[\"nearMiddle\"]):\n\t\th = []\n\t\tfor v in vertices:\n\t\t\th.append(self.dots[v].heightFromGround)\n\t\th.sort(reverse=True)\n\t\treturn h[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train Kalman Filter Decoder
def fit(self,X_kf_train,y_train): #First we'll rename and reformat the variables to be in a more standard kalman filter nomenclature (specifically that from Wu et al, 2003): #xs are the state (here, the variable we're predicting, i.e. y_train) #zs are the observed variable (neural data here, i.e. X_kf_train) X=np.matrix(y_train.T) Z=np.matrix(X_kf_train.T) #number of time bins nt=X.shape[1] #Calculate the transition matrix (from x_t to x_t+1) using least-squares, and compute its covariance #In our case, this is the transition from one kinematic state to the next X2 = X[:,1:] X1 = X[:,0:nt-1] A=X2*X1.T*inv(X1*X1.T) #Transition matrix W=(X2-A*X1)*(X2-A*X1).T/(nt-1)/self.C #Covariance of transition matrix. Note we divide by nt-1 since only nt-1 points were used in the computation (that's the length of X1 and X2). We also introduce the extra parameter C here. #Calculate the measurement matrix (from x_t to z_t) using least-squares, and compute its covariance #In our case, this is the transformation from kinematics to spikes H = Z*X.T*(inv(X*X.T)) #Measurement matrix Q = ((Z - H*X)*((Z - H*X).T)) / nt #Covariance of measurement matrix params=[A,W,H,Q] self.model=params
[ "def __generate_kalman_data(self):\n if self.print_kf_progress:\n print('Generating inputs...')\n self.kf_timestamps, initial_state, initial_covariance, transition_matrices, transition_covariances, observation_matrices, observation_covariances, self.kf_measurements = self.__generate_kalman_input()\n\n g = 9.81\n self.kf = KalmanFilter(\n n_dim_state = 9,\n n_dim_obs = 9,\n initial_state_mean = initial_state,\n initial_state_covariance = initial_covariance,\n transition_matrices = transition_matrices,\n transition_covariance = transition_covariances,\n observation_matrices = observation_matrices,\n observation_covariance = observation_covariances,\n transition_offsets=np.zeros(9),\n observation_offsets=np.array([0, 0, 0, 0, 0, 0, 0, 0, g]),\n )\n\n if self.print_kf_progress:\n print('Running EM algorithm...')\n\n # TODO Not sure if we should use that\n #self.kf = self.kf.em(self.kf_measurements, n_iter=5, em_vars=['transition_covariance', 'observation_covariance'])#, 'initial_state_mean', 'initial_state_covariance'])", "def autoencoder_feedforward(theta, visible_size, hidden_size, data):\n\n ### YOUR CODE HERE ###\n # theta is an array with order [{W(1)}, {W(2)}, {b(1)}, {b(2)}]\n # in W, ROWS INDICATE \"TO\" NODES AND COLUMNS INDICATE \"FROM\" NODES\n # Pull values from theta vector and reshape:\n W1 = theta[0:(hidden_size * visible_size)]\n W1 = numpy.reshape(W1, (hidden_size, visible_size))\n \n W2 = theta[(hidden_size * visible_size):((hidden_size * visible_size) + (visible_size * hidden_size))]\n W2 = numpy.reshape(W2, (visible_size, hidden_size))\n \n b1 = theta[((hidden_size * visible_size) + (visible_size * hidden_size)):(((hidden_size * visible_size) + (visible_size * hidden_size)) + hidden_size)]\n b2 = theta[(((hidden_size * visible_size) + (visible_size * hidden_size)) + hidden_size) : (((hidden_size * visible_size) + (visible_size * hidden_size)) + hidden_size + visible_size)]\n \n ##########################################################################################################################################\n # FEED FORWARD/FORWARD PROPOGATION:\n # in W, ROWS INDICATE \"TO\" NODES (i) AND COLUMNS INDICATE \"FROM\" NODES (j)\n # Activations at layer 1 = inputs, i.e., aSup1 = x\n # Number of neurons = number of input data points (pixels), e.g. 
784, which we can also say is the visible size?\n \n # In the sequel, we also let z^{(l)}_i denote the total weighted sum of inputs to unit i in layer l, including the bias term (e.g., \\textstyle z_i^{(2)} = \\sum_{j=1}^n W^{(1)}_{ij} x_j + b^{(1)}_i), so that a^{(l)}_i = f(z^{(l)}_i).\n # http://ufldl.stanford.edu/wiki/index.php/Neural_Networks\n \n # Number of training points\n m = data.shape[1]\n \n # note that activations at the first layer are equal to the input data:\n # a_i^{(1)} = x_i\n # Compute z values at second layer\n # zSup2 (i.e., z^{(2)}) is the matrix of z values at layer 2\n # zSup2 = W^{(1)} x + b^{(1)}\n zSup2 = W1.dot(data) + numpy.tile(b1, (m, 1)).transpose()\n \n # Compute activations at second layer by mapping z^{(2)} to sigmoid(z^{(2)})\n aSup2 = sigmoid(zSup2)\n \n #Compute z at third layer, z^{(3)}\n zSup3 = W2.dot(aSup2) + numpy.tile(b2, (m, 1)).transpose()\n # z at third layer is the total weighted sum of inputs to unit i in layer 3,\n # hypothesis = activation at the third layer: hypothesis = f(z^{(3)})\n output_activations = sigmoid(zSup3)\n \n return output_activations", "def kperceptron_train(X, y, E, K):\n \n # Initialize dual parameters alpha with zero's.\n alpha = # YOUR CODE HERE\n \n for e in range(E):\n # YOUR CODE HERE\n \n \n \n \n \n return alpha", "def train(self, features):", "def train_conv_net(datasets,\n U,\n word_idx_map,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n dropout_rate=[0.5],\n shuffle_batch=True,\n n_epochs=11, \n batch_size=50, \n lr_decay = 0.95,\n conv_non_linear=\"relu\",\n activations=[Iden],\n sqr_norm_lim=9,\n non_static=True,\n pi_params=[1.,0],\n C=1.0,\n patience=20): \n rng = np.random.RandomState(3435)\n # 其实为句子的长度sent_len\n img_h = len(datasets[0][0])-1\n filter_w = img_w \n feature_maps = hidden_units[0]\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs:\n filter_shapes.append((feature_maps, 1, filter_h, filter_w))\n # 在img_h×img_w大小的图片上进行s=1,f=f_h×f_w的卷积操作时,\n # 所得的卷积结果图大小为(img_h-f_h+1)×(img_w-f_w+1)\n # 然后经过大小为(img_h-f_h+1)×(img_w-f_w+1)的池化层后,就只剩下一个“点”了\n pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1))\n # [('image shape', 61, 300), ('filter shape', [(100, 1, 3, 300), (100, 1, 4, 300), (100, 1, 5, 300)]),\n # ('hidden_units', [100, 2]), ('dropout', [0.4]), ('batch_size', 50), ('non_static', True),\n # ('learn_decay', 0.95), ('conv_non_linear', 'relu'), ('non_static', True), ('sqr_norm_lim', 9),\n # ('shuffle_batch', True), ('pi_params', [0.95, 0]), ('C', 6.0)]\n parameters = [(\"image shape\",img_h,img_w), (\"filter shape\",filter_shapes), (\"hidden_units\",hidden_units),\n (\"dropout\",dropout_rate), (\"batch_size\",batch_size), (\"non_static\",non_static),\n (\"learn_decay\",lr_decay), (\"conv_non_linear\",conv_non_linear), (\"non_static\",non_static),\n (\"sqr_norm_lim\",sqr_norm_lim), (\"shuffle_batch\",shuffle_batch), (\"pi_params\",pi_params),\n (\"C\",C)]\n print(parameters) \n \n #define model architecture\n index = T.lscalar()\n # shape=([sent_sum|batch_size], [sent_len|img_h]): 即共有sent_sum句话,每句由sent_len个单词的id组成\n x = T.matrix('x')\n # shape=(sent_sum, 1) \n y = T.ivector('y')\n # shape=(vocal_size, word_size)\n Words = theano.shared(value = U, name = \"Words\")\n zero_vec_tensor = T.vector()\n zero_vec = np.zeros(img_w)\n set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))], allow_input_downcast=True)\n # x.flatten(): 将 x 按行展开\n # shape=(sent_sum,1,sent_len,word_size)\n # 
对应于图像,其意思即为:共有sent_sum张图像,每张图像的通道为1且大小为sent_len×word_size\n layer0_input = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((x.shape[0], 1, x.shape[1], Words.shape[1]))\n conv_layers = []\n layer1_inputs = []\n # 第1层输入有filter_hs种卷积核\n for i in xrange(len(filter_hs)):\n # value=[filter_sum,filter_layer,filter_h,filter_w]\n # 即共有filter_sum个卷积核,每个卷积核的大小为word_h×word_w且层数/通道为filter_layer\n filter_shape = filter_shapes[i]\n pool_size = pool_sizes[i]\n # image_shape is actually the shape of input\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input, image_shape=(batch_size, 1, img_h, img_w),\n filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)\n # flatten(axis):axis>0, 即将tensor从axis维度开始的所有维度进行“坍缩”,具体如下\n # conv_layer.output: shape=(sent_sum,filter_sum)\n # layer1_input: shape=(sent_sum,filter_sum)\n layer1_input = conv_layer.output.flatten(2)\n conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n # shape=(sent_sum, filter_sum*len(filter_hs)=300)\n layer1_input = T.concatenate(layer1_inputs, 1)\n hidden_units[0] = feature_maps*len(filter_hs)\n # 实际上,这里的CNN仅有两层:input-conv(-max_pool)-output\n classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)\n \n # build the feature of BUT-rule\n # shape=([sent_sum|batch_size], [sent_len|img_h]): 即共有sent_sum句话,每句由sent_len个单词的id组成\n f_but = T.fmatrix('f_but')\n # shape=(batch_size,1)\n f_but_ind = T.fmatrix('f_ind') # indicators\n f_but_layer0_input = Words[T.cast(f_but.flatten(),dtype=\"int32\")].reshape((f_but.shape[0],1,f_but.shape[1],Words.shape[1]))\n f_but_pred_layers = []\n for conv_layer in conv_layers:\n # shape=(batch_size, filter_sum=filter_shape[0], 1, 1)\n # after flatten: shape=(batch_size, filter_sum)\n f_but_layer0_output = conv_layer.predict(f_but_layer0_input, batch_size)\n f_but_pred_layers.append(f_but_layer0_output.flatten(2))\n # shape=(batch_size, filter_sum*len(filter_hs)=300)\n f_but_layer1_input = T.concatenate(f_but_pred_layers, 1)\n # shape=(batch_size, class=2)\n f_but_y_pred_p = classifier.predict_p(f_but_layer1_input)\n # shape=(batch_size, label+class=1+2=3)\n f_but_full = T.concatenate([f_but_ind,f_but_y_pred_p], axis=1) # batch_size x 1 + batch_size x K\n f_but_full = theano.gradient.disconnected_grad(f_but_full)\n\n # add logic layer\n nclasses = 2\n rules = [FOL_But(nclasses, x, f_but_full)]\n rule_lambda = [1]\n new_pi = get_pi(cur_iter=0, params=pi_params)\n logic_nn = LogicNN(rng, input=x, network=classifier, rules=rules, rule_lambda=rule_lambda, pi=new_pi, C=C)\n \n # define parameters of the model and update functions using adadelta\n # list\n params_p = logic_nn.params_p\n for conv_layer in conv_layers:\n # append list\n params_p += conv_layer.params\n if non_static:\n #if word vectors are allowed to change, add them as model parameters\n params_p += [Words]\n # 公式 (2)——objective function\n cost_p = logic_nn.negative_log_likelihood(y) \n dropout_cost_p = logic_nn.dropout_negative_log_likelihood(y) \n grad_updates_p = sgd_updates_adadelta(params_p, dropout_cost_p, lr_decay, 1e-6, sqr_norm_lim)\n \n # shuffle dataset and assign to mini batches. 
if dataset size is not a multiple of mini batches, replicate\n # extra data (at random)\n np.random.seed(3435)\n # training data\n if datasets[0].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[0].shape[0] % batch_size\n # shuffle both train data and features\n permutation_order = np.random.permutation(datasets[0].shape[0])\n train_set = datasets[0][permutation_order]\n extra_data = train_set[:extra_data_num]\n new_data=np.append(datasets[0],extra_data,axis=0)\n new_fea = {}\n train_fea = datasets[3]\n for k in train_fea.keys():\n train_fea_k = train_fea[k][permutation_order]\n extra_fea = train_fea_k[:extra_data_num]\n new_fea[k] = np.append(train_fea[k],extra_fea,axis=0)\n train_text = datasets[6][permutation_order]\n extra_text = train_text[:extra_data_num]\n new_text=np.append(datasets[6],extra_text,axis=0)\n else:\n new_data = datasets[0]\n new_fea = datasets[3]\n new_text = datasets[6]\n # shuffle both training data and features\n permutation_order = np.random.permutation(new_data.shape[0])\n new_data = new_data[permutation_order]\n for k in new_fea.keys():\n new_fea[k] = new_fea[k][permutation_order]\n new_text = new_text[permutation_order]\n n_batches = new_data.shape[0] / batch_size\n n_train_batches = n_batches\n train_set = new_data\n train_set_x, train_set_y = shared_dataset((train_set[:,:img_h],train_set[:,-1]))\n train_fea = new_fea\n train_fea_but_ind = train_fea['but_ind'].reshape([train_fea['but_ind'].shape[0],1])\n train_fea_but_ind = shared_fea(train_fea_but_ind)\n for k in new_fea.keys():\n if k!='but_text':\n train_fea[k] = shared_fea(new_fea[k])\n\n # val data\n if datasets[1].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[1].shape[0] % batch_size\n # shuffle both val data and features\n permutation_order = np.random.permutation(datasets[1].shape[0])\n val_set = datasets[1][permutation_order]\n extra_data = val_set[:extra_data_num]\n new_val_data=np.append(datasets[1],extra_data,axis=0)\n new_val_fea = {}\n val_fea = datasets[4]\n for k in val_fea.keys():\n val_fea_k = val_fea[k][permutation_order]\n extra_fea = val_fea_k[:extra_data_num]\n new_val_fea[k] = np.append(val_fea[k],extra_fea,axis=0)\n val_text = datasets[7][permutation_order]\n extra_text = val_text[:extra_data_num]\n new_val_text = np.append(datasets[7],extra_text,axis=0)\n else:\n new_val_data = datasets[1]\n new_val_fea = datasets[4]\n new_val_text = datasets[7]\n val_set = new_val_data\n val_set_x, val_set_y = shared_dataset((val_set[:,:img_h],val_set[:,-1]))\n n_batches = new_val_data.shape[0] / batch_size\n n_val_batches = n_batches\n val_fea = new_val_fea\n val_fea_but_ind = val_fea['but_ind'].reshape([val_fea['but_ind'].shape[0],1])\n val_fea_but_ind = shared_fea(val_fea_but_ind)\n for k in val_fea.keys():\n if k!='but_text':\n val_fea[k] = shared_fea(val_fea[k])\n\n # test data\n test_set_x = datasets[2][:,:img_h] \n test_set_y = np.asarray(datasets[2][:,-1],\"int32\")\n test_fea = datasets[5]\n test_fea_but_ind = test_fea['but_ind']\n test_fea_but_ind = test_fea_but_ind.reshape([test_fea_but_ind.shape[0],1])\n test_text = datasets[8]\n\n ### compile theano functions to get train/val/test errors\n val_model = theano.function([index], logic_nn.errors(y),\n givens={\n x: val_set_x[index * batch_size: (index + 1) * batch_size],\n y: val_set_y[index * batch_size: (index + 1) * batch_size],\n f_but: val_fea['but'][index * batch_size: (index + 1) * batch_size],\n f_but_ind: val_fea_but_ind[index * batch_size: (index + 1) * batch_size,:] },\n 
allow_input_downcast=True,\n on_unused_input='warn')\n \n test_model = theano.function([index], logic_nn.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size],\n f_but: train_fea['but'][index * batch_size: (index + 1) * batch_size],\n f_but_ind: train_fea_but_ind[index * batch_size: (index + 1) * batch_size,:]},\n allow_input_downcast=True,\n on_unused_input='warn')\n\n train_model = theano.function([index], cost_p, updates=grad_updates_p,\n givens={\n x: train_set_x[index*batch_size:(index+1)*batch_size],\n y: train_set_y[index*batch_size:(index+1)*batch_size],\n f_but: train_fea['but'][index*batch_size:(index+1)*batch_size],\n f_but_ind: train_fea_but_ind[index*batch_size:(index+1)*batch_size,:]},\n allow_input_downcast = True,\n on_unused_input='warn')\n\n ### setup testing\n test_size = test_set_x.shape[0]\n print('test size ', test_size) \n test_pred_layers = []\n test_layer0_input = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1]))\n f_but_test_pred_layers = []\n f_but_test_layer0_input = Words[T.cast(f_but.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1]))\n for conv_layer in conv_layers:\n test_layer0_output = conv_layer.predict(test_layer0_input, test_size)\n test_pred_layers.append(test_layer0_output.flatten(2))\n f_but_test_layer0_output = conv_layer.predict(f_but_test_layer0_input, test_size)\n f_but_test_pred_layers.append(f_but_test_layer0_output.flatten(2))\n test_layer1_input = T.concatenate(test_pred_layers, 1)\n f_but_test_layer1_input = T.concatenate(f_but_test_pred_layers, 1)\n f_but_test_y_pred_p = classifier.predict_p(f_but_test_layer1_input)\n f_but_test_full = T.concatenate([f_but_ind,f_but_test_y_pred_p],axis=1) # Ns x 1 + Ns x K\n\n # transform to shared variables\n test_set_x_shr, test_set_y_shr = shared_dataset((test_set_x,test_set_y))\n\n test_q_y_pred, test_p_y_pred = logic_nn.predict(test_layer1_input,\n test_set_x_shr,\n [f_but_test_full])\n test_q_error = T.mean(T.neq(test_q_y_pred, y))\n test_p_error = T.mean(T.neq(test_p_y_pred, y))\n test_model_all = theano.function([x,y,f_but,f_but_ind],\n [test_q_error, test_p_error], allow_input_downcast = True,\n on_unused_input='warn')\n \n ### start training over mini-batches\n print('... 
training')\n epoch = 0\n batch = 0\n best_val_q_perf = 0\n val_p_perf = 0\n val_q_perf = 0\n cost_epoch = 0 \n stop_count = 0\n while (epoch < n_epochs):\n start_time = time.time()\n epoch = epoch + 1\n # train\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n batch = batch + 1\n new_pi = get_pi(cur_iter=batch*1./n_train_batches, params=pi_params)\n logic_nn.set_pi(new_pi)\n cost_epoch = train_model(minibatch_index)\n set_zero(zero_vec)\n else:\n for minibatch_index in xrange(n_train_batches):\n batch = batch + 1\n new_pi = get_pi(cur_iter=batch*1./n_train_batches, params=pi_params)\n logic_nn.set_pi(new_pi)\n cost_epoch = train_model(minibatch_index) \n set_zero(zero_vec)\n # eval\n train_losses = [test_model(i) for i in xrange(n_train_batches)]\n train_losses = np.array(train_losses)\n train_q_perf = 1 - np.mean(train_losses[:,0])\n train_p_perf = 1 - np.mean(train_losses[:,1])\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_losses = np.array(val_losses)\n val_q_perf = 1 - np.mean(val_losses[:,0])\n val_p_perf = 1 - np.mean(val_losses[:,1])\n print('epoch: %i, training time: %.2f secs; (q): train perf: %.4f %%, val perf: %.4f %%; (p): train perf: %.4f %%, val perf: %.4f %%' % \\\n (epoch, time.time()-start_time, train_q_perf * 100., val_q_perf*100., train_p_perf * 100., val_p_perf*100.))\n test_loss = test_model_all(test_set_x,test_set_y,test_fea['but'],test_fea_but_ind)\n test_loss = np.array(test_loss)\n test_perf = 1 - test_loss\n print('test perf: q %.4f %%, p %.4f %%' % (test_perf[0]*100., test_perf[1]*100.))\n if val_q_perf > best_val_q_perf:\n best_val_q_perf = val_q_perf\n ret_test_perf = test_perf\n stop_count = 0\n else:\n stop_count += 1\n if stop_count == patience:\n break\n return ret_test_perf", "def transition_layer(X, nb_filters, compression):\n He = K.initializers.he_normal()\n layer = K.layers.BatchNormalization()(X)\n layer = K.layers.Activation('relu')(layer)\n nb_filters = int(nb_filters * compression)\n layer = K.layers.Conv2D(filters=nb_filters, kernel_size=1, padding='same',\n kernel_initializer=He)(layer)\n layer = K.layers.AveragePooling2D(pool_size=2, padding='same')(layer)\n return layer, nb_filters", "def _prepare_for_train(self, lGraph, lGraph_vld):\n traceln('ECN Training ', self.sName)\n traceln(\"\\t- computing features on training set\")\n traceln(\"\\t\\t #nodes=%d #edges=%d \" % Graph.getNodeEdgeTotalNumber(lGraph))\n chronoOn()\n \n lX, lY = self.get_lX_lY(lGraph)\n self._computeModelCaracteristics(lX) # we discover here dynamically the number of features of nodes and edges\n # self._tNF_EF contains the number of node features and edge features\n traceln(\"\\t\\t %s\" % self._getNbFeatureAsText())\n traceln(\"\\t [%.1fs] done\\n\" % chronoOff())\n nb_class = len(lGraph[0].getLabelNameList()) #Is it better to do Y.shape ?\n traceln(\"\\t- %d classes\" % nb_class)\n \n traceln(\"\\t- retrieving or creating model...\")\n\n self.model_config['node_dim'] = self._tNF_EF[0]\n self.model_config['edge_dim'] = self._tNF_EF[1]\n self.model_config['nb_class'] = nb_class\n\n if False: \n with open ('linear_reg', 'wb') as save_file:\n pickle.dump((lX,lY), save_file, pickle.HIGHEST_PROTOCOL)\n \n #This converts the lX,lY in the format necessary for GCN Models\n gcn_graph = self.convert_lX_lY_to_GCNDataset(lX,lY,training=True)\n\n #Save the label Binarizer for prediction usage\n fd_lb =open(self.getlabelBinarizerFilename(),'wb')\n pickle.dump(self.labelBinarizer,fd_lb)\n fd_lb.close()\n\n #TODO Save the 
validation set too to reproduce experiments\n random.shuffle(gcn_graph)\n \n if lGraph_vld:\n gcn_graph_train = gcn_graph\n lX_vld, lY_vld = self.get_lX_lY(lGraph_vld)\n gcn_graph_val = self.convert_lX_lY_to_GCNDataset(lX_vld, lY_vld, test=True)\n del lX_vld, lY_vld\n else:\n #Get a validation set from the training set\n split_idx = max(1, int(self.model_config['ratio_train_val'] * len(gcn_graph)))\n traceln(\" - using %d train graphs as validation graphs\" % split_idx)\n gcn_graph_train = []\n gcn_graph_val = []\n gcn_graph_val.extend(gcn_graph[:split_idx])\n gcn_graph_train.extend(gcn_graph[split_idx:])\n traceln(\"%d training graphs -- %d validation graphs\"%(len(gcn_graph_train), len(gcn_graph_val)))\n self._cleanTmpCheckpointFiles()\n\n return gcn_graph_train, gcn_graph_val", "def _decoder(self, z):\n nn = fully_connected(z,50,activation_fn=tf.nn.relu)\n nn = fully_connected(nn,100,activation_fn=tf.nn.relu)\n f = fully_connected(nn,self._ndims,activation_fn=tf.nn.sigmoid)\n ####### Implementation Here ######\n return f", "def train_model(pattern, batch_size, epochs):\r\n\r\n # hyperparameters\r\n embedding_dim = 128\r\n filter_sizes = [3, 4, 5]\r\n num_filters = 512\r\n drop = 0.5\r\n\r\n #load data\r\n print('Loading data')\r\n x, y, _, vocabulary_inv = load_data(pattern)\r\n sequence_length = x.shape[1]\r\n vocabulary_size = len(vocabulary_inv)\r\n\r\n #split data into training data, testing data and validation data\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=42)\r\n x_train, x_validation, y_train, y_validation = train_test_split(x_train, y_train, test_size=0.1, random_state=42)\r\n\r\n # save training and test data for predicting testing data and other training methods\r\n np.savetxt('splitted_data/' + pattern + 'x_train.txt', x_train)\r\n np.savetxt('splitted_data/' + pattern + 'y_train.txt', y_train)\r\n np.savetxt('splitted_data/' + pattern + 'x_test.txt', x_test)\r\n np.savetxt('splitted_data/' + pattern + 'y_test.txt', y_test)\r\n\r\n # build CNN model layer by layer\r\n print(\"Creating Model...\")\r\n inputs = Input(shape=(sequence_length,), dtype='int32')\r\n embedding = Embedding(input_dim=vocabulary_size, output_dim=embedding_dim, input_length=sequence_length)(inputs)\r\n reshape = Reshape((sequence_length,embedding_dim,1))(embedding)\r\n\r\n conv_0 = Conv2D(num_filters, kernel_size=(filter_sizes[0], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_1 = Conv2D(num_filters, kernel_size=(filter_sizes[1], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n conv_2 = Conv2D(num_filters, kernel_size=(filter_sizes[2], embedding_dim), padding='valid', kernel_initializer='normal', activation='relu')(reshape)\r\n\r\n maxpool_0 = MaxPool2D(pool_size=(sequence_length - filter_sizes[0] + 1, 1), strides=(1,1), padding='valid')(conv_0)\r\n maxpool_1 = MaxPool2D(pool_size=(sequence_length - filter_sizes[1] + 1, 1), strides=(1,1), padding='valid')(conv_1)\r\n maxpool_2 = MaxPool2D(pool_size=(sequence_length - filter_sizes[2] + 1, 1), strides=(1,1), padding='valid')(conv_2)\r\n\r\n concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])\r\n flatten = Flatten()(concatenated_tensor)\r\n dropout = Dropout(drop)(flatten)\r\n output = Dense(units=2, activation='softmax')(dropout)\r\n\r\n # create the model\r\n model = Model(inputs=inputs, outputs=output)\r\n\r\n #checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', 
monitor='val_acc', verbose=1, save_best_only=True, mode='auto')\r\n adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # optimizer\r\n\r\n model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n # train the model by fitting the data into the model\r\n print(\"Training Model\")\r\n model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_validation, y_validation))\r\n\r\n # display the structure of model\r\n model.summary()\r\n\r\n #save the model\r\n print(\"Save model to disk\")\r\n model_json = model.to_json()\r\n with open(pattern +\"model.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n #serialize weights to HDF5\r\n model.save_weights(pattern + \"model.hdf5\")", "def kalman_filter(y, model, everything=False):\n TT, RR, C, ZZ, D, QQ, EE = [v.value for v in model.System.values()]\n\n if model.model_type == 'HANK':\n # Adjust TT to account for continuous time discretization\n TT = np.eye(TT.shape[0]) + TT * 1/model.settings['state_simulation_freq'].value\n track_lag = model.settings['track_lag'].value\n\n TT, RR, C= transform_transition_matrices(model, TT, RR, C, track_lag=track_lag)\n\n Ns = TT.shape[0] ## Num. of states\n Ny, Nt = y.shape ## Periods\n\n x_forecast = np.zeros([Ns, Nt])\n P_forecast = np.zeros([Ns, Ns, Nt])\n x_filter = np.zeros([Ns, Nt])\n P_filter = np.zeros([Ns, Ns, Nt])\n log_lik_vals = np.zeros(Nt)\n\n x_0, P_0 = initial_state(TT, RR, C, QQ)\n\n x_t, P_t = x_0, P_0\n\n for t in range(Nt):\n ## Forecast\n x_filt, P_filt = x_t, P_t\n x_t = TT @ x_filt + C\n P_t = TT @ P_filt @ TT.T + RR @ QQ @ RR.T\n x_forecast[:, t] = x_t\n P_forecast[:,:,t] = P_t\n\n ## Update\n y_obs = y[:, t]\n x_pred, P_pred = x_t, P_t\n y_pred = ZZ @ x_pred + D\n V_pred = ZZ @ P_pred @ ZZ.T + EE\n V_pred = (V_pred + V_pred.T)/2\n V_pred_inv = np.linalg.inv(V_pred)\n diff_y = y_obs - y_pred\n PZV = P_pred.T @ ZZ.T @ V_pred_inv\n x_t = x_pred + PZV @ diff_y\n P_t = P_pred - PZV @ ZZ @ P_pred\n x_filter[:, t] = x_t\n P_filter[:, :, t] = P_t\n\n log_lik_vals[t] = -(Ny * np.log(2 * np.pi) + np.log(np.linalg.det(V_pred)) \\\n + diff_y.T @ V_pred_inv @ diff_y) * 0.5\n\n if everything:\n return log_lik_vals, x_forecast, P_forecast, x_filter, P_filter, x_0, P_0, x_t, P_t\n\n return log_lik_vals", "def inception_block(A_prev, filters):\n initializer = K.initializers.he_normal(seed=None)\n\n F1 = K.layers.Conv2D(filters=filters[0],\n kernel_size=(1, 1),\n padding='same',\n activation='relu',\n kernel_initializer=initializer,\n )(A_prev)\n\n F3R = K.layers.Conv2D(filters=filters[1],\n kernel_size=(1, 1),\n padding='same',\n activation='relu',\n kernel_initializer=initializer,\n )(A_prev)\n\n F3 = K.layers.Conv2D(filters=filters[2],\n kernel_size=(3, 3),\n padding='same',\n activation='relu',\n kernel_initializer=initializer,\n )(F3R)\n\n F5R = K.layers.Conv2D(filters=filters[3],\n kernel_size=(1, 1),\n padding='same',\n activation='relu',\n kernel_initializer=initializer,\n )(A_prev)\n\n F5 = K.layers.Conv2D(filters=filters[4],\n kernel_size=(5, 5),\n padding='same',\n activation='relu',\n kernel_initializer=initializer,\n )(F5R)\n\n MP = K.layers.MaxPool2D(pool_size=(3, 3),\n strides=(1, 1),\n padding='same')(A_prev)\n\n FPP = K.layers.Conv2D(filters=filters[5],\n kernel_size=(1, 1),\n padding='same',\n activation='relu',\n kernel_initializer=initializer,\n )(MP)\n\n output = K.layers.concatenate([F1, F3, F5, FPP], axis=-1)\n return output", "def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, 
F5, FPP = filters\n kernel = \"he_normal\"\n con1 = K.layers.Conv2D(F1, (1, 1), activation='relu',\n kernel_initializer=kernel)(A_prev)\n\n con2_1 = K.layers.Conv2D(F3R, (1, 1), activation='relu',\n kernel_initializer=kernel)(A_prev)\n con2_2 = K.layers.Conv2D(F3, (3, 3), padding=\"same\", activation='relu',\n kernel_initializer=kernel)(con2_1)\n\n con3_1 = K.layers.Conv2D(F5R, (1, 1), activation='relu',\n kernel_initializer=kernel)(A_prev)\n con3_2 = K.layers.Conv2D(F5, (5, 5), padding=\"same\", activation='relu',\n kernel_initializer=kernel)(con3_1)\n\n pool4 = K.layers.MaxPool2D((3, 3), (1, 1), padding=\"same\")(A_prev)\n con4_2 = K.layers.Conv2D(FPP, (1, 1), (1, 1), activation='relu',\n kernel_initializer=kernel)(pool4)\n\n return K.layers.concatenate([con1, con2_2, con3_2, con4_2])", "def glm_kernelized (x_train, y_train, x_valid, y_valid, x_test, y_test, l=5):\n # construct a larger train dataset (do not need validation, lambda predetermined)\n x = np.vstack((x_train, x_valid))\n y = np.vstack((y_train, y_valid))\n\n # length of dataset x; N\n N = len(x)\n\n # construct gram matrix\n K = np.empty((N,N)) # N by N\n for i in range(N):\n for j in range(i+1):\n # using property of kernels, k(a,b) = k(b,a) for all a, b\n k = _k(x[i],x[j])\n K[i,j] = k\n K[j,i] = k\n \n # find cholesky\n L = np.linalg.cholesky(K+l*np.eye(N))\n\n # construct alpha vector\n z = solve_triangular(L, y, lower=True) # solve the lower triangular matrix\n a = solve_triangular(np.transpose(L),z)\n\n # find k matrix for running test predictions\n K_test = np.zeros((len(x_test),N))\n for i in range(len(x_test)):\n for j in range(N):\n K_test[i][j] = _k(x_test[i],x[j])\n \n # find the test predictions\n y_pred = np.dot(K_test,a)\n error = _rmse(y_pred,y_test)\n print(\"the test error is {}\".format(error))\n\n # plot the predicted vs actual\n _plot2(x_test, y_pred, y_test, \"predicted values\", \"actual values\", \"x values\", \"y values\", \"Mauna Loa with kernelized GLM\")\n\n # plot the kernel\n z = np.linspace(-0.1,0.1,100)\n k1 = np.empty((0,1), float)\n k2 = np.empty((0,1), float)\n for i in range(len(z)):\n k1 = np.vstack((k1,_k(0,z[i])))\n plt.ylabel(\"k\")\n plt.xlabel(\"z\")\n plt.title(\"k vs z\")\n plt.figure()\n plt.plot(z, k1)\n for i in range(len(z)):\n k2 = np.vstack((k2,_k(1,z[i]+1)))\n plt.ylabel(\"k\")\n plt.xlabel(\"z\")\n plt.title(\"k vs z\")\n plt.figure()\n plt.plot(z, k2)\n plt.show()", "def __init__(self, params, model,\n name=\"fully_connected_decoder\", mode='train'):\n super(FullyConnectedSCDecoder, self).__init__(params, model, name, mode)", "def test_kc(dm: pl.LightningDataModule) -> None:\n trainer = pl.Trainer(fast_dev_run=True)\n enc = Encoder(input_shape=(3, 64, 64), initial_hidden_channels=64, levels=3, encoding_dim=128)\n clf = EmbeddingClf(encoding_dim=128, out_dim=2)\n model = KC(\n encoder=enc,\n clf=clf,\n weight_decay=1e-8,\n lr=1e-3,\n )\n trainer.fit(model, datamodule=dm)\n trainer.test(model=model, datamodule=dm)", "def S2_build_embedding_compressor_model():\n input_layer = Input(shape=(1024,))\n x = Dense(128)(input_layer)\n x = ReLU()(x)\n model = Model(inputs=[input_layer], outputs=[x])\n return model", "def pre_train(self, X_train, y_train=None):\n print(\"Pre-training D for a couple of iterations...\", end='')\n sz = X_train.shape[0]//200\n x1 = np.concatenate([np.random.permutation(X_train)[:sz], self.G.predict(z_noise(sz))])\n self.D.fit(x1, [0]*sz + [1]*sz, batch_size=128, nb_epoch=1, verbose=0)\n print(\"done.\")", "def learn(self, Xtrain, ytrain):\n Ktrain = 
None\n\n ### YOUR CODE HERE\n #Creating centers and sending to kernel\n \n self.n = Xtrain[0:self.num_of_center]\n Ktrain = self.kernel(Xtrain, self.n)\n ### END YOUR CODE\n\n self.weights = np.zeros(Ktrain.shape[1],)\n\n ### YOUR CODE HERE\n for i in range(self.iteration):\n self.weights -= (self.learning_rate / (i+1)) * self.kernel_logit_cost_grad(self.weights, Ktrain, ytrain)\n #print(\"Loss: \"+str(self.kernel_logit_cost(self.weights, Ktrain, ytrain)))\n ### END YOUR CODE\n\n self.transformed = Ktrain # Don't delete this line. It's for evaluation.", "def action_trainHam(self, message):\n message.trainClean()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect to the given address.
async def connect(self, address: Tuple[str, int]): ...
[ "def connect(self, addr):\n sock = socket.socket(self.address_family, self.socket_type)\n sock.connect(addr)\n if VERBOSE: print \"Connected to \" + str(addr)\n self.sock = sock", "def connect(addr):\n port = 1\n s = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n s.connect((addr, port))\n\n return(s)", "def connect():\n print(\"Connect to...\")\n address = input(\"address (ip): \")\n if address == \"\":\n address = \"localhost\"\n try:\n port = int(input(\"port: \"))\n connect_to = \"tcp://{}:{}\".format(address, port)\n except:\n print(\"## Error! Should look like '192.168.0.1 5556'\")\n return\n\n start_listener(connect_to)", "def connect(self):\n self.socket.connect((self.host, self.port))\n print(\"Conectado a \" + self.host + \" con el puerto \"+str(self.port)+\"\\n\")", "def connect(self, address=None, suppress_exceptions=False):\n\n if address is not None:\n self.address = address\n\n if self.address is None:\n return False\n\n #self.close()\n\n try:\n self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\n self.sock.connect((self.address, self.port))\n return True\n except bluetooth.BluetoothError as error:\n if str(error) == \"(111, 'Connection refused')\":\n #self.pin = str(input(\"Please enter a pin for the device: \"))\n print(\"Connection refused, has the device been paired?\")\n if self.sock is not None:\n self.sock.close()\n self.sock = None\n if suppress_exceptions:\n print(error)\n return False\n else:\n print(error)\n # raise BluetoothException(error.message)\n return False", "def connect_to_node(self, addr, first_message='hello'):\n log.info('Trying to connect: %s', addr)\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(addr)\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\n self.selector.register(sock, events)\n\n conn = Connection(sock, known=True)\n self.connections[addr] = conn\n self.send(identifier(*addr), {\n 'do': first_message,\n 'listen_addr': self.listen_addr,\n })\n return addr\n except (ConnectionRefusedError, ConnectionAbortedError, TimeoutError) as error:\n log.info('Could not establish connection to %s: %s' % (addr, error))\n return None", "def connect(self, mac_address):\r\n try:\r\n out = self.get_output(\"connect \" + mac_address, 2)\r\n except BluetoothctlError, e:\r\n print(e)\r\n return None\r\n else:\r\n res = self.child.expect([\"Failed to connect\", \"Connection successful\", pexpect.EOF])\r\n success = True if res == 1 else False\r\n return success", "def connect(self):\n\n self.socket.connect(\n (self.server_address, self.server_port)\n )", "def connect(cls, host, port):\n return cls(socket.create_connection((host, port)))", "def connect(self, peers_address: Dict):\n pass", "def connect_telnet(self, ip_address, user='micro', pwd='python'):\n qprint(\"Connecting via telnet to '{}' ...\".format(ip_address))\n b = Board(self.config)\n b.connect_telnet(ip_address, user, pwd)\n self._boards.append(b)\n if not self._default_board: self._default_board = b", "def connect():\n global adb_socket\n if adb_socket is not None:\n raise RuntimeError('connection already existed')\n\n host, port = config.HOST, config.PORT\n\n connection = socket.socket()\n try:\n connection.connect((host, port))\n except ConnectionError as _:\n warn_msg = 'failed when connecting to adb server: {}:{}, retrying ...'.format(host, port)\n warnings.warn(warn_msg)\n reboot_adb_server()\n connect()\n return\n\n adb_socket = connection", "def connect(self):\n\t\tself.stream = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)\n\t\tself.stream.connect((self.host, self.port))\n\t\t# timeout after 5 seconds\n\t\tself.stream.settimeout(5)", "def connect(self):\n self.receiver_socket.bind(self.receiver_address)", "def connect(self, host):\n if not self.app.connect(host):\n command = 'Connect({0})'.format(host).encode(\"utf-8\")\n self.exec_command(command)\n self.last_host = host", "def connect(**kwargs):\n\n global _controller\n\n address = kwargs['address'] if 'address' in kwargs else '127.0.0.1'\n proxy_port = kwargs['proxy_port'] if 'proxy_port' in kwargs else 9050\n ctrl_port = kwargs['ctrl_port'] if 'ctrl_port' in kwargs else 9051\n\n # connect to controller\n try:\n\n _controller = stem.control.Controller.from_port(\n address=address,\n port=ctrl_port\n )\n\n _controller.authenticate()\n\n except Exception as ex:\n\n raise ConnectionErrorException(\n \"failed to connect to control port; %s\" % str(ex)\n )\n\n # connect to proxy\n try:\n\n socks.setdefaultproxy(\n socks.PROXY_TYPE_SOCKS5,\n address,\n proxy_port,\n True\n )\n\n socket.socket = socks.socksocket\n\n except urllib.error.URLError as ex:\n\n raise ConnectionErrorException(\n \"failed to connect to proxy; %s\" % str(ex)\n )", "def connect(cls, uds_address='/var/run/wmediumd.sock'):\n # type: (str) -> None\n if cls.connected:\n raise Exception(\"Already connected to wmediumd server\")\n cls.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n print '*** Connecting to wmediumd server %s' % uds_address\n cls.sock.connect(uds_address)\n cls.connected = True", "def open(self):\n self.socket.connect(self.addr)\n logger.info(\"%s socket connected to %s\", self.name, self.addr)", "def connect(self, peer_address, connection_params=None) -> PeripheralConnectionWaitable:\r\n if peer_address in self.connected_peripherals.keys():\r\n raise exceptions.InvalidStateException(\"Already connected to {}\".format(peer_address))\r\n if self.connecting_peripheral is not None:\r\n raise exceptions.InvalidStateException(\"Cannot initiate a new connection while connecting to another\")\r\n\r\n # Try finding the peer's name in the scan report\r\n name = \"\"\r\n scan_report = self.scanner.scan_report.get_report_for_peer(peer_address)\r\n if scan_report:\r\n name = scan_report.advertise_data.local_name\r\n\r\n if not connection_params:\r\n connection_params = self._default_conn_params\r\n\r\n self.connecting_peripheral = peer.Peripheral(self, peer_address, connection_params, self._default_security_params, name,\r\n self._default_conn_config.write_cmd_tx_queue_size)\r\n periph_connection_waitable = PeripheralConnectionWaitable(self, self.connecting_peripheral)\r\n self.ble_driver.ble_gap_connect(peer_address, conn_params=connection_params,\r\n conn_cfg_tag=self._default_conn_config.conn_tag)\r\n return periph_connection_waitable" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform any handshake if needed. Can be a noop for less complicated protocols.
async def handshake(self) -> None: ...
[ "def handshake(self):\r\n self.stream.write_uchar(3)\r\n c1 = packet.Handshake()\r\n c1.first = 0\r\n c1.second = 0\r\n c1.payload = self.create_random_bytes(1528)\r\n c1.encode(self.stream)\r\n self.stream.flush()\r\n\r\n self.stream.read_uchar()\r\n s1 = packet.Handshake()\r\n s1.decode(self.stream)\r\n\r\n c2 = packet.Handshake()\r\n c2.first = s1.first\r\n c2.second = s1.second\r\n c2.payload = s1.payload\r\n c2.encode(self.stream)\r\n self.stream.flush()\r\n\r\n s2 = packet.Handshake()\r\n s2.decode(self.stream)", "def test_handshake(self):\n cli, svr, p = connectedServerAndClient(\n ServerClass=SecurableProto,\n ClientClass=SecurableProto)\n\n okc = OKCert()\n svr.certFactory = lambda : okc\n\n cli.callRemote(\n amp.StartTLS, tls_localCertificate=okc,\n tls_verifyAuthorities=[PretendRemoteCertificateAuthority()])\n\n # let's buffer something to be delivered securely\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n # once for client once for server\n self.assertEqual(okc.verifyCount, 2)\n L = []\n cli.callRemote(SecuredPing).addCallback(L.append)\n p.flush()\n self.assertEqual(L[0], {'pinged': True})", "def do_extra_handshake(self, request):\n pass # always accept # TODO: do extra login validation?", "def test_protocol_sslv3(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)\n if no_sslv2_implies_sslv3_hello():\n # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs\n try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,\n False, client_options=ssl.OP_NO_SSLv2)", "def _validate_handshake(self, payload: Dict[str, Any]) -> None:\n self.reply(\"HELLO\", payload)\n\n ok = next(self.get_message())\n if ok != \"OK\":\n if ok.startswith(\"ERR\") and \"invalid password\" in ok.lower():\n self.socket.close()\n raise FaktoryAuthenticationError(\n \"Could not connect to Faktory; wrong password\"\n )\n self.socket.close()\n raise FaktoryHandshakeError(\n \"Could not connect to Faktory; expected OK from server, but got '{}'\".format(\n ok\n )\n )\n\n self.log.debug(\"Connected to Faktory\")", "def do_renegotiate(self):\r\n # type: () -> None\r\n if not self._is_handshake_completed:\r\n raise IOError('SSL Handshake was not completed; cannot renegotiate.')\r\n\r\n self._ssl.renegotiate()\r\n self.do_handshake()", "def test_protocol_sslv2(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)\n # SSLv23 client with specific SSL options\n if no_sslv2_implies_sslv3_hello():\n # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs\n 
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv2)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_TLSv1)", "def main():\n handshake()\n send_file()\n teardown_connection()", "def ssl():\n pass", "def handshake():\n global CONNECTION_STATE\n initial_pkt = packet(\"SYN\", 0, 0)\n send_syn(initial_pkt)\n CONNECTION_STATE = \"ESTABLISHED\"\n print(\"HANDSHAKE COMPLETE\")", "def test_protocol_sslv23(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try:\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)\n except OSError as x:\n # this fails on some older versions of OpenSSL (0.9.7l, for instance)\n if support.verbose:\n sys.stdout.write(\n \" SSL2 client to SSL23 server test unexpectedly failed:\\n %s\\n\"\n % str(x))\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')\n\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)\n\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)\n\n # Server with specific SSL options\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,\n server_options=ssl.OP_NO_SSLv3)\n # Will choose TLSv1\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,\n server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,\n server_options=ssl.OP_NO_TLSv1)", "def sock_connect_tls(self, connect):\n\n path = HTTPRequest(connect).path.split(\":\")\n self.sever_address = (path[0], int(path[1]))\n\n self.sock.connect(self.sever_address)\n self.sock = ssl.wrap_socket(\n self.sock,\n keyfile=None,\n certfile=None,\n server_side=False,\n cert_reqs=ssl.CERT_NONE,\n ssl_version=ssl.PROTOCOL_SSLv23,\n )", "def test_protocol_tlsv1(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_TLSv1)", "def perform_handshake(self):\n # TODO throw an exception if we just get a shitload of 0x7. Why does\n # this box suck so much.\n\n # Handshake. 
Do it twice, just 'cause.\n for i in range(2):\n self._send_internal([0x0])\n check = self._receive(1)[0]\n if check != 0x7:\n raise ButtshockError(\"Handshake received 0x%.02x, expected 0x07!\" % (check))\n\n # Send our chosen key over\n #\n # chosen by fair dice roll (oh fuck it no one cares about your xkcd\n # joke it's just 0)\n self._send_check([0x2f, 0x00])\n key_info = self._receive_check(3)\n if key_info[0] != 0x21:\n raise ButtshockError(\"Handshake received 0x%.02x, expected 0x21!\" % (key_info[0]))\n\n # Generate final key here. It's usually 0x55 ^ our_key ^ their_key, but\n # since our key is 0, we can shorten it to 0x55 ^ their_key\n self.key = 0x55 ^ key_info[1]", "def start_with_handshake(self) -> bool:\n timeout = int(datetime.datetime.now().timestamp()) + self.network.config.handshake_timeout\n nonce = self.network.instance_id\n handshake = Handshake.from_config(self.network.config, nonce)\n\n # TODO: gob encoder and decoder... https://github.com/mgeisler/pygob\n # TODO: send out the handshake and get the response\n return False", "def wrap_socket(sock, keyfile=None, certfile=None, server_side=False,\n cert_reqs=CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1,\n ca_certs=None, do_handshake_on_connect=True,\n suppress_ragged_eofs=True, server_hostname=None,\n timeout=None):\n cert_reqs = _openssl_cert_reqs[cert_reqs]\n ssl_version = _openssl_versions[ssl_version]\n\n ctx = OpenSSL.SSL.Context(ssl_version)\n\n if certfile:\n ctx.use_certificate_file(certfile)\n\n if keyfile:\n ctx.use_privatekey_file(keyfile)\n\n if cert_reqs != OpenSSL.SSL.VERIFY_NONE:\n ctx.set_verify(cert_reqs, lambda a, b, err_no, c, d: err_no == 0)\n\n if ca_certs:\n try:\n ctx.load_verify_locations(ca_certs, None)\n except OpenSSL.SSL.Error, e:\n raise ssl.SSLError('Bad ca_certs: %r' % ca_certs, e)\n\n cnx = OpenSSL.SSL.Connection(ctx, sock)\n\n # SNI support\n if server_hostname is not None:\n cnx.set_tlsext_host_name(server_hostname)\n\n cnx.set_connect_state()\n\n # SSL connection timeout doesn't work #7989 , so I'm not able to call:\n # ctx.set_timeout(timeout)\n #\n # The workaround I found was to use select.select and non-blocking sockets\n #\n # https://github.com/andresriancho/w3af/issues/7989\n sock.setblocking(0)\n sock.settimeout(timeout)\n time_begin = time.time()\n\n while True:\n try:\n cnx.do_handshake()\n break\n except OpenSSL.SSL.WantReadError:\n in_fds, out_fds, err_fds = select.select([sock, ], [], [], timeout)\n if len(in_fds) == 0:\n raise ssl.SSLError('do_handshake timed out')\n else:\n conn_time = int(time.time() - time_begin)\n if conn_time > timeout:\n raise ssl.SSLError('do_handshake timed out')\n else:\n pass\n except OpenSSL.SSL.SysCallError as e:\n raise ssl.SSLError(e.args)\n\n sock.setblocking(1)\n return SSLSocket(cnx, sock)", "def handshake(self, origins=None, subprotocols=None, extra_headers=None):\n # Read handshake request.\n try:\n path, headers = yield from read_request(self.reader)\n except Exception as exc:\n raise InvalidHandshake(\"Malformed HTTP message\") from exc\n\n self.request_headers = headers\n self.raw_request_headers = list(headers.raw_items())\n\n get_header = lambda k: headers.get(k, '')\n key = check_request(get_header)\n\n if origins is not None:\n origin = get_header('Origin')\n if not set(origin.split() or ['']) <= set(origins):\n raise InvalidOrigin(\"Origin not allowed: {}\".format(origin))\n\n if subprotocols is not None:\n protocol = get_header('Sec-WebSocket-Protocol')\n if protocol:\n client_subprotocols = [p.strip() for p in 
protocol.split(',')]\n self.subprotocol = self.select_subprotocol(\n client_subprotocols, subprotocols)\n\n headers = []\n set_header = lambda k, v: headers.append((k, v))\n set_header('Server', USER_AGENT)\n if self.subprotocol:\n set_header('Sec-WebSocket-Protocol', self.subprotocol)\n if extra_headers is not None:\n if callable(extra_headers):\n extra_headers = extra_headers(path, self.raw_request_headers)\n if isinstance(extra_headers, collections.abc.Mapping):\n extra_headers = extra_headers.items()\n for name, value in extra_headers:\n set_header(name, value)\n build_response(set_header, key)\n\n self.response_headers = email.message.Message()\n for name, value in headers:\n self.response_headers[name] = value\n self.raw_response_headers = headers\n\n # Send handshake response. Since the status line and headers only\n # contain ASCII characters, we can keep this simple.\n response = ['HTTP/1.1 101 Switching Protocols']\n response.extend('{}: {}'.format(k, v) for k, v in headers)\n response.append('\\r\\n')\n response = '\\r\\n'.join(response).encode()\n self.writer.write(response)\n\n assert self.state == CONNECTING\n self.state = OPEN\n self.opening_handshake.set_result(True)\n\n return path", "def handshakeClientUnknown(self, srpCallback=None, certCallback=None,\n session=None, settings=None, checker=None,\n async=False):\n handshaker = self._handshakeClientAsync(unknownParams=(srpCallback,\n certCallback), session=session, settings=settings,\n checker=checker)\n if async:\n return handshaker\n for result in handshaker:\n pass", "def test_protocol_tlsv1_1(self):\n if support.verbose:\n sys.stdout.write(\"\\n\")\n try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')\n if hasattr(ssl, 'PROTOCOL_SSLv2'):\n try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)\n if hasattr(ssl, 'PROTOCOL_SSLv3'):\n try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)\n try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,\n client_options=ssl.OP_NO_TLSv1_1)\n\n try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')\n try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)\n try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
\param combobox gtk.ComboBox instance or gtk.ComboBoxEntry, if second then use_completion can be used
\param answers list of tuples (value, string), string will be displayed in combobox, value will be returned by \ref get_value
\param none_answer value for returning if empty item is selected
\param checkbutton gtk.ToggleButton instance
def __init__(self, combobox, answers = None, none_answer = None, checkbutton = None, use_completion = True):
    self.checkbutton = checkbutton
    self.combobox = combobox
    self.none_answer = none_answer
    if not (use_completion and isinstance(combobox.get_child(), gtk.Entry)):
        c = gtk.CellRendererText()
        self.combobox.pack_start(c)
        self.combobox.add_attribute(c, "text", 1)
    self.use_completion = use_completion
    if answers != None:
        self.update_answers(answers, none_answer)
[ "def update_answers(self, answers, none_answer = None):\n if answers == None:\n return\n if len(answers) == 0:\n m = gtk.ListStore(int, str)\n self.combobox.set_model(m)\n if self.use_completion and isinstance(self.combobox.get_child(), gtk.Entry):\n self.combobox.get_child().set_completion(None)\n return\n self.none_answer = none_answer\n val = self.get_value()\n m = gtk.ListStore(isinstance(answers[0][0], basestring) and str or type(answers[0][0]), str)\n for a in answers:\n m.append(a)\n if none_answer != None:\n m.append((none_answer, \"\"))\n self.combobox.set_model(m)\n if self.use_completion and isinstance(self.combobox.get_child(), gtk.Entry):\n ent = self.combobox.get_child()\n completion = gtk.EntryCompletion()\n completion.set_model(m)\n completion.set_text_column(1)\n completion.set_inline_completion(True)\n ent.set_completion(completion)\n self.combobox.set_entry_text_column(1)\n if val != None:\n fnd = find_in_list(lambda a: a[0] == val, answers)\n if fnd != None:\n self.combobox.set_active(fnd)", "async def get_multichoice_answer(client, ctx, choices: dict, question: str, timeout: int = 120,\n check: Optional[Callable[..., bool]] = None):\n def check_msg(m):\n if m.author.id != ctx.author.id:\n return False\n if m.channel != ctx.channel:\n return False\n\n return m.content == 'c' or (min_choice <= representsInt(m.content) <= max_choice)\n\n value_type = set(type(k) for k in choices.keys())\n if value_type != {int}:\n raise ValueError('The keys of choices must be integer.')\n\n max_choice = max(choices, key=int)\n min_choice = min(choices, key=int)\n if min_choice < 0:\n raise ValueError('Min value in choices dict could not be smaller than 0')\n\n question_msg = await ctx.channel.send(question)\n\n try:\n # await self.bot.loop.create_task(self.bot.wait_for('message', check=check, timeout=60))\n choice = await client.wait_for(\"message\", check=(check_msg if check is None else check), timeout=timeout)\n except asyncio.TimeoutError as err:\n raise err\n except commands.UserInputError as err:\n raise err\n\n choice_int = representsInt(choice.content)\n # if the result of choice after casting to int is smaller than zero,\n # user has been provided c meaning to cancel the command\n return None if choice_int < 0 else choices[choice_int], (choice, question_msg)", "def CB_checkbox(self):\n\n obj = self.sender()\n if obj == self.gvars.gbox_align.checkBox_align_centers:\n if obj.isChecked(): self.set_uvar('align_centers', 'yes')\n else: self.set_uvar('align_centers', 'no')\n elif obj == self.gvars.gbox_other.checkBox_giant_move:\n if obj.isChecked(): self.set_uvar('giant_move', 'yes')\n else: self.set_uvar('giant_move', 'no')\n elif obj == self.gvars.gbox_other.checkBox_add_edge:\n if obj.isChecked(): self.set_uvar('add_edge', 'yes')\n else: self.set_uvar('add_edge', 'no')\n elif obj == self.gvars.gbox_other.checkBox_anat_has_skull:\n if obj.isChecked(): self.set_uvar('anat_has_skull', 'yes')\n else: self.set_uvar('anat_has_skull', 'no')\n\n else: print \"** CB_checkbox: unknown sender\"", "def check_buttons(self):\n\n check_options = []\n for var in self.__info.get_keys():\n\n # \"mode\" settings will get handled as radio buttons.\n if var != \"mode\":\n check_options.append(var)\n\n # Check buttons need a variable to work properly so we'll save them\n # into a list.\n self.__var_list = []\n for i in range(len(check_options)):\n\n variable = StringVar()\n self.__var_list.append(variable)\n\n # Using lambda function, each time the check button is pressed\n # it will trigger the 
configure_check function\n cb = Checkbutton(self.__group_options,\n text=check_options[i].capitalize(),\n variable=variable,\n onvalue=\"yes\",\n offvalue=\"no\",\n command=lambda x=check_options[i],i=i:\n self.configure_check(text=x,index=i))\n\n cb.pack(side=TOP, pady=5, anchor=\"w\")\n\n if self.__settings[check_options[i]] == \"yes\":\n cb.select()\n else:\n cb.deselect()", "def test_unhashable_choice_data():\n combo = widgets.ComboBox()\n assert not combo.choices\n combo.choices = (\"a\", \"b\", \"c\")\n assert combo.choices == (\"a\", \"b\", \"c\")\n combo.choices = ((\"a\", [1, 2, 3]), (\"b\", [1, 2, 5]))\n assert combo.choices == ([1, 2, 3], [1, 2, 5])\n combo.choices = (\"x\", \"y\", \"z\")\n assert combo.choices == (\"x\", \"y\", \"z\")\n combo.close()", "def ContainerCombobox(master, labelText, optionList) -> StringVar:\n containerCombobox = Frame(master)\n containerCombobox.pack()\n options = optionList\n _paymentLabel = Label(containerCombobox,text=labelText)\n _paymentLabel.pack(side=LEFT,padx=30)\n _payment = StringVar(master)\n _payment.set(options[0])\n _paymentChooser = ttk.Combobox(containerCombobox,state='readonly',values=options,textvariable=_payment)\n _paymentChooser.pack(side=RIGHT)\n return _payment", "def on_booklist_ok_clicked(self, obj):\n store, the_iter = self.blist.get_selected()\n if the_iter:\n data = self.blist.get_data(the_iter, [0])\n self.selection = self.booklist.get_book(cuni(data[0]))\n if self.dosave:\n self.booklist.save()", "def test1_checkboxes(self):\n dataSets = [\n ('chkVowelLength', \"CompareVowelLength\",\n \"\\u0905 \\u0906\"),\n ('chkVowelGlides', \"CompareVowelGlides\",\n \"\\u0905 \\u0910 \\u0914\"),\n ('chkNasals', \"CompareNasals\",\n \"\\u0919 \\u091e \\u0923 \\u0928 \\u092e\"),\n ('chkAspiration', \"CompareAspiration\",\n \"\\u0915 \\u0916\"),\n ('chkPOA', \"ComparePOA\",\n \"\\u0915 \\u0916 \\u0917 \\u0918\"),\n ('chkGeminates', \"CompareGeminates\",\n \"\\u0915 \\u0915\\u094d\\u0915\"),\n ('chkLiquids', \"CompareLiquids\",\n \"\\u0930 \\u0932\"),\n ]\n\n def useDialog(innerSelf):\n for dataSet in dataSets:\n ctrlname, varname, firstline_expected = dataSet\n innerSelf.dlgCtrls.comboScript.setText(\"DEVANAGARI\")\n for chk in innerSelf.dlgCtrls.checkboxVarList:\n chk.ctrl.setState(\n chk.ctrl.getModel().Name == ctrlname)\n innerSelf.getFormResults()\n for chk in innerSelf.dlgCtrls.checkboxVarList:\n self.assertEqual(\n chk.varname == varname,\n bool(innerSelf.userVars.getInt(chk.varname)),\n msg=repr([chk.varname, varname]))\n innerSelf.updateCharCompOpts()\n ctrl = innerSelf.dlgCtrls.txtCharComp\n firstline = ctrl.getText().splitlines()[0]\n self.assertEqual(\n firstline, firstline_expected,\n msg=repr([ctrlname, firstline]))\n innerSelf.evtHandler.actionPerformed(MyActionEvent(\"Close\"))\n\n self.runDlg(useDialog)", "def test_combobox_texts(self):\n # The ComboBox on the sample app has following items:\n # 0. Combo Item 1\n # 1. 
Combo Item 2\n ref_texts = ['Combo Item 1', 'Combo Item 2']\n\n combo_box = self.dlg.ComboBox.find()\n self.assertEqual(combo_box.item_count(), len(ref_texts))\n for t in combo_box.texts():\n self.assertEqual((t in ref_texts), True)\n\n # Mock a 0 pointer to COM element\n combo_box.iface_item_container.FindItemByProperty = mock.Mock(return_value=0)\n self.assertEqual(combo_box.texts(), ref_texts)\n\n # Mock a combobox without \"ItemContainer\" pattern\n combo_box.iface_item_container.FindItemByProperty = mock.Mock(side_effect=uia_defs.NoPatternInterfaceError())\n self.assertEqual(combo_box.texts(), ref_texts)\n\n # Mock a combobox without \"ExpandCollapse\" pattern\n # Expect empty texts\n combo_box.iface_expand_collapse.Expand = mock.Mock(side_effect=uia_defs.NoPatternInterfaceError())\n self.assertEqual(combo_box.texts(), [])", "def OKbutton_click(self):\n win = self.popup\n r = win.showModel()\n\n if r:\n for domain in self.domains:\n count = self.QLines[domain].itemAt(0).count()\n items = [self.QLines[domain].itemAt(0).itemAt(i).itemAt(0).widget().text() for i in range(count)]\n self.combobox_opt[domain] = items\n\n with open(parameter.json_path, 'w') as combo_json:\n json.dump(self.combobox_opt, combo_json)", "def checkbox_state_change_callback(self, state):\n\n # Unchecked.\n if state == 0:\n self.dlg.comboBox_2.show()\n self.dlg.lineEdit_6.hide()\n # Checked.\n else:\n self.dlg.comboBox_2.hide()\n self.dlg.lineEdit_6.show()", "def pop_combo_box(self):\n get_data.ensure_dbfile()\n\n # clear combo box content\n self.interpreter_combo_box.clear()\n self.interpreter_combo_box.addItem(\"---\")\n\n with open(get_data.DB_FILE, newline=\"\") as cf:\n reader = csv.DictReader(cf, delimiter=\",\")\n for info in reader:\n self.interpreter_combo_box.addItem(\n f'{info[\"PYTHON_VERSION\"]} -> {info[\"PYTHON_PATH\"]}',\n info[\"PYTHON_PATH\"]\n )", "def on_noneButton_toggled(self, checked):\n self.__updateOK()", "def combobox(\n name: str,\n label: Optional[str] = None,\n placeholder: Optional[str] = None,\n value: Optional[str] = None,\n choices: Optional[List[str]] = None,\n error: Optional[str] = None,\n disabled: Optional[bool] = None,\n visible: Optional[bool] = None,\n tooltip: Optional[str] = None,\n) -> Component:\n return Component(combobox=Combobox(\n name,\n label,\n placeholder,\n value,\n choices,\n error,\n disabled,\n visible,\n tooltip,\n ))", "def combinationsListChanged(self, name):\r\n\r\n previousName = self.combBox.currentText()\r\n #Empty the comboBox\r\n self.combBox.clear() \r\n #For each combination name, add it to the comboBox\r\n self.combBox.addItem( self.manager.NONE_NAME )\r\n\r\n for combination in self.manager.combinationsList:\r\n self.combBox.addItem( combination )\r\n\r\n search = self.combBox.findText(previousName)\r\n if search == -1 :\r\n self.combBox.setCurrentIndex( 0 )\r\n else:\r\n self.combBox.setCurrentIndex( search )", "def on_book_ok_clicked(self, obj):\n if self.book.item_list:\n BookDialog(self.dbstate, self.uistate,\n self.book, BookOptions)\n else:\n WarningDialog(_('No items'), _('This book has no items.'))\n return\n self.close()", "def chooseLigand_cb(self, event = None):\n mol = self.chooser.getMolSet()\n if mol: \n try:\n self.chooser.form.withdraw()\n except:\n pass\n dict = self.vf.atorsDict\n #check that the molecule has a torTree \n #OR\n #it is current atorsDict['molecule'] and has outputfile\n ok = 0\n if hasattr(mol, 'torTree'):\n ok = 1\n elif 'molecule' in dict and mol==dict['molecule']:\n ok = hasattr(mol, 'outputfile')\n if not 
ok:\n self.vf.warningMsg('can only select molecule with written autotors output file')\n return 'ERROR'\n self.doitWrapper(mol,log=1,redraw=0)\n try:\n self.chooser.form.withdraw()\n except:\n pass", "def create_combo_box(self, *,\n name: typing.Optional[UIIdentifier] = None,\n items: typing.Optional[typing.Sequence[UILabel]] = None,\n items_ref: typing.Optional[UIIdentifier] = None,\n current_index: typing.Optional[UIIdentifier] = None,\n on_current_index_changed: typing.Optional[UICallableIdentifier] = None,\n **kwargs: typing.Any) -> UIDescriptionResult:\n d: UIDescriptionResult = {\"type\": \"combo_box\"}\n if name is not None:\n d[\"name\"] = name\n if items is not None:\n d[\"items\"] = list(items)\n if items_ref is not None:\n d[\"items_ref\"] = items_ref\n if current_index is not None:\n d[\"current_index\"] = current_index\n if on_current_index_changed is not None:\n d[\"on_current_index_changed\"] = on_current_index_changed\n self.__process_common_properties(d, **kwargs)\n return d", "def callbackFunc(event): # this function used to get selected item from the combo box and load into oid i/p box\r\n choice = quality_combo.get()\r\n choice = int((choice.strip())[0])\r\n\r\n oid.delete(0,1)\r\n oid.insert(0, choice)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
\brief set new answers set
\param answers list of tuples like for \ref __init__
\param none_answer value to return when empty item is selected
def update_answers(self, answers, none_answer = None):
    if answers == None:
        return
    if len(answers) == 0:
        m = gtk.ListStore(int, str)
        self.combobox.set_model(m)
        if self.use_completion and isinstance(self.combobox.get_child(), gtk.Entry):
            self.combobox.get_child().set_completion(None)
        return
    self.none_answer = none_answer
    val = self.get_value()
    m = gtk.ListStore(isinstance(answers[0][0], basestring) and str or type(answers[0][0]), str)
    for a in answers:
        m.append(a)
    if none_answer != None:
        m.append((none_answer, ""))
    self.combobox.set_model(m)
    if self.use_completion and isinstance(self.combobox.get_child(), gtk.Entry):
        ent = self.combobox.get_child()
        completion = gtk.EntryCompletion()
        completion.set_model(m)
        completion.set_text_column(1)
        completion.set_inline_completion(True)
        ent.set_completion(completion)
        self.combobox.set_entry_text_column(1)
    if val != None:
        fnd = find_in_list(lambda a: a[0] == val, answers)
        if fnd != None:
            self.combobox.set_active(fnd)
[ "def test_student_set_answer_base_case() -> None:\n student = Student(1, 'John')\n q1 = MultipleChoiceQuestion(1, \"a b c or d?\", ['a', 'b', 'c', 'd'])\n a1 = Answer('a')\n q2 = CheckboxQuestion(5, \"do you like dogs?\", ['yes', 'no', 'sometimes'])\n a2 = Answer([\"yes\", \"sometimes\"])\n q3 = NumericQuestion(2, \"Pick num\", 1, 5)\n a3 = Answer(3)\n q4 = YesNoQuestion(4, \"T or F\")\n a4 = Answer(True)\n student.set_answer(q1, a1)\n student.set_answer(q2, a2)\n student.set_answer(q3, a3)\n student.set_answer(q4, a4)\n assert len(student._answers) == 4\n assert student._answers[1] == a1\n assert student._answers[5] == a2\n assert student._answers[2] == a3\n assert student._answers[4] == a4\n assert student._answers[1].content == 'a'\n assert student._answers[5].content == [\"yes\", \"sometimes\"]\n assert student._answers[2].content == 3\n assert student._answers[4].content == True", "def nullify_empty_answers(self, data, many):\n def nullify_if_necessary(data):\n if not data['answers']:\n data['answers'] = None\n\n if not many:\n nullify_if_necessary(data)\n else:\n map(nullify_if_necessary, data)\n return data", "def process_answers(self, answers: List[Answer]):\n if not len(answers):\n self.meta[\"empty_pa_calls\"] += 1\n if self.meta[\"empty_pa_calls\"] >= 20:\n self.meta[\"empty_pa_calls\"] = 0\n return self, True\n\n self.meta[\"num_ans\"] += len(answers)\n self.meta[\"process_answers_calls\"] += 1\n logger.debug(\"self.meta = %s\", self.meta)\n logger.debug(\"self.R, self.n = %s, %s\", self.R, self.n)\n\n # fmt: off\n alg_ans = [\n (a[\"head\"], a[\"winner\"],\n a[\"left\"] if a[\"winner\"] == a[\"right\"] else a[\"right\"])\n for a in answers\n ]\n # fmt: on\n self.search.push(alg_ans)\n self.search.embedding = self.opt.embedding()\n self.opt.push(alg_ans)\n if self.meta[\"num_ans\"] < (self.R * self.n) / 10:\n return self, True\n\n # Make sure only valid answers are passed to partial_fit;\n # self.opt.answers_ has a bunch of extra space for new answers\n n_ans = self.opt.meta_[\"num_answers\"]\n\n difficulty = np.log(self.params[\"n\"]) * self.params[\"d\"] * self.params[\"n\"]\n if n_ans / difficulty <= 1:\n max_epochs = 200\n elif n_ans / difficulty <= 3:\n max_epochs = 120\n else:\n max_epochs = 50\n\n # max_epochs above for completely random initializations\n self.opt.set_params(max_epochs=max_epochs)\n\n valid_ans = self.opt.answers_[:n_ans]\n self.opt.fit(valid_ans)\n self.meta[\"model_updates\"] += 1\n return self, True", "def conflateAnswers(answers):\n\n if 'objective' in answers or 'neutral' in answers:\n answers = ['neutral']\n return answers", "def test_student_set_answer_not_valid() -> None:\n student = Student(1, 'John')\n q1 = MultipleChoiceQuestion(1, \"a b c or d?\", ['a', 'b', 'c', 'd'])\n a1 = Answer('z')\n q2 = CheckboxQuestion(5, \"do you like dogs?\", ['yes', 'no', 'sometimes'])\n a2 = Answer('yes')\n q3 = NumericQuestion(2, \"Pick num\", 1, 5)\n a3 = Answer(7)\n q4 = YesNoQuestion(4, \"T or F\")\n a4 = Answer(\"True\")\n student.set_answer(q1, a1)\n student.set_answer(q2, a2)\n student.set_answer(q3, a3)\n student.set_answer(q4, a4)\n assert len(student._answers) == 0\n assert student._answers == {}", "def answerset(answerset_id):\n return render_template('answerset.html', answerset_id=answerset_id, answer_id=[])", "def any_answered(self) -> Set[str]:\n return reduce(set.union, self.answers, initial=set())", "def make_multiple_choice(question, choices, which=1, randomize=True, aota=False, \n nota=False, none_prob=0.2, number=None):\n\n import random\n 
import string\n\n AOTA = \"All of the above\"\n NOTA = \"None of the above\"\n \n # Select right answer if given\n if which > 0:\n correct = choices[which-1]\n elif which == 0:\n correct = AOTA\n else: # which == -1:\n correct = NOTA\n \n # Randomize order of options before appending 'All ...' or 'None ...'\n if randomize:\n random.shuffle(choices)\n\n # Append 'All of the above'\n if aota or which == 0 :\n choices.append(AOTA)\n\n # Append 'None of the above'\n if nota or which == -1:\n choices.append(NOTA)\n\n # Remove the correct answer with probability none_prob\n if which > 0 and nota and none_prob > 0:\n if random.random() <= none_prob:\n choices.remove(correct)\n correct = NOTA\n # if not enough options after removing the correct answer, add ALL\n if (not aota) and len(choices) <= 4:\n choices.insert(len(choices)-1, AOTA)\n \n # get correct answer\n answer = string.ascii_lowercase[choices.index(correct)]\n\n # format if number present\n if number is None:\n blank = \"\\n\"\n else:\n # format the question\n if(number > 9):\n blank = \"\\n \"\n else:\n blank = \"\\n \"\n \n question = str(number) + \". \" + question\n\n # Format the question with options \n for i, choice in enumerate(choices):\n question += blank + string.ascii_lowercase[i] + \") \" + choice\n\n return question, answer", "def clean_data(self, questions, answers):\n answers = [\"<START> \" + answer + \" <END>\" for answer in answers]\n return questions, answers", "def sample_answers(y, product_set, p_idk = 0.1, p_2a = 0.3, p_3a = 0.15):\n # Get set of possible questions available in the product catalog\n question_set = set(product_set[\"PropertyDefinitionId\"].values) # faster\n \n # Get dict of (true) answers available for the target product\n quest_answer_y = algo_utils.get_answers_y(y, product_set) \n result = {}\n \n # For each question sample additional answers \n # or replace true answer by idk if necessary.\n for question in question_set:\n # Sample random number b/w 0 and 1.\n u = random.random()\n # Sample if user says idk\n if u < p_idk:\n result[question] = ['idk'] \n # Else if it is possible sample if user give additional answers.\n elif quest_answer_y[question]=='none': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]]\n elif quest_answer_y[question]=='idk': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]] \n # Giving 2 answers?\n elif u < p_idk+p_2a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=1)\n # If the drawn 2nd answer is the same, redraw one\n while (str(quest_answer_y[question]) in sample.astype(str)): \n sample = np.random.choice(possible, size=1)\n result[question] = np.append([quest_answer_y[question]], sample) \n # Giving 3 answers?\n elif u < p_idk+p_2a+p_3a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=2, replace=False)\n # If the drawn 2nd or 3rd answer is the same, redraw 2 answers\n while (str(quest_answer_y[question]) in sample.astype(str)):\n sample = np.random.choice(possible, size=2)\n result[question] = np.append([quest_answer_y[question]], sample)\n # Else keep only the true answer \n else:\n result[question] = [quest_answer_y[question]] \n return(result)", "def _basicMultiAnswerCreation(self, survey, slide, owner, label):\n if not getSurveyAnswer(survey, slide, c.authuser):\n sa = SurveyAnswer(survey, slide, answer, label)\n result = 'create'\n else:\n sa = editSurveyAnswer(survey, slide, answer, label)\n result = 
'modify'\n return sa, result", "def get_default_question_objects():\n question_list = list()\n question1 = Question(0, 'Choose A, B or C', False)\n question1.add_alternative(0, 'A', True)\n question1.add_alternative(1, 'B', False)\n question1.add_alternative(2, 'C', False)\n question_list.append(question1)\n question2 = Question(1, 'Choose D, E or F', False)\n question2.add_alternative(0, 'D', False)\n question2.add_alternative(1, 'E', False)\n question2.add_alternative(2, 'F', True)\n question_list.append(question2)\n return question_list", "def answer(self, answer):\n if answer is None:\n raise ValueError(\"Invalid value for `answer`, must not be `None`\")\n\n self._answer = answer", "def score_answers(\n self, prompt: Dict[str, np.ndarray], answers: List[Dict[str, np.ndarray]]\n ) -> np.ndarray:", "def get_answers_set(group_answers: list, unique: bool) -> set:\n func = operator.or_ if unique else operator.and_\n return reduce(lambda x, y: func(set(x), set(y)), group_answers, set(group_answers[0]))", "def randomize_answers(self):\n options = [self.answer, self.op1, self.op2, self.op3]\n new_order = randomize(options)\n random_options = {\n \"A\": new_order[0],\n \"B\": new_order[1],\n \"C\": new_order[2],\n \"D\": new_order[3]\n }\n return random_options", "def __init__(self, question):\n self.question = question\n self.responses = []", "def _basicAnswerCreation(self, survey, slide, answer, label = ''):\n if not getSurveyAnswer(survey, slide, c.authuser):\n sa = SurveyAnswer(survey, slide, answer, label)\n result = 'create'\n else:\n sa = editSurveyAnswer(survey, slide, answer, label)\n result = 'modify'\n return sa, result", "def post(self):\n\n # Define variables\n self.user_mc_ans = {}\n self.q_mc_ans = []\n self.q_sa_keywords = []\n self.user_sa_ans = {}\n\n # Add values to the dicts for user answers\n for key, value in list(self.params.items()):\n if key != 'quiz-id':\n if key[0:2] == 'mc':\n self.user_mc_ans[int(key[2:])] = value\n elif key[0:2] == 'sa':\n self.user_sa_ans[int(key[2:])] = value\n try:\n for question in list(self.quizjson[self.params[\"quiz-id\"]]['multiple_choice']):\n self.q_mc_ans.append(question[\"answer\"])\n except KeyError:\n self.q_mc_ans = None\n\n try:\n for question in list(self.quizjson[self.params[\"quiz-id\"]]['short_answer']):\n self.q_sa_keywords.append(question[\"keywords\"])\n except KeyError:\n self.q_sa_keywords = None\n\n self.checkans = Answer(self.q_sa_keywords, self.user_sa_ans, self.user_mc_ans, self.q_mc_ans)\n self.checked_mc = self.checkans.mc_check()\n self.checked_sa = self.checkans.sa_check()\n\n print(\"Short Answer questions \\n ###############################\")\n print(self.checked_sa)\n print(\"Multiple Choice questions \\n ###############################\")\n print(self.checked_mc)\n\n self.write(templateloader.load(\"answertemplate.html\").generate(url=url,quiz=self.quizjson[self.params[\"quiz-id\"]],id=self.params[\"quiz-id\"],mc_answers=self.checked_mc,sa_answers=self.checked_sa))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the softmax function for each row of the input x. It is crucial that this function is optimized for speed because it will be used frequently in later code.
def softmax(x):
    x = x.T - np.max(x.T, axis=0)
    x = np.exp(x) / np.sum(np.exp(x), axis=0)
    return x.T
[ "def softmax(x):\n if len(x.shape) > 1:\n # Matrix\n # substracting max leaves function unchanged due to softmax's invariance to sums by a constant \n # keepdims= True, because broadcasting requires trailing shape entries to match\n x -= np.max(x, axis=1, keepdims=True)\n x = np.exp(x)\n sum_exp_xj = np.sum(x, axis=1, keepdims=True)\n x = np.divide(x, sum_exp_xj)\n else:\n # Vector\n x -= np.max(x)\n x = np.exp(x)\n sum_exp_xj = np.sum(x)\n x = np.divide(x, sum_exp_xj)\n return x", "def stable_softmax(x):\r\n z = x - np.max(x, axis=-1, keepdims=True)\r\n numerator = np.exp(z)\r\n denominator = np.sum(numerator, axis=-1, keepdims=True)\r\n softmax = numerator / denominator\r\n return softmax", "def softmax(data):\n sum = 0.0\n for i in range(len(data)):\n sum += np.exp(data[i])\n for i in range(len(data)):\n data[i] = np.exp(data[i]) / sum", "def softmax(x: np.array, beta=1.0):\n v = np.exp(beta*x)\n return v / np.sum(v)", "def softmax(z: np.ndarray) -> np.ndarray:\n return np.exp(z) / np.sum(np.exp(z), axis=0)", "def softmax(a_arr):\n exp = np.exp(a_arr)\n return exp / np.sum(exp)", "def softmax_minus_max(x):\n\n exp_scores = np.exp(x - np.max(x, axis = 1, keepdims = True))\n probs = exp_scores/np.sum(exp_scores, axis = 1, keepdims = True)\n return probs", "def softmax_derivative(x):\n\n S = softmax(x).reshape(-1,1)\n # return S - np.einsum(\"i...,j...->i...\", S, S)\n return (np.diagflat(S) - np.dot(S, S.T)).sum(axis=1,keepdims=True)", "def softmax(z):\n # TODO: Compute and return softmax(z)\n return np.exp(z) / np.sum(np.exp(z), axis=0)", "def _softmax(x, axis=-1, alpha=1):\n x = alpha * x\n ndim = K.ndim(x)\n if ndim == 2:\n return K.softmax(x)\n elif ndim > 2:\n e = K.exp(x - K.max(x, axis=axis, keepdims=True))\n s = K.sum(e, axis=axis, keepdims=True)\n return e / s\n else:\n raise ValueError('Cannot apply softmax to a tensor that is 1D')", "def __softmax(self, inputs: np.ndarray) -> np.ndarray:\n res = np.exp(inputs)\n sums = res.sum()\n return res / sums", "def softmax(self, output):\n return np.exp(output) / np.sum(np.exp(output), axis=0)", "def masked_softmax(self, x, mask=None):\n if mask is not None:\n mask = mask.float()\n if mask is not None:\n x_masked = x * mask + (1 - 1 / (mask+1e-5))\n else:\n x_masked = x\n x_max = x_masked.max(1)[0]\n x_exp = (x - x_max.unsqueeze(-1)).exp()\n if mask is not None:\n x_exp = x_exp * mask.float()\n return x_exp / x_exp.sum(1).unsqueeze(-1)", "def softmax_tuned(x, tuning):\n\n # verify inputs\n assert(type(tuning) in (int, float))\n assert(type(x) is torch.Tensor)\n assert(x.size() == torch.Size([27]))\n\n # this is the normal softmax function with just the tuning variable added\n x = torch.exp(tuning*x)\n x = x/x.sum()\n\n # verify output shape\n assert(x.size() == torch.Size([27]))\n\n return x", "def LogSoftmax(axis=-1):\n return Fn('LogSoftmax', lambda x: log_softmax(x, axis=axis))", "def softmax_loss_vectorized(W, X, y):\n\n loss = 0.0\n dW = np.zeros_like(W)\n\n N = X.shape[0]\n f = np.dot(X, W)\n f_max = f.max(axis = 1).reshape(N, 1)\n \n f = f - f_max\n \n loss = np.log(np.exp(f).sum(axis = 1)).sum() - f[range(N), y].sum()\n\n total = np.exp(f) / (np.exp(f).sum(axis = 1)).reshape(N, 1)\n total[range(N), y] = total[range(N), y]-1\n dW = np.dot(X.T, total)\n\n loss = loss / N \n dW = dW / N \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return loss, dW", "def softmax_grad(self, X):\r\n return 
self.softmax(X)*(1-self.softmax(X))", "def spatial_softmax(self, x):\n return torch.softmax(x.view(1, self.nclasses, -1), 2).view_as(x)", "def softmax(Z):\n Z_exp = np.exp(Z)\n return Z_exp/np.sum(Z_exp, axis=0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the gradient for the sigmoid function here. Note that for this implementation, the input f should be the sigmoid function value of your original input x.
def sigmoid_grad(f): return f * (1-f)
[ "def sigmoid_derivative(x):\n return x * (1 - x)", "def sigmoidGradient(z):\r\n\r\n g = np.multiply(sigmoid(z), (1. - sigmoid(z)))\r\n return g", "def perf_sigmoid_derivative(x):\n # result = perf_sigmoid(x)\n # return result * (1 - result)\n return x * (1 - x)", "def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE", "def gradient(x):\n\t\tpass", "def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n # fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n original_value = x.copy()[ix]\n x[ix] = original_value + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = original_value - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = original_value # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print (ix, grad[ix], fxph, fxmh)\n it.iternext() # step to next dimension\n\n return grad", "def eval_numerical_gradient(f, x, verbose=False, h=1.e-7):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad", "def dsigmoid(sigmoid_x):\n return sigmoid_x - sigmoid_x**2", "def evaluateGradient(function,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros(shape(x)) \n for i in range(0,len(x)):\n # Set the step on the correct variable.\n h[i] = epsilon\n # Approximate derivative using central difference approximation.\n res[i] = (function(x + h) - function(x - h)) / (2 * epsilon)\n # Reset step for next iteration.\n h[i] = 0.0\n return res", "def softmax_grad(self, X):\r\n return self.softmax(X)*(1-self.softmax(X))", "def f_grad(self, x):\n return np.zeros((x.shape[0]))", "def compute_negative_log_likelihood_gradient(y, tx, w):\n\n gradient = tx.T@(sigmoid(tx@w)-y)\n return gradient", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def gradient_approx(self, x, h = 1e-5):\n\t\tx = np.expand_dims(x, axis=-2)\n\t\tx_forward = x.copy()\n\t\tx_forward = x_forward.repeat(repeats=x_forward.shape[-1], axis=-2)\n\t\tdiag_index = np.arange(x_forward.shape[-1])\n\t\tx_forward[:,diag_index, diag_index] += h\n\t\tgrad_true = self.softmax.activation(x_forward) - self.softmax.activation(x)\n\t\treturn grad_true[:,diag_index,diag_index] / h", "def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)", "def f_grad(self, x):\n gradient = []\n\n for key in self.mean_functions:\n gradient.push(self.mean_functions[key][1](x))\n\n return np.array(gradient)", "def _gradient(self, inputs, labels):\n sens = Tensor(np.array([1.0], inputs.dtype))\n # get grad of loss over x\n out_grad = self._loss_grad(Tensor(inputs), Tensor(labels), sens)\n if isinstance(out_grad, 
tuple):\n out_grad = out_grad[0]\n gradient = out_grad.asnumpy()\n\n if self._is_targeted:\n gradient = -gradient\n return normalize_value(gradient, self._norm_level)", "def gradient(function, x):\n x = np.asarray(x)\n assert x.ndim == 1, \"x must be a vector\"\n x_ad = np.empty(x.shape, dtype=AutoDiffXd)\n for i in range(x.size):\n der = np.zeros(x.size)\n der[i] = 1\n x_ad.flat[i] = AutoDiffXd(x.flat[i], der)\n y_ad = np.asarray(function(x_ad))\n # TODO(eric.cousineau): Consider restricting this in the future to only be\n # a scalar.\n assert y_ad.size == 1 and y_ad.ndim <= 1, (\n \"The output of `function` must be of a scalar or a vector of size 1\")\n y_ad = y_ad.reshape(()) # To scalar.\n return y_ad.item().derivatives()", "def _do_custom_gradients(self, x):\n\n def _f(state, rng, y, weights):\n old_weights, old_state, old_rng = self.weights, self.state, self._rng\n self.weights, self.state, self._rng = weights, state, rng\n res = self.forward(y)\n s = self.state\n self.weights, self.state, self._rng = old_weights, old_state, old_rng\n return res, s\n\n def _f_fwd(state, rng, y, weights):\n old_weights, old_state, old_rng = self.weights, self.state, self._rng\n self.weights, self.state, self._rng = weights, state, rng\n res = self.forward(y)\n s = self.state\n self.weights, self.state, self._rng = old_weights, old_state, old_rng\n return (res, s), (state, rng, y, res, weights, s)\n\n def _f_bwd(residual, grad):\n \"\"\"Custom gradient function.\"\"\"\n state, rng, y, output, weights, new_state = residual\n grad = grad[0] # Ignore dummy gradient wrt state.\n out = self.backward(y, output, grad, weights, state, new_state, rng)\n return (None, None, *out)\n\n do_forward = fastmath.custom_vjp(_f, _f_fwd, _f_bwd, nondiff_argnums=(0, 1))\n\n output, state = do_forward(self.state, self._rng, x, self.weights)\n return output, state", "def activation_derivative(self, x, act_func):\n\n if act_func == 'sigmoid':\n\n return x*(1 - x)\n\n elif act_func == 'tanh':\n\n return 1 - x**2\n\n elif act_func == 'relu':\n\n return 1*(x >= 0)\n\n elif act_func == None:\n return 1\n else:\n print(\"Invalid activation function. Either 'sigmoid', 'tanh', 'relu', or None.\\nExiting...\")\n sys.exit(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gradient check for a function f. f should be a function that takes a single argument and outputs the cost and its gradients. x is the point (numpy array) to check the gradient at
def gradcheck_naive(f, x): rndstate = random.getstate() random.setstate(rndstate) nprndstate = np.random.get_state() np.random.set_state(nprndstate) fx, grad = f(x) # Evaluate function value at original point h = 1e-4 # Iterate over all indexes in x it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite']) while not it.finished: #print("---per check--") ix = it.multi_index ### try modifying x[ix] with h defined above to compute numerical gradients ### make sure you call random.setstate(rndstate) before calling f(x) each time, this will make it ### possible to test cost functions with built in randomness later x[ix] += h random.setstate(rndstate) np.random.set_state(nprndstate) fx1, grad1 = f(x) x[ix] -= 2*h random.setstate(rndstate) np.random.set_state(nprndstate) fx2, grad2 = f(x) numgrad = (fx1 - fx2) / (2*h) x[ix] += h #print("cost:",fx, 'cost(w-h):',fx1, 'cost(w+h):', fx2) # Compare gradients reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix])) if reldiff > 1e-5: print("Gradient check failed.") print("First gradient error found at index %s" % str(ix)) print("Your gradient: %f \t Numerical gradient: %f" % (grad[ix], numgrad)) return else: print("Pass,","Your gradient: %f \t Numerical gradient: %f" % (grad[ix], numgrad)) it.iternext() # Step to next dimension print("Gradient check passed!")
[ "def gradcheck_naive(f, x):\n\n rndstate = random.getstate()\n random.setstate(rndstate)\n fx, grad = f(x) # Evaluate function value at original point\n h = 1e-4 # Do not change this!\n\n # Iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n ix = it.multi_index\n\n # Try modifying x[ix] with h defined above to compute\n # numerical gradients. Make sure you call random.setstate(rndstate)\n # before calling f(x) each time. This will make it possible\n # to test cost functions with built in randomness later.\n\n x[ix] += h\n random.setstate(rndstate)\n f1 = f(x)[0]\n\n x[ix] -= 2*h\n random.setstate(rndstate)\n f2 = f(x)[0]\n\n x[ix] += h\n numgrad = (f1 - f2)/(2 * h)\n\n # Compare gradients\n reldiff = abs(numgrad - grad[ix]) / max(1, abs(numgrad), abs(grad[ix]))\n if reldiff > 1e-5:\n print(\"Gradient check failed.\")\n print(\"First gradient error found at index %s\" % str(ix))\n print(\"Your gradient: %f \\t Numerical gradient: %f\" % (\n grad[ix], numgrad))\n return\n\n it.iternext() # Step to next dimension\n\n print(\"Gradient check passed!\")", "def gradient_check(f, f_grad_x, x, direction=None, verbose=False, precision=1e-4):\n\n if direction is None:\n # initialize random direction\n direction = SparseVector.random(x)\n\n # normalize to be unit vector\n delta = direction * (1.0 / direction.norm2())\n\n # compute slope in direction of delta\n slope = f_grad_x.dot(delta)\n\n for k in range(20):\n slope_hat = (f(x + delta) - f(x)) / delta.norm2()\n diff = abs(slope - slope_hat)\n\n if verbose:\n print '|{} - {}| = {}'.format(slope, slope_hat, diff)\n\n # the diff must be smaller than some percentage of the theoretical slope\n if diff <= abs(slope) * precision:\n return True\n\n # keep halving the length of delta\n delta *= 0.5\n\n return False", "def gradient_checker(self, f, w, min_diff=1e-5):\n random_state = np.random.get_state()\n np.random.set_state(random_state)\n loss, grad = f(w) # Evaluate function value at with some weights vector\n h = 1e-4 # a small value, epsilon\n\n # Iterate over all indexes ix in x to check the gradient.\n it = np.nditer(w, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n iw = it.multi_index\n\n # Modifying w[iw] with h defined above to compute numerical gradients\n eps = np.zeros(w.shape)\n eps[iw] = h\n\n np.random.set_state(random_state)\n loss_plus_eps = f(w + eps)[0]\n\n np.random.set_state(random_state)\n loss_minus_eps = f(w - eps)[0]\n\n numeric_gradient = (loss_plus_eps - loss_minus_eps) / (2 * h)\n\n # Compare gradients\n gradients_diff = abs(numeric_gradient - grad[iw]) / max(1, abs(numeric_gradient), abs(grad[iw]))\n self.assertLessEqual(gradients_diff, min_diff)\n\n it.iternext() # Step to next dimension", "def generic_gradient_checker(X, y, theta, objective_func, gradient_func, epsilon=0.01, tolerance=1e-4):\n true_gradient = gradient_func(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features)\n for index in range(num_features):\n step = np.zeros(num_features)\n step[index] = epsilon\n approx_grad[index] = (objective_func(X, y, theta+step)\\\n\t\t\t\t\t\t\t\t-objective_func(X, y, theta-step))/(2*epsilon)\n\n if(np.linalg.norm(true_gradient-approx_grad)>tolerance):\n return False\n else:\n return True", "def check_grad(fcn,theta0,delta):\n x,dx = fcn(theta0)\n for i in range(len(theta0)):\n theta = theta0.copy()\n theta[i]=theta0[i]+delta\n xp,_ = fcn(theta)\n theta[i]=theta0[i]-delta\n xn,_ = 
fcn(theta)\n est_grad = (xp-xn)/2/delta\n print('Estimate gradient:')\n print(est_grad )\n print('Returned gradient:')\n print(dx[i])\n print('Error:',((est_grad-dx[i])**2).sum())", "def gradient(x):\n\t\tpass", "def gradCheck(l=GRULayer(1, 10)):\n\n def loss(h):\n \"\"\"A dummy loss function; the square error compared to a linspace.\"\"\"\n dh = h - np.linspace(-1, 1, h.shape[0])[:, None, None]\n return 0.5 * np.sum(dh * dh), dh\n\n num_checks = 5\n delta = 1e-5\n n = 20\n x = np.arange(n * 2.0).reshape((n, 1, 2)) # dummy input; batch of size 2, 20 samples per sequence\n h = l.forward(x)\n dh = loss(h)[1]\n dx = l.backward(dh) # analytical gradient\n\n for param, name in zip([x, l.W, l.Wr, l.Wz],\n ['x', 'W', 'Wr', 'Wz']):\n\n print(name)\n a = param if (name == 'x') else param.a # only x is not a Param object\n\n for i in range(num_checks):\n ri = int(np.random.randint(a.size))\n # compute the derivative from definition - evaluate loss at [x+delta] and [x-delta]\n old_val = a.flat[ri]\n a.flat[ri] = old_val + delta\n cg0 = loss(l.forward(x))[0]\n a.flat[ri] = old_val - delta\n cg1 = loss(l.forward(x))[0]\n a.flat[ri] = old_val # reset old value for this parameter\n # fetch both numerical and analytic gradient\n grad_analytic = (dx if (name == 'x') else param.d).flat[ri] # again, treat x differently\n grad_numerical = (cg0 - cg1) / (2 * delta)\n\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n print('%f, %f => %e ' % (grad_numerical, grad_analytic, rel_error))\n # rel_error should be on order of 1e-7 or less", "def getGradient(function):\n def grad(x):\n return evaluateGradient(function,x) \n return grad", "def evaluateGradient(function,x,epsilon = 1e-5):\n h = zeros(shape(x))\n res = zeros(shape(x)) \n for i in range(0,len(x)):\n # Set the step on the correct variable.\n h[i] = epsilon\n # Approximate derivative using central difference approximation.\n res[i] = (function(x + h) - function(x - h)) / (2 * epsilon)\n # Reset step for next iteration.\n h[i] = 0.0\n return res", "def value_and_gradient(f, xs, watch_accessed_variables=True, name=None):\n with tf.name_scope(name, 'value_and_gradient', [xs]):\n is_xs_list_like = isinstance(xs, (tuple, list))\n if not is_xs_list_like:\n xs = [xs]\n xs = [tf.convert_to_tensor(value=x, name='x{}'.format(i))\n for i, x in enumerate(xs)]\n with tf.GradientTape(\n persistent=len(xs) > 1,\n watch_accessed_variables=watch_accessed_variables) as tape:\n for x in xs:\n tape.watch(x)\n y = tf.convert_to_tensor(value=f(*xs), name='y')\n dydx = [tape.gradient(y, x) for x in xs]\n if not is_xs_list_like:\n dydx = dydx[0]\n return y, dydx", "def eval_numerical_gradient(f, x, verbose=False, h=1.e-7):\n\n fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n\n # evaluate function at x+h\n ix = it.multi_index\n oldval = x[ix]\n x[ix] = oldval + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = oldval - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = oldval # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print(ix, grad[ix])\n it.iternext() # step to next dimension\n\n return grad", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n approx_grad = 
np.zeros(num_features) #Initialize the gradient we approximate\n #TODO", "def compute_gradient_of_cost_function(x, y, w):\n\n # evaluate hypotesis function\n hypothesis_function = eval_hypothesis_function(w, x)\n residual = np.subtract(hypothesis_function, y)\n\n gradient_cost_function = np.dot(residual,x)\n\n return gradient_cost_function", "def check_cost(f):\n return (f.cost(f.min) - f.value) < tol", "def eval_numerical_gradient(f, x, verbose=True, h=0.00001):\n\n # fx = f(x) # evaluate function value at original point\n grad = np.zeros_like(x)\n # iterate over all indexes in x\n it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n # evaluate function at x+h\n ix = it.multi_index\n original_value = x.copy()[ix]\n x[ix] = original_value + h # increment by h\n fxph = f(x) # evalute f(x + h)\n x[ix] = original_value - h\n fxmh = f(x) # evaluate f(x - h)\n x[ix] = original_value # restore\n\n # compute the partial derivative with centered formula\n grad[ix] = (fxph - fxmh) / (2 * h) # the slope\n if verbose:\n print (ix, grad[ix], fxph, fxmh)\n it.iternext() # step to next dimension\n\n return grad", "def check_grad_rel(func, grad, x0, *args):\n step = 1.49e-08\n target = approx_fprime(x0, func, step, *args)\n actual = grad(x0, *args)\n delta = target - actual\n # make sure target is not 0\n delta[target > 0] /= target[target > 0]\n return delta", "def check_gradients(self, X, Y, method='finite_diff'):\n grad_w_num = np.zeros((self.k, self.d))\n Y_pred, h_act = self.evaluate(X)\n grad_b1, grad_b2, grad_w1, grad_w2 = self.compute_gradients(X, Y, Y_pred, h_act)\n if method == 'finite_diff':\n grad_b1_num, grad_b2_num, grad_w1_num, grad_w2_num = self.compute_gradient_num_fast(X, Y)\n elif method == 'centered_diff':\n grad_b1_num, grad_b2_num, grad_w1_num, grad_w2_num = self.compute_gradient_num_slow(X, Y)\n else:\n print(method, \" IS NOT A VALID NUMERICAL GRADIENT CHECKING.\")\n\n grad_w1_vec = grad_w1.flatten()\n grad_w1_num_vec = grad_w1_num.flatten()\n x_w1 = np.arange(1, grad_w1_vec.shape[0] + 1)\n plt.bar(x_w1, grad_w1_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_w1+0.35, grad_w1_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of w1, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_w2_vec = grad_w2.flatten()\n grad_w2_num_vec = grad_w2_num.flatten()\n x_w2 = np.arange(1, grad_w2_vec.shape[0] + 1)\n plt.bar(x_w2, grad_w2_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_w2 + 0.35, grad_w2_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of w2, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_b1_vec = grad_b1.flatten()\n grad_b1_num_vec = grad_b1_num.flatten()\n x_b1 = np.arange(1, grad_b1.shape[0] + 1)\n plt.bar(x_b1, grad_b1_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_b1 + 0.35, grad_b1_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of b1, batch size = \" + str(X.shape[1])))\n plt.show()\n\n grad_b2_vec = grad_b2.flatten()\n grad_b2_num_vec = grad_b2_num.flatten()\n x_b2 = np.arange(1, grad_b2.shape[0] + 1)\n plt.bar(x_b2, grad_b2_vec, 0.35, label='Analytical gradient', color='blue')\n plt.bar(x_b2 + 0.35, grad_b2_num_vec, 0.35, label=method, color='red')\n plt.legend()\n plt.title((\"Gradient check of b2, batch size = \" + str(X.shape[1])))\n plt.show()", "def conjgrad(f, x, options, gradf, optargs=[], \n returnFlog=False, \n returnPoint=False):\n \n # Set 
up the options.\n if len(options) < 18:\n raise Exception('Options vector too short')\n\n if options[13]:\n niters = options[13]\n else:\n niters = 100\n\n # Set up options for line search\n line_options = foptions()\n\n # Need a precise line search for success\n if options[14] > 0:\n line_options[1] = options[14]\n else:\n line_options[1] = 1e-4\n\n display = options[0]\n\n # Next two lines allow conjgrad to work with expression strings\n # f = fcnchk(f, length(varargin));\n # gradf = fcnchk(gradf, length(varargin));\n\n # Check gradients\n if options[8]:\n gradchek(x, f, gradf, *optargs);\n\n options[9] = 0\n options[10] = 0\n nparams = len(x)\n fnew = f(x, *optargs)\n options[9] = options[9] + 1\n gradnew = gradf(x, *optargs)\n options[10] = options[10] + 1\n d = -gradnew\t\t# Initial search direction\n br_min = 0\n br_max = 1.0\t# Initial value for maximum distance to search along\n tol = math.sqrt(eps())\n\n j = 0\n if returnFlog:\n flog = [fnew]\n else:\n flog = []\n if returnPoint:\n pointlog = [x]\n else:\n pointlog = []\n\n while (j < niters):\n\n xold = x\n fold = fnew\n gradold = gradnew\n\n gg = np.dot(gradold, gradold)\n if (gg == 0.0):\n # If the gradient is zero then we are done.\n options[7] = fnew\n return x, flog, pointlog\n\n # This shouldn't occur, but rest of code depends on d being downhill\n if (np.dot(gradnew,d) > 0):\n d = -d\n if options[0] >= 0:\n print 'Warning: search direction uphill in conjgrad'\n\n line_sd = d/la.norm(d)\n lmin = linemin(f, xold, line_sd, fold, line_options, *optargs)\n options[9] = options[9] + line_options[9]\n options[10] = options[10] + line_options[10]\n # Set x and fnew to be the actual search point we have found\n x = xold + lmin * line_sd\n fnew = line_options[7]\n\n # Check for termination\n if np.max(np.abs(x - xold)) < options[1] and abs(fnew - fold) < options[2]:\n options[7] = fnew\n return x, flog, pointlog\n\n gradnew = gradf(x, *optargs)\n options[10] = options[10] + 1\n\n # Use Polak-Ribiere formula to update search direction\n gamma = np.dot(gradnew - gradold, gradnew)/gg\n d = (d*gamma) - gradnew\n\n j = j + 1\n if (display > 0):\n print 'Cycle ', j, ' Function ', line_options[7]\n\n if returnFlog:\n # Store relevant variables\n if j >= len(flog):\n flog.append(fnew)\t\t# Current function value\n else:\n flog[j] = fnew\n if returnPoint:\n if j >= len(pointlog):\n pointlog.append(x) # Current position\n else:\n pointlog[j] = x \n\n # If we get here, then we haven't terminated in the given number of \n # iterations.\n options[7] = fold\n if (options[0] >= 0):\n print maxitmess()\n \n return x, flog, pointlog", "def __compute_gradients(self, w, x, y, loss_func):\n if loss_func == 'logistic':\n return 0.0\n elif loss_func == 'mean_squared':\n return 0.0\n elif loss_func == 'half_mean_squared':\n return 0.0\n else:\n raise ValueError('Supported losses: logistic, mean_squared, or half_mean_squared')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
use a templating library to turn a prefix and a list of contents into an HTML directory index
def render_index(prefix, order_by, contents, reverse_order, base_path): logger.debug('rendering index for {prefix} ordered by {order_by} and reverse_order={reverse_order}'.format(prefix=prefix, order_by=order_by, reverse_order=reverse_order)) sorted_contents = sorted(contents, key=lambda k: k[order_by], reverse=reverse_order) formatted_contents = format_file_details(sorted_contents) # Remove the base path from the prefix to avoid putting the full # filesystem path in the index path = '' if prefix == base_path else prefix.replace(base_path, '') parent_directory = '/'.join(path.split('/')[:-1]) # dumb hack because paths are prefixed with / when run on os x but not linux root_prefix = '' if path.startswith('/') else '/' index_by = {} index_by['lastModified'] = index_link(path, order_by, 'lastModified', reverse_order) index_by['name'] = index_link(path, order_by, 'name', reverse_order) index_by['size'] = index_link(path, order_by, 'size', reverse_order) logging.debug('path: {path}'.format(path=path)) logging.debug('contents: {contents}'.format(contents=contents)) logging.debug('parent_directory: {parent_directory}'.format(parent_directory=parent_directory)) HTML = """ <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"> <html> <head> <title>Index of {{root_prefix}}{{path}}</title> </head> <body> <h1>Index of {{root_prefix}}{{path}}</h1> <table><tr><th></th><th><a href="{{root_prefix}}{{index_link['name']}}">Name</a></th><th><a href="{{root_prefix}}{{index_link['lastModified']}}">Last modified</a></th><th><a href="{{root_prefix}}{{index_link['size']}}">Size</a></th><th>Description</th></tr><tr><th colspan="5"><hr></th></tr> {% if path != '/'%} {% if parent_directory == '' %} <tr><td valign="top"><img src="https://s3-us-west-2.amazonaws.com/icons.puppet.com/back.gif"></td><td><a href="{{parent_directory}}/index_by_name.html">Parent Directory</a></td><td>&nbsp;</td><td align="right"> - </td><td>&nbsp;</td></tr> {% else %} <tr><td valign="top"><img src="https://s3-us-west-2.amazonaws.com/icons.puppet.com/back.gif"></td><td><a href="{{root_prefix}}{{parent_directory}}/index_by_name.html">Parent Directory</a></td><td>&nbsp;</td><td align="right"> - </td><td>&nbsp;</td></tr> {% endif %} {% endif %} {% for item in contents %} {% if item['icon'] == 'folder.gif' %} <tr><td valign="top"><img src="https://s3-us-west-2.amazonaws.com/icons.puppet.com/{{item['icon']}}" alt="[DIR]"></td><td><a href="{{item['name'].split('/')[-1:][0]}}/">{{item['name'].split('/')[-1:][0]}}/</a></td><td align="right">{{item['lastModified']}} </td><td align="right"> {{item['size']}}</td><td>&nbsp;</td></tr> {% else %} <tr><td valign="top"><img src="https://s3-us-west-2.amazonaws.com/icons.puppet.com/{{item['icon']}}" alt="[DIR]"></td><td><a href="{{item['name'].split('/')[-1:][0]}}">{{item['name'].split('/')[-1:][0]}}</a></td><td align="right">{{item['lastModified']}} </td><td align="right"> {{item['size']}}</td><td>&nbsp;</td></tr> {% endif %} {% endfor %} <tr><th colspan="5"><hr></th></tr> </table> </body></html> """ return Environment().from_string(HTML).render( path=path, contents=formatted_contents, parent_directory=parent_directory, index_link=index_by, root_prefix=root_prefix)
[ "def get_dir_index(path, page):\n if page[0] is not \"/\":\n page = f\"/{page}\"\n if page is \"/\":\n page = \"\"\n index_html = \"<pre>\\n\"\n files = os.listdir(path)\n for file in files:\n index_html += f\"<a href='{page}/{file}'>{file}</a>\\n\"\n index_html += \"</pre>\"\n return index_html", "def doIndex(bunch, text, env):\n extension = bunch.get(\"extension\", \".html\")\n cropextension = bunch.get(\"cropextension\", False)\n nobullet = bunch.get(\"nobullet\", None)\n limit = int(bunch.get(\"limit\", 10))\n subdir = bunch.get(\"sub\", None) # WARNING: POTENT FOR HOLE(!)\n traverse = bunch.get(\"traverse\", None)\n\n if subdir and (\"..\" in subdir): # WARNING: HENCE WHY WE SCRUB THE VALUE HERE\n return \"bad subdirectory\" # If /anything/ unwanted, just don't even try\n\n docbase = env[\"docbase\"]\n if subdir is not None:\n docbase = os.path.join(docbase, subdir)\n \n files = FileListing(docbase, traverse)\n files.sort()\n\n # Get limit(num) of files by last modifcation time\n files_by_date = [ (file_last_modified(docbase,x),x) for x in files ]\n files_by_date.sort()\n files_by_date.reverse()\n files_by_date = files_by_date[:limit]\n\n # This format is used by Index, not by recentchanges\n format = '<a href=\"%s\"> %s</a>'\n if subdir is None:\n files = [(x,x) for x in files]\n else:\n files = [(\"/\"+subdir+\"/\"+x,x) for x in files]\n\n if bunch.get(\"order\"):\n if bunch[\"order\"] == \"recent\":\n if bunch.get(\"compact\", None) is None:\n format, files = doRegularOrderedForm(files, subdir, files_by_date)\n else:\n format, files = doCompactForm(files, subdir, files_by_date)\n \n if cropextension:\n files = cropExtension(files, extension)\n\n if nobullet is None:\n fileString = \"<ul>\"\n for file in files:\n filelink = format % file\n fileString += \"<li> %s </li>\" % filelink\n fileString += \"</ul>\"\n else:\n links = [ (format % file) for file in files ]\n fileString = str(nobullet).join(links)\n\n result = \"\"\"%s\n %s\n \"\"\" % (text, fileString)\n if stat_cache.get(\"dirty\",False):\n del stat_cache[\"dirty\"]\n store_stat_cache(docbase)\n \n return result", "def make_index_html():\n\tif not os.path.exists('template.html'):\n\t\tindex_content = file('lib/html/template.html','r').read()\n\t\tindex = open('template.html', 'w')\n\t\tindex.write(index_content % {\"app_name\":conf.app_name})\n\t\tindex.close()\n\t\tprint \"template.html made\"", "def generate_index(folder, original_paths):\n\n # Determine the namespaces listed here (as sub folders)\n # and the files (.html files) that we should link to\n namespaces = []\n files = []\n for item in os.listdir(folder):\n if os.path.isdir(os.path.join(folder, item)):\n namespaces.append(item)\n elif item != 'index.html':\n files.append(item)\n\n # We work with relative paths\n paths = get_relative_paths(original_paths, relative_to=folder)\n\n # Now that everything is setup, write the index.html file\n filename = os.path.join(folder, 'index.html')\n with DocsWriter(filename, type_to_path_function=get_path_for_type) as docs:\n # Title should be the current folder name\n docs.write_head(folder.title(), relative_css_path=paths['css'])\n\n docs.set_menu_separator(paths['arrow'])\n build_menu(docs, filename, relative_main_index=paths['index_all'])\n\n docs.write_title(folder.title())\n\n if namespaces:\n docs.write_title('Namespaces', level=3)\n docs.begin_table(4)\n namespaces.sort()\n for namespace in namespaces:\n # For every namespace, also write the index of it\n generate_index(os.path.join(folder, namespace), original_paths)\n 
docs.add_row(namespace.title(),\n link=os.path.join(namespace, 'index.html'))\n\n docs.end_table()\n\n docs.write_title('Available items')\n docs.begin_table(2)\n\n files = [(f, find_title(os.path.join(folder, f))) for f in files]\n files.sort(key=lambda t: t[1])\n\n for file, title in files:\n docs.add_row(title, link=file)\n\n docs.end_table()\n docs.end_body()", "def generate_index_files(root='content'):\n\n print \"# patterns index\"\n print 'multimarkdown --to=mmd --output=index.md index--master.md\\n'\n\n print \"# group indexes\"\n for group in sorted(s3_patterns.keys()):\n # output multimarkdown command to create build group index files\n print 'multimarkdown --to=mmd --output=%(group)s.md %(group)s--master.md' % {'group': make_pathname(group)}", "def render_index():\n return render_template('0-index.html')", "def main():\n dest_dir = \".public\"\n if os.path.isdir(dest_dir):\n shutil.rmtree(dest_dir)\n os.mkdir(dest_dir)\n\n env = Environment(\n loader=FileSystemLoader('templates'),\n autoescape=select_autoescape(['html'])\n )\n\n ignore_files = ignoreFile()\n files_in_dir = os.walk('templates')\n filenames = [filename for _, _, filename in files_in_dir]\n files = [filename for filename in filenames[0] if filename not in ignore_files]\n for i in files:\n template = env.get_template(i)\n final_html = template.render()\n\n\n write_prefix = glob.glob(\".public\")[0]\n write_path = os.path.join(write_prefix, i)\n print write_path\n try:\n html_file = codecs.open(write_path, 'w', 'utf8')\n html_file.write(final_html)\n finally:\n html_file.close()", "def generateUrlPatterns(filenames, templatePath, defaultContext=\"{}\"):\n import datetime\n \n output = \"\" # the string where we'll store our output\n \n # write the header information and the included files\n output += URLCONFIG_HEADER\n output += \"# Created \" + datetime.datetime.today().ctime() + \"\\n\"\n output += \"\\n\"\n output += URLCONFIG_INCLUDES\n output += \"\\n\"\n \n # write a list of all files in this urlconfig, as tuples of (displayName, href)\n output += \"# A list of items to display in the index, as tuples of (displayName, href).\\n\"\n output += \"index_list = [\\n\"\n for F in filenames:\n output+= \"\\t('\" + dropHtmlExt(F) + \"', '\" + F + \"'), \\n\"\n output += \"\\t]\\n\"\n \n \n # write view functions for all the files\n for F in filenames:\n cname = cleanName(F)\n output += \"def \" + cname + \"(request):\\n\"\n output += \"\\treturn render_to_response(\\\"\" + templatePath + F + \"\\\", \" + defaultContext + \")\\n\\n\"\n \n # write the index page's view function\n output += \"def index(request):\\n\"\n\t\n # get today's date and time as a string\n d = datetime.datetime.today()\n datestring = d.strftime(\"%a, %b %d %Y, %H:%I%p\")\n output += \"\\treturn render_to_response(\\\"\"+templatePath+\"index.html\\\",{'index_list':index_list, 'title':'Pydoc API', 'updatedDate':\\\"\" + datestring + \"\\\"})\\n\\n\"\n \n \n # write the urlpatterns object\n c = 0\n\n output += \"urlpatterns = patterns('',\\n\" # open the urlpatterns function\n\n for F in filenames:\n if c > 200: # we've printed close to our maximum for one urlpatterns call\n output += \"\\t)\\n\"\n output += \"urlpatterns += patterns('',\\n\"\n c = 0\n\n # write the urlconfig entry\n output += \"\\t(r'^\" + F + \"$', \" + cleanName(F) + \"), \\n\"\n c += 1\n\n output += \"\\t(r'^$',index)\\n\" # write an index router\n output += \"\\t)\\n\" # close the urlpatterns tuple\n \n return output", "def _generate_index(folder, paths,\n bots_index=False, 
bots_index_paths=()):\n # Determine the namespaces listed here (as sub folders)\n # and the files (.html files) that we should link to\n namespaces = []\n files = []\n INDEX = 'index.html'\n BOT_INDEX = 'botindex.html'\n\n for item in (bots_index_paths or folder.iterdir()):\n if item.is_dir():\n namespaces.append(item)\n elif item.name not in (INDEX, BOT_INDEX):\n files.append(item)\n\n # Now that everything is setup, write the index.html file\n filename = folder / (BOT_INDEX if bots_index else INDEX)\n with DocsWriter(filename, _get_path_for_type) as docs:\n # Title should be the current folder name\n docs.write_head(str(folder).replace(os.path.sep, '/').title(),\n css_path=paths['css'],\n default_css=paths['default_css'])\n\n docs.set_menu_separator(paths['arrow'])\n _build_menu(docs)\n docs.write_title(str(filename.parent)\n .replace(os.path.sep, '/').title())\n\n if bots_index:\n docs.write_text('These are the requests that you may be able to '\n 'use as a bot. Click <a href=\"{}\">here</a> to '\n 'view them all.'.format(INDEX))\n else:\n docs.write_text('Click <a href=\"{}\">here</a> to view the requests '\n 'that you can use as a bot.'.format(BOT_INDEX))\n if namespaces:\n docs.write_title('Namespaces', level=3)\n docs.begin_table(4)\n namespaces.sort()\n for namespace in namespaces:\n # For every namespace, also write the index of it\n namespace_paths = []\n if bots_index:\n for item in bots_index_paths:\n if item.parent == namespace:\n namespace_paths.append(item)\n\n _generate_index(namespace, paths,\n bots_index, namespace_paths)\n\n docs.add_row(\n namespace.stem.title(),\n link=namespace / (BOT_INDEX if bots_index else INDEX))\n\n docs.end_table()\n\n docs.write_title('Available items')\n docs.begin_table(2)\n\n files = [(f, _find_title(f)) for f in files]\n files.sort(key=lambda t: t[1])\n\n for file, title in files:\n docs.add_row(title, link=file)\n\n docs.end_table()\n docs.end_body()", "def index(request):\n subs = {}\n subs[\"current_path\"] = \"/\"\n subs[\"initial_files\"] = simplejson.loads(list_dir(subs[\"current_path\"]))\n return render_to_response(\"picman/index.html\", subs)", "def gen_lunr_search(modules: List[pdoc.Module],\n index_docstrings: bool,\n template_config: dict):\n\n def trim_docstring(docstring):\n return re.sub(r'''\n \\s+| # whitespace sequences\n \\s+[-=~]{3,}\\s+| # title underlines\n ^[ \\t]*[`~]{3,}\\w*$| # code blocks\n \\s*[`#*]+\\s*| # common markdown chars\n \\s*([^\\w\\d_>])\\1\\s*| # sequences of punct of the same kind\n \\s*</?\\w*[^>]*>\\s* # simple HTML tags\n ''', ' ', docstring, flags=re.VERBOSE | re.MULTILINE)\n\n def recursive_add_to_index(dobj):\n info = {\n 'ref': dobj.refname,\n 'url': to_url_id(dobj.module),\n }\n if index_docstrings:\n info['doc'] = trim_docstring(dobj.docstring)\n if isinstance(dobj, pdoc.Function):\n info['func'] = 1\n index.append(info)\n for member_dobj in getattr(dobj, 'doc', {}).values():\n recursive_add_to_index(member_dobj)\n\n @lru_cache()\n def to_url_id(module):\n url = module.url()\n if url not in url_cache:\n url_cache[url] = len(url_cache)\n return url_cache[url]\n\n index: List[Dict] = []\n url_cache: Dict[str, int] = {}\n for top_module in modules:\n recursive_add_to_index(top_module)\n urls = sorted(url_cache.keys(), key=url_cache.__getitem__)\n\n main_path = args.output\n with open(os.path.join(main_path, 'index.js'), \"w\", encoding=\"utf-8\") as f:\n f.write(\"URLS=\")\n json.dump(urls, f, indent=0, separators=(',', ':'))\n f.write(\";\\nINDEX=\")\n json.dump(index, f, indent=0, 
separators=(',', ':'))\n\n # Generate search.html\n with open(os.path.join(main_path, 'doc-search.html'), \"w\", encoding=\"utf-8\") as f:\n rendered_template = pdoc._render_template('/search.mako', **template_config)\n f.write(rendered_template)", "def __init__(self, source, destination, layouts):\r\n for root, dirs, files in os.walk(source):\r\n for name in files:\r\n content = open( os.path.join(root, name) ).read()\r\n # Iterate yaml front matter\r\n for config in yaml.load_all(content):\r\n if type(config) is dict:\r\n layout = Template(filename=os.path.join(layouts, config['layout']+\".html\"))\r\n pieces = re.split(\"---\\n\", content) # expect [blank, yaml, content]\r\n html = markdown.markdown(pieces[2])\r\n # Save page\r\n page = open(os.path.join(destination, name), 'w')\r\n page.write(layout.render(data=config, content=html))\r\n page.close()", "def index_templates(running_app):\n list(current_search.put_templates(ignore=[400]))", "def home():\n return render_template(\"dashboard/directories.html\", tagname = 'home', directories = DirectoryNameToURL.objects.all())", "def index_template(name, opts):\n parts = [\"taxonomy\", name, opts[\"hub-name\"], opts[\"hub-version\"]]\n return index_templator(parts, opts)", "def testTemplateMultipleIndexing(self):\n template = 'Welcome to the [foo:bar:zoink].'\n result = self.tmpl(template).Parse(foo={'bar': {'zoink': 'World'}})\n self.assertEqual(result, 'Welcome to the World.')", "def wrap_list_html(self, entry, filepath=None):\n if filepath is not None:\n md5 = filename_md5(filepath)\n self.html += self.add_tab() + '<li><a href=\"html/%s.html\" target=\"_blank\">%s</a></li>\\n' % (md5, entry)\n else:\n self.html += self.add_tab() + '<li>%s</li>\\n' % entry", "def api_index():\n return render_template('api_docs.html')", "def basic_pages():\n return make_response(open('src/templates/index.html').read())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save all the data about the rooms
async def write_rooms(rooms): with open(ROOMDATA, 'wb') as opened_file: pickle.dump(rooms, opened_file, protocol=pickle.HIGHEST_PROTOCOL)
[ "async def save(self):\r\n data = await self._api.update_room(\r\n self._location_id, self._room_id, self.to_data()\r\n )\r\n if data:\r\n self.apply_data(data)", "def save_data( self, ):\n\n log_msg = \"in save_data() \" #print( log_msg )\n self.logger.debug( log_msg )\n\n if not ( self.need_update() ):\n #self.logger.info( \"no update needed\" )\n return\n\n\n\n # bad ideas we shoul have some standards even if we have to reload data\n if self.parameters.mode == \"RootCellar\": # may need to expand priro to fix\n self.save_data_for_RootCellar() # later figure out if parameterization is ok\n return\n\n elif self.parameters.mode == \"WellMonitor\": # may need to expand priro to fix\n self.save_data_for_WellMonitor()\n return\n\n elif self.parameters.mode == \"GreenHouse\": # may need to expand priro to fix\n self.save_data_for_GreenHouse()\n return\n\n else:\n # should log error )\n # you are probabbly screwed unless you fix this perhaps back to greenhouse\n return", "def add_room(self, room):\n for i in range(0, 2400, 25):\n timeslots = [(room.upper(), d, i, 0) for d in range(1, 8)]\n self.c.executemany('INSERT INTO rooms VALUES (?,?,?,?)', (timeslots))\n self.conn.commit()", "def update_room():\n print(\"UPDATE A ROOM\".center(80))\n print(\"-\".center(80, '-'))\n r = _update_which_room()\n item = _add_item()\n value = _get_value(item)\n db.execute('''INSERT into items (item_name, item_value, room_id)\n VALUES (?, ?, ?)''', (item, value, r))\n db.commit()\n print(\n f'''{item.capitalize()} with value of ${value:.2f} has been added to the room.\\n'''\n )\n display_menu()", "def _save_data(self):\n if not os.path.exists(self.folder):\n self._create_folder()\n\n for revision in self.revisions:\n revision.save()", "def room_create(self):\n\t\treturn self.app.put('/room/create')", "def save_buffer(self):\n\n if not self.update_mode:\n for poi in self.poi_objects:\n if len(db.session.query(POIs).filter_by(osm_type=poi.osm_type, osm_id=poi.osm_id).all()) > 0:\n self.update_mode = True\n\n if self.update_mode:\n for poi in self.poi_objects:\n db.session.query(POIs).filter_by(osm_type=poi.osm_type, osm_id=poi.osm_id).delete()\n db.session.commit()\n\n db.session.bulk_save_objects(self.poi_objects)\n db.session.bulk_save_objects(self.tags_objects)\n db.session.bulk_save_objects(self.categories_objects)\n db.session.commit()\n self.poi_objects = []\n self.tags_objects = []\n self.categories_objects = []", "def setroom(self, room):\n pass", "def save_game(self) -> None:\n pass", "def save_sensors(self):\n for sensor in self.sensors:\n sensor.save()", "def save_guests_data(guests):\n with open('guests.json', 'w') as datafile:\n json.dump(guests, datafile)", "def add_room():\n print(\"ADD A ROOM\".center(80))\n print(\"-\".center(80, '-'))\n room = str(_get_room_name())\n db.execute('INSERT into room (name) VALUES (?)', (room,))\n db.commit()\n display_menu()", "def save_(self):\n QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))\n # Open a connection to Doyle's database.\n self.dbw, ok = write_connection(self)\n save_qry = QtSql.QSqlQuery()\n while ok:\n descp = prepare_string(self.description.text())\n dest = prepare_string(self.destination.text())\n notes = prepare_string(self.notes.toPlainText())\n # User must at least have a description and destination to save.\n if descp != \"\" and dest != \"\":\n dest_qry = \"Select Dest_ID from inventorysystem.Destination_tbl where Dest_Desc = '{0}'\".format(dest)\n if save_qry.exec_(dest_qry): # Get the destination id.\n if 
save_qry.first():\n dest = save_qry.value(0).toString()\n else:\n dest = '7'\n else:\n db_err(save_qry)\n dest = '7'\n qry = (\"update inventorysystem.Parts_tbl set Part_Desc='{0}', Part_Notes='{1}',destination={2} where \"\n \"Part_ID = {3}\").format(descp, notes, dest, self.partId.text())\n write_qry = QtSql.QSqlQuery(self.dbw)\n if not write_qry.exec_(qry):\n db_err(write_qry)\n break\n del self.dbw\n mach = self.machine.text()\n cycle = self.cycleTime.text()\n load = self.loadTime.text()\n setup = self.setupTime.text()\n mat = self.material.text()\n clamp = self.clamp.text()\n blank_length = self.blankLength.text()\n bar = self.barPull.text()\n square = self.squareSet.text()\n pos_stop = self.posStop.text()\n proc = self.routing.text()\n prog_file = self.program.text()\n if mach != \"\" and mat != \"\" and proc != \"\":\n mach_qry = \"Select id from machines where name = '{0}'\".format(mach)\n if save_qry.exec_(mach_qry):\n if save_qry.first():\n mach = save_qry.value(0).toString()\n else:\n mach = \"14\"\n else:\n db_err(save_qry)\n mach = \"14\"\n mat_qry = \"Select id from material where material = '{0}'\".format(mat)\n if save_qry.exec_(mat_qry):\n if save_qry.first():\n mat = save_qry.value(0).toString()\n else:\n if save_qry.exec_(\"insert into material set material='{0}'\".format(mat)):\n mat = save_qry.lastInsertId().toString()\n else:\n db_err(save_qry)\n mat = \"17\"\n else:\n db_err(save_qry)\n mat = \"17\"\n qry2 = (\"machine={0}, cycleTime=time_to_Sec('{1}'), loadTime=time_to_Sec('{2}'), \"\n \"setupTime=time_to_Sec('{3}'), material={4}, clampPSI={5}, blankLength={6}, barPull={7}, \"\n \"squareSet={8}, posStop={9}, Process='{10}', fileName='{11}'\"\n ).format(mach, cycle, load, setup, mat, clamp, blank_length, bar, square, pos_stop, proc, prog_file)\n if self.index.text() == '0':\n part_id = self.partId.text()\n qry2 = (\"Insert into setupInfo set {0}, partId={1}\".format(qry2, part_id))\n else:\n qry2 = (\"Update setupInfo set {0} where id={1}\".format(qry2, self.index.text()))\n local_qry = QtSql.QSqlQuery()\n if not local_qry.exec_(qry2):\n db_err(local_qry)\n self.load_data(self.partId.text())\n QtGui.QApplication.restoreOverrideCursor()", "def __insert(self):\n try:\n conn = connect()\n cur = conn.cursor()\n sql = \"\"\"\n insert into room (\n room_id, host_id, room_type, country, city,\n neighborhood, address, reviews, overall_satisfaction,\n accommodates, bedrooms, bathrooms, price, deleted,\n minstay, latitude, longitude, survey_id\n )\n \"\"\"\n sql += \"\"\"\n values (%s, %s, %s, %s, %s, %s, %s, %s, %s,\n %s, %s, %s, %s, %s, %s, %s, %s, %s\n )\"\"\"\n insert_args = (\n self.room_id, self.host_id, self.room_type, self.country,\n self.city, self.neighborhood, self.address, self.reviews,\n self.overall_satisfaction, self.accommodates, self.bedrooms,\n self.bathrooms, self.price, self.deleted, self.minstay,\n self.latitude, self.longitude, self.survey_id,\n )\n cur.execute(sql, insert_args)\n cur.close()\n conn.commit()\n logger.debug(\"Room \" + str(self.room_id) + \": inserted\")\n except psycopg2.IntegrityError:\n # logger.info(\"Room \" + str(self.room_id) + \": insert failed\")\n conn.rollback()\n cur.close()\n raise\n except:\n conn.rollback()\n raise", "def rooms_ajax():\n\n rooms = db.session.query(Room).all()\n return ajax.admin.rooms_data(rooms)", "def post(self):\n new_room_args = room_post_reqparser.parse_args(strict=True)\n\n name_record = db.session.query(RoomModel).filter_by(name=new_room_args['name']).first()\n if name_record:\n abort(409, 
error_code=409,\n error_msg='Cannot create a new room because a room with the given name already exists.'\n )\n room_admin = db.session.query(UserModel).filter_by(name=new_room_args['room_admin_name']).first()\n if not room_admin:\n abort(409, error_code=409,\n error_msg='Cannot create a new room because no user with the given name of the room admin exists.'\n )\n\n new_room = RoomModel(name=new_room_args['name'])\n room_admin.is_admin.append(new_room)\n db.session.add(new_room)\n db.session.commit()\n \n return new_room, 201", "def save(self):\n\n self.curr.execute(\n ''' INSERT INTO hotels(name, location, lodges, conference_rooms, img_url, category, date)\\\n VALUES('{}','{}','{}','{}','{}','{}','{}') RETURNING name, location, lodges, conference_rooms, img_url, category, date''' \\\n .format(self.name, self.location, self.lodges, self.conference_rooms, self.img_url, self.category, self.date))\n hotel = self.curr.fetchone()\n self.conn.commit()\n self.curr.close()\n return json.dumps(hotel, default=str)", "def get(self):\n results = db.session.query(RoomModel).all()\n\n if not results:\n abort(404, error_code=404, error_msg='No room exists in the database')\n\n rooms = {}\n for record in results:\n rooms[record._id] = {\n 'name': record.name, \n 'room_admin_name': record.user.name\n }\n\n return [rooms], 200", "def save_state(self, sqlite_database_name):\n if os.path.isfile('./'+sqlite_database_name+'.db'):\n return ('That file name already exists, '\\\n + 'please provide a different filename')\n else:\n engine = create_engine('sqlite:///'+sqlite_database_name+'.db')\n Base.metadata.bind = engine\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n Base.metadata.create_all(engine)\n \"\"\" Check whether living_space dictionary has items to be added to\n the database and add them to the Rooms table in the database\"\"\"\n if len(self.living_space) > 0:\n for key in self.living_space:\n new_room = Rooms(\n room_name = self.living_space[key].room_name,\n members = ' '.join(self.living_space[key].members),\n max_num = self.living_space[key].max_num,\n room_type ='LivingSpace')\n session.add(new_room)\n session.commit()\n \"\"\"Checks whether office dictionary has items to be added to the\n database and add them to the Rooms table in the database\"\"\"\n if len(self.office) > 0:\n for key in self.office:\n new_room = Rooms(\n room_name = self.office[key].room_name,\n members = ' '.join(self.office[key].members),\n max_num = self.office[key].max_num,\n room_type ='Office')\n session.add(new_room)\n session.commit()\n #Checks whether persons dictionay has people to be added to the database\n if len(self.persons) > 0:\n #This For loop is used to traverse all keys in the persons dictionary\n for key in self.persons:\n #This if statement is used to add Staff to People table in the database\n if self.persons[key].person_type.upper() == 'STAFF':\n new_person = People(\n person_id = self.persons[key].person_id,\n last_name = self.persons[key].last_name,\n first_name = self.persons[key].first_name,\n person_type = 'STAFF',\n proom_name = self.persons[key].proom_name)\n session.add(new_person)\n session.commit\n #This if statement is used to add Fellows to People table in the database\n if self.persons[key].person_type.upper() == 'FELLOW':\n new_person = People(\n person_id = self.persons[key].person_id,\n last_name = self.persons[key].last_name,\n first_name = self.persons[key].first_name,\n person_type = 'FELLOW',\n wants_accommodation = self.persons[key].wants_accommodation,\n proom_name 
= self.persons[key].proom_name,\n lroom_name = self.persons[key].lroom_name)\n session.add(new_person)\n session.commit()\n \"\"\"The if staements below are used to store list names and there\n values as a string in the Lists Database Table\"\"\"\n if len(self.fellows_with_living_room_list) > 0:\n new_list = Lists(\n list_name = 'fellows_with_living_room_list',\n list_string = ' '.join(self.fellows_with_living_room_list)\n )\n session.add(new_list)\n session.commit()\n if len(self.fellows_with_office_list) > 0:\n new_list = Lists(list_name = 'fellows_with_office_list',\n list_string = ' '.join(self.fellows_with_office_list))\n session.add(new_list)\n session.commit()\n if len(self.fellows_who_missed_living_space) > 0:\n new_list = Lists(list_name = 'fellows_who_missed_living_space',\n list_string = ' '.join(self.fellows_who_missed_living_space))\n session.add(new_list)\n session.commit()\n if len(self.fellows_who_dont_want_living_space) > 0:\n new_list = Lists(list_name = 'fellows_who_dont_want_living_space',\n list_string = ' '.join(self.fellows_who_dont_want_living_space))\n session.add(new_list)\n session.commit()\n if len(self.fellows_who_missed_office) > 0:\n new_list = Lists(list_name = 'fellows_who_missed_office',\n list_string = ' '.join(self.fellows_who_missed_office))\n session.add(new_list)\n session.commit()\n if len(self.staff_with_office_list) > 0:\n new_list = Lists(list_name = 'staff_with_office_list',\n list_string = ' '.join(self.staff_with_office_list))\n session.add(new_list)\n session.commit()\n if len(self.staff_who_missed_office_list) > 0:\n new_list = Lists(list_name = 'staff_who_missed_office_list',\n list_string = ' '.join(self.staff_who_missed_office_list))\n session.add(new_list)\n session.commit()\n if len(self.all_rooms_list) > 0:\n new_list = Lists(list_name = 'all_rooms_list',\n list_string = ' '.join(self.all_rooms_list))\n session.add(new_list)\n session.commit()\n session.close() #Close Database session\n return \"State saved succesfully\\n\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns values common to both move lines (except for debit, credit and amount_currency which are reversed)
def _get_shared_move_line_vals(self, debit, credit, amount_currency): if self.payment_difference_handling == 'open' and not self.payment_difference and not self._context.get( 'credit_aml', False): if self.payment_method_type == 'adjustment' \ and debit > 0.0 \ and not amount_currency \ and self.partner_type == 'customer': debit = 0.0 for inv_id in self.payment_inv_line_ids.filtered(lambda l: l.allocation > 0.0): debit += inv_id.allocation elif self.payment_method_type == 'adjustment' \ and credit > 0.0 \ and not amount_currency \ and self.partner_type == 'supplier': credit = 0.0 for inv_id in self.payment_inv_line_ids.filtered(lambda l: l.allocation > 0.0): credit += inv_id.allocation return { 'partner_id': self.payment_type in ('inbound', 'outbound') and self.env['res.partner']._find_accounting_partner(self.partner_id).id or False, # 'invoice_id': invoice_id and invoice_id.id or False, 'debit': debit, 'credit': credit, 'amount_currency': amount_currency or False, 'payment_id': self.id, 'journal_id': self.journal_id.id, }
[ "def group_move_lines(self, line):\n\t\t\n\t\tline2 = {}\n\t\tfor l in line:\n\t\t\ttmp = self.inv_line_characteristic_hashcode(l)\n\t\t\tif tmp in line2:\n\t\t\t\tam = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])\n\t\t\t\tline2[tmp]['debit'] = (am > 0) and am or 0.0\n\t\t\t\tline2[tmp]['credit'] = (am < 0) and -am or 0.0\n\t\t\t\tline2[tmp]['tax_amount'] += l['tax_amount']\n\t\t\t\t# line2[tmp]['analytic_lines'] += l['analytic_lines']\n\t\t\telse:\n\t\t\t\tline2[tmp] = l\n\t\t# print \"----------------->\",line2\n\t\tline = []\n\t\tfor key, val in line2.items():\n\t\t\tline.append((0,0,val))\n\t\treturn line", "def _prepare_move_line_vals(self, cr, uid, st_line, move_id, debit, credit, currency_id = False,\n amount_currency= False, account_id = False, analytic_id = False,\n partner_id = False, context=None):\n # PCARBALLO Chequeo si las cuentas coinciden o no para saber que signo se le asigna al monto.\n # MODIFICACION PARA CASOS DE DECIMALES- echaviano 30/10\n if debit == 0.0 and credit == 0.0 and abs(amount_currency) == 0.0:\n amount_currency = st_line.amount > 0 and -1 * st_line.amount or abs(st_line.amount)\n\n acc_id = account_id or st_line.account_id.id\n cur_id = currency_id or st_line.statement_id.currency.id\n par_id = partner_id or (((st_line.partner_id) and st_line.partner_id.id) or False)\n\n #PCARBALLO\n return {\n 'name': st_line.name,\n 'date': st_line.date,\n 'ref': st_line.ref,\n 'move_id': move_id,\n 'partner_id': par_id,\n 'account_id': acc_id,\n 'credit': credit,\n 'debit': debit,\n 'statement_id': st_line.statement_id.id,\n 'journal_id': st_line.statement_id.journal_id.id,\n 'period_id': st_line.statement_id.period_id.id,\n 'currency_id': amount_currency and cur_id,\n 'amount_currency': amount_currency,\n 'analytic_account_id': analytic_id,\n }", "def _amount_residual(self, cr, uid, ids, field_names, args, context=None):\n res = {}\n if context is None:\n context = {}\n cur_obj = self.pool.get('res.currency')\n acc_move_recon_obj = self.pool.get('account.move.reconcile')\n for move_line in self.browse(cr, uid, ids, context=context):\n res[move_line.id] = {\n 'amount_original':0.0,\n 'amount_residual': 0.0,\n 'amount_residual_currency': 0.0,\n 'amount_res': 0.0,\n }\n\n if move_line.reconcile_id:\n continue\n if not move_line.account_id.type in ('payable', 'receivable'):\n #this function does not suport to be used on move lines not related to payable or receivable accounts\n continue\n\n if move_line.currency_id:\n move_line_total = move_line.amount_currency\n sign = move_line.amount_currency < 0 and -1 or 1\n else:\n move_line_total = move_line.debit - move_line.credit\n sign = (move_line.debit - move_line.credit) < 0 and -1 or 1\n amount_original = move_line_total\n line_total_in_company_currency = move_line.debit - move_line.credit\n context_unreconciled = context.copy()\n if move_line.reconcile_partial_id:\n acc_move_recon_id = acc_move_recon_obj.browse(cr, uid, move_line.reconcile_partial_id.id, context=None)\n\n for payment_line in acc_move_recon_id.line_partial_ids:\n if payment_line.id == move_line.id:\n continue\n if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:\n move_line_total += payment_line.amount_currency\n else:\n if move_line.currency_id:\n context_unreconciled.update({'date': payment_line.date})\n amount_in_foreign_currency = float_round(cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - 
payment_line.credit), round=False, context=context_unreconciled),2)\n move_line_total += amount_in_foreign_currency\n else:\n move_line_total += (payment_line.debit - payment_line.credit)\n line_total_in_company_currency += (payment_line.debit - payment_line.credit)\n\n result = move_line_total\n# res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n res[move_line.id]['amount_original'] = sign * float_round((move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, amount_original) or amount_original),2)\n\n res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency\n ctx = {'date': move_line.cur_date or move_line.date}\n \n res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n if move_line.currency_id:\n move_line_res = abs((move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result))\n else:\n move_line_res = abs(line_total_in_company_currency)\n\n res[move_line.id]['amount_res'] = move_line_res\n return res", "def process_reconciliation(self, cr, uid, id, mv_line_dicts, context=None):\n if context is None:\n context = {}\n st_line = self.browse(cr, uid, id, context=context)\n company_currency = st_line.journal_id.company_id.currency_id\n statement_currency = st_line.journal_id.currency or company_currency\n bs_obj = self.pool.get('account.bank.statement')\n am_obj = self.pool.get('account.move')\n aml_obj = self.pool.get('account.move.line')\n currency_obj = self.pool.get('res.currency')\n\n # Checks\n if st_line.journal_entry_id.id:\n raise osv.except_osv(_('Error!'), _('The bank statement line was already reconciled.'))\n for mv_line_dict in mv_line_dicts:\n for field in ['debit', 'credit', 'amount_currency']:\n if field not in mv_line_dict:\n mv_line_dict[field] = 0.0\n if mv_line_dict.get('counterpart_move_line_id'):\n mv_line = aml_obj.browse(cr, uid, mv_line_dict.get('counterpart_move_line_id'), context=context)\n if mv_line.reconcile_id:\n raise osv.except_osv(_('Error!'), _('A selected move line was already reconciled.'))\n\n # Create the move\n move_name = (st_line.statement_id.name or st_line.name) + \"/\" + str(st_line.sequence)\n move_vals = bs_obj._prepare_move(cr, uid, st_line, move_name, context=context)\n move_id = am_obj.create(cr, uid, move_vals, context=context)\n\n # Create the move line for the statement line\n if st_line.statement_id.currency.id != company_currency.id:\n if st_line.currency_id == company_currency:\n amount = st_line.amount_currency\n else:\n ctx = context.copy()\n ctx['date'] = st_line.date\n amount = currency_obj.compute(cr, uid, st_line.statement_id.currency.id, company_currency.id, st_line.amount, context=ctx)\n else:\n amount = st_line.amount\n bank_st_move_vals = bs_obj._prepare_bank_move_line(cr, uid, st_line, move_id, amount, company_currency.id, context=context)\n aml_obj.create(cr, uid, bank_st_move_vals, context=context)\n # Complete the dicts\n st_line_currency = st_line.currency_id or statement_currency\n st_line_currency_rate = st_line.currency_id and (st_line.amount_currency / st_line.amount) or False\n to_create = []\n for mv_line_dict in mv_line_dicts:\n if mv_line_dict.get('is_tax_line'):\n continue\n mv_line_dict['ref'] = move_name\n mv_line_dict['move_id'] = move_id\n mv_line_dict['period_id'] = 
st_line.statement_id.period_id.id\n mv_line_dict['journal_id'] = st_line.journal_id.id\n mv_line_dict['company_id'] = st_line.company_id.id\n mv_line_dict['statement_id'] = st_line.statement_id.id\n if mv_line_dict.get('counterpart_move_line_id'):\n mv_line = aml_obj.browse(cr, uid, mv_line_dict['counterpart_move_line_id'], context=context)\n mv_line_dict['partner_id'] = mv_line.partner_id.id or st_line.partner_id.id\n mv_line_dict['account_id'] = mv_line.account_id.id\n if st_line_currency.id != company_currency.id:\n ctx = context.copy()\n ctx['date'] = st_line.date\n mv_line_dict['amount_currency'] = mv_line_dict['debit'] - mv_line_dict['credit']\n mv_line_dict['currency_id'] = st_line_currency.id\n if st_line.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:\n debit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['debit'] / st_line_currency_rate)\n credit_at_current_rate = self.pool.get('res.currency').round(cr, uid, company_currency, mv_line_dict['credit'] / st_line_currency_rate)\n elif st_line.currency_id and st_line_currency_rate:\n debit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['debit'] / st_line_currency_rate, context=ctx)\n credit_at_current_rate = currency_obj.compute(cr, uid, statement_currency.id, company_currency.id, mv_line_dict['credit'] / st_line_currency_rate, context=ctx)\n else:\n debit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)\n credit_at_current_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)\n if mv_line_dict.get('counterpart_move_line_id'):\n #post an account line that use the same currency rate than the counterpart (to balance the account) and post the difference in another line\n ctx['date'] = mv_line.date\n if mv_line.currency_id.id == mv_line_dict['currency_id'] \\\n and float_is_zero(abs(mv_line.amount_currency) - abs(mv_line_dict['amount_currency']), precision_rounding=mv_line.currency_id.rounding):\n debit_at_old_rate = mv_line.credit\n credit_at_old_rate = mv_line.debit\n else:\n debit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['debit'], context=ctx)\n credit_at_old_rate = currency_obj.compute(cr, uid, st_line_currency.id, company_currency.id, mv_line_dict['credit'], context=ctx)\n mv_line_dict['credit'] = credit_at_old_rate\n mv_line_dict['debit'] = debit_at_old_rate\n if debit_at_old_rate - debit_at_current_rate:\n currency_diff = debit_at_current_rate - debit_at_old_rate\n to_create.append(self.get_currency_rate_line(cr, uid, st_line, -currency_diff, move_id, context=context))\n if credit_at_old_rate - credit_at_current_rate:\n currency_diff = credit_at_current_rate - credit_at_old_rate\n to_create.append(self.get_currency_rate_line(cr, uid, st_line, currency_diff, move_id, context=context))\n if mv_line.currency_id and mv_line_dict['currency_id'] == mv_line.currency_id.id:\n amount_unreconciled = mv_line.amount_residual_currency\n else:\n amount_unreconciled = currency_obj.compute(cr, uid, company_currency.id, mv_line_dict['currency_id'] , mv_line.amount_residual, context=ctx)\n if float_is_zero(mv_line_dict['amount_currency'] + amount_unreconciled, precision_rounding=mv_line.currency_id.rounding):\n amount = mv_line_dict['debit'] or mv_line_dict['credit']\n sign = -1 if mv_line_dict['debit'] else 1\n 
currency_rate_difference = sign * (mv_line.amount_residual - amount)\n if not company_currency.is_zero(currency_rate_difference):\n exchange_lines = self._get_exchange_lines(cr, uid, st_line, mv_line, currency_rate_difference, mv_line_dict['currency_id'], move_id, context=context)\n for exchange_line in exchange_lines:\n to_create.append(exchange_line)\n\n else:\n mv_line_dict['debit'] = debit_at_current_rate\n mv_line_dict['credit'] = credit_at_current_rate\n elif statement_currency.id != company_currency.id:\n #statement is in foreign currency but the transaction is in company currency\n prorata_factor = (mv_line_dict['debit'] - mv_line_dict['credit']) / st_line.amount_currency\n mv_line_dict['amount_currency'] = prorata_factor * st_line.amount\n to_create.append(mv_line_dict)\n # If the reconciliation is performed in another currency than the company currency, the amounts are converted to get the right debit/credit.\n # If there is more than 1 debit and 1 credit, this can induce a rounding error, which we put in the foreign exchane gain/loss account.\n if st_line_currency.id != company_currency.id:\n diff_amount = bank_st_move_vals['debit'] - bank_st_move_vals['credit'] \\\n + sum(aml['debit'] for aml in to_create) - sum(aml['credit'] for aml in to_create)\n if not company_currency.is_zero(diff_amount):\n diff_aml = self.get_currency_rate_line(cr, uid, st_line, diff_amount, move_id, context=context)\n diff_aml['name'] = _('Rounding error from currency conversion')\n to_create.append(diff_aml)\n # Create move lines\n move_line_pairs_to_reconcile = []\n for mv_line_dict in to_create:\n counterpart_move_line_id = None # NB : this attribute is irrelevant for aml_obj.create() and needs to be removed from the dict\n if mv_line_dict.get('counterpart_move_line_id'):\n counterpart_move_line_id = mv_line_dict['counterpart_move_line_id']\n del mv_line_dict['counterpart_move_line_id']\n new_aml_id = aml_obj.create(cr, uid, mv_line_dict, context=context)\n if counterpart_move_line_id != None:\n move_line_pairs_to_reconcile.append([new_aml_id, counterpart_move_line_id])\n # Reconcile\n for pair in move_line_pairs_to_reconcile:\n aml_obj.reconcile_partial(cr, uid, pair, context=context)\n # Mark the statement line as reconciled\n self.write(cr, uid, id, {'journal_entry_id': move_id}, context=context)\n if st_line.statement_id.to_partner:\n self.pool.get('account.move').write(cr, uid, move_id, {'partner_id': st_line.statement_id.partner_id.id}, context)", "def _prepare_move_line_vals(self, cr, uid, st_line, move_id, debit, credit, currency_id=False,\n amount_currency=False, account_id=False, partner_id=False, context=None):\n acc_id = account_id or st_line.account_id.id\n cur_id = currency_id or st_line.statement_id.currency.id\n par_id = partner_id or (((st_line.partner_id) and st_line.partner_id.id) or False)\n vals = {\n 'name': st_line.name,\n 'date': st_line.date,\n 'ref': st_line.ref,\n 'move_id': move_id,\n 'partner_id': par_id,\n 'account_id': acc_id,\n 'credit': credit,\n 'debit': debit,\n 'statement_id': st_line.statement_id.id,\n 'journal_id': st_line.statement_id.journal_id.id,\n 'period_id': st_line.statement_id.period_id.id,\n 'currency_id': amount_currency and cur_id,\n 'amount_currency': amount_currency,\n }\n #~ if st_line.analytic_id and st_line.type in 'income':\n #~ vals.update({'analytic_account_id': st_line.analytic_id.id})\n \n return vals", "def prepare_move_lines_for_bank_reconciliation_widget(self, target_currency=False):\n if not self.lines:\n return []\n if self.env.context 
is None:\n context = {}\n ctx = context.copy()\n currency_obj = self.pool.get('res.currency')\n company_currency = self.pool.get('res.users').browse(self.env.uid).company_id.currency_id\n #rml_parser = report_sxw.rml_parse(self.env.cr, self.env.uid, 'reconciliation_widget_aml', context=self.env.context)\n ret = []\n\n for line in self.lines:\n partial_reconciliation_siblings_ids = []\n #if line.reconcile_partial_id:\n # partial_reconciliation_siblings_ids = self.search(cr, uid, [\n # ('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)\n # partial_reconciliation_siblings_ids.remove(line.id)\n\n ret_line = {\n 'id': line.id,\n 'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,\n 'ref': line.move_id.ref or '',\n 'account_code': line.account_id.code,\n 'account_name': line.account_id.name,\n 'account_type': line.account_id.type,\n 'date_maturity': line.date_maturity,\n 'date': line.date,\n 'period_name': line.period_id.name,\n 'journal_name': line.journal_id.name,\n 'partner_id': line.partner_id.id,\n 'partner_name': line.partner_id.name,\n 'is_partially_reconciled': bool(line.reconcile_partial_id),\n 'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,\n }\n\n # Amount residual can be negative\n debit = line.debit\n credit = line.credit\n amount = line.debit - line.credit\n amount_currency = line.amount_currency\n if amount < 0:\n debit, credit = credit, debit\n amount = -amount\n amount_currency = -amount_currency\n\n # Get right debit / credit:\n target_currency = target_currency or company_currency\n line_currency = line.currency_id or company_currency\n amount_currency_str = \"\"\n total_amount_currency_str = \"\"\n if line_currency != company_currency:\n total_amount = line.amount_currency\n actual_debit = debit > 0 and amount_currency or 0.0\n actual_credit = credit > 0 and amount_currency or 0.0\n else:\n total_amount = abs(debit - credit)\n actual_debit = debit > 0 and amount or 0.0\n actual_credit = credit > 0 and amount or 0.0\n if line_currency != target_currency:\n amount_currency_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=line_currency)\n total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)\n ret_line['credit_currency'] = actual_credit\n ret_line['debit_currency'] = actual_debit\n if target_currency == company_currency:\n actual_debit = debit > 0 and amount or 0.0\n actual_credit = credit > 0 and amount or 0.0\n total_amount = abs(debit - credit)\n else:\n ctx = context.copy()\n ctx.update({'date': line.date})\n total_amount = currency_obj.compute(self.env.cr, self.env.uid, line_currency.id, target_currency.id, total_amount,\n context=ctx)\n actual_debit = currency_obj.compute(self.env.cr, self.env.uid, line_currency.id, target_currency.id, actual_debit,\n context=ctx)\n actual_credit = currency_obj.compute(self.env.cr, self.env.uid, line_currency.id, target_currency.id, actual_credit,\n context=ctx)\n amount_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=target_currency)\n total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)\n\n ret_line['debit'] = actual_debit\n ret_line['credit'] = actual_credit\n ret_line['amount_str'] = amount_str\n ret_line['total_amount_str'] = total_amount_str\n ret_line['amount_currency_str'] = amount_currency_str\n ret_line['total_amount_currency_str'] = total_amount_currency_str\n ret.append(ret_line)\n return ret", "def find_contracts(self):\n 
# Select a put to short that is OTM\n short_put = self.chain[\n (self.chain[\"right\"] == \"P\")\n & (self.chain[\"strike\"] < self.underlying_price)\n ][\"skew_premium\"].idxmax()\n short_put = self.chain.iloc[short_put]\n # Buy put option so our margin required is $1000\n long_put = self.chain[\n (self.chain[\"strike\"] == (short_put[\"strike\"] - 10))\n & (self.chain[\"right\"] == \"P\")\n ].squeeze()\n\n # Find the corresponding call option to make the position delta neutral\n put_contract_delta = short_put[\"delta\"]\n short_call = np.abs(\n self.chain[self.chain[\"right\"] == \"C\"][\"delta\"] + put_contract_delta\n ).idxmin()\n short_call = self.chain[self.chain[\"right\"] == \"C\"].iloc[short_call]\n # Find respective call hedge option\n long_call = self.chain[\n (self.chain[\"strike\"] == (short_call[\"strike\"] + 10))\n & (self.chain[\"right\"] == \"C\")\n ].squeeze()\n\n return short_put, short_call, long_put, long_call", "def check_move_data(self, cr, uid, ids, context=None): \n move_line_pool = self.pool.get('account.move.line')\n move = self.pool.get('account.move').browse(cr, uid, context.get('active_id',[]), context=context)\n if move.state != 'posted':\n raise osv.except_osv(_('Warning'), _('Payment is not posted. Please Validate Payment First!'))\n if not move.journal_id.allow_check_writing:\n raise osv.except_osv(_('Warning'), _(\"Current journal doesn't allow check writing\"))\n \n account_ids = self.pool.get('account.account').search(cr, uid, [('type','=','liquidity')], context=context)\n move_line = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[]))], context=context)\n credit_lines = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[])),('credit','>',0),('account_id','not in',account_ids)], context=context)\n if credit_lines:\n raise osv.except_osv(_('Warning'), _('Can not pay with check without cash account!!'))\n \n debit_lines = move_line_pool.search(cr, uid, [('move_id','=',context.get('active_id',[])),('debit','>',0),('partner_id','=',False)], context=context)\n if debit_lines:\n raise osv.except_osv(_('Warning'), _('Can not create new check without partner!!'))\n partners = move_line_pool.read(cr, uid, move_line, ['partner_id'], context=context)#[0]['partner_id']\n x = [part['partner_id'] for part in partners]\n if len(set([part['partner_id'] for part in partners])) > 1:\n raise osv.except_osv(_('Warning'), _('Can not create new check for multiple partner!!'))\n return self.new_check(cr, uid, ids, context=context)", "def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):\n\t\tres = super(account_invoice,self).finalize_invoice_move_lines(cr, uid, invoice_browse, move_lines)\n\t\tmoves = False\n\t\tif invoice_browse.separate_tax:\n\t\t\taccount_pool = self.pool.get('account.account')\n\t\t\tcur_obj = self.pool.get('res.currency')\n\t\t\taccount_ids = [x[2]['account_id'] for x in res]\n\t\t\trec_payable_id = account_pool.search(cr,uid,[('id','in',account_ids),('type','in',('payable','receivable'))])\n\t\t\n\t\t\tif not rec_payable_id and invoice_browse.type =='out_invoice':\n\t\t\t\traise osv.except_osv(_('No Receivable Account Defined!'), _('There is no Receivable Account Defined on this transaction, please check your account configuration.'))\n\t\t\telif not rec_payable_id and invoice_browse.type =='in_invoice':\n\t\t\t\traise osv.except_osv(_('No Payable Account Defined!'), _('There is no Payable Account Defined on this transaction, please check your account configuration.'))\n\t\t\tmoves 
=[]\n\t\t\tmoves_ar_ap = False\n\t\t\ttotal_tax_amt_currency=0.0\n\t\t\ttotal_trans_amt_currency = 0.0\n\t\t\ttotal_trans_amt_currency2 = 0.0\n\t\t\ttotal_tax = 0.0\n\t\t\tall_taxes = self.pool.get('account.tax').search(cr,uid,[])\n\t\t\tcodes = [t.tax_code_id and t.tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)] + [t.ref_tax_code_id and t.ref_tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)]\n\t\t\tcodes = list(set(codes))\n\t\t\tbase_codes = [t.tax_code_id and t.base_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)] + [t.ref_tax_code_id and t.ref_base_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)]\n\t\t\tbase_codes = list(set(base_codes))\n\n\t\t\tfound_tax = False\n\t\t\ttemp = []\n\t\t\ti=0\n\t\t\tfor line in res:\n\t\t\t\ti+=1\n\t\t\t\tsign = invoice_browse.type =='out_invoice' and -1 or 1\n\t\t\t\tposition = line[2]['credit'] !=0.0 and -1 or 1\n\n\t\t\t\ttm = line[2]['debit']!=0.0 and line[2]['debit'] or line[2]['credit']\n\t\t\t\tif line[2]['tax_amount'] and ( line[2]['tax_code_id'] in codes):\n\t\t\t\t\ttotal_tax += position * sign * tm\n\t\t\t\t\ttotal_tax_amt_currency -= sign * position * line[2]['amount_currency']\n\t\t\t\t\tfound_tax = True\n\t\t\t\t\t\n\t\t\t\tif line[2]['account_id'] not in rec_payable_id:\n\t\t\t\t\tif line[2]['debit']!=False or line[2]['credit']!=False:\n\t\t\t\t\t\tmoves.append(line)\n\t\t\t\t\t\ttotal_trans_amt_currency2 += sign*(line[2]['amount_currency'] or 0.0)\t\n\t\t\t\t\tif line[2]['tax_amount'] and line[2]['tax_code_id'] in base_codes:\n\t\t\t\t\t\ttemp.append(line)\n\t\t\t\telse:\n\t\t\t\t\tmoves_ar_ap = line\n\t\t\t\t\ttotal_trans_amt_currency += line[2]['amount_currency']\n\t\t\tfound_not_zero = False\n\t\t\tfor x in temp:\n\t\t\t\tif x[2]['debit']!=False or x[2]['credit']!=False:\n\t\t\t\t\tfound_not_zero = True\n\t\t\t\t\n\t\t\t# print \"moves_ar_ap-----------\",moves_ar_ap\n\t\t\t# if moves_ar_ap and invoice_browse.use_kmk_ar_ap:\n\t\t\t# \tt_moves_arp_ap=moves_ar_ap[2].copy()\n\t\t\t# \tamt = t_moves_arp_ap['debit'] not in (0.0,False) and t_moves_arp_ap['debit'] or (-1 * t_moves_arp_ap['credit'])\n\t\t\t# \tcur_obj =self.pool.get('res.currency')\n\t\t\t# \tcontext_rate = {}\n\t\t\t# \tcontext_rate.update({'date':invoice_browse.date_invoice or time.strftime('%Y-%m-%d'),'reverse':False,'trans_currency':invoice_browse.currency_id and invoice_browse.currency_id.id or False})\n\t\t\t# \tamount_currency = cur_obj.computerate(cr, uid, invoice_browse.currency_id.id,invoice_browse.company_id.tax_base_currency.id , amt, context=context_rate)\n\n\t\t\t# \tt_moves_arp_ap.update({'amount_currency':amount_currency,'currency_id':invoice_browse.company_id and invoice_browse.company_id.tax_base_currency.id})\n\t\t\t# \tmoves_ar_ap = (0,0,t_moves_arp_ap)\n\t\t\t\n\t\t\tprint \"moves_ar_ap-----------\",total_tax,moves_ar_ap[2]['debit'],moves_ar_ap[2]['credit']\n\t\t\tif moves_ar_ap and total_tax > 0.0 and found_tax and found_not_zero:\n\t\t\t\ttemp = moves_ar_ap[2].copy()\n\t\t\t\ttemp2 = moves_ar_ap[2].copy()\n\t\t\t\tdebit = moves_ar_ap[2]['debit']>0.0 and moves_ar_ap[2]['debit'] - total_tax or moves_ar_ap[2]['debit']\n\t\t\t\tcredit = moves_ar_ap[2]['credit']>0.0 and moves_ar_ap[2]['credit'] - total_tax or moves_ar_ap[2]['credit']\n\t\t\t\tdebit2 = moves_ar_ap[2]['debit']>0.0 and total_tax or 0.0\n\t\t\t\tcredit2 = moves_ar_ap[2]['credit']>0.0 and total_tax or 0.0\n\n\t\t\t\t# if invoice_browse.currency_id.id != 
invoice_browse.company_id.currency_id.id or invoice_browse.currency_tax_id.id !=invoice_browse.company_id.currency_id.id or invoice_browse.use_kmk_ar_ap:\n\t\t\t\t# \ttemp.update({\n\t\t\t\t# \t\t'amount_currency':(invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id or invoice_browse.use_kmk_ar_ap) and (total_trans_amt_currency-total_tax_amt_currency) or False,\n\t\t\t\t# \t\t'currency_id':(invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id and not invoice_browse.use_kmk_ar_ap and invoice_browse.currency_id.id) or (invoice_browse.use_kmk_ar_ap and invoice_browse.currency_tax_id and invoice_browse.currency_tax_id.id) or False,\n\t\t\t\t# \t\t})\n\n\t\t\t\t# \ttemp2.update({\n\t\t\t\t# \t\t'amount_currency':total_tax_amt_currency,\n\t\t\t\t# \t\t'ar_ap_tax':True,\n\t\t\t\t# \t\t'currency_id':invoice_browse.currency_tax_id and invoice_browse.currency_tax_id.id or invoice_browse.currency_id.id,})\n\t\t\t\t\n\t\t\t\tis_kmk_tax = invoice_browse.currency_tax_id.id == invoice_browse.company_id.tax_base_currency.id\n\t\t\t\tif is_kmk_tax:\n\t\t\t\t\tif invoice_browse.currency_id.id == invoice_browse.company_id.currency_id.id and invoice_browse.use_kmk_ar_ap:\n\t\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t\t'amount_currency':(total_trans_amt_currency2-total_tax_amt_currency),\n\t\t\t\t\t\t\t'currency_id':invoice_browse.currency_tax_id.id,\n\t\t\t\t\t\t\t})\n\t\t\t\t\telif invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id:\n\t\t\t\t\t\tif invoice_browse.use_kmk_ar_ap:\n\t\t\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t\t\t'amount_currency':(total_trans_amt_currency-total_tax_amt_currency),\n\t\t\t\t\t\t\t\t'currency_id': invoice_browse.currency_tax_id.id,\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t\t\t'amount_currency':(total_trans_amt_currency-total_tax_amt_currency),\n\t\t\t\t\t\t\t\t'currency_id': invoice_browse.currency_id.id!=invoice_browse.company_id.currency_id.id and invoice_browse.currency_id.id or False,\n\t\t\t\t\t\t\t\t})\n\n\t\t\t\t\ttemp2.update({\n\t\t\t\t\t\t'amount_currency':total_tax_amt_currency,\n\t\t\t\t\t\t'ar_ap_tax':True,\n\t\t\t\t\t\t'currency_id': invoice_browse.currency_tax_id.id,})\n\t\t\t\telse:\n\t\t\t\t\ttemp.update({\n\t\t\t\t\t\t'amount_currency':invoice_browse.currency_id.id != invoice_browse.company_id.currency_id.id and (total_trans_amt_currency-total_tax_amt_currency) or 0.0,\n\t\t\t\t\t\t'currency_id':invoice_browse.currency_id.id!=invoice_browse.company_id.currency_id.id and invoice_browse.currency_id.id or False,\n\t\t\t\t\t\t})\n\t\t\t\t\ttemp2.update({\n\t\t\t\t\t\t'amount_currency':total_tax_amt_currency,\n\t\t\t\t\t\t'ar_ap_tax':True,\n\t\t\t\t\t\t'currency_id':invoice_browse.currency_id.id!=invoice_browse.company_id.currency_id.id and invoice_browse.currency_id.id or False,})\n\n\n\n\t\t\t\ttemp.update({'debit':abs(debit),'credit':abs(credit),})\n\t\t\t\ttemp2.update({'debit':abs(debit2),'credit':abs(credit2)})\n\n\t\t\t\tmoves.append((0,0,temp))\n\t\t\t\tmoves.append((0,0,temp2))\n\t\t\telif moves_ar_ap and not found_tax:\n\t\t\t\tmoves.append(moves_ar_ap)\n\t\t\telif moves_ar_ap and found_tax and not found_not_zero:\n\t\t\t\tmoves.append(moves_ar_ap)\n\t\t\telse:\n\t\t\t\tmoves.append(moves_ar_ap)\n\t\t\treturn moves\n\t\telse:\n\t\t\treturn res", "def _lines_different_policy(self, cr, uid, policy_id, lines, context=None):\n different_lines = set()\n if not lines:\n return different_lines\n assert not (isinstance(policy_id, list) and len(policy_id) > 1), \\\n 
\"policy_id: only one id expected\"\n if isinstance(policy_id, list):\n policy_id = policy_id[0]\n cr.execute(\"SELECT move_line_id FROM credit_control_line\"\n \" WHERE policy_id != %s and move_line_id in %s\",\n (policy_id, tuple(lines)))\n res = cr.fetchall()\n if res:\n different_lines.update([x[0] for x in res])\n return different_lines", "def reconcile(self):\n diff = {}\n for security, amount in self.balance.items():\n if not amount == Decimal(0):\n diff[security] = -amount\n return diff", "def _create_move_lines(self, move, amount_currency, debit=0.0, credit=0.0):\n\n account_id = self.journal_id.default_debit_account_id.id if debit else\\\n self.fee_ids.mapped('credit_card_id').account_id.id\n company_currency = self.env.user.company_id.currency_id\n\n move_line_vals = {\n 'move_id': move.id,\n 'debit': debit,\n 'credit': credit,\n 'amount_currency': amount_currency,\n 'name': move.ref,\n 'account_id': account_id,\n 'journal_id': self.journal_id.id,\n 'currency_id': self.currency_id != company_currency and self.currency_id.id or False,\n 'ref': move.ref\n }\n return self.env['account.move.line'].with_context(check_move_validity=False).create(move_line_vals)", "def get_balance(self, cr, uid, ids, context=None):\n total = 0.0\n if not ids:\n return total\n for line in self.read(\n cr, uid, ids, ['debit', 'credit'], context=context):\n total += (line['debit'] or 0.0) - (line['credit'] or 0.0)\n return total", "def army_vantage_difference(self, move):\n return self.army_vantage(move.from_territory_id) - self.army_vantage(move.to_territory_id)", "def _decode_curve_trades(self, context: DecoderContext) -> DecodingOutput:\n\n # These are nullable because in case a curve pool is not stored in our cache or if it\n # is a swap in a metapool (TOKEN_EXCHANGE_UNDERLYING) we will skip token check.\n sold_token_address: Optional[ChecksumEvmAddress] = None\n bought_token_address: Optional[ChecksumEvmAddress] = None\n\n swapping_contract: ChecksumEvmAddress\n if context.tx_log.topics[0] in (TOKEN_EXCHANGE, TOKEN_EXCHANGE_UNDERLYING):\n pool_address = context.tx_log.address\n swapping_contract = pool_address\n # When a single pool is used, spender and receiver is always the same\n spender_address = receiver_address = hex_or_bytes_to_address(context.tx_log.topics[1])\n sold_token_id = hex_or_bytes_to_int(context.tx_log.data[:32])\n raw_sold_amount = hex_or_bytes_to_int(context.tx_log.data[32:64])\n bought_token_id = hex_or_bytes_to_int(context.tx_log.data[64:96])\n raw_bought_amount = hex_or_bytes_to_int(context.tx_log.data[96:128])\n if (\n context.tx_log.topics[0] == TOKEN_EXCHANGE and\n pool_address in self.curve_pools and\n len(self.curve_pools[pool_address]) > max(sold_token_id, bought_token_id) # Make sure that tokens of the pool are cached # noqa: E501\n ):\n sold_token_address = self.curve_pools[pool_address][sold_token_id]\n bought_token_address = self.curve_pools[pool_address][bought_token_id]\n else: # EXCHANGE_MULTIPLE\n swapping_contract = CURVE_SWAP_ROUTER\n spender_address = hex_or_bytes_to_address(context.tx_log.topics[1])\n receiver_address = hex_or_bytes_to_address(context.tx_log.topics[2])\n raw_sold_amount = hex_or_bytes_to_int(context.tx_log.data[-64:-32])\n raw_bought_amount = hex_or_bytes_to_int(context.tx_log.data[-32:])\n # Curve swap router logs route (a list of addresses) that was used. Route consists of\n # 9 elements. Consider X a number of pools that was used. 
Then the structure can be\n # described in the following way:\n # At 0 index: Address of the sold token (token that goes in the router)\n # From 1 to X indices: Addresses of pools that were used\n # At X + 1 index: Address of the bought token (token that comes from the router)\n # From X + 2 to 8 indices: Unused elements (zero addresses)\n # Here we read only addresses of token in and token out.\n sold_token_address = hex_or_bytes_to_address(context.tx_log.data[:32])\n for i in range(1, 9): # Starting from 1 because at 0 is `sold_token_address`\n address = hex_or_bytes_to_address(context.tx_log.data[32 * i:32 * (i + 1)])\n if address == ZERO_ADDRESS:\n break\n bought_token_address = address\n\n sold_asset = _read_curve_asset(sold_token_address, self.evm_inquirer.chain_id)\n bought_asset = _read_curve_asset(bought_token_address, self.evm_inquirer.chain_id)\n spend_event: Optional[EvmEvent] = None\n receive_event: Optional[EvmEvent] = None\n for event in context.decoded_events:\n if event.address != swapping_contract:\n continue\n\n crypto_asset = event.asset.resolve_to_crypto_asset()\n if (\n event.location_label == spender_address and\n event.event_type == HistoryEventType.SPEND and\n event.balance.amount == asset_normalized_value(amount=raw_sold_amount, asset=crypto_asset) and # noqa: E501\n (sold_asset is None or event.asset == sold_asset)\n ):\n event.event_type = HistoryEventType.TRADE\n event.event_subtype = HistoryEventSubType.SPEND\n event.notes = f'Swap {event.balance.amount} {crypto_asset.symbol} in curve'\n event.counterparty = CPT_CURVE\n spend_event = event\n elif (\n event.location_label == receiver_address and\n event.event_type == HistoryEventType.RECEIVE and\n event.balance.amount == asset_normalized_value(amount=raw_bought_amount, asset=crypto_asset) and # noqa: E501\n (bought_asset is None or event.asset == bought_asset)\n ):\n event.event_type = HistoryEventType.TRADE\n event.event_subtype = HistoryEventSubType.RECEIVE\n event.notes = f'Receive {event.balance.amount} {crypto_asset.symbol} as the result of a swap in curve' # noqa: E501\n event.counterparty = CPT_CURVE\n receive_event = event\n\n if spend_event is not None and receive_event is not None:\n # Just to make sure that spend and receive events are consecutive\n maybe_reshuffle_events(ordered_events=[spend_event, receive_event], events_list=context.decoded_events) # noqa: E501\n else:\n log.debug(\n f'Did not find spend and receive events for a curve swap. '\n f'{context.transaction.tx_hash.hex()}. 
Probably some aggregator was used and '\n f'decoding needs to happen in the aggregator-specific decoder.',\n )\n\n return DEFAULT_DECODING_OUTPUT", "def test_avoid_amount_matching_bypass(self):\n self.env['ir.config_parameter'].set_param('account.disable_rec_models_bypass', '1')\n self.rule_1.match_total_amount_param = 90\n second_inv_matching_rule = self.env['account.reconcile.model'].create({\n 'name': 'Invoices Matching Rule',\n 'sequence': 2,\n 'rule_type': 'invoice_matching',\n 'auto_reconcile': False,\n 'match_nature': 'both',\n 'match_same_currency': False,\n 'match_total_amount': False,\n 'match_partner': True,\n 'company_id': self.company.id,\n })\n\n self.bank_line_1.write({\n 'payment_ref': self.invoice_line_1.move_id.payment_reference,\n 'amount': 99,\n })\n self.bank_line_2.write({\n 'payment_ref': self.invoice_line_2.move_id.payment_reference,\n 'amount': 1,\n })\n\n self._check_statement_matching(self.rule_1 + second_inv_matching_rule, {\n self.bank_line_1.id: {'aml_ids': [self.invoice_line_1.id], 'model': self.rule_1, 'status': 'write_off', 'partner': self.bank_line_1.partner_id},\n self.bank_line_2.id: {'aml_ids': [self.invoice_line_2.id], 'model': second_inv_matching_rule, 'partner': self.bank_line_2.partner_id}\n }, statements=self.bank_st)", "def total_value():\n cash_value = bank_of_rick.current_value\n investment_value = 0\n for ticker, investment in investments.items():\n if investment:\n investment_value += investment.current_value\n return cash_value, investment_value", "def _create_account_move_line(self, cr, uid, ids, session=None, move_id=None, context=None):\n # Tricky, via the workflow, we only have one id in the ids variable\n account_move_obj = self.pool.get('account.move')\n account_move_line_obj = self.pool.get('account.move.line')\n account_period_obj = self.pool.get('account.period')\n account_tax_obj = self.pool.get('account.tax')\n user_proxy = self.pool.get('res.users')\n property_obj = self.pool.get('ir.property')\n cur_obj = self.pool.get('res.currency')\n\n ctx = dict(context or {}, account_period_prefer_normal=True)\n period = account_period_obj.find(cr, uid, context=ctx)[0]\n\n #session_ids = set(order.session_id for order in self.browse(cr, uid, ids, context=context))\n\n if session and not all(session.id == order.session_id.id for order in self.browse(cr, uid, ids, context=context)):\n raise osv.except_osv(_('Error!'), _('Selected orders do not have the same session!'))\n\n current_company = user_proxy.browse(cr, uid, uid, context=context).company_id\n\n grouped_data = {}\n have_to_group_by = session and session.config_id.group_by or False\n\n def compute_tax(amount, tax, line):\n if amount > 0:\n tax_code_id = tax['base_code_id']\n tax_amount = line.price_subtotal * tax['base_sign']\n else:\n tax_code_id = tax['ref_base_code_id']\n tax_amount = line.price_subtotal * tax['ref_base_sign']\n\n return (tax_code_id, tax_amount,)\n\n for order in self.browse(cr, uid, ids, context=context):\n if order.account_move:\n continue\n if order.state != 'paid':\n continue\n\n user_company = user_proxy.browse(cr, order.user_id.id, order.user_id.id).company_id\n\n group_tax = {}\n account_def = property_obj.get(cr, uid, 'property_account_receivable', 'res.partner', context=context)\n\n order_account = order.partner_id and \\\n order.partner_id.property_account_receivable and \\\n order.partner_id.property_account_receivable.id or \\\n account_def and account_def.id or current_company.account_receivable.id\n\n if move_id is None:\n # Create an entry for the 
sale\n move_id = account_move_obj.create(cr, uid, {\n 'ref' : order.name,\n 'journal_id': order.sale_journal.id,\n }, context=context)\n\n def insert_data(data_type, values):\n # if have_to_group_by:\n\n sale_journal_id = order.sale_journal.id\n\n # 'quantity': line.qty,\n # 'product_id': line.product_id.id,\n values.update({\n 'date': order.date_order[:10],\n 'ref': order.name,\n 'journal_id' : sale_journal_id,\n 'period_id' : period,\n 'move_id' : move_id,\n 'company_id': user_company and user_company.id or False,\n })\n\n if data_type == 'product':\n key = ('product', values['partner_id'], values['product_id'], values['debit'] > 0)\n elif data_type == 'tax':\n key = ('tax', values['partner_id'], values['tax_code_id'], values['debit'] > 0)\n elif data_type == 'counter_part':\n key = ('counter_part', values['partner_id'], values['account_id'], values['debit'] > 0)\n else:\n return\n\n grouped_data.setdefault(key, [])\n\n # if not have_to_group_by or (not grouped_data[key]):\n # grouped_data[key].append(values)\n # else:\n # pass\n\n if have_to_group_by:\n if not grouped_data[key]:\n grouped_data[key].append(values)\n else:\n current_value = grouped_data[key][0]\n current_value['quantity'] = current_value.get('quantity', 0.0) + values.get('quantity', 0.0)\n current_value['credit'] = current_value.get('credit', 0.0) + values.get('credit', 0.0)\n current_value['debit'] = current_value.get('debit', 0.0) + values.get('debit', 0.0)\n current_value['tax_amount'] = current_value.get('tax_amount', 0.0) + values.get('tax_amount', 0.0)\n else:\n grouped_data[key].append(values)\n\n #because of the weird way the pos order is written, we need to make sure there is at least one line, \n #because just after the 'for' loop there are references to 'line' and 'income_account' variables (that \n #are set inside the for loop)\n #TOFIX: a deep refactoring of this method (and class!) 
is needed in order to get rid of this stupid hack\n assert order.lines, _('The POS order must have lines when calling this method')\n # Create an move for each order line\n\n cur = order.pricelist_id.currency_id\n for line in order.lines:\n tax_amount = 0\n taxes = [t for t in line.product_id.taxes_id]\n computed_taxes = account_tax_obj.compute_all(cr, uid, taxes, line.price_unit * (100.0-line.discount) / 100.0, line.qty)['taxes']\n\n for tax in computed_taxes:\n tax_amount += cur_obj.round(cr, uid, cur, tax['amount'])\n group_key = (tax['tax_code_id'], tax['base_code_id'], tax['account_collected_id'], tax['id'])\n\n group_tax.setdefault(group_key, 0)\n group_tax[group_key] += cur_obj.round(cr, uid, cur, tax['amount'])\n\n amount = line.price_subtotal\n\n # Search for the income account\n if line.product_id.property_account_income.id:\n income_account = line.product_id.property_account_income.id\n elif line.product_id.categ_id.property_account_income_categ.id:\n income_account = line.product_id.categ_id.property_account_income_categ.id\n else:\n raise osv.except_osv(_('Error!'), _('Please define income '\\\n 'account for this product: \"%s\" (id:%d).') \\\n % (line.product_id.name, line.product_id.id, ))\n\n # Empty the tax list as long as there is no tax code:\n tax_code_id = False\n tax_amount = 0\n while computed_taxes:\n tax = computed_taxes.pop(0)\n tax_code_id, tax_amount = compute_tax(amount, tax, line)\n\n # If there is one we stop\n if tax_code_id:\n break\n\n # Create a move for the line\n insert_data('product', {\n 'name': line.product_id.name,\n 'quantity': line.qty,\n 'product_id': line.product_id.id,\n 'account_id': income_account,\n 'credit': ((amount>0) and amount) or 0.0,\n 'debit': ((amount<0) and -amount) or 0.0,\n 'tax_code_id': tax_code_id,\n 'tax_amount': tax_amount,\n 'partner_id': order.partner_id and self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n # For each remaining tax with a code, whe create a move line\n for tax in computed_taxes:\n tax_code_id, tax_amount = compute_tax(amount, tax, line)\n if not tax_code_id:\n continue\n\n insert_data('tax', {\n 'name': _('Tax'),\n 'product_id':line.product_id.id,\n 'quantity': line.qty,\n 'account_id': income_account,\n 'credit': 0.0,\n 'debit': 0.0,\n 'tax_code_id': tax_code_id,\n 'tax_amount': tax_amount,\n 'partner_id': order.partner_id and self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n # Create a move for each tax group\n (tax_code_pos, base_code_pos, account_pos, tax_id)= (0, 1, 2, 3)\n\n for key, tax_amount in group_tax.items():\n tax = self.pool.get('account.tax').browse(cr, uid, key[tax_id], context=context)\n insert_data('tax', {\n 'name': _('Tax') + ' ' + tax.name,\n 'quantity': line.qty,\n 'product_id': line.product_id.id,\n 'account_id': key[account_pos] or income_account,\n 'credit': ((tax_amount>0) and tax_amount) or 0.0,\n 'debit': ((tax_amount<0) and -tax_amount) or 0.0,\n 'tax_code_id': key[tax_code_pos],\n 'tax_amount': tax_amount,\n 'partner_id': order.partner_id and self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n # counterpart\n insert_data('counter_part', {\n 'name': _(\"Trade Receivables\"), #order.name,\n 'account_id': order_account,\n 'credit': ((order.amount_total < 0) and -order.amount_total) or 0.0,\n 'debit': ((order.amount_total > 0) and order.amount_total) or 0.0,\n 'partner_id': order.partner_id and 
self.pool.get(\"res.partner\")._find_accounting_partner(order.partner_id).id or False\n })\n\n order.write({'state':'done', 'account_move': move_id})\n\n all_lines = []\n for group_key, group_data in grouped_data.iteritems():\n for value in group_data:\n all_lines.append((0, 0, value),)\n if move_id: #In case no order was changed\n self.pool.get(\"account.move\").write(cr, uid, [move_id], {'line_id':all_lines}, context=context)\n\n return True", "def combine_positions(self):\n self.cash = 0\n self.invested_capital = 0\n self.realized_gain = 0\n self.openvalue = 0\n self.daygain = 0\n self.sector = ''\n\n for t in self.open_positions:\n if not t.expiration:\n if t.shares > 0:\n positiontype = 'longs'\n elif t.shares < 0:\n positiontype = 'shorts'\n else:\n positiontype = 'options'\n t.symbol = '{s}{e}{t}{k}'.format(\n s=t.symbol,\n e=t.expiration.strftime(\"%y%m%d\"),\n t=t.descriptor[0].upper(),\n k='{:05d}{:03d}'.format(int(t.strike), int((t.strike - int(t.strike)) * 1000))\n )\n position = self.combined_positions[positiontype].get(t.symbol, Position(t.symbol))\n position.add_transaction(t)\n position.normalize_quote(quotes.get_by_symbol(t.symbol))\n self.combined_positions[positiontype][t.symbol] = position\n self.invested_capital += t.open_price * t.shares\n self.openvalue += t.shares * quotes.get_by_symbol(t.symbol).last\n self.daygain += t.shares * quotes.get_by_symbol(t.symbol).net\n\n for t in self.closed_positions:\n self.realized_gain += (t.close_price - t.open_price) * t.shares\n closedposition = self.combined_positions['closed'].get(t.symbol, ClosedPosition(t.symbol))\n closedposition.add_transaction(t)\n self.combined_positions['closed'][t.symbol] = closedposition\n\n for t in self.cash_positions:\n self.cash += t.open_price\n cashposition = self.combined_positions['cash'].get('CASH', CashPosition('CASH'))\n cashposition.add_transaction(t)\n self.combined_positions['cash']['CASH'] = cashposition\n\n cashposition = self.combined_positions['cash'].get('CASH', CashPosition('CASH'))\n # Add a cash transaction representing invested capital.\n t = Transaction('CASH', symbol='CASH', fileportname=self.fileportname, sector='invested capital', position='cash', descriptor='intermediate', shares=Decimal(0.0), open_price=-self.invested_capital, open_date=datetime.date.today())\n cashposition.add_transaction(t)\n # Add a cash transaction representing realized gain.\n t = Transaction('CASH', symbol='CASH', fileportname=self.fileportname, sector='realized gain', position='cash', descriptor='intermediate', shares=Decimal(0.0), open_price=self.realized_gain, open_date=datetime.date.today())\n cashposition.add_transaction(t)\n\n self.totalvalue = self.cash - self.invested_capital + self.realized_gain + self.openvalue" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the journal items for the payment and update the payment's state to 'posted'. A journal entry is created containing an item in the source liquidity account (the selected journal's default_debit or default_credit account) and another in the destination reconcilable account (see _compute_destination_account_id). If invoice_ids is not empty, there will be one reconcilable move line per invoice to reconcile with. If the payment is a transfer, a second journal entry is created in the destination journal to receive money from the transfer account.
def post(self): AccountMove = self.env['account.move'].with_context(default_type='entry') for rec in self: if rec.state not in ['draft', 'pdc']: raise UserError(_("Only a draft payment can be posted.")) if any(inv.state != 'posted' for inv in rec.invoice_ids): raise ValidationError(_("The payment cannot be processed because the invoice is not open!")) # keep the name in case of a payment reset to draft if not rec.name: # Use the right sequence to set the name if rec.payment_type == 'transfer': sequence_code = 'account.payment.transfer' else: if rec.partner_type == 'customer': if rec.payment_type == 'inbound': sequence_code = 'account.payment.customer.invoice' if rec.payment_type == 'outbound': sequence_code = 'account.payment.customer.refund' if rec.partner_type == 'supplier': if rec.payment_type == 'inbound': sequence_code = 'account.payment.supplier.refund' if rec.payment_type == 'outbound': sequence_code = 'account.payment.supplier.invoice' rec.name = self.env['ir.sequence'].next_by_code(sequence_code, sequence_date=rec.payment_date) if not rec.name and rec.payment_type != 'transfer': raise UserError(_("You have to define a sequence for %s in your company.") % (sequence_code,)) # moves = AccountMove.create(rec._prepare_payment_moves()) amount = rec.amount * (rec.payment_type in ('outbound', 'transfer') and 1 or -1) # print("Attempt") if rec.payment_type != 'transfer': moves = AccountMove.create(rec._create_payment_entry(amount)) else: moves = AccountMove.create(rec._prepare_payment_moves()) # print("Attempt Success") moves.filtered(lambda move: move.journal_id.post_at != 'bank_rec').post() # Update the state / move before performing any reconciliation. move_name = self._get_move_name_transfer_separator().join(moves.mapped('name')) rec.write({'state': 'posted', 'move_name': move_name}) if rec.payment_type in ('inbound', 'outbound'): # ==== 'inbound' / 'outbound' ==== if rec.invoice_ids: (moves[0] + rec.invoice_ids + rec.payment_crdr_inv_line_ids.filtered(lambda l: l.allocation > 0.0).mapped('invoice_id')).line_ids \ .filtered(lambda line: not line.reconciled and line.account_id == rec.destination_account_id) \ .reconcile() elif rec.payment_type == 'transfer': # ==== 'transfer' ==== (moves + rec.payment_crdr_inv_line_ids.filtered(lambda l: l.allocation > 0.0).mapped('invoice_id')).line_ids \ .filtered(lambda line: line.account_id == rec.company_id.transfer_account_id) \ .reconcile() return True
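As an illustration of the entry described in the query above, a minimal sketch of the two balanced lines an inbound customer payment would produce: one on the source liquidity account and the counterpart on the destination reconcilable (e.g. receivable) account. The account labels and the 100.0 amount are assumptions for illustration, not values taken from the code.

payment_amount = 100.0  # hypothetical inbound customer payment

liquidity_line = {            # item on the source liquidity account (journal default)
    "account": "Bank",
    "debit": payment_amount,
    "credit": 0.0,
}
counterpart_line = {          # item on the destination reconcilable account
    "account": "Receivable",
    "debit": 0.0,
    "credit": payment_amount,
}

# The entry balances: total debit equals total credit.
assert liquidity_line["debit"] + counterpart_line["debit"] == \
       liquidity_line["credit"] + counterpart_line["credit"]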
[ "def post(self):\n AccountMove = self.env['account.move'].with_context(default_type='entry')\n for rec in self:\n\n if rec.state != 'approve':\n raise UserError(_(\"Only a draft payment can be posted.\"))\n\n if any(inv.state != 'posted' for inv in rec.invoice_ids):\n raise ValidationError(_(\"The payment cannot be processed because the invoice is not open!\"))\n\n # keep the name in case of a payment reset to draft\n if not rec.name:\n # Use the right sequence to set the name\n if rec.payment_type == 'transfer':\n sequence_code = 'account.payment.transfer'\n else:\n if rec.partner_type == 'customer':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.customer.invoice'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.customer.refund'\n if rec.partner_type == 'supplier':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.supplier.refund'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.supplier.invoice'\n rec.name = self.env['ir.sequence'].next_by_code(sequence_code, sequence_date=rec.payment_date)\n if not rec.name and rec.payment_type != 'transfer':\n raise UserError(_(\"You have to define a sequence for %s in your company.\") % (sequence_code,))\n\n moves = AccountMove.create(rec._prepare_payment_moves())\n moves.filtered(lambda move: move.journal_id.post_at != 'bank_rec').post()\n\n # Update the state / move before performing any reconciliation.\n move_name = self._get_move_name_transfer_separator().join(moves.mapped('name'))\n rec.write({'state': 'posted', 'move_name': move_name})\n\n if rec.payment_type in ('inbound', 'outbound'):\n # ==== 'inbound' / 'outbound' ====\n if rec.invoice_ids:\n (moves[0] + rec.invoice_ids).line_ids \\\n .filtered(lambda line: not line.reconciled and line.account_id == rec.destination_account_id)\\\n .reconcile()\n elif rec.payment_type == 'transfer':\n # ==== 'transfer' ====\n moves.mapped('line_ids')\\\n .filtered(lambda line: line.account_id == rec.company_id.transfer_account_id)\\\n .reconcile()\n\n return True", "def action_post(self):\n res = super(AccountPayment, self).action_post()\n for rec in self:\n invoice = rec.move_id\n if invoice.book_issue_id and invoice.payment_state == \"paid\":\n invoice.book_issue_id.state = \"paid\"\n return res", "def action_post(self):\n validation = self._check_payment_approval()\n if validation:\n if self.state not in ('draft', 'approved'):\n raise UserError(_(\"Only a draft or approved payment can be posted.\"))\n\n if any(inv.state != 'posted' for inv in self.reconciled_invoice_ids):\n raise ValidationError(_(\"The payment cannot be processed because the invoice is not open!\"))\n self.move_id._post(soft=False)", "def action_move_create(self, cr, uid, ids, context=None):\n\t\tait_obj = self.pool.get('account.invoice.tax')\n\t\tcur_obj = self.pool.get('res.currency')\n\t\tperiod_obj = self.pool.get('account.period')\n\t\tpayment_term_obj = self.pool.get('account.payment.term')\n\t\tjournal_obj = self.pool.get('account.journal')\n\t\tmove_obj = self.pool.get('account.move')\n\t\tif context is None:\n\t\t\tcontext = {}\n\t\tfor inv in self.browse(cr, uid, ids, context=context):\n\t\t\tif not inv.journal_id.sequence_id:\n\t\t\t\traise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))\n\t\t\tif not inv.invoice_line:\n\t\t\t\traise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))\n\t\t\tif inv.move_id:\n\t\t\t\tcontinue\n\n\t\t\tctx = 
context.copy()\n\t\t\tctx.update({'lang': inv.partner_id.lang})\n\t\t\tif not inv.date_invoice:\n\t\t\t\tself.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)\n\t\t\tcompany_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id\n\t\t\t# create the analytical lines\n\t\t\t# one move line per invoice line\n\t\t\timl = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n\t\t\t# check if taxes are all computed\n\t\t\tcompute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n\t\t\tself.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\t\t\t# I disabled the check_total feature\n\t\t\tgroup_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n\t\t\tgroup_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)\n\t\t\tif group_check_total and uid in [x.id for x in group_check_total.users]:\n\t\t\t\tif (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):\n\t\t\t\t\traise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n\t\t\tif inv.payment_term:\n\t\t\t\ttotal_fixed = total_percent = 0\n\t\t\t\tfor line in inv.payment_term.line_ids:\n\t\t\t\t\tif line.value == 'fixed':\n\t\t\t\t\t\ttotal_fixed += line.value_amount\n\t\t\t\t\tif line.value == 'procent':\n\t\t\t\t\t\ttotal_percent += line.value_amount\n\t\t\t\ttotal_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n\t\t\t\tif (total_fixed + total_percent) > 100:\n\t\t\t\t\traise osv.except_osv(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. 
In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n\t\t\t# one move line per tax line\n\t\t\timl += ait_obj.move_line_get(cr, uid, inv.id)\n\n\t\t\tentry_type = ''\n\t\t\tif inv.type in ('in_invoice', 'in_refund'):\n\t\t\t\tref = inv.reference\n\t\t\t\tentry_type = 'journal_pur_voucher'\n\t\t\t\tif inv.type == 'in_refund':\n\t\t\t\t\tentry_type = 'cont_voucher'\n\t\t\telse:\n\t\t\t\tref = self._convert_ref(cr, uid, inv.number)\n\t\t\t\tentry_type = 'journal_sale_vou'\n\t\t\t\tif inv.type == 'out_refund':\n\t\t\t\t\tentry_type = 'cont_voucher'\n\t\t\tdiff_currency_p = inv.currency_id.id <> company_currency or inv.use_kmk_ar_ap\n\t\t\t# create one move line for the total and possibly adjust the other lines amount\n\t\t\ttotal = 0\n\t\t\ttotal_currency = 0\n\n\t\t\ttotal, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n\t\t\tacc_id = inv.account_id.id\n\t\t\t\n\t\t\tname = inv['name'] or inv['supplier_invoice_number'] or '/'\n\t\t\ttotlines = False\n\t\t\tif inv.payment_term:\n\t\t\t\ttotlines = payment_term_obj.compute(cr,\n\t\t\t\t\t\tuid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n\t\t\tif totlines:\n\t\t\t\tres_amount_currency = total_currency\n\t\t\t\ti = 0\n\t\t\t\tctx.update({'date': inv.date_invoice})\n\t\t\t\tfor t in totlines:\n\t\t\t\t\tif inv.currency_id.id != company_currency:\n\t\t\t\t\t\tif inv.use_kmk_ar_ap:\n\t\t\t\t\t\t\tamount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n\t\t\t\t\t\telse: \n\t\t\t\t\t\t\tamount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n\t\t\t\t\telse:\n\t\t\t\t\t\tamount_currency = False\n\n\t\t\t\t\t# last line add the diff\n\t\t\t\t\tres_amount_currency -= amount_currency or 0\n\t\t\t\t\ti += 1\n\t\t\t\t\tif i == len(totlines):\n\t\t\t\t\t\tamount_currency += res_amount_currency\n\n\t\t\t\t\tcurrency_p = (inv.use_kmk_ar_ap and inv.company_id.tax_base_currency.id) \\\n\t\t\t\t\t\t\tor (inv.currency_id.id != inv.company_id.currency_id.id and not inv.use_kmk_ar_ap and inv.company_id.currency_id.id) \\\n\t\t\t\t\t\t\tor False\n\n\t\t\t\t\timl.append({\n\t\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t\t'name': name,\n\t\t\t\t\t\t'price': t[1],\n\t\t\t\t\t\t'account_id': acc_id,\n\t\t\t\t\t\t'date_maturity': t[0],\n\t\t\t\t\t\t'amount_currency': diff_currency_p \\\n\t\t\t\t\t\t\t\tand amount_currency or False,\n\t\t\t\t\t\t'currency_id': currency_p,\n\t\t\t\t\t\t'ref': ref,\n\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\tcurrency_p = (inv.use_kmk_ar_ap and inv.company_id.tax_base_currency.id) \\\n\t\t\t\t\t\t\tor (inv.currency_id.id != inv.company_id.currency_id.id and not inv.use_kmk_ar_ap and inv.company_id.currency_id.id) \\\n\t\t\t\t\t\t\tor False\n\n\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total,\n\t\t\t\t\t'account_id': acc_id,\n\t\t\t\t\t'date_maturity': inv.date_due or False,\n\t\t\t\t\t'amount_currency': diff_currency_p \\\n\t\t\t\t\t\t\tand total_currency or False,\n\t\t\t\t\t'currency_id': currency_p or False,\n\t\t\t\t\t'ref': ref\n\t\t\t})\n\n\t\t\tdate = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n\t\t\tpart = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n\t\t\tline = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)\n\t\t\tline = self.group_lines(cr, uid, iml, line, inv)\n\n\t\t\tjournal_id = 
inv.journal_id.id\n\t\t\tjournal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n\t\t\tif journal.centralisation:\n\t\t\t\traise osv.except_osv(_('User Error!'),\n\t\t\t\t\t\t_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n\t\t\tline = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\t\t\t\n\t\t\tall_taxes = self.pool.get('account.tax').search(cr,uid,[])\n\t\t\tcodes = [t.tax_code_id and t.tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)] + [t.ref_tax_code_id and t.ref_tax_code_id.id for t in self.pool.get('account.tax').browse(cr,uid,all_taxes)]\n\t\t\tcodes = list(set(codes))\n\t\t\t\t\t\n\t\t\tline_temp = []\n\t\t\tfor mvl_temp in line:\n\t\t\t\t\n\t\t\t\tif 'tax_code_id' in mvl_temp[2] and mvl_temp[2]['tax_code_id'] in codes:\n\t\t\t\t\tdummy_data = mvl_temp[2].copy()\n\t\t\t\t\tdummy_data.update({\n\t\t\t\t\t\t'faktur_pajak_source' :tuple(account.invoice,inv.id),\n\t\t\t\t\t\t'faktur_pajak_no'\t : inv.nomor_faktur_id and inv.nomor_faktur_id.name or ''\n\t\t\t\t\t\t})\n\t\t\t\t\tline_temp.append((0,0,dummy_data))\n\t\t\t\telse:\n\t\t\t\t\tline_temp.append(mvl_temp)\n\t\t\tline = line_temp\n\n\t\t\tmove = {\n\t\t\t\t'ref': inv.reference and inv.reference or inv.name,\n\t\t\t\t'line_id': line,\n\t\t\t\t'journal_id': journal_id,\n\t\t\t\t'date': date,\n\t\t\t\t'narration': inv.comment,\n\t\t\t\t'company_id': inv.company_id.id,\n\t\t\t}\n\t\t\tperiod_id = inv.period_id and inv.period_id.id or False\n\t\t\tctx.update(company_id=inv.company_id.id,\n\t\t\t\t\t account_period_prefer_normal=True)\n\t\t\tif not period_id:\n\t\t\t\tperiod_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)\n\t\t\t\tperiod_id = period_ids and period_ids[0] or False\n\t\t\tif period_id:\n\t\t\t\tmove['period_id'] = period_id\n\t\t\t\tfor i in line:\n\t\t\t\t\ti[2]['period_id'] = period_id\n\n\t\t\tctx.update(invoice=inv)\n\t\t\tmove_id = move_obj.create(cr, uid, move, context=ctx)\n\t\t\tnew_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n\t\t\t# make the invoice point to that move\n\t\t\n\t\t\tself.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n\t\t\t# Pass invoice in context in method post: used if you want to get the same\n\t\t\t# account move reference when creating the same invoice after a cancelled one:\n\t\t\t# link to account_move post\n\t\t\tmove_obj.post(cr, uid, [move_id], context=ctx)\n\t\tself._log_event(cr, uid, ids)\n\t\treturn True", "def create_journal_entry(self, new_connection_id, security_deposit_amount):\n account_obj = self.env['account.account']\n name = \"Security Deposit Receive for \" \\\n \"Damage Qty of %s\" % (new_connection_id.number)\n debit_account_id = account_obj.search([\n ('user_type_id', '=',\n self.env.ref('account.data_account_type_current_liabilities').id),\n ('company_id', '=', self.env.user.company_id.id),\n ('code', '=', '3109000')], limit=1)\n credit_account_id = account_obj.search([\n ('user_type_id', '=',\n self.env.ref('account.data_account_type_revenue').id),\n ('company_id', '=', self.env.user.company_id.id),\n ('code', '=', '4100000')], limit=1)\n debit_vals = {\n 'name': name,\n 'partner_id': new_connection_id.partner_id.id or False,\n 'account_id': debit_account_id and debit_account_id.id or False,\n 'debit': security_deposit_amount,\n 'credit': 0.0,\n }\n credit_vals = {\n 'name': name,\n 'partner_id': 
new_connection_id.partner_id.id or False,\n 'account_id': credit_account_id and credit_account_id.id or False,\n 'debit': 0.0,\n 'credit': security_deposit_amount,\n }\n #MOVE CREATE\n vals = {\n 'ref': name,\n 'journal_id': self.env['account.journal'].search([\n ('company_id', '=', self.env.company.id),\n ('type', '=', 'cash')], limit=1).id,\n 'state': 'draft',\n 'company_id': self.company_id.id,\n 'line_ids': [(0, 0, debit_vals), (0, 0, credit_vals)]\n }\n move_id = self.env['account.move'].create(vals)\n return move_id", "def make_acct_entries(self, user, inv, amount, **kwargs):\n from tendenci.apps.accountings.models import Acct, AcctEntry, AcctTran\n from tendenci.apps.accountings.utils import (make_acct_entries_initial,\n make_acct_entries_closing)\n\n ae = AcctEntry.objects.create_acct_entry(user, 'invoice', inv.id)\n if not inv.is_tendered:\n make_acct_entries_initial(user, ae, amount)\n else:\n # payment has now been received\n make_acct_entries_closing(user, ae, amount)\n\n # #CREDIT corporate membership SALES\n acct_number = self.get_acct_number()\n acct = Acct.objects.get(account_number=acct_number)\n AcctTran.objects.create_acct_tran(user, ae, acct, amount*(-1))", "def create_invoice(self, payment_account: PaymentAccount, line_items: [PaymentLineItem], invoice: Invoice,\n **kwargs) -> InvoiceReference:", "def process_journals():\n with db.session.connection(execution_options={\"schema_translate_map\":{\"tenant\":session['schema']}}):\n journals = Journal.query.filter_by(updated=False).order_by(Journal.id.asc()).all()\n accounts = Account.query.all()\n names = {i.id:i.account_name for i in accounts}\n if request.method ==\"POST\":\n \n for journal in journals:\n post_balance = dr_txn_post_balance(journal.date,journal.amount,journal.dr)\n txn1 = Ledger(date=journal.date,account_id=journal.dr,journal_id=journal.id,txn_type=\"DR\",amount=journal.amount,post_balance =post_balance)\n post_balance = cr_txn_post_balance(journal.date,journal.amount,journal.cr)\n txn2 = Ledger(date=journal.date,account_id=journal.cr,journal_id=journal.id,txn_type=\"CR\",amount=journal.amount,post_balance =post_balance)\n db.session.add(txn1)\n db.session.add(txn2)\n update_balances(journal.date,journal.amount,journal.dr,\"DR\")\n update_balances(journal.date,journal.amount,journal.cr,\"CR\")\n journal.updated = True\n db.session.flush()\n db.session.commit()\n flash('Journals Posted to Ledger','info')\n return redirect(url_for('process_journals'))\n else:\n return render_template(\"process_journals.html\",journals=journals,names=names)", "def post(self):\n for rec in self:\n # code start\n# total = 0.0\n# for line in rec.invoice_lines:\n# if line.allocation < 0:\n# raise ValidationError(_(\"Negative allocation amount not allowed!\"))\n# if line.allocation > line.open_amount:\n# raise UserError(\"Allocation amount %s is greater then open amount %s of Invoice.\" % (line.allocation, line.open_amount))\n# total += line.allocation\n# if line.open_amount != line.invoice_id.residual:\n# raise UserError(\"Due amount changed.\\n Please click 'Update Invoice' button to update amount\")\n# \n# if total > rec.amount:\n# raise UserError(\"Total allocation %s is more then payment amount %s\" % (total, rec.amount))\n amt = 0\n if rec.invoice_lines:\n \n for line in rec.invoice_lines:\n amt += line.allocation\n # if rec.amount < amt:\n # raise ValidationError((\"Payment amount must be greater then or equal to '%s'\") %(amt))\n # if rec.amount > amt:\n # for line in rec.invoice_lines:\n # line.allocation = 
line.allocation + (rec.amount - amt)\n # break\n return super(account_payment,self).post()", "def post(self):\n if self.invoice_id and self.invoice_id.revenue_type == 'month_revenue':\n if self.invoice_id.month_revenue_date > str(fields.datetime.now()):\n raise ValidationError(\n _(\"Date Specified in invoice '%s' not come Yet!!\") % (self.invoice_id.month_revenue_date))\n else:\n super(accountMove, self).post()\n self.invoice_id.state = 'posted'\n return\n\n\n else:\n super( accountMove,self).post()\n if self.invoice_id and self.invoice_id.revenue_type == 'deferred_revenue':\n is_all_move_invoice_posted = self.env['account.move'].search([('invoice_id','=',self.invoice_id.id),('state','!=','posted')])\n if len(is_all_move_invoice_posted) == 0:\n self.invoice_id.state = 'posted'", "def create_invoice(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n picking_pool = self.pool.get('stock.picking')\n\n # get user input, specially invoice_date\n onshipdata_obj = self.read(cr, uid, ids, ['journal_id', 'group', 'invoice_date'])\n if context.get('new_picking', False):\n onshipdata_obj['id'] = onshipdata_obj.new_picking\n onshipdata_obj[ids] = onshipdata_obj.new_picking\n\n # pass invoice date, we don't use due date but the program requires it\n context['date_inv'] = onshipdata_obj[0]['invoice_date']\n context['date_due'] = context['date_inv']\n\n # get invoice type\n active_ids = context.get('active_ids', [])\n active_picking = picking_pool.browse(cr, uid, context.get('active_id', False), context=context)\n inv_type = picking_pool._get_invoice_type(active_picking)\n context['inv_type'] = inv_type\n if isinstance(onshipdata_obj[0]['journal_id'], tuple):\n onshipdata_obj[0]['journal_id'] = onshipdata_obj[0]['journal_id'][0]\n\n # call function to create invoice\n res = picking_pool.action_invoice_create(cr, uid, active_ids,\n journal_id=onshipdata_obj[0]['journal_id'],\n group=onshipdata_obj[0]['group'],\n type=inv_type,\n context=context)\n return res", "def create_receipts(self, order_items):\n for order_item in order_items.all():\n self.create_order_item_receipt(order_item)", "def create_invoices(self, cr, uid, ids, context=None):\n sale_obj = self.pool.get('sale.order')\n act_window = self.pool.get('ir.actions.act_window')\n wizard = self.browse(cr, uid, ids[0], context)\n sale_ids = context.get('active_ids', [])\n salepay_obj = self.pool.get('sale.pay')\n salepay_ids = salepay_obj.search(cr, uid, [('so_id', '=', sale_ids[0])])\n reads = salepay_obj.read(cr, uid, salepay_ids, ['percent', 'due_date'], context=context)\n if salepay_ids and wizard.advance_payment_method == 'all':\n inv_ids = []\n for r in reads:\n for sale in sale_obj.browse(cr, uid, sale_ids, context=context):\n inv_lines = []\n for line in sale.order_line:\n inv_line_vals = self.pool.get('sale.order.line')._prepare_order_line_invoice_line(cr, uid, line, False, context=context)\n inv_line_vals['price_unit'] = inv_line_vals['price_unit'] * r['percent'] / 100\n inv_line_vals['name'] += \"\\n(\" + _(\"Advance of %s %%\") % (r['percent']) + \")\"\n inv_lines.append((0, 0, inv_line_vals))\n inv_values = {\n 'name': sale.client_order_ref or sale.name,\n 'origin': sale.name,\n 'type': 'out_invoice',\n 'reference': False,\n 'account_id': sale.partner_id.property_account_receivable.id,\n 'partner_id': sale.partner_invoice_id.id,\n 'invoice_line': inv_lines,\n 'currency_id': sale.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': sale.payment_term.id,\n 'fiscal_position': sale.fiscal_position.id or 
sale.partner_id.property_account_position.id,\n 'date_due': r['due_date']\n }\n inv_ids.append(self._create_invoices(cr, uid, inv_values, sale.id, context=context))\n if context.get('open_invoices', False):\n return sale_obj.action_view_invoice(cr, uid, sale_ids, context=context)\n return {'type': 'ir.actions.act_window_close'}\n if not salepay_ids and wizard.advance_payment_method == 'all':\n res = sale_obj.manual_invoice(cr, uid, sale_ids, context)\n if context.get('open_invoices', False):\n return res\n return {'type': 'ir.actions.act_window_close'}\n\n if wizard.advance_payment_method == 'lines':\n # open the list view of sales order lines to invoice\n res = act_window.for_xml_id(cr, uid, 'sale', 'action_order_line_tree2', context)\n res['context'] = {\n 'search_default_uninvoiced': 1,\n 'search_default_order_id': sale_ids and sale_ids[0] or False,\n }\n return res\n assert wizard.advance_payment_method in ('fixed', 'percentage')\n\n inv_ids = []\n for sale_id, inv_values in self._prepare_advance_invoice_vals(cr, uid, ids, context=context):\n inv_ids.append(self._create_invoices(cr, uid, inv_values, sale_id, context=context))\n\n if context.get('open_invoices', False):\n return self.open_invoices( cr, uid, ids, inv_ids, context=context)\n return {'type': 'ir.actions.act_window_close'}", "def make_invoices(self):\n\n billing_schedules = {\"Annual\": 1, \"Two-Pay\": 2, \"Quarterly\": 4, \"Monthly\": 12}\n months_after_eff_date_dict = {\n \"Annual\": 12,\n \"Two-Pay\": 6,\n \"Quarterly\": 3,\n \"Monthly\": 1,\n }\n\n invoices = []\n first_invoice = Invoice(\n self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium,\n )\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule in billing_schedules:\n invoices_quantity = billing_schedules.get(self.policy.billing_schedule)\n first_invoice.amount_due = first_invoice.amount_due / invoices_quantity\n months_between_invoices = months_after_eff_date_dict.get(\n self.policy.billing_schedule\n )\n for i in range(1, invoices_quantity):\n a = i * months_between_invoices\n bill_date = self.policy.effective_date + relativedelta(months=a)\n invoice = Invoice(\n self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium\n / billing_schedules.get(self.policy.billing_schedule),\n )\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):\r\n\r\n def _update_existing_journals(journals):\r\n \"\"\"\r\n Update existing journals\r\n \"\"\"\r\n for journal in journals:\r\n if journal['type'] in ['sale', 'purchase']:\r\n journal['refund_sequence'] = True\r\n if journal['code'] == 'MISC':\r\n journal['name'] = 'OPÉRATIONS DIVERSES'\r\n journal['code'] = 'OD'\r\n if journal['code'] == 'EXCH':\r\n journal['name'] = 'DIFFÉRENCE DE CHANGES'\r\n journal['code'] = 'EXCH'\r\n if journal['type'] == 'sale':\r\n journal['sequence'] = 1\r\n journal['name'] = 'VENTES'\r\n journal['code'] = 'VTE'\r\n journal['default_account_id'] = self.env.ref('l10n_pf.%s_pcg_7071' % self.env.company.id).id\r\n if journal['type'] == 'purchase':\r\n journal['sequence'] = 2\r\n journal['name'] = 'ACHATS'\r\n journal['code'] = 'ACH'\r\n 
journal['default_account_id'] = self.env.ref('l10n_pf.%s_pcg_6071' % self.env.company.id).id\r\n\r\n def _get_default_account(journal_vals, type='debit'):\r\n \"\"\"\r\n Return the default account\r\n :param journal_vals: values for the journal\r\n :param type: is debit or credit ?\r\n :return: The default account\r\n \"\"\"\r\n default_account = False\r\n\r\n # CHEQUES\r\n if journal_vals['code'] == 'CHQF':\r\n return self.env.ref('l10n_pf.%s_pcg_5115' % self.env.company.id).id\r\n if journal_vals['code'] == 'CHQC':\r\n return self.env.ref('l10n_pf.%s_pcg_5112' % self.env.company.id).id\r\n # CASH\r\n if journal_vals['code'] in ('CASHC', 'CASHF'):\r\n return self.env.ref('l10n_pf.%s_pcg_5300' % self.env.company.id).id\r\n # CB\r\n if journal_vals['code'] == 'CB':\r\n return self.env.ref('l10n_pf.%s_pcg_5802' % self.env.company.id).id\r\n # VIS\r\n if journal_vals['code'] == 'VISA':\r\n return self.env.ref('l10n_pf.%s_pcg_5803' % self.env.company.id).id\r\n # AMEX\r\n if journal_vals['code'] == 'AMEX':\r\n return self.env.ref('l10n_pf.%s_pcg_5804' % self.env.company.id).id\r\n\r\n return default_account\r\n\r\n def _add_extras_journals(journals):\r\n \"\"\"\r\n Add extras journals in journals\r\n :param journals: Disct holding all journal to create\r\n \"\"\"\r\n extra_journals = [\r\n {'name': _('ESPECES'), 'type': 'cash', 'code': 'CASHC', 'favorite': True, 'sequence': 14, 'partner_type': 'customer'},\r\n {'name': _('ESPECES'), 'type': 'cash', 'code': 'CASHF', 'favorite': True, 'sequence': 15, 'partner_type': 'supplier'},\r\n {'name': _('CHEQUES'), 'type': 'cash', 'code': 'CHQC', 'favorite': True, 'sequence': 20, 'partner_type': 'customer'},\r\n {'name': _('CHEQUES'), 'type': 'cash', 'code': 'CHQF', 'favorite': True, 'sequence': 21, 'partner_type': 'supplier'},\r\n {'name': _('CB'), 'type': 'cash', 'code': 'CB', 'favorite': False, 'sequence': 23, 'partner_type': 'customer'},\r\n {'name': _('VISA'), 'type': 'cash', 'code': 'VISA', 'favorite': False, 'sequence': 24, 'partner_type': 'customer'},\r\n {'name': _('AMEX'), 'type': 'cash', 'code': 'AMEX', 'favorite': False, 'sequence': 25, 'partner_type': 'customer'},\r\n ]\r\n for extra_journal in extra_journals:\r\n vals = {\r\n 'type': extra_journal['type'],\r\n 'partner_type': extra_journal['partner_type'],\r\n 'name': extra_journal['name'],\r\n 'code': extra_journal['code'],\r\n 'company_id': company.id,\r\n 'default_account_id': _get_default_account(extra_journal, 'credit'),\r\n 'loss_account_id': self.env.ref('l10n_pf.%s_pcg_658' % self.env.company.id).id,\r\n 'profit_account_id': self.env.ref('l10n_pf.%s_pcg_758' % self.env.company.id).id,\r\n 'show_on_dashboard': extra_journal['favorite'],\r\n 'color': extra_journal.get('color', False),\r\n 'sequence': extra_journal['sequence'],\r\n }\r\n journals.append(vals)\r\n\r\n \"\"\"\r\n MAIN\r\n \"\"\"\r\n # Execute Super to get default data\r\n journals = super(ChartTemplate, self)._prepare_all_journals(acc_template_ref, company, journals_dict)\r\n\r\n # Update values of existing journals\r\n _update_existing_journals(journals)\r\n\r\n # Add extras journals\r\n _add_extras_journals(journals)\r\n\r\n self.env.company.currency_provider = 'xe_com'\r\n self.env.company.currency_interval_unit = 'daily'\r\n self.env.company.invoice_is_print = False\r\n\r\n return journals", "def _create_transfer_entry(self, amount):\n aml_obj = self.env['account.move.line'].with_context(check_move_validity=False)\n debit, credit, amount_currency, dummy = 
aml_obj.with_context(date=self.payment_transfer_date).compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)\n amount_currency = self.destination_journal_id.currency_id and self.currency_id.with_context(date=self.payment_date).compute(amount, self.destination_journal_id.currency_id) or 0\n\n dst_move = self.env['account.move'].create(self._get_move_vals(self.destination_journal_id, self.payment_transfer_date))\n\n dst_liquidity_aml_dict = self._get_shared_move_line_vals(debit, credit, amount_currency, dst_move.id)\n dst_liquidity_aml_dict.update({\n 'name': _('Transfer from %s') % self.journal_id.name,\n 'account_id': self.destination_journal_id.default_credit_account_id.id,\n 'currency_id': self.destination_journal_id.currency_id.id,\n 'journal_id': self.destination_journal_id.id})\n aml_obj.create(dst_liquidity_aml_dict)\n\n transfer_debit_aml_dict = self._get_shared_move_line_vals(credit, debit, 0, dst_move.id)\n transfer_debit_aml_dict.update({\n 'name': self.name,\n 'account_id': self.company_id.transfer_account_id.id,\n 'journal_id': self.destination_journal_id.id})\n if self.currency_id != self.company_id.currency_id:\n transfer_debit_aml_dict.update({\n 'currency_id': self.currency_id.id,\n 'amount_currency': -self.amount,\n })\n transfer_debit_aml = aml_obj.create(transfer_debit_aml_dict)\n dst_move.post()\n return transfer_debit_aml", "def update_invoice(self, # pylint:disable=too-many-arguments,no-self-use,unused-argument\n payment_account: PaymentAccount, # pylint: disable=unused-argument\n line_items: [PaymentLineItem], invoice_id: int, # pylint: disable=unused-argument\n paybc_inv_number: str, reference_count: int = 0, # pylint: disable=unused-argument\n **kwargs):\n return None", "def _process_cfs_refund(cls, invoice: InvoiceModel):\n if invoice.payment_method_code == PaymentMethod.DIRECT_PAY.value:\n cls._publish_to_mailer(invoice)\n payment: PaymentModel = PaymentModel.find_payment_for_invoice(invoice.id)\n payment.payment_status_code = PaymentStatus.REFUNDED.value\n payment.flush()\n else:\n # Create credit memo in CFS.\n # TODO Refactor this when actual task is done. 
This is just a quick fix for CFS UAT - Dec 2020\n cfs_account: CfsAccountModel = CfsAccountModel.find_effective_by_account_id(invoice.payment_account_id)\n line_items: List[PaymentLineItemModel] = []\n for line_item in invoice.payment_line_items:\n line_items.append(PaymentLineItemModel.find_by_id(line_item.id))\n\n cms_response = CFSService.create_cms(line_items=line_items, cfs_account=cfs_account)\n # TODO Create a payment record for this to show up on transactions, when the ticket comes.\n # Create a credit with CM identifier as CMs are not reported in payment interface file\n # until invoice is applied.\n CreditModel(cfs_identifier=cms_response.get('credit_memo_number'),\n is_credit_memo=True,\n amount=invoice.total,\n remaining_amount=invoice.total,\n account_id=invoice.payment_account_id).save()", "def check_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n voucher_pool = self.pool.get('account.voucher')\n move_line_pool = self.pool.get('account.move.line')\n voucher_id = (data.payment_id and data.payment_id.id) or (context['active_model'] == 'account.move' and self.check_move_data(cr, uid, ids, context=context))\n if not data.payment_id: data.write({'payment_id':voucher_id})\n if data.new_no:\n voucher = voucher_pool.browse(cr, uid, voucher_id, context=context)\n journal_id=voucher and (voucher.pay_journal_id or voucher.journal_id)\n if self._check_journal_seq(journal_id, context=context):\n chk_log_ids = check_log_pool.search(cr,uid,[('name','=',voucher.id),('status','=','active')], context=context)\n if data.state == 'reprint':\n check_log_pool.write(cr,uid,chk_log_ids, {'status': data.status}, context=context)\n\n sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':int(data.new_no)}, context=context)\n next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)\n voucher_pool.write(cr, uid,[voucher.id],{'amount_in_word': amount_to_text_ar(voucher.amount, 'ar'),'chk_seq': next_seq, 'chk_status':True, 'date_due': (voucher.date_due or voucher.date)}, context=context)\n if data.state == 'update':\n check_log_pool.write(cr,uid,chk_log_ids, {'check_no': next_seq}, context=context)\n else: \n check_log_pool.create(cr, uid,{'name': voucher.id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id}, context=context)\n move_pool.write(cr, uid,[voucher.move_id.id], {'ref' : next_seq or ' '}, context=context)\n lines = move_line_pool.search(cr, uid,[('move_id','=',voucher.move_id.id)], context=context)\n move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)\n if data.state != 'update':\n return self.print_report(cr, uid, ids, context=context)\n return {'type':'ir.actions.act_window_close'}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes node attributes and field descriptors to generate the ``modifiers`` node attribute and set it on the provided node. Alters its first argument inplace.
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
    modifiers = {}
    if field is not None:
        transfer_field_to_modifiers(field, modifiers)
    transfer_node_to_modifiers(
        node, modifiers, context=context, in_tree_view=in_tree_view)
    transfer_modifiers_to_node(modifiers, node)
[ "def set_modifier(self, mod):\r\n arg_str = p2e._base._util._convert_args_to_string(\"set.node.modifier\", \r\n self._node._eco_id, mod)\r\n p2e._app.Exec(arg_str)", "def update(self):\n for dynamic_attr in self.dynamic_attrs.itervalues():\n dynamic_attr.clear_overloads()\n \n self.update_children()\n \n for modifier in self.modifiers:\n self.apply_modifier(modifier)", "def _load_attr_modifier(self, modifiers):\n for name, value in modifiers.items():\n if self._args.verbose:\n print('processing modifier \"{}\"'.format(name))\n self._session.add(AttrModifier(name=name, **value))\n self._session.commit()", "def copy_node(node, *args, **kwargs):\n cls = node.__class__\n new_node = cls()\n\n set_attrs = {}\n\n # non-keyword args\n for name, value in zip(cls._fields, args):\n set_attrs[name] = value\n\n # keyword args\n for name, value in kwargs.iteritems():\n set_attrs[name] = value\n\n # attributes\n for name, value in node.__dict__.iteritems():\n if name not in set_attrs:\n set_attrs[name] = value\n\n # apply set_attrs\n for name, value in set_attrs.iteritems():\n setattr(new_node, name, value)\n\n return new_node", "def make_modifier(ob, modifier):\n mod = ob.modifiers.new(modifier['name'], modifier['type'])\n for attr in modifier:\n if attr not in ('name', 'type'):\n setattr(mod, attr, modifier[attr])\n return mod", "def deep_copy_node(node, *args, **kwargs):\n cls = node.__class__\n new_node = cls()\n\n set_attrs = {}\n\n # non-keyword args\n for name, value in zip(cls._fields, args):\n set_attrs[name] = value\n\n # keyword args\n for name, value in kwargs.iteritems():\n set_attrs[name] = value\n\n # deep copy attributes\n for name, value in node.__dict__.iteritems():\n if name not in set_attrs:\n if isinstance(value, ast.AST):\n set_attrs[name] = deep_copy_node(value)\n else:\n set_attrs[name] = value\n\n # apply set_attrs\n for name, value in set_attrs.iteritems():\n setattr(new_node, name, value)\n\n return new_node", "def execute_shifts(node):\n shift = 0\n change = 0\n for child in node.children[::-1]: # all children from right to left\n child.prelim += shift\n child.mod += shift\n change += child.change\n shift += child.shift + change", "def applyAttrPattern(nodeType=\"string\", patternName=\"string\"):\n pass", "def set_node_attributes(self, node_attributes: dict) -> None:\n\n node_attribute_map = self._create_node_attributes(node_attributes)\n\n for col in node_attribute_map.keys():\n\n if col in self.node_columns:\n\n for node in node_attribute_map[col]:\n\n if node in self.node_map.map.keys():\n\n self.node_map.map[node]['attributes'].update(node_attribute_map[col][node])", "def set_attr(self, node: str, value: dict):\n\n if node in list(self.graph.keys()):\n self.graph[node][self._ATTR] = value\n else:\n raise NodeDoesNotExist(node)", "def modifyExistingNode(self, properties):\n pass", "def ModifyNode(self, attribute_list, predicates): \n nodes_modified = attribute_list[0] \n attrs_changed = attribute_list[1] \n modify_boolean = attribute_list[2] \n self.query_evaluator.modify_node(nodes_modified[2], \n attrs_changed[2], int ((modify_boolean[2])['val']))", "def set_metadata(self, node: Node, path: str, key: str, value: Any):\n # Unsupported fields cannot be selected\n if (\n key == \"selected\"\n and value == True\n and node.get(\"inclusion\") == \"unsupported\"\n ):\n return\n\n node[key] = value\n logging.debug(\"Setting '%s.%s' to '%s'\", path, key, value) # noqa: WPS323", "def replace_module(node2node_func: Callable[[torch.fx.Node], torch.fx.Node]) -> 
Callable[[torch.fx.Node], None]:\n\n def fn(node):\n new_op = node2node_func(node)\n sub_module_names = node.target.split(\".\")\n module = node.graph.owning_module\n for name in sub_module_names[:-1]:\n module = module.get_submodule(name)\n setattr(module, sub_module_names[-1], new_op)\n\n return fn", "def add_field_defaults_to_node(node):\n node.update(\n {\n \"license_description\": None,\n \"copyright_holder\": \"\",\n \"questions\": [],\n \"extra_fields\": {}\n })\n if not \"files\" in node:\n node[\"files\"] = []\n if not \"description\" in node:\n node[\"description\"] = \"\"\n if \"children\" in node:\n for i in range(0, len(node[\"children\"])):\n node[\"children\"][i] = add_field_defaults_to_node(node[\"children\"][i])\n return node", "def replaceNode(self, *args) -> \"void\":\n return _coin.SoMFNode_replaceNode(self, *args)", "def _parse(cls, node, path):\n kwargs = cls._parse_simple_attribs(node)\n kwargs.update(cls._parse_simple_elements(node, path))\n return kwargs", "def set_default_node_attributes(self):\n if self._set_default_node_attribute == 'cno':\n for node in self.nodes():\n attrs = self.get_node_attributes(node)\n for k, v in attrs.items():\n self.node[node][k] = v", "def apply_all_modifiers(obj):\n contxt = bpy.context.copy()\n contxt['object'] = obj\n\n for mod in obj.modifiers[:]:\n contxt['modifier'] = mod\n bpy.ops.object.modifier_apply(\n contxt, apply_as='DATA',\n modifier=contxt['modifier'].name)", "def lockVisibleAttrs(self, node):\r\n\r\n if not len(ls(node)):\r\n self.logger.error(\"lockVisibleAttrs: input node does not exist '%s'.\\n\" % node)\r\n node = self.convertToPyNode(node)\r\n\r\n keyableAttrs = node.listAttr(k=1)\r\n if keyableAttrs:\r\n for kAttr in keyableAttrs:\r\n try:\r\n kAttr.lock()\r\n except:\r\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parameterize a fixture named 'dummy_list' with an empty list
def pytest_generate_tests(metafunc):
    if 'dummy_list' in metafunc.fixturenames:
        metafunc.parametrize("dummy_list", [[]])
[ "def test_default_init(self):\n dset_list = DatasetList()\n\n assert dset_list == []\n assert dset_list.info.type_id == \"list\"\n assert dset_list.info.py_type == \"list\"\n assert len(dset_list) == 0", "def test_default_list_argument_value():\n arguments = [\n {\n \"name\": \"foods\",\n \"type\": \"list\",\n \"default\": \"pizza salad soup\",\n }\n ]\n parser = reading.build_template_argparser(arguments)\n values = parser.parse_args([])\n assert values.foods == [\"pizza\", \"salad\", \"soup\"]", "def test_init_params_list(self):\n params = insightiq_api.Parameters([('one', 1), ('two', 2)])\n\n value = params._data\n expected = [['one', 1], ['two', 2]]\n\n self.assertEqual(value, expected)", "def test_empty_list(self, fake_app, authenticated_user):\n result = fake_app.get(self.url)\n assert result.json == {'elements': []}", "def test_init_params_list_bad(self):\n with self.assertRaises(ValueError):\n insightiq_api.Parameters(['one', 1, 'two', 2])", "def test_empty_args_list(self):\n item_args = []\n operation = \"dummy\"\n with pytest.raises(MWSError):\n parse_item_args(item_args, operation)", "def test_added_to_list(*args, **kwargs):\n if (not loaded_from_fixture(kwargs)):\n update_unit_test_infos(kwargs[\"instance\"].test_list)", "def test_list_saved(*args, **kwargs):\n if not loaded_from_fixture(kwargs):\n update_unit_test_infos(kwargs[\"instance\"])", "def test_empty_filter(self):\n empty_filter = self.module.empty_filter\n result = empty_filter(\n [\"Mike\", \"\", \"Emma\", None, \"Kelly\", \"\", \"Brad\", None]\n )\n self.assertListEqual(result, [\"Mike\", \"Emma\", \"Kelly\", \"Brad\"])", "def test_preprocess_input_list() -> None:\n input = json.dumps({\"inputs\": \"test\"})\n with pytest.raises(AssertionError):\n main.__process_input(input)", "def testCrawlList_fakeData(self):\n\n # List ids to ensure test does not take too long to run.\n list_ids = []\n list_sizes = []\n\n # Real list ids and sizes. 
Uncomment this and run if these ever change.\n #list_ids = ['186732484', '186732631', '186814318', '186814882',\n # '186815046', '186926608', '186926651']\n #list_sizes = [62, 71, 64, 67, 83, 87, 90]\n\n # To add another JSON file that is the string output of a json object\n # printed in the oauth_playground, save the output and then transform it\n # in the following way:\n #\n # quote or delete \" chars first\n # \\U -> (nothing)\n # \\u -> (nothing)\n # None -> null\n # True -> true\n # False -> false\n # u' -> ', but special-case each time u' occurs since a simple substitution\n # will convert you're into yo\"re.\n total_list_size = 0\n list_index = 0\n for list_id in list_ids:\n response = self.testapp.get(\n '/tasks/crawl_list?list_id=%s&fake_data=true' % list_id)\n self.assertEqual(200, response.status_int)\n total_list_size += list_sizes[list_index]\n list_index += 1\n self.assertTweetDbSize(total_list_size)", "def test_me_get_list(self):\n pass", "def test_role_data_with_empty_lst():\n result = inbound_group_filter({\"id\": \"123-456-abs3\", \"members\": []}, \"azure\")\n assert result[\"members\"] == []", "def test_get_empty_list(self):\n response = self.app.get('/api/tour')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual([], response.json)", "def _create_basic_list():\n\n return ['Apples', 'Pears', 'Oranges', 'Peaches']", "def test_list_added_to_cycle(*args, **kwargs):\n if (not loaded_from_fixture(kwargs)):\n update_unit_test_infos(kwargs[\"instance\"].test_list)", "def test_parse_empty_genelist(self, es_testapp, wb_project, wb_institution):\n genelist = GeneListSubmission(\n GENELIST_PATH + \"test-empty_gene_list.txt\",\n wb_project[\"@id\"],\n wb_institution[\"@id\"],\n es_testapp,\n )\n assert not genelist.title\n assert not genelist.genes\n assert genelist.errors", "def test_empty_list(self):\n empty = []\n self.assertEqual(max_integer(empty), None)", "def test_no_lists(self):\n response = self.client.get(reverse('lists:index'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Aucune liste pour le moment\")\n self.assertQuerysetEqual(response.context['all_lists'], [])", "def test_post_get_prepper_template_list(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CPP wrapper for a grid sub_sampling (method = barycenter for points and features)
def grid_sub_sampling(points, features=None, labels=None, grid_size=0.1, verbose=0):
    if (features is None) and (labels is None):
        return cpp_subsampling.subsample(points, sampleDl=grid_size, verbose=verbose)
    elif labels is None:
        return cpp_subsampling.subsample(points, features=features, sampleDl=grid_size, verbose=verbose)
    elif features is None:
        return cpp_subsampling.subsample(points, classes=labels, sampleDl=grid_size, verbose=verbose)
    else:
        return cpp_subsampling.subsample(points, features=features, classes=labels, sampleDl=grid_size, verbose=verbose)
[ "def subsampleGrid(self, subsample_fac, get_convergence=False):\n # Check that buildGrid has already been called.\n if not hasattr(self, 'im_g1'):\n raise RuntimeError(\"PowerSpectrum.buildGrid must be called before subsampleGrid\")\n\n # Check that subsample_fac is a factor of ngrid.\n effective_ngrid = self.im_g1.array.shape[0]\n if (not isinstance(subsample_fac,int)\n or effective_ngrid%subsample_fac!=0\n or subsample_fac<=1):\n raise RuntimeError(\"Subsample factor must be an integer>1 that divides the grid size!\")\n\n # Make new array subsamples and turn them into Images\n self.im_g1 = galsim.ImageD(\n np.ascontiguousarray(self.im_g1.array[::subsample_fac,::subsample_fac]))\n self.im_g2 = galsim.ImageD(\n np.ascontiguousarray(self.im_g2.array[::subsample_fac,::subsample_fac]))\n self.im_kappa = galsim.ImageD(\n np.ascontiguousarray(self.im_kappa.array[::subsample_fac,::subsample_fac]))\n\n # Update internal parameters: grid_spacing, center.\n if self.adjust_center:\n self.center += galsim.PositionD(0.5,0.5) * self.grid_spacing * (subsample_fac-1)\n self.grid_spacing *= subsample_fac\n\n if get_convergence:\n return self.grid_g1, self.grid_g2, self.grid_kappa\n else:\n return self.grid_g1, self.grid_g2", "def SubsetCoordinates(\n coords, array_size, method=\"random\", n=10000, grid_spacing=(2, 2)\n):\n #####Note: indices are switched row = 1 col = 0 to match imzML parser#####\n # Check to see if the method is uniform random sampling\n if method == \"random\":\n # Check to see if the value is less than or equal to 1\n if n < 1:\n # Interpret this value as a percentage\n n = int(len(coords) * n)\n # Otherwise the value is total pixel count\n else:\n # Interpret the value as pixel count\n n = n\n\n # Set random seed\n random.seed(1234)\n # Take subsample of integers for indexing\n idx = list(np.random.choice(a=len(coords), size=n, replace=False))\n # Use the indices to subsample coordinates\n sub_coords = [coords[c] for c in idx]\n # Create data with True values same length as sub_coords for scipy coo matrix\n data = np.ones(len(sub_coords), dtype=np.bool)\n\n # Create row data for scipy coo matrix (-1 index for 0-based python)\n row = np.array([sub_coords[c][1] - 1 for c in range(len(sub_coords))])\n # Create row data for scipy coo matrix (-1 index for 0-based python)\n col = np.array([sub_coords[c][0] - 1 for c in range(len(sub_coords))])\n\n # Check to see if the method is uniform sampling with random after\n elif method == \"pseudo_random\":\n # Check to see if the value is less than or equal to 1\n # if n < 1:\n # Interpret this value as a percentage\n # num_resamples = int(len(coords) * n)\n\n if n >= 0.25:\n # intialize the sampling with 2x2 grid\n grdh = 2\n grdw = 2\n elif 0.25 > n >= 0.11:\n # intialize the sampling with 3x3 grid\n grdh = 3\n grdw = 3\n elif 0.11 > n >= 0.0625:\n # intialize the sampling with 4x4 grid\n grdh = 4\n grdw = 4\n else:\n # intialize the sampling with 5x5 grid\n grdh = 5\n grdw = 5\n # Get maximum indices for x and y directions\n max_nh, max_nw = (\n max(coords, key=itemgetter(1))[1],\n max(coords, key=itemgetter(0))[0],\n )\n # Get maximum indices for x and y directions\n min_nh, min_nw = (\n min(coords, key=itemgetter(1))[1],\n min(coords, key=itemgetter(0))[0],\n )\n # Get grid in height direction and width directions\n row = np.arange(min_nh, max_nh, grdh)\n col = np.arange(min_nw, max_nw, grdw)\n # Create data with True values same length as sub_coords for scipy coo matrix\n data = np.ones(len(row) * len(col), dtype=np.bool)\n # Create 
meshgrid from the grid coordinates\n row, col = np.meshgrid(row, col)\n\n # Create list of subcoordinates from mesh -- this is now a bounding box around the mask coordinates\n sub_coords = list(\n map(\n tuple,\n np.vstack(\n (col.ravel() + 1, row.ravel() + 1, np.ones(len(data), dtype=np.int64))\n ).T,\n )\n )\n # Intersect the original coordinates with the grid coordinates so if mask or ROI is not square, we can capture\n sub_coords = list(set(sub_coords) & set(coords))\n # Get number of pixels of grid coordinates and calculate pixels left\n grid_perc = len(sub_coords) / len(coords)\n\n # Get number of pixels left to sample\n n = int(len(coords) * (n - grid_perc))\n\n # Create an inverse list of pixels\n inv_pix = list(set(coords).difference(set(sub_coords)))\n\n # Set random seed\n random.seed(1234)\n # Take subsample of integers for indexing from pixels left after grid\n idx = list(np.random.choice(a=len(inv_pix), size=n, replace=False))\n # Use the indices to subsample coordinates\n rand_sub_coords = [inv_pix[c] for c in idx]\n\n # Add these coordinates to the grid coordinates\n sub_coords = rand_sub_coords + sub_coords\n\n # Create data with True values same length as sub_coords for scipy coo matrix\n data = np.ones(len(sub_coords), dtype=np.bool)\n\n # Create row data for scipy coo matrix (-1 index for 0-based python)\n row = np.array([sub_coords[c][1] - 1 for c in range(len(sub_coords))])\n # Create row data for scipy coo matrix (-1 index for 0-based python)\n col = np.array([sub_coords[c][0] - 1 for c in range(len(sub_coords))])\n\n # Check to see if the method is uniform grid sampling\n elif method == \"grid\":\n\n # ensure the grid spacing is interpreted correctly\n if isinstance(grid_spacing, str):\n # Interpret the grid spacing as tuple\n grid_spacing = literal_eval(grid_spacing)\n\n # Get the size of the grid\n grdh = grid_spacing[0]\n grdw = grid_spacing[1]\n\n # Get maximum indices for x and y directions\n max_nh, max_nw = (\n max(coords, key=itemgetter(1))[1],\n max(coords, key=itemgetter(0))[0],\n )\n # Get maximum indices for x and y directions\n min_nh, min_nw = (\n min(coords, key=itemgetter(1))[1],\n min(coords, key=itemgetter(0))[0],\n )\n # Get grid in height direction and width directions\n row = np.arange(min_nh, max_nh, grdh)\n col = np.arange(min_nw, max_nw, grdw)\n # Create data with True values same length as sub_coords for scipy coo matrix\n data = np.ones(len(row) * len(col), dtype=np.bool)\n # Create meshgrid from the grid coordinates\n row, col = np.meshgrid(row, col)\n\n # Create list of subcoordinates from mesh -- this is now a bounding box around the mask coordinates\n sub_coords = list(\n map(\n tuple,\n np.vstack(\n (col.ravel() + 1, row.ravel() + 1, np.ones(len(data), dtype=np.int64))\n ).T,\n )\n )\n # Intersect the original coordinates with the grid coordinates so if mask or ROI is not square, we can capture\n sub_coords = list(set(sub_coords) & set(coords))\n # Create row data for scipy coo matrix (-1 index for 0-based python)\n row = np.array([sub_coords[c][1] - 1 for c in range(len(sub_coords))])\n # Create row data for scipy coo matrix (-1 index for 0-based python)\n col = np.array([sub_coords[c][0] - 1 for c in range(len(sub_coords))])\n\n # Create data with True values same length as sub_coords for scipy coo matrix\n data = np.ones(len(sub_coords), dtype=np.bool)\n\n # Otherwise raise an error\n else:\n # Raise value error\n raise (\n Exception(\n \"Method of subsampling entered is not supported. 
Please enter 'random', 'grid', or 'pseudo_random'\"\n )\n )\n\n # Numpy flattens with row major order -- reorder coorinates to index resulting pandas data frame\n sub_coords = sorted(sub_coords, key=itemgetter(1, 0))\n # Create a subset mask\n sub_mask = scipy.sparse.coo_matrix((data, (row, col)), shape=array_size)\n\n # Return the objects\n return sub_mask, sub_coords", "def _sample_grid(x: np.ndarray, feature_range: np.ndarray, epsilon: float = 0.04,\n nb_samples: int = 10, res: int = 100) -> np.ndarray:\n nb_instances = x.shape[0]\n x = x.reshape(x.shape[0], -1)\n dim = x.shape[1] # number of features\n\n size = np.round(epsilon * res).astype(int)\n if size <= 2:\n size = 2\n\n deltas = (np.abs(feature_range[:, 1] - feature_range[:, 0]) / float(res)) # shape=(nb_features)\n\n rnd_sign = 2 * (np.random.randint(2, size=(nb_instances, nb_samples, dim))) - 1\n rnd = np.random.randint(size, size=(nb_instances, nb_samples, dim)) + 1\n rnd = rnd_sign * rnd # shape=(nb_instances, nb_samples, nb_features)\n\n vprime = rnd * deltas\n X_sampled = x.reshape(x.shape[0], 1, x.shape[1]) + vprime # shape=(nb_instances, nb_samples, nb_features)\n\n return X_sampled", "def test_subsampling():\n jpg = DIR_15444 / \"2KLS\" / \"oj36.j2k\"\n params = get_parameters(jpg)\n print(params)\n # 0: (1, 1)\n # 1: (2, 1)\n # 2: (2, 1)", "def test_subsampling():\n test_data = np.array([1])\n with raises(ValueError) as errorinfo:\n sub_data = _subsampling(test_data, 1)\n assert \"Unrecognized matrix dimension\" in str(errorinfo.value)\n\n test_data = np.random.rand(2, 3, 4)\n sub_data = _subsampling(test_data, sub_depth=2)\n assert sub_data.shape == (1, 2, 2)", "def full_subsampling_and_split(dnas_args,\n train_X_int=None,\n train_X_cat=None,\n train_y=None,\n weights_arch_params_split=True):\n\n # Call the correct function depending on whether we are processing the Terabyte dataset\n # or not.\n if not dnas_args.memory_map:\n return full_subsampling_and_split_direct(dnas_args,\n train_X_int=train_X_int,\n train_X_cat=train_X_cat,\n train_y=train_y,\n weights_arch_params_split=weights_arch_params_split)\n else:\n return full_subsampling_and_split_mem_map(dnas_args,\n weights_arch_params_split=weights_arch_params_split)", "def General_Slice_Sampler(self,itera=1000,showp = 10):\n samples = np.zeros(itera)\n x=0.0\n # Get the neccessary distributions \n p, lh = self.get_p() \n low = lh[0]\n high = lh[1] \n self.set_ran(np.arange(low,high,self.get_precision()))\n fd = np.ones(len(self.get_ran()))\n for f in list(p):\n fd = fd*f(self.get_ran())\n self.set_y(fd)\n fN = len(p)\n # Loop for iter\n for i in range(itera):\n # Loop in case of an emprty intersection\n if itera > showp: \n if i%(itera/showp) ==0:\n print (\"Iteration General Slice Sampler\" + str(i))\n while True:\n w = list()\n # Loop for the w\n for j in range(fN):\n w.append(np.random.uniform(0, p[j](x)))\n x = self.A_multiple_sample(p,w)\n # Handling empty case\n if x != None:\n samples[i] = x\n break\n self.set_samples(samples)", "def subimage_generator(image, patch_block_size, numberxy, numberz):\n width = np.shape(image)[1]\n height = np.shape(image)[2]\n imagez = np.shape(image)[0]\n block_width = np.array(patch_block_size)[1]\n block_height = np.array(patch_block_size)[2]\n blockz = np.array(patch_block_size)[0]\n\n stridewidth = (width - block_width) // (numberxy - 1)\n strideheight = (height - block_height) // (numberxy - 1)\n stridez = (imagez - blockz) // numberz\n\n\n # step 1:if image size of z is smaller than blockz,return zeros samples\n if imagez 
< blockz:\n nb_sub_images = numberxy * numberxy * 1\n hr_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, 0:imagez, :, :] = image[:, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(indx)\n print(nb_sub_images)\n raise ValueError(\"error sub number image\")\n return hr_samples\n\n # step 2:if stridez is bigger 1,return numberxy * numberxy * numberz samples\n if stridez >= 1:\n nb_sub_images = numberxy * numberxy * numberz\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for z in range(0, numberz * stridez, stridez):\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[z:z + blockz, x:x + block_width, y:y + block_height]\n indx += 1\n\n if (indx != nb_sub_images):\n print(indx)\n print(nb_sub_images)\n print(\"error sub number image\")\n return hr_samples\n\n # step3: if stridez==imagez,return numberxy * numberxy * 1 samples,one is [0:blockz,:,:]\n if imagez == blockz:\n nb_sub_images = numberxy * numberxy * 1\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[:, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n print(indx)\n print(nb_sub_images)\n return hr_samples\n\n # step4: if stridez==0,return numberxy * numberxy * 2 samples,one is [0:blockz,:,:],two is [-blockz-1:-1,:,:]\n if stridez == 0:\n nb_sub_images = numberxy * numberxy * 2\n hr_samples = np.empty(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float32)\n indx = 0\n for x in range(0, width - block_width + 1, stridewidth):\n for y in range(0, height - block_height + 1, strideheight):\n hr_samples[indx, :, :, :] = image[0:blockz, x:x + block_width, y:y + block_height]\n indx += 1\n hr_samples[indx, :, :, :] = image[-blockz - 1:-1, x:x + block_width, y:y + block_height]\n indx += 1\n if (indx != nb_sub_images):\n print(\"error sub number image\")\n return hr_samples", "def subsample(train_x, train_y, train_y_var, n_samp):\n idx = np.random.permutation(range(len(train_x)))[:n_samp]\n return train_x[idx], train_y[idx], train_y_var[idx]", "def test_subsample_with_replacement(self):\n # Can choose from all in first bin, all in last bin (since we're\n # sampling with replacement), or split across bins.\n a = np.array([2, 0, 1])\n actual = set()\n for i in range(1000):\n obs = subsample(a, 2, replace=True)\n actual.add(tuple(obs))\n self.assertEqual(actual, {(1, 0, 1), (2, 0, 0), (0, 0, 2)})\n\n # Test that selecting 35 counts from a 36-count vector 1000 times\n # yields more than 10 different subsamples. If we were subsampling\n # *without* replacement, there would be only 10 possible subsamples\n # because there are 10 nonzero bins in array a. 
However, there are more\n # than 10 possibilities when sampling *with* replacement.\n a = np.array([2, 0, 1, 2, 1, 8, 6, 0, 3, 3, 5, 0, 0, 0, 5])\n actual = set()\n for i in range(1000):\n obs = subsample(a, 35, replace=True)\n self.assertEqual(obs.sum(), 35)\n actual.add(tuple(obs))\n self.assertTrue(len(actual) > 10)", "def cartesian_sampling(nx,ny,rmax=1.):\n x = np.linspace(-1,1,nx);\n y = np.linspace(-1,1,ny);\n x,y=np.meshgrid(x,y); \n ind = x**2 + y**2 <= rmax**2;\n return x[ind],y[ind]", "def define_subgrid(a1_shape, a1_spacing, grid_density):\n a2_shape = np.array(a1_shape) * grid_density # Added the type change during debugging. Not sure if it's\n # jit compatible or not. Otherwise this line is treated as tuple multiplication, which just repeats the tuple.\n a2_spacing = a1_spacing / grid_density\n\n # Set variable types (necessary when using jit, which must infer the types of the variables from the code).\n row, col, new_row, new_col = (1, 1, 1, 1)\n x, y, new_x, new_y = (1.0, 1.0, 1.0, 1.0)\n\n # @jit(nopython=True)\n def grid_to_subgrid(row, col, x, y):\n new_row = row * grid_density - np.floor_divide(y, a2_spacing[0]) + (grid_density - 1)\n new_col = col * grid_density + np.floor_divide(x, a2_spacing[1])\n new_y = np.mod(y, a2_spacing[0])\n new_x = np.mod(x, a2_spacing[1])\n return new_row, new_col, new_x, new_y\n\n # @jit(nopython=True)\n def subgrid_to_grid(row, col, x, y):\n new_row = np.floor_divide(row, grid_density)\n new_col = np.floor_divide(col, grid_density)\n new_y = y - np.mod(row, grid_density) * a2_spacing[0] + a2_spacing[0] * (grid_density - 1)\n new_x = x + np.mod(col, grid_density) * a2_spacing[1]\n return new_row, new_col, new_x, new_y\n\n # @jit(nopython=True)\n def cart_to_grid(x, y):\n new_row = a1_shape[0] - np.floor_divide(y, a1_spacing[0]) - 1\n new_col = np.floor_divide(x, a1_spacing[1])\n new_y = np.mod(y, a1_spacing[0])\n new_x = np.mod(x, a1_spacing[1])\n return new_row, new_col, new_x, new_y\n\n # @jit(nopython=True)\n def grid_to_cart(row, col, x, y):\n new_y = y + (a1_shape[0] - row - 1) * a1_spacing[0]\n new_x = x + col * a1_spacing[1]\n return new_x, new_y\n\n # @jit(nopython=True)\n def cart_to_subgrid(x, y):\n new_row = a2_shape[0] - np.floor_divide(y, a2_spacing[0]) - 1\n new_col = np.floor_divide(x, a2_spacing[1])\n new_y = np.mod(y, a2_spacing[0])\n new_x = np.mod(x, a2_spacing[1])\n return new_row, new_col, new_x, new_y\n\n # @jit(nopython=True)\n def subgrid_to_cart(row, col, x, y):\n new_y = y + (a2_shape[0] - row - 1) * a2_spacing[0]\n new_x = x + col * a2_spacing[1]\n return new_x, new_y\n\n return grid_to_subgrid, subgrid_to_grid, cart_to_grid, grid_to_cart, cart_to_subgrid, subgrid_to_cart", "def random_subset(self, perc=0.5):", "def Source_subsample(Source_cont, NAx_coord, NAy_coord, subsampled_NA=0.1):\n\n N, M = Source_cont.shape\n\n [idx_y, idx_x] = np.where(Source_cont > 0)\n\n NAx_list = NAx_coord[idx_y, idx_x]\n NAy_list = NAy_coord[idx_y, idx_x]\n NA_list = ((NAx_list) ** 2 + (NAy_list) ** 2) ** (0.5)\n NA_idx = np.argsort(NA_list)\n\n illu_list = []\n\n first_idx = True\n\n for i in NA_idx:\n if first_idx:\n illu_list.append(i)\n first_idx = False\n elif (\n np.product(\n (NAx_list[i] - NAx_list[illu_list]) ** 2\n + (NAy_list[i] - NAy_list[illu_list]) ** 2\n >= subsampled_NA**2\n )\n == 1\n ):\n illu_list.append(i)\n\n Source_discrete = np.zeros((N, M))\n Source_discrete[idx_y[illu_list], idx_x[illu_list]] = 1\n\n return Source_discrete", "def _subcluster(array, min_points, epsilon):\n assert 
DBICAN._sorted_ascending(array)\n\n offset = min_points - 1\n upper = array[offset:]\n lower = array[:-offset]\n selected = upper - lower <= epsilon\n lower_index = np.arange(0, len(lower))[selected]\n upper_index = np.arange(offset, len(array))[selected] + 1\n return np.fromiter(zip(lower_index, upper_index),\n dtype=DBICAN._DTYPE_SLICE)", "def __init__(self, rng, func):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n SliceSamplerBase.__init__(self, rng, func)", "def test_subset_global_grid(self):\n lower_bound = 42 - 1.0e-7\n upper_bound = 52 + 1.0e-7\n constraint_dict = {\n \"latitude\": lambda cell: lower_bound <= cell.point <= upper_bound\n }\n constr = iris.Constraint(**constraint_dict)\n result = apply_extraction(\n self.global_gridded_cube, constr, longitude_constraint=[0, 7]\n )\n expected_data = np.array(\n [\n [1.0, 2.0, 3.0, 4.0],\n [9.0, 10.0, 11.0, 12.0],\n [17.0, 18.0, 19.0, 20.0],\n [25.0, 26.0, 27.0, 28.0],\n ]\n )\n self.assertArrayAlmostEqual(result.data, expected_data)\n self.assertArrayAlmostEqual(\n result.coord(\"longitude\").points, np.array([0.0, 2.0, 4.0, 6.0])\n )\n self.assertArrayAlmostEqual(\n result.coord(\"latitude\").points, np.array([45.0, 47.0, 49.0, 51.0])\n )", "def pool_points(data, kernel_size):\n max_x, max_y = np.max(data, axis=0)\n min_x, min_y = np.min(data, axis=0)\n\n kernel_size_x, kernel_size_y = kernel_size / 2, kernel_size\n\n x_increment_times = int((max_x - min_x) / kernel_size_x) + 1\n y_increment_times = int((max_y - min_y) / kernel_size_y) + 1\n\n selected_data_list = []\n selected_sampled_index_list = []\n\n for x_idx in range(x_increment_times):\n for y_idx in range(y_increment_times):\n x_range = (min_x + kernel_size_x * x_idx, min_x + kernel_size_x * (x_idx + 1))\n y_range = (min_y + kernel_size_y * y_idx, min_y + kernel_size_y * (y_idx + 1))\n\n data_in_range = data[(data[:, 0] > x_range[0]) & (data[:, 0] < x_range[1]) & (data[:, 1] > y_range[0]) & (\n data[:, 1] < y_range[1])]\n\n if data_in_range.shape[0] > 0:\n selected_data = np.min(data_in_range, axis=0)\n selected_data_list.append(selected_data)\n selected_sampled_index = np.argmax(np.sum(data == selected_data, axis=1))\n selected_sampled_index_list.append(selected_sampled_index)\n\n selected_data_all = np.stack(selected_data_list)\n\n return selected_data_all, selected_sampled_index_list", "def createSubdivRegion():\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes neighbors for a batch of queries and supports
def batch_neighbors(queries, supports, q_batches, s_batches, radius):
    return cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius)
[ "def compute_neighbours(self, nns):\n self.NNS = []\n for i in range(len(self.embeds)):\n start_time = time.clock()\n write(\"Computing nearest neighbours for embedding no = %d ...\" % i)\n nbrs = NearestNeighbors(n_neighbors=nns, algorithm='ball_tree').fit(self.embeds[i].T)\n distances, indices = nbrs.kneighbors(self.embeds[i].T)\n self.NNS.append(indices[:,1:])\n end_time = time.clock()\n write(\"Done (%s sec.)\\n\" % str(end_time - start_time))\n pass", "def compute_neighbors(self, n_neighbors=100, metric=None):\n for emb in self.embeddings:\n emb.compute_neighbors(n_neighbors=n_neighbors, metric=metric)", "def neighbors_clustering(user, directed, algorithm_keywords):\n pass", "def neighbor_indices(self):", "def compute_neighbors(self):\n for img in self.images:\n self.images_superpixels_neighbours[img] = [set() for sp in self.images_superpixels[img]]\n for row in range(len(self.images_segmented[img]) - 1):\n for column in range(len(self.images_segmented[img][0]) - 1):\n current = self.images_segmented[img][row][column] # superpixel label of current pixel\n right = self.images_segmented[img][row][column + 1] # superpixel label of pixel right of current\n below = self.images_segmented[img][row + 1][column] # superpixel label of pixel below current\n if current != right:\n self.images_superpixels_neighbours[img][current].add(right)\n self.images_superpixels_neighbours[img][right].add(current)\n if current != below:\n self.images_superpixels_neighbours[img][current].add(below)\n self.images_superpixels_neighbours[img][below].add(current)", "def compute_neighbors(self, n_neighbors=100, metric=None):\n pos = self.field(Field.POSITION)\n neighbor_clf = NearestNeighbors(metric=metric or self.metric,\n n_neighbors=n_neighbors + 1).fit(pos)\n _, neigh_indexes = neighbor_clf.kneighbors(pos)\n self.set_field(Field.NEIGHBORS, neigh_indexes[:,1:])", "def filter_queries(args):\n for split_name in ['train', 'val']:\n split_dir = '%s/datasets/netvlad/%d/%s'%(cst.SCRIPT_DIR, args.data_id, split_name)\n metadata = Metadata(split_dir)\n\n knn = NearestNeighbors(n_jobs=-1)\n knn.fit(metadata.utmDb)\n\n # list of array of db idx matching a query\n # nontrivial_positives[i] = list of db img idx matching the i-th query\n nontrivial_positives = list(knn.radius_neighbors(metadata.utmQ,\n radius=metadata.dist_pos, return_distance=False))\n #print(nontrivial_positives) # [i]=array([ 0, 1, 2, 3, 4, 5])\n \n # its possible some queries don't have any non trivial potential positives\n # lets filter those out\n queries_idx = np.where(np.array([len(x) for x in nontrivial_positives])>0)[0]\n #metadata.utmQ = metadata.utmQ[queries_idx,:]\n #metadata.qImage = metadata.qImage[queries_idx]\n #num_queries = queries_idx.shape[0]\n \n metadata.filter(queries_idx)\n metadata.save()\n \n # debug\n if (1==1):\n toto = np.array([i for i,l in enumerate(nontrivial_positives) if len(l)>0])\n toto_sum = np.sum( (toto - queries_idx))\n if toto_sum!=0:\n print(toto_sum)\n print(\"Error somewhere in dataset\")\n exit(1)\n nontrivial_positives = [l for l in nontrivial_positives if len(l)>0]", "def cluster(queries):\n\n normalized_queries = min_max_normalization(queries)\n return skc.DBSCAN(eps=0.11, min_samples=4).fit_predict(normalized_queries)", "def get_neighbors_gpu(locations, bin_locations, bins, bins_help, distance, edges, if_edge, edge_count, max_neighbors):\n # get the agent index in the array\n index = cuda.grid(1)\n\n # double check that the index is within bounds\n if index < bin_locations.shape[0]:\n # get the starting 
index for writing edges to the holder array\n start = index * max_neighbors[0]\n\n # hold the total amount of edges for the agent\n agent_edge_count = 0\n\n # get the indices of the bin location\n x, y, z = bin_locations[index]\n\n # go through the 9 bins that could all potential neighbors\n for i in range(-1, 2):\n for j in range(-1, 2):\n for k in range(-1, 2):\n # get the count of agents for the current bin\n bin_count = bins_help[x + i][y + j][z + k]\n\n # go through the current bin determining if an agent is a neighbor\n for l in range(bin_count):\n # get the index of the current potential neighbor\n current = bins[x + i][y + j][z + k][l]\n\n # check to see if the agent is a neighbor and prevent duplicates with index condition\n if magnitude(locations[index], locations[current]) <= distance[0] and index < current:\n # if there is room, add the edge\n if agent_edge_count < max_neighbors[0]:\n # get the index for the edge\n edge_index = start + agent_edge_count\n\n # update the edge array and identify that this edge exists\n edges[edge_index][0] = index\n edges[edge_index][1] = current\n if_edge[edge_index] = 1\n\n # increase the count of edges for an agent\n agent_edge_count += 1\n\n # update the array with number of edges for the agent\n edge_count[index] = agent_edge_count", "def make_neighbors_dataframe(num_clusters):\n cluster_assignments = get_cluster_model(num_clusters).predict(projected)\n cluster_centers = get_cluster_model(num_clusters).cluster_centers_\n centroid_cluster_assignments = get_cluster_model(num_clusters).predict(cluster_centers)\n\n # print(centroid_cluster_assignments)\n # print(len(cluster_centers))\n\n centroids = []\n centroid_num_arr = []\n num_centroids = num_clusters*10*[num_clusters]\n \n is_neighbor = []\n \n #arrays for neighbors\n timestamps = np.empty((num_clusters,10))\n timestamps_orig = np.empty((num_clusters,10))\n file_path = np.empty((num_clusters,10), dtype='S92')\n # neighbor_file_path = []\n sensor_id = np.empty((num_clusters,10), dtype='S60')\n # neighbor_timestamps_dt = np.empty((64*5), dtype = datetime.datetime)\n # print(neighbor_timestamps_dt.dtype)\n\n for i,cluster_index in enumerate(range(num_clusters)):\n #for each cluster center, query only the cluster it belongs to\n\n #Filter out only the points belonging to one cluster\n cluster_mask = (cluster_assignments==cluster_index)\n cluster_test = projected[cluster_mask]\n\n #Makes a list of the centroid of the cluster with length of the number of the points in the cluster\n centroid_list = 10*[cluster_centers[cluster_index]]\n centroids += centroid_list\n\n #Makes a list of the cluster index with length of the number of the points in the cluster\n centroid_num_list = 10*[cluster_index+1]\n centroid_num_arr += centroid_num_list\n\n# print(len(cluster_test))\n nearest_neighbors = []\n tree = spatial.KDTree(cluster_test)\n # print(cluster_centers[cluster_index])\n nearest_neighbors = tree.query(cluster_centers[cluster_index], 5)[1]\n\n #from only the points corresponding to a certain cluster in the 10000 subset of projected, apply the nearest\n #neighbors mask to obtain the other characteristics like file path, timestamp, etc\n\n neighbors_mask = np.zeros(len(cluster_test)).astype('bool')\n neighbors_mask[np.sort(nearest_neighbors)] = True\n is_neighbor += 5*['Y']\n \n #random sampling from cluster \n random_nums = np.random.choice(range(cluster_test.shape[0]), 5, replace=False)\n random_cluster_mask = np.zeros(cluster_test.shape[0]).astype('bool')\n random_cluster_mask[random_nums] = True\n 
is_neighbor += 5*['N']\n \n \n d_neighbors = d_middle[cluster_mask][neighbors_mask]\n d_random = d_middle[cluster_mask][random_cluster_mask]\n \n timestamps_empty = np.empty((2, 5))\n timestamps_empty[0] = d_neighbors['timestamp']\n timestamps_empty[1] = d_random['timestamp']\n timestamps[i] = (timestamps_empty.flatten())\n \n timestamps_orig_empty = np.empty((2, 5))\n timestamps_orig_empty[0] = d_neighbors['timestamp_orig']\n timestamps_orig_empty[1] = d_random['timestamp_orig']\n timestamps_orig[i] = timestamps_orig_empty.flatten()\n \n file_path_empty = np.empty((2, 5), dtype='S92')\n file_path_empty[0] = d_neighbors['file_path']\n file_path_empty[1] = d_random['file_path']\n # print(neighbor_file_path_inner)\n file_path[i] = file_path_empty.flatten()\n \n sensor_id_empty = np.empty((2, 5), dtype='S60')\n sensor_id_empty[0] = d_neighbors['sensor_id']\n sensor_id_empty[1] = d_random['sensor_id']\n sensor_id[i] = sensor_id_empty.flatten()\n \n# print('done with cluster ' + str(cluster_index) + ' of ' + str(num_clusters))\n# sys.stdout.flush()\n\n timestamps_dt = [convert_to_dt(x) for x in timestamps.flatten()]\n file_path_cut = [cut_file_path(x) for x in file_path.flatten()]\n \n# print(len(is_neighbor))\n \n # Making the dataframe\n df = pd.DataFrame(centroids)\n df.insert(0, 'is_neighbor', is_neighbor, True)\n df.insert(1, \"timestamp_orig\", timestamps_orig.flatten(), True)\n df.insert(2, \"timestamp_dt\", timestamps_dt, True)\n df.insert(3, \"sensor_id\", sensor_id.flatten(), True)\n df.insert(4, \"file_path\", file_path_cut, True)\n df.insert(5, \"centroid_num\", centroid_num_arr, True)\n df.insert(6, \"num_clusters\", num_centroids, True)\n\n return df", "def do_neighbors(self,args):\n\t\tprint(self.location.neighbors)", "def get_neighbours(training_data, unseen_data_row):\n from copy import deepcopy\n training_with_distances = deepcopy(training_data)\n\n # Working out the euclidean distances between the unseen data point and all of the existing training data points\n training_with_distances['distance'] = training_with_distances[func.FEATURES].\\\n sub(np.array(unseen_data_row)).pow(2).sum(1).pow(0.5)\n\n # Sorting the dataframe based on the distances in ascending order\n training_with_distances.sort_values(by='distance', inplace=True)\n # Selecting the closest MAX_NUM_NEIGHBOURS\n neighbours = training_with_distances.head(n=MAX_NUM_NEIGHBOURS)\n\n return neighbours", "def getNeighbours(self, user=None, limit=None):\n pass", "def get_neighbors(self, **kwargs):\r\n click.echo('[%s_OSINT_get_neighbors] Get neighbors via Match starting...' 
% (get_datetime()))\r\n sql = '''\r\n MATCH\r\n {class:V, where: (key = '%s')}\r\n .bothE(){as:o2n}\r\n .bothV(){class:V, as:n}\r\n RETURN \r\n o2n.@class as EDGE_TYPE, o2n.out.key as EDGE_SOURCE , o2n.in.key as EDGE_TARGET,\r\n n.key as NODE_KEY, n.title as NODE_NAME, n.@class as NODE_TYPE, n.description as NODE_ATTR_ID\r\n ''' % (kwargs[\"nodekey\"])\r\n # Start a response object with data array and node_keys including the queried so it is not included\r\n response = {\"data\": [], \"node_keys\": [kwargs[\"nodekey\"]]}\r\n for r in self.client.command(sql):\r\n r = r.oRecordData\r\n if r[\"EDGE_TARGET\"] == kwargs[\"nodekey\"]:\r\n r[\"EDGE_DIRECTION\"] = \"IN\"\r\n else:\r\n r[\"EDGE_DIRECTION\"] = \"OUT\"\r\n if r[\"NODE_KEY\"] not in response[\"node_keys\"]:\r\n response[\"data\"].append(r)\r\n response[\"node_keys\"].append(r[\"NODE_KEY\"])\r\n response[\"message\"] = \"Get neighbors for %s resulted in %d nodes\" % (kwargs[\"nodekey\"], len(response[\"data\"]))\r\n click.echo('[%s_OSINT_get_neighbors] Get neighbors via Match complete.' % (get_datetime()))\r\n return response", "def __add_neighbours(self):\n calculate_cell_neighbour_coordinates = self._neighbourhood.calculate_cell_neighbour_coordinates\n coordinates = self._current_state.keys()\n for coordinate, cell_c, cell_n in zip(coordinates, self._current_state.values(), self._next_state.values()):\n n_coord = calculate_cell_neighbour_coordinates(\n coordinate, self._dimension)\n cell_c.neighbours = list([self._current_state[nc]\n for nc in n_coord])\n cell_n.neighbours = list([self._next_state[nc] for nc in n_coord])", "def create_neighbors(self):\n for row in self._currentGrid:\n for cell in row:\n row = cell.get_row()\n column = cell.get_column()\n if row == 0:\n # 1. upper left corner (3 neighbors)\n if column == 0:\n #print('upper left')\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # 2. rest of the top row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column + 1])\n # upper right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column - 1])\n cell.add_neighbor(self._currentGrid[self._rows - 1][column])\n # middle row\n elif row < (self._rows - 1):\n #print('middle')\n # 1. 
middle left edge (5 neighbors)\n if column == 0:\n #print('middle left edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][self._columns - 1])\n # 2. rest of the middle row (8 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column + 1])\n # 3. middle right edge (5 neighbors)\n else:\n #print('middle right edge')\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column])\n cell.add_neighbor(self._currentGrid[row + 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row][column - self._columns - 1])\n cell.add_neighbor(self._currentGrid[row + 1][column - self._columns - 1])\n # bottom row\n else:\n #print('lower')\n # 1. bottom left corner (3 neighbors)\n if column == 0:\n #print('lower left')\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[row][self._columns - 1])\n cell.add_neighbor(self._currentGrid[row - 1][self._columns - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # 2. 
rest of the bottom row (5 neighbors)\n elif column < (self._columns - 1):\n #print('upper')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row][column + 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[row - 1][column + 1])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column + 1])\n cell.add_neighbor(self._currentGrid[0][column])\n # bottom right corner (3 neighbors)\n else:\n #print('upper right')\n cell.add_neighbor(self._currentGrid[row][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column - 1])\n cell.add_neighbor(self._currentGrid[row - 1][column])\n cell.add_neighbor(self._currentGrid[0][column - 1])\n cell.add_neighbor(self._currentGrid[0][column])\n cell.add_neighbor(self._currentGrid[row - 1][0])\n cell.add_neighbor(self._currentGrid[row][0])", "def neighbors(adata, n_neighbors=15, key_added=None, **kwargs):\n if not isinstance(n_neighbors, (list, tuple)):\n sc.pp.neighbors(adata, n_neighbors=n_neighbors, key_added=key_added, **kwargs)\n else:\n for i, n_nb in enumerate(n_neighbors):\n if key_added is None:\n graph_key = f\"k{n_nb}\"\n elif not isinstance(key_added, (list, tuple)):\n graph_key = f\"{key_added}_k{n_nb}\"\n elif len(key_added) == len(n_neighbors):\n graph_key = key_added[i]\n else:\n raise ValueError(\n \"`key_added` can only be None, a scalar, or an \"\n \"iterable of the same length as `n_neighbors`.\"\n )\n neighbors(\n adata,\n n_neighbors=n_nb,\n key_added=graph_key,\n **kwargs,\n )\n return adata", "def _get_neighbors(self, matrix):\n self.neighbors = list() # FIXME: name refactor to step_queue instead of neighbors\n \n matrix_Nrows = len(matrix)\n matrix_Ncols = len(matrix[0]) # FIXME: no numpy array shape to help us to validate this.\n\n # FIXME: use a doble loop with + and - instead or define the jump vectors maybe more general\n steps_list = [ (1, 0), (-1, 0), (0, 1), (0, -1)]\n if self.previous_cell is None:\n # TODO: ADD EVERYTHING\n # No previous step\n for step in steps_list:\n next_r = self.r + step[0] \n next_c = self.c + step[1] \n b_rows = (next_r < matrix_Nrows) and (next_r >= 0)\n b_cols = (next_c < matrix_Ncols) and (next_c >= 0)\n if b_rows and b_cols:\n if matrix[next_r][next_c] == 0:\n self.neighbors.append(step)\n else:\n # Append first the step back\n previous_step = (self.previous_cell.r - self.r, self.previous_cell.c - self.c)\n self.neighbors.append(previous_step)\n for step in steps_list:\n if step != previous_step:\n next_r = self.r + step[0] \n next_c = self.c + step[1] \n b_rows = (next_r < matrix_Nrows) and (next_r >= 0)\n b_cols = (next_c < matrix_Ncols) and (next_c >= 0)\n if b_rows and b_cols:\n if matrix[next_r][next_c] == 0:\n #print(\" aaa: \", step, matrix[next_r][next_c], next_r, next_c) \n self.neighbors.append(step)", "def compute_nearest_neighbors(submatrix, balltree, k, row_start):\n\n nn_dist, nn_idx = balltree.query(submatrix, k=k+1)\n\n # Remove the self-as-neighbors\n nn_idx = nn_idx[:,1:]\n nn_dist = nn_dist[:,1:]\n\n # Construct a COO sparse matrix of edges and distances\n i = np.repeat(row_start + np.arange(nn_idx.shape[0]), k)\n j = nn_idx.ravel().astype(int)\n return (i, j, nn_dist.ravel())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Forges an `OpcUa_ReadRequest` and returns the corresponding `Request`.
def new_read_request(nodeIds, attributes=None): if attributes is None: attributes = [AttributeId.Value for _ in nodeIds] assert len(nodeIds) == len(attributes),\ 'There should the same number of NodeIds, attributes, and datavalues when reading nodes' # TODO: protect this from invalid attributes ? payload = allocator_no_gc('OpcUa_ReadRequest *') # The Toolkit takes ownership of this struct payload.encodeableType = EncodeableType.ReadRequest payload.MaxAge = 0. payload.TimestampsToReturn = libsub.OpcUa_TimestampsToReturn_Both payload.NoOfNodesToRead = len(nodeIds) nodesToRead = allocator_no_gc('OpcUa_ReadValueId[]', len(nodeIds)) for i, (snid, attr) in enumerate(zip(nodeIds, attributes)): nodesToRead[i].encodeableType = EncodeableType.ReadValueId nodesToRead[i].NodeId = str_to_nodeid(snid, no_gc=True)[0] nodesToRead[i].AttributeId = attr payload.NodesToRead = nodesToRead return Request(payload)
[ "def _SendReadRequest(self):\n req = {\n 'method': 'IO.read',\n 'params': {\n 'handle': self._stream_handle,\n 'size': 32768,\n }\n }\n\n # Send multiple reads to hide request latency.\n while len(self._pending_read_ids) < 2:\n self._pending_read_ids.append(self._SendRequest(req))", "async def read_friend_request(self, request: FriendRequest) -> None:\r\n await self.session.read_friend_request(request.id, client=self)\r\n request.options.update(is_read=True)", "def is_read_request(self):\n return self.operation == READ_REQUEST", "def read(self):\n\n # Request a packet. The RDU will ignore this request if no\n # data is available, and there doesn't seem to be any harm in\n # sending the request more frequently than once per second.\n self.port.write(PKT_REQUEST)\n return self.decode(self.port.read(4096))", "def _get_request(self):\n request = self._request_queue.get()\n return request", "def fetch(self, request):\n response = self._lookup_request(request)\n return response", "def get_request(data):\n\n # parse get request\n if isinstance(data, str):\n kvs = parse_params(data)\n return __get_request_from_url(kvs)\n \n # post request is supposed to be file object\n elif isinstance(data, io.IOBase):\n root = parse_xml(data)\n return __get_request_from_xml(root)\n else:\n pass", "def getRequested( self, their, request ):\n if request is None: return None\n possible = self.getAllOurReqFieldsFor( their )\n if possible is not None:\n for f in possible:\n if f in request.keys():\n return request[ f ]\n return None", "def requestInfo(self, requestName):\n reqURL = \"/reqmgr/reqMgr/request?requestName=%s\" % requestName\n reqHeaders = {\"Accept\": \"application/json\"}\n\n try:\n self.requestor = self.createConnection(self.reqMgrURL) \n self.requestor.request(\"GET\", reqURL, None, reqHeaders)\n except socket.error, ex:\n if self.verbose:\n print \" Error connecting to ReqMgr: %s\" % ex.strerror \n sys.exit(-1)\n \n reqMgrResponse = self.requestor.getresponse()\n reqMgrResponseString = reqMgrResponse.read()\n if reqMgrResponse.status == 404:\n if self.verbose:\n print \" requestInfo(): Request %s not found.\" % requestName\n return None\n\n return json.loads(reqMgrResponseString)", "def post_read_result(self):\n self.operation = READ_REQUEST\n return self.post_outcome_request()", "def read_request(self):\n data = b''\n while data.find(b'\\r\\n\\r\\n') == -1:\n r = self.conn.recv(1024)\n # r is empty if socket is closed\n if not r:\n logging.error(\"socket is closed\")\n break\n data += r\n try:\n self.request_line = data.splitlines()[0]\n except Exception as e:\n logging.error(\"recieved data:{0}\".format(data))\n raise e", "def transaction_request():\n # type: () -> paynlsdk2.api.refund.info.Request\n from paynlsdk2.api.transaction.info import Request\n return Request()", "def _get_request(self, apiname):\n url = \"http://{host:}:{port:}/api/{api:}\".format(\n host=self.host, port=self.port, api=apiname\n )\n r = requests.get(url)\n try:\n return r.json()\n except json.decoder.JSONDecodeError:\n print(r.text)\n raise", "def read_request(stream):\n request_line, headers, body = yield from read_message(stream)\n method, uri, version = request_line[:-2].decode().split(None, 2)\n return method, uri, headers, body", "def inboundRequest(self, request):\n return self._inboundRequestEndpoint.app.resource()", "def recvRequest(self):\n\t\tself.rBuf=self.s.recv(self.size)", "def info_request():\n # type: () -> paynlsdk2.api.refund.info.Request\n from paynlsdk2.api.refund.info import Request\n return 
Request()", "def _read_para_echo_request_unsigned(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument\n _data = self._read_fileng(clen)\n\n echo_request_unsigned = dict(\n type=desc,\n critical=cbit,\n length=clen,\n data=_data,\n )\n\n _plen = length - clen\n if _plen:\n self._read_fileng(_plen)\n\n return echo_request_unsigned", "def select_request():\n user_name = id_entry.get()\n if user_name == '':\n messagebox.showinfo('Error', 'Please enter your user name.')\n return\n r = requests.get(address + \"/api/previous_request/\" + user_name)\n pre_req = r.json()\n if type(pre_req) == dict:\n req_keys = []\n for key, value in pre_req.items():\n f_n = value['filename']\n t_u = value['time_uploaded']\n p = value['procedure']\n k = key + ':' + f_n + t_u + p\n req_keys.append(k)\n open_req_cb['value'] = req_keys\n else:\n messagebox.showinfo('Error', pre_req)\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Forges an `OpcUa_WriteRequest` and returns the corresponding `Request`. Types for `datavalues` must be provided. For each `pys2opc.types.DataValue`, the type is either found in `datavalue.variantType`, or in the `types` list. If both `datavalue.variantType` and the type in `types` are given, they must be equal.
def new_write_request(nodeIds, datavalues, attributes=None, types=None): if attributes is None: attributes = [AttributeId.Value for _ in nodeIds] assert len(nodeIds) == len(attributes) == len(datavalues),\ 'There should the same number of NodeIds, attributes, and datavalues when writing nodes' if types: assert len(nodeIds) == len(types) # Compute types sopc_types = [] types = types or [None] * len(nodeIds) for dv, ty in zip(datavalues, types): if dv.variantType is not None: if ty is not None and ty != dv.variantType: raise ValueError('Inconsistent type, type of datavalue is different from type given in types list') sopc_types.append(dv.variantType) else: sopc_types.append(ty) assert None not in sopc_types, 'Incomplete type information, cannot create write request' # Overwrite values' type for dv, ty in zip(datavalues, sopc_types): dv.variantType = ty # Prepare the request, it will be freed by the Toolkit payload = allocator_no_gc('OpcUa_WriteRequest *') payload.encodeableType = EncodeableType.WriteRequest payload.NoOfNodesToWrite = len(nodeIds) nodesToWrite = allocator_no_gc('OpcUa_WriteValue[]', len(nodeIds)) for i, (snid, attr, val) in enumerate(zip(nodeIds, attributes, datavalues)): nodesToWrite[i].encodeableType = EncodeableType.WriteValue nodesToWrite[i].NodeId = str_to_nodeid(snid, no_gc=True)[0] nodesToWrite[i].AttributeId = attr nodesToWrite[i].Value = val.to_sopc_datavalue(no_gc=True)[0] payload.NodesToWrite = nodesToWrite return Request(payload)
[ "def build_write_single_register_value( # pylint: disable=too-many-arguments\n register_type: RegisterType,\n register_address: int,\n register_data_type: DataType,\n register_name: Optional[str],\n write_value: Union[str, int, float, bool, ButtonPayload, SwitchPayload, datetime],\n serial_number: Optional[str] = None,\n ) -> List[int]:\n output_content = V1Builder.build_packet_preambule(\n packet=Packet.WRITE_SINGLE_REGISTER_VALUE,\n serial_number=serial_number,\n )\n\n output_content.append(register_type.value)\n output_content.append(register_address >> 8)\n output_content.append(register_address & 0xFF)\n\n if register_data_type in (\n DataType.CHAR,\n DataType.UCHAR,\n DataType.SHORT,\n DataType.USHORT,\n DataType.INT,\n DataType.UINT,\n DataType.FLOAT,\n DataType.BOOLEAN,\n ) and isinstance(write_value, (int, float, bool)):\n transformed_value = ValueTransformHelpers.transform_to_bytes(\n data_type=register_data_type,\n value=write_value,\n )\n\n # Value could not be transformed\n if transformed_value is None:\n raise BuildPayloadException(\"Value to be written into register could not be transformed\")\n\n for value in transformed_value:\n output_content.append(value)\n\n elif register_data_type in (\n DataType.BUTTON,\n DataType.SWITCH,\n ) and isinstance(write_value, (SwitchPayload, ButtonPayload)):\n transformed_value = ValueTransformHelpers.transform_to_bytes(\n data_type=register_data_type,\n value=write_value,\n )\n\n # Value could not be transformed\n if transformed_value is None:\n raise BuildPayloadException(\"Value to be written into register could not be transformed\")\n\n for value in transformed_value:\n output_content.append(value)\n\n # SPECIAL TRANSFORMING FOR STATE ATTRIBUTE\n elif (\n register_data_type == DataType.ENUM\n and register_type == RegisterType.ATTRIBUTE\n and register_name == DeviceProperty.STATE.value\n ):\n transformed_value = ValueTransformHelpers.transform_to_bytes(\n data_type=DataType.UCHAR,\n value=StateTransformHelpers.transform_to_device(device_state=ConnectionState(write_value)).value,\n )\n\n # Value could not be transformed\n if transformed_value is None:\n raise BuildPayloadException(\"Value to be written into register could not be transformed\")\n\n for value in transformed_value:\n output_content.append(value)\n\n else:\n raise BuildPayloadException(\"Unsupported register data type\")\n\n return output_content", "def helper_maybe_read_types(nodeIds, datavalues, attributes, types, sendFct):\n # Note: this function is here to avoid copy/paste in users of new_write_request that wish to use the \"auto-type\" functionality.\n # The sendFct hints that this function may not be in the optimal place.\n\n if attributes is None:\n attributes = [AttributeId.Value for _ in nodeIds]\n assert len(nodeIds) == len(attributes) == len(datavalues),\\\n 'There should the same number of NodeIds, attributes, and datavalues when reading nodes'\n if types:\n assert len(nodeIds) == len(types)\n else:\n types = [None] * len(nodeIds)\n\n # Compute missing types, send the request, and update the missing types.\n sopc_types = [dv.variantType if dv.variantType is not None else ty for dv,ty in zip(datavalues, types)]\n missingTypesInfo = [(i, snid, attr) for i,(snid,attr,ty) in enumerate(zip(nodeIds, attributes, sopc_types)) if ty is None]\n if missingTypesInfo:\n _, readNids, readAttrs = zip(*missingTypesInfo)\n request = Request.new_read_request(readNids, readAttrs)\n readDatavalues = sendFct(request, bWaitResponse=True)\n for (i, _, _), dv in zip(missingTypesInfo, 
readDatavalues.results):\n assert dv.variantType != VariantType.Null, 'Automatic type detection failed, null type read.'\n sopc_types[i] = dv.variantType\n\n return sopc_types", "def _send_responses_device_type(\n self,\n remote_addr: AddressTupleVXType,\n device: UpnpDevice,\n device_type: Optional[str] = None,\n ) -> None:\n self._send_response(\n remote_addr,\n device_type or device.device_type,\n f\"{self.device.udn}::{device.device_type}\",\n )", "def register_data_type():\n try:\n data_type = json_wrapper.loads(request.values.get('desc'))\n except:\n response = jsonify({'result': 'fail', 'info': 'broken json'})\n return response\n result = __data_center.register_data_type(data_type)\n # deal response due to the result\n response = jsonify({'result': 'unhandled'})\n if result['succ']:\n response = jsonify({'result': 'successful', 'data_type_id': result['id']})\n else:\n if result['info'] == 'type_error':\n response = jsonify({'result': 'fail', 'info': 'type not accept: ' + str(result['attr'])})\n if result['info'] == 'db_error':\n response = jsonify({'result': 'fail', 'info': 'database error'})\n else:\n response = jsonify({'result': 'fail', 'info': result['info']})\n return response", "def write_multiple_values(db, values, **kwargs):\n for argument in kwargs:\n if len(kwargs[argument]) != len(values):\n raise ValueError(\"Argument length should be equal but was \"\n + str(len(kwargs[argument]))\n + \" and \"\n + str(len(values))\n )\n index = 0\n for value in values:\n measurement = default_measurement\n if \"measurement\" in kwargs.keys():\n measurement = kwargs[\"measurement\"][index]\n value_name = default_value_name\n if \"value_name\" in kwargs.keys():\n value_name = kwargs[\"value_name\"][index]\n time = datetime.utcnow()\n if \"time_utc\" in kwargs.keys():\n time = kwargs[\"time_utc\"][index]\n write_single_value(db, value, value_name=value_name, measurement=measurement, time=time)", "def PostDataMulti(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def set_response_type(nresp0=3, nresp1end=2, nresp1=2, nresp2end=1,\n nresp2=1, nresp3=1):\n assert nresp0 + nresp1end + nresp1 + nresp2end + nresp2 + nresp3 == n_runs/2\n\n resp0 = matlib.repmat([0, 0, 0], nresp0, 1)\n resp0vol = matlib.repmat(353, nresp0, 1)\n resp1end = matlib.repmat([0, 0, 1], nresp1end, 1)\n resp1endvol = matlib.repmat(355, nresp1end, 1)\n resp1_1 = matlib.repmat([1, 0, 0], np.floor(nresp1/2), 1)\n resp1_1vol = matlib.repmat(361, np.floor(nresp1/2), 1)\n resp1_2 = matlib.repmat([0, 1, 0], np.ceil(nresp1/2), 1)\n resp1_2vol = matlib.repmat(361, np.ceil(nresp1/2), 1)\n resp2end_1 = matlib.repmat([0, 1, 1], np.floor(nresp2end/2), 1)\n resp2end_1vol = matlib.repmat(363, np.floor(nresp2end/2), 1)\n resp2end_2 = matlib.repmat([1, 0, 1], np.ceil(nresp2end), 1)\n resp2end_2vol = matlib.repmat(363, np.ceil(nresp2end), 1)\n resp2 = matlib.repmat([1, 1, 0], nresp2, 1)\n resp2vol = matlib.repmat(369, nresp2, 1)\n resp3 = matlib.repmat([1, 1, 1], nresp3, 1)\n resp3vol = matlib.repmat(371, nresp3, 1)\n\n run_resps = np.concatenate([resp0, resp1end, resp1_1, resp1_2, resp2,\n resp2end_1, resp2end_2, resp3], 0)\n run_resps_vol = np.concatenate([resp0vol, resp1endvol, resp1_1vol,\n resp1_2vol, resp2vol,\n resp2end_1vol, resp2end_2vol, resp3vol], 0)\n\n def get_random_order():\n consec_noresp = True\n while consec_noresp:\n idx = np.arange(run_resps.shape[0])\n np.random.shuffle(idx)\n 
rand_run_resps = run_resps[idx].copy()\n rand_run_resps_vol = run_resps_vol[idx].copy()\n # Get number of consecutive instances of the same run type\n grouped = np.array([[x[0], sum(1 for i in y)] for x, y in\n itertools.groupby(rand_run_resps_vol)])\n noresps = grouped[grouped[:, 0] == 353, 1]\n # Re-randomize if there are two no-response runs back to back\n consec_noresp = sum(noresps > 1)\n return rand_run_resps, rand_run_resps_vol\n\n run_resps_s1, run_resps_vol_s1 = get_random_order()\n run_resps_s2, run_resps_vol_s2 = get_random_order()\n\n allruns_resps = np.concatenate([run_resps_s1, run_resps_s2], 0)\n allruns_resps_vol = np.concatenate([run_resps_vol_s1, run_resps_vol_s2], 0)\n return allruns_resps, allruns_resps_vol", "def make_response(self, data, *args, **kwargs):\n # we've already got a response, eg, from jsonify\n if isinstance(data, Response):\n return (data, *args)\n\n if isinstance(data, (list, tuple)) and len(data) and isinstance(data[0], Model):\n model_name = data[0].__class__.__name__\n if model_name in self.serializers_many:\n data = self.serializers_many[model_name].dump(data).data\n\n # we got the result of serializer.dump(obj)\n if isinstance(data, MarshalResult):\n data = data.data\n\n # we got plain python data types that need to be serialized\n return super().make_response(data, *args, **kwargs)", "def send_output_values(self, values, **kwargs):\n package_name = kwargs.get(\"package_name\", os.getenv(\"PACKAGE\"))\n task_id = kwargs.get(\"task_id\", os.getenv(\"TASK_ID\"))\n secrets = kwargs.get(\"secrets\", os.getenv(\"SECRETS\"))\n\n if not package_name:\n raise RuntimeError(\"A package_name is required\")\n if not task_id:\n raise RuntimeError(\"A task_id is required\")\n if not secrets:\n raise RuntimeError(\"A secrets is required\")\n\n self.response = api_send_output_values(package_name, task_id, values, secrets)", "def test_type_response():\n res = ResMsg()\n now = datetime.now()\n date = datetime.now().date()\n num = Decimal(11.11)\n test_dict = dict(now=now, date=date, num=num)\n res.update(code=ResponseCode.Success, data=test_dict)\n # return res.data,200,{\"token\":\"111\"}\n return res.data", "def get( self, request, address, implementation ):\n\t\tvariables = request.apiGenGetPdu().apiGenGetVarBind()\n\t\tresponse = request.reply()\n\t\tpdu = response.apiGenGetPdu()\n\t\ttry:\n\t\t\tresult = self.getOIDs( [key for (key,_) in variables] )\n\t\texcept errors.OIDNameError, err:\n\t\t\tpdu.apiGenSetErrorStatus( err.errorCode )\n\t\t\tpdu.apiGenSetErrorIndex( err.errorIndex + 1 ) # 1-indexed\n\t\t\tpdu.apiGenSetVarBind(variables)\n\t\t\tresult = None\n\t\telse:\n\t\t\tpdu.apiGenSetVarBind([\n\t\t\t\t(key,datatypes.typeCoerce(value,implementation))\n\t\t\t\tfor (key,value) in result\n\t\t\t])\n\t\tself.protocol.send( response.encode(), address )\n\t\treturn response", "def outbound_ds(self, data):\n\t\tif len(data) < 2:\n\t\t\tself.msg(1,\"[WSF] error: field length %d too short\", len(data))\n\t\t\treturn PDS_BAD_CMD\n\t\tself.msg(1,\"[WSF] Outbound DS value \" + ''.join(hex(ord(n)) for n in data[0]))\n\t\tif struct.unpack(\">B\", data[0])[0] != 0:\n\t\t\tself.msg(1,\"OUTBOUND_DS: Position 0 expected 0 got %s\", data[0])\n\n\t\tif data[1] == SNA_W:\n\t\t\tself.msg(1,\" - Write \")\n\t\t\tself.process_write(data[1:]) #skip the type value when we pass to process write\n\t\telif data[1] == SNA_EW:\n\t\t\tself.msg(1,\" - Erase/Write\")\n\t\t\tself.clear_screen()\n\t\t\tself.process_write(data[1:])\n\t\telif data[1] == SNA_EWA:\n\t\t\tself.msg(1,\" - 
Erase/Write/Alternate\")\n\t\t\tself.clear_screen()\n\t\t\tself.process_write(data[1:])\n\t\telif data[1] == SNA_EAU:\n\t\t\tself.msg(1,\" - Erase all Unprotected\")\n\t\t\tself.clear_unprotected()\n\t\telse:\n\t\t\tself.msg(1,\"unknown type \"+ ''.join(hex(ord(n)) for n in data[0]))", "def test_parse_form_and_query_params_with_union_type(self):\n sig = inspect.signature(logic_with_union)\n query_params = {'colors_or_object': '[\"blue\"]'}\n actual = parse_form_and_query_params(query_params, sig.parameters)\n assert {'colors_or_object': ['blue']} == actual\n\n query_params = {'colors_or_object': json.dumps({'str': 'auth'})}\n actual = parse_form_and_query_params(query_params, sig.parameters)\n expected = {\n 'colors_or_object': {'str': 'auth'},\n }\n assert expected == actual\n\n # An invalid type that isn't one of the accepted types\n query_params = {'colors_or_object': 45}\n with pytest.raises(ValueError):\n parse_form_and_query_params(query_params, sig.parameters)", "def ExportVariantSet(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def build_beacon_response(proxy,\n data,\n num_total_results,\n qparams_converted,\n by_entity_type,\n non_accessible_datasets,\n func_response_type):\n\n beacon_response = {\n 'meta': build_meta(proxy, qparams_converted, by_entity_type),\n 'responseSummary': build_response_summary(bool(data), num_total_results),\n # TODO: 'info': build_info(),\n 'info': {},\n 'beaconHandovers': build_beacon_handovers(),\n 'response': {\n 'resultSets': [ build_response(data, num_total_results, qparams_converted, non_accessible_datasets, func_response_type) ]\n }\n }\n return beacon_response", "def create_outputs(self):\n outputs = []\n if self.is_original:\n for i, recipient in enumerate(self.recipients):\n newUTXO = UTXO(self.hash, i, self.values[i], self.recipients[i])\n outputs.append(newUTXO)\n return outputs, len(outputs)\n # Deduct Inputs from Value\n input_sum = 0\n for i in self.inputs:\n input_sum += i.value\n\n # Send Value to Recipient\n # Sender Signs Transaction w/ Its Private Key (Ensures That Only The Private Key Owner Can Spend!)\n # transaction_val_uxto = UTXO(self.hash, 0, self.value, sender)\n # self.sender.sign_utxo(self.recipient, transaction_val_uxto)\n for i, recipient in enumerate(self.recipients):\n newUTXO = UTXO(self.hash, i, self.values[i], self.recipients[i])\n outputs.append(newUTXO)\n\n # Recipient Accepts Output\n # Recipient Uses Public Key of Sender to Verify The Sender's Signature (To Verify Chain of Ownership!)\n\n\n # (If Necessary) Return Change to Sender\n if input_sum - sum(self.values) > 0:\n newUTXO = UTXO(self.hash, len(outputs), input_sum - sum(self.values), self.sender)\n outputs.append(newUTXO)\n self.values.append(input_sum - sum(self.values))\n self.recipients.append(self.sender)\n\n return outputs, len(outputs)", "def validateQueryResponse(request,response): \n\n # Validate header\n validateResponseHeader(request,response)\n response_name = response['header']['name']\n\n # Validate response payload\n try:\n payload = response['payload']\n except:\n raise_value_error(generate_error_message(response_name,'payload is missing',response))\n\n if payload is None: raise_value_error(generate_error_message(response_name,'payload is missing',payload))\n if not isinstance(payload,dict): raise_value_error(generate_error_message(response_name,'payload must be a dict',payload))\n\n # Validate non-empty 
control response payload\n if response_name not in VALID_NON_EMPTY_PAYLOAD_RESPONSE_NAMES:\n if bool(payload): raise_value_error(generate_error_message(response_name,'payload must be empty',payload))\n else:\n if not bool(payload): raise_value_error(generate_error_message(response_name,'payload must not be empty',payload))\n\n # Validate thermostat query response payload\n if response_name in 'GetTemperatureReadingResponse': \n for required_key in ['temperatureReading']:\n if required_key not in payload: raise_value_error(generate_error_message(response_name,'payload.' + format(required_key) + ' is missing',payload))\n if 'value' not in payload['temperatureReading']: raise_value_error(generate_error_message(response_name,'payload.temperatureReading.value is missing',payload))\n if not is_number(payload['temperatureReading']['value']): raise_value_error(generate_error_message(response_name,'payload.temperatureReading.value must be a number',payload))\n\n if response_name in 'GetTargetTemperatureResponse': \n for required_key in ['temperatureMode']:\n if required_key not in payload: raise_value_error(generate_error_message(response_name,'payload.' + format(required_key) + ' is missing',payload))\n if 'value' not in payload['temperatureMode']: raise_value_error(generate_error_message(response_name,'payload.temperatureMode.value is missing',payload))\n if payload['temperatureMode']['value'] not in VALID_TEMPERATURE_MODES: raise_value_error(generate_error_message(response_name,'payload.temperatureMode.value is invalid',payload))\n\n mode = payload['temperatureMode']['value']\n\n for optional_key in ['targetTemperature','coolingTargetTemperature','heatingTargetTemperature']:\n if optional_key in payload:\n if 'value' not in payload[optional_key]: raise_value_error(generate_error_message(response_name,'payload.' + optional_key + '.value is missing',payload))\n if not is_number(payload[optional_key]['value']): raise_value_error(generate_error_message(response_name,'payload.' + optional_key + '.value must be a number',payload))\n\n if mode == 'CUSTOM':\n if 'friendlyName' not in payload['temperatureMode']: raise_value_error(generate_error_message(response_name,'payload.temperatureMode.friendlyName is missing',payload))\n if is_empty_string(payload['temperatureMode']['friendlyName']): raise_value_error(generate_error_message(response_name,'payload.temperatureMode.friendlyName must not be empty',payload))\n\n # Validate lock query response payload\n if response_name in ['GetLockStateResponse']:\n for required_key in ['lockState']:\n if required_key not in payload: raise_value_error(generate_error_message(response_name,'payload.' + format(required_key) + ' is missing',payload))\n if payload['lockState'] not in VALID_LOCK_STATES: raise_value_error(generate_error_message(response_name,'payload.lockState is invalid',payload))\n\n # Validate query error response payload\n if response_name == 'UnableToGetValueError':\n required_key = 'errorInfo'\n if required_key not in payload: raise_value_error(generate_error_message(response_name,'payload.' 
+ format(required_key) + ' is missing',payload))\n for required_key in ['code','description']:\n if required_key not in payload['errorInfo']: raise_value_error(generate_error_message(response_name,'payload.errorInfo' + format(required_key) + ' is missing',payload))\n if payload['errorInfo']['code'] not in VALID_UNABLE_ERROR_INFO_CODES: raise_value_error(generate_error_message(response_name,'payload.errorInfo.code is invalid',payload))", "def createResponseDataUnit(self):\n if self.dataUnitType == TC_PACKET_HEADER_DU_TYPE:\n retVal = TCpacketResponseDataUnit()\n elif self.dataUnitType == TC_CLTU_HEADER_DU_TYPE:\n retVal = TCcltuResponseDataUnit()\n else:\n raise AttributeError(\"createResponseDataUnit() only possible for TCpacketDataUnit or TCcltuDataUnit\")\n # common attributes for TC PACKET and TC CLTU response\n retVal.spacecraftId = self.spacecraftId\n return retVal", "def getNext( self, request, address, implementation ):\n\t\tvariables = request.apiGenGetPdu().apiGenGetVarBind()\n\t\tresponse = request.reply()\n\t\tpdu = response.apiGenGetPdu()\n\t\ttry:\n\t\t\tresult = self.getNextOIDs( [key for (key,_) in variables] )\n\t\texcept errors.OIDNameError, err:\n\t\t\tpdu.apiGenSetErrorStatus( err.errorCode )\n\t\t\tpdu.apiGenSetErrorIndex( err.errorIndex + 1 ) # 1-indexed\n\t\t\tpdu.apiGenSetVarBind(variables)\n\t\t\tresult = None\n\t\telse:\n\t\t\tpdu.apiGenSetVarBind([\n\t\t\t\t(key,datatypes.typeCoerce(value,implementation))\n\t\t\t\tfor (key,value) in result\n\t\t\t])\n\t\tself.protocol.send( response.encode(), address )\n\t\treturn response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Forges an `OpcUa_BrowseRequest` and returns the corresponding `Request`.
def new_browse_request(nodeIds, maxReferencesPerNode=1000): # Prepare the request, it will be freed by the Toolkit payload = allocator_no_gc('OpcUa_BrowseRequest *') payload.encodeableType = EncodeableType.BrowseRequest view = allocator_no_gc('OpcUa_ViewDescription *') view.encodeableType = EncodeableType.ViewDescription # Leave the ViewDescription filled with NULLs payload.View = view[0] payload.RequestedMaxReferencesPerNode = maxReferencesPerNode payload.NoOfNodesToBrowse = len(nodeIds) nodesToBrowse = allocator_no_gc('OpcUa_BrowseDescription[]', len(nodeIds)) for i, snid in enumerate(nodeIds): nodesToBrowse[i].encodeableType = EncodeableType.BrowseDescription nodesToBrowse[i].NodeId = str_to_nodeid(snid, no_gc=True)[0] nodesToBrowse[i].BrowseDirection = libsub.OpcUa_BrowseDirection_Both nodesToBrowse[i].IncludeSubtypes = False nodesToBrowse[i].NodeClassMask = 0xFF # See Part4 §5.8.2 Browse, §.2 Parameters nodesToBrowse[i].ResultMask = libsub.OpcUa_BrowseResultMask_All payload.NodesToBrowse = nodesToBrowse return Request(payload)
[ "def _do_browse(self, options=None):\r\n if options is None:\r\n options = {}\r\n\r\n options = self._prepare_browse_options(options)\r\n request_json = self._prepare_browse_json(options)\r\n\r\n flag, response = self._cvpysdk_object.make_request('POST', self._BROWSE, request_json)\r\n\r\n return self._process_browse_response(flag, response, options)", "def _prepare_browse_json(self, options):\r\n operation_types = {\r\n 'browse': 0,\r\n 'find': 1,\r\n 'all_versions': 2,\r\n 'delete_data': 7\r\n }\r\n\r\n options['operation'] = options['operation'].lower()\r\n\r\n if options['operation'] not in operation_types:\r\n options['operation'] = 'find'\r\n\r\n # add the browse mode value here, if it is different for an agent\r\n # if agent is not added in the dict, default value 2 will be used\r\n browse_mode = {\r\n 'virtual server': 4,\r\n 'cloud apps': 3\r\n }\r\n\r\n mode = 2\r\n paths = []\r\n\r\n if isinstance(options['path'], basestring):\r\n paths.append(options['path'])\r\n elif isinstance(options['path'], list):\r\n paths = options['path']\r\n else:\r\n paths = ['\\\\']\r\n\r\n if self._agent_object.agent_name in browse_mode:\r\n mode = browse_mode[self._agent_object.agent_name]\r\n\r\n request_json = {\r\n \"opType\": operation_types[options['operation']],\r\n \"mode\": {\r\n \"mode\": mode\r\n },\r\n \"paths\": [{\"path\": path} for path in paths],\r\n \"options\": {\r\n \"showDeletedFiles\": options['show_deleted'],\r\n \"restoreIndex\": options['restore_index'],\r\n \"vsDiskBrowse\": options['vm_disk_browse'],\r\n \"vsFileBrowse\": options.get('vs_file_browse', False)\r\n },\r\n \"entity\": {\r\n \"clientName\": self._client_object.client_name,\r\n \"clientId\": int(self._client_object.client_id),\r\n \"applicationId\": int(self._agent_object.agent_id),\r\n \"instanceId\": int(self._instance_object.instance_id),\r\n \"backupsetId\": int(self.backupset_id),\r\n \"subclientId\": int(options['_subclient_id'])\r\n },\r\n \"timeRange\": {\r\n \"fromTime\": self._get_epoch_time(options['from_time']),\r\n \"toTime\": self._get_epoch_time(options['to_time'])\r\n },\r\n \"advOptions\": {\r\n \"copyPrecedence\": options['copy_precedence']\r\n },\r\n \"ma\": {\r\n \"clientName\": options['media_agent']\r\n },\r\n \"queries\": [{\r\n \"type\": 0,\r\n \"queryId\": \"dataQuery\",\r\n \"dataParam\": {\r\n \"sortParam\": {\r\n \"ascending\": False,\r\n \"sortBy\": [0]\r\n },\r\n \"paging\": {\r\n \"pageSize\": int(options['page_size']),\r\n \"skipNode\": int(options['skip_node']),\r\n \"firstNode\": 0\r\n }\r\n }\r\n }]\r\n }\r\n\r\n if options['filters']:\r\n # [('FileName', '*.txt'), ('FileSize','GT','100')]\r\n request_json['queries'][0]['whereClause'] = []\r\n\r\n for browse_filter in options['filters']:\r\n if browse_filter[0] in ('FileName', 'FileSize'):\r\n temp_dict = {\r\n 'connector': 0,\r\n 'criteria': {\r\n 'field': browse_filter[0],\r\n 'values': [browse_filter[1]]\r\n }\r\n }\r\n\r\n if browse_filter[0] == 'FileSize':\r\n temp_dict['criteria']['dataOperator'] = browse_filter[2]\r\n\r\n request_json['queries'][0]['whereClause'].append(temp_dict)\r\n\r\n if options['job_id'] is not 0:\r\n request_json['advOptions']['advConfig'] = {\r\n 'browseAdvancedConfigBrowseByJob': {\r\n 'commcellId': options['commcell_id'],\r\n 'jobId': options['job_id']\r\n }\r\n }\r\n\r\n if options['include_aged_data']:\r\n request_json['options']['includeAgedData'] = True\r\n\r\n if options['include_hidden']:\r\n request_json['options']['includeHidden'] = True\r\n\r\n if options['include_running_jobs']:\r\n 
request_json['options']['includeRunningJobs'] = True\r\n\r\n if options['vs_volume_browse']:\r\n request_json['mode']['mode'] = 3\r\n request_json['options']['vsVolumeBrowse'] = True\r\n request_json['advOptions']['browseViewName'] = options['browse_view_name']\r\n\r\n return request_json", "def fetch(self, request):\n response = self._lookup_request(request)\n return response", "def select_request():\n user_name = id_entry.get()\n if user_name == '':\n messagebox.showinfo('Error', 'Please enter your user name.')\n return\n r = requests.get(address + \"/api/previous_request/\" + user_name)\n pre_req = r.json()\n if type(pre_req) == dict:\n req_keys = []\n for key, value in pre_req.items():\n f_n = value['filename']\n t_u = value['time_uploaded']\n p = value['procedure']\n k = key + ':' + f_n + t_u + p\n req_keys.append(k)\n open_req_cb['value'] = req_keys\n else:\n messagebox.showinfo('Error', pre_req)\n return None", "def _hubapi_request(self, *args, **kwargs):\n\t\treturn self._request('hubapi', *args, **kwargs)", "def _request_raw(\n self, method: str, path: str, query: typing.Dict = None, headers: typing.Dict = None,\n data: bytes = None,\n ) -> http.client.HTTPResponse:\n url = self.base_url + path\n if query:\n url = url + '?' + urllib.parse.urlencode(query)\n\n if headers is None:\n headers = {}\n request = urllib.request.Request(url, method=method, data=data, headers=headers)\n\n try:\n response = self.opener.open(request, timeout=self.timeout)\n except urllib.error.HTTPError as e:\n code = e.code\n status = e.reason\n try:\n body = _json_loads(e.read())\n message = body['result']['message']\n except (IOError, ValueError, KeyError) as e2:\n # Will only happen on read error or if Pebble sends invalid JSON.\n body = {}\n message = '{} - {}'.format(type(e2).__name__, e2)\n raise APIError(body, code, status, message)\n except urllib.error.URLError as e:\n raise ConnectionError(e.reason)\n\n return response", "def info_request():\n # type: () -> paynlsdk2.api.refund.info.Request\n from paynlsdk2.api.refund.info import Request\n return Request()", "def hub_request(self):\r\n try:\r\n return self.hub_requesting.all()[0]\r\n except IndexError:\r\n return None", "def api_request(api_response):\n\n\tclass FakeApiRequestObject:\n\t\tdef __call__(self, *args, **kwargs):\n\t\t\treturn api_response\n\n\t\tsend = __call__\n\n\treturn FakeApiRequestObject()", "def unpack(breq_original):\n\t\tbreq = bytearray(breq_original)\n\n\t\t# Extract request ID and length.\n\t\tr_id, r_len = struct.unpack(\"<BI\", breq[:5])\n\t\tbreq = breq[5:(5+r_len)]\n\t\t# Create a dict of parameters.\n\t\td = {\"id\":r_id}\n\n\t\t# Join\n\t\tif r_id == Protocol.REQ_JOIN:\n\t\t\t# Extract nickname\n\t\t\tbnlen, = struct.unpack(\"<I\", breq[:4])\n\t\t\tbreq = breq[4:]\n\t\t\tbname, = struct.unpack(\"<{}s\".format(bnlen), breq[:bnlen])\n\t\t\td[\"name\"] = bname.decode(\"utf-8\")\n\t\t\t# Extract document name\n\t\t\td[\"doc\"] = breq[bnlen:].decode(\"utf-8\")\n\t\t# Or leave?\n\t\telif r_id == Protocol.REQ_LEAVE:\n\t\t\t# No arguments here.\n\t\t\tpass\n\t\t# A full text request?\n\t\telif r_id == Protocol.REQ_TEXT:\n\t\t\t# No arguments\n\t\t\tpass\n\t\telif r_id == Protocol.RES_TEXT:\n\t\t\t# Extract version, cursor\n\t\t\tversion, cursor, = struct.unpack(\"<II\", breq[:8])\n\t\t\td[\"version\"] = version\n\t\t\td[\"cursor\"] = cursor\n\t\t\t# Extract text\n\t\t\td[\"text\"] = breq[8:].decode(\"utf-8\")\n\t\t# Commit?\n\t\telif r_id == Protocol.RES_COMMIT:\n\t\t\t# Extract version\n\t\t\tversion, = struct.unpack(\"<I\", 
breq[:4])\n\t\t\td[\"version\"] = version\n\t\t\td[\"sequence\"] = []\n\t\t\t# Extract operations\n\t\t\tbreq = breq[4:]\n\t\t\twhile len(breq) > 0:\n\t\t\t\tbreq, dop = Protocol.unpack_op(breq)\n\t\t\t\td[\"sequence\"].append(dop)\n\t\t# Ok response\n\t\telif r_id == Protocol.RES_OK:\n\t\t\treq, = struct.unpack(\"<B\", breq[:1])\n\t\t\td[\"req_id\"] = req\n\t\t# Error response\n\t\telif r_id == Protocol.RES_ERROR:\n\t\t\terror, = struct.unpack(\"<I\", breq[:4])\n\t\t\td[\"error\"] = error\n\t\treturn d", "def combo_callback(self):\n global o_img, p_img\n request_name = open_req_cb.get()\n selected_label.config(text='{}'.format(request_name))\n request_id = selected_label.cget('text').split(':')[0]\n user_name = id_entry.get()\n r = requests.get(\n address + \"/api/retrieve_request/\" + user_name + '/' + request_id)\n result = r.json()\n p_method.set(result['procedure'])\n selected_label.config(text=result['filename'])\n show_time(result)\n show_hist(result)\n o_img = result['original_img']\n p_img = result['processed_img']\n\n return None", "def call(self, request):\n return self.wait(self.send(request))", "def get_from_bitbucket(url):\n\n # replace browse with raw in request url\n url = url.replace(\"/browse/\", \"/raw/\", 1)\n\n r = requests.get(url, auth=(user, pw))\n return r.json()", "def get_requests_resp_and_aito_resp(aito_client: AitoClient, request_obj: aito_requests.AitoRequest):\n raw_resp_obj = requests.request(\n method=request_obj.method,\n url=aito_client.instance_url + request_obj.endpoint,\n headers=aito_client.headers,\n json=request_obj.query\n )\n raw_resp_json = raw_resp_obj.json()\n\n aito_resp = aito_client.request(request_obj=request_obj)\n return raw_resp_json, aito_resp", "def get_request(data):\n\n # parse get request\n if isinstance(data, str):\n kvs = parse_params(data)\n return __get_request_from_url(kvs)\n \n # post request is supposed to be file object\n elif isinstance(data, io.IOBase):\n root = parse_xml(data)\n return __get_request_from_xml(root)\n else:\n pass", "def GET_request(action):\n\n # OAuth token of the user that requests will be made on behalf of\n\n\n # Login of the advertising agency client\n # Required parameter if requests are made on behalf of an advertising agency\n clientLogin = 'marketingdigital@zara.com'\n\n headers = {\n # OAuth token. The word Bearer must be used\n \"Authorization\": 'OAuth AQAAAABDFBfdAAcVB0yqdlcRyEzIu8BBs1TTLuE',\n # Login of the advertising agency client\n \"Client-Login\": clientLogin,\n # Language for response messages\n \"Accept-Language\": \"en\",\n # Mode for report generation\n \"processingMode\": \"auto\"\n # Format for monetary values in the report\n # \"returnMoneyInMicros\": \"false\",\n # Don't include the row with the report name and date range in the report\n # \"skipReportHeader\": \"true\",\n # Don't include the row with column names in the report\n # \"skipColumnHeader\": \"true\",\n # Don't include the row with the number of statistics rows in the report\n # \"skipReportSummary\": \"true\"\n }\n\n\n API_URL = 'https://api.webmaster.yandex.net/v4'\n\n\n\n retry_count = 0\n retry_max = 1\n\n try:\n resp = requests.get(API_URL + action, headers=headers)\n except Exception as message:\n if \"400\" or \"401\" in message:\n logging.error(f\"Could not retrieve html, authentication or token error: {message}\")\n sys.exit(1)\n elif retry_count < retry_max:\n print(f\"Retrying ... 
(count {retry_count})\")\n # sleep for fifteen minutes\n time.sleep(10)\n\n # increase the counter\n retry_count = retry_count + 1\n\n else:\n logging.error(f\"Could not retrieve response: {message}\")\n raise Exception(str(message))\n\n return resp.json()", "def _get_request(self, apiname):\n url = \"http://{host:}:{port:}/api/{api:}\".format(\n host=self.host, port=self.port, api=apiname\n )\n r = requests.get(url)\n try:\n return r.json()\n except json.decoder.JSONDecodeError:\n print(r.text)\n raise", "def process_request(self, request):\n\n request.access_token = None\n request.whoami_url = ''\n request.view_requests = []\n request.site = None\n\n if request.COOKIES.has_key('access_token'):\n request.access_token = request.COOKIES['access_token']\n url, params, headers = WhoAmI.build_request(request.get_host(), request.access_token)\n request.view_requests.append(grequests.get(url, params=params, headers=headers))\n request.whoami_url = url\n\n try:\n request.site = Site.retrieve(request.get_host())\n except APIException as e:\n if e.status_code == 400:\n return HttpResponseRedirect('https://microco.sm')\n return None", "async def _submit_next_request(self, request, response):\n\n if 'OverallStatus' in response and response['OverallStatus'] == 'MoreDataAvailable':\n if is_it_time_to_pause():\n await self.pause_callback({'oauth': await Oauth2Request.pause_extract(),\n 'object_type': request.ObjectType,\n 'continue_request': response['RequestID']})\n else:\n req_base = await self._get_retrieve_request_base()\n request2 = req_base(ObjectType=request.ObjectType, ContinueRequest=response['RequestID'])\n await self.submit_request(request2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Internal helper that makes a `Request` to read the missing types, if any, in the provided `datavalues` and `types` lists. Returns the completed type list. Used by `write_nodes` implementations.
def helper_maybe_read_types(nodeIds, datavalues, attributes, types, sendFct): # Note: this function is here to avoid copy/paste in users of new_write_request that wish to use the "auto-type" functionality. # The sendFct hints that this function may not be in the optimal place. if attributes is None: attributes = [AttributeId.Value for _ in nodeIds] assert len(nodeIds) == len(attributes) == len(datavalues),\ 'There should the same number of NodeIds, attributes, and datavalues when reading nodes' if types: assert len(nodeIds) == len(types) else: types = [None] * len(nodeIds) # Compute missing types, send the request, and update the missing types. sopc_types = [dv.variantType if dv.variantType is not None else ty for dv,ty in zip(datavalues, types)] missingTypesInfo = [(i, snid, attr) for i,(snid,attr,ty) in enumerate(zip(nodeIds, attributes, sopc_types)) if ty is None] if missingTypesInfo: _, readNids, readAttrs = zip(*missingTypesInfo) request = Request.new_read_request(readNids, readAttrs) readDatavalues = sendFct(request, bWaitResponse=True) for (i, _, _), dv in zip(missingTypesInfo, readDatavalues.results): assert dv.variantType != VariantType.Null, 'Automatic type detection failed, null type read.' sopc_types[i] = dv.variantType return sopc_types
[ "def test_get_types_from_request(self):\n test_query_dict = {'location': 'West Hollywood, CA, United States', 'open': 'true', 'radius': '50', 'types': 'amusement_park,cafe,campground,casino,clothing_store,department_store,library,movie_theater,movie_rental,night_club,park,restaurant,shopping_mall,zoo'}\n types = get_types_from_request(test_query_dict)\n assert isinstance(types, list)\n assert types == test_query_dict['types'].split(',')\n del test_query_dict['types']\n assert get_types_from_request(test_query_dict) == []", "def checkTypes(type_names=[]):\n\n for type_name in type_names:\n if type_name not in TYPES:\n raise TypeError('Type %s not supported.' % type_name)\n exit()", "def _validate_list(values: Union[List[int], List[float], List[str]],\n allowed_types: List[Type[Any]], name: str) -> None:\n if not values:\n raise ValueError('{}: values list is empty.'.format(name))\n\n if not isinstance(values, list):\n raise TypeError('{}: values are in a {} but expected a list.'.format(\n name, type(values)))\n\n value_type = type(values[0])\n if value_type not in allowed_types:\n raise TypeError(\n '{}: values are expected to be one of {} but are {}.'.format(\n name, allowed_types, value_type))\n if not all(isinstance(value, value_type) for value in values):\n raise TypeError(\n '{}: all value types are expected to be {} but are not.'.format(\n name, value_type))", "def check_type_get_example(self, in_types):\n pass", "def get_by_type(self, types: list, additional_filter: str = None, attribute_values: dict = None,\n\t\t\t\t\tinclude_terminating: bool = False):\n\t\tself.logger.info('Loading nodes of type %s with filter %s and values %s', types, additional_filter,\n\t\t\t\t\t\t attribute_values)\n\n\t\tfilter = ''\n\t\tif not include_terminating:\n\t\t\tfilter = 'and ItemStatus <> :terminating and ItemStatus <> :removing'\n\n\t\tif additional_filter is None and attribute_values is not None:\n\t\t\traise RuntimeError('Filter is not set but attribute values are given.')\n\t\telif additional_filter is not None and attribute_values is None:\n\t\t\traise RuntimeError('Filter is set but no attribute values are given.')\n\t\telif additional_filter is not None and attribute_values is not None:\n\t\t\tfilter = filter + ' and (' + additional_filter + ')'\n\t\telif attribute_values is None:\n\t\t\tattribute_values = { }\n\n\t\tif not include_terminating:\n\t\t\tattribute_values.update({ ':terminating': 'terminating' })\n\t\t\tattribute_values.update({ ':removing': 'removing' })\n\n\t\tparts = []\n\t\tfor index, node_type in enumerate(types):\n\t\t\tattribute_values.update({ ':node_type' + str(index): node_type })\n\t\t\tparts.append('ItemType = :node_type' + str(index))\n\t\texpression = '(' + ' or '.join(parts) + ') ' + filter\n\n\t\titems = self.client.scan(expression, attribute_values)\n\n\t\tnodes = []\n\t\tfor item in items:\n\t\t\tnode = Node(item.pop('EC2InstanceId'), item.pop('ItemType'))\n\t\t\tnode.set_status(item.pop('ItemStatus'))\n\t\t\tfor k, v in item.items():\n\t\t\t\tnode.set_property(k, v)\n\t\t\tnodes.append(node)\n\n\t\treturn nodes", "def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]", "def get_valid_order_types():\n response = requests.get(PublicMethods.api_url + \"/Public/GetValidOrderTypes\")\n return response", "def new_write_request(nodeIds, datavalues, attributes=None, types=None):\n if attributes is None:\n attributes = [AttributeId.Value for _ in nodeIds]\n assert len(nodeIds) == len(attributes) == len(datavalues),\\\n 'There 
should the same number of NodeIds, attributes, and datavalues when writing nodes'\n if types:\n assert len(nodeIds) == len(types)\n\n # Compute types\n sopc_types = []\n types = types or [None] * len(nodeIds)\n for dv, ty in zip(datavalues, types):\n if dv.variantType is not None:\n if ty is not None and ty != dv.variantType:\n raise ValueError('Inconsistent type, type of datavalue is different from type given in types list')\n sopc_types.append(dv.variantType)\n else:\n sopc_types.append(ty)\n assert None not in sopc_types, 'Incomplete type information, cannot create write request'\n\n # Overwrite values' type\n for dv, ty in zip(datavalues, sopc_types):\n dv.variantType = ty\n\n # Prepare the request, it will be freed by the Toolkit\n payload = allocator_no_gc('OpcUa_WriteRequest *')\n payload.encodeableType = EncodeableType.WriteRequest\n payload.NoOfNodesToWrite = len(nodeIds)\n nodesToWrite = allocator_no_gc('OpcUa_WriteValue[]', len(nodeIds))\n for i, (snid, attr, val) in enumerate(zip(nodeIds, attributes, datavalues)):\n nodesToWrite[i].encodeableType = EncodeableType.WriteValue\n nodesToWrite[i].NodeId = str_to_nodeid(snid, no_gc=True)[0]\n nodesToWrite[i].AttributeId = attr\n nodesToWrite[i].Value = val.to_sopc_datavalue(no_gc=True)[0]\n payload.NodesToWrite = nodesToWrite\n\n return Request(payload)", "def _deserialize_unsupported_types(self, field, value, **kwargs):\n deserialized_value = None\n if TypeValidator.is_dict(value):\n inner_value = list(value.values())[0]\n if kwargs.get(\"enum\"):\n data_type = list(value.keys())[0]\n if data_type == DynamoDBType.LIST:\n deserialized_value = [\n get_enum_member(kwargs[\"enum\"], element)\n for element in inner_value\n ]\n elif data_type in SET_TYPES:\n deserialized_value = {\n get_enum_member(kwargs[\"enum\"], element)\n for element in inner_value\n }\n else:\n deserialized_value = get_enum_member(kwargs[\"enum\"], inner_value)\n else:\n deserialized_value = (\n DateTimeParser.parse_datetime_string(inner_value, kwargs[\"datetime_format\"])\n or DateTimeParser.parse_date_string(inner_value, kwargs[\"date_format\"])\n or DateTimeParser.parse_time_string(\n inner_value, kwargs[\"time_format\"]\n )\n )\n return deserialized_value", "def _check_enqueue_dtypes(self, vals):\n if isinstance(vals, dict):\n if not self._names:\n raise ValueError(\"Queue must have names to enqueue a dictionary\")\n if sorted(self._names, key=str) != sorted(vals.keys(), key=str):\n raise ValueError(\"Keys in dictionary to enqueue do not match \"\n f\"names of Queue. 
Dictionary: {sorted(vals.keys())},\"\n f\"Queue: {sorted(self._names)}\")\n # The order of values in `self._names` indicates the order in which the\n # tensors in the dictionary `vals` must be listed.\n vals = [vals[k] for k in self._names]\n else:\n if self._names:\n raise ValueError(\"You must enqueue a dictionary in a Queue with names\")\n if not isinstance(vals, (list, tuple)):\n vals = [vals]\n\n tensors = []\n for i, (val, dtype) in enumerate(zip(vals, self._dtypes)):\n tensors.append(\n ops.convert_to_tensor(val, dtype=dtype, name=\"component_%d\" % i))\n\n return tensors", "def type_gen(types: List[str]) -> Generator[str, None, None]:\n\n t_i = 0\n while t_i < len(types):\n if types[t_i] == '...':\n t_i = 0\n yield types[t_i]\n t_i += 1\n elif types[t_i][-3::] == '...':\n yield types[t_i][:-3:]\n else:\n yield types[t_i]\n t_i += 1\n # If reached the end, raise error\n yield('Type string \"' + \" , \".join(types) + '\" is missing types')", "def valid_content_types() -> List[str]:", "def ParseDataTypes(json_data_types):\n operand_types = {\n json_operand_type[\"identifier\"]: json_operand_type[\"type\"]\n for json_operand_type in json_data_types[\"operands\"]\n }\n operand_variants = {\n json_operand_type[\"identifier\"]:\n (json_operand_type[\"variants\"], json_operand_type[\"default\"])\n for json_operand_type in json_data_types[\"operands\"]\n }\n input_types = {\n json_input_type[\"identifier\"]: json_input_type[\"type\"]\n for json_input_type in json_data_types[\"inputs\"]\n }\n input_values = {\n json_input_type[\"identifier\"]:\n (json_input_type[\"values\"], json_input_type[\"default\"])\n for json_input_type in json_data_types[\"inputs\"]\n }\n return DataTypeBuilder(operand_types, operand_variants, input_types, input_values)", "def get_types():\n try:\n return list(mongo.db.documents.distinct(\"dataType\"))\n except:\n abort(500)", "async def typed_retrieve_query(\n database: str, data_type: Type[T], query: str, values: Optional[Tuple[Any, ...]] = None,\n) -> List[T]:\n\n values = values if values else tuple()\n\n try:\n async with aiosqlite.connect(database) as connection:\n async with connection.execute(query, values) as cursor:\n data = await cursor.fetchall()\n except aiosqlite.Error as error:\n bot_logger.error(f'Retrieve Query (\"{query}\"). {error}.')\n raise error\n\n transformed_entries = []\n\n for entry in data:\n try:\n transformed_entries.append(data_type(*entry))\n except (TypeError, ValueError) as e: # data couldn't be coerced to T\n bot_logger.error(\n f'Bad Entry for type {data_type} - {e}.\\n'\n f'Data: {entry}\\n'\n f'Query: {query}\\n'\n f'Params: {values}'\n )\n\n return transformed_entries", "def data_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"data_types\")", "def _setDataTypes(self, datatypes):\r\n \r\n self._dataTypes.clear()\r\n for dataType in datatypes:\r\n self.addDataType(dataType)", "def get_data_types(project: parser.TwincatItem) -> list[dict]:\n data_types = getattr(project, \"DataTypes\", [None])[0]\n if data_types is not None:\n return list(summary.enumerate_types(data_types))\n return []", "def get_valid_limit_order_types():\n response = requests.get(\n PublicMethods.api_url + \"/Public/GetValidLimitOrderTypes\"\n )\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends a `request` on the link with index `idx` (either a connection id or an endpoint id). When `bWaitResponse` is set, waits for the response and returns it. Otherwise, returns the `request` immediately; the response will be available later through `get_response`.
def send_generic_request(self, idx, request, bWaitResponse): reqCtx = int(request.requestContext) self._dRequestContexts[reqCtx] = request request.timestampSent = time.time() self._send_request(idx, request) if bWaitResponse: self._sSkipResponse.add(reqCtx) return self._wait_for_response(request) else: return request
[ "def make_request(self, item):\n url = item['url']\n headers = {}\n if item['bytes_to'] != 0:\n byte_range = 'bytes=%s-%s' % (item['bytes_from'], item['bytes_to'])\n headers['Range'] = byte_range\n try:\n response = self.session.get(url, headers=headers,\n timeout=float(self.options.timeout))\n except requests.exceptions.Timeout as exception:\n self.event('error', 'timeout: ' + str(exception))\n response = None # Return a None value if timeout occurred\n except requests.exceptions.ConnectionError as exception:\n self.event('error', 'connection error: ' + str(exception))\n response = None # Return a None value if connection has failed\n if not self.options.keep_alive:\n response.connection.close()\n return response", "def _send_get_request(target_url, results):\n requests.get(target_url)\n results.append(target_url)", "def call(self, request):\n return self.wait(self.send(request))", "async def send_request(session):\n client_id = randint(0, CLIENTS)\n logging.info('sending request to %s/?clientId=%d', SERVER_URL, client_id)\n async with session.get(SERVER_URL, params=[('clientId', client_id)]) as _:\n pass\n return TaskType.REQUEST", "def linkClick(link_type, link_name, domain, adress, port):\n raise Exception(\"Fail while executing callback.\")", "def send_request(self):\n if self.redirect and self.req is None:\n fet(self.url).then(self.response_redirect).then(\n self.redirect_url).catch(lambda ev: self.on_error(ev.message))\n elif self.redirect is None and self.req is None:\n if self.response_type == \"json\":\n fet(self.url).then(self.response).then(lambda ev: self.on_response(\n Modif(ev))).catch(lambda ev: self.on_error(ev.message))\n else:\n fet(self.url).then(self.response).then(self.on_response).catch(\n lambda ev: self.on_error(ev.message))\n elif self.redirect is None and self.req:\n if self.response_type == \"json\":\n fet(self.req).then(self.response).then(lambda ev: self.on_response(\n Modif(ev))).catch(lambda ev: self.on_error(ev.message))\n else:\n fet(self.req).then(self.response).then(self.on_response).catch(\n lambda ev: self.on_error(ev.message))", "def perform_request(c, URL):\n BUFFER.truncate(0)\n BUFFER.seek(0)\n c.setopt(c.URL, DATADOG_API_URL + URL)\n try:\n print(f'Connection to URL: {DATADOG_API_URL}{URL}')\n c.perform()\n print(f'{c.getinfo(c.RESPONSE_CODE)}')\n if c.getinfo(c.RESPONSE_CODE) is not 200:\n print(f'Check the output!')\n exit()\n finally:\n c.close()", "def test_respond_to_enquire_link(self):\n fake_smsc = FakeSMSC()\n client = self.successResultOf(self.connect(fake_smsc))\n self.assertEqual(client.received, b\"\")\n\n rtel_d = fake_smsc.respond_to_enquire_link()\n yield client.write(EnquireLink(2).get_bin())\n # enquire_link response received.\n self.assertNoResult(rtel_d)\n self.assertEqual(client.received, EnquireLinkResp(2).get_bin())\n\n yield wait0()\n self.successResultOf(rtel_d)", "def retry_request():\r\n self.http_connect()\r\n self.connection.request(method, path, data, headers)\r\n return self.connection.getresponse()", "def poll(idx):\n\tclient = httpclient.AsyncHTTPClient(max_clients=MAX_CLIENTS)\n\tlog('worker#%05d: polling from %s' % (idx, HOST))\n\tbody = {'_xsrf': 'undefined'}\n\trsp = yield client.fetch(HOST + \"/a/message/updates\", \n\t\tmethod='POST', \n\t\tbody=escape.json_encode(body),\n\t\trequest_timeout=0)\n\tresponse = escape.json_decode(rsp.body)\n\tfor m in response['messages']:\n\t\tlog('worker#%05d: [body: %s, id: %s]' % (idx, m['body'], m['id']))", "def request( \n self,\n uri,\n method='GET',\n body=None,\n 
headers=None,\n redirections=None,\n connection_type=None):\n del connection_type # Unused\n\n with requests.Session() as session:\n session.max_redirects = redirections\n response = session.request(\n method, uri, data=body, headers=headers, timeout=HTTP_TIMEOUT_SECONDS)\n headers = dict(response.headers)\n headers['status'] = response.status_code\n content = response.content\n return httplib2.Response(headers), content", "def serve_one_request(self,timeout=None):\r\n req = self.connection.recv(timeout=timeout)\r\n if req is not None:\r\n self.handle_request(req)\r\n return req", "def __getitem__(self, index):\n\n return self.requests[index]", "def request(req_url, params={}, print_status=False):\n response = requests.get(req_url, params)\n if (print_status): print(\"...response for <{}> was {}...\".format(response.url, response.status_code),flush=True)\n return response", "def _request(self, *args, **kwargs):\n last_exception = None\n\n for i in range(len(self._cluster)):\n\n # print out the last exception if needed\n if self._debug and last_exception:\n print self.get_traceback(last_exception)\n\n try:\n node = self._get_node()\n conn = httplib.HTTPConnection(node, timeout=self._timeout)\n\n if self._debug:\n conn.set_debuglevel(1)\n print 'host: %s' % (node)\n\n conn.request(*args, **kwargs)\n res = conn.getresponse()\n res = res.read()\n conn.close()\n\n if self._debug:\n print 'response: %s' % (res)\n\n except Exception: # we've thrown an exception while fetching a response, so we record it and try another node\n last_exception = sys.exc_info()\n continue\n\n else: # we got a response, so we load it, validate, and return or throw\n res = json.loads(res)\n self._validate_response(res)\n return res\n\n raise NoNodesLeft(\"Tried %s nodes, all failed. Last Exception: \\n\\n%s\" % (len(self._cluster), self.get_traceback(last_exception)))", "def handle_request(sock):\n aphorism = recv_until(sock, b'?')\n answer = get_answer(aphorism)\n sock.sendall(answer)", "def headRequest(group, index):", "def handle_request(self, nbr_idx, pkt_req):\n for seq_new in pkt_req.m_pkts_to_req:\n in_queue = False\n for seq_old in self.m_nbrs[nbr_idx].send_queue:\n if seq_new == seq_old:\n in_queue = True\n break\n if in_queue == False:\n self.m_nbrs[nbr_idx].send_queue.append(seq_new)\n # for file sharing, need not drop the head\n #if len(self.m_nbrs[nbr_idx].send_queue) > SEND_QUEUE_LEN:\n #del self.m_nbrs[nbr_idx].send_queue[0]", "def make_request(url):\n resp = requests.get(url)\n with print_lock:\n print(\"Thread name: {}\".format(threading.current_thread().name))\n print(\"Url: {}\".format(url))\n print(\"Response code: {}\\n\".format(resp.status_code))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Receives an OpcUa_Response, creates a Response, and associates it with its Request both ways. It is called for every response received through the LibSub callback_generic_event. The dictionary _dResponseClasses contains the classes that will be instantiated with the OpcUa_Response as parameter. New elements can be added to this dict to support more response decoders, or to override existing decoders.
def _on_response(self, responsePayload, responseContext, timestamp):
    assert responseContext in self._dRequestContexts, 'Unknown requestContext {}.'.format(responseContext)
    request = self._dRequestContexts.pop(responseContext)
    try:
        if responsePayload is None:
            return
        # Build typed response
        encType = ffi.cast('SOPC_EncodeableType**', responsePayload)
        response = self._dResponseClasses.get(encType[0], Response)(responsePayload)
        response.timestampReceived = timestamp  # Passing the timestamp instead of acquiring it here reduces it by ~10µs
        request.response = response
        response.request = request
        if responseContext not in self._sSkipResponse:
            self.on_generic_response(request, response)
        else:
            self._sSkipResponse.remove(responseContext)
    finally:
        # Hopefully the Toolkit always notifies the application, and it is caught here.
        # Also, if the processing of the response fails, it is caught here.
        request.eventResponseReceived.set()
    return response
[ "def _process_create_response(self, request, response):\n return self.to_resource(response[self.container])", "def __generateResponse(self,request,response,code=200):\r\n if isinstance(response, str):\r\n return Response(code,response,{HttpHeader.CONTENT_TYPE: MediaType.TEXT_PLAIN})\r\n elif isinstance(response, Response):\r\n return response\r\n else:\r\n (content,contentType) = self.__convertObjectToContentType(request, response)\r\n return Response(code,content,{HttpHeader.CONTENT_TYPE:contentType})", "def _handle_response(self,\n response_type: str,\n response: capnp._DynamicStructBuilder):\n self._RESPONSE_HANDLER.handle_response(response_type, response)", "def createResponseDataUnit(self):\n if self.dataUnitType == TC_PACKET_HEADER_DU_TYPE:\n retVal = TCpacketResponseDataUnit()\n elif self.dataUnitType == TC_CLTU_HEADER_DU_TYPE:\n retVal = TCcltuResponseDataUnit()\n else:\n raise AttributeError(\"createResponseDataUnit() only possible for TCpacketDataUnit or TCcltuDataUnit\")\n # common attributes for TC PACKET and TC CLTU response\n retVal.spacecraftId = self.spacecraftId\n return retVal", "def newResponse( self, response, field_dict=None ):\n if field_dict is None:\n field_dict = self.field_dict\n return RequestTranslator( response=response, field_dict=field_dict )", "def __init__(self):\n self.received = \"\"\n self._mapping = {}\n for name, val in self.__class__.__dict__.iteritems():\n if name.endswith(\"RESP\"):\n name = name.split(\"_\")[0].lower()\n self._mapping[val] = getattr(self, \"resp_%s\" % (name,))", "def decode_response(response):\n\n if u'result' in response:\n return ConnectionResponse(response)\n\n if 'method' in response and response['method'] == 'connection/complete':\n return ConnectionCompleteEvent(response)\n\n # Could not decode return st\n return response", "def __json_object_hook(self, d):\n return namedtuple('response', d.keys())(*d.values())", "def add_response(self, request, response):\n self.records.append(ResponseRecord(request, response))", "def _serialize_response(self, response_type, response_dict):\n retdict = {\"type\": response_type}\n retdict.update(response_dict)\n return json.dumps(retdict)", "def interpret_response(cls, response):\t\t\r\n\t\tif len(response.data) < 1:\r\n\t\t\traise InvalidResponseException(response, \"Response data must be at least 1 bytes\")\r\n\r\n\t\tresponse.service_data = cls.ResponseData()\r\n\t\tresponse.service_data.sequence_number_echo = response.data[0]\r\n\t\tresponse.service_data.parameter_records = response.data[1:] if len(response.data) > 1 else b''", "def __make_response(self, response: bytes) -> None:\r\n # split the response string into\r\n # request(request line + headers) and body\r\n request, body = self.__split_response(response)\r\n\r\n # process the response line and get back the headers\r\n # as list of strings\r\n headers = self.__process_response_line(request)\r\n\r\n # make dictionary from the headers list\r\n self.__process_headers(headers)\r\n\r\n # process the body\r\n self.__process_body(body)", "def parseResponse(self, byte_array):\n\t\tself.response_items['ID'] = self.removeBin(bin(byte_array[0]), bin(byte_array[1]))\n\t\t\n\t\tlist_header = ['QR', 'OPCODE', 'AA', 'TC', 'RD'] #List that holds all the keywords that are used later in the program. 
\n\t\tbin_header = self.removeBin(bin(byte_array[2])) #Example: Calling removeBin method and removes the 0b from the byte array.\n\t\tself.response_items[list_header[0]] = bin_header[0] #Example: add the returned value from removeBin with key/value pair QR=\"returned value\"\n\t\tself.response_items[list_header[1]] = bin_header[1:5]\n\t\ti = 5\n\t\twhile i < 8:\n\t\t\tself.response_items[list_header[i-3]] = bin_header[i]\n\t\t\ti += 1\n\t\tbin_header = self.removeBin(bin(byte_array[3]))\n\t\tself.response_items['RA'] = bin_header[0]\n\t\tself.response_items['Z'] = bin_header[1:4]\n\t\tself.response_items['RCODE'] = bin_header[4:]\n\t\t\n\t\tself.response_items['QDCOUNT'] = self.removeBin(bin(byte_array[4]), bin(byte_array[5]))\n\t\tself.response_items['ANCOUNT'] = self.removeBin(bin(byte_array[6]), bin(byte_array[7]))\n\t\tself.response_items['NSCOUNT'] = self.removeBin(bin(byte_array[8]), bin(byte_array[9]))\n\t\tself.response_items['ARCOUNT'] = self.removeBin(bin(byte_array[10]), bin(byte_array[11]))\n\t\t\n\t\tif len(byte_array) <= 12:\n\t\t\treturn self.response_items\n\t\t\"\"\"\n\t\tHere comes the more complicated part. All byte_array values are iterated and checked if it's a label.\n\t\t\"\"\"\n\t\titeration_read_label = 12 #Byte 12 always contains the first label.\n\t\titeration_qdcount = int(self.removeBin(bin(byte_array[4]), bin(byte_array[5])), 2) #Same as QDCOUNT, this tells us how many question sections there are in this part.\n\t\titeration_octet = byte_array[12] #This tells us how many characters we expect to parse (i.e. 4 for 'test').\n\t\ti = 0 #Just a variable that is used for counting an iteration. In this case it counts how many question sections we have parsed\n\t\tx = 0 #Just a variable that is used for counting an iteration. In this case it counts how many hostnames or subdomains we have parsed.\n\t\ty = 13 #Just a variable that is used for counting an iteration. In this case it counts which byte we have to parse for a specific character of a domain name (i.e. if it holds 15, and if the parser parses it we may get the value 't')\n\t\tz = 1 #Just a varaible that is used for counting an iteration. In this case it counts how many subdomains or hostnames we have parsed. (i.e. 1 could be test, 2 could be example, 3 could be .com)\n\t\ti_test = 0 #This variable is used as a boolean value. This checks if we have to add the QNAME/QTYPE/QCLASS section in the response_items dict.\n\t\tself.response_items\n\t\tself.temp_dict = {}\n\t\twhile i < iteration_qdcount:\n\t\t\tif i_test == 0:\n\t\t\t\tself.response_items['QNAME'] = [[]] *iteration_qdcount #Adding the QNAME section as a list in a list. 
Multiple lists are created if the QDCOUNT variable is more than 1.\n\t\t\t\tself.response_items['QTYPE'] = [[]] *iteration_qdcount\n\t\t\t\tself.response_items['QCLASS'] = [[]] *iteration_qdcount\n\t\t\t\ti_test = 1\n\t\t\twhile x < iteration_octet:\n\t\t\t\tself.temp_dict['domain_name_' + str(i) + str(z) +'part'] = self.temp_dict.setdefault('domain_name_' + str(i) + str(z) +'part', '') + chr(byte_array[y])\n\t\t\t\tx += 1\n\t\t\t\ty+=1\n\t\t\tself.response_items['QNAME'][i].append(self.temp_dict['domain_name_' + str(i) + str(z) +'part'])\n\t\t\tx = 0\n\t\t\ty += 1\n\t\t\titeration_read_label += iteration_octet + 1\n\t\t\titeration_octet = byte_array[iteration_read_label]\n\t\t\tz += 1\n\t\t\tif iteration_octet == 0:\n\t\t\t\tself.response_items['QTYPE'][i].append(self.removeBin(bin(byte_array[y]), bin(byte_array[y+1])))\n\t\t\t\tself.response_items['QCLASS'][i].append(self.removeBin(bin(byte_array[y+2]), bin(byte_array[y+3])))\n\t\t\t\ti += 1\n\t\t\t\tz = 1\n\t\t\t\tx = 0\n\t\t\t\ty += 4\n\t\t\t\titeration_read_label = y\n\t\t\t\ttry:\n\t\t\t\t\titeration_octet = byte_array[iteration_read_label]\n\t\t\t\texcept IndexError:\n\t\t\t\t\treturn self.response_items\n\t\titerable = y\n\t\tdel self.temp_dict, iteration_read_label, iteration_qdcount, iteration_octet, i, x, y, z, i_test\n\t\t\n\t\t\"\"\"\n\t\tThis is also a bit more complicated. Here we parse all ANCOUNT, NSCOUNT and ARCOUNT sections.\n\t\t\"\"\"\n\t\t\n\t\tancount_entries = int(self.response_items['ANCOUNT'], 2) #Here we check how many ANCOUNT sections there are.\n\t\tnscount_entries = int(self.response_items['NSCOUNT'], 2)\n\t\tarcount_entries = int(self.response_items['ARCOUNT'], 2)\n\t\tentries = ancount_entries + nscount_entries + arcount_entries\n\t\t\n\t\tself.response_items['RR'] = []\n\t\ti = 0\n\t\twhile i < entries:\n\t\t\tself.response_items['RR'].append({})\n\t\t\ti += 1\n\t\t\n\t\ti = 0\n\t\tname_start = iterable #We must know where to start. The variable \"iterable\" holds the value of the byte array that we must parse.\n\t\treference = 0\n\t\titeration_reference_loop = 0\n\t\tself.temp_dict = {}\n\t\tz= 0\n\t\ty = 0 #hoeveelste domeinnaam\n\t\tt = 0\n\t\tname_dict = str()\n\t\tname_count = 0\n\t\titeration_reference = 0\n\t\titer_loop = 0\n\t\tself.temp_dict_rdata = {}\n\t\titeration_count = 0\n\t\tloop_count = 0\n\t\t\n\t\t\n\t\twhile i < entries:\n\t\t\tif name_count < (entries - arcount_entries - nscount_entries):\n\t\t\t\tname_dict = 'ANCOUNT_ANSWER'\n\t\t\t\t\n\t\t\telif name_count < (entries - arcount_entries):\n\t\t\t\tname_dict = 'NSCOUNT_ANSWER'\n\t\t\t\t\n\t\t\telif name_count < (entries):\n\t\t\t\tname_dict = 'ARCOUNT_ANSWER'\n\t\t\telse:\n\t\t\t\tname_dict = 'ERROR'\n\t\t\tself.response_items['RR'][i]['NAME'] = list()\n\t\t\tif byte_array[name_start] & 0b11000000 == 0b11000000: #We must first check if the is a pointer, if this is the case we will start parsing. 
Note: this all happens in a while loop.\n\t\t\t\twhile byte_array[name_start] & 0b11000000 == 0b11000000:\n\t\t\t\t\treference = int(self.removeBin(bin(byte_array[name_start]), bin(byte_array[name_start+1]))[2:], 2)\n\t\t\t\t\titeration_reference = byte_array[reference]\n\t\t\t\t\titeration_count = reference + iteration_reference\n\t\t\t\t\titer_loop = reference+1\n\t\t\t\t\twhile True:\n\t\t\t\t\t\twhile iteration_reference_loop < iteration_reference:\t\t\t\t\t\n\t\t\t\t\t\t\tself.temp_dict['domain_name_' + str(t) + 'part'] = self.temp_dict.setdefault('domain_name_' + str(t) + 'part', '') + chr(byte_array[iter_loop])\n\t\t\t\t\t\t\titer_loop += 1\n\t\t\t\t\t\t\titeration_reference_loop += 1\n\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\tself.response_items['RR'][i]['NAME'].append(self.temp_dict['domain_name_' + str(t) + 'part'])\n\t\t\t\t\t\t\n\t\t\t\t\t\titeration_reference_loop = 0\n\t\t\t\t\t\titer_loop = iteration_count + 2\n\t\t\t\t\t\tt += 1\n\t\t\t\t\t\titeration_reference = byte_array[iteration_count+1]\n\t\t\t\t\t\titeration_count += 1\n\t\t\t\t\t\titeration_count += iteration_reference\n\t\t\t\t\t\tif (iteration_reference == 0) | (iteration_reference == 192):\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif iteration_reference == 192:\n\t\t\t\t\t\t\t\tloop_count = byte_array[iteration_count-iteration_reference+1] #The RDATA section could also contain a pointer.\n\t\t\t\t\t\t\t\titeration_reference = byte_array[loop_count]\n\t\t\t\t\t\t\t\titer_loop = loop_count + 1\n\t\t\t\t\t\t\t\titeration_count = iteration_reference + loop_count\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tt=0\n\t\t\t\t\t\t\tname_start += 2\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\n\t\t\t\t\tif byte_array[name_start] == 192:\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\n\t\t\t\t\n\n\t\t\t\t\n\t\t\t\tself.response_items['RR'][i]['RR_TYPE'] = self.response_items['RR'][i].get('RR_TYPE', '') + name_dict\n\t\t\t\t\n\t\t\t\t\n\n\t\t\t\tself.response_items['RR'][i]['TYPE'] = self.response_items['RR'][i].get('TYPE', '') + self.removeBin(bin(byte_array[name_start]), bin(byte_array[name_start+1]))\n\t\t\t\tself.response_items['RR'][i]['CLASS'] = self.response_items['RR'][i].get('CLASS', '') + self.removeBin(bin(byte_array[name_start+2]), bin(byte_array[name_start+3]))\n\t\t\t\tself.response_items['RR'][i]['TTL'] = self.response_items['RR'][i].get('TTL', 0) + int(self.removeBin(bin(byte_array[name_start+4]), bin(byte_array[name_start+5]), bin(byte_array[name_start+6]), bin(byte_array[name_start+7])), 2)\n\t\t\t\tself.response_items['RR'][i]['RDLENGTH'] = self.response_items['RR'][i].get('RDLENGTH', 0) + int(self.removeBin(bin(byte_array[name_start+8]), bin(byte_array[name_start+9])),2)\n\t\t\t\tname_start += 10\n\t\t\t\titer_loop_rdata = 0\n\t\t\t\tif byte_array[name_start] & 0b11000000 != 0b11000000:\n\t\t\t\t\twhile iter_loop_rdata < self.response_items['RR'][i]['RDLENGTH']:\n\t\t\t\t\t\tself.temp_dict_rdata['RDATA'] = self.temp_dict_rdata.setdefault('RDATA', '') + str(byte_array[name_start])\n\t\t\t\t\t\titer_loop_rdata += 1\n\t\t\t\t\t\tname_start += 1\n\t\t\t\t\tself.response_items['RR'][i]['RDATA'] = self.temp_dict_rdata['RDATA']\n\t\t\t\telse:\n\t\t\t\t\tself.response_items['RR'][i]['RDATA'] = int(self.removeBin(bin(byte_array[name_start]), bin(byte_array[name_start+1])))\n\t\t\t\t\tname_start += 2\n\t\t\t\tname_count += 1\t\n\t\t\t\ti += 1\n\t\t\t\tself.temp_dict = {}\n\t\t\telse:\n\t\t\t\traise Exception\n\t\t\n\t\treturn(self.response_items)", "def extend_response(self, response):\n bases = [GenericResponseExtension]\n\n if self.skip_n_forms:\n 
bases.append(type(\n \"SkipNForms\",\n (SkipNFormsExtension, ),\n dict(n=self.skip_n_forms)\n ))\n\n if self.use_intercooler:\n bases.append(IntercoolerClickExtension)\n\n bases.append(response.__class__)\n response.__class__ = type('ExtendedResponse', tuple(bases), {})\n\n return response", "def _deserialize_response(\n response: Union[ModelBase, List[ModelBase]],\n response_model: Optional['Response']=None,\n response_preprocess=None,\n response_postprocess=None,\n json=True,\n **options) -> 'Response':\n\n if not ((isinstance(response_preprocess, Callable) or\n response_preprocess is None) and \\\n (isinstance(response_postprocess, Callable) or\n response_postprocess is None)):\n \"\"\" pre_process 와 pro_process는 callable object여야 한다.\"\"\"\n raise DeserializerError('Function type Error',\n 'pre or post processor must be callable object')\n\n response_model, origin = _resolve_response_model(response_model)\n\n # If origin is not None, that is, origin was wrapped by typing module,\n # origin must be the same type with response\n if origin and origin != type(response):\n raise DeserializerError('Response type Error',\n '%s is different type with %s' % (origin, response))\n\n try:\n # Pre-process so that it can be applied\n # in response model immediately\n if response_preprocess:\n response = response_preprocess(response, **options)\n\n # convert to response model\n # It can be converted to dictionary formatting\n response = _apply_response_model(response,\n response_model,\n origin,\n json)\n if response_postprocess:\n response = response_postprocess(response, **options)\n except Exception as e:\n if isinstance(e, ValidationError):\n raise ResponseModelError('%s Model Error' % e.model.__name__, e.json())\n raise ResponseModelError('Response deserializer Error', e.args[0])\n\n return response", "def adapt_response(response):\n if not isinstance(response, tuple):\n response = (response,)\n status = 200\n headers = {\"Content-Type\": \"text/plain\"}\n body = \"\"\n body_set = False\n def _is_mapping(value):\n return isinstance(value, lupa._lupa._LuaTable) or \\\n isinstance(value, dict)\n for value in response:\n if isinstance(value, basestring):\n body = value\n body_set = True\n elif isinstance(value, int):\n status = value\n elif not body_set and _is_mapping(value):\n body = json_.dumps(value, cls=json.LuaEncoder)\n headers['Content-Type'] = 'application/json'\n body_set = True\n elif body_set and _is_mapping(value):\n for header in value:\n headers[header] = value[header]\n return flask.make_response(body, status, headers)", "def on_generic_response(self, request, response):\n # TODO: Upd doc\n assert request.requestContext not in self._dPendingResponses,\\\n 'A request with context {} is still waiting for a response'.format(request.requestContext)\n self._dPendingResponses[request.requestContext] = response", "def response_class(self, response_class):\n\n self._response_class = response_class", "def _get_response_as_dict(self, response):\n\n response_dict = response.as_dict()\n response_dict.update({\n key: self.event[key]\n for key in (\"StackId\", \"RequestId\", \"LogicalResourceId\")\n })\n return response_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
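The row above illustrates a decoder-registry pattern: a dictionary maps a wire-level type tag to the class used to decode it, with a generic class as the fallback, so new decoders can be registered or existing ones overridden without touching the dispatch code. A minimal sketch of that pattern follows; the type tags and class names here are hypothetical and are not the pys2opc/LibSub API.

class GenericResponse:
    """Fallback decoder used when no specific class is registered."""
    def __init__(self, payload):
        self.payload = payload
        self.request = None

class ReadResponse(GenericResponse):
    pass

class WriteResponse(GenericResponse):
    pass

# Type tag -> decoder class; adding or replacing an entry changes how a
# response of that type is decoded.
RESPONSE_CLASSES = {
    'ReadResponse': ReadResponse,
    'WriteResponse': WriteResponse,
}

def decode_response(type_tag, payload):
    # Unknown tags fall back to the generic decoder instead of failing.
    return RESPONSE_CLASSES.get(type_tag, GenericResponse)(payload)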
This callback is called when the class receives a Response that is not being waited upon. Overriding it is optional, and calling the parent class's on_generic_response is also optional. The default implementation of this method stores the response in a double-ended queue which tracks available responses (see pop_response).
def on_generic_response(self, request, response):
    # TODO: Upd doc
    assert request.requestContext not in self._dPendingResponses,\
        'A request with context {} is still waiting for a response'.format(request.requestContext)
    self._dPendingResponses[request.requestContext] = response
[ "def handle_response(self, response):\n d = self.requests.pop(response.id)\n if response.error:\n d.errback(response)\n else:\n d.callback(response)", "def _on_response(self, responsePayload, responseContext, timestamp):\n assert responseContext in self._dRequestContexts, 'Unknown requestContext {}.'.format(responseContext)\n request = self._dRequestContexts.pop(responseContext)\n try:\n if responsePayload is None:\n return\n # Build typed response\n encType = ffi.cast('SOPC_EncodeableType**', responsePayload)\n response = self._dResponseClasses.get(encType[0], Response)(responsePayload)\n response.timestampReceived = timestamp # Passing the timestamp instead of acquiring it here reduces it by ~10µs\n request.response = response\n response.request = request\n if responseContext not in self._sSkipResponse:\n self.on_generic_response(request, response)\n else:\n self._sSkipResponse.remove(responseContext)\n finally:\n # Hopefully the Toolkit always notifies the application, and it is caught here.\n # Also, if the processing of the response fails, it is caught here.\n request.eventResponseReceived.set()\n return response", "def _response_handler(self, remote_address: data_type, response: BaseResponsePacket):\n logger.info(\n \"Response received. remote_address=%s command_id=%s request_id=%s\",\n remote_address,\n response.command_id,\n response.request_id,\n )\n\n callback = self._expiring_dict.pop(request_uuid(remote_address, response), None)\n if callback is not None:\n callback(remote_address, response)\n else:\n logger.error(\"No callback\")", "def handleResponse(self, message: MessageHandler) -> T.NoReturn:\n caller_id = message.get(\"caller_id\", None)\n\n if caller_id is not None and caller_id in self.deferred_asks:\n d = self.deferred_asks[caller_id] # type: Deferred\n d.callback(message.get(\"result\"))\n del self.deferred_asks[caller_id]\n else:\n warnings.warn(f\"Response to ask {caller_id} arrived but was not found in deferred_asks\")", "async def _initialize_response_queue(self):\n result = await self.connection._channel.queue_declare(exclusive=True)\n\n self.callback_queue = result[\"queue\"]\n\n await self.connection._channel.basic_consume(\n callback=self._on_response, no_ack=True, queue_name=self.callback_queue\n )", "def __on_request_response__(self, ch, method, props, body):\r\n\t\ttry:\r\n\t\t\tself.last_message = json.loads(body)\r\n\t\texcept ValueError:\r\n\t\t\tprint 'encountered an error while decoding the message'\r\n\t\t\tself.last_message = body\r\n\r\n\t\tself.response = 'received'", "def process_response(self, request, response):\n return self.__process_awesome_response(request, response)", "def _handle_response(self,\n response_type: str,\n response: capnp._DynamicStructBuilder):\n self._RESPONSE_HANDLER.handle_response(response_type, response)", "def respond(self):\n if self.responses:\n return self.attrify(self.responses.popleft())\n return None", "def test_response_without_notifications(self):\n request = http.HttpRequest()\n response = http.HttpResponse()\n self.middleware.process_response(request, response)", "def async_response(self, async_response):\n\n self._async_response = async_response", "async def _on_response(self, channel, body, envelope, properties):\n try:\n future = self.responses[properties.correlation_id]\n except KeyError:\n logger.warning(f\"Received not expected request response with id {properties.correlation_id}\")\n else:\n if not future.cancelled():\n future.set_result(body)\n else:\n logger.warning(f\"Request response future 
{future} cancelled. Getting result now\")\n del self.responses[properties.correlation_id]", "def read_handler_responses(self):\r\n try:\r\n while True:\r\n if self.pending_responses:\r\n resp = self.pending_responses.popleft()\r\n handler = None\r\n else:\r\n resp = None\r\n handler = self.disp_sock.recv(zmq.NOBLOCK)\r\n delim = self.disp_sock.recv(zmq.NOBLOCK)\r\n assert delim == \"\", \"non-empty msg delimiter: \"+delim\r\n resp = self.disp_sock.recv(zmq.NOBLOCK)\r\n if resp == \"X\":\r\n self.mark_handler_disconnecting(handler)\r\n else:\r\n if handler is not None:\r\n self.mark_handler_alive(handler)\r\n if resp:\r\n self.recv_sock.send(resp,zmq.NOBLOCK)\r\n resp = None\r\n except zmq.ZMQError, e:\r\n if resp is not None:\r\n self.pending_responses.appendleft(resp)\r\n if e.errno not in (errno.EINTR,zmq.EAGAIN,):\r\n raise", "def emit_until_response(self, event_name, **kwargs):\n ...", "def finalize_response(self, req, *args, **kwargs):\n resp = super().finalize_response(req, *args, **kwargs)\n if hasattr(self, 'publisher') and self.publisher:\n HttpExchange.objects.from_exchange(\n req,\n resp,\n related_object=self.get_integration(),\n payload=self.data,\n )\n return resp", "def _recordEmptyResponse(self, distributor):\n self.inc(\"{}.{}.empty-response\".format(self.keyPrefix, distributor))", "async def pre_response(self, request, response, context=None):\n pass", "def test_commandsWithoutResponse(self):\r\n DATA = \"hello\"\r\n pp = pool.ProcessPool(ampChild=NoResponseChild, min=1, max=1)\r\n\r\n def _check(_):\r\n return pp.doWork(GetResponse\r\n ).addCallback(self.assertEquals, {\"response\": DATA})\r\n\r\n def _work(_):\r\n return pp.doWork(NoResponse, arg=DATA)\r\n\r\n return pp.start(\r\n ).addCallback(_work\r\n ).addCallback(_check\r\n ).addCallback(lambda _: pp.stop())", "def callback(self, task_id, response):\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
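A minimal sketch of the behaviour described in the row above, using collections.deque to hold responses that were not explicitly waited for; the method and attribute names are illustrative assumptions, not the library's actual interface.

import collections

class ResponseStore:
    def __init__(self):
        self._available = collections.deque()

    def on_generic_response(self, request, response):
        # Default behaviour: keep the unsolicited response for later retrieval.
        self._available.append((request, response))

    def pop_response(self):
        # Return the oldest stored (request, response) pair, or None when empty.
        return self._available.popleft() if self._available else None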
Sample data from an empirical probability density function using inverse transform sampling.
def _sample_from_pdf(x, pdf, n):
    cum_sum = np.cumsum(pdf)
    inverse_density_function = interp1d(cum_sum, x)
    b = np.zeros(n)
    for i in range(len(b)):
        u = random.uniform(min(cum_sum), max(cum_sum))
        b[i] = inverse_density_function(u)
    return b
[ "def inverse_transform_sampling(self, uni_samples):\n if self.distribution == 'normal':\n self.samples = norm.ppf(uni_samples,\n loc=self.theta[0], scale=self.theta[1])\n\n elif self.distribution == 'lognormal':\n self.samples = np.exp(norm.ppf(uni_samples,\n loc=np.log(self.theta[0]),\n scale=self.theta[1]))\n elif self.distribution == 'uniform':\n self.samples = uniform.ppf(uni_samples,\n loc=self.theta[0],\n scale=self.theta[1]-self.theta[0])", "def inverse_transform(inv_cdf, **params):\n return inv_cdf(np.random.uniform(), **params)", "def inverse_cdf_sample(xs,ps):\n r = random.random()\n acc = 0\n for x,p in zip(xs,ps):\n acc += p\n if acc > r:\n return x", "def inverse_transformation_sample(cdf, rs):\n cdf = cdf + [1]\n r = rs.rand()\n n = len(cdf)\n for i in range(1, n):\n if cdf[i] >= r:\n return rs.randint(0, i)\n return rs.randint(0, i)", "def flat(self):\n i=0\n self.x=4\n self.volume=self.x**(self.dim)\n while(i<self.npoints):\n j=0\n while (j<self.dim):\n self.data[i,j]=np.random.uniform(0,self.x)\n j+=1\n self.like[i]=1/self.volume\n i+=1 \n self.maindensity=self.npoints/self.volume\n print(\"main density is\",self.maindensity)", "def _density_based_sample(data: pd.DataFrame, coords: list, portion=None, size=None, seed=None):\n clf = LocalOutlierFactor(n_neighbors=20, algorithm='auto',\n leaf_size=30, metric='minkowski',\n p=2, metric_params=None, contamination=0.1)\n\n # coords should already exist in data, get them by column names list\n data_coords = data[coords]\n clf.fit(data_coords)\n # original score is negative, the larger the denser\n density_score = clf.negative_outlier_factor_\n delta = density_score.max() - density_score.min()\n # density score to probability: the denser the less probability to be picked up\n probability_score = 1 - (density_score - density_score.min()) / delta\n probability_score = np.sqrt(probability_score)\n probability_score = probability_score / probability_score.sum()\n\n if size is not None:\n pass\n elif portion is not None:\n size = int(data_coords.index.size * portion)\n else:\n raise ValueError('Either portion or size should be provided.')\n if seed is not None:\n np.random.seed(seed)\n selected_cell_index = np.random.choice(data_coords.index,\n size=size,\n replace=False,\n p=probability_score) # choice data based on density weights\n\n # return the down sampled data\n return data.reindex(selected_cell_index)", "def sample(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return ExponentialDistBase.sample(self)", "def gen_data(n_sample=50,dist='uniform',random_var=0):\n \n if dist=='uniform':\n d=np.random.random(size=n_sample)\n if dist=='normal':\n d=np.random.normal(loc=1-random_var,scale=1+random_var,size=n_sample)\n if dist=='binomial':\n d=np.random.binomial(n=10,p=0.5+random_var/10,size=n_sample)\n if dist=='exponential':\n d=np.random.exponential(scale=0.5+random_var,size=n_sample)\n if dist=='poisson':\n d=np.random.poisson(lam=1.0+random_var,size=n_sample)\n if dist=='chisquare':\n d=np.random.chisquare(df=3+int(5*random_var),size=n_sample)\n if dist=='gamma':\n d=np.random.gamma(shape=1.0+random_var,scale=0.5+random_var,size=n_sample)\n if dist=='beta':\n d=np.random.beta(a=0.5+random_var,b=1.0+random_var,size=n_sample)\n if dist=='triangular':\n d=np.random.triangular(left=0.0,mode=np.min([0.5+random_var,1.0]),right=1.0,size=n_sample)\n if dist=='lognormal':\n d=np.random.lognormal(mean=1-random_var,sigma=1+random_var,size=n_sample)\n if dist=='laplace':\n 
d=np.random.laplace(scale=0.5+random_var,size=n_sample)\n \n # Normalize data\n d = d+np.abs(d.min())\n d = d/(d.max()-d.min())\n \n return d", "def target_sampler(seed_f, domain=[-1.0, 1.0], count=1000, noise_dev=0.01):\n \n res = []\n\n for i in xrange(count):\n x = np.random.random()*(domain[1]-domain[0])+domain[0]\n y = seed_f(x) + gauss_distribution(0.0, noise_dev)\n res.append([x,y])\n\n return np.array(res)", "def _emd_sample(density, n_samples, n_emd_samples, random_state=None):\n rng = check_random_state(random_state)\n X_arr = []\n i_sample = 0\n emd_sample = np.zeros(n_emd_samples)\n # Generate new samples only if needed for n_emd_samples\n while i_sample < n_emd_samples:\n X = density.sample(n_samples, random_state=rng)\n # Compare to all previous samples\n for X_other in X_arr:\n emd_sample[i_sample] = _compute_emd(X, X_other)\n i_sample += 1\n # Break if all needed samples have been computed\n if i_sample == n_emd_samples:\n return emd_sample\n X_arr.append(X)\n raise RuntimeError('Should have returned inside the nested loop. Must be bug in code.')", "def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density", "def sample(self):\n return np.random.dirichlet(self.alpha)", "def build_empirical_inverse_cdf(X):\n n = len(X)\n\n def f(prob):\n \"\"\"\n Args:\n prob (ndarray): vector with probablities to compute the inverse\n \"\"\"\n # assert 0<=prob<=1, 'Argument of inverse function is a probability >=0 and <= 1.'\n return X[np.minimum((n * np.array(prob)).astype(int), n - 1)]\n\n return f", "def probability_integral_transform(X):\n M = X.shape[0]\n N = X.shape[1]\n \n # convert X to U by using the probability integral transform: F(X) = U\n U = np.empty(X.shape)\n for ii in range(0,N):\n x_ii = X[:, ii]\n \n # estimate the empirical cdf \n (xx, pp) = ecdf(x_ii, M)\n f = interp1d(xx, pp) # TODO: experiment w/ different kinds of interpolation?\n # for example, cubic, or spline etc...?\n \n # plug this RV sample into the empirical cdf to get uniform RV\n u_ii = f(x_ii) \n U[:, ii] = u_ii\n \n return U", "def test_data_normalise():\n X = normal( size=(200, 50) )\n y = poisson( size=(200) )\n data = Data( X, y, add_constant=False, normalise=True, whiten=False )\n # check that the mean is 0 and standard deviation is 1\n array_almost_equal( np.mean( data.X, axis=0 ), 0 )\n array_almost_equal( np.std( data.X, axis=0 ), 1 )\n # whiten\n data = Data( X, y, add_constant=False, whiten=True )\n array_almost_equal( dot( data.X.T, data.X ), eye(50) )", "def _sample_n(self, n, seed=None):\n return self._inverse_scale.solvevec(\n samplers.normal(\n shape=ps.concat([[n], ps.shape(self._loc)], axis=0), seed=seed),\n adjoint=True)", "def sample_policy(self, obs):\n mu = self.logits(obs)\n pi = torch.distributions.Normal(loc=mu, scale=torch.exp(self.log_std))\n\n return pi", "def __sample_gauss_decoder(self, entry, data):\n mu, sigma = self.sess.run(\n [self.output_mean, self.output_stdv],\n feed_dict={entry: data})\n if self.hidden_layer_type == 'conv':\n sigma = np.reshape(\n sigma, newshape=(sigma.shape[0],1,1,sigma.shape[-1]))\n return np.random.randn(*mu.shape)*sigma+mu", "def iter_sample(self, noiseless=True):\n noise = self.noise.detach()\n X = self.X.clone().detach()\n y = self.y.clone().detach()\n N = X.size(0)\n Kff = self.kernel(X).contiguous()\n Kff.view(-1)[:: N + 1] += noise # add noise to the diagonal\n\n 
outside_vars = {\"X\": X, \"y\": y, \"N\": N, \"Kff\": Kff}\n\n def sample_next(xnew, outside_vars):\n \"\"\"Repeatedly samples from the Gaussian process posterior,\n conditioning on previously sampled values.\n \"\"\"\n warn_if_nan(xnew)\n\n # Variables from outer scope\n X, y, Kff = outside_vars[\"X\"], outside_vars[\"y\"], outside_vars[\"Kff\"]\n\n # Compute Cholesky decomposition of kernel matrix\n Lff = torch.linalg.cholesky(Kff)\n y_residual = y - self.mean_function(X)\n\n # Compute conditional mean and variance\n loc, cov = conditional(\n xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter\n )\n if not noiseless:\n cov = cov + noise\n\n ynew = torchdist.Normal(\n loc + self.mean_function(xnew), cov.sqrt()\n ).rsample()\n\n # Update kernel matrix\n N = outside_vars[\"N\"]\n Kffnew = Kff.new_empty(N + 1, N + 1)\n Kffnew[:N, :N] = Kff\n cross = self.kernel(X, xnew).squeeze()\n end = self.kernel(xnew, xnew).squeeze()\n Kffnew[N, :N] = cross\n Kffnew[:N, N] = cross\n # No noise, just jitter for numerical stability\n Kffnew[N, N] = end + self.jitter\n # Heuristic to avoid adding degenerate points\n if Kffnew.logdet() > -15.0:\n outside_vars[\"Kff\"] = Kffnew\n outside_vars[\"N\"] += 1\n outside_vars[\"X\"] = torch.cat((X, xnew))\n outside_vars[\"y\"] = torch.cat((y, ynew))\n\n return ynew\n\n return lambda xnew: sample_next(xnew, outside_vars)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
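The sampler above interpolates the inverse of the cumulative sum of a tabulated PDF. A self-contained numpy-only sketch of the same inverse transform idea is shown below, with an arbitrary Gaussian-shaped grid standing in for the real PDF; it normalises the CDF to [0, 1] and draws uniform variates there rather than on the raw cumulative-sum range.

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(-4.0, 4.0, 400)
pdf = np.exp(-0.5 * x**2)                  # unnormalised Gaussian shape on a grid

cdf = np.cumsum(pdf)
cdf = (cdf - cdf[0]) / (cdf[-1] - cdf[0])  # normalise the CDF to [0, 1]

u = rng.uniform(0.0, 1.0, size=10_000)
samples = np.interp(u, cdf, x)             # inverse CDF by linear interpolation

print(samples.mean(), samples.std())       # close to 0 and 1 for this shape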
Estimates the log(R_5) dispersion probability density function for a given log(R'HK) value using the bivariate KDE distribution presented in Gomes da Silva et al. (2020). Can use values from different catalogues if 'filepath' is not 'None'.
def get_rhk_std_pdf(log_rhk, bw=0.07, subset="all", key_x="log_rhk_med", key_y="log_sig_r5",
                    filepath=None, show_plot=True, save_plot=False, savepath="rhk_std_kde.pdf"):
    if not filepath:
        filepath = os.path.join(os.path.dirname(__file__), "data.csv")

    if log_rhk < -5.5 or log_rhk > -3.6:
        print("*** ERROR: log_rhk outside data boundaries [-5.5, -3.6]")
        return np.nan, np.nan

    df = pd.read_csv(filepath, index_col=0)

    if subset == 'all':
        pass
    elif subset == 'MS':
        df = df[df.lum_class == 'V']
    elif subset == 'dF':
        df = df[df.lum_class == 'V']
        df = df[df.sptype.str.contains('F')]
    elif subset == 'dG':
        df = df[df.lum_class == 'V']
        df = df[df.sptype.str.contains('G')]
    elif subset == 'dK':
        df = df[df.lum_class == 'V']
        df = df[df.sptype.str.contains('K')]
    else:
        print("*** ERROR: subset must be either 'all', 'MS', 'dF', 'dG', or 'dK'.")
        return np.nan, np.nan

    x = df[key_x].values
    y = df[key_y].values

    X, Y, Z = _kde2d_sklearn(x, y, thresh=1e-100, bw=bw, xlim=[-5.5, -3.6], ylim=[-3, 0.5],
                             xbins=400j, ybins=400j)

    idx = _find_nearest(X, log_rhk)

    step = (max(Y[idx]) - min(Y[idx])) / (Y[idx].size - 1)
    probi = Z[idx] / Z[idx].max()
    probi /= sum(probi)
    probi /= step

    plt.figure(figsize=(5, 3.6 * 1.5))

    plt.subplot(211)
    _plot_rhk_kde2d(x, y, bw, xlabel=r"$\log~R'_\mathrm{HK}$ [dex]",
                    ylabel=r"$\log~\sigma~(R_5)$ [dex]", show_points=True, show_plot=False)
    plt.axvline(log_rhk, color='w')

    plt.subplot(212)
    ax = plt.gca()
    ax.plot(Y[idx], probi, 'k-')
    ax.set_ylabel("Probability density", fontsize=12)
    ax.set_xlabel(r"$\log~\sigma~(R_5)$ [dex]", fontsize=12)
    plt.legend(frameon=False, fontsize=8)

    plt.tight_layout()

    if save_plot:
        plt.savefig(savepath)
    if show_plot:
        plt.show()
    plt.close()

    return Y[idx], probi
[ "def density_estimation(sample, X, h, kernel=\"epanechnikov\"):\n kde = KernelDensity(kernel=kernel, bandwidth=h).fit(sample.reshape(-1, 1))\n log_dens = kde.score_samples(X.reshape(-1, 1))\n density = np.exp(log_dens)\n return density", "def get_edensity_from_hu(self,huvalue):\n f = interp1d(self.ct_hu, self.re_electronic_density)\n \n \n if huvalue > self.hu_max :\n from .util import message_box\n message = \"HU is bigger than the maximun of HU-EDensity curve, \\\n Using the curve's HU maximun for following processing.\\\n current HU = {0}, the curve HU maximun = {1}\".format(huvalue,self.hu_max)\n title = \"HU out of the HU-EDensity Curve Range\" \n message_box(message,title,\"information\")\n \n return float(f(self.hu_max))\n \n if huvalue < self.hu_min :\n from .util import message_box\n message = \"HU is smaller than the minimun of HU-EDensity curve, \\\n Using the curve's HU minimun for following processing. \\\n current HU = {0}, the curve HU minimun = {1}\".format(huvalue,self.hu_min)\n title = \"HU out of the HU-EDensity Curve Range\" \n message_box(message,title,\"information\")\n \n return float( f(self.hu_min))\n \n return float(f(huvalue))", "def plot_spectral_density(self, filename=None):\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n \n ax.loglog(self.wavenumber, self.spectral_density())\n ax.set_xlabel(r'$k$ in $\\mathrm{m}^{-1}$')\n ax.set_ylabel(r'$F$')\n ax.grid()\n ax.set_title('Spectral density as function of wavenumber')\n \n \n if filename:\n fig.savefig(filename)\n else:\n fig.show()", "def simulate_rhk_population(n_samples, subset='all', bw=0.07, key_x=\"log_rhk_med\", key_y=\"log_sig_r5\", filepath=None, show_plot=True, save_plot=False, savepath1=\"rhk_sim_hists.pdf\", savepath2=\"rhk_sim_maps.pdf\"):\n if not filepath:\n filepath = os.path.join(os.path.dirname(__file__), \"data.csv\")\n\n df = pd.read_csv(filepath)\n\n if subset == 'all':\n pass\n elif subset == 'MS':\n df = df[df.lum_class == 'V']\n elif subset == 'dF':\n df = df[df.lum_class == 'V']\n df = df[df.sptype.str.contains('F')]\n elif subset == 'dG':\n df = df[df.lum_class == 'V']\n df = df[df.sptype.str.contains('G')]\n elif subset == 'dK':\n df = df[df.lum_class == 'V']\n df = df[df.sptype.str.contains('K')]\n else:\n print(\"*** ERROR: subset must be either 'all', 'MS', 'dF', 'dG', or 'dK'.\")\n return np.nan, np.nan\n\n x = df[key_x].values\n y = df[key_y].values\n\n kde_x, kde_xz, _ = _kde1d(x, bw=bw, n=100, xlims=[-5.5, -3.6])\n kde_y, kde_yz, _ = _kde1d(y, bw=bw, n=100, xlims=[-3, 0.5])\n\n x_samples = _sample_from_pdf(kde_x, kde_xz, n=n_samples)\n\n xx, yy, zz = _kde2d_sklearn(x, y, bw=bw, xlim=[-5.5, -3.6], ylim=[-3.0, 0.5], xbins=100j, ybins=100j)\n\n y_samples = np.ones_like(x_samples)\n for i, x_val in enumerate(tqdm.tqdm(x_samples)):\n time.sleep(0.01)\n idx = _find_nearest(xx, x_val)\n y_samples[i] = _sample_from_pdf(yy[idx], zz[idx], n=1)[0]\n\n xx_sim, yy_sim, zz_sim = _kde2d_sklearn(x_samples, y_samples, bw=bw, xlim=[-5.5, -3.6], ylim=[-3.0, 0.5], xbins=100j, ybins=100j)\n\n\n plt.figure(figsize=(5, 3.6*1.5))\n\n xlabel = r\"$\\log~R'_\\mathrm{HK}$ [dex]\"\n ylabel = r\"$\\log~\\sigma~(R_5)$ [dex]\"\n\n # Histograms:\n plt.subplot(211)\n bins = np.arange(-5.5, -3.6, 0.05)\n plt.hist(x_samples, color='r', alpha=0.5, density=True, bins=bins, label='sampled data')\n plt.hist(x, color='k', histtype='step', bins=bins, density=True, label='real data')\n plt.plot(kde_x, kde_xz, 'k-', label='PDF')\n plt.xlabel(xlabel, fontsize=12)\n plt.ylabel(\"Probability density\", fontsize=12)\n 
plt.legend(frameon=False, fontsize=10)\n\n plt.subplot(212)\n bins = np.arange(-3, 0.5, 0.1)\n plt.hist(y_samples, color='r', alpha=0.5, density=True, bins=bins, label='sampled data')\n plt.hist(y, color='k', histtype='step', bins=bins, density=True, label=\"real data\")\n plt.plot(kde_y, kde_yz, 'k-', label='PDF')\n plt.xlabel(ylabel, fontsize=12)\n plt.ylabel(\"Probability density\", fontsize=12)\n plt.legend(frameon=False, fontsize=10)\n\n plt.tight_layout()\n\n if save_plot:\n plt.savefig(savepath1)\n\n if show_plot:\n plt.show()\n plt.close()\n\n # bivariate KDE:\n plt.figure(figsize=(5, 3.6*1.5))\n\n plt.subplot(211)\n plt.pcolormesh(xx, yy, zz, cmap='Spectral_r', shading='gouraud')\n plt.plot(x, y, 'w.', ms=1.5)\n plt.xlim(-5.5, -3.6)\n plt.annotate(f\"N = {x.size}\", xy=(0.05, 0.9), xycoords='axes fraction', color='w')\n plt.ylabel(ylabel, fontsize=12)\n\n plt.subplot(212)\n plt.pcolormesh(xx_sim, yy_sim, zz_sim, cmap='Spectral_r', shading='gouraud')\n plt.plot(x_samples, y_samples, 'w.', ms=1.5)\n plt.xlim(-5.5, -3.6)\n plt.annotate(f\"N = {x_samples.size}\", xy=(0.05, 0.9), xycoords='axes fraction', color='w')\n plt.ylabel(ylabel, fontsize=12)\n plt.xlabel(xlabel, fontsize=12)\n\n plt.tight_layout()\n\n if save_plot:\n plt.savefig(savepath2)\n\n if show_plot:\n plt.show()\n plt.close()\n\n return x_samples, y_samples", "def estimate_kde(values):\n values = list(map(float, values))\n kde = scipy.stats.gaussian_kde(values)\n return kde", "def numerical_HL(self, rho = 804.3, drho = 0.1, T = 218.15, accum = 0.025):\n\n rho = rho\n rhos = np.arange(self.rho_o*1000, rho, drho)\n # rhos = np.array((self.rho_o*1000, rho))\n\n sigma_o = 0.\n\n def dsigma2_dtD_upper(sig, rhos):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_deuterium = firn_diffusivity_instance.deuterium(f_factor_version = self.f_factor_deuterium)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n dsigma2dt = 2./drho_dt*firn_diffusivity_deuterium - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt18_upper(sig, rhos):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o18 = firn_diffusivity_instance.o18(f_factor_version = self.f_factor_o18)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o18 - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt17_upper(sig, rhos):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o17 = firn_diffusivity_instance.o17(f_factor_version = self.f_factor_o17)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o17 - 2./rhos*sig\n return dsigma2dt\n\n\n\n def dsigma2_dtD_lower(sig, rhos, accum):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_deuterium = firn_diffusivity_instance.deuterium(f_factor_version = self.f_factor_deuterium)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n # drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n drho_dt = 1000*self.f1*575*np.exp(-21400/(self.R*T))*np.sqrt(accum)*(self.rho_i - 
rhos/1000.)\n dsigma2dt = 2./drho_dt*firn_diffusivity_deuterium - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt18_lower(sig, rhos, accum):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o18 = firn_diffusivity_instance.o18(f_factor_version = self.f_factor_o18)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n # drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n drho_dt = 1000*self.f1*575*np.exp(-21400/(self.R*T))*np.sqrt(accum)*(self.rho_i - rhos/1000.)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o18 - 2./rhos*sig\n return dsigma2dt\n\n\n def dsigma2_dt17_lower(sig, rhos, accum):\n firn_diffusivity_instance = diffusivity.FirnDiffusivity(rhos, rho_co = self.rho_co*1000, \\\n T = T, P = self.P)\n firn_diffusivity_o17 = firn_diffusivity_instance.o17(f_factor_version = self.f_factor_o17)\\\n *3600*24*365.25 ##diffusivity in m2yr-1\n # drho_dt = 1000*self.fo*11*np.exp(-10160/(self.R*T))*accum*(self.rho_i - rhos/1000)\n drho_dt = 1000*self.f1*575*np.exp(-21400/(self.R*T))*np.sqrt(accum)*(self.rho_i - rhos/1000.)\n dsigma2dt = 2./drho_dt*firn_diffusivity_o17 - 2./rhos*sig\n return dsigma2dt\n\n\n if rho<=550.:\n rhos = np.arange(self.rho_o*1000, rho, drho)\n sigma2_deuterium = sp.integrate.odeint(dsigma2_dtD_upper, 0, rhos)\n sigma2_18 = sp.integrate.odeint(dsigma2_dt18_upper, 0, rhos)\n sigma2_17 = sp.integrate.odeint(dsigma2_dt17_upper, 0, rhos)\n\n elif rho>550.:\n rhos = np.array((self.rho_o*1000, 550.))\n sigma2_deuterium_cr = sp.integrate.odeint(dsigma2_dtD_upper, 0., rhos)[1]\n sigma2_18_cr = sp.integrate.odeint(dsigma2_dt18_upper, 0., rhos)[1]\n sigma2_17_cr = sp.integrate.odeint(dsigma2_dt17_upper, 0., rhos)[1]\n\n rhos = np.arange(550., rho, drho)\n sigma2_deuterium = sp.integrate.odeint(dsigma2_dtD_lower, sigma2_deuterium_cr, rhos, args = (accum,))\n sigma2_18 = sp.integrate.odeint(dsigma2_dt18_lower, sigma2_18_cr, rhos, args = (accum,))\n sigma2_17 = sp.integrate.odeint(dsigma2_dt17_lower, sigma2_17_cr, rhos, args = (accum,))\n\n\n\n\n\n return rhos, np.sqrt(sigma2_deuterium), np.sqrt(sigma2_18), np.sqrt(sigma2_17)", "def HKLog(x0,u0,x1,u1,HKScale=1.):\n\n # list of absolute values of transport distance: \\|T(x)-x\\|\n distList=np.linalg.norm(x1-x0,axis=1)/HKScale\n \n v0=np.einsum(np.sqrt(u1/(u0+1E-16))*np.sin(distList)/(distList+1E-16),[0],(x1-x0),[0,1],[0,1])\n alpha0=-2.+2.*np.sqrt(u1/(u0+1E-16))*np.cos(distList)\n \n return (v0,alpha0)", "def compute_dispersion(dx, dt):\n my = np.load(m_for_fourier_analysis_file)\n transformed = np.log10(np.power(np.abs(fft.fftshift(fft.fft2(my))), 2))\n m, n = transformed.shape\n print m,n\n\n freq = fft.fftshift(fft.fftfreq(m, dt))\n kx = fft.fftshift(fft.fftfreq(n, dx/(2.0*np.pi)))\n\n with open(dispersion_data_file, \"w\") as f:\n f.write('# kx (nm^-1) frequency (GHz) FFT_Power (arb. 
unit)\\n')\n for j in range(n):\n for i in range(m):\n f.write(\"%15g %15g %15g\\n\" % (kx[n-j-1], freq[i], transformed[i][j]))\n f.write('\\n')", "def rho(self, d):\n\t\tif (self.distribution == 'spherical'):\n\t\t\t# log(rho) = (13.86 +/- 0.47) - (3.34 +/- 0.11)*log(R) [Galactocentric distance] (Wetterer 1991)\n\t\t\trho = 10**(13.86 - 3.34*log10(d*1e3))\n\t\t\terr = 10**(0.47 - 0.11*log10(d*1e3))\n\t\t\t\n\t\telif (self.distribution == 'ellipsoidal'):\n\t\t\t# log(rho) = (15.71 +/- 0.56) - (3.76 +/- 0.13)*log(a) [Galactocentric semimajor distance] (Wetterer 1991)\n\t\t\trho = 10**(15.71 - 3.76*log10(d*1e3))\n\t\t\terr = 10**(0.56 - 0.13*log10(d*1e3))\n\t\telse:\traise TypeError, 'Spatial density distribution unknown, only spherical or ellipsoidal available'\n\n\t\t\n\t\treturn [rho, err]", "def log_interp(ff, r, rho_min_jt, dr_jt):\n if r<np.exp(rho_min_jt)/2: return ff[0] # here should be less than or equal gg[0]\n\n lr = np.log(r)\n k=int((lr-rho_min_jt)/dr_jt)\n nr = len(ff)\n k = min(max(k,2), nr-4)\n dy=(lr-rho_min_jt-k*dr_jt)/dr_jt\n\n fv = (-dy*(dy**2-1.0)*(dy-2.0)*(dy-3.0)*ff[k-2] \n +5.0*dy*(dy-1.0)*(dy**2-4.0)*(dy-3.0)*ff[k-1] \n -10.0*(dy**2-1.0)*(dy**2-4.0)*(dy-3.0)*ff[k]\n +10.0*dy*(dy+1.0)*(dy**2-4.0)*(dy-3.0)*ff[k+1]\n -5.0*dy*(dy**2-1.0)*(dy+2.0)*(dy-3.0)*ff[k+2]\n +dy*(dy**2-1.0)*(dy**2-4.0)*ff[k+3])/120.0 \n\n return fv", "def read_f_RH(ceil_lam):\n\n # temp file name\n miedir = 'C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/clearFO/data/Mie/'\n # filename = 'calculated_ext_f(RH)_' + str(ceil_lam) + 'nm.csv'\n filename = 'sp_ew_ceil_guass_908-912_ext_f(RH)_908-912nm.csv'\n\n # read data\n raw = np.loadtxt(miedir + filename, delimiter=',')\n\n f_RH = {'RH': raw[:, 0],\n 'f_RH': raw[:, 1]}\n\n return f_RH", "def sdO(data):\n required_parameters = ['log_center_T', 'log_center_Rho', 'log_LHe', 'c_core_mass', 'log_Teff', 'age']\n _check_history_parameters(data, required_parameters, evol_phase='sdO')\n\n ages = HeCoreBurning(data, return_age=True)\n\n # Core He Burning phase is required\n if ages is None:\n return None\n else:\n a1, a2 = ages\n\n d = data[(data['age'] > a1) & (data['age'] < a2)]\n\n teff = 10 ** avg_(d, 'log_Teff')\n\n if teff < 40000:\n return None\n else:\n return np.where((data['age'] > a1) & (data['age'] < a2) & (10**data['log_Teff'] >= 40000))", "def plot_rho_value(rhos, values, directory):\n rho_base = 0.4286224337994642\n steps = np.arange(len(rhos))\n plt.subplot(1,2,1)\n plt.plot(rhos, color='r')\n plt.plot(rho_base*np.ones(len(rhos)), linestyle = 'dashed', color='k')\n plt.grid(True)\n plt.title('Gain (ρ)', color='black')\n plt.xlabel('Steps (x10³)')\n plt.subplot(1,2,2)\n plt.plot(values, color='k')\n plt.grid(True)\n plt.title('Value recurrent state (sI)', color='black')\n plt.xlabel('Steps (x10³)')\n plt.show()\n plt.savefig(directory+'evol_rho_value_sI.png')\n plt.close()", "def get_hu_from_edensity(self,edensityvalue):\n f = interp1d(self.re_electronic_density,self.ct_hu)\n \n \n if edensityvalue > self.ed_max :\n from .util import message_box\n message = \"Electronic density is bigger than the maximun of HU-EDensity curve, \\\n Using the curve's ED maximun for following processing.\\\n current ed = {0}, the curve ED maximun = {1}\".format(edensityvalue,self.ed_max)\n title = \"ED out of the HU-EDensity Curve Range\" \n message_box(message,title,\"information\")\n \n return int (f(self.ed_max))\n \n if edensityvalue < self.ed_min :\n from .util import message_box\n message = \"Electronic density is smaller than the 
minimun of HU-EDensity curve, \\\n Using the curve's ED minimun for following processing.\\\n current ed = {0}, the curve ED minimun = {1}\".format(edensityvalue,self.ed_min)\n title = \"ED out of the HU-EDensity Curve Range\" \n message_box(message,title,\"information\")\n \n return int(f(self.ed_min))\n \n \n return int(f(edensityvalue))", "def log_likelihood_ratio_test(signal_kde_coords, signal_kde_values, noise_kde_coords, noise_kde_values, log_like_thresh, LIB_window, ifos, ppdir):\n\t\t\n\tif not os.path.exists(\"%s/LIB_trigs/\"%ppdir):\n\t\tos.makedirs(\"%s/LIB_trigs/\"%ppdir)\n\t\n\t#Save the log likelihood used for this LLRT test\n\tthresh_log_like_ratio = log_like_thresh\n\tnp.savetxt(\"%s/LIB_trigs/threshold_log_likelihood_ratio.txt\"%ppdir, np.array([thresh_log_like_ratio]))\n\t\n\t#Open write files for LIB trigs and their corresponding timeslides for both 0-lags and timeslide triggers\n\tlib_0lag_times = open('%s/LIB_trigs/LIB_0lag_times_%s%s.txt'%(ppdir, ifos[0], ifos[1]), 'wt')\n\tlib_0lag_timeslides = open('%s/LIB_trigs/LIB_0lag_timeslides_%s%s.txt'%(ppdir, ifos[0], ifos[1]), 'wt')\n\tlib_ts_times = open('%s/LIB_trigs/LIB_ts_times_%s%s.txt'%(ppdir, ifos[0], ifos[1]), 'wt')\n\tlib_ts_timeslides = open('%s/LIB_trigs/LIB_ts_timeslides_%s%s.txt'%(ppdir, ifos[0], ifos[1]), 'wt')\n\t\n\t#Build LLRT object for 0-lag and for each timeslide\n\tfiles_all = sorted(os.listdir(\"%s/coincident/\"%ppdir))\n\tfor f in files_all:\n\t\t#Load in coincident omicron data for each timeslide\n\t\tterms = f.split(\"_\")\n\t\ttshift = float(terms[5].split(\"ts\")[1])\n\t\ttry:\n\t\t\tdata_array = np.genfromtxt(\"%s/coincident/%s\"%(ppdir,f)).reshape((-1,12))\n\t\texcept IOError:\n\t\t\tdata_array = np.array([])\n\n\t\t#Build calc_info dictionary\n\t\tcalc_info = {}\n\t\tcalc_info['interp method'] = 'Grid Linear'\n\t\tcalc_info['extrap method'] = 'Grid Nearest'\n\n\t\t#Build param_info dictionary\n\t\tparam_info = {}\n\t\tparam_info['delta_t'] = {}\n\t\tparam_info['delta_t']['dimension'] = 1\n\t\tparam_info['delta_t']['param names'] = ['delta_t']\n\t\tparam_info['delta_t']['interp range'] = np.array([[-0.05, 0.05]])\n\t\t\n\t\t#Load likelihood estimate for signal\n\t\ttrain_signal_data = {}\n\t\ttrain_signal_data['delta_t'] = {}\n\t\ttrain_signal_data['delta_t']['KDE'] = ([np.load(signal_kde_coords),np.load(signal_kde_values)])\n\t\t\n\t\t#Load likelihood estimate for noise\n\t\ttrain_noise_data = {}\n\t\ttrain_noise_data['delta_t'] = {}\n\t\ttrain_noise_data['delta_t']['KDE'] = ([np.load(noise_kde_coords),np.load(noise_kde_values)])\n\n\t\t#Build foreground_data dictionary\n\t\ttry:\n\t\t\tdt_tmp = data_array[:,4] - data_array[:,8]\n\t\texcept IndexError:\n\t\t\tdt_tmp = np.array([])\n\n\t\tforeground_data = {}\n\t\tforeground_data['npoints'] = len(dt_tmp)\t\t\n\t\tforeground_data['delta_t'] = {}\n\t\tforeground_data['delta_t']['data'] = np.transpose(np.array([dt_tmp]))\n\t\t\t\t\n\t\tif foreground_data['delta_t']['data'].any():\n\t\t\t#Initialize the LLRT object\n\t\t\tLLRT = LLRT_object_beta.LLRT(calc_info=calc_info, param_info=param_info, train_signal_data=train_signal_data, train_noise_data=train_noise_data, foreground_data=foreground_data)\n\t\t\n\t\t\t#Find foreground trigs that are above the passed log likelihood threshold and save them to file\n\t\t\ttrigs_above_thresh = data_array[LLRT.LLR_above_thresh(threshold=thresh_log_like_ratio, groundtype='Foreground')]\n\t\t\ttrigs_above_thresh = cluster_LIB_trigs(LIB_trig_array=trigs_above_thresh, LIB_window=LIB_window)\n\t\t\t\n\t\t\t#Save LIB 
triggers\n\t\t\tnp.savetxt('%s/LIB_trigs/LIB_trigs_%s%s_ts%s.txt'%(ppdir, ifos[0], ifos[1], tshift), trigs_above_thresh)\n\t\t\tif tshift == 0.:\n\t\t\t\tfor i in xrange(len(trigs_above_thresh)):\n\t\t\t\t\tlib_0lag_times.write('%10.10f\\n'%trigs_above_thresh[i,0])\n\t\t\t\t\tlib_0lag_timeslides.write('0. %s\\n'%(tshift))\n\t\t\telse:\n\t\t\t\tfor i in xrange(len(trigs_above_thresh)):\n\t\t\t\t\tlib_ts_times.write('%10.10f\\n'%trigs_above_thresh[i,0])\n\t\t\t\t\tlib_ts_timeslides.write('0. %s\\n'%(tshift))\n\t\telse:\n\t\t\tos.system('touch %s/LIB_trigs/LIB_trigs_%s%s_ts%s.txt'%(ppdir, ifos[0], ifos[1], tshift))\n\t\n\tlib_0lag_times.close()\n\tlib_0lag_timeslides.close()\n\tlib_ts_times.close()\n\tlib_ts_timeslides.close()", "def get_data(self, key):\n if key not in Hdf5Reader.data_paths:\n raise KeyError('Dictionary key not in valid keys. Use get_data_by_path')\n\n hdf = H.File(self.file_name, 'r')\n\n if key == 'lb_velocity_x':\n data = hdf[Hdf5Reader.data_paths[key]][()][1]\n\n elif key == 'lb_velocity_y':\n data = hdf[Hdf5Reader.data_paths[key]][()][0]\n\n elif key == 'dlvo_x':\n data = hdf[Hdf5Reader.data_paths['edl_x']][()] +\\\n hdf[Hdf5Reader.data_paths['attractive_x']][()]\n # hdf[Hdf5Reader.data_paths['lewis_x']][()] +\\\n # hdf[Hdf5Reader.data_paths['lvdw_x']][()]\n data = data[0]\n\n elif key == 'dlvo_y':\n data = hdf[Hdf5Reader.data_paths['edl_y']][()] +\\\n hdf[Hdf5Reader.data_paths['attractive_y']][()]\n # hdf[Hdf5Reader.data_paths['lewis_y']][()] +\\\n # hdf[Hdf5Reader.data_paths['lvdw_y']][()]\n data = data[0]\n\n elif key == 'dlvo_fine':\n data = hdf[Hdf5Reader.data_paths['edl_fine']][()] + \\\n hdf[Hdf5Reader.data_paths['attractive_fine']][()]\n data = data[0]\n\n elif key in ('lvdw_x', 'lvdw_y',\n 'lewis_x', 'lewis_y',\n 'edl_x', 'edl_y',\n 'dlvo_x', 'dlvo_y',\n 'attractive_x',\n 'attractive_y',\n 'distance_array',\n 'edl_fine',\n 'attractive_fine',\n 'distance_fine'):\n\n data = hdf[Hdf5Reader.data_paths[key]][()][0]\n\n else:\n data = hdf[Hdf5Reader.data_paths[key]][()]\n\n hdf.close()\n return data", "def plot_kernel_density_posterior(history, parameter, xmin, xmax):\n\n fig, ax = plt.subplots()\n for t in range(history.max_t + 1):\n df, w = history.get_distribution(m=0, t=t)\n pyabc.visualization.plot_kde_1d(\n df, w, xmin=xmin, xmax=xmax, x=parameter, ax=ax, label=\"PDF t={}\".format(t)\n )\n ax.legend()", "def log_density(z_0, latent_stats, log_det_jacobian):\n\n batch_size = z.shape[0]\n\n # Base distribution is diagonal-covariance Gaussian\n log_qz0Cx = math.log_density_gaussian(z_0, mu=latent_stats['mu'], logvar=latent_stats['logvar']).view(batch_size, -1).sum(1)\n\n # Sum LDJ over flow steps [1,...K]\n log_qzKCx = log_qz0Cx - log_det_jacobian.sum(dim=1) # Shape (B)\n\n return log_qzKCx", "def supp_figure_1():\r\n\r\n # Load the data\r\n df = pd.read_csv(os.path.join(DATA_DIR, 'Bogue Banks Volumes and Aspect Ratios.csv'))\r\n\r\n # Set bins edges\r\n data_set = sorted(set(df['Volume'].dropna()))\r\n bins = np.append(data_set, data_set[-1] + 1)\r\n\r\n # Use the histogram function to bin the data and find the CDF\r\n counts, bin_edges = np.histogram(df['Volume'].dropna(), bins=bins, normed=True, density=False)\r\n counts = counts.astype(float) / len(df['Volume'])\r\n cdf = np.cumsum(counts)\r\n\r\n # Find the percentile for a volume of 52m3/m\r\n use_vol = (53.5 + 50.6) / 2\r\n vol_diff = np.abs(bin_edges[1:] - use_vol)\r\n min_ix = np.argmin(vol_diff)\r\n use_percentile = cdf[min_ix]\r\n\r\n # Setup the figure\r\n fig, ax = plt.subplots(figsize=(figure_inches, 
figure_inches), dpi=figure_dpi)\r\n\r\n # Add a grid\r\n add_grids(ax)\r\n\r\n # Plot the CDF\r\n ax.plot(bin_edges[1:], cdf, zorder=2)\r\n\r\n # Add a line for the model volume\r\n ax.axvline(x=use_vol, ymin=0, ymax=use_percentile, color='black', linestyle='--', zorder=4)\r\n ax.axhline(y=use_percentile, xmin=0, xmax=(use_vol / 400), color='black', linestyle='--', zorder=4)\r\n ax.scatter(x=use_vol, y=use_percentile, color='red', marker='o', s=25, zorder=6)\r\n\r\n # Set axes limits\r\n axis_limits(ax, l=0, r=400, b=0, t=1)\r\n\r\n # Label the axes\r\n add_label(ax, s='Dune Volume (m$^{3}$/m)', type='X')\r\n add_label(ax, s='CDF', type='Y')\r\n add_label(ax, s=f'{use_vol} m3/m\\n{np.around(use_percentile * 100, decimals=2)}th Percentile', type='T')\r\n\r\n # Save and close\r\n save_and_close(fig=fig, title='Bogue Banks Dune Volume CDF', tight=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simulate stellar populations with median values of log(R'HK) and log(sigma(R5)) by sampling from the activity-variability level bivariate KDE presented in Gomes da Silva et al. (2020). Values from different catalogues can be used if 'filepath' is not 'None'.
def simulate_rhk_population(n_samples, subset='all', bw=0.07, key_x="log_rhk_med", key_y="log_sig_r5", filepath=None, show_plot=True, save_plot=False, savepath1="rhk_sim_hists.pdf", savepath2="rhk_sim_maps.pdf"): if not filepath: filepath = os.path.join(os.path.dirname(__file__), "data.csv") df = pd.read_csv(filepath) if subset == 'all': pass elif subset == 'MS': df = df[df.lum_class == 'V'] elif subset == 'dF': df = df[df.lum_class == 'V'] df = df[df.sptype.str.contains('F')] elif subset == 'dG': df = df[df.lum_class == 'V'] df = df[df.sptype.str.contains('G')] elif subset == 'dK': df = df[df.lum_class == 'V'] df = df[df.sptype.str.contains('K')] else: print("*** ERROR: subset must be either 'all', 'MS', 'dF', 'dG', or 'dK'.") return np.nan, np.nan x = df[key_x].values y = df[key_y].values kde_x, kde_xz, _ = _kde1d(x, bw=bw, n=100, xlims=[-5.5, -3.6]) kde_y, kde_yz, _ = _kde1d(y, bw=bw, n=100, xlims=[-3, 0.5]) x_samples = _sample_from_pdf(kde_x, kde_xz, n=n_samples) xx, yy, zz = _kde2d_sklearn(x, y, bw=bw, xlim=[-5.5, -3.6], ylim=[-3.0, 0.5], xbins=100j, ybins=100j) y_samples = np.ones_like(x_samples) for i, x_val in enumerate(tqdm.tqdm(x_samples)): time.sleep(0.01) idx = _find_nearest(xx, x_val) y_samples[i] = _sample_from_pdf(yy[idx], zz[idx], n=1)[0] xx_sim, yy_sim, zz_sim = _kde2d_sklearn(x_samples, y_samples, bw=bw, xlim=[-5.5, -3.6], ylim=[-3.0, 0.5], xbins=100j, ybins=100j) plt.figure(figsize=(5, 3.6*1.5)) xlabel = r"$\log~R'_\mathrm{HK}$ [dex]" ylabel = r"$\log~\sigma~(R_5)$ [dex]" # Histograms: plt.subplot(211) bins = np.arange(-5.5, -3.6, 0.05) plt.hist(x_samples, color='r', alpha=0.5, density=True, bins=bins, label='sampled data') plt.hist(x, color='k', histtype='step', bins=bins, density=True, label='real data') plt.plot(kde_x, kde_xz, 'k-', label='PDF') plt.xlabel(xlabel, fontsize=12) plt.ylabel("Probability density", fontsize=12) plt.legend(frameon=False, fontsize=10) plt.subplot(212) bins = np.arange(-3, 0.5, 0.1) plt.hist(y_samples, color='r', alpha=0.5, density=True, bins=bins, label='sampled data') plt.hist(y, color='k', histtype='step', bins=bins, density=True, label="real data") plt.plot(kde_y, kde_yz, 'k-', label='PDF') plt.xlabel(ylabel, fontsize=12) plt.ylabel("Probability density", fontsize=12) plt.legend(frameon=False, fontsize=10) plt.tight_layout() if save_plot: plt.savefig(savepath1) if show_plot: plt.show() plt.close() # bivariate KDE: plt.figure(figsize=(5, 3.6*1.5)) plt.subplot(211) plt.pcolormesh(xx, yy, zz, cmap='Spectral_r', shading='gouraud') plt.plot(x, y, 'w.', ms=1.5) plt.xlim(-5.5, -3.6) plt.annotate(f"N = {x.size}", xy=(0.05, 0.9), xycoords='axes fraction', color='w') plt.ylabel(ylabel, fontsize=12) plt.subplot(212) plt.pcolormesh(xx_sim, yy_sim, zz_sim, cmap='Spectral_r', shading='gouraud') plt.plot(x_samples, y_samples, 'w.', ms=1.5) plt.xlim(-5.5, -3.6) plt.annotate(f"N = {x_samples.size}", xy=(0.05, 0.9), xycoords='axes fraction', color='w') plt.ylabel(ylabel, fontsize=12) plt.xlabel(xlabel, fontsize=12) plt.tight_layout() if save_plot: plt.savefig(savepath2) if show_plot: plt.show() plt.close() return x_samples, y_samples
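A brief, hedged usage sketch (not part of the dataset record): the call below assumes the function above has been imported from its package, whose name is not given here, and that its internal helpers (`_kde1d`, `_sample_from_pdf`, ...) are available as in the original module.

# Hypothetical call; the keyword values are illustrative only.
x_sim, y_sim = simulate_rhk_population(
    n_samples=500,
    subset="dG",          # G dwarfs only
    show_plot=False,      # skip the interactive matplotlib windows
    save_plot=False,
)
# x_sim holds sampled log(R'HK) medians, y_sim the matching log(sigma(R5)) values,
# one pair per simulated star.
print(len(x_sim), len(y_sim))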
[ "def load_sample_data_opt():\n sd_excel_1 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625, 22.5, 24.375, 26.25, 28.125, 30, 31.875, 33.75, 35.625, 37.5, 39.375, 41.25, 43.125, 45, 46.875, 48.75, 50.625, 52.5, 54.375, 56.25, 58.125, 60])\n time_excel_1 = sd_excel_1/50\n cof_excel_1 = np.array([0.1832, 0.21344, 0.214, 0.16829, 0.20092, 0.21337, 0.2154, 0.20336, 0.21587, 0.23699, 0.23952, 0.21664, 0.2014, 0.19174, 0.18813, 0.18991, 0.18264, 0.17702, 0.1875, 0.20547, 0.20907, 0.19216, 0.18327, 0.1949, 0.19512, 0.21268, 0.27634, 0.34194, 0.42582, 0.56496, 0.70509, 0.89468, 1.03188])\n\n # 6.5N, T=250, F=6.5, P=0.36, v=50, h0=25\n sd_excel_2 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625, 22.5, 24.375, 26.25, 28.125, 30, 31.875, 33.75, 35.625, 37.5, 39.375, 41.25, 43.125, 45, 46.875, 48.75, 50.625, 52.5])\n time_excel_2 = sd_excel_2/50\n cof_excel_2 = np.array([0.19719, 0.20498, 0.21363, 0.20146, 0.18615, 0.18318, 0.19836, 0.21747, 0.21529, 0.21493, 0.21421, 0.21038, 0.21147, 0.19772, 0.18747, 0.18409, 0.18427, 0.18883, 0.19239, 0.19262, 0.19683, 0.19719, 0.19222, 0.23401, 0.34232, 0.42683, 0.53734, 0.79598, 1.01918])\n\n # 8N, T=250, F=8, P=0.43, v=50, h0=25\n sd_excel_3 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625, 22.5, 24.375, 26.25, 28.125, 30, 31.875, 33.75, 35.625, 37.5, 39.375])\n time_excel_3 = sd_excel_3/50\n cof_excel_3 = np.array([0.17835, 0.2072, 0.19456, 0.21756, 0.20635, 0.21676, 0.21038, 0.2089, 0.19638, 0.18653, 0.19151, 0.20726, 0.21363, 0.19902, 0.18964, 0.17111, 0.23904, 0.32859, 0.41852, 0.54227, 0.78339, 1.01689])\n\n # 300DegC, T=300, F=5, P=0.27, v=50, h0=25\n sd_excel_4 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625, 22.5, 24.375, 26.25])\n time_excel_4 = sd_excel_4/50\n cof_excel_4 = np.array([0.24057, 0.24109, 0.22403, 0.22232, 0.23368, 0.23319, 0.23083, 0.23897, 0.23636, 0.22914, 0.24916, 0.50965, 0.66055, 0.89591, 1.06436])\n\n # 350DegC, T=350, F=5, P=0.22, v=50, h0=25\n sd_excel_5 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15])\n time_excel_5 = sd_excel_5/50\n cof_excel_5 = np.array([0.29999, 0.32361, 0.32444, 0.3336, 0.31695, 0.31609, 0.5703, 0.91664, 1.07763])\n\n # 80mm/s, T=250, F=5, P=0.34, v=80, h0=25\n sd_excel_6 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625, 22.5, 24.375, 26.25])\n time_excel_6 = sd_excel_6/80\n cof_excel_6 = np.array([0.22877, 0.21741, 0.22773, 0.18984, 0.20213, 0.20723, 0.21981, 0.19403, 0.18517, 0.21203, 0.41362, 0.62392, 0.82406, 1.00583, 1.12556])\n\n # 100m/s, T=250, F=5, P=0.34, v=100, h0=25\n sd_excel_7 = np.array([0, 1.875, 3.75, 5.625, 7.5, 9.375, 11.25, 13.125, 15, 16.875, 18.75, 20.625])\n time_excel_7 = sd_excel_7/100\n cof_excel_7 = np.array([0.21865, 0.19783, 0.20394, 0.20249, 0.21899, 0.21676, 0.28037, 0.37011, 0.52885, 0.72599, 0.91727, 1.0663])\n\n cof_measured_set = []\n cof_measured_set.append(cof_excel_1)\n cof_measured_set.append(cof_excel_2)\n cof_measured_set.append(cof_excel_3)\n cof_measured_set.append(cof_excel_4)\n cof_measured_set.append(cof_excel_5)\n cof_measured_set.append(cof_excel_6)\n cof_measured_set.append(cof_excel_7)\n cof_measured_set = np.array(cof_measured_set)\n\n time_measured_set = []\n time_measured_set.append(time_excel_1)\n time_measured_set.append(time_excel_2)\n time_measured_set.append(time_excel_3)\n time_measured_set.append(time_excel_4)\n 
time_measured_set.append(time_excel_5)\n time_measured_set.append(time_excel_6)\n time_measured_set.append(time_excel_7)\n time_measured_set = np.array(time_measured_set)\n\n sd_measured_set = []\n sd_measured_set.append(sd_excel_1)\n sd_measured_set.append(sd_excel_2)\n sd_measured_set.append(sd_excel_3)\n sd_measured_set.append(sd_excel_4)\n sd_measured_set.append(sd_excel_5)\n sd_measured_set.append(sd_excel_6)\n sd_measured_set.append(sd_excel_7)\n sd_measured_set = np.array(sd_measured_set)\n\n current_test_parameters_1 = dict(T=250, F=5, P=0.34, v=50, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n current_test_parameters_2 = dict(T=250, F=6.5, P=0.36, v=50, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n current_test_parameters_3 = dict(T=250, F=8, P=0.43, v=50, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n current_test_parameters_4 = dict(T=300, F=5, P=0.27, v=50, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n current_test_parameters_5 = dict(T=350, F=5, P=0.22, v=50, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n current_test_parameters_6 = dict(T=250, F=5, P=0.34, v=80, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n current_test_parameters_7 = dict(T=250, F=5, P=0.34, v=100, h0=25, V=23.2, mu0_lubricated = 1.6907363829537443, Q_lubricated = 9141.506836756891, mu0_dry = 10.942250929819629, Q_dry = 9368.85126706061, eta_0 = 0.12, Q_eta = 11930, lambda_1 = 5, lambda_2 = 0.6, c = 0.012, k_1 = 2.05, k_2 = 2.98, k_3 = 5.3, blank_roughness=0.3)\n\n testing_parameters_set = []\n testing_parameters_set.append(current_test_parameters_1)\n testing_parameters_set.append(current_test_parameters_2)\n testing_parameters_set.append(current_test_parameters_3)\n testing_parameters_set.append(current_test_parameters_4)\n testing_parameters_set.append(current_test_parameters_5)\n testing_parameters_set.append(current_test_parameters_6)\n testing_parameters_set.append(current_test_parameters_7)\n\n return cof_measured_set, time_measured_set, sd_measured_set, testing_parameters_set", "def mh_sample(x, log_pdf_lambda, jump_std, D, num_samples=1, burn=1, lag=1):\n num_collected = 0\n iters = 0\n samples = []\n\n t_samples = num_samples*lag+burn\n\n checkevery = max(20, int(t_samples/100.0))\n 
accepted = 0.0\n acceptance_rate = 0.0\n iters = 1.0\n aiters = 1.0\n\n if D[0] >= 0.0 and D[1] == float('Inf'):\n jumpfun = lambda x, jstd: fabs(x + normrnd(0.0, jstd))\n elif D[0] == 0 and D[1] == 1:\n def jumpfun(x, jstd):\n x = fabs(x + normrnd(0.0, jstd))\n if x > 1.0:\n x = x%1\n\n assert x > 0 and x < 1\n\n return x\n else:\n jumpfun = lambda x, jstd: x + normrnd(0.0, jstd)\n\n logp = log_pdf_lambda(x)\n while num_collected < num_samples:\n\n # every now and then propose wild jumps incase there very distant modes\n x_prime = jumpfun(x, jump_std)\n assert( x_prime > D[0] and x_prime < D[1] )\n \n logp_prime = log_pdf_lambda(x_prime)\n\n # if log(random.random()) < logp_prime - logp:\n if log(random.random()) < logp_prime - logp:\n x = x_prime\n logp = logp_prime\n accepted += 1.0\n acceptance_rate = accepted/aiters\n\n if iters > burn and iters%lag == 0:\n num_collected += 1\n samples.append(x)\n\n # keep the acceptance rate around .3 +/- .1\n if iters % checkevery == 0:\n if acceptance_rate >= .4:\n jump_std *= 1.1\n elif acceptance_rate <= .2:\n jump_std *= .9019\n # print(\"j : %1.4f, AR: %1.4f\" % (jump_std, acceptance_rate))\n accepted = 0.0\n acceptance_rate = 0.0\n aiters = 0.0\n\n\n iters += 1.0\n aiters += 1.0\n\n if num_samples == 1:\n return samples[0]\n else:\n return samples", "def add_gaussian_noise(path, sigma):\n \n data = np.loadtxt(path + '/output_data.txt').view(complex)\n noise = np.random.normal(0,sigma,data.size)\n noise = np.reshape(noise, data.shape)\n noised_data = data + noise\n np.savetxt(path + '/noised_data.txt', noised_data.view(float))\n return noised_data", "def myMedianFilter(filePath, kernel_size=(3,3,3)):\n\tprint('myMedianFilter()')\n\n\tprint(' filePath:', filePath)\n\t\n\t#\n\t# load\n\tprint(' loading')\n\tmyStack = tifffile.imread(filePath) # could use scipy but directly using tifffile instead\n\t\n\t#\n\t# median filter\n\tprint(' median filter ... 
please wait')\n\tmedianFilteredStack = scipy.signal.medfilt(myStack, kernel_size=kernel_size)\n\t\n\t# median filter will usually return a np.float64 stack (which is huge)\n\t# convert to a more manageable size, keep in mind that with this step you can lose data ...\n\t# option 1: convert to np.float16\n\t#medianFilteredStack = medianFilteredStack.astype(np.float16)\n\t# option 2: if Fiji stitching does not like float16 then use this instead \n\tmedianFilteredStack = medianFilteredStack.astype(np.uint8)\n\t\n\t#\n\t# save\n\t\n\t# take apart filePath to make _median.tif save path\n\ttmpFolderPath, tmpFileName = os.path.split(filePath)\n\t# make a new 'analysis' folder\n\tsaveFolderPath = os.path.join(tmpFolderPath, 'analysis2')\n\tif not os.path.isdir(saveFolderPath):\n\t\tos.mkdir(saveFolderPath)\n\t# build a save file path (with full filename)\n\ttmpFileNameNoExtension, tmpExtension = tmpFileName.split('.')\n\tsaveFileName = tmpFileNameNoExtension + '_median.tif'\n\tsaveFilePath = os.path.join(saveFolderPath, saveFileName)\n\t\n\t# do the save\n\tif os.path.isfile(saveFilePath):\n\t\tprint(' not saving, already exists:', saveFilePath)\n\telse:\n\t\tprint(' saving saveFilePath:', saveFilePath)\n\t\ttifffile.imsave(saveFilePath, medianFilteredStack)\n\t\n\treturn medianFilteredStack", "def test_parse_electrondensity():\n # Parse\n envisionpy.hdf5parser.charge(PATH_TO_HDF5, PATH_TO_VASP_CALC)\n envisionpy.hdf5parser.unitcell(PATH_TO_HDF5, PATH_TO_VASP_CALC)\n\n # Test if the generated HDF5-file contains correct information\n\n if os.path.isfile(PATH_TO_HDF5):\n with h5py.File(PATH_TO_HDF5, 'r') as h5:\n assert '/CHG' in h5\n assert '/UnitCell' in h5\n assert '/basis' in h5\n assert '/scaling_factor' in h5\n # cleanup\n os.remove(PATH_TO_HDF5)", "def python_script(filename, root, subfield_min, subfield_max, experiment, obs_type, shear_type,\n gal_dir, ps_dir, seed, n_config_per_branch, preload, my_step, public_dir='public'):\n import os\n import numpy as np\n if my_step == 1:\n with open(filename, \"w\") as f:\n f.write(\"import sys\\n\")\n f.write(\"import shutil\\n\")\n f.write(\"sys.path.append('/home/rmandelb/git/great3-private')\\n\")\n f.write(\"import great3sims\\n\")\n command_str = \"great3sims.run('\" + root + \"', subfield_min=\" + str(subfield_min) + \\\n \", subfield_max=\" + str(subfield_max) + \", experiments=['\" + experiment + \\\n \"'], obs_type=['\" + obs_type + \"'], shear_type=['\" + shear_type + \\\n \"'], gal_dir='\" + gal_dir + \"', ps_dir='\" + ps_dir + \"', seed=\" + str(seed) + \\\n \", public_dir='\" + os.path.join(root, public_dir) + \\\n \"', truth_dir='\" + os.path.join(root, 'truth') + \\\n \"', steps = ['metaparameters', 'catalogs'], preload=\" + str(preload) + \")\\n\"\n f.write(command_str)\n\n e = experiment[0]\n o = obs_type[0]\n s = shear_type[0]\n dir = os.path.join(root, experiment, obs_type, shear_type)\n config_pref = e + o + s\n psf_config_pref = e + o + s + '_psf'\n star_test_config_pref = e + o + s + '_star_test'\n\n new_config_names = []\n new_psf_config_names = []\n new_star_test_config_names = []\n x = np.arange(n_config_per_branch).astype(float)\n x1 = x+1\n first_arr = (np.round(subfield_min + (subfield_max-subfield_min+1)*x/n_config_per_branch)).astype(int)\n last_arr = (np.round(subfield_min + (subfield_max-subfield_min+1)*x1/n_config_per_branch)-1).astype(int)\n for i in range(n_config_per_branch):\n first = first_arr[i]\n last = last_arr[i]\n # Could put nproc setting here. 
However, for now we just want to use as many \n # processors as we have on that node.\n nproc = -1\n command_str = \"great3sims.run('\" + root + \"', subfield_min=\" + str(first) + \\\n \", subfield_max=\" + str(last) + \", experiments=['\" + experiment + \\\n \"'], obs_type=['\" + obs_type + \"'], shear_type=['\" + shear_type + \\\n \"'], gal_dir='\" + gal_dir + \"', ps_dir='\" + ps_dir + \"', seed=\" + str(seed) + \\\n \", public_dir='\" + os.path.join(root, public_dir) + \\\n \"', truth_dir='\" + os.path.join(root, 'truth') + \\\n \"', steps = ['config'], nproc=\" + str(nproc) + \", preload=\" + str(preload) + \\\n \")\\n\"\n f.write(command_str)\n new_name = '%s_%02d.yaml'%(config_pref,i)\n new_psf_name = '%s_%02d.yaml'%(psf_config_pref,i)\n new_config_names.append(new_name)\n new_psf_config_names.append(new_psf_name)\n f.write(\"shutil.move('\"+os.path.join(root,config_pref+'.yaml')+\"', '\"+\n os.path.join(root,new_name)+\"')\\n\")\n f.write(\"shutil.move('\"+os.path.join(root,psf_config_pref+'.yaml')+\"', '\"+\n os.path.join(root,new_psf_name)+\"')\\n\")\n\n # But don't make multiple star test configs, just one. We have to rerun great3sims.run\n # for this to get the config file for everything. It will also remake config files for\n # galaxy and starfield images, but since we won't add their names to our list of config\n # files to run, they just get ignored, and it's all good.\n command_str = \"great3sims.run('\" + root + \"', subfield_min=\" + str(subfield_min) + \\\n \", subfield_max=\" + str(subfield_max) + \", experiments=['\" + experiment + \\\n \"'], obs_type=['\" + obs_type + \"'], shear_type=['\" + shear_type + \\\n \"'], gal_dir='\" + gal_dir + \"', ps_dir='\" + ps_dir + \"', seed=\" + str(seed) + \\\n \", public_dir='\" + os.path.join(root, public_dir) + \\\n \"', truth_dir='\" + os.path.join(root, 'truth') + \\\n \"', steps = ['config'], preload=\" + str(preload) + \")\\n\"\n f.write(command_str)\n new_star_test_name = '%s.yaml'%(star_test_config_pref)\n new_star_test_config_names.append(new_star_test_name)\n\n return new_config_names, new_psf_config_names, new_star_test_config_names\n\n elif my_step == 3:\n with open(filename, \"w\") as f:\n f.write(\"import sys\\n\")\n f.write(\"sys.path.append('/home/rmandelb/git/great3-private')\\n\")\n f.write(\"import great3sims\\n\")\n command_str = \"great3sims.run('\" + root + \"', subfield_min=\" + str(subfield_min) + \\\n \", subfield_max=\" + str(subfield_max) + \", experiments=['\" + experiment + \\\n \"'], obs_type=['\" + obs_type + \"'], shear_type=['\" + shear_type + \\\n \"'], gal_dir='\" + gal_dir + \"', ps_dir='\" + ps_dir + \"', seed=\" + str(seed) + \\\n \", public_dir='\" + os.path.join(root, public_dir) + \\\n \"', truth_dir='\" + os.path.join(root, 'truth') + \\\n \"', steps = ['star_params', 'packages'], preload=\" + str(preload) + \")\\n\"\n f.write(command_str)\n else:\n raise NotImplementedError", "def generate_thermo_dict(file_path, smiles, temp):\n low_t, mid_t, high_t, thermo_params_dict = parse_chemkin_thermo(file_path, smiles)\n if temp < mid_t:\n i = 0\n else:\n i = 7\n\n species_names = []\n thermo_list = []\n for species, params in thermo_params_dict.items():\n # print(species)\n species_names.append(species)\n specific_heat = (params[i + 0] + params[i + 1] * temp +\n params[i + 2] * temp ** 2 +\n params[i + 3] * temp ** 3 +\n params[i + 4] * temp ** 4) * GAS_CONST\n enthalpy = (params[i + 0] + params[i + 1] * temp / 2 +\n params[i + 2] * temp ** 2 / 3 +\n params[i + 3] * temp ** 3 / 4 +\n params[i + 
4] * temp ** 4 / 5 +\n params[i + 5] / temp) * temp * GAS_CONST\n entropy = (params[i + 0] * np.log(temp) + params[i + 1] * temp +\n params[i + 2] * temp ** 2 / 2 +\n params[i + 3] * temp ** 3 / 3 +\n params[i + 4] * temp ** 4 / 4 + params[i + 6]) * GAS_CONST\n free_energy = enthalpy - entropy * temp\n list_entries = [specific_heat, enthalpy, entropy, free_energy]\n thermo_list.append(list_entries)\n # print(species_names)\n dict_thermo_values = dict(zip(species_names, thermo_list))\n return dict_thermo_values", "def get_rhk_std_pdf(log_rhk, bw=0.07, subset=\"all\", key_x=\"log_rhk_med\", key_y=\"log_sig_r5\", filepath=None, show_plot=True, save_plot=False, savepath=\"rhk_std_kde.pdf\"):\n if not filepath:\n filepath = os.path.join(os.path.dirname(__file__), \"data.csv\")\n\n if log_rhk < -5.5 or log_rhk > -3.6:\n print(\"*** ERROR: log_rhk outside data boundaries [-5.5, -3.6]\")\n return np.nan, np.nan\n\n df = pd.read_csv(filepath, index_col=0)\n\n if subset == 'all':\n pass\n elif subset == 'MS':\n df = df[df.lum_class == 'V']\n elif subset == 'dF':\n df = df[df.lum_class == 'V']\n df = df[df.sptype.str.contains('F')]\n elif subset == 'dG':\n df = df[df.lum_class == 'V']\n df = df[df.sptype.str.contains('G')]\n elif subset == 'dK':\n df = df[df.lum_class == 'V']\n df = df[df.sptype.str.contains('K')]\n else:\n print(\"*** ERROR: subset must be either 'all', 'MS', 'dF', 'dG', or 'dK'.\")\n return np.nan, np.nan\n\n x = df[key_x].values\n y = df[key_y].values\n\n X, Y, Z = _kde2d_sklearn(x, y, thresh=1e-100, bw=bw, xlim=[-5.5, -3.6], ylim=[-3, 0.5], xbins=400j, ybins=400j)\n \n idx = _find_nearest(X, log_rhk)\n\n step = (max(Y[idx]) - min(Y[idx])) / (Y[idx].size - 1)\n probi = Z[idx]/Z[idx].max()\n probi /= sum(probi)\n probi /= step\n\n plt.figure(figsize=(5, 3.6*1.5))\n\n plt.subplot(211)\n _plot_rhk_kde2d(x, y, bw, xlabel=r\"$\\log~R'_\\mathrm{HK}$ [dex]\", ylabel = r\"$\\log~\\sigma~(R_5)$ [dex]\", show_points=True, show_plot=False)\n plt.axvline(log_rhk, color='w')\n\n plt.subplot(212)\n ax = plt.gca()\n ax.plot(Y[idx], probi, 'k-')\n\n ax.set_ylabel(\"Probability density\", fontsize=12)\n ax.set_xlabel(r\"$\\log~\\sigma~(R_5)$ [dex]\", fontsize=12)\n\n plt.legend(frameon=False, fontsize=8)\n plt.tight_layout()\n\n if save_plot:\n plt.savefig(savepath)\n\n if show_plot:\n plt.show()\n plt.close()\n\n return Y[idx], probi", "def simulation():\r\n # Temperature values in Kelvin\r\n T_extra = list(np.linspace(2.2, 2.5, 11))\r\n T_values = list(np.linspace(2, 2.8, 31))\r\n T_values.extend(T_extra)\r\n T_values.sort()\r\n # Dimension values\r\n L_values = [12, 24, 32]\r\n # Independent sampling\r\n thermalisation_sweeps = 5000\r\n sample_every = 50\r\n\r\n with open('domains.txt', 'w') as file:\r\n\r\n for L in L_values:\r\n\r\n N = L**2\r\n s = get_spin_list(hot_start, N)\r\n neighbours_dictionary = get_neighbours_index(N)\r\n\r\n for T in T_values:\r\n\r\n b = 1/T # Constant: 1 / temperature\r\n cluster_size_list = []\r\n start = time.process_time()\r\n\r\n for i in range(nsweeps):\r\n metropolis(N, s, neighbours_dictionary, b)\r\n if i < thermalisation_sweeps:\r\n continue\r\n elif i % sample_every == 0:\r\n cluster_size_list.append(get_average_cluster_size(s, neighbours_dictionary))\r\n\r\n cluster_std = np.std(cluster_size_list)\r\n cluster_avg = np.average(cluster_size_list)\r\n\r\n file.write(f'{L},{T},{cluster_avg},{cluster_std}\\n')\r\n time_for_sample = time.process_time() - start\r\n print(f'L = {L}, T = {T:.2f}, Cluster size = {cluster_avg:.2f}, Std = {cluster_std:.2f} --> 
Time for sample = {time_for_sample:.2f} seconds')", "def get_hd(self, variate_GARCH_parameters=True):\n self.filename = \"T-{}_{}_Ksim.csv\".format(self.tau_day, self.date)\n if os.path.exists(os.path.join(self.garch_data_folder, self.filename)) and (\n self.overwrite_simulations == False\n ):\n logging.info(f\"-------------- use existing Simulations {self.filename}\")\n self.GARCH._load() # load GARCH Model (parameters, z_dens)\n pass\n else:\n logging.info(\"-------------- create new Simulations\")\n simulated_log_returns, simulated_tau_mu = self.GARCH.simulate_paths(\n self.tau_day, self.simulations, variate_GARCH_parameters\n )\n self.ST = self._calculate_path(simulated_log_returns, simulated_tau_mu)\n pd.Series(self.ST).to_csv(os.path.join(self.garch_data_folder, self.filename), index=False)\n\n self.ST = pd.read_csv(os.path.join(self.garch_data_folder, self.filename))\n M = np.linspace((1 - self.cutoff), (1 + self.cutoff), 100)\n simulated_paths_in_moneyness = np.array(self.S0 / self.ST)\n self.q_M = {\"x\": M, \"y\": density_estimation(simulated_paths_in_moneyness, M, h=self.h)}", "def psd_13(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100, nMult=4,\n input_psd=['broken_powerlaw', [1e-4, -1, -2, 1e-3]], gaussNoise=1,\n gaps=[3372, 50], sameGap=True)\n \n fql = np.logspace(np.log10(0.5/(lc[0,0,-1]-lc[0,0,0])), np.log10(0.5*dt), 6)\n\n lc[:,1] = lc[:,1] - lc[:,1].mean(1)[:,None] + lc[:,1].mean()\n\n fit_log_psd(fql, lc, extra, '13')", "def sim1D(**kwargs):\n import matplotlib.pyplot as plt\n from matplotlib import rc\n import numpy as np\n import os\n import progressbar as pb\n\n #Settings to make the plots appropriate for inclusion in TeX generated publications\n rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n rc('text',usetex=True)\n FONTSIZE = 10\n FIGSIZE = (3.5,3.5)\n FIGDIM = ([0.15,0.1,0.8,0.85])\n\n#Proportional control coefficient\n if 'Kp' in kwargs:\n Kp = kwargs['Kp']\n else:\n Kp = .04\n\n#number of time samples\n if 'mtas' in kwargs:\n moving_time_average_samples = mtas\n else:\n moving_time_average_samples = 15\n\n#surface flux\n if 'qs_nom' in kwargs:\n qs_nom = kwargs['qs_nom']\n else:\n qs_nom = 600. #500. #585. 
#W\n\n#material properties\n if 'k_type' in kwargs:\n m=nylon12(kwargs['k_type']) #instantiate m - material\n if 'const' in kwargs['k_type']:\n if 'k' in kwargs:\n m.k_const = kwargs['k']\n print 'k found\\n'\n else:\n m = nylon12('linear')\n print 'using default linear thermal conductivity.\\n'\n \n#specific heat\n if 'c_type' in kwargs:\n m.c_type = kwargs['c_type']\n if 'const' in kwargs['c_type']:\n if 'c' in kwargs:\n m.c_const = kwargs['c']\n print 'constant c found'\n else:\n print 'using default linear specific heat'\n \n#density\n if 'rho' in kwargs:\n m.rho = kwargs['rho']\n\n#spatial domain\n if 'xmax' in kwargs:\n xmax = kwargs['xmax']\n else:\n xmax = 0.02 #[m] depth of powder to consider\n if 'dx' in kwargs:\n dx = kwargs['dx']\n else:\n dx = 1.016e-4\n if 'x' in kwargs:\n x = np.asarray(kwargs['x'])\n else:\n x = np.arange(0,xmax,dx)\n\n#Temperatures\n if 'T_initial' in kwargs:\n T_initial = kwargs['T_initial']\n else:\n T_initial = 300\n \n if 'T_offset' in kwargs:\n T_offset = kwargs['T_offset']\n else:\n T_offset = 3\n \n if 'T_set' in kwargs:\n T_set = kwargs['T_set']\n else:\n T_set = 470\n\n#time domain\n if 'time' in kwargs: #set up time variable\n time = kwargs['time']\n dt = time[1] - time[0]\n if 'data' in kwargs:\n data = kwargs['data']\n Compare = True\n else:\n Compare = False\n else: #use default\n dt = dx**2/(5*m.alpha(T_set)) #stability criterion Fo<=1/2\n if 'tmax' in kwargs:\n tmax = float(kwargs['tmax'])\n else:\n tmax = 100.\n time = np.arange(0.,tmax+dt,dt)\n Compare = False\n tmax = max(time)\n num_time_steps = len(time)\n\n#initialize the working variables\n T = np.ones((num_time_steps,len(x)))*T_initial\n qs = np.zeros(num_time_steps)\n err = np.zeros(num_time_steps)\n u = np.zeros(num_time_steps)\n\n#loop through the time and space domains\n inf = len(x)-1\n print \"Solving ...\\n\"\n pbar=pb.ProgressBar().start()\n for i in range(1,num_time_steps): #time step\n dt = time[i] - time[i-1]\n #constant flux boundary condition\n err[i] = T_set + T_offset - np.mean(T[range(max(0,i-moving_time_average_samples),i),0])\n u[i] = err[i] * Kp\n qs[i] = max(min(1.,u[i]) * qs_nom,-10)\n T[i,0] = 2*Fo_T(m.alpha(T[i-1,0]),dt,dx)*(T[i-1,1] + qs[i]*dx/m.k(T[i-1,1])) + (1 - 2*Fo_T(m.alpha(T[i-1,0]),dt,dx)) * T[i-1,0]\n\n #adiabatic far wall boundary condition\n T[i,inf] = 2*Fo_T(m.alpha(T[i-1,inf-1]),dt,dx) * T[i-1,inf-1] + (1 - 2*Fo_T(m.alpha(T[i-1,inf]),dt,dx)) * T[i-1,inf]\n\n #internal nodes heat equation\n for j in range(1,len(x)-1):\n T[i,j] = Fo_T(m.alpha(T[i-1,j]),dt,dx) * (T[i-1,j-1] + T[i-1,j+1]) + (1 - 2*Fo_T(m.alpha(T[i-1,j]),dt,dx)) * T[i-1,j]\n pbar.update(100.*float(i)/float(num_time_steps))\n pbar.finish()\n\n#plot the results\n print \"Plotting ...\\n\"\n fig = plt.figure(1,figsize=FIGSIZE)\n ax = fig.add_axes(FIGDIM)\n plotlabel = 'dx=%1.2e, Fo=%1.2e' %(dx,Fo_T(m.alpha(T_set),dt,dx))\n line = ax.plot(time,T[:,0],label=plotlabel)\n if(Compare):\n line2 = ax.plot(time,data,label='Reference')\n xtext = ax.set_xlabel('Time (s)',fontsize=FONTSIZE,family='sans-serif')\n ytext = ax.set_ylabel('Surface Temperature (K)',fontsize=FONTSIZE,family='sans-serif')\n for label in ax.get_xticklabels():\n label.set_family('sans-serif')\n\n if 'filename' in kwargs:\n filename = kwargs['filename']\n else:\n filename = 'last_sim'\n\n np.savez(filename,T=T,time=time,qs=qs)\n\n figfilename = filename+'.pdf'\n plt.savefig(figfilename,format='pdf')\n\n comment_info = \"qs_nom = %.0f\\nT_set = %1.1f\\nKp = %1.3f\\nT_initial = %1.3f\\nT_set = %1.1f\\nT_offset = %1.1f\\ndx = 
%1.3e\\ndt=%1.3e\" % (qs_nom,\n T_set,\n Kp,\n T_initial,\n T_set,\n T_offset,\n dx,\n dt)\n \n os.system(gen_add_comment_script(figfilename,comment_info))\n try:\n rmse = np.sqrt( np.mean( (T[:,0]-data)**2 ) )\n return rmse\n except:\n return -1.", "def image_simulation(path1,path2,S, N, file_name, NCHROMS, threshold, apply_threshold,sort,maj_min,img_columns,img_rows,coef_label_dic):\n\tglobal once\n\tglobal nsl\n\tsimulation_error = []\n\tstatistics_list = []\n\tdim = []\n\n\t##################################################\n\t#############OPENING THE SIMULATION FILES#########\n\t##################################################\n\n\t#Suffix of g_zip files (Compressed)\n\tgzip_suffix = \".gz\"\n\t#Suffix of txt files (Uncompressed)\n\ttxt_suffix = \".txt\"\n\t#we import and open the file\n\tif file_name.endswith(gzip_suffix):\n\t\twith gzip.open(path1 + file_name, 'rb') as f:\n\t\t\tfile = f.read()\n\t\tif type(file) == str:\n\t\t\t#gzip files might need to be processed to be in correct format\n\t\t\tfile = file.splitlines(True)\n\telif file_name.endswith(txt_suffix):\n\t\tfile = open(path1 + file_name).readlines()\n\n\t##################################################\n\t##########INDEXING THE FILES BY INTERATION########\n\t##################################################\n\n\t#we look for the caracter // inside the file\n\tfind = []\n\tfor i, string in enumerate(file):\n\t\tif string == '//\\n':\n\t\t\tfind.append(i+3)\n\n\t##################################################\n\t###GENERATE ONE IMAGE PER SIMULATION ITERATION####\n\t##################################################\n\n\tfor ITER, pointer in enumerate(find):\n\t\ttry:\n\t\t\t###########################\n\t\t\t####CREATE CHROM MATRIX####\n\t\t\t###########################\n\n\t\t\tn_columns = len(list(file[pointer]))-1\n\t\t\tcroms = np.zeros((NCHROMS,n_columns),dtype=int)\n\t\t\tfor j in range(NCHROMS):\n\t\t\t\tf = list(file[pointer + j])\n\t\t\t\tdel f[-1]\n\t\t\t\tposition_it = file[pointer - 1].split()\n\t\t\t\tdel position_it[0]\n\t\t\t\tposition_it = np.array(position_it, dtype='float')\n\t\t\t\tposition_it = position_it*N\n\t\t\t\tF = np.array(f,dtype=int)\n\t\t\t\tif j == 0:\n\t\t\t\t\tcrom_array = F\n\t\t\t\telse:\n\t\t\t\t\tcrom_array = np.vstack((crom_array,F))\n\t\t\t\tcroms[j,:]=F\n\t\t\tn_pos = np.size(croms,1)\n\n\t\t\t###########################\n\t\t\t#####APPLY THRESHOLD#######\n\t\t\t###########################\n\n\t\t\tif apply_threshold == True:\n\t\t\t\t#Count the number of derived alleles at each position\n\t\t\t\tcount = croms.sum(axis=0,dtype=float)\n\t\t\t\t#Calculate the frrequency of the drived allele for each position\n\t\t\t\tfreq = count/float(NCHROMS)\n\t\t\t\tfor i in range(n_pos):\n\t\t\t\t\tif freq[i] > 0.5:\n\t\t\t\t\t\tfreq[i] = 1-freq[i]\n\t\t\t\t#freq is now a vector that contains the minor allele frequency for each position\n\t\t\t\t#we delete the positions in which the minor allele frequency is <= threshold\n\t\t\t\tpositions = np.where(freq<=threshold)\n\t\t\t\tcroms,n_pos,freq = delete_simulation(n_pos,croms,freq,positions)\n\n\t\t\t###########################\n\t\t\t###COLOUR BY MAJOR/MINOR###\n\t\t\t###########################\n\n\t\t\tif maj_min == True:\n\t\t\t\t#Calculate the Major and the minor allele for each position of the matrix/array\n\t\t\t\t#Traspose the matrix/array\n\t\t\t\ttransponse_array_croms = np.transpose(croms)\n\t\t\t\t#Record the Major and Minor allele for each allelic position\n\t\t\t\tmaj_allele = []\n\t\t\t\tminor_allele = []\n\t\t\t\tfor i in 
range(len(transponse_array_croms)):\n\t\t\t\t\tfreq_data = np.unique(transponse_array_croms[i], return_counts = True)\n\t\t\t\t\tindex_max = np.argmax(freq_data[1])\n\t\t\t\t\tif index_max == 0:\n\t\t\t\t\t\tmaj_allele.append(0)\n\t\t\t\t\t\tminor_allele.append(1)\n\t\t\t\t\tif index_max == 1:\n\t\t\t\t\t\tmaj_allele.append(1)\n\t\t\t\t\t\tminor_allele.append(0)\n\n\t\t\t\t#Black and white image:\n\t\t\t\t#Simulation File: 0 = ancestrial, 1 = Derived (White encoded by 1, Black encoded by 0)\n\t\t\t\t#If the major allele is 0, we want to change 0 with 1 and vice verasa (1 = Major, 0 = Minor)\n\t\t\t\t#If the major allele is 1, no changes need to be made as 1 would by default be coded to be white\n\n\t\t\t\tmatrix_maj_min_col = np.ones((n_pos,NCHROMS),dtype=int)\n\t\t\t\tfor row in range(len(transponse_array_croms)):\n\t\t\t\t\tif maj_allele[row] == 1:\n\t\t\t\t\t\tmatrix_maj_min_col[row,:] = transponse_array_croms[row]\n\t\t\t\t\tif maj_allele[row] == 0:\n\t\t\t\t\t\tmatrix_maj_min_col[row,:] = matrix_maj_min_col[row,:] - transponse_array_croms[row]\n\t\t\t\t#Transpose the matrix so that the rows are the NCHROM and the columns are n_pos\n\t\t\t\tcroms = np.transpose(matrix_maj_min_col)\n\n\t\t\tif maj_min == False:\n\t\t\t\t#Black and white image:\n\t\t\t\t#Simulation File: 0 = ancestrial, 1 = Derived (White encoded by 1, Black encoded by 0)\n\t\t\t\t#We want the opposite(ancestrial = white & derived = black) : hence we need to change 0 with 1 and vice versa before producing the image\n\t\t\t\tall1 = np.ones((NCHROMS,n_pos))\n\t\t\t\tcroms = all1 - croms\n\n\t\t\t###########################\n\t\t\t####ORDER ROWS/COLUMNS#####\n\t\t\t###########################\n\n\t\t\tif sort == 2:\n\t\t\t#Sort the matrix by row (chromosome)\n\t\t\t\tcroms = order_data(croms)\n\n\t\t\tif sort == 3:\n\t\t\t#Sort the matrix by column (genetic posistion)\n\t\t\t\tcroms_transpose = croms.transpose()\n\t\t\t\tcroms_transpose = order_data(croms_transpose)\n\t\t\t\tcroms = croms_transpose.transpose()\n\n\t\t\tif sort == 4:\n\t\t\t\t#First: sort the matrix by row (chromosome)\n\t\t\t\tcroms = order_data(croms)\n\t\t\t\t#Second: sort the matrix by column (genetic posistion)\n\t\t\t\tcroms_transpose = croms.transpose()\n\t\t\t\tcroms_transpose = order_data(croms_transpose)\n\t\t\t\tcroms = croms_transpose.transpose()\n\n\t\t\t######################\n\t\t\t###IMAGE GENERATION###\n\t\t\t######################\n\n\t\t\t#Create image from the simulations\n\t\t\tbw_croms_uint8 = np.uint8(croms)\n\t\t\tbw_croms_im = Image.fromarray(bw_croms_uint8*255, mode = 'L')\n\n\t\t\t#####################\n\t\t\t## IMAGE RESIZINNG ##\n\t\t\t#####################\n\n\t\t\tbw_croms_im_resized = bw_croms_im.resize((img_columns,img_rows))\n\t\t\tdim.append(bw_croms_im_resized.size[0])\n\n\t\t\t##############################\n\t\t\t## CONVERSION TO FLAT ARRAY ##\n\t\t\t##############################\n\n\t\t\tarr = np.asarray(bw_croms_im_resized).flatten()\n\t\t\tarr = arr.astype('float32')\n\n\t\t\tim_matrix.append(arr)\n\t\t\tparam_index.append(coef_label_dic[str(int(S))])\n\t\t\titer_index.append([S,ITER])\n\n\t\texcept:\n\t\t\tsimulation_error.append(pointer)\n\t\t\tcontinue\n\n\treturn(simulation_error,dim)\n\n\t\t\t\t\t\t\t\t\t##################################################################################################\n\t\t\t\t\t\t\t\t\t##################################################################################################\n\t\t\t\t\t\t\t\t\t############################################# MAIN 
###############################################\n\t\t\t\t\t\t\t\t\t##################################################################################################\n\t\t\t\t\t\t\t\t\t##################################################################################################", "def gaussianSpike_MonteCarloAvg(sampleDict,injectionDict,mMin,sig_eps):\n \n # Sample our hyperparameters\n # bq: Power-law index on the conditional secondary mass distribution p(m2|m1)\n # mu: Mean of the chi-effective distribution\n # logsig_chi: Log10 of the chi-effective distribution's standard deviation\n bq = numpyro.sample(\"bq\",dist.Normal(0,3))\n mu = numpyro.sample(\"mu_chi\",dist.Uniform(-1,1))\n numpyro.factor(\"mu_prior\",-mu**2/(2.*0.4**2)) \n logsig_chi = numpyro.sample(\"logsig_chi\",dist.Uniform(-1.5,0.))\n sig = 10.**logsig_chi\n\n # Also sample the mixture fraction governing the number of events in the zero-spin spike.\n # In order to faciliate more efficient sampling, we explicitly sample logit(zeta) rather than zeta directly.\n # This is then converted to zeta, and an appropriate term added to our log-likelihood to ensure\n # a uniform prior on zeta\n logit_zeta_spike = numpyro.sample(\"logit_zeta_spike\",dist.Normal(0,2))\n zeta_spike = jnp.exp(logit_zeta_spike)/(1.+jnp.exp(logit_zeta_spike)) \n numpyro.deterministic(\"zeta_spike\",zeta_spike)\n zeta_spike_logprior = -0.5*logit_zeta_spike**2/2**2 + jnp.log(1./zeta_spike + 1./(1-zeta_spike))\n numpyro.factor(\"uniform_zeta_spike_prior\",-zeta_spike_logprior)\n \n # Read out found injections\n # Note that `pop_reweight` is the inverse of the draw weights for each event\n Xeff_det = injectionDict['Xeff']\n m1_det = injectionDict['m1']\n m2_det = injectionDict['m2']\n pop_reweight = injectionDict['pop_reweight']\n\n # Form ratio of proposed population weights over draw weights for each found injection\n p_chi_det = (1.-zeta_spike)*truncatedNormal(Xeff_det,mu,sig,-1,1) + zeta_spike*truncatedNormal(Xeff_det,0.,sig_eps,-1,1)\n p_m2_det = (1.+bq)*m2_det**bq/(m1_det**(1.+bq) - mMin**(1.+bq))\n p_m2_det = jnp.where(m2_det<mMin,0.,p_m2_det)\n xi_weights = p_chi_det*p_m2_det*pop_reweight\n \n # As a fit diagnostic, compute effective number of injections\n nEff_inj = jnp.sum(xi_weights)**2/jnp.sum(xi_weights**2)\n nObs = 1.0*len(sampleDict)\n numpyro.deterministic(\"nEff_inj_per_event\",nEff_inj/nObs)\n\n # Compute net detection efficiency and add to log-likelihood\n xi = jnp.sum(xi_weights)\n numpyro.factor(\"xi\",-nObs*jnp.log(xi))\n \n # This function defines the per-event log-likelihood\n # m1_sample: Primary mass posterior samples\n # m2_sample: Secondary mass posterior samples\n # Xeff_sample: Effective spin posterior samples\n # weights: Factors that convert to the desired m1/redshift distribution and divide out the m2 and spin prior\n def logp(m1_sample,m2_sample,Xeff_sample,weights):\n \n # Form total population prior\n p_chi = (1.-zeta_spike)*truncatedNormal(Xeff_sample,mu,sig,-1,1) + zeta_spike*truncatedNormal(Xeff_sample,0.,sig_eps,-1,1)\n p_m2 = (1.+bq)*m2_sample**bq/(m1_sample**(1.+bq) - mMin**(1.+bq))\n p_m2 = jnp.where(m2_sample<mMin,0.,p_m2)\n mc_weights = p_chi*p_m2*weights\n \n # Compute effective number of samples and return log-likelihood\n n_eff = jnp.sum(mc_weights)**2/jnp.sum(mc_weights**2) \n return jnp.log(jnp.mean(mc_weights)),n_eff\n \n # Map the log-likelihood function over each event in our catalog\n log_ps,n_effs = vmap(logp)(\n jnp.array([sampleDict[k]['m1'] for k in sampleDict]),\n jnp.array([sampleDict[k]['m2'] for 
k in sampleDict]),\n jnp.array([sampleDict[k]['Xeff'] for k in sampleDict]),\n jnp.array([sampleDict[k]['weights_over_priors'] for k in sampleDict]))\n \n # As a diagnostic, save minimum number of effective samples across all events\n numpyro.deterministic('min_log_neff',jnp.min(jnp.log10(n_effs)))\n\n # Tally log-likelihoods across our catalog\n numpyro.factor(\"logp\",jnp.sum(log_ps))", "def gaussianSpike(sampleDict,injectionDict,mMin,sig_eps):\n \n # Sample our hyperparameters\n # bq: Power-law index on the conditional secondary mass distribution p(m2|m1)\n # mu: Mean of the chi-effective distribution\n # logsig_chi: Log10 of the chi-effective distribution's standard deviation\n bq = numpyro.sample(\"bq\",dist.Normal(0,3))\n mu = numpyro.sample(\"mu_chi\",dist.Uniform(-1,1))\n logsig_chi = numpyro.sample(\"logsig_chi\",dist.Uniform(-1.5,0))\n sig = 10.**logsig_chi\n\n # Also sample the mixture fraction governing the number of events in our zero-spin spike.\n # In order to faciliate more efficient sampling, we explicitly sample logit(zeta) rather than zeta directly.\n # This is then converted to zeta, and an appropriate term added to our log-likelihood to ensure\n # a uniform prior on zeta\n logit_zeta_spike = numpyro.sample(\"logit_zeta_spike\",dist.Normal(0,2))\n zeta_spike = jnp.exp(logit_zeta_spike)/(1.+jnp.exp(logit_zeta_spike)) \n numpyro.deterministic(\"zeta_spike\",zeta_spike)\n zeta_spike_logprior = -0.5*logit_zeta_spike**2/2**2 + jnp.log(1./zeta_spike + 1./(1-zeta_spike))\n numpyro.factor(\"uniform_zeta_spike_prior\",-zeta_spike_logprior)\n\n # Read out found injections\n # Note that `pop_reweight` is the inverse of the draw weights for each event\n Xeff_det = injectionDict['Xeff']\n m1_det = injectionDict['m1']\n m2_det = injectionDict['m2']\n pop_reweight = injectionDict['pop_reweight']\n\n # Probability of each injection under the proposed population\n # See discussion of KDE likelihood methods in paper text\n sig_kde = 0.5*jnp.std(Xeff_det)*Xeff_det.size**(-1./5.)\n bulk_denom = jnp.sqrt(2.*jnp.pi*(sig_kde**2+sig**2))*(erf((1.-mu)/jnp.sqrt(2.*sig**2)) + erf((1.+mu)/jnp.sqrt(2.*sig**2)))\n spike_denom = jnp.sqrt(2.*jnp.pi*(sig_kde**2+sig_eps**2))*(erf(1./jnp.sqrt(2.*sig_eps**2)) + erf(1./jnp.sqrt(2.*sig_eps**2)))\n bulk_kde_integrals = (erf((sig_kde**2*(1.+mu)+sig**2*(1.+Xeff_det))/jnp.sqrt(2.*sig_kde**2*sig**2*(sig_kde**2+sig**2)))\\\n - erf((sig_kde**2*(mu-1.)+sig**2*(Xeff_det-1.))/jnp.sqrt(2.*sig_kde**2*sig**2*(sig_kde**2+sig**2))))\\\n *jnp.exp(-(Xeff_det-mu)**2/(2.*(sig_kde**2+sig**2)))/bulk_denom\n spike_kde_integrals = (erf((sig_kde**2+sig_eps**2*(1.+Xeff_det))/jnp.sqrt(2.*sig_kde**2*sig_eps**2*(sig_kde**2+sig_eps**2)))\\\n - erf((sig_kde**2*(-1.)+sig_eps**2*(Xeff_det-1.))/jnp.sqrt(2.*sig_kde**2*sig_eps**2*(sig_kde**2+sig_eps**2))))\\\n *jnp.exp(-Xeff_det**2/(2.*(sig_kde**2+sig_eps**2)))/spike_denom\n \n # Form ratio of proposed population weights over draw weights for each found injection\n p_chi_det = (1.-zeta_spike)*bulk_kde_integrals + zeta_spike*spike_kde_integrals\n p_m2_det = (1.+bq)*m2_det**bq/(m1_det**(1.+bq) - mMin**(1.+bq))\n p_m2_det = jnp.where(m2_det<mMin,0.,p_m2_det)\n xi_weights = p_chi_det*p_m2_det*pop_reweight\n \n # As a fit diagnostic, compute effective number of injections\n nEff_inj = jnp.sum(xi_weights)**2/jnp.sum(xi_weights**2)\n nObs = 1.0*len(sampleDict)\n numpyro.deterministic(\"nEff_inj_per_event\",nEff_inj/nObs)\n\n # Compute net detection efficiency and add to log-likelihood\n xi = jnp.sum(xi_weights)\n 
numpyro.factor(\"xi\",-nObs*jnp.log(xi))\n \n # This function defines the per-event log-likelihood\n # m1_sample: Primary mass posterior samples\n # m2_sample: Secondary mass posterior samples\n # Xeff_sample: Effective spin posterior samples\n # weights: Factors that convert to the desired m1/redshift distribution and divide out the m2 and spin prior\n def logp(m1_sample,m2_sample,Xeff_sample,weights):\n \n # KDE likelihood; see paper text\n sig_kde = 0.5*jnp.std(Xeff_sample)*Xeff_sample.size**(-1./5.)\n bulk_denom = jnp.sqrt(2.*jnp.pi*(sig_kde**2+sig**2))*(erf((1.-mu)/jnp.sqrt(2.*sig**2)) + erf((1.+mu)/jnp.sqrt(2.*sig**2)))\n spike_denom = jnp.sqrt(2.*jnp.pi*(sig_kde**2+sig_eps**2))*(erf(1./jnp.sqrt(2.*sig_eps**2)) + erf(1./jnp.sqrt(2.*sig_eps**2)))\n bulk_kde_integrals = (erf((sig_kde**2*(1.+mu)+sig**2*(1.+Xeff_sample))/jnp.sqrt(2.*sig_kde**2*sig**2*(sig_kde**2+sig**2)))\\\n - erf((sig_kde**2*(mu-1.)+sig**2*(Xeff_sample-1.))/jnp.sqrt(2.*sig_kde**2*sig**2*(sig_kde**2+sig**2))))\\\n *jnp.exp(-(Xeff_sample-mu)**2/(2.*(sig_kde**2+sig**2)))/bulk_denom\n spike_kde_integrals = (erf((sig_kde**2+sig_eps**2*(1.+Xeff_sample))/jnp.sqrt(2.*sig_kde**2*sig_eps**2*(sig_kde**2+sig_eps**2)))\\\n - erf((sig_kde**2*(-1.)+sig_eps**2*(Xeff_sample-1.))/jnp.sqrt(2.*sig_kde**2*sig_eps**2*(sig_kde**2+sig_eps**2))))\\\n *jnp.exp(-Xeff_sample**2/(2.*(sig_kde**2+sig_eps**2)))/spike_denom\n \n # Form total population prior\n p_chi = (1.-zeta_spike)*bulk_kde_integrals + zeta_spike*spike_kde_integrals\n p_m2 = (1.+bq)*m2_sample**bq/(m1_sample**(1.+bq) - mMin**(1.+bq))\n p_m2 = jnp.where(m2_sample<mMin,0.,p_m2)\n mc_weights = p_chi*p_m2*weights\n \n # Compute effective number of samples and return log-likelihood\n n_eff = jnp.sum(mc_weights)**2/jnp.sum(mc_weights**2) \n return jnp.log(jnp.mean(mc_weights)),n_eff\n \n # Map the log-likelihood function over each event in our catalog\n log_ps,n_effs = vmap(logp)(\n jnp.array([sampleDict[k]['m1'] for k in sampleDict]),\n jnp.array([sampleDict[k]['m2'] for k in sampleDict]),\n jnp.array([sampleDict[k]['Xeff'] for k in sampleDict]),\n jnp.array([sampleDict[k]['weights_over_priors'] for k in sampleDict]))\n \n # As a diagnostic, save minimum number of effective samples across all events\n numpyro.deterministic('min_log_neff',jnp.min(jnp.log10(n_effs)))\n\n # Tally log-likelihoods across our catalog\n numpyro.factor(\"logp\",jnp.sum(log_ps))", "def open_bsx_file(self, filename):\r\n if filename != '':\r\n f = open(filename, 'r')\r\n file = f.read()\r\n attr = file.split('\"')\r\n for i in range(0, len(attr)):\r\n if attr[i].find(\"Density\") != -1:\r\n self.values[\"rhop\"] = float(attr[i+1])*27.6799\r\n if attr[i].find(\"BallisticA\") != -1:\r\n self.values[\"a\"] = float(attr[i+1])\r\n if attr[i].find(\"BallisticN\") != -1:\r\n self.values[\"n\"] = float(attr[i+1])\r\n if attr[i].find(\"SpecificHeatRatio\") != -1:\r\n self.values[\"k\"] = float(attr[i+1])\r\n if attr[i].find(\"MolarMass\") != -1:\r\n self.values[\"MM\"] = float(attr[i+1])\r\n for i2 in range(0, len(attr)):\r\n if attr[i2].find(\"ISPStar\") != -1:\r\n print(self.values[\"k\"])\r\n print(\"\\n\")\r\n print((2/(self.values[\"k\"]+1))**((self.values[\"k\"] + 1)/(self.values[\"k\"]-1)))\r\n print(\"\\n\")\r\n print(self.values[\"k\"] * (2/(self.values[\"k\"]+1))**((self.values[\"k\"] + 1)/(self.values[\"k\"]-1)))\r\n self.values[\"T\"] = float(attr[i2+1])**2 * 9.81 * self.values[\"k\"] * (2/(self.values[\"k\"]+1))**((self.values[\"k\"] + 1)/(self.values[\"k\"]-1)) / self.R_specific\r\n f.close()\r\n 
self.engine.update(self.values[\"ri\"], self.values[\"ro\"], self.values[\"l\"], self.values[\"rt\"], self.values[\"re\"])\r\n self.tspan = linspace(0, 2, num=int(self.values[\"tstepnum\"]))\r\n self.R_specific = self.R / self.values[\"MM\"]", "def main(config_path,inject_error):\n try:\n with open(config_path) as handle:\n config = json.load(handle)\n\n #prepare metrics configurations\n misc_config = config.get(\"misc\", {})\n interval_ms = misc_config.get(\"interval_ms\", 500)\n devmode = misc_config.get(\"devmode\", False)\n destination = misc_config.get(\"destination\", \"file\")\n\n #prepare assets\n asset_0 = config.get(\"asset_0\",{})\n asset_1 = config.get(\"asset_1\",{})\n\n #Start simulation\n generate(config, asset_0, asset_1, interval_ms, inject_error, devmode, destination)\n\n except IOError as error:\n print(\"Error opening config file '%s'\" % config_path, error)", "def test_gamegen_noise_gaussian(game_file):\n with stderr() as err, stdout() as out:\n assert run(\n \"gen\", \"noise\", \"-d\", \"gaussian\", \"-w\", \"1.5\", \"-s\", \"5\", \"-i\", game_file\n ), err.getvalue()\n gamereader.loads(out.getvalue())", "def psd_11(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.logspace(np.log10(0.5/n), np.log10(0.5*dt), 6)\n\n \n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100, nMult=4,\n input_psd=['broken_powerlaw', [1e-6, -1, -3, 3e-3]])\n lc[:,1] = lc[:,1] - lc[:,1].mean(1)[:,None] + lc[:,1].mean()\n\n fit_log_psd(fql, lc, extra, '11')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Asserts that a trail of edges is a ring in the graph
def assertIsRing(self, graph, edges): for e in edges: self.assertIn( e, graph, f"The edge {e} of the ring does not exist in the graph." ) self.assertGreaterEqual( len(edges), 3, "A ring consists of at least 3 edges." ) print(f"edges in ring: {edges}") for i, (u_i, v_i) in enumerate(edges[:-1]): u_j, v_j = edges[i+1] self.assertTrue( u_i in set([u_j, v_j]) or v_i in set([u_j, v_j]), f"The edges ('{u_i}', '{v_i}') and " f"('{u_j}', '{v_j}') are not connected." ) u_1, v_1 = edges[0] u_k, v_k = edges[-1] self.assertTrue( u_k in set([u_1, v_1]) or v_k in set([u_1, v_1]), "The ring is not closed " f"[({u_1}, {v_1}), ..., ({u_k}, {v_k})]." ) for i, (u_i, v_i) in enumerate(edges[:-1]): for u_j, v_j in edges[i+1:]: self.assertTrue( u_i not in set([u_j, v_j]) or v_i not in set([u_j, v_j]), f"The edges ({u_i}, {v_i}) and " f"({u_j}, {v_i}) are not distinct." )
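A hedged illustration of inputs the helper above would accept; the plain list-of-tuples "graph" is an assumption, since the record only shows that the graph must support `edge in graph` for (u, v) pairs.

# Hypothetical inputs for assertIsRing. Inside the unittest.TestCase that
# defines the helper, the check would be invoked as:
#     self.assertIsRing(graph, ring)
graph = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "a"), ("a", "c")]
ring = [("a", "b"), ("b", "c"), ("c", "d"), ("d", "a")]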
[ "def test_extended_sanity(self):\n testgraph = nx.Graph([(0,1),(0,2),(0,3),(2,4),(2,5),(3,6),(3,7),(7,8),(6,8)])\n found, thering = ring_extended(testgraph)\n self.assertTrue(found)\n self.is_ring(testgraph, thering)\n # Uncomment to visualize the graph and returned ring:\n #draw_graph(testgraph,thering)", "def test_ring(self):\n eight_node = Graph()\n for i in range(8):\n eight_node.add_node(Node(value=i))\n for i in range(8):\n eight_node.add_edge(i, (i + 1) % 8)\n self.assertEqual(eight_node.neighbors,\n [{1, 7}, {0, 2}, {1, 3}, {2, 4},\n {3, 5}, {4, 6}, {5, 7}, {0, 6}])\n\n self.assertEqual(eight_node.shortest_path(0, 4),\n [0, 7, 6, 5, 4])\n self.assertEqual(eight_node.shortest_path(0, 6),\n [0, 7, 6])\n self.assertTrue(eight_node.is_adjacent(0, 7))\n\n # Look for node with value 6 and value not present\n found_index = eight_node.find_index(6)\n self.assertEqual(found_index, 6)\n found_index = eight_node.find_index(10)\n self.assertIsNone(found_index)\n\n # Make a hole in the ring and check new distance\n eight_node.remove_node(7)\n self.assertTrue(eight_node.is_adjacent(0, 1))\n self.assertFalse(eight_node.is_adjacent(0, 6))\n self.assertEqual(eight_node.shortest_path(0, 6),\n [0, 1, 2, 3, 4, 5, 6])", "def test_ring_perception(self):\n mol = Molecule(smiles='c12ccccc1cccc2')\n mol.identify_ring_membership()\n for atom in mol.atoms:\n if atom.element == 'C':\n self.assertTrue(atom.props['inRing'])\n elif atom.element == 'H':\n self.assertFalse(atom.props['inRing'])", "def test_has_vert_filled_wrong(graph_one):\n assert graph_one.has_vert(\"X\") is False", "def test_edge_balance_for_menger_sponge(mesh):\n diag = mesh.diagnose()\n assert diag.is_edge_balance_broken is False", "def test_Polygon_edges_3():\n a = Polygon(10, 10)\n assert a.edges == 10, \"Number of edges cannot be different\"", "def test_edge_not_match_direction(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"O\",\"B\")\n self.assertFalse(e1.matches(e2))", "def test_has_vert_filled(graph_one):\n\n assert graph_one.has_vert(\"C\") is True", "def find_roundabouts(network): \n roundabouts = []\n for edge in network.edges.itertuples():\n if shapely.predicates.is_ring(edge.geometry): roundabouts.append(edge)\n return roundabouts", "def test_edge_not_match_type(self):\n e1 = ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"P\",\"T\")\n self.assertFalse(e1.matches(e2))", "def test_isRagged(self):\n assert(self.ragged.isRagged())\n assert(not self.identical.isRagged())\n assert(not self.gaps.isRagged())", "def ray_trace_jones_bottom_up_test():", "def has_edge(self, u, v):", "def test_spacing(shape):\n graph = TriGraph(shape)\n assert_array_almost_equal(graph.length_of_link, 1.0)\n\n graph = TriGraph(shape, spacing=2)\n assert_array_almost_equal(graph.length_of_link, 2.0)", "def test_Polygon_edgeLength_6():\n a = Polygon(3, 3)\n assert a.edgeLength == (2 * 3 * math.sin(math.pi / 3)), \"wrongly calculates edgeLength\"", "def test_vertex_rl(self):\n\n # RL = 0\n vertex_index = 0\n\n length = 2\n delta_x = float(length)/2\n width = 1\n delta_y = float(width)/2\n center = [5,5]\n theta = np.pi/3\n\n box_geometry = BoxGeometry(length, width, center, theta)\n \n vertex = box_geometry.vertex(vertex_index)\n \n expected_vertex_x = center[0] - delta_x * np.cos(theta) + delta_y * np.sin(theta)\n expected_vertex_y = center[1] - delta_x * np.sin(theta) - delta_y * np.cos(theta)\n\n outcome = (abs(expected_vertex_x - vertex[0]) < 1e-6) and\\\n (abs(expected_vertex_y - vertex[1]) < 1e-6)\n\n self.assertTrue(outcome)", "def test_edge_match(self):\n e1 
= ed.Edge(\"O\",\"B\")\n e2 = ed.Edge(\"O\",\"T\")\n self.assertTrue(e1.matches(e2))", "def test_large_graph_true():\n from route_between_nodes import route_between_nodes\n G = {'a': ['b', 'c'], 'c': ['d', 'e'], 'e': ['f', 'g', 'h'], 'h': ['z']}\n assert route_between_nodes(G, 'a', 'z')", "def test_vertex_edge_count1(self):\n sum_of_the_degrees = sum( [ len( list( self.G[v] ) ) for v in self.G ] )\n number_of_edges = len( self.G.edges() )\n assert sum_of_the_degrees == number_of_edges * 2, \"sum of degrees: %i, num of edges: %i does not satisfy relationship\" % ( sum_of_the_degrees, number_of_edges )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
TensorFlow has its own wrapper for shapes because some entries could be None. This function turns such a shape into a list of ints; any None entry becomes 1. Arguments
def tensorshape_to_intlist(tensorshape): return list(map(lambda j: 1 if j is None else int(j), tensorshape))
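A minimal check of the helper above; since it only iterates over its argument, a plain Python list stands in for a tf.TensorShape here, which keeps the sketch independent of TensorFlow-version differences in how shapes iterate.

# The typical call site would pass something like some_tensor.shape instead.
print(tensorshape_to_intlist([None, 28, 28, 1]))   # -> [1, 28, 28, 1]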
[ "def _to_shape(shape):\n return tuple(int(sh) for sh in shape)", "def _shapes(x):\n def shape(x):\n try:\n return tuple([int(i) for i in x.shape])\n except Exception: # pylint: disable=broad-except\n return ()\n return tuple(nested_map(shape, x))", "def normalize_shape(shape):\n\n if shape is None:\n raise TypeError('shape is None')\n\n # handle 1D convenience form\n if isinstance(shape, integer_types):\n shape = (int(shape),)\n\n # normalize\n shape = tuple(int(s) for s in shape)\n return shape", "def canonical_weight_shapes(self):\n if not self._input_size:\n raise RuntimeError(\n \"%s.canonical_weight_shapes invoked before input shape is known\" %\n type(self).__name__)\n\n shapes = []\n for i in range(self._num_layers):\n shapes.extend(self._canonical_weight_shape(i))\n return shapes", "def get_shape(\n value: Union[types.FloatTensor, types.IntTensor]) -> types.IntTensor:\n result = value.shape\n return tf.shape(value) if None in result.as_list() else result", "def array_shapes(self):\n return None", "def map_shape(x: Dict[Text, tf.Tensor]) -> Dict[Text, Sequence[int]]:\n return tf.nest.map_structure(lambda t: list(tf.shape(t).numpy()), x)", "def resolve_shape(\n tensor: tf.Tensor,\n resolve_batch_size: bool = True) -> List[Union[tf.Tensor, int]]:\n with tf.name_scope('resolve_shape'):\n shape = tensor.get_shape().as_list()\n if None in shape:\n shape_dynamic = tf.shape(tensor)\n if shape[0] is None:\n shape[0] = shape_dynamic[0] if resolve_batch_size else -1\n for i in range(1, len(shape)):\n if shape[i] is None:\n shape[i] = shape_dynamic[i]\n return shape", "def input_shape(self):\n if context.in_eager_mode():\n raise RuntimeError('Layer.input_shape not supported in Eager mode.')\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined input shape.')\n all_input_shapes = set(\n [str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n input_shapes = self._inbound_nodes[0].input_shapes\n if len(input_shapes) == 1:\n return tuple(tensor_shape.TensorShape(input_shapes[0]).as_list())\n else:\n return [\n tuple(tensor_shape.TensorShape(shape).as_list())\n for shape in input_shapes\n ]\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n ' has multiple inbound nodes, '\n 'with different input shapes. Hence '\n 'the notion of \"input shape\" is '\n 'ill-defined for the layer. 
'\n 'Use `get_input_shape_at(node_index)` '\n 'instead.')", "def _infer_raw_shape(tt_cores):\n num_dims = len(tt_cores)\n num_tensor_shapes = len(tt_cores[0].shape) - 2\n raw_shape = [[] for _ in range(num_tensor_shapes)]\n for dim in range(num_dims):\n curr_core_shape = tt_cores[dim].shape \n for i in range(num_tensor_shapes):\n raw_shape[i].append(curr_core_shape[i+1])\n for i in range(num_tensor_shapes):\n raw_shape[i] = list(raw_shape[i])\n\n return tuple(raw_shape)", "def shape(x):\n\treturn tf.shape(x)", "def _shape_to_list(shape):\r\n if isinstance(shape, (list, tuple)):\r\n return shape\r\n tmp = []\r\n if shape == \"\":\r\n return ()\r\n for i in shape:\r\n tmp.append(i.value)\r\n return tmp", "def infer_shape(shape, axis):\n def squeeze_axis(shape, axis):\n if not axis:\n out_shape = [d for d in shape if d != 1]\n else:\n ndim = len(shape)\n out_shape = [shape[i] for i in range(ndim) if not (i in axis or (i - ndim) in axis)]\n if not out_shape:\n out_shape = [1]\n return out_shape\n if isinstance(shape, (list, tuple)):\n if isinstance(axis, int):\n axis = [axis]\n if isinstance(axis, (list, tuple)):\n return squeeze_axis(shape, axis)\n raise ValueError(\"Invalid axis for Squeeze.\")", "def arrays_shape(*arrays):\n for array in arrays:\n if array is not None:\n shape = array.shape\n return shape", "def infer_shape(layers, input_shape, key=None):\n input_shape = [x for x in input_shape]\n if input_shape[0] is None:\n input_shape[0] = 4 # should be more than 1, otherwise batch norm will not work\n x = torch.tensor(np.random.normal(size=input_shape), dtype=torch.float, device='cpu')\n for layer in layers:\n x = layer(x)\n if key is not None:\n x = x[key]\n output_shape = list(x.shape)\n output_shape[0] = None\n return output_shape", "def textboxes_feat_shapes_from_net(predictions, default_shapes=None):\r\n\tfeat_shapes = []\r\n\tfor l in predictions:\r\n\t\tprint(l)\r\n\t\tshape = l.get_shape().as_list()[1:3]\r\n\t\t# shape = tuple(l[1:3])\r\n\r\n\t\tif None in shape:\r\n\t\t\treturn default_shapes\r\n\t\telse:\r\n\t\t\tfeat_shapes.append(shape)\r\n\treturn feat_shapes", "def get_shapes(model: keras.Model) -> List[Tuple[int]]:\n model_weights = model.get_weights()\n shapes = [x.shape for x in model_weights]\n return shapes", "def list_shape(shape, elem=None):\n\n if (len(shape) == 0):\n return []\n\n def helper(elem, shape, i):\n if len(shape) - 1 == i:\n return [elem] * shape[i]\n return [ helper(elem, shape, i+1) for _ in range(shape[i]) ]\n\n return helper(elem, shape, 0)", "def shape_from_args(self):\r\n return u.Shape(self.rows, self.cols)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This constructor takes a reference to a TensorFlow Operation, Tensor, or Keras model and applies the two TensorFlow functions graph_util.convert_variables_to_constants and graph_util.remove_training_nodes to strip the graph of any nodes that are linked to training, leaving only the nodes needed for inference. In the resulting graph there should only be tf.Operations left that have one of the following types: [Const, MatMul, Add, BiasAdd, Conv2D, Reshape, MaxPool, AvgPool, Placeholder, Relu, Sigmoid, Tanh, LeakyRelu]. If the input is a Keras model, operations with type Pack, Shape, StridedSlice, and Prod are additionally ignored so that the Flatten layer can be used. Arguments: model — the tf.Tensor, tf.Operation, or Keras Sequential model to translate; session — an optional tf.Session holding the trained variables (if omitted, the Keras session or the current default session is used).
def __init__(self, model, session = None):
    output_names = None
    if issubclass(model.__class__, tf.Tensor):
        output_names = [model.op.name]
    elif issubclass(model.__class__, tf.Operation):
        output_names = [model.name]
    elif issubclass(model.__class__, Sequential):
        session = tf.keras.backend.get_session()
        output_names = [model.layers[-1].output.op.inputs[0].op.name]
        model = model.layers[-1].output.op
    elif issubclass(model.__class__, onnx.ModelProto):
        assert 0, 'not tensorflow model'
    else:
        import keras
        if issubclass(model.__class__, keras.engine.sequential.Sequential):
            session = keras.backend.get_session()
            output_names = [model.layers[-1].output.op.inputs[0].op.name]
            model = model.layers[-1].output.op
        else:
            assert 0, "ERAN can't recognize this input"

    if session is None:
        session = tf.get_default_session()

    tmp = graph_util.convert_variables_to_constants(session, model.graph.as_graph_def(), output_names)
    self.graph_def = graph_util.remove_training_nodes(tmp)
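A hedged usage sketch for this constructor. The enclosing class name is not shown in this record, so TFTranslator below is a stand-in, and the tiny two-layer graph is made up for illustration; it assumes the TF 1.x API used above:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[1, 784], name='x')
hidden = tf.layers.dense(x, 10, activation=tf.nn.relu)
logits = tf.layers.dense(hidden, 2)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Passing a tf.Tensor takes the first branch above: output_names = [logits.op.name]
    translator = TFTranslator(logits, sess)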
[ "def translate(self):\n\t\toperation_types = []\n\t\toperation_resources = []\n\t\treshape_map = {}\n\t\toperations_to_be_ignored = [\"Reshape\", \"Pack\", \"Shape\", \"StridedSlice\", \"Prod\", \"ConcatV2\"]\n\t\toperations_to_be_ignored_without_reshape = [\"NoOp\", \"Assign\", \"Const\", \"RestoreV2\", \"SaveV2\", \"IsVariableInitialized\", \"Identity\"]\n\t\t\n\t\twith tf.Graph().as_default() as graph:\n\t\t\twith tf.Session() as sess:\n\t\t\t\tself.sess = sess\n\t\t\t\ttf.import_graph_def(self.graph_def)\n\t\t\t\t#print(\"Operations \", graph.get_operations())\n\t\t\t\tfor op in graph.get_operations():\n\t\t\t\t\tif op.type in operations_to_be_ignored_without_reshape:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif op.type in operations_to_be_ignored:\n\t\t\t\t\t\tinput_name = op.inputs[0].name\n\t\t\t\t\t\toutput_name = op.outputs[0].name\n\t\t\t\t\t\tkind = op.inputs[0].op.type\n\t\t\t\t\t\tif kind in operations_to_be_ignored:\n\t\t\t\t\t\t\treshape_map[output_name] = reshape_map[input_name]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treshape_map[output_name] = input_name\n\t\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\t\toperation_types.append(op.type)\n\t\t\t\t\tinput_tensor_names = []\n\t\t\t\t\tfor inp in op.inputs:\n\t\t\t\t\t\tname = inp.name\n\t\t\t\t\t\tkind = inp.op.type\n\t\t\t\t\t\tif kind in operations_to_be_ignored:\n\t\t\t\t\t\t\tname = reshape_map[name]\n\t\t\t\t\t\tif kind == 'Const':\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tinput_tensor_names.append(name)\n\t\t\t\t\tin_out_info = (input_tensor_names, op.outputs[0].name, tensorshape_to_intlist(op.outputs[0].shape))\n\t\t\t\n\t\t\t\t\tif op.type == \"MatMul\":\n\t\t\t\t\t\tdeeppoly_res = self.matmul_resources(op) + in_out_info\n\t\t\t\t\t\tdeepzono_res = deeppoly_res \n\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\telif op.type == \"Add\":\n\t\t\t\t\t\tleft_type = op.inputs[0].op.type\n\t\t\t\t\t\tright_type = op.inputs[1].op.type\n\t\t\t\t\t\tif left_type == 'Const' and right_type == 'Const':\n\t\t\t\t\t\t\tassert 0, \"we don't support the addition of two constants yet\"\n\t\t\t\t\t\telif left_type == 'Const' or right_type == 'Const':\n\t\t\t\t\t\t\tdeeppoly_res = self.add_resources(op) + in_out_info\n\t\t\t\t\t\t\tdeepzono_res = deeppoly_res\n\t\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toperation_types[-1] = \"Resadd\"\n\t\t\t\t\t\t\toperation_resources.append({'deepzono':in_out_info, 'deeppoly':in_out_info})\n\t\t\t\t\telif op.type == \"BiasAdd\":\n\t\t\t\t\t\tif op.inputs[1].op.type == 'Const':\n\t\t\t\t\t\t\tdeeppoly_res = self.add_resources(op) + in_out_info\n\t\t\t\t\t\t\tdeepzono_res = deeppoly_res\n\t\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tassert 0, \"this bias add doesn't meet our assumption (bias is constant)\"\n\t\t\t\t\telif op.type == \"Conv2D\":\n\t\t\t\t\t\tfilters, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right = self.conv2d_resources(op)\n\t\t\t\t\t\tdeeppoly_res = (filters, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right) + in_out_info\n\t\t\t\t\t\tdeepzono_res = deeppoly_res \n\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\telif op.type == \"MaxPool\" or op.type == \"AvgPool\":\n\t\t\t\t\t\timage_shape, window_size, strides, pad_top, pad_left, pad_bottom, pad_right = self.pool_resources(op)\n\t\t\t\t\t\tdeeppoly_res = 
(image_shape, window_size, strides, pad_top, pad_left, pad_bottom, pad_right) + in_out_info\n\t\t\t\t\t\tdeepzono_res = deeppoly_res\n\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\telif op.type in [\"Placeholder\", \"PlaceholderWithDefault\"]:\n\t\t\t\t\t\tdeeppoly_res = in_out_info\n\t\t\t\t\t\tdeepzono_res = in_out_info\n\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\telif op.type in [\"Relu\", \"Sigmoid\", \"Tanh\", \"Sign\", \"Softmax\", \"LeakyRelu\"]:\n\t\t\t\t\t\tdeeppoly_res = self.nonlinearity_resources(op) + in_out_info\n\t\t\t\t\t\tdeepzono_res = deeppoly_res\n\t\t\t\t\t\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\t#elif op.type == \"ConcatV2\":\n\t\t\t\t\t#\tprint(\"Concatv2\")\n\t\t\t\t\t#\tdeeppoly_res = self.concat_resources(op)\n\t\t\t\t\t#\tdeepzono_res = deeppoly_res + in_out_info\n\t\t\t\t\t#\toperation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})\n\t\t\t\t\telse:\n\t\t\t\t\t\t#print(\"operation type1 \",in_out_info,op.inputs[0].shape,op.inputs[1].shape)\n\t\t\t\t\t\tassert 0, \"Operations of type \" + op.type + \" in \" + str([o.type for o in graph.get_operations()]) + \" are not yet supported.\"\n\t\t\n\t\t\t\treturn operation_types, operation_resources", "def __init__(self, explained_model, model_builder, masking_operation, loss=categorical_crossentropy,\n model_filepath=None, flatten_for_explained_model=False, num_models=1, downsample_factors=(1,)):\n super(TensorflowCXPlain, self).__init__(explained_model, model_builder, masking_operation, loss,\n downsample_factors, num_models)\n\n self.model_filepath = model_filepath\n self.model = None\n self.prediction_model = None\n self.last_fit_score = None\n self.flatten_for_explained_model = flatten_for_explained_model", "def build_model():\n g = tf.Graph()\n with g.as_default():\n # inputs, labels = imagenet_input(is_training=False)\n inputs = np.random.randint(0, 255, size=(1,224,224,3))\n labels = np.random.randint(0, 10, size=(1,10))\n inputs = tf.Variable(inputs,dtype=tf.float32)\n labels = tf.Variable(labels,dtype=tf.float32)\n scope = mobilenet_v1.mobilenet_v1_arg_scope(\n is_training=False, weight_decay=0.0)\n with slim.arg_scope(scope):\n logits, _ = mobilenet_v1.mobilenet_v1(\n inputs,\n is_training=False,\n depth_multiplier=FLAGS.depth_multiplier,\n num_classes=FLAGS.num_classes)\n\n if FLAGS.quantize:\n tf.contrib.quantize.create_eval_graph()\n\n eval_ops = metrics(logits, labels)\n\n return g, eval_ops", "def prepare(cls, model, device='CPU', **kwargs):\n super(TensorflowBackendBase, cls).prepare(model, device, **kwargs)\n\n predict_net = (cls.onnx_graph_to_tensorflow_net(\n model.graph, opset=model.opset_import[0].version))\n\n return TensorflowRep(predict_net)", "def __call__(self, *args, **kwargs):\n new_node = Tensor()\n new_node.op = self\n return new_node", "def graph_conv_net(batch_size, prior, num_task):\n tg = TensorGraph(use_queue=False)\n if prior == True:\n add_on = num_task\n else:\n add_on = 0\n atom_features = Feature(shape=(None, 75 + 2*add_on))\n circular_features = Feature(shape=(batch_size, 256), dtype=tf.float32)\n\n degree_slice = Feature(shape=(None, 2), dtype=tf.int32)\n membership = Feature(shape=(None,), dtype=tf.int32)\n deg_adjs = []\n for i in range(0, 10 + 1):\n deg_adj = Feature(shape=(None, i + 1), dtype=tf.int32)\n deg_adjs.append(deg_adj)\n\n gc1 = GraphConv(\n 64 + add_on,\n activation_fn=tf.nn.elu,\n 
in_layers=[atom_features, degree_slice, membership] + deg_adjs)\n batch_norm1 = BatchNorm(in_layers=[gc1])\n gp1 = GraphPool(in_layers=[batch_norm1, degree_slice, membership] + deg_adjs)\n\n\n gc2 = GraphConv(\n 64 + add_on,\n activation_fn=tf.nn.elu,\n in_layers=[gc1, degree_slice, membership] + deg_adjs)\n batch_norm2 = BatchNorm(in_layers=[gc2])\n gp2 = GraphPool(in_layers=[batch_norm2, degree_slice, membership] + deg_adjs)\n\n add = Concat(in_layers = [gp1, gp2])\n add = Dropout(0.5, in_layers =[add])\n dense = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[add])\n batch_norm3 = BatchNorm(in_layers=[dense])\n readout = GraphGather(\n batch_size=batch_size,\n activation_fn= tf.nn.tanh,\n in_layers=[batch_norm3, degree_slice, membership] + deg_adjs)\n batch_norm4 = BatchNorm(in_layers=[readout])\n\n dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features])\n dense1 = BatchNorm(in_layers=[dense1])\n dense1 = Dropout(0.5, in_layers =[dense1])\n dense1 = Dense(out_channels=128, activation_fn=tf.nn.elu, in_layers=[circular_features])\n dense1 = BatchNorm(in_layers=[dense1])\n dense1 = Dropout(0.5, in_layers =[dense1])\n merge_feat = Concat(in_layers = [dense1, batch_norm4])\n merge = Dense(out_channels=256, activation_fn=tf.nn.elu, in_layers=[merge_feat])\n costs = []\n labels = []\n for task in range(num_task):\n classification = Dense(\n out_channels=2, activation_fn=None,in_layers=[merge])\n softmax = SoftMax(in_layers=[classification])\n tg.add_output(softmax)\n label = Label(shape=(None, 2))\n labels.append(label)\n cost = SoftMaxCrossEntropy(in_layers=[label, classification])\n costs.append(cost)\n all_cost = Stack(in_layers=costs, axis=1)\n weights = Weights(shape=(None, num_task))\n loss = WeightedError(in_layers=[all_cost, weights])\n tg.set_loss(loss)\n #if prior == True:\n # return tg, atom_features,circular_features, degree_slice, membership, deg_adjs, labels, weights#, prior_layer\n return tg, atom_features, circular_features ,degree_slice, membership, deg_adjs, labels, weights", "def __build_train_op(self) -> None:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)\n # train only custom variables that are trainable\n var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.resnet.custom_scope.name)\n accum_vars = [tf.get_variable('{}/grad_accum'.format(var.op.name), var.shape, tf.float32, tf.zeros_initializer,\n trainable=False) for var in var_list]\n self.zero_gradients_op = [var.assign(tf.zeros_like(var)) for var in accum_vars]\n gradients = optimizer.compute_gradients(loss=self.resnet.loss, var_list=var_list,\n aggregation_method=tf.AggregationMethod.ADD_N)\n\n # insert UPDATE_OPS if needed\n self.accumulate_gradients_op = [accum_vars[i].assign_add(g[0]) for i, g in enumerate(gradients)]\n\n grad_scaling = 1. 
/ self.virtual_batch_size_factor\n self.apply_gradients_op = optimizer.apply_gradients([\n (tf.multiply(accum_vars[i], grad_scaling), # accumulated, averaged gradients\n g[1]) # variable to update\n for i, g in enumerate(gradients)])", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n if 'input' in self.elmo_positions:\n if self.elmo_utils:\n self._elmo_embed_input_with_cache()\n\n if self.algo == 'MEMNET':\n # self._run_memory_network(gated=self.gated_memnet)\n raise NotImplementedError(\"self.algo {} is not implemented\".format(self.algo))\n else:\n # encode layers\n if self.dial_encode == 'CONCAT':\n self._encode()\n self._word_match_for_concated()\n elif self.dial_encode == 'HIERARCHY':\n # for now, we still use the concated encoding at the same time\n self._encode()\n # hierarchy encode\n self._hierarchy_encode()\n self._word_match_for_concated()\n else:\n raise NotImplementedError(\"dial_encode {} is not implemented\".format(self.dial_encode))\n\n if 'SEQTAG' in self.decode_goal:\n self._decode_seq_tags()\n else:\n self._decode_multiclass()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if self.train:\n with tf.control_dependencies(update_ops):\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def _build_graph(self):\n self.op_size = len(self._ops)\n op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]\n self._add_connections(op_node_connections)\n for i in range(self.op_size):\n self._uses[i].update(self._ops[i].input_arg_names())\n self._defs[i].update(self._ops[i].output_arg_names())", "def from_onnx(model, name=None):\n execute_ws = workspace.get_workspace()\n graph_str = execute_ws._impl.PrepareONNXModel(model)\n graph_def = dragon_pb2.GraphDef()\n graph_def.ParseFromString(graph_str)\n graph_def.name = 'Graph' if name is None else name\n GraphLib._add_device(graph_def)\n GraphLib._add_optimization(graph_def)\n for input in graph_def.input:\n execute_ws.create_tensor(input)\n graph_def.name = execute_ws.create_graph(graph_def)\n return GraphExecutionContext(graph_def, execute_ws)", "def _construct_graph(self, model: torch.nn.Module, model_input: Union[torch.Tensor, Tuple]):\n module_tensor_shapes_map = ConnectedGraph._generate_module_tensor_shapes_lookup_table(model, model_input)\n trace = torch.jit.trace(model, model_input, **jit_trace_args)\n self._parse_top_level_trace(trace, model)\n self._optimize_connected_graph()\n self._transform_ops_and_products_to_connected_graph_convention()\n self._fill_op_and_product_properties(module_tensor_shapes_map)\n\n # In certain models, a 'mangled' version of nodes like Conv or BN appears in the trace which exposes parameters\n # as constant inputs to the node. In such cases, remove constant inputs since param products will be created\n # to track them.\n self._remove_inputs_for_ops()\n # Create parameters for ops such as conv, batchnorm, etc.\n self._create_param_products()\n\n # For each split in the model, insert a corresponding split Op in the connected graph.\n ops_list = [op for op in self._ops.values()]\n for op in ops_list:\n self._determine_split_behavior_for_op_and_insert_split_op_in_connected_graph(op)", "def train():\n\n # Set the random seeds for reproducibility. 
DO NOT CHANGE.\n tf.set_random_seed(42)\n np.random.seed(42)\n\n ########################\n # PUT YOUR CODE HERE #\n ########################\n # Import dataset\n cifar10 = cifar10_utils.get_cifar10(data_dir= FLAGS.data_dir)\n\n # Create session\n tf.reset_default_graph()\n sess = tf.Session()\n\n # Create MLP object\n conv_net = ConvNet(n_classes = 10,\n weight_initializer = WEIGHT_INITIALIZATION_DICT[FLAGS.weight_init](FLAGS.weight_init_scale),\n weight_regularizer = WEIGHT_REGULARIZER_DICT[FLAGS.weight_reg](FLAGS.weight_reg_strength) if WEIGHT_REGULARIZER_DICT[FLAGS.weight_reg] is not None else None)\n\n\n\n # Setup placeholders for input data and labels\n with tf.name_scope('input'):\n x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='x-input')\n y = tf.placeholder(tf.float32, [None, conv_net.n_classes], name='y-input')\n\n keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')\n #tf.summary.scalar('keep_prob', keep_prob)\n\n #Define global step and optimizer\n global_step = tf.Variable(0, trainable = False, name = 'global_step')\n optimizer = OPTIMIZER_DICT[FLAGS.optimizer](learning_rate = FLAGS.learning_rate)\n\n # Define ops\n logits_op = conv_net.inference(x, keep_prob)\n loss_op = conv_net.loss(logits_op, y)\n accuracy_op = conv_net.accuracy(logits_op, y)\n train_op = conv_net.train_step(loss_op, {'optimizer': optimizer, 'global_step': global_step})\n conf_mat_op = conv_net.confusion_matrix(logits_op, y)\n summary_op = tf.summary.merge_all()\n\n save_model = FLAGS.checkpoint_dir is not None\n write_log = FLAGS.log_dir is not None\n\n # If enabled, set up log writers\n if write_log:\n train_log_path = os.path.join(FLAGS.log_dir, '{}_train'.format(FLAGS.name))\n _check_path(train_log_path)\n train_log_writer = tf.summary.FileWriter(train_log_path, graph = sess.graph)\n\n test_log_path = os.path.join(FLAGS.log_dir, '{}_test'.format(FLAGS.name))\n _check_path(test_log_path)\n test_log_writer = tf.summary.FileWriter(test_log_path, graph = sess.graph)\n\n # Run init op\n init_op = tf.global_variables_initializer()\n local_init_op = tf.local_variables_initializer()\n sess.run(fetches=[init_op, local_init_op])\n\n if FLAGS.data_augmentation:\n img_generator = tf.keras.preprocessing.image.ImageDataGenerator(\n rotation_range = 10,\n shear_range = 0.1,\n zoom_range = 0.1,\n fill_mode = 'nearest',\n data_format = 'channels_last')\n\n cifar10_augmented = img_generator.flow(x = cifar10.train.images,\n y = cifar10.train.labels,\n batch_size = FLAGS.batch_size)\n\n tr_stats = []\n test_stats = []\n\n for tr_step in range(FLAGS.max_steps):\n\n # Get next batch\n if FLAGS.data_augmentation:\n x_tr, y_tr = cifar10_augmented.next()\n else:\n x_tr, y_tr = cifar10.train.next_batch(FLAGS.batch_size)\n\n tr_feed = {x: x_tr, y: y_tr, keep_prob: 1. 
- FLAGS.dropout_rate}\n fetches = [train_op, loss_op, accuracy_op]\n\n # Run train step on training set\n if tr_step % FLAGS.print_freq == 0 and write_log:\n fetches += [summary_op]\n _, tr_loss, tr_accuracy, tr_summary = sess.run(fetches = fetches, feed_dict = tr_feed)\n train_log_writer.add_summary(tr_summary, tr_step)\n else:\n _, tr_loss, tr_accuracy = sess.run(fetches = fetches, feed_dict = tr_feed)\n\n tr_stats += [[tr_step , tr_loss, tr_accuracy]]\n\n # Print statistics\n if tr_step % FLAGS.print_freq == 0:\n print('Step:{} Loss:{:.4f}, Accuracy:{:.4f}'.format(tr_step, tr_loss, tr_accuracy))\n\n # Test set evaluation\n if tr_step % FLAGS.eval_freq == 0 or tr_step == FLAGS.max_steps-1:\n #Use 10 batches to estimate test performance with less variance\n x_test, y_test = cifar10.test.next_batch(10*FLAGS.batch_size)\n test_feed = {x: x_test, y: y_test, keep_prob: 1.0}\n test_loss, test_accuracy, test_logits, test_summary, test_confusion_matrix = sess.run(\n fetches = [loss_op, accuracy_op, logits_op, summary_op, conf_mat_op],\n feed_dict = test_feed)\n if write_log:\n test_log_writer.add_summary(test_summary, tr_step)\n\n test_stats += [[tr_step, test_loss, test_accuracy]]\n\n print('TEST - Loss:{:.4f}, Accuracy:{:.4f}'.format(test_loss, test_accuracy))\n #print('TEST - Conf Matrix \\n {} \\n'.format(test_confusion_matrix))\n\n # Save checkpoint model\n if tr_step % FLAGS.checkpoint_freq == 0 and save_model:\n save_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)\n saver = tf.train.Saver()\n _check_path(save_dir)\n saver.save(sess, save_path = os.path.join(save_dir, 'model.ckpt'))\n\n\n # Once done with training, close writers\n if write_log:\n train_log_writer.close()\n test_log_writer.close()\n\n print(tr_step, tr_loss, tr_accuracy, test_loss, test_accuracy)\n pickle.dump( tr_stats, open( \"./pickles/\" + FLAGS.name + \"_train.p\", \"wb\" ) )\n pickle.dump( test_stats, open( \"./pickles/\" + FLAGS.name + \"_test.p\", \"wb\" ) )\n\n # Save final trained model\n if save_model:\n save_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.name)\n saver = tf.train.Saver()\n _check_path(save_dir)\n saver.save(sess, save_path = os.path.join(save_dir, 'model.ckpt'))\n\n ########################\n # END OF YOUR CODE #\n ########################", "def build_inference(self, reuse=False):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n\n self.build_placeholders()\n\n map, goal, b0, isstart, act_in, obs_in, weight, act_label = self.placeholders # TODO clean up\n\n # types conversions\n map = tf.to_float(map)\n goal = tf.to_float(goal)\n isstart = tf.to_float(isstart)\n isstart = tf.reshape(isstart, [self.batch_size] + [1]*(b0.get_shape().ndims-1))\n act_in = tf.to_int32(act_in)\n obs_in = tf.to_float(obs_in)\n act_label = tf.to_int32(act_label)\n\n outputs = []\n\n # pre-compute context, fixed through time\n with tf.variable_scope(\"planner\"):\n Q, _, _ = PlannerNet.VI(map, goal, self.params)\n with tf.variable_scope(\"filter\"):\n Z = FilterNet.f_Z(map, self.params)\n\n self.context_tensors = [Q, Z]\n\n # create variable for hidden belief (equivalent to the hidden state of an RNN)\n self.belief = tf.Variable(np.zeros(b0.get_shape().as_list(), 'f'), trainable=False, name=\"hidden_belief\")\n\n # figure out current b. 
b = b0 if isstart else blast\n b = (b0 * isstart) + (self.belief * (1-isstart))\n\n for step in range(self.step_size):\n # filter\n with tf.variable_scope(\"filter\") as step_scope:\n if step >= 1:\n step_scope.reuse_variables()\n b = FilterNet.beliefupdate(Z, b, act_in[step], obs_in[step], self.params)\n\n # planner\n with tf.variable_scope(\"planner\") as step_scope:\n if step >= 1:\n step_scope.reuse_variables()\n action_pred = PlannerNet.policy(Q, b, self.params)\n outputs.append(action_pred)\n\n # create op that updates the belief\n self.update_belief_op = self.belief.assign(b)\n\n # compute loss (cross-entropy)\n logits = tf.stack(values=outputs, axis=0) # shape is [step_size, batch_size, num_action]\n\n # logits = tf.reshape(logits, [self.step_size*self.batch_size, self.params.num_action])\n # act_label = tf.reshape(act_label, [-1])\n # weight = tf.reshape(weight, [-1])\n\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=act_label)\n\n # weight loss. weights are 0.0 for steps after the end of a trajectory, otherwise 1.0\n loss = loss * weight\n loss = tf.reduce_mean(loss, axis=[0, 1], name='xentropy')\n\n self.logits = logits\n self.loss = loss", "def _build_model(self):\n input = layers.Input(shape=self.input_shape)\n model = tf.keras.Model(\n inputs=input, outputs=self.network_function(input)\n )\n return model", "def fuse_op(graph_def, input_nodes, output_nodes, output_dtypes,\n output_quantized, op_name, op_type):\n\n if not isinstance(graph_def, graph_pb2.GraphDef):\n raise TypeError(\"graph_def must be a graph_pb2.GraphDef proto.\")\n\n if isinstance(input_nodes, six.string_types):\n raise TypeError(\"input_nodes must be a list.\")\n\n if isinstance(output_nodes, six.string_types):\n raise TypeError(\"output_nodes must be a list.\")\n\n name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(\n graph_def)\n _assert_nodes_are_present(name_to_node, input_nodes + output_nodes)\n\n # Nodes upto and including input_nodes\n reachable_by_input = _bfs_for_reachable_nodes(input_nodes, name_to_input_name)\n # Nodes upto and including output_nodes\n reachable_by_output = _bfs_for_reachable_nodes(output_nodes,\n name_to_input_name)\n\n # Set of nodes in the list input_nodes\n input_nodes_set = set(input_nodes)\n\n # Set of nodes in the list output_nodes\n output_nodes_set = set(output_nodes)\n\n nodes_post_output = []\n for node in graph_def.node:\n n = _node_name(node.name)\n if n in reachable_by_output:\n if n not in reachable_by_input and n not in output_nodes_set:\n # n is between input and output, i.e., part of the fused op\n next_to_visit = [n]\n visited = set()\n while next_to_visit:\n cur_node = next_to_visit[0]\n visited.add(cur_node)\n del next_to_visit[0]\n if cur_node in reachable_by_input and cur_node not in input_nodes_set:\n raise TypeError(\"Node %s uses input %s not in input_nodes.\" %\n (n, cur_node))\n if cur_node not in input_nodes_set:\n next_to_visit += [\n input_node for input_node in name_to_input_name[cur_node]\n if input_node not in visited\n ]\n elif n not in reachable_by_input:\n nodes_post_output.append(n)\n\n # Add all nodes upto the input nodes\n out = graph_pb2.GraphDef()\n reachable_by_input_sorted = sorted(\n list(reachable_by_input), key=lambda n: name_to_seq_num[n])\n for node in reachable_by_input_sorted:\n out.node.extend([copy.deepcopy(name_to_node[node])])\n\n # Add the custom op\n new_node = node_def_pb2.NodeDef()\n for node in input_nodes:\n new_node.input.append(node)\n 
new_node.attr[\"_output_types\"].list.type[:] = output_dtypes\n new_node.attr[\"_output_quantized\"].b = output_quantized\n new_node.op = op_type\n new_node.name = op_name\n out.node.extend([new_node])\n\n # Add the nodes in the output of the custom op\n for index, n in enumerate(output_nodes):\n assert len(name_to_node[n].input) == 1\n new_node = copy.deepcopy(name_to_node[n])\n del new_node.input[:]\n new_node.input.append(op_name + (\":\" + str(index) if index != 0 else \"\"))\n out.node.extend([new_node])\n\n # Add the nodes post output_nodes\n for n in nodes_post_output:\n out.node.extend([copy.deepcopy(name_to_node[n])])\n\n out.library.CopyFrom(graph_def.library)\n out.versions.CopyFrom(graph_def.versions)\n return out", "def build(self, hp, inputs=None):\n input_node = [tf.keras.layers.Flatten()(node) if len(node.shape) > 2 else node for node in nest.flatten(inputs)]\n\n # Trim tensors to the minimum dimension\n shape_set = set(node.shape[1] for node in input_node) # shape[0] is the batch size\n if len(shape_set) > 1:\n min_len = min(shape_set)\n input_node = [tf.keras.layers.Dense(min_len)(node)\n if node.shape[1] != min_len else node for node in input_node]\n\n elementwise_type = self.elementwise_type or hp.Choice('elementwise_type',\n [\"sum\", \"average\", \"multiply\", \"max\", \"min\"],\n default='average')\n if elementwise_type == \"sum\":\n output_node = tf.add_n(input_node)\n elif elementwise_type == \"average\":\n output_node = tf.reduce_mean(input_node, axis=0)\n elif elementwise_type == \"multiply\":\n output_node = tf.reduce_prod(input_node, axis=0)\n elif elementwise_type == \"max\":\n output_node = tf.reduce_max(input_node, axis=[0])\n elif elementwise_type == \"min\":\n output_node = tf.reduce_min(input_node, axis=[0])\n else:\n output_node = tf.add_n(input_node)\n return output_node", "def create_nncf_graph(model: ov.Model) -> NNCFGraph:\n nncf_graph = NNCFGraph()\n visited = set()\n read_value_nodes = [op for op in model.get_ops() if op.get_type_name() == \"ReadValue\"]\n inference_nodes = model.get_parameters() + read_value_nodes\n\n while inference_nodes:\n node = inference_nodes[0]\n inference_nodes = inference_nodes[1:]\n if node.get_friendly_name() not in visited:\n GraphConverter._add_nncf_node(node, nncf_graph)\n visited.add(node.get_friendly_name())\n for out in node.outputs():\n for inp in sorted(out.get_target_inputs(), key=lambda inp: inp.get_node().get_friendly_name()):\n inference_nodes.append(inp.get_node())\n\n for node in model.get_ops():\n metatype = GraphConverter._get_node_metatype(node)\n # Add nodes from constant subgraphs\n node_name = node.get_friendly_name()\n if node_name not in visited:\n GraphConverter._add_nncf_node(node, nncf_graph)\n # Set const port id\n elif metatype in METATYPES_WITH_CONST_PORT_ID:\n const_attrs, act_attrs = {}, {}\n for inp in GraphConverter._filter_weight_input_ports(node.inputs(), metatype):\n inp_name = inp.get_source_output().get_node().get_friendly_name()\n if inp_name in visited:\n continue\n\n const_port_id = inp.get_index()\n const_node = get_operation_const_op(node, const_port_id)\n ov_dtype = const_node.get_element_type().get_type_name()\n if GraphConverter.convert_to_nncf_dtype(ov_dtype) == Dtype.INTEGER:\n continue\n\n const_attrs[const_port_id] = {\n \"name\": const_node.get_friendly_name(),\n \"shape\": tuple(const_node.get_output_shape(0)),\n }\n\n if metatype == OVMatMulMetatype:\n node_inputs = node.inputs()\n attribute_names = [\"transpose_a\", \"transpose_b\"]\n node_attributes = 
node.get_attributes()\n const_transpose_name = attribute_names[const_port_id]\n const_attrs[const_port_id][\"transpose\"] = node_attributes[const_transpose_name]\n\n act_port_id = abs(const_port_id - 1)\n act_attrs[\"transpose\"] = node_attributes[attribute_names[act_port_id]]\n partial_shape = node_inputs[act_port_id].get_partial_shape()\n act_attrs[\"shape\"] = tuple(partial_shape.get_max_shape())\n\n if const_attrs or act_attrs:\n nncf_node = nncf_graph.get_node_by_name(node_name)\n nncf_node.layer_attributes = OVConstantLayerAttributes(const_attrs, act_attrs)\n\n GraphConverter._add_edges_to_nncf_graph(model, nncf_graph)\n return nncf_graph", "def _build_graph(self):\n\n # build simple architecture to multiply two numbers\n w1 = keras.layers.Input(shape=(1,), name=\"w1\")\n w2 = keras.layers.Input(shape=(1,), name=\"w2\")\n\n add = keras.layers.add([w1, w2])\n mult = keras.layers.multiply([w1, w2])\n out = keras.layers.concatenate([add, mult])\n\n return keras.models.Model(inputs=[w1, w2], outputs=out)", "def __init__(self,\n filter_size_dict=\"imagenet_auto\",\n filter_size_reduction_factor=4,\n auxiliary_classifier_weights=[0.3,0.3],\n use_mini_model=False, #skips three of the inception layers that dont change output size\n dtype=tf.float32,\n input_shape=[64,224,224,3], #224x224x3 imagenet images\n output_shape=[64,1,1,1000], \n scope=None):\n \n self.scope = \"inceptionV1_classifier\" if not scope else scope #no empty strings, None.\n self.dtype=dtype\n self.model = None\n self.input_shape, self.output_shape, self.filter_size_dict = self.validate_dimensions(input_shape, output_shape, filter_size_dict)\n \n #TODO - move this below logic elsewhere\n if filter_size_reduction_factor not in [1,2,4,8]:\n raise ValueError(\"The filter_size_reduction_factor argument must be 1,2,4 or 8. This is because all of the depth values in the network need to be divisible by this number, otherwise the architecture must be changed.\")\n else:\n self.filter_size_reduction_factor = filter_size_reduction_factor\n \n if filter_size_dict == \"imagenet_auto\":\n self.filter_size_dict, self.layer_output_shapes = data_utils.load_imagenet_architecture_filters(model=\"v1\", reduction_factor=4, batch_size=self.input_shape[0])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The constructor has produced a graph_def with the help of the functions graph_util.convert_variables_to_constants and graph_util.remove_training_nodes. translate() takes that graph_def, imports it, and translates it into two lists which can then be processed by an Optimizer object. Return: a tuple (operation_types, operation_resources), where operation_types lists the type of each relevant tf.Operation in the order it is encountered and operation_resources is a parallel list of dicts holding the 'deepzono' and 'deeppoly' resources for that operation.
def translate(self):
    operation_types = []
    operation_resources = []
    reshape_map = {}
    operations_to_be_ignored = ["Reshape", "Pack", "Shape", "StridedSlice", "Prod", "ConcatV2"]
    operations_to_be_ignored_without_reshape = ["NoOp", "Assign", "Const", "RestoreV2", "SaveV2", "IsVariableInitialized", "Identity"]

    with tf.Graph().as_default() as graph:
        with tf.Session() as sess:
            self.sess = sess
            tf.import_graph_def(self.graph_def)
            #print("Operations ", graph.get_operations())
            for op in graph.get_operations():
                if op.type in operations_to_be_ignored_without_reshape:
                    continue
                elif op.type in operations_to_be_ignored:
                    input_name = op.inputs[0].name
                    output_name = op.outputs[0].name
                    kind = op.inputs[0].op.type
                    if kind in operations_to_be_ignored:
                        reshape_map[output_name] = reshape_map[input_name]
                    else:
                        reshape_map[output_name] = input_name
                    continue

                operation_types.append(op.type)
                input_tensor_names = []
                for inp in op.inputs:
                    name = inp.name
                    kind = inp.op.type
                    if kind in operations_to_be_ignored:
                        name = reshape_map[name]
                    if kind == 'Const':
                        continue
                    input_tensor_names.append(name)
                in_out_info = (input_tensor_names, op.outputs[0].name, tensorshape_to_intlist(op.outputs[0].shape))

                if op.type == "MatMul":
                    deeppoly_res = self.matmul_resources(op) + in_out_info
                    deepzono_res = deeppoly_res
                    operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                elif op.type == "Add":
                    left_type = op.inputs[0].op.type
                    right_type = op.inputs[1].op.type
                    if left_type == 'Const' and right_type == 'Const':
                        assert 0, "we don't support the addition of two constants yet"
                    elif left_type == 'Const' or right_type == 'Const':
                        deeppoly_res = self.add_resources(op) + in_out_info
                        deepzono_res = deeppoly_res
                        operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                    else:
                        operation_types[-1] = "Resadd"
                        operation_resources.append({'deepzono':in_out_info, 'deeppoly':in_out_info})
                elif op.type == "BiasAdd":
                    if op.inputs[1].op.type == 'Const':
                        deeppoly_res = self.add_resources(op) + in_out_info
                        deepzono_res = deeppoly_res
                        operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                    else:
                        assert 0, "this bias add doesn't meet our assumption (bias is constant)"
                elif op.type == "Conv2D":
                    filters, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right = self.conv2d_resources(op)
                    deeppoly_res = (filters, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right) + in_out_info
                    deepzono_res = deeppoly_res
                    operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                elif op.type == "MaxPool" or op.type == "AvgPool":
                    image_shape, window_size, strides, pad_top, pad_left, pad_bottom, pad_right = self.pool_resources(op)
                    deeppoly_res = (image_shape, window_size, strides, pad_top, pad_left, pad_bottom, pad_right) + in_out_info
                    deepzono_res = deeppoly_res
                    operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                elif op.type in ["Placeholder", "PlaceholderWithDefault"]:
                    deeppoly_res = in_out_info
                    deepzono_res = in_out_info
                    operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                elif op.type in ["Relu", "Sigmoid", "Tanh", "Sign", "Softmax", "LeakyRelu"]:
                    deeppoly_res = self.nonlinearity_resources(op) + in_out_info
                    deepzono_res = deeppoly_res
                    operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                #elif op.type == "ConcatV2":
                #    print("Concatv2")
                #    deeppoly_res = self.concat_resources(op)
                #    deepzono_res = deeppoly_res + in_out_info
                #    operation_resources.append({'deepzono':deepzono_res, 'deeppoly':deeppoly_res})
                else:
                    #print("operation type1 ",in_out_info,op.inputs[0].shape,op.inputs[1].shape)
                    assert 0, "Operations of type " + op.type + " in " + str([o.type for o in graph.get_operations()]) + " are not yet supported."

            return operation_types, operation_resources
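A sketch of how the two returned lists can be consumed, assuming translator wraps the constructor from the previous record; the unpacking relies on every resource tuple ending with the in_out_info triple built above:

operation_types, operation_resources = translator.translate()
for op_type, res in zip(operation_types, operation_resources):
    # every resource tuple ends with (input_tensor_names, output_tensor_name, output_shape)
    input_names, output_name, output_shape = res['deeppoly'][-3:]
    print(op_type, input_names, '->', output_name, output_shape)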
[ "def _build_graph(self):\n self.op_size = len(self._ops)\n op_node_connections = [(i, i + 1) for i in range(self.op_size - 1)]\n self._add_connections(op_node_connections)\n for i in range(self.op_size):\n self._uses[i].update(self._ops[i].input_arg_names())\n self._defs[i].update(self._ops[i].output_arg_names())", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HFlattening_SRULE_Product_ConstantRHS, self).__init__(name='HFlattening_SRULE_Product_ConstantRHS', num_nodes=7, edges=[])\n \n # Add the edges\n self.add_edges([(0, 5), (5, 1), (0, 6), (6, 2), (3, 4), (4, 0)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'Simulink'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"\"\"\"\n self[\"GUID__\"] = UUID('454906ae-79a9-4b8a-8c02-92a177c0510f')\n \n # Set the node attributes\n self.vs[0][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[0][\"MT_post__Position\"] = \"\"\"return PreNode('1')['Position']\"\"\"\n self.vs[0][\"MT_label__\"] = \"\"\"5\"\"\"\n self.vs[0][\"MT_post__gain\"] = \"\"\"return PreNode('1')['value']\"\"\"\n self.vs[0][\"MT_post__Name\"] = \"\"\"return PreNode('1')['Name']+'Gain'\"\"\"\n self.vs[0][\"mm__\"] = \"\"\"MT_post__Gain\"\"\"\n self.vs[0][\"GUID__\"] = UUID('e1f4f049-33c6-4551-bdb6-a2e87dfb8546')\n self.vs[1][\"MT_label__\"] = \"\"\"0\"\"\"\n self.vs[1][\"MT_post__Name\"] = \"\"\"return 1\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"MT_post__Port_Input\"\"\"\n self.vs[1][\"GUID__\"] = UUID('32364107-a231-4441-b66a-b7256a9ee831')\n self.vs[2][\"MT_label__\"] = \"\"\"4\"\"\"\n self.vs[2][\"MT_post__Name\"] = \"\"\"return 1\"\"\"\n self.vs[2][\"mm__\"] = \"\"\"MT_post__Port_Output\"\"\"\n self.vs[2][\"GUID__\"] = UUID('6da1eb11-ddad-4cfe-83d2-f500fbc3cede')\n self.vs[3][\"MT_label__\"] = \"\"\"99\"\"\"\n self.vs[3][\"MT_post__Name\"] = \"\"\"return attr_value\"\"\"\n self.vs[3][\"mm__\"] = \"\"\"MT_post__SubSystem\"\"\"\n self.vs[3][\"GUID__\"] = UUID('779013ba-4e95-4458-b085-b745eca82c4f')\n self.vs[4][\"MT_label__\"] = \"\"\"9900000005\"\"\"\n self.vs[4][\"MT_post__Name\"] = \"\"\"return attr_value\"\"\"\n self.vs[4][\"mm__\"] = \"\"\"MT_post____Contains__\"\"\"\n self.vs[4][\"GUID__\"] = UUID('ab008e63-29d6-4238-8bf6-495c887bf4e9')\n self.vs[5][\"MT_label__\"] = \"\"\"50000000000\"\"\"\n self.vs[5][\"mm__\"] = \"\"\"MT_post____Block_Inport__\"\"\"\n self.vs[5][\"GUID__\"] = UUID('430267a6-cb3c-4980-969a-468f99275ad8')\n self.vs[6][\"MT_label__\"] = \"\"\"50000000004\"\"\"\n self.vs[6][\"mm__\"] = \"\"\"MT_post____Block_Outport__\"\"\"\n self.vs[6][\"GUID__\"] = UUID('86bf73f0-7ebe-4b8e-a58a-f7b562ccb8fc')\n\n from HFlattening_SRULE_Product_ConstantLHS import HFlattening_SRULE_Product_ConstantLHS\n self.pre = HFlattening_SRULE_Product_ConstantLHS()", "def _build_graph(self):\n start_t = time.time()\n self._setup_placeholders()\n self._embed()\n if 'input' in self.elmo_positions:\n if self.elmo_utils:\n self._elmo_embed_input_with_cache()\n\n if self.algo == 'MEMNET':\n # self._run_memory_network(gated=self.gated_memnet)\n raise NotImplementedError(\"self.algo {} is not implemented\".format(self.algo))\n else:\n # encode layers\n if self.dial_encode == 'CONCAT':\n self._encode()\n self._word_match_for_concated()\n elif self.dial_encode == 'HIERARCHY':\n # for now, we still use the concated encoding at the same time\n self._encode()\n # hierarchy encode\n self._hierarchy_encode()\n self._word_match_for_concated()\n else:\n raise NotImplementedError(\"dial_encode {} is not 
implemented\".format(self.dial_encode))\n\n if 'SEQTAG' in self.decode_goal:\n self._decode_seq_tags()\n else:\n self._decode_multiclass()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n if self.train:\n with tf.control_dependencies(update_ops):\n self._create_train_op()\n self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))\n param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])\n self.logger.info('There are {} parameters in the model'.format(param_num))", "def build_graph(self):\r\n self._create_placeholders()\r\n self._create_network()\r\n self._create_loss()\r\n self._create_optimizer()\r\n self._create_summaries()\r\n self._show_current_model()", "def prepare_graph(self):\n flow_graph = self.deb_graph.get_flow_graph()\n self.solver = GraphSolver(flow_graph)", "def __init__(self):\n self.G = nx.Graph()\n self.node_attr_dfs = dict()\n self.unique_relations = set()\n self.node_types = dict()\n self.normalized_node_id_map = dict()\n self.train_edges = list()\n self.valid_edges = list()\n self.test_edges = list()\n self.relation_to_id = dict()\n self.id_to_relation = dict()\n self.nodeid2rowid = dict()\n self.rowid2nodeid = dict()\n self.rowid2vocabid = dict()", "def create_graph(self, graph_name):", "def create_graph(self):\n # C reates graph from saved graph_def.pb.\n with tf.gfile.FastGFile(os.path.join(\n self.graph_file), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')", "def compile(self):\n independent_variable_placeholders = (\n self.build_independent_variable_placeholders()\n )\n equation_weight_placeholders = self.build_equation_weight_placeholders()\n\n equation_nodes, all_nodes = tf.map_fn(\n self.map_inputs,\n (independent_variable_placeholders, equation_weight_placeholders),\n dtype=self.get_mapped_type(),\n )\n equation_nodes[\"batch_mean\"] = tf.reduce_mean(equation_nodes[\"mean\"])\n equation_nodes[\"weights\"] = equation_weight_placeholders\n return CompiledGraph(\n self, independent_variable_placeholders, equation_nodes, all_nodes\n )", "def __init_graph(self) -> None:\n self.graph = Graph()", "def __init__(self):\n self.graph_string = {'x1': [\"f1\"],\n \"x2\": [\"f2\"],\n \"x3\": [\"f1\", \"f2\"],\n \"x4\": [\"f3\"],\n \"x5\": [\"f1\", \"f3\"],\n \"x6\": [\"f2\", \"f3\"],\n \"x7\": [\"f1\", \"f2\", \"f3\"],\n \"f1\": [\"x1\", \"x3\", \"x5\", \"x7\"],\n \"f2\": [\"x2\", \"x3\", \"x6\", \"x7\"],\n \"f3\": [\"x4\", \"x5\", \"x6\", \"x7\"]}\n self.nodes = {}\n self.edges = {}\n for node, _ in self.graph_string.iteritems():\n n = None\n if node.startswith(\"x\"):\n n = Node(node, False)\n elif node.startswith(\"f\"):\n n = Node(node, True)\n self.nodes[n.id] = n\n for node, connections in self.graph_string.iteritems():\n n = self.nodes[node]\n for connection in connections:\n edge = None\n if self.nodes.get(connection):\n edge = Edge(n, self.nodes[connection])\n n.outgoing_edges.append(edge)\n self.nodes[connection].incoming_edges.append(edge)\n self.edges[str(edge)] = edge", "def __init__(self, graph_class):\n #graph_factory = GraphFactory(graph_class)\n self.graph_class = graph_class", "def __init__(self):\n # use networkX to create a directed graph\n # of words\n self.__graph = nx.DiGraph()\n # # map graph nodes to positions\n # self.__layout = {}\n # # map words to the synsets they belong to\n # self.__words_to_synsets = {}\n # # reverse of above\n # self.__synsets_to_words = {}\n # # map words to tense, definition, and id\n # 
self.__info_dict = {}\n # create w/ all synsets\n self.__create_graph_all_words()", "def init(self):\n self.graph = tf.Graph()\n return self.graph", "def __init__(self):\n # Flag this instance as compiled now\n self.is_compiled = True\n \n super(HFlattening_SRULE_Product_ConstantLHS, self).__init__(name='HFlattening_SRULE_Product_ConstantLHS', num_nodes=3, edges=[])\n \n # Add the edges\n self.add_edges([(1, 2), (2, 0)])\n # Set the graph attributes\n self[\"mm__\"] = pickle.loads(\"\"\"(lp1\nS'Simulink'\np2\na.\"\"\")\n self[\"name\"] = \"\"\"\"\"\"\n self[\"GUID__\"] = UUID('5b0822f5-6d2f-42ea-9190-bdbdcdf1016b')\n \n # Set the node attributes\n self.vs[0][\"MT_subtypeMatching__\"] = False\n self.vs[0][\"MT_pre__Position\"] = \"\"\"return True\"\"\"\n self.vs[0][\"MT_label__\"] = \"\"\"10\"\"\"\n self.vs[0][\"MT_subtypes__\"] = \"\"\"\"\"\"\n self.vs[0][\"BackgroundColor\"] = \"\"\"white\"\"\"\n self.vs[0][\"mm__\"] = \"\"\"MT_pre_SubSystem\"\"\"\n self.vs[0][\"MT_dirty__\"] = False\n self.vs[0][\"MT_pre__Name\"] = \"\"\"return True\"\"\"\n self.vs[0][\"GUID__\"] = UUID('8ef6a86a-aa35-4cdd-ae0a-b55a2000e35b')\n self.vs[1][\"MT_subtypeMatching__\"] = False\n self.vs[1][\"MT_label__\"] = \"\"\"99\"\"\"\n self.vs[1][\"MT_subtypes__\"] = \"\"\"\"\"\"\n self.vs[1][\"mm__\"] = \"\"\"MT_pre__SubSystem\"\"\"\n self.vs[1][\"MT_dirty__\"] = False\n self.vs[1][\"MT_pre__Name\"] = \"\"\"return True\"\"\"\n self.vs[1][\"GUID__\"] = UUID('7c2787f9-7db8-43eb-a218-556c51604293')\n self.vs[2][\"MT_subtypeMatching__\"] = False\n self.vs[2][\"MT_label__\"] = \"\"\"9900000010\"\"\"\n self.vs[2][\"MT_subtypes__\"] = \"\"\"\"\"\"\n self.vs[2][\"mm__\"] = \"\"\"MT_pre____Contains__\"\"\"\n self.vs[2][\"MT_dirty__\"] = False\n self.vs[2][\"MT_pre__Name\"] = \"\"\"return True\"\"\"\n self.vs[2][\"GUID__\"] = UUID('0b308f21-fe28-46a1-935e-47b21f0c4a4b')", "def __init__(self):\r\n # Flag this instance as compiled now\r\n self.is_compiled = True\r\n \r\n super(Hlayer0rule2, self).__init__(name='Hlayer0rule2', num_nodes=0, edges=[])\r\n \r\n \r\n # Set the graph attributes\r\n self[\"mm__\"] = ['HimesisMM']\r\n \r\n self[\"name\"] = \"\"\"layer0rule2\"\"\"\r\n self[\"GUID__\"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer0rule2')\r\n \r\n # match model. 
We only support one match model\r\n self.add_node()\r\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\r\n \r\n # apply model node\r\n self.add_node()\r\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\r\n \r\n # paired with relation between match and apply models\r\n self.add_node()\r\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\r\n \r\n \r\n # match class Operation(layer0rule2class0) node\r\n self.add_node()\r\n\r\n self.vs[3][\"mm__\"] = \"\"\"Operation\"\"\" \r\n self.vs[3][\"attr1\"] = \"\"\"+\"\"\" \r\n # match_contains node for class Operation(layer0rule2class0)\r\n self.add_node()\r\n self.vs[4][\"mm__\"] = \"\"\"match_contains\"\"\"\r\n \r\n \r\n # apply class CFunctionPointerStructMember(layer0rule2class2) node\r\n self.add_node()\r\n\r\n self.vs[5][\"mm__\"] = \"\"\"CFunctionPointerStructMember\"\"\" \r\n self.vs[5][\"attr1\"] = \"\"\"1\"\"\"\r\n # apply_contains node for class CFunctionPointerStructMember(layer0rule2class2)\r\n self.add_node()\r\n self.vs[6][\"mm__\"] = \"\"\"apply_contains\"\"\"\r\n # apply class FunctionRefType(layer0rule2class3) node\r\n self.add_node()\r\n\r\n self.vs[7][\"mm__\"] = \"\"\"FunctionRefType\"\"\" \r\n self.vs[7][\"attr1\"] = \"\"\"1\"\"\"\r\n # apply_contains node for class FunctionRefType(layer0rule2class3)\r\n self.add_node()\r\n self.vs[8][\"mm__\"] = \"\"\"apply_contains\"\"\"\r\n # apply class PointerType(layer0rule5class2) node\r\n self.add_node()\r\n\r\n self.vs[9][\"mm__\"] = \"\"\"PointerType\"\"\" \r\n self.vs[9][\"attr1\"] = \"\"\"1\"\"\"\r\n # apply_contains node for class PointerType(layer0rule5class2)\r\n self.add_node()\r\n self.vs[10][\"mm__\"] = \"\"\"apply_contains\"\"\"\r\n # apply class VoidType(layer0rule5class3) node\r\n self.add_node()\r\n\r\n self.vs[11][\"mm__\"] = \"\"\"VoidType\"\"\" \r\n self.vs[11][\"attr1\"] = \"\"\"1\"\"\"\r\n # apply_contains node for class VoidType(layer0rule5class3)\r\n self.add_node()\r\n self.vs[12][\"mm__\"] = \"\"\"apply_contains\"\"\"\r\n \r\n \r\n \r\n # apply association CFunctionPointerStructMember--type-->FunctionRefType node\r\n self.add_node()\r\n self.vs[13][\"attr1\"] = \"\"\"type\"\"\"\r\n self.vs[13][\"mm__\"] = \"\"\"directLink_T\"\"\"\r\n # apply association FunctionRefType--argTypes-->PointerType node\r\n self.add_node()\r\n self.vs[14][\"attr1\"] = \"\"\"argTypes\"\"\"\r\n self.vs[14][\"mm__\"] = \"\"\"directLink_T\"\"\"\r\n # apply association PointerType--baseType-->VoidType node\r\n self.add_node()\r\n self.vs[15][\"attr1\"] = \"\"\"baseType\"\"\"\r\n self.vs[15][\"mm__\"] = \"\"\"directLink_T\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # Add the edges\r\n self.add_edges([\r\n (0,4), # matchmodel -> match_contains\r\n (4,3), # match_contains -> match_class Operation(layer0rule2class0)\r\n (1,6), # applymodel -> apply_contains\r\n (6,5), # apply_contains -> apply_class CFunctionPointerStructMember(layer0rule2class2)\r\n (1,8), # applymodel -> apply_contains\r\n (8,7), # apply_contains -> apply_class FunctionRefType(layer0rule2class3)\r\n (1,10), # applymodel -> apply_contains\r\n (10,9), # apply_contains -> apply_class PointerType(layer0rule5class2)\r\n (1,12), # applymodel -> apply_contains\r\n (12,11), # apply_contains -> apply_class VoidType(layer0rule5class3)\r\n (5,13), # apply_class CFunctionPointerStructMember(layer0rule2class2) -> association type\r\n (13,7), # association type -> apply_class FunctionRefType(layer0rule2class3)\r\n (7,14), # apply_class FunctionRefType(layer0rule2class3) -> association argTypes\r\n (14,9), # association argTypes -> apply_class 
PointerType(layer0rule5class2)\r\n (9,15), # apply_class PointerType(layer0rule5class2) -> association baseType\r\n (15,11), # association baseType -> apply_class VoidType(layer0rule5class3)\r\n (0,2), # matchmodel -> pairedwith\r\n (2,1) # pairedwith -> applyModel\t\t\t\t\r\n\t\t])\r\n\r\n # Add the attribute equations\r\n self[\"equations\"] = [((5,'name'),(3,'name')), ((5,'__ApplyAttribute'),('constant','CFunctionPointerStructMember')), ((7,'__ApplyAttribute'),('constant','FunctionRefType')), ((9,'__ApplyAttribute'),('constant','InstancePointer')), ]", "def create_graph(self):\n self.my_graph = eval_or_exec(self.program)\n self.parse_graph()", "def __init__(self, start, end, adjList):\n self.adjList = adjList\n self.start = start\n self.end = end\n self.DAG = dict()", "def __init__(self):\r\n # Flag this instance as compiled now\r\n self.is_compiled = True\r\n \r\n super(Hlayer0rule3, self).__init__(name='Hlayer0rule3', num_nodes=0, edges=[])\r\n \r\n \r\n # Set the graph attributes\r\n self[\"mm__\"] = ['HimesisMM']\r\n \r\n self[\"name\"] = \"\"\"layer0rule3\"\"\"\r\n self[\"GUID__\"] = uuid.uuid3(uuid.NAMESPACE_DNS,'layer0rule3')\r\n \r\n # match model. We only support one match model\r\n self.add_node()\r\n self.vs[0][\"mm__\"] = \"\"\"MatchModel\"\"\"\r\n \r\n # apply model node\r\n self.add_node()\r\n self.vs[1][\"mm__\"] = \"\"\"ApplyModel\"\"\"\r\n \r\n # paired with relation between match and apply models\r\n self.add_node()\r\n self.vs[2][\"mm__\"] = \"\"\"paired_with\"\"\"\r\n \r\n \r\n # match class StringType(layer0rule3class0) node\r\n self.add_node()\r\n\r\n self.vs[3][\"mm__\"] = \"\"\"StringType\"\"\" \r\n self.vs[3][\"attr1\"] = \"\"\"+\"\"\" \r\n # match_contains node for class StringType(layer0rule3class0)\r\n self.add_node()\r\n self.vs[4][\"mm__\"] = \"\"\"match_contains\"\"\"\r\n \r\n \r\n # apply class StringType(layer0rule3class1) node\r\n self.add_node()\r\n\r\n self.vs[5][\"mm__\"] = \"\"\"StringType\"\"\" \r\n self.vs[5][\"attr1\"] = \"\"\"1\"\"\"\r\n # apply_contains node for class StringType(layer0rule3class1)\r\n self.add_node()\r\n self.vs[6][\"mm__\"] = \"\"\"apply_contains\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n # Add the edges\r\n self.add_edges([\r\n (0,4), # matchmodel -> match_contains\r\n (4,3), # match_contains -> match_class StringType(layer0rule3class0)\r\n (1,6), # applymodel -> apply_contains\r\n (6,5), # apply_contains -> apply_class StringType(layer0rule3class1)\r\n (0,2), # matchmodel -> pairedwith\r\n (2,1) # pairedwith -> applyModel\t\t\t\t\r\n\t\t])\r\n\r\n # Add the attribute equations\r\n self[\"equations\"] = [((5,'__ApplyAttribute'),('constant','StringType')), ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks which of the two direct ancestor tf.Operations is a constant and returns the underlying tensor as a numpy.ndarray inside a tuple. The matrix is transposed, if necessary, so that it can be used as the left multiplier in the matrix multiplication. Arguments: op — the MatMul tf.Operation whose constant operand should be extracted.
def matmul_resources(self, op):
    inputs = op.inputs
    left = inputs[0]
    right = inputs[1]

    if left.op.type == "Const":
        matrix = self.sess.run(left) if not op.get_attr("transpose_a") else self.sess.run(left).transpose()
    else:
        matrix = self.sess.run(right).transpose() if not op.get_attr("transpose_b") else self.sess.run(right)
    return (matrix,)
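A plain-NumPy check of the transpose convention (illustrative; it mirrors only the branch where the right operand is the constant and transpose_b is False):

import numpy as np

x = np.array([[1., 2., 3.]])            # stands in for the non-constant left input, shape (1, 3)
W = np.array([[1., 0.],
              [0., 1.],
              [1., 1.]])                # constant right operand of x @ W, shape (3, 2)

matrix = W.transpose()                  # what matmul_resources returns in this case
assert np.allclose(matrix @ x.T, (x @ W).T)   # W.T works as the left multiplier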
[ "def _transfer_tensor_to_tuple(inputs):\n if isinstance(inputs, Tensor):\n return (inputs,)\n\n return inputs", "def test_meta_const():\n\n with tf.Graph().as_default():\n one_mt = mt.const(1, \"int32\", \"Const\")\n\n with tf.Graph().as_default():\n another_one_mt = mt(1)\n\n assert one_mt == another_one_mt\n assert isinstance(one_mt.reify(), tf.Tensor)\n assert one_mt.reify().op.type == \"Const\"", "def _tuple_sub_tensor(x, y):\n x = utils.sequence_to_tensor(x, y.dtype)\n return F.tensor_sub(x, y)", "def _format_single_matrix(op):\n # This can be specified as list [[coeff, Pauli], ... ]\n if isinstance(op, numpy.ndarray):\n return op\n if isinstance(op, (Instruction, QuantumCircuit)):\n return Operator(op).data\n if hasattr(op, 'to_operator'):\n return op.to_operator().data\n return None", "def getOperationMatrix(self):\n \toperation_df = None\n \tif self.mesgraph.lower_inverse is None:\n \t pass\n \telif self.mesgraph.rref_operation is None:\n \t operation_df = self.mesgraph.lower_inverse\n \telse:\n \t operation_df = self.mesgraph.rref_operation.dot(self.mesgraph.lower_inverse)\n \treturn operation_df", "def __get_tensor(self, branch, tag):\n return self.tensors[\n self.__make_op_name(branch, tag)]", "def _tensor_sub_tuple(x, y):\n y = utils.sequence_to_tensor(y, x.dtype)\n return F.tensor_sub(x, y)", "def _scalarize(self, transformed_multi_objectives: tf.Tensor) -> tf.Tensor:", "def _scalar_sub_tensor(x, y):\n return F.tensor_sub(x, y)", "def _make_output_composite_tensors_match(op_type, branch_graphs):\n # Note: since this is only used for gradient graphs, we do not expect the\n # outputs to be structured (e.g. nested lists), and thus do not need to use\n # nest.flatten, etc.\n assert branch_graphs\n branch_outputs = [g.structured_outputs for g in branch_graphs]\n outputs_per_branch = list(len(outs) for outs in branch_outputs)\n assert len(set(outputs_per_branch)) == 1, outputs_per_branch\n\n for output_idx, branch_outs in enumerate(zip(*branch_outputs)):\n if len(set(type(out) for out in branch_outs)) == 1:\n continue\n if not any(\n isinstance(out, indexed_slices.IndexedSlices) for out in branch_outs):\n continue\n for branch_idx, branch_out in enumerate(branch_outs):\n if isinstance(branch_out, indexed_slices.IndexedSlices):\n continue\n elif isinstance(branch_out, tensor_lib.Tensor):\n with branch_graphs[branch_idx].as_default():\n branch_outputs[branch_idx][output_idx] = math_ops._as_indexed_slices(\n branch_out)\n else:\n raise TypeError(\n \"Cannot reconcile {op_name} {output_idx}-th outputs:\\n\"\n \" outputs from all branches: {outputs}\".format(\n op_name=\"tf.cond\" if op_type == _COND else \"tf.switch_case\",\n output_idx=output_idx,\n outputs=branch_outs))\n\n for branch_graph, branch_outs in zip(branch_graphs, branch_outputs):\n branch_graph.structured_outputs = branch_outs\n branch_graph.outputs = [\n t for t in func_graph_module.flatten(branch_outs) if t is not None\n ]", "def _cootensor_sub_tensor(x, y):\n check_equal(x.shape, y.shape, \"input1 (shape={}) and input2(shape={}) should be the same shape.\")\n return F.tensor_scatter_add(-y, x.indices, x.values)", "def _get_tensor(self):\n return self._input", "def dualize(self, operator):\n if isinstance(operator, np.ndarray):\n operator = COO.from_numpy(operator)\n return sparse.tensordot(operator.conj(), self.operators,\n ([-2, -1], [1, 2]))", "def build_operators(self):\n A = self.frame\n m, n = A.shape\n Ax = lambda x: np.dot(A, x)\n Ax = LinearOperator( (m,n), Ax, matmat=Ax, dtype='d' )\n Aty = lambda y: 
np.dot(A.T, y)\n Aty = LinearOperator( (n,m), Aty, matmat=Aty, dtype='d' )\n return Ax, Aty", "def __mul__(self, other: Union['Tensor', TensorableT]) -> 'Tensor':\r\n return mul(self, assure_tensor(other))", "def __mod__(self, other):\r\n\r\n # Tensor product between the two operators\r\n if isinstance(other, Operator):\r\n result = Operator(self.n_qubits + other.n_qubits)\r\n result.matrix = csc_matrix(kron(self.matrix, other.matrix))\r\n return result\r\n else:\r\n raise TypeError(\r\n 'Operation not defined between operator and {}.'.format(type(other))\r\n )", "def internal_coproduct(self):\n # Coerce to F basis and back, doing the actual computation in that basis.\n parent = self.parent()\n F = parent.realization_of().F()\n from sage.categories.tensor import tensor\n result = tensor([parent.zero(), parent.zero()])\n for lam, a in F(self).internal_coproduct():\n (I, J) = lam\n result += a * tensor([parent(F(I)), parent(F(J))])\n return result", "def metric_tensor(self) -> np.ndarray:\n return dot(self._matrix, self._matrix.T)", "def _get_tensor(self):\n return self._output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the filter, the stride of the filter, and the padding from op as well as the shape of the input coming into op Arguments
def conv2d_resources(self, op):
    inputs = op.inputs
    image = inputs[0]
    filters = op.inputs[1]
    filters = self.sess.run(filters)
    image_shape = tensorshape_to_intlist(image.shape)[1:]
    strides = op.get_attr('strides')[1:3]
    padding_str = op.get_attr('padding').decode('utf-8')
    pad_top, pad_left, pad_bottom, pad_right = calculate_padding(padding_str, image_shape, filters.shape, strides)
    return filters, image_shape, strides, pad_top, pad_left, pad_bottom, pad_right
[ "def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = _determine_padding_from_tf_same(\n image_dimensions, kernel_size, stride\n )", "def convolve_complex_1d(\n tensor: tf.Tensor,\n filter: tf.Tensor,\n stride: int = 1,\n padding: str = \"VALID\",\n):\n if tensor.dtype != filter.dtype:\n raise ValueError(\"`tensor` and `filter` must have same dtype got `{}`\"\n \"\".format([tensor.dtype, filter.dtype]))\n filter.shape.assert_is_compatible_with([None, None, None])\n\n filter_length = filter.shape[0]\n\n if padding == \"VALID\":\n pass\n elif padding == \"SAME\":\n if (tensor.shape[-2] % stride == 0):\n pad_along_height = max(filter_length - stride, 0)\n else:\n pad_along_height = max(filter_length - (tensor.shape[-2] % stride), 0)\n pad_top = pad_along_height // 2\n pad_bottom = pad_along_height - pad_top\n tensor = tf.pad(tensor, [[0, 0]] * tensor.shape[:-2].ndims + [\n [pad_top, pad_bottom]] + [[0, 0]])\n else:\n raise ValueError(\"`padding` must be one of `VALID` or `SAME` but got `{}`\"\n \"\".format(padding))\n\n # Slice `tensor`.\n tensor_slices = [tensor[..., start_slice:start_slice + filter_length, :] for\n start_slice in\n range(0, tensor.shape[-2] - filter_length + 1,\n stride)]\n\n # Add batch dimensions to filter.\n filters = tf.reshape(filter,\n [1] * tensor.shape[:-1].ndims + filter.shape.as_list())\n\n # Stack slices. `tensor` now has shape\n # `batch_dimensions + [output_dimension, filter_length, in_channels]`.\n tensor = tf.stack(tensor_slices, -3)\n\n # Expand last dimension of `tensor` to account for `filter_count`. `tensor`\n # now has shape\n # `batch_dimensions + [output_dimension, filter_length, in_channels, 1]`.\n tensor = tensor[..., tf.newaxis]\n\n # Mupltiply tensor and filters.\n tensor = tensor * filters\n\n # Sum along `filter_length` and `in_channels` dimensions.\n return tf.reduce_sum(tensor, [-3, -2])", "def calc_conv_layer_shape(prev_layer_shape, num_filters, stride):\n\n return np.array([num_filters] + calc_image_dimensions(\n prev_layer_shape[1:], stride), dtype=np.uint64)", "def compute_same_padding(filter_size, in_size, stride):\n out_size = (in_size + (stride - 1)) // stride\n return max((out_size - 1) * stride + filter_size - in_size, 0)", "def get_conv_output_shape(image_shape, kernel_shape,\n border_mode, subsample,\n filter_dilation=None):\n # ====== convert tensorflow shape to theano shape ====== #\n image_shape = (image_shape[0], image_shape[-1]) + tuple(image_shape[1:-1])\n kernel_shape = (kernel_shape[-1], kernel_shape[-2]) + tuple(kernel_shape[:-2])\n # ====== infer shape ====== #\n bsize, imshp = image_shape[0], image_shape[2:]\n nkern, kshp = kernel_shape[0], kernel_shape[2:]\n if filter_dilation is None:\n filter_dilation = np.ones(len(subsample), dtype='int')\n if isinstance(border_mode, tuple):\n out_shp = tuple(__get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode[i],\n subsample[i], filter_dilation[i]) for i in range(len(subsample)))\n else:\n out_shp = tuple(__get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode,\n subsample[i], filter_dilation[i]) for i in range(len(subsample)))\n # ====== convert theano to tensorflow shape ====== #\n return (bsize, ) + out_shp + (nkern,)", "def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4.\")\n # Add support for left padding.\n if \"padding\" in kwargs and kwargs[\"padding\"] == \"LEFT\":\n 
dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n if \"name\" in kwargs:\n original_name = kwargs[\"name\"]\n name = kwargs.pop(\"name\") + \"_\" + name_suffix\n else:\n original_name = None\n name = \"conv_\" + name_suffix\n original_force2d = None\n if \"force2d\" in kwargs:\n original_force2d = kwargs.pop(\"force2d\")\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")", "def _decode_and_write_vc_filters(self, shape, feed_dict):\n\n vc_num_filters = shape[3]\n\n # Create input data such that filter Z is active (=1, rest zeros)\n vc_filters = []\n for z in range(vc_num_filters):\n vc_filter = np.zeros(shape[1:])\n x = shape[1] // 2\n y = shape[2] // 2\n vc_filter[x, y, z] = 1\n vc_filters.append(vc_filter)\n\n # Must split into N=batch_size chunks due to static batch size\n vc_filters_chunks = [\n vc_filters[x:x+self._hparams.batch_size] for x in range(\n 0, len(vc_filters), self._hparams.batch_size)\n ]\n\n logging.info('VC Filters: %s, Number of chunks: %s',\n np.array(vc_filters).shape,\n len(vc_filters_chunks))\n\n vc_decoded_filters = []\n for vc_filters_chunk in vc_filters_chunks:\n if len(vc_filters_chunk) != self._hparams.batch_size:\n logging.warning('Skipping filter chunk due to even/odd mismatch '\n 'between the batch size and number of filters.')\n continue\n\n vc_decoded_filter = self._decoder(0, 'vc_filters', 'vc',\n vc_filters_chunk, feed_dict,\n summarise=False)\n vc_decoded_filters.append(vc_decoded_filter)\n\n if vc_decoded_filters:\n vc_decoded_filters = np.array(vc_decoded_filters) # [ #batch-chunks, batch size, width, height, channels]\n\n vc_decoded_filters_flat = vc_decoded_filters.reshape( # [filters, width, height, channels]\n vc_decoded_filters.shape[0] * vc_decoded_filters.shape[1],\n vc_decoded_filters.shape[2], vc_decoded_filters.shape[3],\n vc_decoded_filters.shape[4])\n\n for idx, decoded_filter in enumerate(vc_decoded_filters_flat):\n decoded_filter = decoded_filter.reshape(decoded_filter.shape[0], decoded_filter.shape[1])\n filetype = 'png'\n filename = 'decoded_filter_' + str(idx) + '.' 
+ filetype\n filepath = os.path.join(self._summary_dir, filename)\n\n if not os.path.isfile(filepath):\n plt.title('VC Filter ' + str(idx))\n plt.imshow(decoded_filter, interpolation='none')\n plt.savefig(filepath, dpi=300, format=filetype)\n plt.close()", "def get_padding_info(op_layer, target_layer, data_format=None):\n if data_format is None:\n data_format = tf.keras.backend.image_data_format()\n if data_format == 'channels_last':\n _, target_rows, target_cols, _ = target_layer.shape\n _, op_rows, op_cols, _ = op_layer.shape\n else : # channel first\n _, _, target_rows, target_cols = target_layer.shape\n _, _, op_rows, op_cols = op_layer.shape\n\n top_pad = (target_rows - op_rows) // 2\n left_pad = (target_cols - op_cols) // 2\n bottom_pad = (target_rows - op_rows) - top_pad\n right_pad = (target_cols - op_cols) - left_pad\n\n return top_pad, bottom_pad, left_pad, right_pad", "def __make_conv_wb(self, params, op_name):\n filters = self.__make_weights(params,op_name)\n biases = self.__make_biases(params[-1],op_name)\n return (filters, biases)", "def _modify_conv_stride_dilation(\n conv: nn.Conv2d,\n stride: Tuple[int, int] = (1, 1),\n padding: int = None,\n) -> None:\n conv.stride = stride\n\n if padding is not None:\n conv.padding = padding", "def _conv_output_shape(cls, h_w: Union[tuple, int],\n kernel_size: Union[tuple, int],\n stride: Union[tuple, int],\n pad: Union[tuple, int] = 0,\n dilation=1):\n # source https://discuss.pytorch.org/t/utility-function-for-calculating-the-shape-of-a-conv-output/11173/6\n\n if type(h_w) is not tuple:\n h_w = (h_w, h_w)\n\n if type(kernel_size) is not tuple:\n kernel_size = (kernel_size, kernel_size)\n\n if type(stride) is not tuple:\n stride = (stride, stride)\n\n if type(pad) is not tuple:\n pad = (pad, pad)\n\n h = (h_w[0] + (2 * pad[0]) - (dilation * (kernel_size[0] - 1)) - 1) // stride[0] + 1\n w = (h_w[1] + (2 * pad[1]) - (dilation * (kernel_size[1] - 1)) - 1) // stride[1] + 1\n\n return h, w", "def _create_conv_layer(self, prev_layer_shape, interpreted_shape,\n filter_shape, stride, act_type, act_args):\n\n back_type = INTEGRATOR_TYPE.CONV.value\n # Appropriate depth is added to filter shape to build the # 3-element 1D array\n back_args = (np.array([prev_layer_shape[0]] + list(filter_shape), dtype=np.uint64),\n interpreted_shape, # layer_shape funct outputs dtype=np.uint64\n np.array(prev_layer_shape, dtype=np.uint64),\n int(stride))\n self_type = INTEGRATOR_TYPE.NONE.value\n self_args = tuple()\n return layer_generator.CreateLayer(back_type,\n back_args, self_type, self_args,\n act_type, act_args, interpreted_shape)", "def filter2d(\n x: torch.Tensor,\n kernel: torch.Tensor,\n padding: Union[int, Tuple[int, int]] = 0,\n) -> torch.Tensor:\n\n return F.conv2d(x, kernel, padding=padding, groups=x.size(1))", "def _remove_dilations(self):\n\n input_shape = tf_shape(self.input)\n in_spatial_shape = input_shape[1:self.spatial_size + 1]\n\n channels_count = input_shape[self.spatial_size + 1]\n # Initialize gather_ind with the range of channels\n # e.g. 
[0 1]\n gather_ind = tf.range(channels_count, dtype=tf.int64)\n # convert the vector to column vector\n # in the following logic we use column vectors\n gather_ind = tf.expand_dims(gather_ind, 1)\n\n # initilize the output_shape with zeros\n # self.output_shape will contain the shape of the\n # output tensor after the loop below is executed\n self.output_shape = [0] * (self.spatial_size + 2)\n self.output_shape[0] = input_shape[0]\n \"\"\"\n Loop over the input spatial dimensions starting from the\n last (most internal) going up to the first dimension\n\n On every step of the loop calculate the output indices and\n map them to the input indices using `_calc_input_ind`,\n then \"combine\" with the already calculated indices from the\n previous dimensions using cartesian product.\n\n For the following example input:\n\n Input: [[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [ 12, 13, 14, 15]]\n\n Kernel: [2, 2]\n Dilations: [2, 2]\n Strides: [1, 1]\n\n these are the steps that will be executed:\n\n 1. Initilize gather_ind = [[0]] # we have only 1 channel\n\n 2. Loop step 0 (axis 1):\n filter_size = 3\n output_size = 4\n dim_ind = [[0]\n [2]\n [1]\n [3]]\n\n gather_ind = [[0 0]\n [2 0]\n [1 0]\n [3 0]]\n\n 3. Loop step 1 (axis 0):\n filter_size = 3\n output_size = 4\n dim_ind = [[0]\n [2]\n [1]\n [3]]\n\n gather_ind = [[0 0 0]\n [0 2 0]\n [0 1 0]\n [0 3 0]\n [2 0 0]\n [2 2 0]\n [2 1 0]\n [2 3 0]\n [1 0 0]\n [1 2 0]\n [1 1 0]\n [1 3 0]\n [3 0 0]\n [3 2 0]\n [3 1 0]\n [3 3 0]]\n\n These are the indices used for gather_nd operation to collect\n the values from the input data.\n \"\"\"\n\n for dim in range(self.spatial_size - 1, -1, -1):\n filter_size = (self.kernel_shape[dim] - 1) * \\\n self.dilations[dim] + 1\n output_size = ((\n (in_spatial_shape[dim] - filter_size) // self.strides[dim]) + 1\n ) * self.kernel_shape[dim]\n self.output_shape[dim + 1] = output_size\n\n # initialize the output dimension index with the range of the\n # dimension output size (e.g. 
4): [0, 1, 2, 3]\n dim_ind = tf.range(output_size)\n\n # calculate the matching indices in the input data\n # [0, 1, 2, 3] will calculate to [0, 2, 1, 3]\n # from the above example\n dim_ind = self._calc_input_ind(dim_ind, self.kernel_shape[dim],\n self.dilations[dim], self.strides[dim])\n # convert to column vector\n dim_ind = tf.expand_dims(dim_ind, 1)\n\n # \"combine\" current dimension indices with the previous dimensions\n # using cartesian product\n gather_ind = tf_product(dim_ind, gather_ind)\n\n # The result from the above loop for 2D data will be:\n # [[y1, x1, c], [y2, x2, c], ..., [yn, xm, c]] where n is the height,\n # m is the width and c is the channel number.\n\n # set the channels count in the output_shape\n self.output_shape[self.spatial_size + 1] = channels_count\n\n # expand the dimensions to match the input dimensions + 1\n for x in range(self.spatial_size):\n gather_ind = tf.expand_dims(gather_ind, 0)\n # dublicate the indices for every batch\n gather_ind = tf.tile(gather_ind,\n [input_shape[0]] + [1] * (self.spatial_size + 1))\n\n # extract the selected values from the input\n output = tf.gather_nd(self.input, gather_ind, batch_dims=1)\n # reshape the output to the correct shape calculated earlier\n output = tf.reshape(output, self.output_shape)\n\n return output", "def calculate_out_shape(in_shape, kernel_size, stride, padding):\n in_shape = np.atleast_1d(in_shape)\n out_shape = ((in_shape - kernel_size + padding + padding) // stride) + 1\n out_shape = tuple(int(s) for s in out_shape)\n\n return tuple(out_shape) if len(out_shape) > 1 else out_shape[0]", "def my_imfilter(image, filter):\n\n assert filter.shape[0] % 2 == 1\n assert filter.shape[1] % 2 == 1\n\n ############################\n ### TODO: YOUR CODE HERE ###\n\n filtered_image = np.zeros_like(image)\n filter_xcoord = int((filter.shape[0] - 1)/2)\n filter_ycoord = int((filter.shape[1] - 1)/2)\n\n padded_input = np.pad(image, [(filter_xcoord, filter_xcoord), (filter_ycoord,filter_ycoord), (0, 0)], 'reflect')\n \n for layer in range(image.shape[2]):\n for row in range(image.shape[0]):\n for col in range(image.shape[1]):\n x = padded_input[row:row + filter.shape[0], col:col+filter.shape[1], layer]\n filtered_image[row, col, layer] = np.sum(np.multiply(filter, x))\n\n ### END OF STUDENT CODE ####\n ############################\n\n return filtered_image", "def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None):", "def _conv2d_fixed_padding(inputs, filters, kernel_size, strides):\n # The padding is consistent and is based only on `kernel_size`, not on the\n # dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).\n if strides > 1:\n inputs = BaselineResNet._fixed_padding(inputs, kernel_size)\n\n return tf.layers.conv2d(\n inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,\n padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,\n kernel_initializer=tf.variance_scaling_initializer())", "def receptive_field_size(conv_ops_list, input_size=None):\n output_rf = 1\n output_size = input_size\n stride_jumps = 1\n for conv_op in conv_ops_list:\n kernel_size, stride, _ = conv_op\n output_rf += stride_jumps * (kernel_size-1)\n stride_jumps *= stride\n if input_size:\n output_size = int(np.ceil((output_size - kernel_size + 1) / stride))\n return output_rf, output_size" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Same as ``lisa.target.Target.pull`` but will cache the file in the ``target.res_dir`` folder, based on the source path.
def cached_pull(self, src, dst, **kwargs):
    cache = (self._cache_dir / 'pull')
    cache.mkdir(parents=True, exist_ok=True)
    m = hashlib.sha256()
    m.update(src.encode('utf-8'))
    key = m.hexdigest()
    cached_path = cache / key / os.path.basename(src)
    if not cached_path.exists():
        self.pull(src, cached_path, **kwargs)
    if cached_path.is_dir():
        shutil.copytree(cached_path, dst)
    else:
        shutil.copy2(cached_path, dst)
[ "def _pull(paths: List[str]):\n pull_paths_from_storage(project_context.repository, *paths)", "def pull(self, remote = 'origin'):", "def refresh_source(options):\n cd(options.source, options.dry_run)\n if options.update:\n update_existing_repo(options.dry_run)\n else:\n clone_repo(options.dry_run)", "def pull(self, container, path = None):\n repo = get_repo_in_folder(self.service, container)\n if repo:\n repo.pull()\n else:\n print(\"Error: repository doesn't exist in this folder\")", "def pull():\n\n buildout_directory = _env.hostout.options.get('path')\n fallback_user = _env.user or 'root'\n effective_user = _env.hostout.options.get('effective-user', fallback_user)\n local_sudo = _env.hostout.options.get('local-sudo') == \"true\"\n\n assert buildout_directory, u'No path found for the selected hostout'\n\n var_directory = os.path.join(buildout_directory, 'var')\n filestorage_directory = os.path.join(var_directory, 'filestorage')\n\n # Ensure filestorage\n if not os.path.exists(var_directory):\n cmd = 'mkdir -p {0:s}'.format(filestorage_directory)\n if local_sudo:\n cmd = 'sudo {0:s}'.format(cmd)\n if _output.running:\n print('[localhost] pull: {0:s}'.format(cmd))\n _local(cmd)\n\n # Pull filestorage\n _rsync(os.path.join(filestorage_directory, 'Data.fs'),\n os.path.join(filestorage_directory, 'Data.fs'),\n delete=True)\n\n # Pull blobstorage\n _rsync(os.path.join(var_directory, 'blobstorage'), var_directory,\n delete=True)\n\n # Chown var-directory\n var_directory = os.path.join(buildout_directory, 'var')\n cmd = 'chown -R {0:s} {1:s}'.format(effective_user, var_directory)\n if local_sudo:\n cmd = 'sudo {0:s}'.format(cmd)\n if _output.running:\n print('[localhost] pull: {0:s}'.format(cmd))\n _local(cmd)", "def update_source_file(self, fake=False):\r\n try:\r\n source_file = urllib2.urlopen(self.source_file_url)\r\n except:\r\n logger.error(\"Could not pull source file for resource %s (%s)\" %\r\n (self.resource.full_name, self.source_file_url))\r\n raise\r\n\r\n filename = ''\r\n if source_file.info().has_key('Content-Disposition'):\r\n # If the response has Content-Disposition, we try to take\r\n # filename from it\r\n content = source_file.info()['Content-Disposition']\r\n if 'filename' in content:\r\n filename = content.split('filename')[1]\r\n filename = filename.replace('\"', '').replace(\"'\", \"\"\r\n ).replace(\"=\", \"\").replace('/', '-').strip()\r\n\r\n if filename == '':\r\n parts = urlparse.urlsplit(self.source_file_url)\r\n #FIXME: This still might end empty\r\n filename = parts.path.split('/')[-1]\r\n\r\n try:\r\n if not self.resource.i18n_method:\r\n msg = \"No i18n method defined for resource %s\"\r\n logger.error(msg % self.resource)\r\n return\r\n parser = registry.appropriate_handler(\r\n self.resource, language=self.resource.source_language,\r\n filename=filename\r\n )\r\n language = self.resource.source_language\r\n content = source_file.read()\r\n parser.bind_content(content)\r\n parser.set_language(language)\r\n parser.bind_resource(self.resource)\r\n parser.is_content_valid()\r\n parser.parse_file(is_source=True)\r\n strings_added, strings_updated = 0, 0\r\n if not fake:\r\n strings_added, strings_updated = parser.save2db(is_source=True)\r\n except Exception,e:\r\n logger.error(\"Error importing source file for resource %s.%s (%s): %s\" %\r\n ( self.resource.project.slug, self.resource.slug,\r\n self.source_file_url, str(e)))\r\n raise\r\n finally:\r\n source_file.close()\r\n gc.collect()\r\n\r\n return strings_added, strings_updated", "def 
update_resource_file(\r\n self, remote_resource: GenomicResource,\r\n dest_resource: GenomicResource,\r\n filename: str) -> Optional[ResourceFileState]:", "def pull(self, file_name):\n self.ui_.pushbuffer() # Hmmm.. add param to make this optional?\n try:\n pull_bundle(self.repo, self.ui_, file_name)\n finally:\n self.ui_.popbuffer()", "def fetch_resource(url):\n if local_resource_re.match(url):\n # This is already a local resource\n return url\n local_id = hashlib.md5(url).hexdigest()\n extension = \"\"\n # Try to figure out a file extension just to make things nicer to file browsers\n try:\n filename = getparams_re.sub('', os.path.basename(url))\n extension = filename.rsplit('.', 1)[1] # get the last extension.\n except Exception,e:\n print \"Got exception %s when trying to figure out file extension for %s\" % (e, url)\n local_path = os.path.join('resources', local_id[0:2], local_id + \".\" + extension)\n # If we already have the file just return it\n if os.path.isfile(local_path):\n # Make sure the file has sane amount of data...\n if (os.stat(local_path).st_size < 16):\n print \"ERR: Local file %s is empty, removing\" % local_path\n os.unlink(local_path)\n else:\n return local_path\n # Create the container dir if it's not there\n if not os.path.isdir(os.path.dirname(local_path)):\n os.makedirs(os.path.dirname(local_path))\n if debug:\n print \"Fetching (BIN) %s to %s\" % (url, local_path)\n fp_from = None\n fp_to = None\n try:\n fp_from = urllib_cached.urlopen(url)\n fp_to = open(local_path, 'wb')\n # TODO: use a sensibly sized buffer ?\n fp_to.write(fp_from.read())\n fp_from.close()\n fp_to.close()\n except Exception,e:\n print \"Got exception %s\" % e\n if fp_from:\n fp_from.close()\n if fp_to:\n fp_to.close()\n if os.path.isfile(local_path):\n os.unlink(local_path)\n return None\n return local_path", "def _download(self):\n log(\n \"Downloading '%s' -> '%s'\" %\n (self.source_url, self.source_fn), indent=4)\n if not os.path.exists(os.path.dirname(self.source_fn)):\n os.makedirs(os.path.dirname(self.source_fn))\n utils.download(self.source_url, self.source_fn)\n utils.unpack(self.source_fn, os.path.dirname(self.source_fn))\n\n if self._needs_download():\n raise ValueError(\n \"Downloading and unpacking '%s' did not result in '%s'\"\n % (self.source_url, self.source_fn))", "def doChecksGetRsrcsFromTarget(self, args):\n\n p = self._getRealPath(args.target)\n if not os.path.exists(p):\n # For create and push commands (mutators) the passed target\n # should exist in the file system.\n raise MutatorError(\"The given path: {} does not exist\".format(p))\n\n rsrcType = args.rsrcType\n rt = ResourceFactory.resourceTypeFromString(rsrcType)\n if args.inGit:\n r = GitUtils.getExistingRepo(p)\n self.checkCleanDirInRepo(r, p)\n\n rsrcs = self.getRsrcsFromPath(rt, p, args.customerTag, args)\n\n logging.info(\"Using the resources: [{0}] from path: {1}\".format(\n \" \".join([str(r.uniqueId()) + rt.fileExtension() for r in rsrcs]), p))\n\n return rsrcs", "def unpackFile(uri, fetchTarget, sourceBaseDir, sourceSubDir, foldSubDir):\n\n\tsourceDir = sourceBaseDir + '/' + sourceSubDir \\\n\t\tif sourceSubDir else sourceBaseDir\n\tif uri.endswith('#noarchive'):\n\t\tif os.path.isdir(fetchTarget):\n\t\t\tshutil.copytree(fetchTarget, sourceDir, symlinks=True)\n\t\telse:\n\t\t\tif not os.path.isdir(sourceDir):\n\t\t\t\tos.makedirs(sourceDir)\n\t\t\tshutil.copy(fetchTarget, sourceDir)\n\telse:\n\t\tactualSubDir = sourceSubDir\n\t\tif actualSubDir:\n\t\t\tif foldSubDir:\n\t\t\t\tactualSubDir += '/' + 
foldSubDir\n\t\telse:\n\t\t\tactualSubDir = foldSubDir\n\t\tunpackArchive(fetchTarget, sourceBaseDir, actualSubDir)\n\t\tif foldSubDir:\n\t\t\tfoldSubdirIntoSourceDir(foldSubDir, sourceDir)", "def copy_resource_file(\r\n self,\r\n remote_resource: GenomicResource,\r\n dest_resource: GenomicResource,\r\n filename: str) -> Optional[ResourceFileState]:", "def fetch_local(path):\n tempdir = mkdtemp()\n destination = \"{}/bundle\".format(tempdir)\n copytree(path, destination)\n return destination", "def gitpull():\n with cd('%(site_dir)s/' % env):\n run('git pull origin master')", "def load_source(self):\n self.source = pyglet.media.load(self.file_name)", "def update_resource(resource: Union[str, Traversable], target_dir: Path) -> None:\n resource = _resolve_resource(resource)\n _copy_resource(resource, target_dir, copy_if=_copy_if_hash_mismatch)", "def copy_to_cache(self, ident):\n ident = unquote(ident)\n\n # get source image and write to temporary file\n (bucketname, keyname) = self.s3bucket_from_ident(ident)\n\n try:\n s3obj = self.s3.Object(bucketname, keyname)\n content_type = s3obj.content_type\n except Exception as e:\n msg = \"no content_type for s3 object ({}:{}): {}\".format(\n bucketname, keyname, e\n )\n logger.error(msg)\n raise ResolverException(msg)\n\n extension = self.cache_file_extension(ident, content_type)\n cache_dir = self.cache_dir_path(ident)\n os.makedirs(cache_dir, exist_ok=True)\n local_fp = os.path.join(cache_dir, \"loris_cache.\" + extension)\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tmp_file:\n try:\n self.s3.Bucket(bucketname).download_fileobj(keyname, tmp_file)\n except Exception as e:\n msg = \"unable to access or save s3 object ({}:{}): {}\".format(\n bucketname, keyname, e\n )\n logger.error(msg)\n raise ResolverException(msg)\n\n # Now rename the temp file to the desired file name if it still\n # doesn't exist (another process could have created it).\n #\n # Note: This is purely an optimisation; if the file springs into\n # existence between the existence check and the copy, it will be\n # overridden.\n if os.path.exists(local_fp):\n logger.info(\"Another process downloaded src image {}\".format(local_fp))\n os.remove(tmp_file.name)\n else:\n safe_rename(tmp_file.name, local_fp)\n logger.info(\"Copied {}:{} to {}\".format(bucketname, keyname, local_fp))\n\n # Check for rules file associated with image file\n # These files are < 2k in size, so fetch in one go.\n # Assumes that the rules will be next to the image\n # cache_dir is image specific, so this is easy\n bits = os.path.split(keyname) # === bash basename\n fn = bits[1].rsplit(\".\")[0] + \".\" + self.auth_rules_ext\n rules_keyname = bits[0] + \"/\" + fn\n local_rules_fp = os.path.join(cache_dir, \"loris_cache.\" + self.auth_rules_ext)\n try:\n self.s3.Object(bucketname, rules_keyname).download_file(local_rules_fp)\n except Exception as e:\n # no connection available?\n msg = \"ignoring rules file({}/{}) for ident({}): {}\".format(\n bucketname, rules_keyname, ident, e\n )\n logger.warn(msg)\n\n return local_fp", "def fetch_and_save(\n path_or_url,\n dest_dir,\n filename,\n as_text=True,\n):\n content = CACHE.get(\n path_or_url=path_or_url,\n as_text=as_text,\n )\n output = os.path.join(dest_dir, filename)\n wmode = \"w\" if as_text else \"wb\"\n with open(output, wmode) as fo:\n fo.write(content)\n return content" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given devlib module is available.
def is_module_available(self, module):
    if module not in _DEVLIB_AVAILABLE_MODULES:
        raise ValueError(f'"{module}" is not a devlib module')
    try:
        getattr(self, module)
    except Exception: # pylint: disable=broad-except
        return False
    else:
        return True
[ "def module_check():\n\tstatus = True\n\ttry:\n\t\timport fpdf\n\t\tprint '[+] Fpdf module installed.'\n\texcept ImportError as e:\n\t\tstatus = False\n\t\tif \"fpdf\" in repr(e):\n\t\t\tprint \"[-] FPDF module not installed. Run the following commands:\"\n\t\t\tprint \"[-] python -m pip install fpdf\"\n\ttry:\n\t\timport enum\n\t\tprint '[+] Enum module installed.'\n\texcept ImportError as e:\n\t\tstatus = False\n\t\tif \"enum\" in repr(e):\n\t\t\tprint \"[-] Enum module not installed. Run the following commands:\"\n\t\t\tprint \"[-] python -m pip install enum34\"\n\ttry:\n\t\timport psutil\n\t\tprint '[+] Psutil module installed'\n\texcept ImportError as e:\n\t\tstatus = False\n\t\tif \"psutil\" in repr(e):\n\t\t\tprint \"Enum module not installed. Run the following commands:\"\n\t\t\tprint \"python -m pip install psutil\"\n\treturn status", "def _check_module(self, module):\n return hasattr(module, '__phonebook_class__')", "def check_install(module, at_least_version=None, debug=False):\n try:\n module_version = __import__(module).__version__\n is_module = True\n except ImportError as e:\n is_module = False\n if is_module:\n if at_least_version is not None:\n if parse_version(at_least_version) <= parse_version(module_version):\n return True\n else:\n return False\n else:\n return True\n else:\n False", "def is_available():\n\n return importlib.util.find_spec('onnxruntime') is not None", "def has(self, module_name: str):\n found_flag = module_name in self._registry\n\n if self._third_party_lib:\n for lib in self._third_party_lib:\n if hasattr(lib, module_name):\n found_flag = True\n break\n\n return found_flag", "def drmaa_available():\n try:\n pass\n except (ImportError, RuntimeError):\n return False\n else:\n return True", "def is_dev_installed(self):\n result = False\n r = urllib2.urlopen(self.ecp_address + \"query/apps\").read()\n xml_root = ElementTree.fromstring(r)\n for app in xml_root.getiterator(\"app\"):\n if app.get(\"id\") == \"dev\":\n result = True\n break\n return result", "def is_loaded() -> bool:\n global _lib\n return _lib is not None", "def is_valid_module(module_obj):\n # Check for the existence and type of the required variables\n for var in [('NAME', 'str'), ('setup', 'function'), ('step', 'function')]:\n if var[0] not in dir(module_obj):\n return False\n if module_obj.__getattribute__(var[0]).__class__.__name__ != var[1]:\n return False\n\n # Return module as invalid if the hidden_module variable exists\n if 'hidden_module' in dir(module_obj):\n return False\n\n return True", "def _is_lib_already_installed(package: str) -> bool:\n return package in sys.modules.keys()", "def has_lib (self):\r\n return (self.distribution.has_pure_modules() or\r\n self.distribution.has_ext_modules())", "def _check_modules():\n all_available = True\n try:\n # Grab Kernel version with `uname`\n kernel_version = check_output([\"uname\", \"-r\"]).rstrip()\n\n modules_loadable_path = \"/lib/modules/%s/modules.dep\" % kernel_version\n modules_builtin_path = \"/lib/modules/%s/modules.builtin\" % kernel_version\n\n # For the modules we're expecting to look for, the mainline case is that\n # they will be loadable modules. 
Therefore, loadable modules are checked\n # first and builtins are checked only if needed.\n available_lines = open(modules_loadable_path).readlines()\n builtin_lines = None\n\n for module in REQUIRED_MODULES:\n module_available = check_module_lines(available_lines, module)\n if not module_available:\n # Open and check builtin modules\n if not builtin_lines:\n builtin_lines = open(modules_builtin_path).readlines()\n module_builtin = check_module_lines(builtin_lines, module)\n\n # If module is not available or builtin, issue warning\n if not module_builtin:\n print >> sys.stderr, \"WARNING: Unable to detect the %s \" \\\n \"module as available or builtin.\" % module\n all_available = False\n\n # If something goes wrong with uname or file access, try lsmod.\n except BaseException:\n try:\n modules = check_output([\"lsmod\"])\n for module in REQUIRED_MODULES:\n if module not in modules:\n print >> sys.stderr, \"WARNING: Unable to detect the %s \" \\\n \"module with lsmod.\" % module\n all_available = False\n except BaseException as e:\n print >> sys.stderr, \"ERROR: Could not check for loaded modules \\n%s\" % e\n return False\n\n return all_available", "def _has_module(self, path, module_name):\n\n return os.path.isfile(os.path.join(path, '{module}.py'.format(module=module_name)))", "def has_module(self, name: str) -> bool:\n return name in self.module_files", "def check_device(pydbg_device_url):\n from csr.front_end.pydbg_front_end import PydbgFrontEnd\n from csr.transport.tctrans import TcError\n try:\n device, _ = PydbgFrontEnd.attach({\"device_url\": pydbg_device_url}, interactive=False)\n except TcError:\n print(\"Connection failed\")\n return False\n\n print(\"Connected to device\")\n print(\"Checking if device is readable...\")\n try:\n device_is_readable = device.chip.curator_subsystem.core.data[0x8000]\n return True\n except RuntimeError:\n print(\"Device not readable\")\n return False", "def _is_system_installed( self ):\n return self._system.test_library(self._library, self._headers)", "def isAvailable(cls, raise_exception=False):\n \n libc_name = \"libc.so.6\"\n func_name = \"\"\n try:\n if sys.platform.startswith('linux'):\n cls.libc = CDLL(libc_name)\n func_name = \"prctl\"\n cls.prctl = cls.libc.prctl\n func_name = \"syscall\"\n cls.syscall = cls.libc.syscall\n return True\n else:\n return False\n except OSError as e:\n if raise_exception:\n raise LibraryNotFoundError(libc_name, *e.args)\n return False\n except AttributeError as e:\n if raise_exception:\n raise FuncNotFoundError(libc_name, func_name, *e.args)\n return False", "def has_binding(api):\n # we can't import an incomplete pyside and pyqt4\n # this will cause a crash in sip (#1431)\n # check for complete presence before importing\n module_name = {QT_API_PYSIDE: 'PySide',\n QT_API_PYQT: 'PyQt4',\n QT_API_PYQTv1: 'PyQt4',\n QT_API_PYQT_DEFAULT: 'PyQt4',\n QT_API_PYQT5: 'PyQt5',\n QT_API_PYQT6: 'PyQt6',\n }\n module_name = module_name[api]\n\n import imp\n try:\n #importing top level PyQt4/PySide module is ok...\n mod = __import__(module_name)\n #...importing submodules is not\n imp.find_module('QtCore', mod.__path__)\n imp.find_module('QtGui', mod.__path__)\n imp.find_module('QtSvg', mod.__path__)\n\n #we can also safely check PySide version\n if api == QT_API_PYSIDE:\n return check_version(mod.__version__, '1.0.3')\n else:\n return True\n except ImportError:\n return False", "def is_installed():\n modules_dir = Powershell.get_modules_dir()\n\n if not os.path.exists(modules_dir):\n return False\n\n 
modules_in_powershell_dir = os.listdir(modules_dir)\n\n for module in __get_powercli_modules_names(ZipFile(consts.POWERCLI_ZIP_PATH)):\n if module not in modules_in_powershell_dir:\n return False\n \n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List our attributes plus the ones from the underlying target, and the devlib modules that could be loaded ondemand.
def __dir__(self):
    attrs = set(super().__dir__()) | set(dir(self.target)) | self._devlib_loadable_modules
    return sorted(attrs)
[ "def build_attributes(self):\n pass", "def print_attribute_list(self):\n p = prettytable.PrettyTable((\"VISA name\", \"Constant\", \"Python name\", \"val\"))\n for attr in getattr(self.current, \"visa_attributes_classes\", ()):\n try:\n val = self.current.get_visa_attribute(attr.attribute_id)\n except VisaIOError as e:\n val = e.abbreviation\n except Exception as e:\n val = str(e)\n if len(val) > 10:\n val = val[:10] + \"...\"\n p.add_row((attr.visa_name, attr.attribute_id, attr.py_name, val))\n\n print(p.get_string(sortby=\"VISA name\"))", "def Attributes(self) -> _n_5_t_17:", "def device_state_attributes(self):", "def getAttrs(self):\n\t\treturn self._attributes", "def listAttributes(object):\n prettyPrint(object.__dict__.keys())", "def get_stack_attributes(self, environment):\n stack_attribute_list, stack_attribute_dict = [], {}\n for attribute, details in self.module_config_data['stack_attributes'].iteritems():\n stack_attribute_list.append((details['display_name'], details['editable']))\n stack_attribute_dict[details['display_name']] = details\n return(stack_attribute_list, stack_attribute_dict)", "def get_attrs(self):\n return self.ms.get_attrs()", "def do_list_modules(self, line):\n print(\"Loaded modules: \")\n for m in self.modules:\n print(\" - %s: %s [%s] \" % (self.colorize(self.modules[m].name, \"blue\"), self.modules[m].short_description,\n \",\".join([str(x) for x in self.modules[m].get_available_devices()])))", "def supportedAttrs(self, path=None):\n if not path: path = self.homedir\n d = self.do_getattrdict(path, [FATTR4_SUPPORTED_ATTRS])\n return d[FATTR4_SUPPORTED_ATTRS]", "def listattrs(cls: object, verbose:bool=False):\n for attr in dir(cls):\n if isinstance(getattr(cls, attr), (bytes, str, tuple, list, dict)):\n if not verbose and (attr.startswith('__') and attr.endswith('__')):\n continue\n echo(cls=cls, attribute=attr, list_delimiter=', ')", "def _get_Buffer_andAttributeList(self):\n try:\n return ReadIM.get_Buffer_andAttributeList(self.loadfile)\n except IOError:\n raise IOError(\n \"Problem loading file: {0}.Message: {1}\".format(self.loadfile, sys.exc_info()[1])\n )", "def listAttributes(self):\n return list(self._attributes.keys())", "def createAttrDicts():\n ret = {}\n # lfw v1.1\n ret['lfw_v1.1'] = d = {}\n fields = getmodelfields('lfw_v1.1')\n for l in open('attrnames.txt'):\n num, name = l.strip().split('\\t', 1)\n if name not in fields: continue\n d[num] = d[int(num)] = d[name] = name\n return ret", "def __dir__(self):\n d = object.__dir__(self)\n d.extend(self._readonlyattrs)\n d.extend(self._attrmap.keys())\n if self.request:\n d.extend(self._after_request)\n return d", "def _findVersions(self):\n self.descriptorVersions = []\n for nm in self.simpleList:\n vers = 'N/A'\n if hasattr(DescriptorsMod, nm):\n fn = getattr(DescriptorsMod, nm)\n if hasattr(fn, 'version'):\n vers = fn.version\n self.descriptorVersions.append(vers)", "def test_all_modules_reported(self, capsys):\r\n pydicom.env_info.main()\r\n\r\n out, err = capsys.readouterr()\r\n lines = out.split(\"\\n\")\r\n modules = [line.split(\"|\")[0].strip() for line in lines[2:] if line]\r\n\r\n assert modules == [\r\n \"platform\",\r\n \"Python\",\r\n \"pydicom\",\r\n \"gdcm\",\r\n \"jpeg_ls\",\r\n \"numpy\",\r\n \"PIL\",\r\n ]", "def _RuntimeProperties(self):\n ret = []\n if FLAGS.boost_dex2oat:\n ret.append(Properties(name='dalvik.vm.dex2oat-filter',\n value='interpret-only'))\n return ret", "def _get_available_fields(self):\n return self.walkdir(PATH_DEBUGFS_KVM)[2]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Install tools additional to those specified in the test config 'tools' field
def install_tools(self, tools):
    def bin_path(tool):
        binary = os.path.join(ASSETS_PATH, 'binaries', self.abi, tool)
        if not os.path.isfile(binary):
            binary = os.path.join(ASSETS_PATH, 'binaries', 'scripts', tool)
        return binary

    tools = sorted(set(tools) - self._installed_tools)

    # TODO: compute the checksum of the tool + install location and keep
    # that in _installed_tools, so we are sure to be correct
    for tool in tools:
        self.target.install(bin_path(tool))
        self._installed_tools.add(tool)
[ "def tools(c):\n for tool in TOOLS:\n if not which(tool):\n c.run(f\"{VENV_BIN}/python -m pip install {tool}\", pty=PTY)", "def install_all():\n wf_list = list(set().union(verify_list(), verify_directory()))\n wf_list.sort()\n\n tools = {}\n for wflow in wf_list:\n WFC.import_workflow_from_local_path(wflow, True)\n if ARGS.t:\n wf_d = dict_from_file(wflow)\n if \"steps\" in wf_d.keys():\n tool_d = wf_d[\"steps\"]\n tools = process_tool_dict(tool_d, tools)\n\n if ARGS.t:\n\t#install tools\n dtime = datetime.datetime.now()\n tmp_file = \"/tmp/gtools_\"+str(dtime.microsecond)+\".yml\"\n with open(tmp_file, \"w+\") as raw:\n raw.write(tool_to_yaml(tools, \"G-OnRamp Tools\"))\n env = \"/usr/bin/env\"\n cmd = \"/tmp/gonramp/install_tool_yml.py\"\n call([\"pwd\"])\n t_args = [\"-a\", ARGS.a, \"-k\", ARGS.k, \"-t\", tmp_file]\n call([env, \"python\", cmd] + t_args)\n call([env, \"rm\", \"-f\", tmp_file])\n\n ta_file = \"/tmp/gonramp/tool_addenda.yml\"\n if isfile(ta_file):\n ta_args = [\"-a\", ARGS.a, \"-k\", ARGS.k, \"-t\", ta_file]\n call([env, \"python\", cmd] + ta_args)\n call([env, \"rm\", \"-f\", ta_file])", "def setup():\n if toolbox is None:\n return\n # Push all the toolbox tests to module level\n G = globals()\n for i, section in enumerate( toolbox.sections ):\n for j, tool in enumerate( section.tools ):\n if tool.tests:\n for k, testdef in enumerate( tool.tests ):\n name = \"%s > %s > %s\" % ( section.name, tool.name, testdef.name )\n testcase = get_testcase( testdef, name )\n G[ 'testcase_%d_%d_%d' % ( i, j, k ) ] = testcase", "def install_tools():\n commands = \"\"\"\nyum -y update\nyum -y install kernel-devel\nyum -y groupinstall \"KDE desktop\" \"X Window System\" \"Fonts\"\nyum -y groupinstall \"Server with GUI\"\n \"\"\".strip().split('\\n')\n run_commands(commands)\n reboot()", "def upgrade_tools(self, execution_type=None,\n installer_options=None, **kwargs):\n pass", "def test_torsiondrive_tools():\n import torsiondrive.tools", "def test_multi_tool(self):\n tools = set()\n while len(tools) < 3:\n tools.add(random.choice(tool_list))\n tools = list(tools)\n # normalise\n if 'all' in tools:\n tools = ['all']\n args = parse_args(\"tests {}\".format(' '.join(tools)), use_shlex=True)\n self.assertCountEqual(args.tool, tools)", "def _initExtra(self):\n if not self._extratools:\n if not self.exists(\"ExtraTools.py\"):\n url = \"https://raw.githubusercontent.com/Jumpscale/jumpscale_core/master/install/ExtraTools.py\"\n self.download(url, \"/tmp/ExtraTools.py\")\n if \"/tmp\" not in sys.path:\n sys.path.append(\"/tmp\")\n from ExtraTools import extra\n self.extra = extra\n self._extratools = True", "def test_install_helper_brew(self):\n self.brew_install_test('cdrtools')", "def setup_release_tools(context):\n context.run('npm install -g semantic-release@\"^17.0.4\"')\n context.run('npm install -g @semantic-release/changelog@\"^5.0.1\"')\n context.run('npm install -g @semantic-release/exec@\"^5.0.0\"')\n context.run('npm install -g @semantic-release/git@\"^9.0.0\"')\n context.run('npm install -g @semantic-release/github@\"^7.0.5\"')", "def install_additional_packages():\n if INSTALLER == 'APT':\n os.system('apt-get -y update && apt-get -y install wget apache2 git \\\n && service apache2 start')\n elif INSTALLER == 'YUM':\n os.system('yum update -y && yum install -y wget httpd git \\\n && service httpd start')\n elif INSTALLER == 'ZYPPER':\n os.system('zypper update -y && zypper install -y wget httpd git \\\n && service apache2 start')", "def update_tool_configs(self) -> None:\n tools = 
reduce(lambda a, b: a + b, list(self.tool_configs.values()))\n self.database.update_tools(tools)", "def prepare_install_modulemd_tools(log, host):\n command = 'dnf copr enable frostyx/modulemd-tools-epel -y'\n retval = host.sh_run(log, command)\n if retval.cr_exit_status:\n log.cl_error(\"failed to run command [%s] on host [%s], \"\n \"ret = [%d], stdout = [%s], stderr = [%s]\",\n command,\n host.sh_hostname,\n retval.cr_exit_status,\n retval.cr_stdout,\n retval.cr_stderr)\n return None\n\n return [\"modulemd-tools\"]", "def _check_tools(self, args: Namespace) -> None:\n self._dow_minion.check_tool(tool=args.dow_tool[0] if args.dow_tool else 'aria2c')\n self._alc_minion.check_tool(conversion_tool=args.cov_tool[0] if args.cov_tool else 'ffmpeg',\n concatenation_tool=args.cat_tool[0] if args.cat_tool else 'cat')\n if self._encrypted:\n self._dec_minion.check_tool(tool=args.dec_method[0] if args.dec_tool else 'openssl')", "def post_setup(self, context):\n os.environ[\"VIRTUAL_ENV\"] = context.env_dir\n # if not self.nodist:\n # self.install_setuptools(context)\n # Can't install pip without setuptools\n if not self.nopip and not self.nodist:\n self.install_pip(context)", "def _add_installation_directories_to_tool_dependencies(self, tool_dependencies):\n for dependency_key, requirements_dict in tool_dependencies.items():\n if dependency_key in [\"set_environment\"]:\n continue\n repository_name = requirements_dict.get(\"repository_name\", UNKNOWN)\n repository_owner = requirements_dict.get(\"repository_owner\", UNKNOWN)\n changeset_revision = requirements_dict.get(\"changeset_revision\", UNKNOWN)\n dependency_name = requirements_dict[\"name\"]\n version = requirements_dict[\"version\"]\n if self.app.tool_dependency_dir:\n root_dir = self.app.tool_dependency_dir\n else:\n root_dir = \"<set your tool_dependency_dir in your Galaxy configuration file>\"\n install_dir = os.path.join(\n root_dir, dependency_name, version, repository_owner, repository_name, changeset_revision\n )\n requirements_dict[\"install_dir\"] = install_dir\n tool_dependencies[dependency_key] = requirements_dict\n return tool_dependencies", "def setup_build_tests(self):\n self.cache_extra_test_sources([self.examples_src_dir])", "def get_tool_install(tool_dirname: str) -> Dict[str, Any]:\n return load_yaml(os.path.join(PHP_TOOL_PATH, tool_dirname, \"install.yml\"))", "def task_develop_install():\n return {'actions': [\n CmdAction(_conda_build_deps),\n CmdAction(_conda_install_with_options_hacked),\n #CmdAction(_build_dev), # Switch to locally built version at later point\n #\"conda install --use-local panel\"\n \"conda uninstall panel --force\",\n \"python setup.py develop --no-deps\"],\n 'params': [_options_param,_channel_param]}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Context manager that lets you freeze the userspace.
def freeze_userspace(self):
    logger = self.logger
    if not self.is_rooted:
        logger.warning('Could not freeze userspace: target is not rooted')
        cm = nullcontext
    elif not self.is_module_available('cgroups'):
        logger.warning('Could not freeze userspace: "cgroups" devlib module is necessary')
        cm = nullcontext
    else:
        controllers = [s.name for s in self.cgroups.list_subsystems()]
        if 'freezer' not in controllers:
            logger.warning('Could not freeze userspace: freezer cgroup controller not available on the target')
            cm = nullcontext
        else:
            exclude = copy.copy(self.CRITICAL_TASKS[self.target.os])

            # Do not freeze the process in charge of de-freezing, otherwise we
            # will freeze to death and a machine hard reboot will be required
            if isinstance(self.target, devlib.LocalLinuxTarget):
                exclude.append(str(os.getpid()))

            @destroyablecontextmanager
            def cm():
                logger.info(f"Freezing all tasks except: {','.join(exclude)}")
                try:
                    yield self.cgroups.freeze(exclude)
                except ContextManagerExit:
                    logger.info('Un-freezing userspace tasks')
                    self.cgroups.freeze(thaw=True)

    return cm()
[ "def softModCtx(*args, **kwargs):\n\n pass", "def softModContext(*args, **kwargs):\n\n pass", "def nonsecure_lock(self) -> _MemAttrContext:\n return self.hnonsec_lock(NONSECURE)", "def __enter__(self) -> Context:\n return self", "def jvm_context_manager(parent_task, current_task):\n try:\n set_jvm_context(current_task)\n yield\n finally:\n set_jvm_context(parent_task)", "def SetupContextCache(self):\n ctx = tasklets.get_context()\n ctx.set_cache_policy(False)\n ctx.set_memcache_policy(False)", "def set_context(self):\n # self.ctx = [gpu(int(i)) for i in self.gpus.split(',') if i.strip()][0]\n # self.ctx = self.ctx if self.ctx else [cpu()]\n self.ctx = gpu(1)", "def ContextProfiler():\n import yappi\n\n print('[YAPPI START]')\n yappi.set_clock_type('wall')\n yappi.start()\n\n try:\n yield None\n finally:\n finish_yappi()", "def keep_system_active():\n if os.name == 'nt':\n windll.kernel32.SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)", "def contextmanager(func):\n func.__returns_contextmanager__ = True\n return func", "def jp_zmq_context():\n import zmq\n\n ctx = zmq.asyncio.Context()\n yield ctx\n ctx.term()", "def act(self):\n g = get_root(self).globals\n g.ipars.unfreeze()\n g.rpars.unfreeze()\n g.observe.load.enable()\n self.disable()", "def __preexec_fn(self):\n if self.pty is not None:\n self.__pty_make_controlling_tty(self.pty)\n\n if not self.aslr:\n try:\n if context.os == 'linux' and self._setuid is not True:\n ADDR_NO_RANDOMIZE = 0x0040000\n ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)\n\n resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))\n except Exception:\n self.exception(\"Could not disable ASLR\")\n\n # Assume that the user would prefer to have core dumps.\n try:\n resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))\n except Exception:\n pass\n\n # Given that we want a core file, assume that we want the whole thing.\n try:\n with open('/proc/self/coredump_filter', 'w') as f:\n f.write('0xff')\n except Exception:\n pass\n\n if self._setuid is False:\n try:\n PR_SET_NO_NEW_PRIVS = 38\n ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)\n except Exception:\n pass\n\n # Avoid issues with attaching to processes when yama-ptrace is set\n try:\n PR_SET_PTRACER = 0x59616d61\n PR_SET_PTRACER_ANY = -1\n ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)\n except Exception:\n pass\n\n\n if self.alarm is not None:\n signal.alarm(self.alarm)\n\n self.preexec_fn()", "def currentCtx():\n pass", "def greasePencilCtx():\n pass", "def deactivate(self, context):\n pass", "def freeze(self):\n self.app.disable()\n self.clear.disable()\n self.nod.disable()\n self.led.disable()\n self.dummy.disable()\n self.readSpeed.disable()\n self.expose.disable()\n self.number.disable()\n self.wframe.disable(everything=True)\n self.nmult.disable()\n self.frozen = True", "def suspend():\n\n def decorator(f):\n def wrapper(*args, **kwargs):\n with SuspendUndo():\n return f(*args, **kwargs)\n return wrapper\n return decorator", "def global_context(ctx):\n global compile_ctx\n old_ctx = compile_ctx\n compile_ctx = ctx\n yield\n compile_ctx = old_ctx" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Context manager that lets you disable all idle states
def disable_idle_states(self):
    logger = self.logger
    logger.info('Disabling idle states for all domains')
    try:
        cpuidle = self.cpuidle
    except AttributeError:
        logger.warning('Could not disable idle states, cpuidle devlib module is not loaded')
        cm = nullcontext
    else:
        @destroyablecontextmanager
        def cm():
            try:
                for cpu in range(self.plat_info['cpus-count']):
                    cpuidle.disable_all(cpu)
                yield
            except ContextManagerExit:
                logger.info('Re-enabling idle states for all domains')
                for cpu in range(self.plat_info['cpus-count']):
                    cpuidle.enable_all(cpu)

    return cm()
[ "def deactivate(self, context):\n context.deactivate()", "def deactivate(self, context):\n pass", "def idle(self):\n self._change_state(\"idle\")", "def noop_context():\n yield", "def to_idle(self):\r\n\r\n\t\tself.__send_extended_byte_array(self.MODE_IDLE, [])", "def keep_system_active():\n if os.name == 'nt':\n windll.kernel32.SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)", "def in_idle():\n ...", "def reset_context(self):\n self.current.clear()", "def idle_check():\n pass", "def disable():\n global ideep_enabled\n old = ideep_enabled\n ideep_enabled = False\n try:\n yield\n finally:\n ideep_enabled = old", "def disable_contexts(self, n=None):\n if n is None:\n n = len(self._contexts)\n self._active_ctx.remove_contexts(n)\n self._build_cache()", "def on_idle(self, *args):", "def unlockMouse(self):\n pass", "def detach_context():\n libca.ca_detach_context()", "def DisableOnWinEnter(self):\n class C(object):\n def __enter__(c):\n self._disable_on_winenter = True\n return c\n\n def __exit__(c, type, value, traceback):\n self._disable_on_winenter = False\n\n return C()", "def is_idle(self):\n pass", "def idle(self):\r\n self.schedule = []\r\n self.blockList = []\r\n print(\"System is now entering sleep mode\")", "def __exit__(self, exc_type, exc_val, exc_tb):\n GlobalOGLContextStack.pop_current() # removing self\n # return binding\n last = GlobalOGLContextStack.get_current()\n if last and last is self:\n pass\n else:\n if last:\n with last.manager.glfw as glfw_window:\n glfw.make_context_current(glfw_window)\n else:\n glfw.make_context_current(None)\n ContextCounter.checkout(self)", "def release_context(self, context):\n if self._active_context is context:\n self._active_context = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorates a given function to execute remotely using
def remote_func(self, **kwargs):
    def wrapper_param(f):
        @functools.wraps(f)
        def wrapper(*f_args, **f_kwargs):
            return self.execute_python(f, f_args, f_kwargs, **kwargs)
        return wrapper
    return wrapper_param
[ "def wrap_with_server(f, server):\n if not has_ls_param_or_annotation(f, type(server)):\n return f\n\n if asyncio.iscoroutinefunction(f):\n\n async def wrapped(*args, **kwargs):\n return await f(server, *args, **kwargs)\n\n else:\n wrapped = functools.partial(f, server)\n if is_thread_function(f):\n assign_thread_attr(wrapped)\n\n return wrapped", "def remote_eval(func, *args, **kwargs):\n return eval(remote(func, *args, **kwargs))", "def __call__(self, fn):\n # We record the function name for that command\n self.fn = fn.__name__\n # And we decorate the function\n def decorated(cls, player, arg):\n m = self.regex.match(arg)\n if m:\n # if arguments match, we execute the command\n return fn(cls, player, m)\n else:\n # orelse we print a short usage\n if self.onfail is not None:\n return getattr(cls, self.onfail)(player, arg)\n else:\n return info(player,\n \"<b>Usage:</b> <code>{} <i>{}</i></code>.\"\n .format(self.name, self.usage)\n )\n return decorated", "def Wrap( self, fn, wrapFn ):\n def Wrapped( *args ):\n return wrapFn( *fn( *args ) )\n return Wrapped", "def passthrough_decorator(f):\n return f", "def ApplyToResult( func ):\n\n @simple_decorator\n def wrap( f ):\n def new_function(*args, **kw):\n return func( f( *args, **kw ) )\n return new_function\n \n return wrap", "def _run_with_cli(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)\n return HPE3ParFilePersonaClient._build_command_and_run_with_ssh(\n func.__name__, *args, **kwargs)\n return wrapper", "def wrapper(entity, *args, **kwargs):\n if entity.is_coordinator:\n return funct(entity, *args, **kwargs)\n return funct(entity.coordinator, *args, **kwargs)", "def decorator(function):\n try:\n self.__save_endpoint(\n symmetric.endpoints.Endpoint(\n route, methods, response_code, function, auth_token\n )\n )\n except symmetric.errors.DuplicatedRouteError as err:\n self.__app.logger.error(\n f\"[[symmetric]] DuplicatedRouteError: {err}\"\n )\n sys.exit(1)\n\n # Decorate the wrapper\n @self.__app.route(\n route, methods=methods, endpoint=function.__name__\n )\n def wrapper(*args, **kwargs):\n \"\"\"\n Function wrapper. The main function gets logged, the JSON body\n gets extracted from the request and gets unpacked as **kwargs\n to pass to the main function. Some precautions are also taken\n (namely a try/except combo). 
Returns the function's output\n jsonified with a response code.\n \"\"\"\n try:\n self.__log_request(flask.request, route, function)\n\n # Get the body\n body = flask.request.get_json()\n if not body:\n body = {}\n\n # Check for token authentication\n symmetric.helpers.authenticate(\n body, auth_token, self.__client_token_name,\n self.__server_token_name)\n\n # Filter method parameters\n parameters = symmetric.helpers.filter_params(\n function, body, auth_token, self.__client_token_name)\n return flask.jsonify(function(**parameters)), response_code\n except symmetric.errors.AuthenticationRequiredError as err:\n # Error authenticating\n self.__app.logger.error(\n f\"[[symmetric]] exception caught: {err}\"\n )\n return flask.jsonify({}), 401\n except Exception as err:\n self.__app.logger.error(\n f\"[[symmetric]] exception caught: {err}\"\n )\n return flask.jsonify({}), 500\n return wrapper", "def dummy_wrap(self, *args, **kwargs):\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)", "def __call__(self, func):\n # set logger if it was not set earlier\n if not self.logger:\n formatter = logging.Formatter('%(asctime)s %(levelno)s %(name)s @ %(message)s')\n self.logger = logging.getLogger(func.__module__)\n self.logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setFormatter(formatter)\n console.setLevel(logging.INFO)\n self.logger.addHandler(console)\n\n @functools.wraps(func)\n def wrapper(*args, **kwds):\n self.logger.info(self.ENTRY_MESSAGE.format(func.__name__))\n f_result = func(*args, **kwds)\n self.logger.info(self.RETURNS_MESSAGES.format(pformat(f_result)))\n self.logger.info(self.EXIT_MESSAGE.format(func.__name__))\n return f_result\n return wrapper", "def _run(func, *args, **kwargs):\n enable()\n func(*args, **kwargs)", "def wrapper(func):\n\tdef inner_func(*args, **kwargs):\n\t\t\"\"\"\n\t\tA real inner function to run parammter function.\n\t\t:param args: default args\n\t\t:param kwargs: default more args\n\t\t:return: None\n\t\t\"\"\"\n\t\tprint \"Entering function \"\n\t\tfunc(*args, **kwargs)\n\t\tprint \"Exiting function\"\n\n\treturn inner_func", "def __call__(self, f):\n if self.fn is None:\n # Store the decorator to decorate so that we can apply\n self.fn = f\n functools.update_wrapper(self, f)\n return self\n decorated = self.fn(f)\n return self.decorator(bind(*self.args, **self.kwargs)(decorated))", "def func_wrapper():\n real_adapter_send = HTTPAdapter.send\n func()\n HTTPAdapter.send = real_adapter_send", "def rpc_method(func):\r\n setattr(func, 'rpc_method', True)\r\n\r\n return func", "def add_fake_remote(func):\n def wrapper(*args, **kwargs):\n instance = args[0]\n output = instance.conan(['remote', 'list'])\n if 'fake' not in output:\n instance.conan(['remote', 'add', 'fake', 'https://api.bintray.com/conan/foobar/conan'])\n func(*args, **kwargs)\n return wrapper", "def auto(func, filename, *args, **kwargs):\n if is_remote_path(filename):\n return remote_eval(func, filename, *args, **kwargs)\n return func(filename, *args, **kwargs)", "def background_func(func):\n # Putting this in the decorator function to avoid circular imports\n from shrapnel.classtools import BackgroundFunction\n class FuncClass(BackgroundFunction):\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n super(FuncClass, self).__init__()\n\n def execute(self):\n return func(*self.args, **self.kwargs)\n\n @wraps(func)\n def decorated(*args, **kwargs):\n return FuncClass(*args, **kwargs)\n\n return decorated" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Kind of a broadcast version of `torch.gather` function Currently this support for inputs `x` with 3 dimensions and `indices` with 2 dimensions.
def gather_row(x, indices): assert ( len(x.size()) == 3 and len(indices.size()) == 2 ), "not supported input tensor shape" batch_size, sequence_size, hidden_size = x.size() indices += torch.arange(0, batch_size * sequence_size, sequence_size).to(x.device)[ :, None ] out = x.view((batch_size * sequence_size, hidden_size)) out = out.index_select(0, indices.flatten()) out = out.reshape(indices.size() + (hidden_size,)) return out
[ "def gather(data, axis, indices):\n return cpp.gather(data, axis, indices)", "def batch_gather(tensor, indices):\n shape = list(tensor.shape)\n flat_first = tf.reshape(tensor, [shape[0] * shape[1]] + shape[2:])\n indices = tf.convert_to_tensor(indices)\n offset_shape = [shape[0]] + [1] * (indices.shape.ndims - 1)\n offset = tf.reshape(tf.range(shape[0]) * shape[1], offset_shape)\n output = tf.gather(flat_first, indices + offset)\n return output", "def gather(reference, indices):\n\treturn tf.gather(reference, indices)", "def _gather_for_multidim_indexing(args: GatherArgs):\n # Guess the axis.\n axis = args.dnums.collapsed_slice_dims[0]\n squeezed_indices = tf.squeeze(args.start_indices, -1)\n op_shape = jax2tf._eval_shape(args.op_shape)\n start_indices = _clip((op_shape[axis],), squeezed_indices, (1,))\n return tf.gather(args.operand, start_indices, axis=axis, batch_dims=0)", "def _gather_rowwise(self, values, indices):\n\n result = np.zeros(indices.shape, dtype=values.dtype)\n\n for row in range(len(indices)):\n result[row,:] = values[row, indices[row]]\n\n return result", "def select_indices(tensor, indices):\n return tensor.gather(1, indices.unsqueeze(1)).squeeze()", "def _gather_for_scalar_indexing(args: GatherArgs):\n indices = tf.expand_dims(args.dnums.start_index_map, 1)\n # lax.gather uses an \"index map\" which maps `start_indices` to the right axes\n # in `operand`. Since tf.strided_slice uses a single array for specifying the\n # start indices, we use a scatter to map the start indices to the right axes.\n op_shape = jax2tf._eval_shape(args.op_shape)\n slice_sizes_tf = jax2tf._eval_shape(args.slice_sizes)\n # TODO(marcvanzee): Consider transposing `operand`, which is probably more\n # optimization friendly.\n begin = tf.scatter_nd(indices, args.start_indices, [len(op_shape)])\n begin = _clip(op_shape, begin, slice_sizes_tf)\n end = slice_sizes_tf + begin\n\n # `collapsed_slice_dims` is a tuple of dimensions to collapse, e.g. (0, 2).\n # `tf.strided_slice` expects a binary mask to specify the shrink axes, i.e.,\n # if we want to shrink axis 0 and 2, this corresponds to binary mask 101,\n # which is 5 in decimals. 
The following line converts the lax representation\n # to the one used by `tf.strided_slice`.\n shrink_mask = sum(2**x for x in args.dnums.collapsed_slice_dims)\n res = tf.strided_slice(args.operand, begin, end, shrink_axis_mask=shrink_mask)\n # Shape inference doesn't work for tf.strided_slice.\n res.set_shape(jax2tf._aval_to_tf_shape(args.out_aval))\n return res", "def concat_all_gather(x: torch.Tensor) -> torch.Tensor:\n output = [torch.empty_like(x) for _ in range(dist.get_world_size())]\n dist.all_gather(output, x, async_op=False)\n output = torch.cat(output, dim=0)\n return output", "def _index(t, index):\n if not isinstance(index, (tuple, list)):\n index = list(index)\n for i in index:\n t = tf.gather(t, i)\n return t", "def _index(t, index):\n if not isinstance(index, (tuple, list)):\n index = list(index)\n for i in index:\n t = tf.gather(t, i)\n #t = tf.gather(t, index[0])\n return t", "def _apply_index(data, indices, axis=0):\n ndim = data.type.ndim\n shape = data.shape\n if indices.type.ndim < ndim - 1:\n indices = tensor.shape_padright(indices,\n n_ones=ndim-indices.type.ndim-1)\n return data[tuple(indices if a == axis else\n _axis_count(shape, a, ndim - 1) if a < axis else\n _axis_count(shape, a - 1, ndim - 1)\n for a in range(ndim))]", "def test_gather_nd(self):\n self.assert_tensor_equal(\n sample_tensor(),\n tf.gather_nd(sample_tensor(), index_tensor(sample_tensor().shape))\n )", "def _apply_perm(data, indices, axis=0):\n ndim = data.type.ndim\n shape = data.shape\n if indices.type.ndim < ndim:\n indices = tensor.shape_padright(indices,\n n_ones=ndim-indices.type.ndim)\n return data[tuple(indices if a == axis else\n _axis_count(shape, a, ndim) for a in range(ndim))]", "def helper_sf_gather(sf_tensor, indeces):\n sizes = list(sf_tensor.size()) # list: [batch, *, |A|, d]\n sizes[-2] = 1 # list: [batch, *, 1, d]\n\n idxs = indeces.clone().unsqueeze(-1) # (batch_n, 1, 1)\n while len(sizes) > len(idxs.size()):\n idxs = idxs.unsqueeze(-2) # (batch_n, *(1), 1, 1)\n idxs = idxs.expand(sizes) # (batch_n, *, 1, d) # TODO check if this works with dim > 3\n\n # gathering. For dim == -2\n # out[i]...[j][k] = input[i]...[idxs[i]...[j][k]][k]\n sf_a_tensor = sf_tensor.gather(-2, idxs) # (batch_n, *, 1, d)\n return sf_a_tensor.squeeze(-2) # (batch_n, *, d)", "def take_along_cols(X, indices):\n return jnp.take_along_axis(X, indices, axis=0)", "def all_gather(value, dim=0):\n return AllGather.apply(value, dim)", "def take_along_rows(X, indices):\n return jnp.take_along_axis(X, indices, axis=1)", "def map_idx(tensor_unfl, idx_fl, gpu):\n #row_number of unflattened tensor is index of flattened tensor // amount of columns of unflattened tensor\n #col_number of unflattened tensor is index of flattened tensor % amount of columns of unflattened tensor\n n_cols = tensor_unfl.size()[-1]\n row_idx_unfl = idx_fl // n_cols\n col_idx_unfl = idx_fl % n_cols\n result = torch.tensor([row_idx_unfl, col_idx_unfl])\n if gpu:\n if torch.cuda.is_available():\n result = result.to('cuda')\n return result", "def csr_gather_np(indptr, indices, dense, shape):\n import scipy.sparse\n sparse_data = scipy.sparse.csr_matrix((np.zeros(indices.shape), indices, indptr), shape[:2])\n coo = sparse_data.tocoo()\n coo_idx = np.stack((coo.row, coo.col))\n expect = dense[tuple(coo_idx.tolist())]\n return expect" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`torch.nn.functional.gumbel_softmax(vector)` does not work if some elements of `vector` should be masked. This performs a gumbel_softmax on just the nonmasked portions of `vector`. Passing `None` in for the mask is also acceptable; you'll just get a regular gumbel softmax. `vector` can have an arbitrary number of dimensions; the only requirement is that `mask` is broadcastable to `vector's` shape. If `mask` has fewer dimensions than `vector`, we will unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask, do it yourself before passing the mask into this function.
def masked_gumbel_softmax( vector: torch.Tensor, mask: torch.BoolTensor, dim: int = -1, tau: float = 1, ) -> torch.Tensor: if mask is None: result = torch.nn.functional.gumbel_softmax(vector, dim=dim, tau=tau) else: while mask.dim() < vector.dim(): mask = mask.unsqueeze(1) result = torch.nn.functional.gumbel_softmax(vector * mask, dim=dim, tau=tau) result = result * mask result = result / ( result.sum(dim=dim, keepdim=True) + tiny_value_of_dtype(result.dtype) ) return result
[ "def masked_softmax(\n vector: torch.Tensor,\n mask: torch.BoolTensor,\n dim: int = -1,\n memory_efficient: bool = False,\n) -> torch.Tensor:\n if mask is None:\n result = torch.nn.functional.softmax(vector, dim=dim)\n else:\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n if not memory_efficient:\n # To limit numerical errors from large vector elements outside the mask, we zero these out.\n result = torch.nn.functional.softmax(vector * mask, dim=dim)\n result = result * mask\n result = result / (\n result.sum(dim=dim, keepdim=True) + tiny_value_of_dtype(result.dtype)\n )\n else:\n masked_vector = vector.masked_fill(~mask, min_value_of_dtype(vector.dtype))\n result = torch.nn.functional.softmax(masked_vector, dim=dim)\n return result", "def memory_effient_masked_softmax(vector: torch.Tensor, mask: torch.Tensor,\n dim: int = -1, mask_value=-1e7) -> torch.Tensor:\n if mask is None:\n result = torch.nn.functional.softmax(vector, dim=dim)\n else:\n mask = mask.float()\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n # To limit numerical errors from large vector elements outside the mask, we zero these out.\n result = torch.nn.functional.softmax(vector + (1 - mask) * mask_value, dim=dim)\n return result", "def masked_log_softmax(vector, mask, dim=-1):\n if mask is not None:\n mask = mask.float()\n while mask.dim() < vector.dim():\n mask = mask.unsqueeze(1)\n # vector + mask.log() is an easy way to zero out masked elements in logspace, but it\n # results in nans when the whole vector is masked. We need a very small value instead of a\n # zero in the mask for these cases. log(1 + 1e-45) is still basically 0, so we can safely\n # just add 1e-45 before calling mask.log(). We use 1e-45 because 1e-46 is so small it\n # becomes 0 - this is just the smallest value we can actually use.\n vector = vector + (mask + 1e-45).log()\n return torch.nn.functional.log_softmax(vector, dim=dim)", "def masked_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):\n assert data is not None and mask is not None, \"Missing input data and mask\"\n return _api_internal.masked_softmax(data, mask, axis, temperature, normalize)", "def masked_softmax(logits, mask, axis=-1):\n exp_mask = (1 - tf.cast(mask, 'float')) * (-1e30) # -large where there's padding, 0 elsewhere\n masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large\n prob_dist = tf.nn.softmax(masked_logits, axis)\n return masked_logits, prob_dist", "def masked_softmax(self, x, mask=None):\n if mask is not None:\n mask = mask.float()\n if mask is not None:\n x_masked = x * mask + (1 - 1 / (mask+1e-5))\n else:\n x_masked = x\n x_max = x_masked.max(1)[0]\n x_exp = (x - x_max.unsqueeze(-1)).exp()\n if mask is not None:\n x_exp = x_exp * mask.float()\n return x_exp / x_exp.sum(1).unsqueeze(-1)", "def masked_argmax(input_tensor, mask, output_type=tf.int32):\n input_tensor.shape.assert_is_compatible_with(mask.shape)\n neg_inf = tf.constant(-float('Inf'), input_tensor.dtype)\n tf.compat.v1.assert_equal(\n tf.reduce_max(mask, axis=1), tf.constant(1, dtype=mask.dtype))\n modified_input = tf.compat.v2.where(\n tf.cast(mask, tf.bool), input_tensor, neg_inf)\n return tf.argmax(modified_input, axis=-1, output_type=output_type)", "def convert_mask_to_soft_mask(mask, dtype):\n mask = mask.to(dtype=dtype)\n mask = (1.0 - mask) * torch.finfo(dtype).min\n return mask", "def masked_log_softmax(data, mask, axis=-1, temperature=1.0, normalize=True):\n assert data is not None and mask is not None, \"Missing input data 
and mask\"\n return _api_internal.masked_log_softmax(data, mask, axis, temperature, normalize)", "def tf_masked_softmax(logits, legal_actions_mask):\n # This will raise a warning as we are taking the log of 0, which sets the 0\n # values to -inf. However, this is fine, as we then apply tf.exp, which sets\n # tf.exp(-inf) to 0. e.g. if we have logits [5, 3, 1], with legal_mask\n # [0, 1, 1], then masked_logits == [-inf, 3, 1], so we subtract the max to\n # get [-inf, 0, -2], and apply tf.exp to get [0, 1, e^-2].\n legal_actions_mask = tf.cast(legal_actions_mask, dtype=logits.dtype)\n masked_logits = logits + tf.log(legal_actions_mask)\n max_logit = tf.reduce_max(masked_logits, axis=-1, keepdims=True)\n exp_logit = tf.exp(masked_logits - max_logit)\n return exp_logit / tf.reduce_sum(exp_logit, axis=-1, keepdims=True)", "def np_masked_softmax(logits, legal_actions_mask):\n masked_logits = logits + np.log(legal_actions_mask)\n max_logit = np.amax(masked_logits, axis=-1, keepdims=True)\n exp_logit = np.exp(masked_logits - max_logit)\n return exp_logit / np.sum(exp_logit, axis=-1, keepdims=True)", "def compute_mask(self, t, default_mask):\n\n def _combine_masks(method, t, mask):\n r\"\"\"\n Args:\n method (a BasePruningMethod subclass): pruning method\n currently being applied.\n t (paddle.Tensor): tensor representing the parameter to prune\n (of same dimensions as mask).\n mask (paddle.Tensor): mask from previous pruning iteration\n\n Returns:\n new_mask (paddle.Tensor): new mask that combines the effects\n of the old mask and the new mask from the current\n pruning method (of same dimensions as mask and t).\n \"\"\"\n new_mask = mask # start off from existing mask\n new_mask = new_mask.to(dtype=t.dtype)\n\n # compute a slice of t onto which the new pruning method will operate\n if method.PRUNING_TYPE == \"unstructured\":\n # prune entries of t where the mask is 1\n slc = mask == 1\n\n # for struct pruning, exclude channels that have already been\n # entirely pruned\n elif method.PRUNING_TYPE == \"structured\":\n if not hasattr(method, \"dim\"):\n raise AttributeError(\n \"Pruning methods of PRUNING_TYPE \"\n '\"structured\" need to have the attribute `dim` defined.'\n )\n\n # find the channels to keep by removing the ones that have been\n # zeroed out already (i.e. where sum(entries) == 0)\n n_dims = t.dim() # \"is this a 2D tensor? 3D? ...\"\n dim = method.dim\n # convert negative indexing\n if dim < 0:\n dim = n_dims + dim\n # if dim is still negative after subtracting it from n_dims\n if dim < 0:\n raise IndexError(\n \"Index is out of bounds for tensor with dimensions {}\".format(\n n_dims\n )\n )\n # find channels along dim = dim that aren't already tots 0ed out\n keep_channel = mask.sum(axis=[d for d in range(n_dims) if d != dim]) != 0\n # create slice to identify what to prune\n slc = [slice(None)] * n_dims\n slc[dim] = keep_channel\n\n elif method.PRUNING_TYPE == \"global\":\n n_dims = len(t.shape) # \"is this a 2D tensor? 3D? 
...\"\n slc = [slice(None)] * n_dims\n\n else:\n raise ValueError(\n \"Unrecognized PRUNING_TYPE {}\".format(method.PRUNING_TYPE)\n )\n\n # compute the new mask on the unpruned slice of the tensor t\n partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])\n new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)\n\n return new_mask\n\n method = self._pruning_methods[-1]\n mask = _combine_masks(method, t, default_mask)\n return mask", "def softmax(x):\n if len(x.shape) > 1:\n # Matrix\n # substracting max leaves function unchanged due to softmax's invariance to sums by a constant \n # keepdims= True, because broadcasting requires trailing shape entries to match\n x -= np.max(x, axis=1, keepdims=True)\n x = np.exp(x)\n sum_exp_xj = np.sum(x, axis=1, keepdims=True)\n x = np.divide(x, sum_exp_xj)\n else:\n # Vector\n x -= np.max(x)\n x = np.exp(x)\n sum_exp_xj = np.sum(x)\n x = np.divide(x, sum_exp_xj)\n return x", "def softmax_tuned(x, tuning):\n\n # verify inputs\n assert(type(tuning) in (int, float))\n assert(type(x) is torch.Tensor)\n assert(x.size() == torch.Size([27]))\n\n # this is the normal softmax function with just the tuning variable added\n x = torch.exp(tuning*x)\n x = x/x.sum()\n\n # verify output shape\n assert(x.size() == torch.Size([27]))\n\n return x", "def replace_masked(self,tensor, mask, value):\n mask = mask.unsqueeze(1).transpose(2, 1)\n reverse_mask = 1.0 - mask\n values_to_add = value * reverse_mask\n return tensor * mask + values_to_add", "def mask_log(x: torch.FloatTensor, mask: Optional[torch.Tensor] = None) -> torch.FloatTensor:\n if mask is not None:\n # Set masked entries of x equal to 1 (in a differentiable way) so log(1) = 0\n mask = mask.float()\n x = x * mask + (1 - mask)\n\n return torch.log(x)", "def logsumexp_masked(a, mask):\n mask = tf.cast(mask, a.dtype)\n a_max = tf.math.reduce_max(a * mask, axis=1, keepdims=True)\n a = a - a_max\n a_exp = tf.math.exp(a)\n a_sum_exp = tf.math.reduce_sum(a_exp * mask, axis=1, keepdims=True)\n return tf.squeeze(tf.math.log(a_sum_exp) + a_max)", "def compute_dynamic_mask(box_num: Tensor):\n max_len = torch.max(box_num)\n mask = torch.arange(0, max_len, device=box_num.device).expand((box_num.shape[0], max_len))\n box_num = box_num.expand_as(mask)\n mask = mask < box_num\n row_mask = mask.unsqueeze(1)\n column_mask = mask.unsqueeze(2)\n mask = row_mask & column_mask\n mask = ~mask * -1\n return mask.unsqueeze(-1)", "def spatial_softmax(self, x):\n return torch.softmax(x.view(1, self.nclasses, -1), 2).view_as(x)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert source and target text to proper word ids
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): source_id_text = [[source_vocab_to_int[word] for word in sentence.split()] for sentence in source_text.split('\n')] target_id_text = [[target_vocab_to_int[word] for word in sentence.split()] + [target_vocab_to_int['<EOS>']] for sentence in target_text.split('\n')] return (source_id_text, target_id_text)
[ "def convert_word_to_wordId(source_line, target_line):\n source_words = source_line.split()\n target_words = target_line.split()\n source_id = [SourceWLexicon[word.lower()] for word in source_words]\n target_id = [TargetWLexicon[word.lower()] for word in target_words]\n return source_id, target_id", "def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):\n # TODO: Implement Function\n def _text_to_ids(text, vocab_to_int, add_eos):\n \"\"\"\n Convert text to ids\n :param text: String that contains all the text.\n :param vocab_to_int: Dictionary to go from the words to an id\n :add_eos: Boolean to decide if add '<EOS>' or not\n :return: A 2D list of ids\n \"\"\"\n # Check if id of '<EOS>' needs to add at the end of each sentence\n if add_eos:\n eos = [vocab_to_int['<EOS>']]\n else:\n eos = []\n \n # Get the id of each word in the text\n id_text = []\n for sentence in text.split('\\n'):\n sentence_id_text = [vocab_to_int[word] for word in sentence.split()] + eos\n id_text.append(sentence_id_text)\n \n return id_text\n \n # Convert source and target text to ids\n source_id_text = _text_to_ids(source_text, source_vocab_to_int, add_eos = False)\n target_id_text = _text_to_ids(target_text, target_vocab_to_int, add_eos = True)\n \n return (source_id_text, target_id_text)", "def _text_to_ids(text, vocab_to_int, add_eos):\n # Check if id of '<EOS>' needs to add at the end of each sentence\n if add_eos:\n eos = [vocab_to_int['<EOS>']]\n else:\n eos = []\n \n # Get the id of each word in the text\n id_text = []\n for sentence in text.split('\\n'):\n sentence_id_text = [vocab_to_int[word] for word in sentence.split()] + eos\n id_text.append(sentence_id_text)\n \n return id_text", "def mapWords2indices(self):\n for row in range(self.dataset.shape[0]):\n words2indices = []\n for word in self.dataset[row, 0].split():\n words2indices.append(self.word2index[word])\n \n # Append the end of the sentence token\n if self.eos_token:\n words2indices.append(self.word2index[self.eos_token])\n \n self.dataset[row, 0] = np.array(words2indices)\n \n # Map strings from target column\n if self.target_col:\n for row in range(self.dataset.shape[0]):\n words2indices = []\n \n # Insert the start of the sentence token\n if self.sos_token:\n words2indices.append(self.word2index[self.sos_token])\n \n for word in self.dataset[row, self.target_col].split():\n words2indices.append(self.word2index[word])\n\n \n # Append the end of the sentence token\n if self.eos_token:\n words2indices.append(self.word2index[self.eos_token])\n \n self.dataset[row, self.target_col] = np.array(words2indices)\n \n print('Mapped words to indices')", "def preprocess(text):\n text = text.lower()\n text = text.replace(\".\", \" .\") # ピリオドを単語から切り離す\n words = text.split(\" \")\n\n word_to_id = {}\n id_to_word = {}\n for word in words:\n # 内包表記だと重複する単語を除きにくいのでこの書き方になると思われる\n if word not in word_to_id:\n new_id = len(word_to_id)\n word_to_id[word] = new_id\n id_to_word[new_id] = word\n\n corpus = np.array([word_to_id[w] for w in words])\n\n return corpus, word_to_id, id_to_word", "def map_text_targets(self, text, title):\n start_position = text.index(title)\n mapped_target = [1 if start_position <= index < start_position + len(title) else 0 for index in\n range(len(text))]\n return torch.Tensor(mapped_target).view(1, -1).long().to(self.device)", "def next_word_processing(texts_to_ids: List, batch_size: int, seq_length: int):\n n_batches = int(len(texts_to_ids)/(seq_length*batch_size))\n texts_to_ids = 
texts_to_ids[:n_batches*batch_size*seq_length] # Get the exact number of batches wrt to batch size and seq length\n\n target_texts = np.zeros_like(texts_to_ids)\n target_texts[:-1] = texts_to_ids[1:] #Shift data to the right\n target_texts[-1] = texts_to_ids[0]\n\n target_texts = target_texts.reshape(batch_size, -1)\n texts_to_ids = np.reshape(texts_to_ids, (batch_size, -1))\n\n return texts_to_ids, target_texts", "def file_to_word_ids(file_name, word2id):\n labels = []\n prems = []\n hypoes = []\n\n print '...loading data from ' + file_name\n\n keys = word2id.keys()\n\n with open(file_name, 'r') as f:\n for line in f:\n label = line.strip().split('\\t')[0]\n prem = line.strip().split('\\t')[1]\n hypo = line.strip().split('\\t')[2]\n\n labels.append(word2id[label])\n\n line_prem = []\n for word in nltk.word_tokenize(prem.strip()):\n if word in keys:\n line_prem.append(word2id[word])\n else:\n line_prem.append(word2id['<unk>'])\n prems.append(line_prem)\n\n line_hypo = []\n for word in nltk.word_tokenize(hypo.strip()):\n if word in keys:\n line_hypo.append(word2id[word])\n else:\n line_hypo.append(word2id['<unk>'])\n hypoes.append(line_hypo)\n\n return labels, prems, hypoes", "def text2index(self, text_array, word2int):\n text2index = []\n for sentence in text_array:\n indexes = []\n for word in sentence.split(' '):\n if word in word2int:\n indexes.append(word2int.get(word))\n else:\n indexes.append(\"1\") # <unk>\n text2index.append(indexes)\n return text2index", "def buildDictGetTIDList(self,text):\n self.doc_id += 1\n out = []\n\n stemmed = [self.clean_word(w) for w in text.split()]\n for w in stemmed:\n if not self.dictionary.has_key(w):\n t_id = len(self.dictionary)\n self.dictionary.update({w:[t_id,[self.doc_id]]})\n out.append(t_id)\n else:\n t_id = self.dictionary[w][0]\n self.dictionary[w][1] += [self.doc_id]\n out.append(t_id)\n\n return out", "def _get_source_strings(self, ids):\r\n strings = super(_MarkSourceMixin, self)._get_source_strings(ids)\r\n res = []\r\n for s in strings:\r\n res.append(list(s))\r\n res[-1][1] = s[1] + '_txss'\r\n return res", "def data_to_token_ids(data_path, target_path, vocabulary_path):\n if not gfile.Exists(target_path):\n print(\"Tokenizing preprocess in %s\" % data_path)\n\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"r\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n for line in tqdm(data_file):\n tokens_ids = sentence_to_token(line, vocab)\n tokens_file.write(\" \".join([str(tok) for tok in tokens_ids]) + \"\\n\")", "def preprocess_request(request, word_to_id):\n request = util.lower_and_no_accent(request)\n words_id = [word_to_id[w] for w in request.split() if w in word_to_id]\n return sorted(set(words_id)), '-'.join(request.split())", "def mapWord2index(self):\n # Add special tokens as first elements in word2index dictionary\n token_count = 0\n for token in [self.pad_token, self.sos_token, self.eos_token, self.unk_token]:\n if token:\n self.word2index[token] = token_count\n token_count += 1\n \n # If vocabulary is trimmed, use trimmed_word_count\n if self.min_word_count or self.max_vocab_size:\n for key in self.trimmed_word_count.keys():\n self.word2index[key] = token_count\n token_count += 1\n \n # If vocabulary is not trimmed, iterate through dataset \n else:\n for line in self.dataset.iloc[:, 0]:\n for word in line.split():\n if word not in self.word2index.keys():\n self.word2index[word] = token_count\n token_count += 1\n # Include strings from target column\n if 
self.target_col:\n for line in self.dataset.iloc[:, self.target_col]:\n for word in line.split():\n if word not in self.word2index.keys():\n self.word2index[word] = token_count\n token_count += 1\n \n self.word2index.default_factory = lambda: self.word2index[self.unk_token]", "def idx_to_sentence(self, sentence_ids):\n return [' '.join([self.dictionary.idx2word[x] for x in sentence_ids])]", "def process_source_text(self, source_text):\n return source_text", "def ids_to_word(self, ids):\n str_decoded = ''.join([self.id_to_char(x) for x in ids])\n return str_decoded", "def test_snippets_to_ids():\n\tsnippets = [['sentence', 'one'], ['sentence'], ['two']]\n\tresult = (([12205, 68, 0], [12205, 0, 0]), (2, 1))\n\tassert lstm.snippets_to_ids(snippets, 3, 2) == result\n\n\tsnippets = [['sentence', 'three']]\n\tresult = (([12205, 98, 0], [0, 0, 0]), (2, 0))\n\tassert lstm.snippets_to_ids(snippets, 3, 2) == result", "def _paragraph_to_ids(paragraph, word_to_id, post_size, emotions):\n words = []\n vocab_size = len(word_to_id)\n\n # Remove emotion hashtags from the post.\n emotion_regex = re.compile('|'.join(map(re.escape, ['#' + emotion for emotion in emotions])))\n paragraph = emotion_regex.sub('', paragraph.lower())\n\n regex = re.compile('[%s]' % re.escape(_PUNCTUATION))\n # Remove punctuation, convert to lower case before splitting\n words = regex.sub('', paragraph).lower().split()\n # Replace unknown words by an id equal to the size of the vocab\n words = map(lambda x: word_to_id.get(x, vocab_size), words)\n words_len = len(words)\n if words_len > post_size:\n words = words[:post_size]\n words_len = post_size\n else:\n words = words + [vocab_size] * (post_size - words_len)\n return words, words_len" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a random piece on the board.
def getRandPiece(): row = random.randint(0, 11) # Board is a weird L shape col = random.randint(0, 5 if row < 6 else 11) # Return move in row (letter) + col (number) grid reference # e.g. A3 is represented as 0,2 return (row, col)
[ "def get_random_move(self):\n return random.choice(self.possible_moves)", "def random_strategy(player, board):\n return random.choice(othello.legal_moves(player, board))", "def get_piece_at(self, x, y) -> object:\n return self.board[y-1][x-1]", "def __get_random_move(self, possible_moves):\n\n return random.choice(possible_moves)", "def AIEasy():\n global gameBoard\n return random.choice(gameBoard.availableTiles())", "def random_move(board, player, para):\r\n valid_moves = board.get_valid_moves()\r\n r_move = random.choice(valid_moves)\r\n return r_move", "def pullPiece(self):\n self.waveTransition()\n totalLeft = self.piecesLeft()\n if totalLeft == 0:\n return 0\n zombieRange = self.zombieCount/totalLeft\n fZombieRange = (self.zombieCount+self.fZombieCount)/totalLeft\n bombRange = (self.zombieCount+self.fZombieCount+self.bombCount)/totalLeft\n nextPiece = random.random() # Random number in [0.0, 1.0)\n if nextPiece < zombieRange:\n self.zombieCount -= 1\n return 1\n elif nextPiece < fZombieRange:\n self.fZombieCount -= 1\n return 2\n elif nextPiece < bombRange:\n self.bombCount -= 1\n return 4\n else:\n self.multCount -= 1\n return 5", "def getRandomPosition(self):\n while True:\n x = random.randrange(self.width)\n y = random.randrange(self.height)\n pos = (x,y)\n if not self.isTileOccupied(pos):\n break\n return pos", "def generate_random_move(board, color):\n moves = board.get_empty_points()\n if len(moves) == 0:\n return PASS\n np.random.shuffle(moves)\n return moves[0]", "def random_floor_tile(self):\n\n if not Tile.floor in self.tiles.values():\n raise ValueError(\"No floor tile found\")\n\n Point = namedtuple(\"Point\", ['x', 'y'])\n\n # Get list all unoccupied floor tiles positions (floor tiles\n # with no entities on them)\n floor_tiles = []\n for (x, y), tile in self.tiles.items():\n if tile == Tile.floor and self.get_entity_at(x, y) == None:\n floor_tiles.append(Point(x, y))\n\n if len(floor_tiles) == 0:\n raise ValueError(\"No unoccupied floor tiles\")\n\n # Take random unoccupied floor tile\n return random.choice(floor_tiles)", "def get_random_card(self):\n return random.choice(self.cards)", "def randomcell(grid, checkfree = True):\n if checkfree:\n lst = [i for i in range(9) if grid[i] == None]\n p = random.choice(lst)\n else:\n p = random.choice(grid)\n return p", "def random_element(self):\n pass", "def random_move (self, player):\n choices = []\n for cell in self.cells.keys():\n if (self.cells[cell] == self.empty):\n choices.append(cell)\n n_choices = len(choices) - 1\n rand_move = random.randint(0, n_choices)\n return (None, choices[rand_move])", "def get_piece(self, position):\n return self._positions[str(position)].piece", "def get_random_cell():\n return \\\n random.randint(0, DIMENSION - 1), \\\n random.randint(0, DIMENSION - 1)", "def get_random(self):\n index = randrange(self.size)\n return self.individuals[index]", "def make_a_move(self, game):\n rnums = list(game.board.pieces)\n while len(rnums) > 0:\n i = random.randint(0, len(rnums)-1)\n rnum = rnums[i]\n if game.board.at(rnum).color != self.color:\n del rnums[i]\n elif game.has_a_move(rnum):\n paths = game.take_a_peek(rnum)\n path = random.choice(paths)\n game.make_a_move(path)\n return path\n else:\n del rnums[i]\n return []", "def placeAPiece(self):\n # Check if you can eliminate any opponent piece by placing your piece\n for y in range(0, 8):\n for x in range(0, 8):\n if self.board.board[y][x] == self.piece:\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n try:\n if (x + dx + dx) < 0 or (y + dy + dy) 
< 0:\n continue\n\n if (self.board.board[y + dy][x + dx] == self.opponentPiece\n and self.board.board[y + dy +dy][x + dx + dx] == \"-\"\n and (x + dx + dx, y + dy + dy) not in self.board.placeBanList):\n if x + dx + dx > 0 and y + dy + dy > 0:\n self.board.placePiece((x + dx + dx, y + dy + dy), self.myColour)\n return (x + dx + dx, y + dy + dy)\n else:\n continue\n except IndexError:\n continue\n\n # Tries to place a piece on the middle positions of the board first\n counter = 0\n while True:\n lowerBound = 3\n upperBound = 4\n # The range for placing slowly grows outwards\n # if it cannot find a place at first within a few tries\n if counter > 5 and counter < 15:\n lowerBound = 2\n upperBound = 5\n elif counter > 15 and counter < 50:\n lowerBound = 1\n upperBound = 6\n elif counter > 50:\n lowerBound = 0\n upperBound = 7\n\n x = randint(lowerBound, upperBound)\n y = randint(lowerBound, upperBound)\n\n counter += 1\n # Checks if the piece will get eliminated next turn if we\n # place a piece in the generated position\n dangerPlace = False\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n # In order to get rid of negative indexing since its annoying\n if (x + dx) < 0 or (y + dy) < 0:\n continue\n\n try:\n if ((self.board.board[y+dy][x+dx] == self.opponentPiece or\n self.board.board[y+dy][x+dx] == \"X\") and\n self.board.board[y-dy][x-dx] == \"-\"):\n dangerPlace = True\n break\n except IndexError:\n continue\n if dangerPlace:\n continue\n # Place the piece if the game rules allow it and then return\n if (x, y) not in self.board.placeBanList:\n self.board.placePiece((x, y), self.myColour)\n return ((x, y))", "def get_random_row(self):\n cur = self.connection.cursor()\n cur.execute(self._random_statement)\n return cur.fetchall()[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if an order is saved, the related pricing is recalculated and the order updated.
def test_pricing_updated_on_order_save(self): order = OrderFactory(vat_status=VATStatus.UK, discount_value=0) assert order.vat_cost > 0 order.vat_status = VATStatus.OUTSIDE_EU order.save() order.refresh_from_db() assert order.vat_cost == 0
[ "def test_pricing_unchanged_if_update_unrelated(self):\n order = OrderFactory()\n pre_update_pricing = get_pricing_from_order(order)\n\n order.description = 'updated description'\n order.save()\n\n order.refresh_from_db()\n post_update_pricing = get_pricing_from_order(order)\n\n assert pre_update_pricing == post_update_pricing", "def save(self, *args, **kwargs):\n orders = Order.objects.filter(product=self)\n\n # We exclude completed orders\n orders = orders.exclude(state=\"COM\")\n\n for order in orders:\n order.unit_price = self.unit_price\n order.save()\n\n super().save(*args, **kwargs)", "def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n if self.order:\n self.order.update_paid_status()", "def save(self, *args, **kwargs):\n super(FioPayment, self).save(*args, **kwargs)\n\n if self.order:\n self.order.update_paid_status()", "def test_pricing_update_on_assignee_created(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n OrderAssigneeFactory(order=order)\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_save_paid_sold(\n self,\n mock_generate_symvar,\n mock_mark_drawings_as_sold,\n mock_notify_current_status,\n ):\n order = Order(\n price=500,\n delivery_id=10,\n payment_id=10,\n symvar='ax22123',\n )\n order.initial_paid = False\n order.paid = True\n order.save()\n mock_mark_drawings_as_sold.assert_called_once()", "def test_pricing_updated_on_assignee_deleted(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.delete()\n\n order.refresh_from_db()\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_save_order_call(self):\n order = ORDER_FACTORY.create_order()\n # On create of an AdRepOrder, FirestormSoap.save_order will be called.\n ad_rep_order = AdRepOrder.objects.create(ad_rep_id=1000, order=order)\n mock_soap = MockSoap()\n mock_soap.save_order(ad_rep_order=ad_rep_order)\n ad_rep_order2 = AdRepOrder.objects.get(id=ad_rep_order.id)\n self.assertEqual(ad_rep_order2.firestorm_order_id, 1)", "def save(self):\n order = self.context['order']\n order.complete_order()", "async def test_update_order_current_price_on_price_update(self):\n\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '1',\n 'symbol': 'EURUSD',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'currentPrice': 9\n })\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '2',\n 'symbol': 'AUDUSD',\n 'type': 'ORDER_TYPE_SELL_LIMIT',\n 'currentPrice': 9\n })\n await state.on_symbol_specifications_updated('1:ps-mpa-1', [{'symbol': 'EURUSD', 'tickSize': 0.01}], [])\n await state.on_symbol_prices_updated('1:ps-mpa-1', [{\n 'time': datetime.now(),\n 'symbol': 'EURUSD',\n 'profitTickValue': 0.5,\n 'lossTickValue': 0.5,\n 'bid': 10,\n 'ask': 11\n }])\n assert list(map(lambda o: o['currentPrice'], state.orders)) == [11, 9]", 
"def test_changecountry(self):\n # Shipping method\n s1 = self.make_test_shippingmethod(order_cost=Decimal('4.00'))\n s1.name = 'expensive'\n s1.save()\n\n # Get us a country\n country1 = Country.objects.all()[1]\n country2 = Country.objects.all()[2]\n\n # Shipping method with country1 and country2\n s2 = self.make_test_shippingmethod(order_cost=Decimal('3.00'))\n s2.name = 'less expensive'\n s2.save()\n\n # Make sure the second method is only valid for this country\n s2.countries.add(country2)\n\n # Create product\n p = self.make_test_product(price=Decimal('10.00'), slug='p1')\n p.save()\n\n # Create order\n o = self.make_test_order()\n o.shipping_address.country = country1\n o.shipping_address.save()\n o.save()\n\n i = OrderItem(quantity=2, product=p, piece_price=p.get_price())\n o.orderitem_set.add(i)\n\n # Update the order: calculate costs etc.\n o.update()\n\n self.assertEqual(o.shipping_method, s1)\n self.assertEqual(o.get_shipping_costs(), Decimal('4.00'))\n self.assertEqual(o.order_shipping_costs, Decimal('4.00'))\n self.assertEqual(o.get_order_shipping_costs(), Decimal('4.00'))\n self.assertEqual(o.get_price_without_shipping(), Decimal('20.00'))\n self.assertEqual(o.get_price(), Decimal('24.00'))\n\n o.shipping_address.country = country2\n o.shipping_address.save()\n\n o.update()\n\n self.assertEqual(o.shipping_method, s2)\n self.assertEqual(o.get_shipping_costs(), Decimal('3.00'))\n self.assertEqual(o.order_shipping_costs, Decimal('3.00'))\n self.assertEqual(o.get_order_shipping_costs(), Decimal('3.00'))\n self.assertEqual(o.get_price_without_shipping(), Decimal('20.00'))\n self.assertEqual(o.get_price(), Decimal('23.00'))", "def test_shippingorderamount(self):\n # Most expensive method, always valid\n s1 = self.make_test_shippingmethod(order_cost=Decimal('4.00'))\n s1.name = 'expensive'\n s1.save()\n\n\n # LEss expensive method, valid from order price of 2\n s2 = self.make_test_shippingmethod(order_cost=Decimal('3.00'))\n s2.minimal_order_price=Decimal('2.0')\n s2.name = 'less expensive'\n s2.save()\n\n # Least expensive method, valid from order price of 10\n s3 = self.make_test_shippingmethod(order_cost=Decimal('2.00'))\n s3.minimal_order_price=Decimal('10.0')\n s3.name = 'least expensive'\n s3.save()\n\n # Free shipping for a price of 11 or higher\n s4 = self.make_test_shippingmethod(order_cost=Decimal('0.00'))\n s4.minimal_order_price=Decimal('11.0')\n s4.name = 'free shipping'\n s4.save()\n\n # Create product\n p = self.make_test_product(price=Decimal('1.00'), slug='p1')\n p.save()\n\n # Create order with order price 1.0\n o = self.make_test_order()\n o.save()\n\n i = OrderItem(quantity=1, product=p, piece_price=p.get_price())\n o.orderitem_set.add(i)\n\n # Update the order: calculate costs etc.\n o.update()\n\n self.assertEqual(o.get_price_without_shipping(), Decimal('1.00'))\n self.assertEqual(o.get_shipping_costs(), Decimal('4.00'))\n self.assertEqual(o.shipping_method, s1)\n\n\n # Create order with order price 3.0\n o = self.make_test_order()\n o.save()\n\n i = OrderItem(quantity=3, product=p, piece_price=p.get_price())\n o.orderitem_set.add(i)\n\n # Update the order: calculate costs etc.\n o.update()\n\n self.assertEqual(o.get_price_without_shipping(), Decimal('3.00'))\n self.assertEqual(o.get_shipping_costs(), Decimal('3.00'))\n self.assertEqual(o.shipping_method, s2)\n\n\n # Create order with order price 10.0\n o = self.make_test_order()\n o.save()\n\n i = OrderItem(quantity=10, product=p, piece_price=p.get_price())\n o.orderitem_set.add(i)\n\n # Update the order: 
calculate costs etc.\n o.update()\n\n self.assertEqual(o.get_price_without_shipping(), Decimal('10.00'))\n self.assertEqual(o.get_shipping_costs(), Decimal('2.00'))\n self.assertEqual(o.shipping_method, s3)\n\n\n # Create order with order price 12.0\n o = self.make_test_order()\n o.save()\n\n i = OrderItem(quantity=12, product=p, piece_price=p.get_price())\n o.orderitem_set.add(i)\n\n # Update the order: calculate costs etc.\n o.update()\n\n self.assertEqual(o.get_price_without_shipping(), Decimal('12.00'))\n self.assertEqual(o.get_shipping_costs(), Decimal('0.00'))\n self.assertEqual(o.shipping_method, s4)", "def save(self):\n order = self.context['order']\n order.issue_order()", "def test_save_not_paid_not_sold(\n self,\n mock_generate_symvar,\n mock_mark_drawings_as_sold,\n mock_notify_current_status,\n ):\n order = Order(\n price=500,\n delivery_id=10,\n payment_id=10,\n symvar='ax22123',\n )\n order.initial_paid = True\n order.paid = False\n order.save()\n mock_mark_drawings_as_sold.assert_not_called()", "def test_updating_of_an_order_successfully(self):\n self.client.force_authenticate(user=self.user)\n data = {\n \"item_name\": \"updated item\",\n }\n res = self.client.patch(self.order_url, data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data[\"item_name\"], \"updated item\")", "def test_patch_order(client, db):\r\n request = dict(\r\n truck_type='regional',\r\n inl_terminal='KAT',\r\n SomethingNew='TEST',\r\n SomethingNewAlso=None\r\n )\r\n rv = patch_order(client, 1, **request)\r\n\r\n assert rv.status_code == 200\r\n order = Order.query.get(1)\r\n assert order.truck_type == 'regional'\r\n assert order.inl_terminal == 'KAT'\r\n assert order.others['SomethingNew'] == 'TEST'\r\n assert 'SomethingNewAlso' not in order.others", "def test_update_depends_stock(self):\n with mn.model() as m:\n Foo = mn.stock('Foo', lambda: 1, (), lambda x: x, ('Bar',))\n Bar = mn.constant('Bar', 99)\n\n self.assertEqual(m['Foo'][''], 99)\n m['Bar'][''] = 90\n m.recalculate()\n self.assertEqual(m['Foo'][''], 90)\n m.step()\n self.assertEqual(m['Foo'][''], 91)", "def update_book_quantity_when_order_is_updated(sender, instance, **kwargs):\n book = instance.book\n # Check if this a new or an existing orderItem\n initial_order = OrderItem.objects.filter(id=instance.id)\n if initial_order.exists():\n # Get the initial quantity of the order from database\n initial_order_quantity = initial_order.first().quantity\n # If the orderItem quantity has increased\n if initial_order_quantity < instance.quantity:\n updated_quantity = instance.quantity - initial_order_quantity\n # subtract the updated quantity from book quantity in stock\n book.quantity -= updated_quantity\n book.save()\n # If the orderItem quantity has decreased\n elif initial_order_quantity > instance.quantity:\n quantity_difference = initial_order_quantity - instance.quantity\n book.quantity += quantity_difference\n book.save()\n else:\n book.quantity -= instance.quantity\n book.save()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if an unrelated field gets updated, the pricing stays the same.
def test_pricing_unchanged_if_update_unrelated(self): order = OrderFactory() pre_update_pricing = get_pricing_from_order(order) order.description = 'updated description' order.save() order.refresh_from_db() post_update_pricing = get_pricing_from_order(order) assert pre_update_pricing == post_update_pricing
[ "def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_updated_on_order_save(self):\n order = OrderFactory(vat_status=VATStatus.UK, discount_value=0)\n assert order.vat_cost > 0\n\n order.vat_status = VATStatus.OUTSIDE_EU\n order.save()\n\n order.refresh_from_db()\n assert order.vat_cost == 0", "def test_set_price(self):\n\n test_price = 100.0\n test_quantity = 1\n\n # Grab the first part\n p = Part.list(self.api)[0]\n\n # Grab all internal prices for the part\n ip = InternalPrice.list(self.api, part=p.pk)\n\n # Delete any existsing prices\n for price in ip:\n self.assertEqual(type(price), InternalPrice)\n price.delete()\n\n # Ensure that no part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 0)\n\n # Set the internal price\n p.setInternalPrice(test_quantity, test_price)\n\n # Ensure that the part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 1)\n \n # Grab the internal price\n ip = ip[0]\n\n self.assertEqual(ip.quantity, test_quantity)\n self.assertEqual(ip.part, p.pk)\n ip_price_clean = float(ip.price)\n self.assertEqual(ip_price_clean, test_price)", "def test_catalog_special_price_storage_v1_update_post(self):\n pass", "def test_pricing_updated_on_assignee_deleted(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.delete()\n\n order.refresh_from_db()\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_update_partial_more_expensive_retirement_missing_info(self):\n self.client.force_authenticate(user=self.admin)\n\n self.retirement2.price = 999\n self.retirement2.save()\n\n data = {\n 'retirement': reverse(\n 'retirement:retirement-detail',\n kwargs={'pk': 2},\n ),\n }\n\n response = self.client.patch(\n reverse(\n 'retirement:reservation-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_400_BAD_REQUEST,\n response.content\n )\n\n response_data = json.loads(response.content)\n\n content = {\n 'non_field_errors': [\n \"The new retirement is more expensive than the current one. 
\"\n \"Provide a payment_token or single_use_token to charge the \"\n \"balance.\"\n ]\n }\n\n self.assertEqual(response_data, content)\n\n self.retirement2.price = 199\n self.retirement2.save()", "def test_pricing_update_on_assignee_created(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n OrderAssigneeFactory(order=order)\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_update(self):\n product_id = None\n product = Product.create(name=\"apple\", price=2)\n product_id = product.id\n\n # Try to change the price.\n with self.db.transaction:\n product.price = 3\n\n # Check that the product has been updated.\n product = Product.get(id=product_id)\n self.assertEqual(product.price, 3)\n\n # Now try a new update, but make sure it fails.\n try:\n with self.db.transaction:\n product.price = 4\n raise InterruptedError\n except InterruptedError:\n pass\n\n # There should have been a rollback, so the price\n # shouldn't have changed.\n self.assertEqual(product.price, 3)\n product = Product.get(id=product_id)\n self.assertEqual(product.price, 3)\n\n # Try again, to make sure double-transactions don't cause a crash.\n try:\n with self.db.transaction:\n product.price = 4\n raise InterruptedError\n except InterruptedError:\n pass\n\n # There should have been a rollback, so the price\n # shouldn't have changed.\n self.assertEqual(product.price, 3)\n product = Product.get(id=product_id)\n self.assertEqual(product.price, 3)", "def test_price_diff(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff, diff)", "def test_pies_product_update(updated_test_brand_data, test_brand_record):\n PiesDataStorage(updated_test_brand_data).store_brand_data()\n compare_products_to_db(updated_test_brand_data, test_brand_record)", "def test_amountscurrent(self):\n self.assertTrue(self._instance.debt)\n self.assertEqual(self.compute_current_amount(), self._instance.amount)", "def test_multiple_purchases_update_product_price(self):\n\n # Generate timestamps for correct timing of purchases and updates\n t1 = datetime.datetime.now() - datetime.timedelta(seconds=30)\n t2 = datetime.datetime.now() - datetime.timedelta(seconds=25)\n t3 = datetime.datetime.now() - datetime.timedelta(seconds=20)\n t4 = datetime.datetime.now() - datetime.timedelta(seconds=15)\n t5 = datetime.datetime.now() - datetime.timedelta(seconds=10)\n t6 = datetime.datetime.now() - datetime.timedelta(seconds=5)\n # Update product price\n pp = ProductPrice(product_id=1, price=300, admin_id=1, timestamp=t1)\n db.session.add(pp)\n db.session.commit()\n # Get the first product price\n product = Product.query.filter_by(id=1).first()\n pr_1 = copy(product.price)\n # Do first purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t2)\n db.session.add(purchase)\n db.session.commit()\n # Update product price\n pp = ProductPrice(product_id=1, price=100, admin_id=1, timestamp=t3)\n db.session.add(pp)\n db.session.commit()\n # Get the second product price\n product = Product.query.filter_by(id=1).first()\n pr_2 = copy(product.price)\n # Do second purchase\n purchase = Purchase(user_id=1, 
product_id=1, amount=1, timestamp=t4)\n db.session.add(purchase)\n # Update product price\n pp = ProductPrice(product_id=1, price=600, admin_id=1, timestamp=t5)\n db.session.add(pp)\n db.session.commit()\n # Get the third product price\n product = Product.query.filter_by(id=1).first()\n pr_3 = copy(product.price)\n # Do third purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t6)\n db.session.add(purchase)\n db.session.commit()\n\n # Check the product prices\n self.assertEqual(pr_1, 300)\n self.assertEqual(pr_2, 100)\n self.assertEqual(pr_3, 600)\n\n # Check user credit\n user = User.query.filter_by(id=1).first()\n self.assertEqual(len(user.purchases.all()), 3)\n self.assertEqual(user.credit, -(pr_1 + pr_2 + pr_3))\n\n # Check purchase prices\n purchases = Purchase.query.all()\n self.assertEqual(purchases[0].price, 300)\n self.assertEqual(purchases[1].price, 100)\n self.assertEqual(purchases[2].price, 600)", "async def test_update_order_current_price_on_price_update(self):\n\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '1',\n 'symbol': 'EURUSD',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'currentPrice': 9\n })\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '2',\n 'symbol': 'AUDUSD',\n 'type': 'ORDER_TYPE_SELL_LIMIT',\n 'currentPrice': 9\n })\n await state.on_symbol_specifications_updated('1:ps-mpa-1', [{'symbol': 'EURUSD', 'tickSize': 0.01}], [])\n await state.on_symbol_prices_updated('1:ps-mpa-1', [{\n 'time': datetime.now(),\n 'symbol': 'EURUSD',\n 'profitTickValue': 0.5,\n 'lossTickValue': 0.5,\n 'bid': 10,\n 'ask': 11\n }])\n assert list(map(lambda o: o['currentPrice'], state.orders)) == [11, 9]", "def test_price_0_differs(self):\n pr_item = fake_price_region_item(\n fake_inventory_item(), fake_price_rule())\n for index in range(6):\n price = random_price()\n self.assertNotEqual(price, Decimal(0))\n sp_change = SellPriceChange(\n price_region_item=pr_item,\n price_diffs=[Decimal(\"0.00\") for _ in range(6)])\n sp_change.price_diffs[index] = price\n if index == 0:\n self.assertTrue(sp_change.price_0_differs)\n else:\n self.assertFalse(sp_change.price_0_differs)", "def test_price_diff_percentage(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n diff_percentage = diff / price_was\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff_percentage, diff_percentage)", "def update_price(self, company: Company):\n pass", "def test_equality_fail_price(self):\r\n for value in self.comparison:\r\n self.car1.price = value\r\n self.assertFalse(self.car1.equality(self.car3))", "def percent_price_reduction(change):\n \n upcoming_price_changes(change)\n\n # TODO do you wish to continue?\n\n sql_update = \"\"\"\n update `tabItem Price` ip\n \n left join `tabItem` it\n on ip.item_code = it.item_code\n \n set ip.price_list_rate = ip.price_list_rate + (ip.price_list_rate * %s / 100.0)\n\n where ip.selling = 1\n and it.ebay_id REGEXP '[0-9]'\n \n and it.modified < now() - interval 10 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change)\n\n frappe.db.sql(sql_update, auto_commit=True)\n \n sql_update_it = \"\"\"\n update 
`tabItem` it\n\n set \n it.standard_rate = it.standard_rate + (it.standard_rate * %s / 100.0),\n it.vat_inclusive_price = it.vat_inclusive_price + (it.vat_inclusive_price * %s / 100.0)\n \n where \n it.ebay_id REGEXP '[0-9]'\n and it.modified < now() - interval 30 day\n \n and ((it.standard_rate >25 and it.delivery_type = 'Standard Parcel')\n or (it.standard_rate >75 and it.delivery_type = 'Pallet'))\n and (select count(sii.name) from `tabSales Invoice Item` sii where sii.item_code = it.item_code and sii.docstatus=1) = 0\n \n \"\"\"%(change, change)\n\n frappe.db.sql(sql_update_it, auto_commit=True)\n\n print(\"Price reduction completed\")", "def test_update_is_return(self):\n trip = Trip.objects.first()\n self.assertEqual(trip.is_return, True)\n\n trip.is_return = False\n trip.save()\n\n self.assertEqual(trip.is_return, False)\n self.assertEqual(trip.distance, 3) # 3km one way" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if a new assignee is added, the pricing on the order changes.
def test_pricing_update_on_assignee_created(self): order = OrderFactory(discount_value=0) assert order.total_cost > 0 pre_update_total_cost = order.total_cost OrderAssigneeFactory(order=order) order.refresh_from_db() assert order.total_cost > 0 post_update_total_cost = order.total_cost assert pre_update_total_cost != post_update_total_cost
[ "def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_updated_on_assignee_deleted(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.delete()\n\n order.refresh_from_db()\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_unchanged_if_update_unrelated(self):\n order = OrderFactory()\n pre_update_pricing = get_pricing_from_order(order)\n\n order.description = 'updated description'\n order.save()\n\n order.refresh_from_db()\n post_update_pricing = get_pricing_from_order(order)\n\n assert pre_update_pricing == post_update_pricing", "def test_pricing_updated_on_order_save(self):\n order = OrderFactory(vat_status=VATStatus.UK, discount_value=0)\n assert order.vat_cost > 0\n\n order.vat_status = VATStatus.OUTSIDE_EU\n order.save()\n\n order.refresh_from_db()\n assert order.vat_cost == 0", "def test_price_creation(leader_client, event1):\n event1.leaders.append(leader_client.user)\n db.session.add(event1)\n db.session.commit()\n response = leader_client.get(\n f\"/payment/event/{event1.id}/edit_prices\", follow_redirects=True\n )\n assert response.status_code == 200\n\n data = utils.load_data_from_form(response.text, \"new_price\")\n\n data[\"item_title\"] = \"Banana\"\n data[\"title\"] = \"Adult\"\n data[\"amount\"] = 10\n data[\"enabled\"] = \"y\"\n\n response = leader_client.post(\n f\"/payment/event/{event1.id}/edit_prices\", data=data, follow_redirects=True\n )\n assert response.status_code == 200\n prices = [len(i.prices) for i in event1.payment_items]\n assert len(prices) == 1\n\n price = event1.payment_items[0].prices[0]\n assert price.title == \"Adult\"\n assert price.item.title == \"Banana\"\n assert price.amount == 10\n assert price.enabled == True", "def test_price_diff(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff, diff)", "def test_invoicing_ratio(self):\n project = self.TICKET.create(\n {'name': 'Test',\n 'participant_ids': [(6, 0, [self.customer_id, self.member_id])],\n 'analytic_journal_id': self.ANAJOURNAL.search([])[0].id,\n 'product_id': self.ref('product.product_product_consultant'),\n 'method_id': self.ref('anytracker.method_test')})\n account = self.ANACCOUNT.create({\n 'name': 'project',\n 'type': 'contract',\n 'to_invoice': self.ref(\n 'hr_timesheet_invoice.timesheet_invoice_factor1')})\n project.write({'analytic_account_id': account.id})\n # we create 3 tickets\n ticket1 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket1',\n 'parent_id': project.id, })\n ticket2 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket2',\n 'parent_id': project.id, })\n ticket3 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket3',\n 'parent_id': 
project.id, })\n ticket4 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket4',\n 'parent_id': project.id, })\n\n # we set ratings\n (ticket1 + ticket2 + ticket3 + ticket4).write({\n 'my_rating': self.ref('anytracker.complexity1')})\n # we set priorities to the tickets 1 to 3 but not 4\n ticket1.write({\n 'priority_id': self.ref('anytracker.test_prio_normal')})\n ticket2.write({\n 'priority_id': self.ref('anytracker.test_prio_prio')})\n ticket3.write({\n 'priority_id': self.ref('anytracker.test_prio_urgent')})\n\n # Now we create a bouquet with the 3 tickets\n bouquet = self.BOUQUET.create(\n {'name': 'bouquet',\n 'ticket_ids': [(6, 0, [\n ticket1.id, ticket2.id, ticket3.id, ticket4.id])]\n })\n # we launch invoicing on the bouquet\n bouquet.create_analytic_lines()\n\n # we check the ratio\n self.assertEquals(0, ticket1.analytic_line_id.to_invoice.factor)\n self.assertEquals(-40, ticket2.analytic_line_id.to_invoice.factor)\n self.assertEquals(-80, ticket3.analytic_line_id.to_invoice.factor)\n self.assertEquals(0, ticket4.analytic_line_id.to_invoice.factor)", "def test_patch_investment_requirements(self):\n pass", "def test_price_diff_percentage(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n diff_percentage = diff / price_was\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff_percentage, diff_percentage)", "def test_billing_recurring_update(self):\n pass", "def test_set_price(self):\n\n test_price = 100.0\n test_quantity = 1\n\n # Grab the first part\n p = Part.list(self.api)[0]\n\n # Grab all internal prices for the part\n ip = InternalPrice.list(self.api, part=p.pk)\n\n # Delete any existsing prices\n for price in ip:\n self.assertEqual(type(price), InternalPrice)\n price.delete()\n\n # Ensure that no part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 0)\n\n # Set the internal price\n p.setInternalPrice(test_quantity, test_price)\n\n # Ensure that the part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 1)\n \n # Grab the internal price\n ip = ip[0]\n\n self.assertEqual(ip.quantity, test_quantity)\n self.assertEqual(ip.part, p.pk)\n ip_price_clean = float(ip.price)\n self.assertEqual(ip_price_clean, test_price)", "def test_add_payee(self):\n self.ledger.add_payee('Waitrose')\n\n expected_result = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': \"Sainsbury's\"},\n {'payee_id': 3,\n 'payee_name': 'Waitrose'}]\n actual_result = self.ledger.get_payees()\n\n self.assertEqual(actual_result, expected_result)", "def test_admin_booking_customer_price(app, tickets):\n app.refresh_page()\n app.booking.select_event(tickets)\n app.booking.apply_custom_price(tickets)\n app.booking.fill_out_customer_info(tickets)\n app.booking.select_payment_method(tickets)\n app.booking.verify_payment_table(tickets)\n app.booking.submit_successful_booking()", "def test_update_entitlement_item(self):\n pass", "def test_patch_investment(self):\n pass", "def test_update_priority(self):\n self.client.login(username=\"alice@example.org\", password=\"password\")\n self._invite_member(\"glassman@example.com\")\n member = self._get_member(\"glassman@example.com\")\n form = {\"update_priority\": \"1\", \"email\": 
\"glassman@example.com\"}\n response = self.client.post(\"/accounts/profile/\", form)\n self.assertEqual(response.status_code, 200)\n\n member = self._get_member(\"glassman@example.com\")", "def test_edit_payee(self):\n self.ledger.edit_payee(2, 'Tesco')\n\n expected_result = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': 'Tesco'}]\n actual_result = self.ledger.get_payees()\n\n self.assertEqual(actual_result, expected_result)", "def test_properties_update():\n donor = Donor('test', [50, 100, 150])\n donor.add_donation(300)\n assert donor.sum_donations == 600\n assert donor.num_donations == 4\n assert donor.avg_donation == 150", "def test_stock_price(self):\n\n # Make a mock object for testing.\n sALE = Stock('ALE', 'common', 23, nan, 60)\n\n # A stock without trades has a ticker price equal to its par value.\n self.assertEqual(sALE.stock_price(), 60)\n\n # Add some mock Trades.\n sALE.buy(500, 25)\n sALE.sell(300, 15)\n self.assertEqual(len(sALE._trades), 2)\n\n # Easy case for ticker price with two Trades.\n self.assertEqual(sALE.stock_price(), ((500*25)+(300*15))/(500+300))\n\n # Add some mock Trades in the distant past (such that they are excluded\n # from the average).\n sALE.buy(100, 87, datetime.datetime.now() -\n datetime.timedelta(minutes=16))\n sALE.buy(23, 34, datetime.datetime.now() -\n datetime.timedelta(minutes=15))\n self.assertEqual(len(sALE._trades), 4)\n\n # Stock price should be unchanged.\n self.assertEqual(sALE.stock_price(), ((500*25)+(300*15))/(500+300))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if an assignee is updated, the pricing on the order changes.
def test_pricing_updated_on_assignee_updated(self):
    order = OrderFactory(discount_value=0)
    assert order.total_cost > 0
    pre_update_total_cost = order.total_cost

    assignee = order.assignees.first()
    assignee.estimated_time += 100
    assignee.save()

    order.refresh_from_db()
    assert order.total_cost > 0
    post_update_total_cost = order.total_cost

    assert pre_update_total_cost != post_update_total_cost
[ "def test_pricing_update_on_assignee_created(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n OrderAssigneeFactory(order=order)\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_updated_on_assignee_deleted(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.delete()\n\n order.refresh_from_db()\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_unchanged_if_update_unrelated(self):\n order = OrderFactory()\n pre_update_pricing = get_pricing_from_order(order)\n\n order.description = 'updated description'\n order.save()\n\n order.refresh_from_db()\n post_update_pricing = get_pricing_from_order(order)\n\n assert pre_update_pricing == post_update_pricing", "def test_pricing_updated_on_order_save(self):\n order = OrderFactory(vat_status=VATStatus.UK, discount_value=0)\n assert order.vat_cost > 0\n\n order.vat_status = VATStatus.OUTSIDE_EU\n order.save()\n\n order.refresh_from_db()\n assert order.vat_cost == 0", "async def test_update_order_current_price_on_price_update(self):\n\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '1',\n 'symbol': 'EURUSD',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'currentPrice': 9\n })\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '2',\n 'symbol': 'AUDUSD',\n 'type': 'ORDER_TYPE_SELL_LIMIT',\n 'currentPrice': 9\n })\n await state.on_symbol_specifications_updated('1:ps-mpa-1', [{'symbol': 'EURUSD', 'tickSize': 0.01}], [])\n await state.on_symbol_prices_updated('1:ps-mpa-1', [{\n 'time': datetime.now(),\n 'symbol': 'EURUSD',\n 'profitTickValue': 0.5,\n 'lossTickValue': 0.5,\n 'bid': 10,\n 'ask': 11\n }])\n assert list(map(lambda o: o['currentPrice'], state.orders)) == [11, 9]", "def test_price_diff(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff, diff)", "def test_price_diff_percentage(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n diff_percentage = diff / price_was\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff_percentage, diff_percentage)", "def test_billing_recurring_update(self):\n pass", "def test_properties_update():\n donor = Donor('test', [50, 100, 150])\n donor.add_donation(300)\n assert donor.sum_donations == 600\n assert donor.num_donations == 4\n assert donor.avg_donation == 150", "def test_set_price(self):\n\n test_price = 100.0\n test_quantity = 1\n\n # Grab the first part\n p = Part.list(self.api)[0]\n\n # Grab all internal prices for the part\n ip = InternalPrice.list(self.api, part=p.pk)\n\n # Delete any existsing prices\n for price in ip:\n self.assertEqual(type(price), InternalPrice)\n price.delete()\n\n # Ensure that no part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 0)\n\n # Set the internal 
price\n p.setInternalPrice(test_quantity, test_price)\n\n # Ensure that the part has an internal price\n ip = InternalPrice.list(self.api, part=p.pk)\n self.assertEqual(len(ip), 1)\n \n # Grab the internal price\n ip = ip[0]\n\n self.assertEqual(ip.quantity, test_quantity)\n self.assertEqual(ip.part, p.pk)\n ip_price_clean = float(ip.price)\n self.assertEqual(ip_price_clean, test_price)", "def test_multiple_purchases_update_product_price(self):\n\n # Generate timestamps for correct timing of purchases and updates\n t1 = datetime.datetime.now() - datetime.timedelta(seconds=30)\n t2 = datetime.datetime.now() - datetime.timedelta(seconds=25)\n t3 = datetime.datetime.now() - datetime.timedelta(seconds=20)\n t4 = datetime.datetime.now() - datetime.timedelta(seconds=15)\n t5 = datetime.datetime.now() - datetime.timedelta(seconds=10)\n t6 = datetime.datetime.now() - datetime.timedelta(seconds=5)\n # Update product price\n pp = ProductPrice(product_id=1, price=300, admin_id=1, timestamp=t1)\n db.session.add(pp)\n db.session.commit()\n # Get the first product price\n product = Product.query.filter_by(id=1).first()\n pr_1 = copy(product.price)\n # Do first purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t2)\n db.session.add(purchase)\n db.session.commit()\n # Update product price\n pp = ProductPrice(product_id=1, price=100, admin_id=1, timestamp=t3)\n db.session.add(pp)\n db.session.commit()\n # Get the second product price\n product = Product.query.filter_by(id=1).first()\n pr_2 = copy(product.price)\n # Do second purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t4)\n db.session.add(purchase)\n # Update product price\n pp = ProductPrice(product_id=1, price=600, admin_id=1, timestamp=t5)\n db.session.add(pp)\n db.session.commit()\n # Get the third product price\n product = Product.query.filter_by(id=1).first()\n pr_3 = copy(product.price)\n # Do third purchase\n purchase = Purchase(user_id=1, product_id=1, amount=1, timestamp=t6)\n db.session.add(purchase)\n db.session.commit()\n\n # Check the product prices\n self.assertEqual(pr_1, 300)\n self.assertEqual(pr_2, 100)\n self.assertEqual(pr_3, 600)\n\n # Check user credit\n user = User.query.filter_by(id=1).first()\n self.assertEqual(len(user.purchases.all()), 3)\n self.assertEqual(user.credit, -(pr_1 + pr_2 + pr_3))\n\n # Check purchase prices\n purchases = Purchase.query.all()\n self.assertEqual(purchases[0].price, 300)\n self.assertEqual(purchases[1].price, 100)\n self.assertEqual(purchases[2].price, 600)", "def test_update_entitlement_item(self):\n pass", "def test_patch_investment_requirements(self):\n pass", "def test_edit_payee(self):\n self.ledger.edit_payee(2, 'Tesco')\n\n expected_result = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': 'Tesco'}]\n actual_result = self.ledger.get_payees()\n\n self.assertEqual(actual_result, expected_result)", "def test_admin_booking_customer_price(app, tickets):\n app.refresh_page()\n app.booking.select_event(tickets)\n app.booking.apply_custom_price(tickets)\n app.booking.fill_out_customer_info(tickets)\n app.booking.select_payment_method(tickets)\n app.booking.verify_payment_table(tickets)\n app.booking.submit_successful_booking()", "def test_patch_investment_value(self):\n pass", "def test_invoicing_ratio(self):\n project = self.TICKET.create(\n {'name': 'Test',\n 'participant_ids': [(6, 0, [self.customer_id, self.member_id])],\n 'analytic_journal_id': self.ANAJOURNAL.search([])[0].id,\n 'product_id': 
self.ref('product.product_product_consultant'),\n 'method_id': self.ref('anytracker.method_test')})\n account = self.ANACCOUNT.create({\n 'name': 'project',\n 'type': 'contract',\n 'to_invoice': self.ref(\n 'hr_timesheet_invoice.timesheet_invoice_factor1')})\n project.write({'analytic_account_id': account.id})\n # we create 3 tickets\n ticket1 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket1',\n 'parent_id': project.id, })\n ticket2 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket2',\n 'parent_id': project.id, })\n ticket3 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket3',\n 'parent_id': project.id, })\n ticket4 = self.TICKET.with_context({'active_id': project.id}).create(\n {'name': 'Invoiced ticket4',\n 'parent_id': project.id, })\n\n # we set ratings\n (ticket1 + ticket2 + ticket3 + ticket4).write({\n 'my_rating': self.ref('anytracker.complexity1')})\n # we set priorities to the tickets 1 to 3 but not 4\n ticket1.write({\n 'priority_id': self.ref('anytracker.test_prio_normal')})\n ticket2.write({\n 'priority_id': self.ref('anytracker.test_prio_prio')})\n ticket3.write({\n 'priority_id': self.ref('anytracker.test_prio_urgent')})\n\n # Now we create a bouquet with the 3 tickets\n bouquet = self.BOUQUET.create(\n {'name': 'bouquet',\n 'ticket_ids': [(6, 0, [\n ticket1.id, ticket2.id, ticket3.id, ticket4.id])]\n })\n # we launch invoicing on the bouquet\n bouquet.create_analytic_lines()\n\n # we check the ratio\n self.assertEquals(0, ticket1.analytic_line_id.to_invoice.factor)\n self.assertEquals(-40, ticket2.analytic_line_id.to_invoice.factor)\n self.assertEquals(-80, ticket3.analytic_line_id.to_invoice.factor)\n self.assertEquals(0, ticket4.analytic_line_id.to_invoice.factor)", "def test_update_priority(self):\n self.client.login(username=\"alice@example.org\", password=\"password\")\n self._invite_member(\"glassman@example.com\")\n member = self._get_member(\"glassman@example.com\")\n form = {\"update_priority\": \"1\", \"email\": \"glassman@example.com\"}\n response = self.client.post(\"/accounts/profile/\", form)\n self.assertEqual(response.status_code, 200)\n\n member = self._get_member(\"glassman@example.com\")", "def update_price(self, company: Company):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that if an assignee is deleted, the pricing on the order changes.
def test_pricing_updated_on_assignee_deleted(self):
    order = OrderFactory(discount_value=0)
    assert order.total_cost > 0
    pre_update_total_cost = order.total_cost

    assignee = order.assignees.first()
    assignee.delete()

    order.refresh_from_db()
    post_update_total_cost = order.total_cost

    assert pre_update_total_cost != post_update_total_cost
[ "def test_pricing_update_on_assignee_created(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n OrderAssigneeFactory(order=order)\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_updated_on_assignee_updated(self):\n order = OrderFactory(discount_value=0)\n assert order.total_cost > 0\n pre_update_total_cost = order.total_cost\n\n assignee = order.assignees.first()\n assignee.estimated_time += 100\n assignee.save()\n\n order.refresh_from_db()\n assert order.total_cost > 0\n post_update_total_cost = order.total_cost\n\n assert pre_update_total_cost != post_update_total_cost", "def test_pricing_unchanged_if_update_unrelated(self):\n order = OrderFactory()\n pre_update_pricing = get_pricing_from_order(order)\n\n order.description = 'updated description'\n order.save()\n\n order.refresh_from_db()\n post_update_pricing = get_pricing_from_order(order)\n\n assert pre_update_pricing == post_update_pricing", "def test_pricing_updated_on_order_save(self):\n order = OrderFactory(vat_status=VATStatus.UK, discount_value=0)\n assert order.vat_cost > 0\n\n order.vat_status = VATStatus.OUTSIDE_EU\n order.save()\n\n order.refresh_from_db()\n assert order.vat_cost == 0", "def test_delete_payee(self):\n # first, add a payee with no associated transactions\n self.ledger.add_payee('Waitrose')\n expected_result = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': \"Sainsbury's\"},\n {'payee_id': 3,\n 'payee_name': 'Waitrose'}]\n actual_result = self.ledger.get_payees()\n self.assertEqual(actual_result, expected_result)\n\n # then delete the new payee to make sure it's gone\n self.ledger.delete_payee(3)\n\n expected_result = [{'payee_id': 1,\n 'payee_name': 'National Rail'},\n {'payee_id': 2,\n 'payee_name': \"Sainsbury's\"}]\n actual_result = self.ledger.get_payees()\n\n self.assertEqual(actual_result, expected_result)", "def test_delete_muveto_pmt_item(self):\n pass", "def test_delete_order(self):\n\n Pizza.objects.create(\n flavour='Dessert',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00}\n )\n Pizza.objects.create(flavour='Vegan',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00})\n\n pizzas = Pizza.objects.all().order_by('-flavour')\n orders = create_orders(pizzas, self.user)\n\n url = detail_url(orders[0].uuid)\n res = self.client.delete(url)\n\n orders_state = Order.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(orders_state), 1)\n self.assertNotEqual(orders_state[0].uuid, orders[0].uuid)", "def test_delitem_existing_dependent(self):\n self.assertIn('energy', self.record.curve_sets['cs1']['dependent'])\n del self.record.curve_set_values.cs1['energy']\n self.assertNotIn('energy', self.record.curve_sets['cs1']['dependent'])", "def test_price_diff(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff, diff)", "def test_you_have_been_removed_for_adviser(self, settings):\n settings.OMIS_NOTIFICATION_API_KEY = settings.OMIS_NOTIFICATION_TEST_API_KEY\n notify = Notify()\n\n order = OrderFactory()\n\n notify.adviser_removed(order=order, 
adviser=AdviserFactory())", "def test_delitem_existing_dependent_in_specific_set(self):\n self.assertIn('energy', self.record.curve_sets['cs1']['dependent'])\n del self.record.curve_set_values.cs1.dependent['energy']\n self.assertNotIn('energy', self.record.curve_sets['cs1']['dependent'])", "def test_delete_entitlement_item(self):\n pass", "def test_update_order_status_delivered_fail(self):\n Pizza.objects.create(\n flavour='Dessert',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00}\n )\n\n pizzas = Pizza.objects.all().order_by('-flavour')\n orders = create_orders(pizzas, self.user, status='DL')\n\n payload = {'status': 'P'}\n url = detail_url(orders[0].uuid)\n res = self.client.patch(url, payload)\n\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)", "def destroy(self, request, *args, **kwargs):\n instance = self.get_object()\n in_process_orders_exists = Order.objects.filter(\n status__lt=const.DELIVERED,\n ordered_pizzas__pizza_price=instance\n ).exists()\n if in_process_orders_exists:\n raise PermissionDenied(\"You can't delete PizzaPrice while it \"\n \"related to a new Order!\")\n\n return super().destroy(request, *args, **kwargs)", "def test_price_diff_percentage(self):\n supp_item = fake_supplier_item(fake_inventory_item())\n for diff in range(-9, 10):\n diff = Decimal(diff)\n price_was = random_price()\n price_now = price_was + diff\n diff_percentage = diff / price_was\n bp_change = BuyPriceChange(\n supplier_item=supp_item,\n price_was=price_was,\n price_now=price_now)\n self.assertEqual(bp_change.price_diff_percentage, diff_percentage)", "async def unassign(self) -> None:\n db_courier = await self.get_courier()\n\n order_assigner = OrderAssigner(courier_id=self.courier_id, courier=db_courier)\n order_selector = OrderSelector(courier_id=self.courier_id, courier=db_courier)\n order_filter = OrderFilter(courier_id=self.courier_id, courier=db_courier)\n\n current_orders = await order_selector.select(completed=False)\n suited_orders = await order_filter.filter_by_courier_features(current_orders)\n\n if len(current_orders) != len(suited_orders):\n orders_ids_to_unassign = set(get_objects_ids(current_orders)) - set(get_objects_ids(suited_orders))\n orders_to_unassign = [order for order in current_orders if order.id in orders_ids_to_unassign]\n await order_assigner.unassign(orders_to_unassign)", "def test_employee_was_deleted(self):\n delete_employee(2, productionDB=False)\n self.cur.execute('SELECT COUNT(*) FROM employees WHERE manager_id = 1')\n data = self.cur.fetchone()\n self.new_number_of_employees = data[0]\n self.assertEqual(self.new_number_of_employees + 1, self.old_number_of_employees, \"\"\"The number of accounts did \n not change\"\"\")", "def test_membership_discount(self):\n # create a user with a subscription with 10% discount (level10)\n user = self.new_user()\n subscription = user.subscription_set.create(level=self.level10,amount=0)\n subscription.recalculate()\n user = get_user_model().objects.get(id=user.id)\n self.assertEqual(user.level.id,self.level10.id)\n\n # user signs up without loging in, no discount\n order_id = self.add_to_cart(self.session1.sessionproduct)\n self.assertEqual(Order.objects.get(id=order_id).order_total,45)\n\n # after logging in the total drops by 10%\n self.login(user)\n self.start_checkout()\n self.assertEqual(Order.objects.get(id=order_id).order_total,40.5)\n\n #! 
TODO Test an expired membership strips member of level and gives no discount", "def test_parent_price(self):\n parent = Product.objects.get(slug='foo-product')\n self.assertEqual(parent.price, Decimal('25.22'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
works for 3 sec and returns i**2 + j
def fun(i, j):
    start = time.time()
    while time.time() - start < 3.:
        0. + 0.  # pointless operation to see the CPU activity raising (top, htop, ...)
    return i ** 2 + j
[ "def example2(S):\n n = len(S)\n total = 0\n for j in range(0, n, 2): # note the increment of 2\n total += S[j]\n return total", "def add3(i):\n pass", "def _sum(a, i, j):\n if i > j: # T(n) = 0 \n return 0\n if i == j: # T(n) = 1\n return a[i]\n mid = (i+j)//2\n return _sum(a, i, mid) + _sum(a, mid+1, j)", "def time(state):", "def Problem2():\n F = 6000 #kips\n p = 0.6 #in\n d = 1.7 #in\n dm = d - p/2\n f = 0.1\n l = p*1 # lead for single threaded\n Tr = F*dm/2*(l+np.pi*f*dm)/(np.pi*dm-f*l)\n Tr2 = Screw.TorqueRaiseThread(F, dm, f, l)\n print(Tr, Tr2)", "def d(j, i):\n return distance[i][j]", "def CJspeed2(P1, T1, q, mech):\n \n gas2 = Solution(mech)\n gas1 = Solution(mech)\n gas = Solution(mech)\n\n #INTIAL CONDITIONS\n gas.TPX = T1, P1, q;\n gas1.TPX = T1, P1, q;\n gas2.TPX = T1, P1, q; \n \n #INITIALIZE ERROR VALUES & CHANGE VALUES\n ERRFT = 1.0*10**-4; ERRFV = 1.0*10**-4;\n\n r1 = gas1.density; V1 = 1/r1;\n P1 = gas1.P; T1 = gas1.T;\n i = 0;\n #PRELIMINARY GUESS\n Vg = V1/10; rg = 1/Vg; \n \n gas.TD = T1,rg; \n gas.equilibrate('UV')\n Tg = gas.T; \n gas2.TDX = Tg, rg, gas.X\n \n #SAVE STATE\n V = Vg; r = rg;\n T = Tg;\n deltaT = 1000; deltaV = 1000; cj_speed = 0;\n #START LOOP\n while(abs(deltaT) > ERRFT*T or abs(deltaV) > ERRFV*V):\n i = i + 1\n if i == 500:\n print \"CJ speed 2 calc did not converge\"\n return gas\n \n #CALCULATE FH & FP FOR GUESS 1\n [FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n\n\n #TEMPERATURE PERTURBATION\n DT = T*0.01; Tper = T + DT;\n Vper = V; Rper = 1/Vper;\n \n gas.TD = Tper, Rper\n gas.equilibrate('TV',2)\n gas2.TDX = Tper, Rper, gas.X\n\n #CALCULATE FHX & FPX FOR \"IO\" STATE\n [FHX,FPX,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n #ELEMENTS OF JACOBIAN\n DFHDT = (FHX-FH)/DT; DFPDT = (FPX-FP)/DT;\n\n #VOLUME PERTURBATION\n DV = 0.01*V; Vper = V + DV;\n Tper = T; Rper = 1/Vper;\n \n gas.TD = Tper, Rper\n gas.equilibrate('TV',2)\n gas2.TDX = Tper, Rper, gas.X\n \n #CALCULATE FHX & FPX FOR \"IO\" STATE\n [FHX,FPX,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n #ELEMENTS OF JACOBIAN\n DFHDV = (FHX-FH)/DV; DFPDV = (FPX-FP)/DV;\n\n #INVERT MATRIX\n J = DFHDT*DFPDV - DFPDT*DFHDV\n b = [DFPDV, -DFHDV, -DFPDT, DFHDT]\n a = [-FH, -FP]\n deltaT = (b[0]*a[0]+b[1]*a[1])/J; deltaV = (b[2]*a[0]+b[3]*a[1])/J;\n\n #CHECK & LIMIT CHANGE VALUES\n #TEMPERATURE\n DTM = 0.2*T\n if abs(deltaT) > DTM:\n deltaT = DTM*deltaT/abs(deltaT)\n #VOLUME\n V2X = V + deltaV\n if V2X > V1:\n DVM = 0.5*(V1 - V)\n else:\n DVM = 0.2*V\n if abs(deltaV) > DVM:\n deltaV = DVM*deltaV/abs(deltaV)\n #MAKE THE CHANGES\n T = T + deltaT; V = V + deltaV; r = 1/V;\n gas.TD = T, r\n gas.equilibrate('TV',2)\n gas2.TDX = T, r, gas.X\n\n [FH,FP,cj_speed] = FHFP_CJ2(gas,gas1,gas2)\n \n return [gas,cj_speed]", "def OP1(i,j):\n if j < i:\n I1 = 0\n elif j == i:\n I1 = ((2*j+1)**2 - 4*i**2)**0.5/(2*np.pi) - 2*j*OP0(i,j)\n elif j > i:\n I1 = (((2*j+1)**2 - 4*i**2)**0.5 - ((2*j-1)**2 - 4*i**2)**0.5)/(2*np.pi) - 2*j*OP0(i,j)\n else:\n raise(ValueError)\n \n return I1", "def Add(i, j):\n\treturn _hi.hi_Add(i, j)", "def autorange(self, callback=None):\n for i in range(1, 10):\n number = 10**i\n time_taken = self.timeit(number)\n if callback:\n callback(number, time_taken)\n if time_taken >= 0.2:\n break\n return (number, time_taken)", "def subsum(i, j):\n return nums[j-1] - (nums[i-1] if i > 0 else 0)", "def value(i,j):\n if i<0 or i>=M or j<0 or j>=N:\n return 0\n return f[i,j]", "def answer2():\n sum= 0\n for i in range(1000):\n if i % 3 != 0 and i % 5 != 0:\n continue\n sum += i\n return sum", "def meet_me(pos1, jump_distance1, 
sleep1, pos2, jump_distance2, sleep2):\n for i in range(1000):\n if i == sleep1:\n pos1 += jump_distance1\n sleep1 += sleep1\n if i == sleep2:\n pos2 += jump_distance2\n sleep2 += sleep2\n if pos1 == pos2:\n print(pos1)\n break", "def spinij(self, i, j):\n return self._spins[i%self._N, j%self._N]", "def sumRange(self, i, j):\n bucket_size, nums, table = NumArray.bucket_size, self.nums, self.table\n \n if i == j:\n return nums[i]\n \n vsum = 0\n now = i\n \n while now <= j:\n if now % bucket_size != 0:\n for k in range(now, min(j+1, now-(now%bucket_size) + bucket_size)):\n vsum += nums[k]\n now -= (now%bucket_size)\n else:\n if now+bucket_size-1 <= j:\n vsum += table[now//bucket_size]\n else:\n for k in range(now, j+1):\n vsum += nums[k]\n \n now += bucket_size\n \n return vsum", "def teleportation():\r\n start = time.time()\r\n global final\r\n final = beta*original_votes + (1-beta)*teleported\r\n print(time.time()-start)", "def get( self, time, i ):\n assert( self.__sorted( ) )\n # __result is assumed to be an ordered list, since the only method to add elements is add and it would add\n # elements in order. If it turns out that __result is not sorted, then some procedure must have modified the\n # the data structure illegaly\n j = bisect.bisect_right( self.__result, RateStructure( time, [] ) )\n return self.__interpolate( time, j, i )", "def combine(a=0, b=0, c=0):\n time.sleep(0.5)\n return a + b + c" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create new CloudFormation Stack from the template
def launch(args, config, cf_conn, template):
    print("Creating CloudFormation Stack %s..." % config['stack_name'])
    stack_id = cf_conn.create_stack(
        config['stack_name'],
        template_body=template.to_json(),
        parameters=cf_params(),
        tags=config['tags'],
        capabilities=['CAPABILITY_IAM']
    )
    print('Created ' + stack_id)
[ "def create_stack(self):\n command = \"cfn-create-stack \" + self.stack_name + \" -f \" + self.template_file\n if (self.parameters is not None):\n command += \" -p \\\"\" + self.parameters + \"\\\"\"\n run_command(command)", "def create_stack(self, **kwargs):\n stack_name = kwargs.get('stack_name')\n template_file = kwargs.get('template_file')\n if not self.quiet:\n print('Creating stack: ', end='')\n print('{}'.format(stack_name), color='green')\n shell_statement = 'aws --profile {} --region {} cloudformation create-stack --stack-name {} --template-body file://{} --capabilities=CAPABILITY_IAM'.format(self.profile_name, self.region_name, stack_name, template_file)\n if self.stack_exists(stack_name):\n print('Stack with the name {} already exists. Current stacks:'.format(stack_name), color='red')\n for _name, _status in self.check_statuses():\n if _status == 'CREATE_COMPLETE':\n color='green'\n else:\n color='yellow'\n print('{:<25} {}'.format(_name, _status), color=color)\n sys.exit(1)\n\n try:\n results = subprocess.check_output(\n shlex.split(shell_statement),\n stderr=subprocess.STDOUT\n )\n except subprocess.CalledProcessError as e:\n print('An error ({}) occurred:'.format(e.returncode), color='red')\n print(e.output.decode())\n sys.exit(1)", "def create_or_update_stack(self, template_name):\n stack_name = self.get_stack_name(template_name)\n stack_parameters = self.get_stack_parameters(template_name)\n template_body = self.read_template(template_name)\n\n # check if the stack exists\n status = self.get_stack_status(stack_name)\n\n # otherwise, deploy it\n if status and ( status['StackStatus'] == 'CREATE_COMPLETE' or \n status['StackStatus'] == 'UPDATE_COMPLETE'):\n pass\n elif not status or status['StackStatus'] in ['DELETE_COMPLETE']:\n create_response = self.client.create_stack(\n StackName=stack_name,\n #http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html\n #Capabilities.member.1=CAPABILITY_IAM,\n Capabilities=['CAPABILITY_IAM'],\n TemplateBody=template_body,\n Parameters=stack_parameters)\n self.wait_for_stack_status(stack_name)\n elif status['StackStatus'] in ['CREATE_IN_PROGRESS']:\n self.wait_for_stack_status(stack_name)\n else:\n raise Exception(\n 'not sure what to do...stack is in state {}'.format(\n status['StackStatus']))\n\n # keep track of variables that are outputs from each stack\n stack = self.describe_stack(template_name)\n self.add_outputs_to_namespace(stack)\n\n return stack", "def create_stack(self, cnxt, stack_name, template, params, files, args):\r\n LOG.info(_('template is %s') % template)\r\n\r\n def _stack_create(stack):\r\n # Create/Adopt a stack, and create the periodic task if successful\r\n if stack.adopt_stack_data:\r\n stack.adopt()\r\n else:\r\n stack.create()\r\n\r\n if (stack.action in (stack.CREATE, stack.ADOPT)\r\n and stack.status == stack.COMPLETE):\r\n # Schedule a periodic watcher task for this stack\r\n self.stack_watch.start_watch_task(stack.id, cnxt)\r\n else:\r\n LOG.warning(_(\"Stack create failed, status %s\") % stack.status)\r\n\r\n tmpl = parser.Template(template, files=files)\r\n self._validate_new_stack(cnxt, stack_name, tmpl)\r\n\r\n common_params = api.extract_args(args)\r\n env = environment.Environment(params)\r\n stack = parser.Stack(cnxt, stack_name, tmpl, env, **common_params)\r\n\r\n self._validate_deferred_auth_context(cnxt, stack)\r\n\r\n stack.validate()\r\n\r\n stack.store()\r\n\r\n self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,\r\n _stack_create, stack)\r\n\r\n 
return dict(stack.identifier())", "def provision(template_name=None, stack_name=None):\n if not template_name:\n abort('Must provide template')\n if not stack_name:\n abort('Must provide stack_name')\n client = boto3.client('cloudformation')\n\n config = load_config()\n\n update = False\n try:\n resp = client.describe_stacks(StackName=stack_name)\n message = 'Stack {0} exists, and is in state {1}. Proceed with update?'.format(\n stack_name, resp['Stacks'][0]['StackStatus'])\n if not confirm(message):\n abort('Aborting.')\n else:\n update = True\n except ClientError:\n logger.info('No stack named {0}; proceeding with stack creation'.format(stack_name))\n\n with open(os.path.join(OUTPUT_DIR, template_name + OUTPUT_EXT)) as output_contents:\n if update:\n response = client.update_stack(StackName=stack_name,\n TemplateBody=output_contents.read(),\n Parameters=config.get(template_name, {}).get('parameters', []),\n Capabilities=['CAPABILITY_IAM'])\n else:\n response = client.create_stack(StackName=stack_name,\n TemplateBody=output_contents.read(),\n Parameters=config.get(template_name, {}).get('parameters', []),\n Capabilities=['CAPABILITY_IAM'])\n logger.info(json.dumps(response, indent=2))", "def cfn_create(test=False):\n stack_name = get_stack_name(new=True)\n cfn_config = get_config()\n\n cfn = get_connection(Cloudformation)\n if test:\n print cfn_config.process()\n return\n # Upload any SSL certs that we may need for the stack.\n if 'ssl' in cfn_config.data:\n print green(\"Uploading SSL certificates to stack\")\n iam = get_connection(IAM)\n iam.upload_ssl_certificate(cfn_config.ssl(), stack_name)\n # Useful for debug\n # print cfn_config.process()\n # Inject security groups in stack template and create stacks.\n try:\n stack = cfn.create(stack_name, cfn_config.process(), tags=get_cloudformation_tags())\n except:\n # cleanup ssl certificates if any\n if 'ssl' in cfn_config.data:\n print red(\"Deleting SSL certificates from stack\")\n iam.delete_ssl_certificate(cfn_config.ssl(), stack_name)\n import traceback\n abort(red(\"Failed to create: {error}\".format(error=traceback.format_exc())))\n\n print green(\"\\nSTACK {0} CREATING...\\n\").format(stack_name)\n\n if not env.blocking:\n print 'Running in non blocking mode. 
Exiting.'\n sys.exit(0)\n\n tail(cfn, stack_name)\n stack_evt = cfn.get_last_stack_event(stack)\n\n if stack_evt.resource_status == 'CREATE_COMPLETE':\n print 'Successfully built stack {0}.'.format(stack)\n else:\n # So delete the SSL cert that we uploaded\n if 'ssl' in cfn_config.data:\n iam.delete_ssl_certificate(cfn_config.ssl(), stack_name)\n abort('Failed to create stack: {0}'.format(stack))", "def createTemplateStack(self):\n\n\t\ttemplatestack = os.path.join(self.params['rundir'], \"templatestack.spi\")\n\t\tapFile.removeFile(templatestack, warn=True)\n\n\t\t### hack to use standard filtering library\n\t\ttemplateparams = {}\n\t\ttemplateparams['apix'] = self.stack['apix']\n\t\ttemplateparams['rundir'] = os.path.join(self.params['rundir'], \"templates\")\n\t\ttemplateparams['templateIds'] = self.templatelist\n\t\ttemplateparams['bin'] = self.params['bin']\n\t\ttemplateparams['lowpass'] = self.params['lowpass']\n\t\ttemplateparams['median'] = None\n\t\ttemplateparams['pixlimit'] = None\n\t\tprint 'Converting reference templates:\\n', templateparams\n\t\tapParam.createDirectory(os.path.join(self.params['rundir'], \"templates\"))\n\t\tfilelist = apTemplate.getTemplates(templateparams)\n\n\t\tlocalclip = self.clipsize/self.params['bin']\n\t\tfor mrcfile in filelist:\n\t\t\temancmd = (\"proc2d templates/\"+mrcfile+\" \"+templatestack\n\t\t\t\t+\" clip=\"+str(localclip)+\",\"+str(localclip)\n\t\t\t\t+\" edgenorm spiderswap \")\n\t\t\tif self.params['inverttemplates'] is True:\n\t\t\t\temancmd += \" invert \"\n\t\t\tapEMAN.executeEmanCmd(emancmd, showcmd=False)\n\n\t\treturn templatestack", "def update(args, config, cf_conn, template):\n print(\"Updating CloudFormation Stack %s...\" % config['stack_name'])\n stack_id = cf_conn.update_stack(\n config['stack_name'],\n template_body=template.to_json(),\n parameters=cf_params(),\n tags=config['tags'],\n capabilities=['CAPABILITY_IAM']\n )\n print('Updated ' + stack_id)", "def template_stack(name=None, profile=None):\n h_client = _auth(profile)\n\n if not name:\n return {\"result\": False, \"comment\": \"Parameter name missing or None\"}\n try:\n get_template = h_client.stacks.template(name)\n except heatclient.exc.HTTPNotFound:\n return {\"result\": False, \"comment\": \"No stack with {}\".format(name)}\n except heatclient.exc.BadRequest:\n return {\"result\": False, \"comment\": \"Bad request fot stack {}\".format(name)}\n if \"heat_template_version\" in get_template:\n template = salt.utils.yaml.safe_dump(get_template)\n else:\n template = jsonutils.dumps(get_template, indent=2, ensure_ascii=False)\n\n checksum = __salt__[\"hashutil.digest\"](template)\n ret = {\"template\": template, \"result\": True, \"checksum\": checksum}\n return ret", "def test_create_stack(self):\n pass", "def generate(self):\n try:\n self.stack_cf = self.template.render(stack=self.stack, env=self.env, config=self.config)\n with open(self.output_folder + self.stack_cf_file, 'w') as cffile:\n cffile.write(self.stack_cf)\n print('[' + self.stack + '] generate in ' + self.output_folder + self.stack_cf_file)\n except:\n print('[' + self.stack + '] generate cloudformation file failed.')\n raise Exception('Stack ' + self.stack + ' generate cloudformation file failed.')", "def test_save_create(self):\n self._cf.create_stack.return_value = True\n\n with patch('krux_cloud_formation.cloud_formation.CloudFormation._is_stack_exists', MagicMock(return_value=False)):\n self._cfn.save(self.TEST_STACK_NAME)\n self._s3.create_key.assert_called_once_with(\n 
bucket_name=self.S3_BUCKET,\n key=self.TEST_STACK_NAME,\n str_content=self._cfn.template.to_json()\n )\n self._cf.create_stack.assert_called_once_with(\n StackName=self.TEST_STACK_NAME,\n TemplateURL=self.FAKE_URL\n )", "def create_stack(stackName, app_type):\n if app_type in ['core', 'expacore', 'expa_core']:\n app_type = 'core'\n\n try:\n app_settings\n except NameError:\n app_settings = loadsettings(app_type)\n\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n try:\n git_cfg\n except NameError:\n try:\n git_cfg = load_git_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide a github conifguration based on git.cfg-dist to proceed. %s\" % error))\n return 1\n\n stackName = stackName.lower()\n key_file_path = os.path.expanduser(git_cfg.get('git', 'key_dir')) + '/' + git_cfg.get('cookbooks', 'deploy_key')\n key_file_path = os.path.expandvars(key_file_path)\n with open(key_file_path, \"r\") as key_file:\n cookbooks_deploy_key = key_file.read()\n\n key_file_path = os.path.expanduser(git_cfg.get('git', 'key_dir')) + '/' + git_cfg.get(app_type, 'deploy_key')\n key_file_path = os.path.expandvars(key_file_path)\n with open(key_file_path, \"r\") as key_file:\n app_deploy_key = key_file.read()\n\n key_file_path = os.path.expanduser(aws_cfg.get('aws', 'key_dir')) + '/' + aws_cfg.get('aws', 'opsworks_public_key')\n key_file_path = os.path.expandvars(key_file_path)\n with open(key_file_path, \"r\") as key_file:\n opsworks_public_key = key_file.read()\n\n cookbooks_source = {\"Url\": \"%s\" % git_cfg.get('cookbooks', 'repo_url'),\n \"Type\": \"git\",\n \"SshKey\": cookbooks_deploy_key}\n\n recipes = {\"Setup\": [\"app::sns-handler\", \"bootstrap::misc\", \"newrelic::default\", \"dokku::bootstrap\", \"app::dokku-logs\", \"logstash::agent\", \"app::newrelic-nginx-plugin\"],\n \"Deploy\": [\"app::sns-handler\", \"dokku::apps\", \"nginx::logging\", \"dokku_deploy::default\", \"app::newrelic-notify-deployment\", \"logstash::restart_agent\", \"app::sync-db-to-expa\"],\n \"Shutdown\": [\"app::sns-handler\", \"app::shutdown\"]}\n\n app_source = {\"Url\": \"%s\" % git_cfg.get(app_type, 'repo_url'),\n \"Type\": \"git\",\n \"SshKey\": app_deploy_key}\n\n arns = create_opsworks_roles()\n\n create_s3_buckets(app_type)\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n\n try:\n opsworks.create_user_profile(iam_user_arn=arns['user_arn'], ssh_public_key=opsworks_public_key)\n except ValidationException, error:\n if error.message == 'User ARN already exists':\n opsworks.update_user_profile(iam_user_arn=arns['user_arn'], ssh_public_key=opsworks_public_key)\n else:\n print error\n return 1\n\n if stackName in [stack['Name'] for stack in stacks['Stacks']]:\n foundStacks = [(stack['Name'], stack['StackId']) for stack in stacks['Stacks']]\n for foundStack in foundStacks:\n if foundStack[0] == stackName:\n print(_red(\"%s: %s already exists. 
please choose another stack name\" % (foundStack[0], foundStack[1])))\n return 1\n\n try:\n stack = opsworks.create_stack(name=stackName, region=aws_cfg.get('aws', 'region'),\n service_role_arn=arns['serviceRole'], default_instance_profile_arn=arns['instanceProfile'],\n default_os='Ubuntu 12.04 LTS', hostname_theme=choice(OPWORKS_INSTANCE_THEMES),\n configuration_manager=OPSWORKS_CONFIG_MANAGER, custom_json=json.dumps(app_settings[\"OPSWORKS_CUSTOM_JSON\"], sort_keys=True, indent=4, separators=(',', ': ')),\n use_custom_cookbooks=True, custom_cookbooks_source=cookbooks_source, default_ssh_key_name=aws_cfg.get(\"aws\", \"key_name\"),\n default_root_device_type='ebs')\n\n opsworks.set_permission(stack_id=stack['StackId'], iam_user_arn=arns['user_arn'], allow_ssh=True, allow_sudo=True)\n except Exception, error:\n print error\n print json.dumps(app_settings[\"OPSWORKS_CUSTOM_JSON\"], sort_keys=True, indent=4, separators=(',', ': '))\n return 1\n\n ec2 = connect_to_ec2()\n webserver_sg = ec2.get_all_security_groups(groupnames=['AWS-OpsWorks-Web-Server'])\n layer = opsworks.create_layer(stack_id=stack['StackId'], type='custom', name=app_settings[\"APP_NAME\"], shortname=app_settings[\"APP_NAME\"], custom_recipes=recipes,\n enable_auto_healing=True, auto_assign_elastic_ips=False, auto_assign_public_ips=True, custom_security_group_ids=[webserver_sg[0].id])\n\n elb_name = stackName + '-elb'\n lb = create_elb(name=elb_name, app_type=app_type)\n\n opsworks.attach_elastic_load_balancer(elastic_load_balancer_name=lb.name, layer_id=layer['LayerId'])\n\n if app_type == 'app':\n appDomains = [app_settings[\"HOST_NAME\"], app_settings[\"DOMAIN_NAME\"]]\n else:\n appDomains = [app_settings[\"HOST_NAME\"]]\n app = opsworks.create_app(stack_id=stack['StackId'], name=app_settings[\"APP_NAME\"], type='static', app_source=app_source,\n domains=appDomains)\n\n print(_green(\"created stack with following info\"))\n print(_yellow(\"stack name/id: %s/%s\" % (stackName, stack['StackId'])))\n print(_yellow(\"layer name/id: %s/%s\" % (app_settings[\"APP_NAME\"], layer['LayerId'])))\n print(_yellow(\"app name/id: %s/%s\" % (app_settings[\"APP_NAME\"], app['AppId'])))\n\n zones = random.sample([zone.name for zone in ec2.get_all_zones()], 2)\n\n add_instance(stackName=stackName, layerName=app_settings[\"APP_NAME\"], zone=zones[0])\n add_instance(stackName=stackName, layerName=app_settings[\"APP_NAME\"], zone=zones[1])\n\n rds_instance_name = stackName + '-' + app_settings[\"HOST_NAME\"].replace('.', '-') + '-db'\n rds = connect_to_rds()\n if app_settings[\"DATABASE_HOST\"] == \"localhost\":\n try:\n create_rds(name=rds_instance_name, app_type=app_type, engine_type=app_settings['DB_TYPE'])\n except Exception:\n print(_red(\"rds creation failed. 
deleting stack with no RDS instance\"))\n delete_stack(stackName)\n else:\n try:\n rds.get_all_dbinstances(instance_id=app_settings[\"DATABASE_HOST\"].split('.')[0])\n except BotoServerError, error:\n if error.code == 'DBInstanceNotFound':\n create_rds(name=rds_instance_name, app_type=app_type, engine_type=app_settings['DB_TYPE'])\n else:\n print error\n\n try:\n rds.authorize_dbsecurity_group(group_name=aws_cfg.get('aws', 'group_name'),\n ec2_security_group_owner_id=webserver_sg[0].owner_id, ec2_security_group_name='AWS-OpsWorks-Web-Server')\n except BotoServerError, error:\n if error.code == 'AuthorizationAlreadyExists':\n pass\n else:\n print error\n\n # update stack with new custom_json updated by create_rds and create_s3_buckets\n app_settings = loadsettings(app_type)\n opsworks.update_stack(stack_id=stack['StackId'], custom_json=json.dumps(app_settings[\"OPSWORKS_CUSTOM_JSON\"], sort_keys=True, indent=4, separators=(',', ': ')))\n\n if raw_input(\"shall we start the opsworks instance(s)? (y/n) \").lower() == \"y\":\n start_instance(stackName)\n else:\n print(_green(\"use fab start_instance:%s to start the stack\" % stackName))", "def deploy(stack_name, cf_resource):\n with open('setup.yaml') as setup_file:\n setup_template = setup_file.read()\n print(f\"Creating {stack_name}.\")\n stack = cf_resource.create_stack(\n StackName=stack_name,\n TemplateBody=setup_template,\n Capabilities=['CAPABILITY_NAMED_IAM'])\n print(\"Waiting for stack to deploy. This typically takes a minute or two.\")\n waiter = cf_resource.meta.client.get_waiter('stack_create_complete')\n waiter.wait(StackName=stack.name)\n stack.load()\n print(f\"Stack status: {stack.stack_status}\")\n print(\"Created resources:\")\n for resource in stack.resource_summaries.all():\n print(f\"\\t{resource.resource_type}, {resource.physical_resource_id}\")\n print(\"Outputs:\")\n for oput in stack.outputs:\n print(f\"\\t{oput['OutputKey']}: {oput['OutputValue']}\")\n if oput['OutputKey'] == 'TableName':\n with open('config.py', 'w') as config:\n config.write(f\"TABLE_NAME = '{oput['OutputValue']}'\\n\")", "def create(self, template):\n raise NotImplementedError('Create Template not implemented')", "def from_boto_dict(\n cls, stack: Dict, region: Optional[str] = None\n ) -> \"CloudFormationStack\":\n stack_id = stack[\"StackId\"]\n name = stack[\"StackName\"]\n description = stack.get(\"Description\")\n parameters = OrderedDict(\n [\n (p[\"ParameterKey\"], p[\"ParameterValue\"])\n for p in stack.get(\"Parameters\", [])\n if not p.get(\"UsePreviousValue\")\n ]\n )\n creation_time = stack[\"CreationTime\"]\n last_updated_time = stack.get(\"LastUpdatedTime\")\n status = stack[\"StackStatus\"]\n stack_status_reason = stack.get(\"StackStatusReason\")\n disable_rollback = stack[\"DisableRollback\"]\n notification_arns = stack[\"NotificationARNs\"]\n timeout_in_minutes = stack.get(\"TimeoutInMinutes\")\n capabilities = stack.get(\"Capabilities\")\n outputs = stack.get(\"Outputs\")\n tags = OrderedDict([(t[\"Key\"], t[\"Value\"]) for t in stack[\"Tags\"]])\n\n return cls(\n stack_id,\n name,\n description,\n parameters,\n creation_time,\n last_updated_time,\n status,\n stack_status_reason,\n disable_rollback,\n notification_arns,\n timeout_in_minutes,\n capabilities,\n outputs,\n tags,\n region=region,\n )", "def get_cloudformation_template(cfn_client, stack_name):\n\n response = cfn_client.get_template(StackName=stack_name)\n return response[\"TemplateBody\"]", "def update(self):\n client = BotoClientProxy(\"cloudformation\", 
self.region)\n parameters = [\n {\"ParameterKey\": key, \"ParameterValue\": value}\n for key, value in self.parameters.items()\n ]\n try:\n client.update_stack(\n StackName=self.name,\n TemplateBody=json.dumps(self.template),\n Parameters=parameters,\n Capabilities=self.capabilities or [],\n )\n except ClientError as err:\n response = err.response\n error_info = response[\"Error\"]\n error_message = error_info[\"Message\"]\n if error_message == \"No updates are to be performed.\":\n raise StackNotUpdated(self.name)\n else:\n raise", "def get_template(client, stack):\n try:\n response = client.get_template(\n StackName=stack\n )\n template = response[\"TemplateBody\"]\n if isinstance(template, dict):\n template = json.dumps(template, indent=2, sort_keys=True)\n return template\n except botocore.exceptions.ClientError as e:\n click.echo(e.response[\"Error\"][\"Message\"])\n sys.exit(1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update an existing CloudFormation Stack
def update(args, config, cf_conn, template):
    print("Updating CloudFormation Stack %s..." % config['stack_name'])
    stack_id = cf_conn.update_stack(
        config['stack_name'],
        template_body=template.to_json(),
        parameters=cf_params(),
        tags=config['tags'],
        capabilities=['CAPABILITY_IAM']
    )
    print('Updated ' + stack_id)
[ "def update(self):\n client = BotoClientProxy(\"cloudformation\", self.region)\n parameters = [\n {\"ParameterKey\": key, \"ParameterValue\": value}\n for key, value in self.parameters.items()\n ]\n try:\n client.update_stack(\n StackName=self.name,\n TemplateBody=json.dumps(self.template),\n Parameters=parameters,\n Capabilities=self.capabilities or [],\n )\n except ClientError as err:\n response = err.response\n error_info = response[\"Error\"]\n error_message = error_info[\"Message\"]\n if error_message == \"No updates are to be performed.\":\n raise StackNotUpdated(self.name)\n else:\n raise", "def update_stack(self, cnxt, stack_identity, template, params,\r\n files, args):\r\n LOG.info(_('template is %s') % template)\r\n\r\n # Get the database representation of the existing stack\r\n db_stack = self._get_stack(cnxt, stack_identity)\r\n\r\n current_stack = parser.Stack.load(cnxt, stack=db_stack)\r\n\r\n if current_stack.action == current_stack.SUSPEND:\r\n msg = _('Updating a stack when it is suspended')\r\n raise exception.NotSupported(feature=msg)\r\n\r\n if current_stack.status == current_stack.IN_PROGRESS:\r\n msg = _('Updating a stack when another action is in progress')\r\n raise exception.NotSupported(feature=msg)\r\n\r\n # Now parse the template and any parameters for the updated\r\n # stack definition.\r\n tmpl = parser.Template(template, files=files)\r\n if len(tmpl[tmpl.RESOURCES]) > cfg.CONF.max_resources_per_stack:\r\n raise exception.RequestLimitExceeded(\r\n message=exception.StackResourceLimitExceeded.msg_fmt)\r\n stack_name = current_stack.name\r\n common_params = api.extract_args(args)\r\n common_params.setdefault(rpc_api.PARAM_TIMEOUT,\r\n current_stack.timeout_mins)\r\n env = environment.Environment(params)\r\n updated_stack = parser.Stack(cnxt, stack_name, tmpl,\r\n env, **common_params)\r\n updated_stack.parameters.set_stack_id(current_stack.identifier())\r\n\r\n self._validate_deferred_auth_context(cnxt, updated_stack)\r\n updated_stack.validate()\r\n\r\n self.thread_group_mgr.start_with_lock(cnxt, current_stack,\r\n self.engine_id,\r\n current_stack.update,\r\n updated_stack)\r\n\r\n return dict(current_stack.identifier())", "def create_or_update_stack(self, template_name):\n stack_name = self.get_stack_name(template_name)\n stack_parameters = self.get_stack_parameters(template_name)\n template_body = self.read_template(template_name)\n\n # check if the stack exists\n status = self.get_stack_status(stack_name)\n\n # otherwise, deploy it\n if status and ( status['StackStatus'] == 'CREATE_COMPLETE' or \n status['StackStatus'] == 'UPDATE_COMPLETE'):\n pass\n elif not status or status['StackStatus'] in ['DELETE_COMPLETE']:\n create_response = self.client.create_stack(\n StackName=stack_name,\n #http://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStack.html\n #Capabilities.member.1=CAPABILITY_IAM,\n Capabilities=['CAPABILITY_IAM'],\n TemplateBody=template_body,\n Parameters=stack_parameters)\n self.wait_for_stack_status(stack_name)\n elif status['StackStatus'] in ['CREATE_IN_PROGRESS']:\n self.wait_for_stack_status(stack_name)\n else:\n raise Exception(\n 'not sure what to do...stack is in state {}'.format(\n status['StackStatus']))\n\n # keep track of variables that are outputs from each stack\n stack = self.describe_stack(template_name)\n self.add_outputs_to_namespace(stack)\n\n return stack", "def updateOpsworksStackJson(stackName, chefJson):\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, 
error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n stack = getstacks(stackName=stackName)[0]\n if 'stackid' in stack.keys():\n opsworks = connect_to_opsworks()\n opsworks.update_stack(stack_id=stack['stackid'], custom_json=json.dumps(chefJson, sort_keys=True, indent=2, separators=(',', ': ')))\n else:\n print _red(\"no stack found with name %s\" % stackName)", "def stacks_update_preview(self, stack_name, ext_id, template_url=None, template=None, \n environment=None, files=None, parameters=None, tags=None,\n timeout_mins=None, use_all_urls=False):\n \n data={}\n \n if stack_name is not None and ext_id is not None:\n if template is not None:\n data['template'] = template\n if template_url is not None:\n data['template_url'] = template_url\n if environment is not None:\n if use_all_urls is True:\n environment=json.loads(urlopen(environment).read())\n data['environment'] = environment \n if parameters is not None:\n if use_all_urls is True:\n parameters=json.loads(urlopen(parameters).read())\n data['parameters'] = parameters\n if timeout_mins is not None:\n data['timeout_mins'] = timeout_mins\n if files is not None:\n if use_all_urls is True:\n files=json.loads(urlopen(files).read())\n data['files'] = files\n if tags is not None:\n data['tags'] = tags \n\n path = '/stacks/%s/%s/preview'%(stack_name,ext_id)\n \n self.logger.debug('-------------data: %s'%json.dumps(data))\n res = self.client.call(path, 'PUT', data=json.dumps(data), \n token=self.manager.identity.token)\n self.logger.debug('Preview update openstack heat stack: %s' % truncate(res))\n else:\n raise OpenstackError(\"You must specify both stack name and stack UUID\", 404) \n \n return res[0]", "def test_save_update_success(self):\n self._cf.update_stack.return_value = True\n\n with patch('krux_cloud_formation.cloud_formation.CloudFormation._is_stack_exists', MagicMock(return_value=True)):\n self._cfn.save(self.TEST_STACK_NAME)\n self._s3.update_key.assert_called_once_with(\n bucket_name=self.S3_BUCKET,\n key=self.TEST_STACK_NAME,\n str_content=self._cfn.template.to_json()\n )\n self._cf.update_stack.assert_called_once_with(\n StackName=self.TEST_STACK_NAME,\n TemplateURL=self.FAKE_URL\n )", "def deploy_stack(self, template, params=None, timeout=3600):\n logging.info(f\"Building/Updating {self.name} stack.\")\n if params is None:\n params = {}\n name = self.create_changeset(template, params)\n try:\n changes = self.describe_changeset(name)\n except EmptyChangeSetError:\n logging.info(f\"No change in {self.stack_name} stack.\")\n return\n logging.info(\n f\"Changes in changeset `{name}`: \\n{format_changes(changes)}\"\n )\n self.execute_changeset(name, timeout)\n return changes", "def changeset_stack(\n profile: Union[str, bool] = False,\n region: Union[str, bool] = False,\n replace: bool = False,\n local_path: Union[str, bool] = False,\n root: bool = False,\n wait: bool = False,\n info: bool = False,\n execute: bool = False,\n delete: bool = False,\n extra: bool = False,\n bucket: str = None,\n version: Union[str, bool] = False,\n) -> None:\n cloudformation = Cloudformation(profile, region)\n cloudformation.set_stack()\n\n # if not creating new changeset\n if info or execute or delete:\n fzf = Pyfzf()\n response: Dict[str, Any] = cloudformation.client.list_change_sets(\n StackName=cloudformation.stack_name\n )\n # get the changeset name\n fzf.process_list(\n response.get(\"Summaries\", []),\n \"ChangeSetName\",\n \"StackName\",\n 
\"ExecutionStatus\",\n \"Status\",\n \"Description\",\n )\n\n if info:\n selected_changeset = str(fzf.execute_fzf())\n describe_changes(cloudformation, selected_changeset)\n\n # execute the change set\n elif execute:\n selected_changeset = fzf.execute_fzf()\n if get_confirmation(\"Execute changeset %s?\" % selected_changeset):\n response = cloudformation.client.execute_change_set(\n ChangeSetName=selected_changeset,\n StackName=cloudformation.stack_name,\n )\n cloudformation.wait(\n \"stack_update_complete\", \"Wating for stack to be updated ...\"\n )\n print(\"Stack updated\")\n\n elif delete:\n selected_changeset = fzf.execute_fzf(multi_select=True)\n for changeset in selected_changeset:\n print(\"(dryrun) Delete changeset %s\" % changeset)\n if get_confirmation(\"Confirm?\"):\n for changeset in selected_changeset:\n cloudformation.client.delete_change_set(\n ChangeSetName=changeset, StackName=cloudformation.stack_name\n )\n\n else:\n changeset_name = input(\"Enter name of this changeset: \")\n if not changeset_name:\n raise NoNameEntered(\"No changeset name specified\")\n changeset_description = input(\"Description: \")\n # since is almost same operation as update stack\n # let update_stack handle it, but return update details instead of execute\n cloudformation_args = update_stack(\n cloudformation.profile,\n cloudformation.region,\n replace,\n local_path,\n root,\n wait,\n extra,\n bucket,\n version,\n dryrun=True,\n cloudformation=cloudformation,\n )\n cloudformation_args[\n \"cloudformation_action\"\n ] = cloudformation.client.create_change_set\n cloudformation_args[\"ChangeSetName\"] = changeset_name\n if changeset_description:\n cloudformation_args[\"Description\"] = changeset_description\n\n response = cloudformation.execute_with_capabilities(**cloudformation_args)\n\n response.pop(\"ResponseMetadata\", None)\n print(json.dumps(response, indent=4, default=str))\n print(80 * \"-\")\n print(\"Changeset create initiated\")\n\n if wait:\n cloudformation.wait(\n \"change_set_create_complete\",\n \"Wating for changset to be created ...\",\n ChangeSetName=changeset_name,\n )\n print(\"Changeset created\")\n describe_changes(cloudformation, changeset_name)", "def delete(args, config, cf_conn):\n # Delete an existing CloudFormation Stack with same name\n print(\"Deleting CloudFormation Stack %s...\" % config['stack_name'])\n resp = cf_conn.delete_stack(\n config['stack_name'],\n )\n print(resp)", "def delete(self):\n client = BotoClientProxy(\"cloudformation\", self.region)\n client.delete_stack(StackName=self.stack_id)", "def deploy(stack_name, cf_resource):\n with open('setup.yaml') as setup_file:\n setup_template = setup_file.read()\n print(f\"Creating {stack_name}.\")\n stack = cf_resource.create_stack(\n StackName=stack_name,\n TemplateBody=setup_template,\n Capabilities=['CAPABILITY_NAMED_IAM'])\n print(\"Waiting for stack to deploy. 
This typically takes a minute or two.\")\n waiter = cf_resource.meta.client.get_waiter('stack_create_complete')\n waiter.wait(StackName=stack.name)\n stack.load()\n print(f\"Stack status: {stack.stack_status}\")\n print(\"Created resources:\")\n for resource in stack.resource_summaries.all():\n print(f\"\\t{resource.resource_type}, {resource.physical_resource_id}\")\n print(\"Outputs:\")\n for oput in stack.outputs:\n print(f\"\\t{oput['OutputKey']}: {oput['OutputValue']}\")\n if oput['OutputKey'] == 'TableName':\n with open('config.py', 'w') as config:\n config.write(f\"TABLE_NAME = '{oput['OutputValue']}'\\n\")", "def _diff_stack(self, stack: Stack, **_: Any) -> Status:\n if self.cancel.wait(0):\n return INTERRUPTED\n\n if not deploy.should_submit(stack):\n return NotSubmittedStatus()\n\n provider = self.build_provider()\n\n if not deploy.should_update(stack):\n stack.set_outputs(provider.get_outputs(stack.fqn))\n return NotUpdatedStatus()\n\n tags = deploy.build_stack_tags(stack)\n\n try:\n provider_stack = provider.get_stack(stack.fqn)\n except exceptions.StackDoesNotExist:\n provider_stack = None\n\n try:\n stack.resolve(self.context, provider)\n parameters = self.build_parameters(stack, provider_stack)\n outputs = provider.get_stack_changes(\n stack, self._template(stack.blueprint), parameters, tags\n )\n stack.set_outputs(outputs)\n except exceptions.StackDidNotChange:\n LOGGER.info(\"%s:no changes\", stack.fqn)\n stack.set_outputs(provider.get_outputs(stack.fqn))\n except exceptions.StackDoesNotExist:\n if self.context.persistent_graph:\n return SkippedStatus(\n \"persistent graph: stack does not exist, will be removed\"\n )\n return DoesNotExistInCloudFormation()\n except AttributeError as err:\n if (\n self.context.persistent_graph\n and \"defined class or template path\" in str(err)\n ):\n return SkippedStatus(\"persistent graph: will be destroyed\")\n raise\n except ClientError as err:\n if (\n err.response[\"Error\"][\"Code\"] == \"ValidationError\"\n and \"length less than or equal to\" in err.response[\"Error\"][\"Message\"]\n ):\n LOGGER.error(\n \"%s:template is too large to provide directly to the API; \"\n \"S3 must be used\",\n stack.name,\n )\n return SkippedStatus(\"cfngin_bucket: existing bucket required\")\n raise\n return COMPLETE", "def provision(template_name=None, stack_name=None):\n if not template_name:\n abort('Must provide template')\n if not stack_name:\n abort('Must provide stack_name')\n client = boto3.client('cloudformation')\n\n config = load_config()\n\n update = False\n try:\n resp = client.describe_stacks(StackName=stack_name)\n message = 'Stack {0} exists, and is in state {1}. 
Proceed with update?'.format(\n stack_name, resp['Stacks'][0]['StackStatus'])\n if not confirm(message):\n abort('Aborting.')\n else:\n update = True\n except ClientError:\n logger.info('No stack named {0}; proceeding with stack creation'.format(stack_name))\n\n with open(os.path.join(OUTPUT_DIR, template_name + OUTPUT_EXT)) as output_contents:\n if update:\n response = client.update_stack(StackName=stack_name,\n TemplateBody=output_contents.read(),\n Parameters=config.get(template_name, {}).get('parameters', []),\n Capabilities=['CAPABILITY_IAM'])\n else:\n response = client.create_stack(StackName=stack_name,\n TemplateBody=output_contents.read(),\n Parameters=config.get(template_name, {}).get('parameters', []),\n Capabilities=['CAPABILITY_IAM'])\n logger.info(json.dumps(response, indent=2))", "def updatestackjson(stackName, jsonFile=None):\n\n if jsonFile is None:\n jsonFile = \"../../awsjson/%s.json\" % stackName\n print _yellow(\"json file not specified. defaulting to %s\" % jsonFile)\n\n try:\n with open(os.path.join(os.path.expanduser(jsonFile)), \"r\") as chefJsonFile:\n localStackChefJson = json.load(chefJsonFile)\n except IOError, e:\n raise e\n\n print _green(\"updating opsworks stack %s with json from %s...\" % (stackName, jsonFile))\n updateOpsworksStackJson(stackName, localStackChefJson)", "def set_stack(ctx, app_name, stack):\n gigalixir_app.set_stack(ctx.obj['host'], app_name, stack)", "def delete_stack(stackName):\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n stackName = stackName.lower()\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n stackIds = [stack['StackId'] for stack in stacks['Stacks'] if stack['Name'] == stackName]\n for stackId in stackIds:\n prompt = _green(\"shall we remove stack: \") + _yellow(\"%s/%s? (y/n) \") % (stackName, str(stackId).encode('ascii', 'replace'))\n answer = raw_input(prompt)\n if answer.lower() == 'y':\n stop_instance(stackName=stackName)\n apps = opsworks.describe_apps(stack_id=stackId)\n appIds = [app['AppId'] for app in apps['Apps']]\n instances = opsworks.describe_instances(stack_id=stackId)\n instanceIds = [instance['InstanceId'] for instance in instances['Instances']]\n for instanceId in instanceIds:\n opsworks.delete_instance(instance_id=instanceId, delete_elastic_ip=True, delete_volumes=True)\n for appId in appIds:\n opsworks.delete_app(appId)\n opsworks.delete_stack(stackId)", "def destroy(stack, cf_resource):\n print(f\"Deleting {stack.name}.\")\n stack.delete()\n print(\"Waiting for stack removal.\")\n waiter = cf_resource.meta.client.get_waiter('stack_delete_complete')\n waiter.wait(StackName=stack.name)\n print(\"Stack delete complete.\")", "def launch(args, config, cf_conn, template):\n print(\"Creating CloudFormation Stack %s...\" % config['stack_name'])\n stack_id = cf_conn.create_stack(\n config['stack_name'],\n template_body=template.to_json(),\n parameters=cf_params(),\n tags=config['tags'],\n capabilities=['CAPABILITY_IAM']\n )\n print('Created ' + stack_id)", "def create_stack(stackName, app_type):\n if app_type in ['core', 'expacore', 'expa_core']:\n app_type = 'core'\n\n try:\n app_settings\n except NameError:\n app_settings = loadsettings(app_type)\n\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. 
please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n try:\n git_cfg\n except NameError:\n try:\n git_cfg = load_git_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide a github conifguration based on git.cfg-dist to proceed. %s\" % error))\n return 1\n\n stackName = stackName.lower()\n key_file_path = os.path.expanduser(git_cfg.get('git', 'key_dir')) + '/' + git_cfg.get('cookbooks', 'deploy_key')\n key_file_path = os.path.expandvars(key_file_path)\n with open(key_file_path, \"r\") as key_file:\n cookbooks_deploy_key = key_file.read()\n\n key_file_path = os.path.expanduser(git_cfg.get('git', 'key_dir')) + '/' + git_cfg.get(app_type, 'deploy_key')\n key_file_path = os.path.expandvars(key_file_path)\n with open(key_file_path, \"r\") as key_file:\n app_deploy_key = key_file.read()\n\n key_file_path = os.path.expanduser(aws_cfg.get('aws', 'key_dir')) + '/' + aws_cfg.get('aws', 'opsworks_public_key')\n key_file_path = os.path.expandvars(key_file_path)\n with open(key_file_path, \"r\") as key_file:\n opsworks_public_key = key_file.read()\n\n cookbooks_source = {\"Url\": \"%s\" % git_cfg.get('cookbooks', 'repo_url'),\n \"Type\": \"git\",\n \"SshKey\": cookbooks_deploy_key}\n\n recipes = {\"Setup\": [\"app::sns-handler\", \"bootstrap::misc\", \"newrelic::default\", \"dokku::bootstrap\", \"app::dokku-logs\", \"logstash::agent\", \"app::newrelic-nginx-plugin\"],\n \"Deploy\": [\"app::sns-handler\", \"dokku::apps\", \"nginx::logging\", \"dokku_deploy::default\", \"app::newrelic-notify-deployment\", \"logstash::restart_agent\", \"app::sync-db-to-expa\"],\n \"Shutdown\": [\"app::sns-handler\", \"app::shutdown\"]}\n\n app_source = {\"Url\": \"%s\" % git_cfg.get(app_type, 'repo_url'),\n \"Type\": \"git\",\n \"SshKey\": app_deploy_key}\n\n arns = create_opsworks_roles()\n\n create_s3_buckets(app_type)\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n\n try:\n opsworks.create_user_profile(iam_user_arn=arns['user_arn'], ssh_public_key=opsworks_public_key)\n except ValidationException, error:\n if error.message == 'User ARN already exists':\n opsworks.update_user_profile(iam_user_arn=arns['user_arn'], ssh_public_key=opsworks_public_key)\n else:\n print error\n return 1\n\n if stackName in [stack['Name'] for stack in stacks['Stacks']]:\n foundStacks = [(stack['Name'], stack['StackId']) for stack in stacks['Stacks']]\n for foundStack in foundStacks:\n if foundStack[0] == stackName:\n print(_red(\"%s: %s already exists. 
please choose another stack name\" % (foundStack[0], foundStack[1])))\n return 1\n\n try:\n stack = opsworks.create_stack(name=stackName, region=aws_cfg.get('aws', 'region'),\n service_role_arn=arns['serviceRole'], default_instance_profile_arn=arns['instanceProfile'],\n default_os='Ubuntu 12.04 LTS', hostname_theme=choice(OPWORKS_INSTANCE_THEMES),\n configuration_manager=OPSWORKS_CONFIG_MANAGER, custom_json=json.dumps(app_settings[\"OPSWORKS_CUSTOM_JSON\"], sort_keys=True, indent=4, separators=(',', ': ')),\n use_custom_cookbooks=True, custom_cookbooks_source=cookbooks_source, default_ssh_key_name=aws_cfg.get(\"aws\", \"key_name\"),\n default_root_device_type='ebs')\n\n opsworks.set_permission(stack_id=stack['StackId'], iam_user_arn=arns['user_arn'], allow_ssh=True, allow_sudo=True)\n except Exception, error:\n print error\n print json.dumps(app_settings[\"OPSWORKS_CUSTOM_JSON\"], sort_keys=True, indent=4, separators=(',', ': '))\n return 1\n\n ec2 = connect_to_ec2()\n webserver_sg = ec2.get_all_security_groups(groupnames=['AWS-OpsWorks-Web-Server'])\n layer = opsworks.create_layer(stack_id=stack['StackId'], type='custom', name=app_settings[\"APP_NAME\"], shortname=app_settings[\"APP_NAME\"], custom_recipes=recipes,\n enable_auto_healing=True, auto_assign_elastic_ips=False, auto_assign_public_ips=True, custom_security_group_ids=[webserver_sg[0].id])\n\n elb_name = stackName + '-elb'\n lb = create_elb(name=elb_name, app_type=app_type)\n\n opsworks.attach_elastic_load_balancer(elastic_load_balancer_name=lb.name, layer_id=layer['LayerId'])\n\n if app_type == 'app':\n appDomains = [app_settings[\"HOST_NAME\"], app_settings[\"DOMAIN_NAME\"]]\n else:\n appDomains = [app_settings[\"HOST_NAME\"]]\n app = opsworks.create_app(stack_id=stack['StackId'], name=app_settings[\"APP_NAME\"], type='static', app_source=app_source,\n domains=appDomains)\n\n print(_green(\"created stack with following info\"))\n print(_yellow(\"stack name/id: %s/%s\" % (stackName, stack['StackId'])))\n print(_yellow(\"layer name/id: %s/%s\" % (app_settings[\"APP_NAME\"], layer['LayerId'])))\n print(_yellow(\"app name/id: %s/%s\" % (app_settings[\"APP_NAME\"], app['AppId'])))\n\n zones = random.sample([zone.name for zone in ec2.get_all_zones()], 2)\n\n add_instance(stackName=stackName, layerName=app_settings[\"APP_NAME\"], zone=zones[0])\n add_instance(stackName=stackName, layerName=app_settings[\"APP_NAME\"], zone=zones[1])\n\n rds_instance_name = stackName + '-' + app_settings[\"HOST_NAME\"].replace('.', '-') + '-db'\n rds = connect_to_rds()\n if app_settings[\"DATABASE_HOST\"] == \"localhost\":\n try:\n create_rds(name=rds_instance_name, app_type=app_type, engine_type=app_settings['DB_TYPE'])\n except Exception:\n print(_red(\"rds creation failed. 
deleting stack with no RDS instance\"))\n delete_stack(stackName)\n else:\n try:\n rds.get_all_dbinstances(instance_id=app_settings[\"DATABASE_HOST\"].split('.')[0])\n except BotoServerError, error:\n if error.code == 'DBInstanceNotFound':\n create_rds(name=rds_instance_name, app_type=app_type, engine_type=app_settings['DB_TYPE'])\n else:\n print error\n\n try:\n rds.authorize_dbsecurity_group(group_name=aws_cfg.get('aws', 'group_name'),\n ec2_security_group_owner_id=webserver_sg[0].owner_id, ec2_security_group_name='AWS-OpsWorks-Web-Server')\n except BotoServerError, error:\n if error.code == 'AuthorizationAlreadyExists':\n pass\n else:\n print error\n\n # update stack with new custom_json updated by create_rds and create_s3_buckets\n app_settings = loadsettings(app_type)\n opsworks.update_stack(stack_id=stack['StackId'], custom_json=json.dumps(app_settings[\"OPSWORKS_CUSTOM_JSON\"], sort_keys=True, indent=4, separators=(',', ': ')))\n\n if raw_input(\"shall we start the opsworks instance(s)? (y/n) \").lower() == \"y\":\n start_instance(stackName)\n else:\n print(_green(\"use fab start_instance:%s to start the stack\" % stackName))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Deletes an existing CloudFormation Stack
def delete(args, config, cf_conn): # Delete an existing CloudFormation Stack with same name print("Deleting CloudFormation Stack %s..." % config['stack_name']) resp = cf_conn.delete_stack( config['stack_name'], ) print(resp)
[ "def delete(self):\n client = BotoClientProxy(\"cloudformation\", self.region)\n client.delete_stack(StackName=self.stack_id)", "def destroy(stack, cf_resource):\n print(f\"Deleting {stack.name}.\")\n stack.delete()\n print(\"Waiting for stack removal.\")\n waiter = cf_resource.meta.client.get_waiter('stack_delete_complete')\n waiter.wait(StackName=stack.name)\n print(\"Stack delete complete.\")", "def delete_stack(stackName):\n try:\n aws_cfg\n except NameError:\n try:\n aws_cfg = load_aws_cfg()\n except Exception, error:\n print(_red(\"error loading config. please provide an AWS conifguration based on aws.cfg-dist to proceed. %s\" % error))\n return 1\n\n stackName = stackName.lower()\n opsworks = connect_to_opsworks()\n stacks = opsworks.describe_stacks()\n stackIds = [stack['StackId'] for stack in stacks['Stacks'] if stack['Name'] == stackName]\n for stackId in stackIds:\n prompt = _green(\"shall we remove stack: \") + _yellow(\"%s/%s? (y/n) \") % (stackName, str(stackId).encode('ascii', 'replace'))\n answer = raw_input(prompt)\n if answer.lower() == 'y':\n stop_instance(stackName=stackName)\n apps = opsworks.describe_apps(stack_id=stackId)\n appIds = [app['AppId'] for app in apps['Apps']]\n instances = opsworks.describe_instances(stack_id=stackId)\n instanceIds = [instance['InstanceId'] for instance in instances['Instances']]\n for instanceId in instanceIds:\n opsworks.delete_instance(instance_id=instanceId, delete_elastic_ip=True, delete_volumes=True)\n for appId in appIds:\n opsworks.delete_app(appId)\n opsworks.delete_stack(stackId)", "def delete(self, req):\r\n self._enforce(req, 'DeleteStack')\r\n\r\n con = req.context\r\n try:\r\n identity = self._get_identity(con, req.params['StackName'])\r\n res = self.rpc_client.delete_stack(con, identity, cast=False)\r\n\r\n except Exception as ex:\r\n return exception.map_remote_error(ex)\r\n\r\n if res is None:\r\n return api_utils.format_response('DeleteStack', '')\r\n else:\r\n return api_utils.format_response('DeleteStack', res['Error'])", "def delete(ctx, resource_path, namespace=DEV_NAMESPACE):\n print(f'Deleting local k8s stack for {resource_path}...')\n ctx.run(f'{KUBERNETES_DELETE_CMD} \"{resource_path}\" -n \"{namespace}\"')", "def cmd_delete(self):\r\n self.deleteCurrentStack()", "def delete_stacks(**kwargs):\n\n session = kwargs['session']\n job_identifier = kwargs['job_identifier']\n\n cfn_client = session.client('cloudformation')\n\n stack_names = sorted([stack['StackName'] for stack in cfn_client.describe_stacks()[\n 'Stacks'] if \"{}-\".format(job_identifier) in stack['StackName']])\n\n choice = click.confirm(\n \"Do you want to delete these stacks? : {}\".format(stack_names))\n if choice:\n for stack_name in reversed(stack_names):\n cfn_client.delete_stack(StackName=stack_name)\n try:\n cfn_client.get_waiter('stack_delete_complete').wait(\n StackName=stack_name)\n click.echo(\"Deleted {}.\".format(stack_name))\n except botocore.exceptions.WaiterError as waiter_error:\n click.echo(\"{} failed to delete. 
{}\".format(\n stack_name, waiter_error))\n click.echo(\"Stopped stack deletion.\")\n break", "def delete(self):\n delete_stack(self)", "def delete_stack(name=None, poll=0, timeout=60, profile=None):\n h_client = _auth(profile)\n ret = {\"result\": True, \"comment\": \"\"}\n if not name:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Parameter name missing or None\"\n return ret\n try:\n h_client.stacks.delete(name)\n except heatclient.exc.HTTPNotFound:\n ret[\"result\"] = False\n ret[\"comment\"] = \"No stack {}\".format(name)\n except heatclient.exc.HTTPForbidden as forbidden:\n log.exception(forbidden)\n ret[\"result\"] = False\n ret[\"comment\"] = str(forbidden)\n if ret[\"result\"] is False:\n return ret\n\n if poll > 0:\n try:\n stack_status, msg = _poll_for_events(\n h_client, name, action=\"DELETE\", poll_period=poll, timeout=timeout\n )\n except heatclient.exc.CommandError:\n ret[\"comment\"] = \"Deleted stack {}.\".format(name)\n return ret\n except Exception as ex: # pylint: disable=W0703\n log.exception(\"Delete failed %s\", ex)\n ret[\"result\"] = False\n ret[\"comment\"] = \"{}\".format(ex)\n return ret\n\n if stack_status == \"DELETE_FAILED\":\n ret[\"result\"] = False\n ret[\"comment\"] = \"Deleted stack FAILED'{}'{}.\".format(name, msg)\n else:\n ret[\"comment\"] = \"Deleted stack {}.\".format(name)\n return ret", "def destroy(sock_gateway, lambda_role_name, iam_resource, stack, cf_resource):\n print(f\"Deleting websocket API {sock_gateway.api_name}.\")\n sock_gateway.remove_connection_permissions(iam_resource.Role(lambda_role_name))\n sock_gateway.delete_api()\n\n print(f\"Deleting stack {stack.name}.\")\n stack.delete()\n print(\"Waiting for stack removal.\")\n waiter = cf_resource.meta.client.get_waiter('stack_delete_complete')\n waiter.wait(StackName=stack.name)\n print(\"Stack delete complete.\")", "def update(args, config, cf_conn, template):\n print(\"Updating CloudFormation Stack %s...\" % config['stack_name'])\n stack_id = cf_conn.update_stack(\n config['stack_name'],\n template_body=template.to_json(),\n parameters=cf_params(),\n tags=config['tags'],\n capabilities=['CAPABILITY_IAM']\n )\n print('Updated ' + stack_id)", "def delete_all_stacks(self):\n logging.debug(\"Destroying all cfn stacks\")\n for value in reversed(OrderedDict(self.__created_stacks).values()):\n try:\n self.delete_stack(value.name, value.region)\n except Exception as e:\n logging.error(\n \"Failed when destroying stack {0} in region {1} with exception {2}.\".format(\n value.name, value.region, e\n )\n )", "def remove_from_stack(stack):\n stack.pop()\n return stack", "def destroy(config):\n\n # Check if env already exists\n env_name = config['environment'].get('name')\n env_vers = config['environment'].get('version', None)\n env = env_name\n\n if env_vers:\n env = \"-\".join([env_name, env_vers])\n\n system_type = config['tags'].get('system_type', None)\n if not aws.environment_exists(env_name, env_vers, system_type):\n msg = \"No such environment with the name {} exists.\"\n if system_type:\n env = \"-\".join([system_type, env])\n raise EnvironmentExistsException(msg.format(env))\n\n tf_root = _precheck(config, 'destroy')\n\n # Tag the resources as ready to destroy\n aws.tag_resources(config)\n\n # Run destroy\n tf_command = tf.destroy(config)\n return_code = utils.run_command(tf_command, cwd=config['tf_root'])\n\n # Double check the make sure we don't have anything left running\n # before destroying the S3 resources.\n if not aws.environment_exists(env_name, env_vers, system_type) and 
return_code == 0:\n # Destroy the per-environment S3 folder in\n msg = \"Destroying S3 env folder: {}\".format(config['env_folder'])\n logger.debug(msg)\n s3.destroy_folder(config['project_config'],config['env_folder'])\n\n # Destroy the state file in S3\n msg = \"Destroying S3 State file: {}\".format(config['tf_state'])\n logger.debug(msg)\n s3.delete_object(config['tf_state_bucket'], config['tf_state'])\n\n return True", "def test_delete_stack_user(self):\n\n self._stubs_v3()\n\n ctx = utils.dummy_context()\n ctx.trust_id = None\n\n # mock keystone client delete function\n self.mock_ks_v3_client.users = self.m.CreateMockAnything()\n self.mock_ks_v3_client.users.delete(user='atestuser').AndReturn(None)\n self.mock_ks_v3_client.users.delete(user='atestuser').AndRaise(\n kc_exception.NotFound)\n\n self.m.ReplayAll()\n heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)\n heat_ks_client.delete_stack_user('atestuser')\n # Second delete will raise ignored NotFound\n heat_ks_client.delete_stack_user('atestuser')", "def test_delete_stack_domain_project(self):\n\n self._stub_domain_admin_client()\n self.mock_admin_client.projects = self.m.CreateMockAnything()\n dummy = self.m.CreateMockAnything()\n dummy.id = 'aproject123'\n dummy.domain_id = 'adomain123'\n dummy.delete().AndReturn(None)\n self.mock_admin_client.projects.get(project='aprojectid').AndReturn(\n dummy)\n self.m.ReplayAll()\n\n ctx = utils.dummy_context()\n ctx.trust_id = None\n heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)\n heat_ks_client.delete_stack_domain_project(project_id='aprojectid')", "def test_delete_stack_user(self):\r\n\r\n self._stubs_v3()\r\n\r\n ctx = utils.dummy_context()\r\n ctx.trust_id = None\r\n\r\n # mock keystone client delete function\r\n self.mock_ks_v3_client.users = self.m.CreateMockAnything()\r\n self.mock_ks_v3_client.users.delete(user='atestuser').AndReturn(None)\r\n self.mock_ks_v3_client.users.delete(user='atestuser').AndRaise(\r\n kc_exception.NotFound)\r\n\r\n self.m.ReplayAll()\r\n heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)\r\n heat_ks_client.delete_stack_user('atestuser')\r\n # Second delete will raise ignored NotFound\r\n heat_ks_client.delete_stack_user('atestuser')", "def test_delete_stack_domain_project(self):\r\n\r\n self._stub_domain_admin_client()\r\n self.mock_admin_client.projects = self.m.CreateMockAnything()\r\n self.mock_admin_client.projects.delete(project='aprojectid')\r\n self.mock_admin_client.projects.delete(project='aprojectid').AndRaise(\r\n kc_exception.NotFound)\r\n self.m.ReplayAll()\r\n\r\n ctx = utils.dummy_context()\r\n ctx.trust_id = None\r\n heat_ks_client = heat_keystoneclient.KeystoneClient(ctx)\r\n heat_ks_client.delete_stack_domain_project(project_id='aprojectid')\r\n # Second delete will raise ignored NotFound\r\n heat_ks_client.delete_stack_domain_project(project_id='aprojectid')", "def launch(args, config, cf_conn, template):\n print(\"Creating CloudFormation Stack %s...\" % config['stack_name'])\n stack_id = cf_conn.create_stack(\n config['stack_name'],\n template_body=template.to_json(),\n parameters=cf_params(),\n tags=config['tags'],\n capabilities=['CAPABILITY_IAM']\n )\n print('Created ' + stack_id)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Describes a CloudFormation Stack and prints the outputs
def output(args, config, cf_conn): print("Describing CloudFormation Stack %s..." % config['stack_name']) resp = cf_conn.describe_stacks( config['stack_name'] ) print('---'); print('region: %s' % args['--region']) for output in resp[0].outputs: print("%s: %s" % (output.description, output.value))
[ "def describe(self, req):\r\n self._enforce(req, 'DescribeStacks')\r\n\r\n def format_stack_outputs(o):\r\n keymap = {\r\n engine_api.OUTPUT_DESCRIPTION: 'Description',\r\n engine_api.OUTPUT_KEY: 'OutputKey',\r\n engine_api.OUTPUT_VALUE: 'OutputValue',\r\n }\r\n\r\n def replacecolon(d):\r\n return dict(map(lambda (k, v): (k.replace(':', '.'), v),\r\n d.items()))\r\n\r\n def transform(attrs):\r\n \"\"\"\r\n Recursively replace all : with . in dict keys\r\n so that they are not interpreted as xml namespaces.\r\n \"\"\"\r\n new = replacecolon(attrs)\r\n for key, value in new.items():\r\n if isinstance(value, dict):\r\n new[key] = transform(value)\r\n return new\r\n\r\n return api_utils.reformat_dict_keys(keymap, transform(o))\r\n\r\n def format_stack(s):\r\n \"\"\"\r\n Reformat engine output into the AWS \"StackSummary\" format\r\n \"\"\"\r\n keymap = {\r\n engine_api.STACK_CAPABILITIES: 'Capabilities',\r\n engine_api.STACK_CREATION_TIME: 'CreationTime',\r\n engine_api.STACK_DESCRIPTION: 'Description',\r\n engine_api.STACK_DISABLE_ROLLBACK: 'DisableRollback',\r\n engine_api.STACK_NOTIFICATION_TOPICS: 'NotificationARNs',\r\n engine_api.STACK_PARAMETERS: 'Parameters',\r\n engine_api.STACK_ID: 'StackId',\r\n engine_api.STACK_NAME: 'StackName',\r\n engine_api.STACK_STATUS_DATA: 'StackStatusReason',\r\n engine_api.STACK_TIMEOUT: 'TimeoutInMinutes',\r\n }\r\n\r\n if s[engine_api.STACK_UPDATED_TIME] is not None:\r\n keymap[engine_api.STACK_UPDATED_TIME] = 'LastUpdatedTime'\r\n\r\n result = api_utils.reformat_dict_keys(keymap, s)\r\n\r\n action = s[engine_api.STACK_ACTION]\r\n status = s[engine_api.STACK_STATUS]\r\n result['StackStatus'] = '_'.join((action, status))\r\n\r\n # Reformat outputs, these are handled separately as they are\r\n # only present in the engine output for a completely created\r\n # stack\r\n result['Outputs'] = []\r\n if engine_api.STACK_OUTPUTS in s:\r\n for o in s[engine_api.STACK_OUTPUTS]:\r\n result['Outputs'].append(format_stack_outputs(o))\r\n\r\n # Reformat Parameters dict-of-dict into AWS API format\r\n # This is a list-of-dict with nasty \"ParameterKey\" : key\r\n # \"ParameterValue\" : value format.\r\n result['Parameters'] = [{'ParameterKey': k,\r\n 'ParameterValue': v}\r\n for (k, v) in result['Parameters'].items()]\r\n\r\n return self._id_format(result)\r\n\r\n con = req.context\r\n # If no StackName parameter is passed, we pass None into the engine\r\n # this returns results for all stacks (visible to this user), which\r\n # is the behavior described in the AWS DescribeStacks API docs\r\n try:\r\n if 'StackName' in req.params:\r\n identity = self._get_identity(con, req.params['StackName'])\r\n else:\r\n identity = None\r\n\r\n stack_list = self.rpc_client.show_stack(con, identity)\r\n\r\n except Exception as ex:\r\n return exception.map_remote_error(ex)\r\n\r\n res = {'Stacks': [format_stack(s) for s in stack_list]}\r\n\r\n return api_utils.format_response('DescribeStacks', res)", "def info(ctx, show_all):\r\n ts, tier = _get_config_and_tier(ctx.obj.tier_name)\r\n\r\n #hd = ['template_name', '']\r\n #print \"template code\", [t().template_name for t in templater.export]\r\n\r\n _list_stacks(tier, show_all)", "def print_stacks(self):\n print(self.operand_stack)\n print(self.type_stack)\n print(self.operator_stack)", "def description(self, force=False):\n if not getattr(self, \"_description\", None) or force:\n with self.catch_boto_400(StackDoesntExist, \"Couldn't find stack\"):\n while True:\n try:\n with self.ignore_throttling_error():\n response = 
self.conn.describe_stacks(StackName=self.stack_name)\n self._description = response['Stacks'][0]\n break\n except Throttled:\n log.info(\"Was throttled, waiting a bit\")\n time.sleep(0.5)\n return self._description", "def detail(self, req):\r\n stacks = self.rpc_client.list_stacks(req.context)\r\n\r\n return {'stacks': [stacks_view.format_stack(req, s) for s in stacks]}", "def __str__(self):\n return str(self.stack)", "def create_stack(self):\n command = \"cfn-create-stack \" + self.stack_name + \" -f \" + self.template_file\n if (self.parameters is not None):\n command += \" -p \\\"\" + self.parameters + \"\\\"\"\n run_command(command)", "def format_stack_summary(s):\r\n # Map the engine-api format to the AWS StackSummary datatype\r\n keymap = {\r\n engine_api.STACK_CREATION_TIME: 'CreationTime',\r\n engine_api.STACK_UPDATED_TIME: 'LastUpdatedTime',\r\n engine_api.STACK_ID: 'StackId',\r\n engine_api.STACK_NAME: 'StackName',\r\n engine_api.STACK_STATUS_DATA: 'StackStatusReason',\r\n engine_api.STACK_TMPL_DESCRIPTION: 'TemplateDescription',\r\n }\r\n\r\n result = api_utils.reformat_dict_keys(keymap, s)\r\n\r\n action = s[engine_api.STACK_ACTION]\r\n status = s[engine_api.STACK_STATUS]\r\n result['StackStatus'] = '_'.join((action, status))\r\n\r\n # AWS docs indicate DeletionTime is omitted for current stacks\r\n # This is still TODO(unknown) in the engine, we don't keep data for\r\n # stacks after they are deleted\r\n if engine_api.STACK_DELETION_TIME in s:\r\n result['DeletionTime'] = s[engine_api.STACK_DELETION_TIME]\r\n\r\n return self._id_format(result)", "def stacks_details(self, stack_name, ext_id):\n \n path=\"/stacks/%s/%s\"%(stack_name,ext_id)\n if stack_name is not None and ext_id is not None:\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n else:\n raise OpenstackError(\"You must specify both stack name and stack UUID\", 404) \n \n self.logger.debug('Openstack heat stack Details: %s' % \\\n truncate(res))\n return res[0]", "def _describe_stack_resource_drifts(self) -> dict:\n self.logger.info(f\"{self.stack.name} - Describing Stack Resource Drifts\")\n\n return self.connection_manager.call(\n service=\"cloudformation\",\n command=\"describe_stack_resource_drifts\",\n kwargs={\"StackName\": self.stack.external_name},\n )", "def inspect(name):\n repo = TemplateRepository()\n config = yaml.load(repo.config(name))\n\n kind = config[\"kind\"]\n name = config[\"metadata\"][\"name\"]\n\n parameters = config[\"spec\"][\"parameters\"]\n\n steps = config[\"spec\"][\"steps\"]\n\n rows, columns = os.popen(\"stty size\", \"r\").read().split()\n\n display_params = []\n for param in parameters:\n if \"default\" in param:\n default = param[\"default\"]\n else:\n default = \"\"\n display_params.append(\n [\n param[\"name\"],\n \"\\n\".join(textwrap.wrap(param[\"description\"], int(columns) - 20)),\n default,\n ]\n )\n\n steps_data = []\n steps_headers = [\n \"Steps ({count})\".format(count=len(steps)),\n \"Image\",\n \"Command\",\n \"Args\",\n ]\n for step in steps:\n data = [step[\"name\"], step[\"image\"]]\n if \"command\" in step:\n data.append(\"\\n\".join(step[\"command\"]))\n else:\n data.append(\"\")\n if \"args\" in step:\n data.append(\"\\n\".join(step[\"args\"]))\n else:\n data.append(\"\")\n steps_data.append(data)\n\n click.echo(name)\n if name in repo.templates:\n click.echo(repo.templates[name])\n click.echo()\n click.echo(\n tabulate(\n display_params,\n headers=[\n \"Parameters ({count})\".format(count=len(parameters)),\n \"Description\",\n 
\"Default\",\n ],\n )\n )\n click.echo()\n click.echo(tabulate(steps_data, headers=steps_headers))", "def launch(args, config, cf_conn, template):\n print(\"Creating CloudFormation Stack %s...\" % config['stack_name'])\n stack_id = cf_conn.create_stack(\n config['stack_name'],\n template_body=template.to_json(),\n parameters=cf_params(),\n tags=config['tags'],\n capabilities=['CAPABILITY_IAM']\n )\n print('Created ' + stack_id)", "def create_stack(self, **kwargs):\n stack_name = kwargs.get('stack_name')\n template_file = kwargs.get('template_file')\n if not self.quiet:\n print('Creating stack: ', end='')\n print('{}'.format(stack_name), color='green')\n shell_statement = 'aws --profile {} --region {} cloudformation create-stack --stack-name {} --template-body file://{} --capabilities=CAPABILITY_IAM'.format(self.profile_name, self.region_name, stack_name, template_file)\n if self.stack_exists(stack_name):\n print('Stack with the name {} already exists. Current stacks:'.format(stack_name), color='red')\n for _name, _status in self.check_statuses():\n if _status == 'CREATE_COMPLETE':\n color='green'\n else:\n color='yellow'\n print('{:<25} {}'.format(_name, _status), color=color)\n sys.exit(1)\n\n try:\n results = subprocess.check_output(\n shlex.split(shell_statement),\n stderr=subprocess.STDOUT\n )\n except subprocess.CalledProcessError as e:\n print('An error ({}) occurred:'.format(e.returncode), color='red')\n print(e.output.decode())\n sys.exit(1)", "def test_stack_show(self):\n resp, stack = self.client.get_stack(self.stack_name)\n self.assertEqual('200', resp['status'])\n self.assertIsInstance(stack, dict)\n self.assertEqual(self.stack_name, stack['stack_name'])\n self.assertEqual(self.stack_id, stack['id'])", "def generate(self):\n try:\n self.stack_cf = self.template.render(stack=self.stack, env=self.env, config=self.config)\n with open(self.output_folder + self.stack_cf_file, 'w') as cffile:\n cffile.write(self.stack_cf)\n print('[' + self.stack + '] generate in ' + self.output_folder + self.stack_cf_file)\n except:\n print('[' + self.stack + '] generate cloudformation file failed.')\n raise Exception('Stack ' + self.stack + ' generate cloudformation file failed.')", "def test_list_stacks(self):\n pass", "def deploy(stack_name, cf_resource):\n with open('setup.yaml') as setup_file:\n setup_template = setup_file.read()\n print(f\"Creating {stack_name}.\")\n stack = cf_resource.create_stack(\n StackName=stack_name,\n TemplateBody=setup_template,\n Capabilities=['CAPABILITY_NAMED_IAM'])\n print(\"Waiting for stack to deploy. 
This typically takes a minute or two.\")\n waiter = cf_resource.meta.client.get_waiter('stack_create_complete')\n waiter.wait(StackName=stack.name)\n stack.load()\n print(f\"Stack status: {stack.stack_status}\")\n print(\"Created resources:\")\n for resource in stack.resource_summaries.all():\n print(f\"\\t{resource.resource_type}, {resource.physical_resource_id}\")\n print(\"Outputs:\")\n for oput in stack.outputs:\n print(f\"\\t{oput['OutputKey']}: {oput['OutputValue']}\")\n if oput['OutputKey'] == 'TableName':\n with open('config.py', 'w') as config:\n config.write(f\"TABLE_NAME = '{oput['OutputValue']}'\\n\")", "def describe_events(self):\n return self.connection_manager.call(\n service=\"cloudformation\",\n command=\"describe_stack_events\",\n kwargs={\"StackName\": self.stack.external_name},\n )", "def describe_resources(self):\n self.logger.debug(\"%s - Describing stack resources\", self.stack.name)\n try:\n response = self.connection_manager.call(\n service=\"cloudformation\",\n command=\"describe_stack_resources\",\n kwargs={\"StackName\": self.stack.external_name},\n )\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Message\"].endswith(\"does not exist\"):\n return {self.stack.name: []}\n raise\n\n self.logger.debug(\n \"%s - Describe Stack resource response: %s\", self.stack.name, response\n )\n\n desired_properties = [\"LogicalResourceId\", \"PhysicalResourceId\"]\n\n formatted_response = {\n self.stack.name: [\n {k: v for k, v in item.items() if k in desired_properties}\n for item in response[\"StackResources\"]\n ]\n }\n return formatted_response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure that all the objects in the object_dep list remain in the database. Call the callback if this isn't true.
def ensure_object_permanence(self, object_dep, cb): if object_dep is None or cb is None: return self.ensuring_objects = True self.ensuring_object_cb = cb self.ensuring_object_dep = object_dep
[ "def check(self):\r\n gc.collect()\r\n dead = self.allNames[:]\r\n alive = []\r\n for k in self.objs:\r\n dead.remove(k)\r\n alive.append(k)\r\n print(\"Deleted objects:\", dead)\r\n print(\"Live objects:\", alive)", "def _objects_changed(self, old, new):\n if self.is_active:\n parent = self.parent\n if parent is not None:\n with parent.children_event_context():\n new_set = set(new)\n if self.destroy_old:\n for obj in old:\n if obj not in new_set:\n obj.destroy()\n else:\n for obj in old:\n if obj not in new_set:\n obj.set_parent(None)\n if new_set:\n parent.insert_children(self, self.objects)", "def _objects_items_changed(self, event):\n if self.is_active:\n parent = self.parent\n if parent is not None:\n with parent.children_event_context():\n add_set = set(event.added)\n if self.destroy_old:\n for obj in event.removed:\n if obj not in add_set:\n obj.destroy()\n else:\n for obj in event.removed:\n if obj not in add_set:\n obj.set_parent(None)\n if add_set:\n parent.insert_children(self, self.objects)", "def at_object_delete(self):\r\n return True", "def commit(self):\n while self._objects_to_commit:\n _, object = self._objects_to_commit.popitem()\n if object._p_changed or object._p_serial == NEW:\n self._store_object(object)", "def request_save_objects(self):\n if len(self.objects) >= self.batch_size:\n self.save_objects()", "def at_post_object_leave(self, obj):\n # Try removing the object from the coordinates system\n if loc := self.db.itemcoordinates.pop(obj, None):\n # The object was removed successfully\n # Make sure there was a room at that location\n if room := self.db.rooms.get(loc):\n # If so, try to clean up the room\n self._destroy_room(room)", "def clean_orphan_obj_perms():\n from grontextual.models import UserObjectGroup\n\n deleted = 0\n # TODO: optimise\n for uog in UserObjectGroup.objects.all():\n if uog.content_object is None:\n logger.debug(\"Removing %s (pk=%d)\" % (uog, uog.pk))\n uog.delete()\n deleted += 1\n logger.info(\"Total removed orphan object permissions instances: %d\" %\n deleted)\n return deleted", "def check_conflicts(self, objects):\n with self.__lock:\n procedures = self._check_conflicts(objects)\n _LOGGER.debug(\n \"The procedures (%s) have locked or are willing to lock \"\n \"one of the objects in (%s).\", procedures, objects\n )\n return procedures", "def _clean(self):\n\t\tto_clean = [x for x in self.obj if\n\t\t x.parent and x.parent.name != self.filename.split('.')[0]]\n\t\tdeselect_all()\n\t\tfor mesh in to_clean:\n\t\t\ttry:\n\t\t\t\tmesh.select_set(True)\n\t\t\t\tbpy.ops.object.delete()\n\t\t\texcept Exception:\n\t\t\t\tpass", "def _check_batch_dependencies(self, batch, committed_txn_cache):\n for txn in batch.transactions:\n if not self._check_transaction_dependencies(txn,\n committed_txn_cache):\n # if any transaction in this batch fails the whole batch\n # fails.\n committed_txn_cache.remove_batch(batch)\n return False\n # update so any subsequent txn in the same batch can be dependent\n # on this transaction.\n committed_txn_cache.add_txn(txn.header_signature)\n return True", "def test_delitem_existing_dependent(self):\n self.assertIn('energy', self.record.curve_sets['cs1']['dependent'])\n del self.record.curve_set_values.cs1['energy']\n self.assertNotIn('energy', self.record.curve_sets['cs1']['dependent'])", "def _break_conflicts(self, objects):\n assert(isinstance(objects, set))\n procedures = self._check_conflicts(objects)\n for procedure in procedures:\n requested_objects, thread, condition = self.__procedures[procedure]\n 
assert(len(requested_objects & objects) != 0)\n self._dequeue(procedure)\n if thread and threading.current_thread() != thread:\n try:\n _utils.async_raise(thread.ident, _errors.LockManagerError)\n except (ValueError, SystemError):\n _LOGGER.debug(\n \"Error trying to notify thread (%s) that holds locks \"\n \"for procedure (%s).\", thread, procedure\n )\n if condition:\n with condition:\n condition.notify_all()\n return procedures", "def _raise_list(self, objects, above=None):\n d = {}\n for object in objects: d[object] = 1\n\n # Objects that aren't in the list.\n new_objects = []\n for object in self._objects:\n if not d.has_key(object): new_objects.append(object)\n\n # Put the list in the right place.\n if above: idx = new_objects.index(above)+1\n else: idx = len(new_objects)\n new_objects[idx:idx] = objects\n\n # And install.\n self._objects = new_objects", "def force_clean(self, caller=True):\n if caller:\n self.to_graph_objs() # TODO add error handling here!\n for entry in self:\n entry.force_clean(caller=False)\n del_indicies = [index for index, item in enumerate(self)\n if len(item) == 0]\n del_ct = 0\n for index in del_indicies:\n del self[index - del_ct]\n del_ct += 1", "def force_clean(self, **kwargs):\n for entry in self:\n entry.force_clean()\n del_indicies = [index for index, item in enumerate(self)\n if len(item) == 0]\n del_ct = 0\n for index in del_indicies:\n del self[index - del_ct]\n del_ct += 1", "def clean(self):\n with StepReferenceView.last_clean_lock:\n if datetime.today() - StepReferenceView.last_clean < timedelta(seconds=StepReferenceView.CLEAN_EVERY_SECONDS):\n return\n StepReference.objects.filter(date__lt=datetime.today() - timedelta(seconds=StepReferenceView.DELETE_AFTER)).delete()\n StepReferenceView.last_clean = datetime.today()\n logging.info('deleting old references')", "def processMemVarDependencies(self):\n debug(\"CCCCCCCCCCCCCCCCCCCC CHECKING MEMVAR DEPENDENCIES CCCCCCCCCCCCCCCCCCCCCCCCCCC\")\n for r in self.regions:\n for a in r.model.agents:\n a.validateReferenceDependencies()\n a.getVarInitOrder()\n debug(\"CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC\")", "def clean_object_within_county(self, related_objects):\n if self.geolocation_object.us_county.id in related_objects.filter(\n geolocation_type__model='uscounty').values_list(\n 'geolocation_id', flat=True):\n raise ValidationError(\n _(\"Cannot add %s when %s is already related.\" % (\n self.geolocation_object,\n self.geolocation_object.us_county)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the position of a fake perception object.
def set_fake_position(self, pos): raise NotImplementedError()
[ "def set_player_position(self, position):", "def set_random_position(self):\n self.random_coords = self.shape.\\\n set_random_position(self.field_width - 15)\n\n self.x = self.random_coords + 1\n self.y = 1\n\n self.move(self.x, self.y)\n self.update()", "def test_position(self):\n self.assertEqual(self.label.position, (0.1, 0.2))\n\n self.label.position = (0.4, 0.5)\n self.assertEqual(self.label.position, (0.4, 0.5))\n\n self.label._set_position((0.3, 0.6))\n self.assertEqual(self.label._get_position(), (0.3, 0.6))", "def setCVPosition(*args, **kwargs):\n \n pass", "def set_position(self, position):\n self.__send_command(CommandsBytes.SET_POSITION)\n self.__send_vector3(position)\n result = self.__receive_string()\n if result != \"ok\":\n print(\"Error setting position\")", "def set_position(self, position):\n self.mediaplayer.set_position(position / 1000.0)", "def set_eye_position(self, position):\n self.queue_event(self.EVENT_EYE_POSITION, position)", "def set_position(self, point):\r\n point = p2e._base._util.scale_1000(point)\r\n \r\n arg_str = p2e._base._util._convert_args_to_string(\"set.node.position\", \r\n self._node._eco_id, \r\n point[0], \r\n point[1], \r\n point[2])\r\n p2e._app.Exec(arg_str)", "def reset_position(self):\n self.xyz = self.xyz + self.tfm", "def setPosition(self, p: 'SbVec2s') -> \"void\":\n return _coin.SoEvent_setPosition(self, p)", "def test_positions_my_position(self):\n pass", "def setPosition(self, x, y):\n\n\t\tself.rect.left = x\n\t\tself.rect.top = y", "def update_position(self, x, y):\n self.x = x\n self.y = y", "def setposition(self, value):\n self._dev.Adjust(value)", "def setEyePoint(*args, **kwargs):\n \n pass", "def set_cover_position(ent, position) -> None:\n ent._values[\"current_cover_position\"] = position", "def setup_method(self, object):\n self.testList = []\n for count in range(50):\n self.testList.append(\"Item\" + str(count))\n\n self.positionController = PositionController(items=self.testList, itemsPerPage=5,\n pagesShownAtOnce=4)", "def set_zero_point(self):\n self.current_position = 0.0\n self.goal_position = 0.0", "def set_pose(self, pose):\n self._cmd.enable_position_controller = True\n self._cmd.pose = pose\n\n self._apply_command()\n self._pose_cmd_set = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the test coverage of Matrix.cpp is 100%
def test_Matrix_coverage(self): coverage = "not found" for l in check_output(["python3", "coverage.py", "-r", ".", "-f", "Matrix.cpp"]).split("\n"): if l.startswith("Matrix.cpp"): coverage = l.split()[3] self.assertEqual(coverage, "100%", msg="Test coverage is not 100%")
[ "def test_correct_estimates(self):\n self.assertEqual(self.ajive.common.rank, 1)\n self.assertEqual(self.ajive.blocks['x'].individual.rank, 1)\n self.assertEqual(self.ajive.blocks['y'].individual.rank, 2)", "def test_sim_i_all():\n input_df = pd.read_csv(data_path + \"/playground_df_cleaned_kegg_with_smiles.csv\")\n test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))\n metric = pd.DataFrame()\n assert metric.empty == True, \"\"\"ShapeError, input metric dataframe\n should be initialized as empty\"\"\"\n for index, row in test_df.iterrows():\n assert cheminform.sim_i_all(test_df, index, row, metric) == None, \"\"\"OutputError, function\n shouldn't return anything\"\"\"\n assert metric[index].all() >= 0 and metric[index].all() <= 1.0, \"\"\"ValueError,\n metric should be between 0 and 1\"\"\"\n return \"3/3 Tests successful\"", "def test_values(self):\n\t\ttest = (self.genes_lowcoverage['% Coverage at 30x'] == 100).any()\n\t\tif test == True:\n\t\t\tsys.stdout.write(\"check output\")", "def test_occupancy_calc1(self):\n occ = calculateOccupancy(np.array([1,0,0]),np.array([1,1,1]),self.params)\n self.assertTrue(occ[0] == 0)", "def testC(self):\n np.random.seed(0)\n mat = random((NROW, NCOL))\n self.assertTrue(mat.flags['C_CONTIGUOUS'])\n mat_out = normalize_logspace(mat)\n row_sum = mat_out.sum(1)\n approx_equal = arrays_almost_equal(row_sum, np.ones(NROW),\n accuracy=ACC)\n self.assertTrue(approx_equal)", "def test_model_performance(self):\n\t\tself.load_data()\n\t\tself.load_model()\n\t\tthreshold = 0.78 #0.78 to pass - change to 0.90 to deliberate fail test and therefore faild cloud build\n\t\tscore = self.model.score(self.X_test, self.y_test)\n\t\tis_above_threshold = True if score >= threshold else False\n\t\tassert is_above_threshold is True", "def test_occupancy_calc2(self):\n occ = calculateOccupancy(np.array([1,1,1]),np.array([1,1,1]),self.params)\n self.assertTrue(occ[0] == 0.5)", "def test_004_adjacency_matrix_ok(self):\n\n print(\"Test Four... 
To show that distance.pdist function calculates correctly on a pdb.cif file\")\n\n with open('./extracted_test_data/1j5a.cif') as infile:\n target_list = infile.read().split('\\n')\n df_1 = pd.DataFrame(data=target_list, columns=[\"header\"]) # Put list in a dataframe m X 1 column\n df_1 = df_1[:-1] # Removes additional row that is included\n cif_to_df_2 = df_1.header.str.split(expand=True) # Put dataframe to m x 20 columns\n critical_info_to_df_3 = cif_to_df_2.drop(columns=[0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # df containing aa & coordinate positions\n convert_to_array = critical_info_to_df_3.drop(columns=[5], axis=1).to_numpy() # Removes aa flag & contains only coordinate info\n calculate_distances = distance.pdist(convert_to_array, 'euclidean')\n make_square = distance.squareform(calculate_distances)\n\n for i in range(0,make_square.shape[1]):\n print(make_square[i,i])\n self.assertEqual(make_square[i,i], 0)", "def test_sim_i_j():\n input_df = pd.read_csv(data_path + \"/playground_df_cleaned_kegg_with_smiles.csv\")\n test_df = cheminform.fingerprint_products(cheminform.input_data(input_df))\n A = test_df.iloc[0]\n #B = test_df.iloc[1]\n #C = test_df.iloc[2]\n assert cheminform.sim_i_j(A, A) == 1, \"Self correlation is broken\"\n #assert metamoles.sim_i_j(A, B) == -1, \"Standard correlation is broken\"\n #assert metamoles.sim_i_j(A, C) == 0, \"Standard correlation is broken\"\n return '1/1 tests successful'", "def test_mine_count(self):\n test_board = MynesBoard()\n count = 0\n for x in range(test_board.width):\n for y in range(test_board.height):\n if test_board.board[y][x].value == -1:\n count += 1\n self.assertEqual(count, test_board.mine_count)", "def test_is_perfect(self):\n sonar = FakeSonar()\n project = domain.Project(metric_sources={metric_source.Sonar: sonar})\n product = domain.Product(metric_source_ids={sonar: \"sonar id\"})\n violations = self.metric_class(subject=product, project=project)\n self.assertEqual('perfect', violations.status())", "def test_basics(self):\n self.report('Testing adding data, evaluation and marginal likelihood.' +\n ' Probabilistic test, might fail.')\n num_coeffs_vals = [2, 1, 4, 5] * 5\n num_tests = 0\n num_successes = 0\n for dataset in self.datasets:\n for dist_type in self.dist_types:\n for kernel_type in self.kernel_types[dist_type]:\n curr_num_coeffs = num_coeffs_vals.pop(0)\n curr_gp = build_nngp_with_dataset(dataset, kernel_type, curr_num_coeffs,\n dist_type)\n # Predictions & Marginal likelihood\n curr_preds, _ = curr_gp.eval(dataset[2], 'std')\n curr_gp_err = compute_average_prediction_error(dataset, curr_preds)\n const_err = compute_average_prediction_error(dataset, dataset[1].mean())\n lml = curr_gp.compute_log_marginal_likelihood()\n is_success = curr_gp_err < const_err\n num_tests += 1\n num_successes += is_success\n self.report(('(%s, ntr=%d, nte=%d):: GP-lml=%0.4f, GP-err=%0.4f, ' +\n 'Const-err=%0.4f. 
succ=%d')%(dataset[-1][:5], len(dataset[0]),\n len(dataset[2]), lml, curr_gp_err, const_err, is_success),\n 'test_result')\n succ_frac = num_successes / float(num_tests)\n self.report('Summary: num_successes / num_floats = %d/%d = %0.4f'%(num_successes,\n num_tests, succ_frac), 'test_result')\n assert succ_frac > 0.5", "def test_corr(self):\n assert False", "def test_training_testing_uniform_coverage():\n # set up test\n X = normal( size=(1000, 10) )\n X[:, 0] = np.arange(1000)\n y = np.arange(1000)\n data = Data( X, y, add_constant=False )\n s = Ridge( data )\n s.define_training_and_testing_regions( 0.2, 60 )\n # lengths of training and testing are correct\n assert s.T == 1000\n assert s.T_training == 800\n assert s.T_testing == 200\n # training and testing are disjoint sets\n count = zeros( 1000, dtype=int )\n for sl in s.testing_slices + s.training_slices:\n count[sl] += 1\n assert np.all( count == 1 )\n # slicing by training / testing works\n y_training = s.slice_by_training( y )\n y_testing = s.slice_by_testing( y )\n y_both = np.concatenate([ y_training, y_testing ])\n assert ( np.sort(y_both) == y ).all()\n X_training = s.slice_by_training( X )[:, 0]\n X_testing = s.slice_by_testing( X )[:, 0 ]\n X_both = np.concatenate([ X_training, X_testing ])\n assert ( np.sort(X_both) == X[:, 0] ).all()\n # uniform probability of coverage\n count = zeros( 1000, dtype=float )\n N_reps = 1000\n for _ in range(N_reps):\n s.define_training_and_testing_regions( 0.2, 60 )\n for sl in s.testing_slices:\n count[sl] += 1\n count /= N_reps\n assert np.std(count) < 0.05\n assert np.min(count) > 0.1\n assert np.max(count) < 0.3", "def test_has_matrix_true(self):\n\n class MyOp(qml.operation.Operator):\n num_wires = 1\n\n @staticmethod\n def compute_matrix():\n return np.eye(2)\n\n assert MyOp.has_matrix\n assert MyOp(wires=0).has_matrix", "def test_basic(self):\n\n result = GeneratePercentilesFromANeighbourhood(2000).make_percentile_cube(\n self.cube\n )\n self.assertIsInstance(result, Cube)", "def test_num_sections() -> None:\n assert a2_courses.num_sections(CSC110) == 1", "def verify(self):\n for i in self.coords:\n if np.abs(6*i-int(6*i))>0.1: return False\n if np.abs(self.coords[2]+self.coords[0]+self.coords[1]) > 0.1: return False\n return True", "def test_handcrafted_examples(self):\n self.assertTrue(abs(pi(1000000) - 3.14) < 0.01)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save matrix M to file using the specified format
def save(self, M, filename): m, n = M.shape np.savetxt(filename, M, fmt='%d', header="{} {}".format(m, n), comments='')
[ "def save_M(M, f_out):\n _ATOM = '%s%5i %-4s%3s %c%4i%c %8.3f%8.3f%8.3f%6.2f%6.2f %4s%2s%2s\\n'\n\n def get_ATOM_line(atom_i, name, resid, x, y, z, aa_type):\n \"\"\"\n Write PDB ATOM line.\n \"\"\"\n args=('ATOM ', atom_i, name, aa_type, 'A', resid, ' ', x, y, z, 0.0, 0.0, 'X', ' ', ' ')\n s = _ATOM % args\n return s\n\n fp = open(f_out, 'w')\n for i in range(0, M.shape[0]):\n x, y, z = M[i]\n s = get_ATOM_line(i, 'CA', i, x, y, z, 'ALA') \n fp.write(s)\n fp.close()", "def _write_matrix(matrix, output_matrix):\n numpy.savetxt(output_matrix, matrix, delimiter=' ', newline='\\n')", "def save_sparse_matrix(data,fmt,filepath):\n if fmt == 'tsv':\n m = data.tocoo()\n with open(filepath,'w') as out:\n for u,i,v in izip(m.row,m.col,m.data):\n print >>out,'{0}\\t{1}\\t{2}'.format(u+1,i+1,v)\n elif fmt == 'csv':\n m = data.tocoo()\n with open(filepath,'w') as out:\n for u,i,v in izip(m.row,m.col,m.data):\n print >>out,'{0},{1},{2}'.format(u+1,i+1,v)\n elif fmt == 'mm':\n mmwrite(filepath,data)\n elif fmt == 'npz':\n savez(data.tocoo(),filepath)\n elif fmt == 'fsm':\n fast_sparse_matrix(data).save(filepath)\n else:\n raise ValueError('unknown output format: {0}'.format(fmt))", "def save_txt(output_path, matrix):\n np.savetxt(output_path, matrix)", "def save_mat(ar, filename):\n mat_dict = {'data': ar}\n sio.savemat(filename, mat_dict, do_compression=True)\n return", "def save_moc_matrix(moc_matrix, outfile):\n moc_matrix.to_csv(outfile, sep=\",\", header=True, index=True)\n print(f\"MoC matrix saved in {outfile}.\")", "def save_data(self, matrix, file_name, header = \"\"):\r\n formatted_header = \"\"\r\n np.set_printoptions(suppress=True,\r\n formatter={'float_kind':'{:f}'.format})\r\n if(isinstance(header, list)):\r\n for i in range(len(header)):\r\n header_el = header[i]\r\n missing_spaces = self.check_for_length(header[i])\r\n formatted_header = formatted_header + header[i] + \" \"*missing_spaces \r\n else:\r\n formatted_header = header\r\n \r\n f = open(file_name, \"w\")\r\n f.write(formatted_header + os.linesep)\r\n missing_spaces = np.zeros(matrix.shape[0])\r\n for i in range(matrix.shape[1]): \r\n write_string = \"\"\r\n for j in range(matrix.shape[0]):\r\n missing_space = self.check_for_length(matrix[j,i])\r\n missing_spaces[j] = missing_space\r\n write_string = write_string + \"{:.12f}\".format(matrix[j,i])+\" \"*missing_space\r\n f.write(write_string + os.linesep)\r\n f.close()", "def write_numpy_matrix_csv(matrix, output_file):\n\n numpy.savetxt(output_file, matrix, delimiter=\";\")", "def f_write_raw_mat(data, filename, data_format='f4', end='l'):\n if not isinstance(data, np.ndarray):\n print(\"Error write_raw_mat: input should be np.array\")\n return False\n f = open(filename,'wb')\n if len(data_format)>0:\n if end=='l':\n data_format = '<'+data_format\n elif end=='b':\n data_format = '>'+data_format\n else:\n data_format = '='+data_format\n datatype = np.dtype(data_format)\n temp_data = data.astype(datatype)\n else:\n temp_data = data\n temp_data.tofile(f,'')\n f.close()\n return True", "def save_matrix(self, matrix):\n print(\"dumping \")\n path = self._create_path(self.dataset)\n print(path)\n print(matrix.sum())\n np.save(path, matrix)\n print(\"dumped to %s\" % path)", "def _write_niftyreg_matrix(matrix, txt_path):\n matrix = np.linalg.inv(matrix)\n np.savetxt(txt_path, matrix, fmt='%.8f')", "def save_csv(self, filename) -> None:\n np.savetxt(filename, self.matrix, fmt='%d', delimiter=',',\n header=','.join(self.class_names))", "def export_matrix(self):\n self.matrix_filename = 
f'similarity_matrix_{self.m1}_{self.m2}_{self.type}_{self.parce}_{self.net}_{self.corr}'\n\n self.path_matrix_final = f'{self.output}/similarity_matrices/{self.type}/{self.parce}/{self.corr}'\n if not os.path.exists(self.path_matrix_final):\n os.makedirs(self.path_matrix_final)\n\n with open(f'{self.path_matrix_final}/{self.matrix_filename}', 'w') as f:\n np.savetxt(f, self.sim_matrix, fmt='%1.3f')\n f.flush\n \n return None", "def writeMatrix(header_rows,header_cols,matrix,matrixFile,precision=4):\n \n nrows=len(header_rows)\n ncols=len(header_cols)\n \n # interaction matrix output\n out_fh=gzip.open(matrixFile,\"wb\")\n \n # write matrix col headers\n header=[str(i) for i in header_cols]\n print(str(nrows)+\"x\"+str(ncols)+\"\\t\"+\"\\t\".join(header),file=out_fh)\n\n format_func=(\"{:0.\"+str(precision)+\"f}\").format\n k=0\n \n for i in xrange(nrows):\n print(header_rows[i]+\"\\t\"+\"\\t\".join(map(format_func,matrix[i,:])),file=out_fh)\n \n out_fh.close()", "def save(self, filename=None, mode=\"homer\", usePFM=False):\n assert filename, \"no filename specified\"\n\n matrix_to_use = self.__matrix\n if usePFM:\n assert self.__original_PFM is not None, \"pwm.save: No PFM is avaialble for this pwm\"\n matrix_to_use = self.__original_PFM\n\n if mode == \"homer\":\n oh = open(filename, \"w\")\n\n oh.write(\">%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.name, self.name, 0, 0, 0, \"T:0(0),B:0(0),P(0)\"))\n for i in matrix_to_use:\n nl = numpy.array([0.0, 0.0, 0.0, 0.0]) if sum(i) == 0 else i/float(sum(i))\n print(nl)\n oh.write(\"%s\\n\" % \"\\t\".join([str(b) for b in nl])) \n\n elif mode == \"counts\":\n oh = open(filename, \"w\")\n\n oh.write(\">%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" % (self.name, self.name, 0, 0, 0, \"T:0(0),B:0(0),P(0)\"))\n for i in matrix_to_use:\n oh.write(\"%s\\n\" % \"\\t\".join(str(b) for b in nl)) \n\n return(None)", "def save_gmm(gmm, filename):\n with open(filename, 'w') as _file:\n gmm_write(gmm, _file)", "def encode_file(matrix, fp):\n fp.write(IdxEncoder().write(matrix))", "def save_mat_df(df, error, filename):\n output = {'x': df}\n if error is not None:\n output['errors'] = error\n sio.savemat(filename, output)", "def save_mat(args, filename: t.Union[Path, str], scan: torch.Tensor):\n scan = scan.permute((1, 2, 3, 0))\n assert scan.shape == args.original_shape\n flair = utils.to_numpy(scan[0, ...])\n t1 = utils.to_numpy(scan[1, ...])\n t2 = utils.to_numpy(scan[2, ...])\n savemat(filename, {\"FLAIRarray\": flair, \"T1array\": t1, \"T2array\": t2})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize a ProtocolEngine transport for use in a child thread. This adapter allows a client to make blocking command calls on its thread to the asynchronous ProtocolEngine running with an event loop in a different thread.
def __init__(self, engine: ProtocolEngine, loop: AbstractEventLoop) -> None:
    self._engine = engine
    self._loop = loop
[ "def _InitTransport(self):\n if self.transport is None:\n self.transport = \\\n self.transport_class(self._GetAddress(),\n timeouts=self.timeouts,\n allow_non_master=self.allow_non_master)", "def startProtocol(self):\n reactor.resolve(self.host).addCallback(self.connectTransport)", "def startProtocol(self):\n self.transport.connect(self.host, self.port)\n logging.info(\"Connect with %s:%d\" % (self.host, self.port))", "def __init__(self, connection_control, handler, queue):\n threading.Thread.__init__(self)\n self.connection_control = connection_control\n self.handler = handler\n self.queue = queue", "def test_makeConnection(self):\n\n class TestProtocol(Protocol):\n transport = None\n\n def makeConnection(self, transport):\n self.transport = transport\n\n server = TestProtocol()\n client = TestProtocol()\n loopback.loopbackAsync(server, client)\n self.assertIsNotNone(client.transport)\n self.assertIsNotNone(server.transport)", "def __init__(self): \n threading.Thread.__init__(self)\n self.opc_client = Client(\"opc.tcp://10.0.0.57:4048\") # Connecting OPC Server Running ON Laptop\n self.opc_client.connect()\n self.initiate_nodes() # Instantiating Nodes", "def initialize(self) -> None:\n conn = self.optionally_wrap_socket(self.client.connection)\n conn.setblocking(False)\n if self.encryption_enabled():\n self.client = TcpClientConnection(conn=conn, addr=self.client.addr)\n if b'HttpProtocolHandlerPlugin' in self.flags.plugins:\n for klass in self.flags.plugins[b'HttpProtocolHandlerPlugin']:\n instance = klass(\n self.uid,\n self.flags,\n self.client,\n self.request,\n self.event_queue)\n self.plugins[instance.name()] = instance\n logger.debug('Handling connection %r' % self.client.connection)", "async def connect(self) -> None:\n udp_client_factory = UDPClient.UDPClientFactory(\n self.local_addr[0],\n multicast=self.multicast,\n data_received_callback=self.data_received_callback,\n )\n loop = asyncio.get_running_loop()\n if self.multicast:\n sock = UDPClient.create_multicast_sock(self.local_addr[0], self.remote_addr)\n (transport, _) = await loop.create_datagram_endpoint(\n lambda: udp_client_factory, sock=sock\n )\n self.transport = transport\n\n else:\n (transport, _) = await loop.create_datagram_endpoint(\n lambda: udp_client_factory,\n local_addr=self.local_addr,\n remote_addr=self.remote_addr,\n )\n self.transport = transport", "def __init__(self):\n super(TNL3ServicePlugin, self).__init__()\n self._tn_info = None\n # self._driver = None\n self.task_manager = tasks.TaskManager()\n self.task_manager.start()\n self.tn_init()", "async def _ensure_global_protocol_started(cls) -> None:\n if not cls.__protocol__:\n loop = asyncio.get_running_loop()\n _, cls.__protocol__ = await loop.create_datagram_endpoint(\n lambda: SmartglassProtocol(),\n family=socket.AF_INET,\n allow_broadcast=True\n )", "def __init__(self, timeouts=None, transport=t.Transport,\n allow_non_master=False):\n self.timeouts = timeouts\n self.transport_class = transport\n self.allow_non_master = allow_non_master\n self.transport = None\n # The version used in RPC communication, by default unused:\n self.version = None", "def __init__(self, master, host = \"\", port = 54321, timeout = 0.2):\n threading.Thread.__init__(self)\n self.master = master\n self.deamon = True\n self.connections = []\n self.commandRequests = collections.deque()\n self.commandResponses = collections.deque()\n self.timeout = timeout\n self.ca = ConnectionAcceptor(self, host, port)\n self.ca.start()", "def __init__(self, *args):\n 
_ida_pro.__qthread_t_swiginit(self, _ida_pro.new___qthread_t(*args))", "def __init__(self, protocol):\r\n self.ttype_step = 0\r\n self.protocol = protocol\r\n self.protocol.protocol_flags['TTYPE'] = {\"init_done\": False}\r\n # is it a safe bet to assume ANSI is always supported?\r\n self.protocol.protocol_flags['TTYPE']['ANSI'] = True\r\n # setup protocol to handle ttype initialization and negotiation\r\n self.protocol.negotiationMap[TTYPE] = self.will_ttype\r\n # ask if client will ttype, connect callback if it does.\r\n self.protocol.do(TTYPE).addCallbacks(self.will_ttype, self.wont_ttype)", "def _set_engine(self):\n self.engine = Engine(endpoint=self._socket, inc_queue=self._inc_mq_b,\n out_queue=self._out_mq_b)", "def connection_setup(self):\n\n self.logger.debug(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr,\n HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n\n # create the poller\n if self.poller is None:\n self.poller = self.dispatcher.get_poll()\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, self.dispatcher.EV_FD_IN, self.host_management_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, self.dispatcher.EV_FD_IN, self.host_management_cb)", "def connect_thread():\n return factory.connect_thread(SlaveService, remote_service=SlaveService)", "async def StartAsyncTlsServer( # pylint: disable=invalid-name,dangerous-default-value\n context=None,\n identity=None,\n address=None,\n sslctx=None,\n certfile=None,\n keyfile=None,\n password=None,\n reqclicert=False,\n allow_reuse_address=False,\n custom_functions=[],\n **kwargs,\n):\n server = ModbusTlsServer(\n context,\n kwargs.pop(\"framer\", ModbusTlsFramer),\n identity,\n address,\n sslctx,\n certfile,\n keyfile,\n password,\n reqclicert,\n allow_reuse_address=allow_reuse_address,\n **kwargs,\n )\n await _serverList.run(server, custom_functions)", "def create_client(client_klass, *, host=None, port=None, loop=None):\n if not loop:\n loop = asyncio.get_event_loop()\n transport, protocol = yield from loop.create_connection(\n TAsyncioServer.ThriftClientProtocolFactory(client_klass),\n host=host, port=port)\n return protocol_manager(protocol)", "def _prepare_transport(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a view of the Protocol Engine's state.
def state(self) -> StateView:
    return self._engine.state_store
[ "def get_state(self):\n return self.StateEngine(self.symbols)", "def get_state(self):\n return self._skuld.cmd(SkuldCmd(name='get_state',\n args=None, block=True))", "def get_state(self):\n \n return self._instance.state", "def v(self):\n return self.state", "def get_state(self):\n return self.get_state_changes()", "def get_behavior_state_rpc(token):\n return runtime.get_behavior_state(token)", "def _state(self) -> Dict[str, Any]:\n return self._fetcher.get_cached_state(self)", "def get_ovp_state(self):\r\n ovp_state = str(self.inst.query(\"VOLT:PROT:STAT?\"))\r\n return(ovp_state)", "def state(self):\n return self._state_env.state", "def get_robot_state(self):", "def show_state():\n resultjson = _run_speedify_cmd([\"state\"])\n return find_state_for_string(resultjson[\"state\"])", "def get_state(self):\n if self.state:\n return self.state\n\n from timon.state import TMonState\n self.state = state = TMonState(self.cfg['statefile'], config=self)\n return state", "def get_current_state(self):\n s = RobotState()\n c_str = self._g.get_current_state()\n conversions.msg_from_string(s, c_str)\n return s", "def state(self):\n return Projector.State(self._send_msg())", "def vpp_show_lisp_state(node):\n\n vat = VatExecutor()\n vat.execute_script_json_out('lisp/show_lisp_status.vat',\n node)\n return JsonParser().parse_data(vat.get_script_stdout())", "def state(self):\n\n try:\n out = self.__get_facts()\n except VsanNotPresent:\n return None\n if out:\n return out.get(get_key(vsankeys.VSAN_STATE, self._SW_VER))\n return None", "async def get_state(self, **kwargs: Any) -> ModuleState:\n return self._state", "def get_env_state(self) -> np.ndarray:\n return self.env.env.state", "def get_state_detail (self, ttype=None) :\n return self._adaptor.get_state_detail (ttype=ttype)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Benchmarks argmax over fractions.
def bench_argmax_over_fracs(num_values):
    fractions = MultiArray(sizes=[num_values, 3], value_type=sint)
    fractions.assign_all(1)
    argmax_over_fracs(fractions)
[ "def maximum_basic(a: float, b: float) -> float:", "def realmax():\n return np.finfo(float).max", "def max(x):\n\treturn np.max(x)", "def argmaxn(arr, num_vals):\n return arr.argsort()[-num_vals:][::-1]", "def test_maximum():\n test_maximum_case(0, [0, 0, 0], 0)\n test_maximum_case(1, [2, 0, 0], 2)\n test_maximum_case(2, [1, 2, 1], 2)\n test_maximum_case(3, [4, 5, 6], 6)\n test_maximum_case(4, [4.5, 5.1, 6.7], 6.7)\n test_maximum_case(5, [], None)", "def get_max_with_many_arguments(*args):\n result = args[0]\n for num in args:\n if (num > result):\n result = num\n return result", "def max_after_zero(x: np.ndarray) -> int:\n pass", "def max(array, axis=0):\n\tpass", "def hasMax(*args, **kwargs):\n \n pass", "def _tensor_max(*args):\n maximum, *rest = args\n for arg in rest:\n maximum = maximum.max(arg)\n return maximum", "def maxValue(max=None):", "def argmax(array: list) -> int:\n index, value = max(enumerate(array), key=lambda x: x[1])\n return index", "def find_max_global(*args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def get_max_with_one_or_more_arguments(first, *args):\n result = first\n for num in args:\n if (num > result):\n result = num\n return result", "def test_g():\n y = 5\n true_fval = 5.0\n true_xf = -0.0\n xf, fval, info = brent_max(g, -10, 10, args=(y,))\n assert_almost_equal(true_fval, fval, decimal=4)\n assert_almost_equal(true_xf, xf, decimal=4)", "def rargmax(vector):\n m = np.amax(vector)\n indices = np.nonzero(vector == m)[0]\n return rn.choice(indices)", "def maximum_ternary(a: float, b: float) -> float:", "def _get_max_value(x):\n return int(max(x.asnumpy()))", "def argmax( indices, A, column, f=abs ):\n i_max= indices[0]\n for i in indices[1:]:\n if f(A[i][column]) > f(A[i_max][column]):\n i_max= i\n return i_max" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load logging.json config and set specified logging settings.
def setup_logging():
    with open(CONFIG_JSON_PATH) as f:
        logging_config = json.load(f)

    if DEBUG:
        for logger_name, logger_info in logging_config["loggers"].items():
            logger_info["level"] = "DEBUG"

    logging.config.dictConfig(logging_config)
[ "def load_logging_config():\n log_config_path = os.path.join(constants.CORE_CONF_DIR, \"logging.conf\")\n with open(log_config_path, \"r\") as log_config_file:\n log_config = json.load(log_config_file)\n logging.config.dictConfig(log_config)", "def configure_logging(logging_config: dict[str, Any]) -> None:\n LOGGING.configure(logging_config)", "def load_logging(logging_conf):\r\n if not os.path.exists(logging_conf):\r\n raise ValueError(f'cannot load logging config from {logging_conf} (does not exist)')\r\n\r\n ext = os.path.splitext(logging_conf)[1]\r\n if ext != '.json':\r\n raise ValueError(f'expected logging config is json file but got {logging_conf}')\r\n\r\n with open(logging_conf, 'r') as infile:\r\n config = json.load(infile)\r\n logging.config.dictConfig(config)", "def _set_logging(self):\n logging.basicConfig(**self.settings[\"general\"][\"logging\"])\n log.info(\n \"Setting logging config: {!r}\".format(self.settings[\"general\"][\"logging\"])\n )", "def setup_log(config_path='logging.yaml'):\n if os.path.exists(config_path):\n with open(config_path, 'rt') as f:\n if config_path.endswith('yaml'):\n logging.config.dictConfig(yaml.load(f))\n elif config_path.endswith('json'):\n logging.config.dictConfig(json.load(f))\n log = logging.getLogger(__name__)\n log.debug('Logger setup from configuration file: {}'.format(config_path))\n else:\n print 'Error retrieving logging file!'", "def _configure_logging(config_file):\n if not has_toml:\n raise ImportError(\n \"A TOML parser is required to enable PennyLane logging defaults. \"\n \"We support any of the following TOML parsers: [tomli, tomlkit, tomllib] \"\n \"You can install either tomli via `pip install tomli`, \"\n \"tomlkit via `pip install tomlkit`, or use Python 3.11 \"\n \"or above which natively offers the tomllib library.\"\n )\n with open(os.path.join(_path, config_file), \"rb\") as f:\n pl_config = tomllib.load(f)\n logging.config.dictConfig(pl_config)", "def set_logging_config():\n log_format = '%(asctime)-20s %(levelname)-9s %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n # Only log Error/Info/Critical to the console, but log Everything to the file\n logging.basicConfig(level=logging.DEBUG,\n format=log_format,\n datefmt=date_format,\n filename=\"convert_utilization_settings_{}.log\".format(\n datetime.now().strftime(\"%Y-%m-%d_%H-%M\")))\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter(fmt=log_format, datefmt=date_format))\n logging.getLogger().addHandler(console)", "def logging_init():\n # Default logging levels. 
These can be overridden when the config file is loaded.\n logging.getLogger().setLevel(logging.WARNING)\n logging.getLogger('neocommon').setLevel(logging.INFO)\n logging.getLogger('fetch').setLevel(logging.INFO)\n\n # Add logging handlers\n logging.getLogger().addHandler(_LOG_HANDLER)", "def setup_logger():\n logger_path = os.path.join(os.path.dirname(__file__), '../logger.yaml')\n\n with open(logger_path, 'r') as file:\n config = yaml.safe_load(file.read())\n logging.config.dictConfig(config)", "def setup_logging(self):\n logfile = self.configuration['options'].get('logfile', None)\n if logfile and isinstance(logfile, basestring):\n ch = logging.FileHandler(logfile)\n\n level = self.configuration['options'].get('loglevel', None)\n if not level:\n level = 'INFO'\n\n ch.setLevel({\n 'DEBUG': logging.DEBUG,\n 'INFO': logging.INFO,\n 'WARNING': logging.WARNING,\n 'ERROR': logging.ERROR,\n 'CRITICAL': logging.CRITICAL,\n }.get(level, logging.INFO))\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)", "def configure_logging(self) -> None:\n\n # type checking does not seem to recognize that config\n # and homedir were validated/coerced in __init__\n assert self.config is not None\n assert isinstance(self.homedir, Path)\n\n # Convert loglevel text to log type\n loglevel = eval(\"logging.\" + self.config.main.loglevel.upper())\n\n logfile = self.homedir / (self.name + \".log\")\n logging.basicConfig(filename=str(logfile), level=loglevel)", "def _setup_logging(args):\n\n if args.logconf is None:\n level = (50 - (10 * args.verbose)) \n logging.basicConfig(format=LOG_FORMAT, level=level)\n logging.getLogger(TSV2NICECXMODULE).setLevel(level)\n logger.setLevel(level)\n return\n # logconf was set use that file\n logging.config.fileConfig(args.logconf, disable_existing_loggers=False)", "def _logging_config(my_args) -> dict:\n file_log_path_full = my_args.log_file_path\n filename = (\n Path(file_log_path_full) / _LOG_FILE_NAME\n if file_log_path_full\n else Path.cwd() / _LOG_FILE_NAME\n )\n print(f\"Writing logfile to: {filename}\")\n return {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"console\": {\"format\": \"[%(levelname)s] %(message)s\"},\n \"file\": {\"format\": \"%(asctime)s [%(levelname)s] %(name)s: %(message)s\"},\n },\n \"handlers\": {\n \"console\": {\n \"level\": my_args.log_console_level,\n \"formatter\": \"console\",\n \"class\": \"logging.StreamHandler\",\n \"stream\": \"ext://sys.stdout\", # Default is stderr\n },\n \"file\": {\n \"level\": my_args.log_file_level,\n \"formatter\": \"file\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"filename\": str(filename),\n \"maxBytes\": 1024 * 1024 * 10,\n \"backupCount\": 10,\n },\n },\n \"loggers\": {\n \"\": { # root logger\n \"handlers\": [\"console\", \"file\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n \"discord\": { # discord.py library\n \"handlers\": [\"console\", \"file\"],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n }", "def __init__(self):\n path = os.environ.get(\n \"KEDRO_LOGGING_CONFIG\", Path(__file__).parent / \"default_logging.yml\"\n )\n logging_config = Path(path).read_text(encoding=\"utf-8\")\n self.configure(yaml.safe_load(logging_config))", "def modify(config):\n\n global _CONFIG\n\n new_config = copy.deepcopy(_CONFIG)\n logging.config.dictConfig(new_config)\n _CONFIG = merge_dict(new_config, config)", "def setup_logging():\n client = logging.Client()\n 
client.get_default_handler()\n client.setup_logging()", "def configure_logging(logging_conf_paths, logging_config_override=None):\n global logging_was_configured\n logging_was_configured = True\n\n for path in logging_conf_paths:\n try:\n config.add_configuration(path)\n except Exception, e:\n print 'WARNING: could not load logging configuration file %s: %s' % (path, e)\n if logging_config_override:\n try:\n config.add_configuration(logging_config_override)\n except Exception,e:\n print 'WARNING: failed to apply logging override %r: %e' % (logging_config_override,e)\n\n # direct warnings mechanism to loggers\n logging.captureWarnings(True)", "def get_logging_conf() -> dict:\n filename = (\n \"/home/as/mosquito_monitor.log\"\n )\n\n dir_name = osp.dirname(osp.normpath(filename))\n\n pathlib.Path(dir_name).mkdir(parents=True, exist_ok=True)\n\n try:\n logging_conf = {\n \"version\": 1,\n \"formatters\": {\n \"simple\": {\n \"format\": \"%(levelname)-6s :: %(name)-5s :: \"\n \"%(funcName)-5s :: %(message)s\"\n },\n \"precise\": {\n \"format\": \"%(asctime)s :: %(levelname)-6s :: %(name)-5s ::\"\n \" %(funcName)-5s :: %(message)s\"\n },\n 'json_formatter': {\n 'format': '%(message)s %(lineno)d '\n '%(funcName)s %(filename)s',\n 'class': 'pythonjsonlogger.jsonlogger.JsonFormatter'\n }\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"simple\"\n },\n 'json': {\n 'formatter': 'json_formatter',\n 'backupCount': 4,\n 'class': 'logging.handlers.TimedRotatingFileHandler',\n 'encoding': 'ASCII',\n 'filename': filename,\n 'interval': 1,\n 'when': 'midnight',\n 'level': 'DEBUG'\n }\n },\n \"loggers\": {\n \"MOSQUITO_MONITOR\": {\n \"level\": \"DEBUG\",\n \"propagate\": \"no\",\n \"handlers\": [\"json\", \"console\"]\n },\n \"local_mqtt_client.local_mqtt_client\": {\n \"level\": \"DEBUG\",\n \"propagate\": \"no\",\n \"handlers\": [\"json\", \"console\"]\n }\n }\n }\n except SyntaxError as invalid_syntax_exception:\n raise ConfigException(\n \"Invalid config provided, {}\".format(invalid_syntax_exception)\n )\n else:\n configure(\n context_class=threadlocal.wrap_dict(dict),\n logger_factory=stdlib.LoggerFactory(),\n wrapper_class=stdlib.BoundLogger,\n processors=[\n stdlib.filter_by_level,\n stdlib.add_logger_name,\n stdlib.add_log_level,\n stdlib.PositionalArgumentsFormatter(),\n processors.TimeStamper(fmt='iso'),\n processors.StackInfoRenderer(),\n processors.format_exc_info,\n processors.UnicodeDecoder(),\n stdlib.render_to_log_kwargs,\n ]\n )\n\n return logging_conf", "def configure_logging(add_handler):\n _ignore_log_keys = set(logging.makeLogRecord({}).__dict__)\n\n def _json_format(record):\n extras = ' '.join(\n \"%s=%s\" % (k, record.__dict__[k])\n for k in set(record.__dict__).difference(_ignore_log_keys))\n if extras:\n record.msg = \"%s %s\" % (record.msg, extras)\n return record\n\n class JsonFormatter(logging.Formatter):\n def format(self, record):\n record = _json_format(record)\n return super(JsonFormatter, self).format(record)\n\n if not log.handlers:\n if add_handler is True:\n _h = logging.StreamHandler()\n _h.setFormatter(JsonFormatter())\n log.addHandler(_h)\n elif isinstance(add_handler, logging.Handler):\n log.addHandler(add_handler)\n else:\n log.addHandler(logging.NullHandler())\n log.setLevel(logging.DEBUG)\n log.propagate = False\n return log" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Example particle HDF5 file generated by OSIRIS 4.4.4. The associated data types are taken from an example output file.
def make_osiris_444_particles_hdf(path: Path, data: np.ndarray, name: str):
    # makes sure we have data with a 'charge'
    if "q" not in data.dtype.fields:
        raise ValueError("structured dataset with a field 'q' required")

    with h5.File(path, mode="w") as fp:
        # root attrs
        fp.attrs["NAME"] = np.array([name], dtype="|S256")
        fp.attrs["TYPE"] = np.array(["particles"], dtype="|S9")
        fp.attrs["ITER"] = np.array([12345], dtype="i4")
        fp.attrs["TIME"] = np.array([-321.9], dtype="f4")
        fp.attrs["TIME UNITS"] = np.array([b"time unit"], dtype="|S256")

        # other quantities
        for field in data.dtype.fields:
            d = data[field]
            quants = fp.create_dataset(field, data=d)
            quants.attrs["LONG_NAME"] = np.array([f"{field} label"], dtype="|S256")
            quants.attrs["UNITS"] = np.array([f"{field} unit"], dtype="|S256")

        # tags
        tags = np.arange(len(data) * 2, dtype="i4").reshape((len(data), 2))
        fp.create_dataset("tag", data=tags)
[ "def make_osiris_dev_particles_hdf(path: Path, data: np.ndarray, name: str):\n # makes sure we have data with a 'charge'\n if \"q\" not in data.dtype.fields:\n raise ValueError(\"structured dataset with a field 'q' required\")\n\n with h5.File(path, mode=\"w\") as fp:\n # root attrs\n fp.attrs[\"NAME\"] = np.array([name], dtype=\"|S256\")\n fp.attrs[\"TYPE\"] = np.array([\"particles\"], dtype=\"|S9\")\n fp.attrs[\"ITER\"] = np.array([12345], dtype=\"i4\")\n fp.attrs[\"TIME\"] = np.array([-321.9], dtype=\"f4\")\n fp.attrs[\"TIME UNITS\"] = np.array([b\"time unit\"], dtype=\"|S256\")\n\n data_fields = data.dtype.fields\n fp.attrs[\"QUANTS\"] = np.array([str.encode(f) for f in data_fields])\n fp.attrs[\"LABELS\"] = np.array([str.encode(f\"{f} label\") for f in data_fields])\n fp.attrs[\"UNITS\"] = np.array([str.encode(f\"{f} unit\") for f in data_fields])\n\n # other quantities\n for field in data.dtype.fields:\n d = data[field]\n fp.create_dataset(field, data=d)", "def build_example(n_points=1000):\n\n x=np.linspace(1,3,n_points)\n y=np.linspace(1,3,n_points)\n z=p_known(x,y)\n zerr= np.random.rand(n_points)\n \n data=np.column_stack([x,y,z,zerr])\n \n header='# density Temperature property sigma_property'\n np.savetxt('input_example.dat',data, header=header)", "def transfer_to_h5part(plot_config):\n run_dir = plot_config[\"run_dir\"]\n tindex = plot_config[\"tframe\"] * plot_config[\"tinterval\"]\n ptl_vel = plot_config[\"ptl_vel\"]\n fname = (run_dir + 'data_' + str(tindex) + '_' +\n str(ptl_vel) + 'c/particle_diagnostics.h5')\n file = h5py.File(fname,'r')\n group = file['/particles_fields']\n dset_ptl = group['particles']\n dset_emf = group['fields']\n sz, = dset_ptl.shape\n tinterval_traj = get_traj_tinterval(run_dir)\n nsteps_tot = get_num_steps(run_dir)\n if nsteps_tot > 1E6:\n nsteps_tot = int(1E6)\n ntraj = nsteps_tot // tinterval_traj + 1\n nptl = sz / ntraj\n fname_out = (run_dir + 'data_' + str(tindex) + '_' +\n str(ptl_vel) + 'c/particle_diagnostics.h5part')\n print(fname_out)\n toffset = 10\n with h5py.File(fname_out, 'w') as fh_out:\n for tf in range(0, ntraj, toffset):\n print(\"Time frame: %d\" % tf)\n x = np.array(dset_ptl['x'][tf::ntraj])\n y = np.array(dset_ptl['y'][tf::ntraj])\n z = np.array(dset_ptl['z'][tf::ntraj])\n ux = np.array(dset_ptl['ux'][tf::ntraj])\n uy = np.array(dset_ptl['uy'][tf::ntraj])\n uz = np.array(dset_ptl['uz'][tf::ntraj])\n gamma = np.sqrt(1.0 + ux**2 + uy**2 + uz**2)\n t = np.array(dset_ptl['t'][tf::ntraj])\n Ex = np.array(dset_emf['Ex'][tf::ntraj])\n Ey = np.array(dset_emf['Ey'][tf::ntraj])\n Ez = np.array(dset_emf['Ez'][tf::ntraj])\n Bx = np.array(dset_emf['Bx'][tf::ntraj])\n By = np.array(dset_emf['By'][tf::ntraj])\n Bz = np.array(dset_emf['Bz'][tf::ntraj])\n grp = fh_out.create_group('Step#' + str(tf//toffset))\n grp.create_dataset('x', (nptl, ), data=x)\n grp.create_dataset('y', (nptl, ), data=y)\n grp.create_dataset('z', (nptl, ), data=z)\n grp.create_dataset('ux', (nptl, ), data=ux)\n grp.create_dataset('uy', (nptl, ), data=uy)\n grp.create_dataset('uz', (nptl, ), data=uz)\n grp.create_dataset('gamma', (nptl, ), data=gamma)\n grp.create_dataset('t', (nptl, ), data=t)\n grp.create_dataset('Ex', (nptl, ), data=Ex)\n grp.create_dataset('Ey', (nptl, ), data=Ey)\n grp.create_dataset('Ez', (nptl, ), data=Ez)\n grp.create_dataset('Bx', (nptl, ), data=Bx)\n grp.create_dataset('By', (nptl, ), data=By)\n grp.create_dataset('Bz', (nptl, ), data=Bz)", "def parse_single_12tped_to_hdf5(in_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n 
out_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n impute_type='mode', filter_monomorphic_snps=True,\n missing_val_thr=0.1):\n \n print 'Starting to parse genotypes'\n genotype_data = {}\n h5py_file = h5py.File(out_file_prefix + '.hdf5')\n genotype_data['hdf5p_file'] = h5py_file\n genot_group = h5py_file.create_group('genot_data')\n indiv_group = h5py_file.create_group('indiv_data')\n \n \n tot_num_snps = 0\n tot_num_missing_val_snps_removed = 0\n tot_num_ambiguous_loc_removed = 0\n curr_chrom = 1\n print 'Working on chromosome %d' % curr_chrom\n \n g_filename = '%s.tped' % (in_file_prefix) \n s_filename = '%s.bim' % (in_file_prefix)\n i_filename = '%s.tfam' % (in_file_prefix) \n\n \n \n indiv_ids = []\n phenotypes = [] \n sex = []\n print 'Parsing individuals file: %s' % i_filename\n with open(i_filename) as f:\n for line in f:\n l = line.split()\n iid = l[0]\n indiv_ids.append(iid)\n sex.append(int(l[4]))\n phenotypes.append(float(l[5]))\n tot_num_indiv = len(indiv_ids) \n \n print 'Storing individual data in individ. group'\n indiv_group.create_dataset('indiv_ids', data=indiv_ids)\n indiv_group.create_dataset('sex', data=sex)\n indiv_group.create_dataset('phenotypes', data=phenotypes)\n \n \n \n num_indiv = len(indiv_ids)\n print 'Found %d Individuals' % (num_indiv)\n\n print 'Parsing nucleotide map'\n nt_map = {}\n chromsomoes = []\n curr_chrom = 0\n with open(s_filename) as f:\n for line in f:\n l = line.split()\n chrom = l[0]\n if chrom != curr_chrom:\n chromsomoes.append(chrom)\n curr_chrom = chrom\n nt_map[l[1]] = (l[4], l[5]) \n assert len(chromsomoes) == len(set(chromsomoes)), 'Chromosomes need to be in order.'\n curr_chrom = chromsomoes[0]\n \n position = -1\n # Initializing containers.\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n t0 = time.time()\n\n print 'Starting to parse SNP files'\n gf = open(g_filename)\n for g_line in gf:\n# if random.random() > 0.01:\n# continue\n gl = g_line.split()\n chrom = gl[0]\n if chrom != curr_chrom:\n \n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % curr_chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse Chromosome %s.' 
% (t / 60, t % 60, curr_chrom)\n t0 = time.time()\n\n \n\n # Reset containers\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_ambiguous = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n \n curr_chrom = chrom\n\n sid = gl[1]\n prev_position = position\n position = int(gl[3])\n\n # Skipping unmappable locations\n if position == prev_position:\n num_ambiguous_loc_removed += 1\n continue\n if position == 0:\n num_ambiguous_loc_removed += 1\n continue\n\n nt = nt_map[sid]\n \n snp0 = sp.array(map(int, (g_line.strip()).split()[4:]), 'int8')\n a = sp.arange(tot_num_indiv * 2)\n even_map = a % 2 == 0\n odd_map = a % 2 == 1\n snp = snp0[even_map] + snp0[odd_map] - 2\n snp[snp < 0] = 9\n \n bin_counts = sp.bincount(snp)\n \n\n if len(bin_counts) > 3:\n missing_count = bin_counts[-1]\n # Filtering SNPs with too many missing values\n if missing_count > missing_val_thr * 2 * num_indiv:\n num_missing_removed += 1\n continue\n elif impute_type == 'mode':\n nt_counts = bin_counts[:3] \n v = sp.argmax(nt_counts)\n snp[snp == 9] = v\n bin_counts = sp.bincount(snp)\n else:\n raise Exception('Imputation type is unknown')\n else:\n missing_count = 0\n\n assert len(bin_counts) < 4, 'Issues with nucleotides.'\n nt_counts = bin_counts[:3] \n if len(nt_counts) == 2:\n nt_counts = sp.array([nt_counts[0], nt_counts[1], 0])\n elif len(nt_counts) == 1:\n nt_counts = sp.array([nt_counts[0], 0, 0])\n \n\n # Removing monomorphic SNPs\n if filter_monomorphic_snps:\n if max(nt_counts) == sum(nt_counts):\n num_monomorphic_removed += 1\n continue\n \n freq = sp.mean(snp) / 2.0 \n snps_mat.append(snp)\n positions.append(position)\n sids.append(sid)\n nts_list.append(nt)\n nt_counts_list.append(nt_counts)\n missing_counts.append(missing_count)\n freqs.append(freq) \n\n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.create_dataset('num_snps', data=sp.array(tot_num_snps))\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse chromosome %s.' 
% (t / 60, t % 60, chrom)\n\n \n gf.close()\n \n print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps\n print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed\n print 'Total number of SNPs removed due to ambiguous locations: %d' % tot_num_ambiguous_loc_removed\n h5py_file.close()\n \n print 'Done parsing genotypes.'", "def example_bed_l2_h5():\n yield h5py.File(\"tests/test_data/example_test_2label.h5\", \"r\")", "def temp_emsoft_h5ebsd_file(tmpdir, request):\n f = File(tmpdir.join(\"emsoft_h5ebsd_file.h5\"), mode=\"w\")\n\n # Unpack parameters\n map_shape, (dy, dx), example_rotations, n_top_matches, refined = request.param\n ny, nx = map_shape\n map_size = ny * nx\n\n # Create groups used in reader\n ebsd_group = f.create_group(\"Scan 1/EBSD\")\n data_group = ebsd_group.create_group(\"Data\")\n header_group = ebsd_group.create_group(\"Header\")\n phase_group = header_group.create_group(\"Phase/1\") # Always single phase\n\n # Create `header_group` datasets used in reader\n for name, data, dtype in zip(\n [\"nRows\", \"nColumns\", \"Step Y\", \"Step X\"],\n [ny, nx, dy, dx],\n [np.int32, np.int32, np.float32, np.float32],\n ):\n header_group.create_dataset(name, data=np.array([data], dtype=dtype))\n\n # Create `data_group` datasets, mostly quality metrics\n data_group.create_dataset(\"X Position\", data=np.tile(np.arange(nx) * dx, ny))\n # Note that \"Y Position\" is wrongly written to their h5ebsd file by EMsoft\n data_group.create_dataset(\n \"Y Position\",\n data=np.tile(np.arange(nx) * dx, ny), # Wrong\n # data=np.sort(np.tile(np.arange(ny) * dy, nx)), # Correct\n )\n for name, shape, dtype in [\n (\"AvDotProductMap\", map_shape, np.int32),\n (\"CI\", map_size, np.float32),\n (\"CIMap\", map_shape, np.int32),\n (\"IQ\", map_size, np.float32),\n (\"IQMap\", map_shape, np.int32),\n (\"ISM\", map_size, np.float32),\n (\"ISMap\", map_shape, np.int32),\n (\"KAM\", map_shape, np.float32),\n (\"OSM\", map_shape, np.float32),\n (\"Phase\", map_size, np.uint8),\n ]:\n data_group.create_dataset(name, data=np.zeros(shape, dtype=dtype))\n\n # `data_group` with rotations\n # Sample as many rotations from `rotations` as `map_size`\n rot_idx = np.random.choice(np.arange(len(example_rotations)), map_size)\n rot = example_rotations[rot_idx]\n n_sampled_oris = 333227 # Cubic space group with Ncubochoric = 100\n data_group.create_dataset(\"FZcnt\", data=np.array([n_sampled_oris], dtype=np.int32))\n data_group.create_dataset(\n \"TopMatchIndices\",\n data=np.vstack(\n (np.random.choice(np.arange(n_sampled_oris), n_top_matches),) * map_size\n ),\n dtype=np.int32,\n )\n data_group.create_dataset(\n \"TopDotProductList\",\n data=np.vstack((np.random.random(size=n_top_matches),) * map_size),\n dtype=np.float32,\n )\n data_group.create_dataset(\n \"DictionaryEulerAngles\",\n data=np.column_stack(\n (np.random.uniform(low=0, high=2 * np.pi, size=n_sampled_oris),) * 3\n ),\n dtype=np.float32,\n )\n\n if refined:\n data_group.create_dataset(\"RefinedEulerAngles\", data=rot.astype(np.float32))\n data_group.create_dataset(\n \"RefinedDotProducts\", data=np.zeros(map_size, dtype=np.float32)\n )\n\n # Number of top matches kept\n f.create_dataset(\n \"NMLparameters/EBSDIndexingNameListType/nnk\",\n data=np.array([n_top_matches], dtype=np.int32),\n )\n\n # `phase_group`\n for name, data in [\n (\"Point Group\", \"Cubic (Oh) [m3m]\"),\n (\"MaterialName\", \"austenite/austenite\"),\n (\"Lattice Constant a\", \"3.595\"),\n (\"Lattice Constant b\", 
\"3.595\"),\n (\"Lattice Constant c\", \"3.595\"),\n (\"Lattice Constant alpha\", \"90.000\"),\n (\"Lattice Constant beta\", \"90.000\"),\n (\"Lattice Constant gamma\", \"90.000\"),\n ]:\n phase_group.create_dataset(name, data=np.array([data], dtype=np.dtype(\"S\")))\n\n yield f\n gc.collect()", "def writeHD5():\n global Data1\n\n store = HDFStore('.\\store.h5')\n store['listCrisis'] = Data1\n store.close()", "def output_file_setup(model):\n \n filename = model.filename\n\n if os.path.isfile(filename):\n print('\\n'+filename+' already exists, deleting '+filename+'\\n')\n os.remove(filename)\n \n \n \n model.out_file = nc4.Dataset(filename,'w',format='NETCDF4')\n\n model.data_group = model.out_file.createGroup('data')\n model.data_group.createDimension('time',None)\n \n var_dict = model()\n model.save_dict = {}\n sizes = []\n for key in var_dict.keys():\n \n if type(var_dict[key]) in (int,float,np.int64,np.float64):\n s = 1\n elif not isinstance(type(var_dict[key]), (str,np.ndarray)):\n s = len(var_dict[key])\n else:\n pdb.set_trace()\n \n if s not in sizes:\n model.data_group.createDimension(str(s),s)\n \n sizes.append(s)\n \n if s == 1:\n model.save_dict[key] = model.data_group.createVariable(key,'f8',('time','1'))\n else:\n model.save_dict[key] = model.data_group.createVariable(key,'f8',('time',str(s)))\n \n \n \n \n types = (int, float, np.int, np.float, np.ndarray, str)\n \n parameter_group = model.out_file.createGroup('parameters')\n\n for key, value in model.parameters.items():\n if type(value) in types:\n setattr(parameter_group, key, value)\n \n \n return model", "def load_hep_data(self,variables2plot=[]):\n file = uproot.open(self.hep_data)\n data = file[self.treename]\n self.df = data.pandas.df( self.features+['target']+variables2plot )\n #self.df = df.sample(frac=0.2)\n print self.df.dtypes\n\n self.metadata = file['metadata'] # names of samples, target values, etc.\n\n return", "def write(s,filename,header=\"Opacity file written by optool.particle.write\"):\n\n if (s.np>1):\n raise TypeError('Writing is not supported for multi-particle objects')\n try:\n wfile = open(filename, 'w')\n except:\n raise RuntimeError('Cannot write to file: '+filename)\n\n headerlines = header.splitlines()\n for i in range(len(headerlines)):\n wfile.write(\"# %s\\n\" % headerlines[i])\n if s.scat:\n wfile.write(' 0\\n')\n wfile.write(' %d\\n' % s.nlam)\n wfile.write(' %d\\n' % s.nang)\n wfile.write('\\n')\n else:\n wfile.write(' 3\\n')\n wfile.write(' %d\\n' % s.nlam)\n \n for i in range(s.nlam):\n # write the lambda grid and the opacities\n wfile.write(' %15.5e %15.5e %15.5e %15.5e\\n' % (s.lam[i],s.kabs[0,i],s.ksca[0,i],s.gsca[0,i]))\n \n if s.scat:\n # we have a scattering matrix\n wfile.write('\\n')\n # Write the angular grid\n for i in range(s.nang):\n wfile.write(\"%9.2f\\n\" % s.scatang[i])\n wfile.write('\\n')\n # Write the scattering matrix\n for il in range(s.nlam):\n for ia in range(s.nang):\n wfile.write(' %15.5e %15.5e %15.5e %15.5e %15.5e %15.5e\\n' %\n (s.f11[0,il,ia],s.f12[0,il,ia],s.f22[0,il,ia],\n s.f33[0,il,ia],s.f34[0,il,ia],s.f44[0,il,ia]))\n wfile.close()", "def writeMetaDataOfExperiments(experimentData,path,dict):\n \n if 'exp_type' in dict:\n exp_type = dict['exp_type']\n else:\n print(\"Error: No experiment type has been chosen. 
Choose either ViSDEM or SaMSEM.\")\n return\n \n if 'filename' in dict:\n filename = dict['filename']\n else:\n filename = 'meta-data'\n path_out = os.path.join(path, filename)\n \n if exp_type == \"visdem\":\n f = open(path_out,'w+')\n f.write(\"ViSDEM meta data\\n\")\n sessions = sorted(set(experimentData['session_id']))\n f.write('... # of sessions: '+str(len(sessions))+' ; '+str(sessions)+'\\n')\n sets = sorted(set(experimentData['set_id']))\n f.write('... # of sets: '+str(len(sets))+' ; '+str(sets)+'\\n')\n motives = sorted(set(experimentData['motive_id']))\n f.write('... # of motives: '+str(len(motives))+' ; '+str(motives)+'\\n')\n daltonizations = sorted(set(experimentData['dalt_id']))\n f.write('... # of daltonization ids: '+str(len(daltonizations))+' ; '+str(daltonizations)+'\\n')\n observers = sorted(set(experimentData['observer_id']))\n f.write(\"... # of observers: \" +str(len(observers))+' ; '+str(observers)+'\\n')\n observers_norm = sorted(set(experimentData[experimentData['observer_coldef_type']==0]['observer_id']))\n f.write(\"...... # of normal observers: \"+str(len(observers_norm))+' ; '+str(observers_norm)+'\\n')\n observers_prot = sorted(set(experimentData[experimentData['observer_coldef_type']==1]['observer_id']))\n f.write(\"...... # of protan observers: \"+str(len(observers_prot))+' ; '+str(observers_prot)+'\\n') \n observers_deut = sorted(set(experimentData[experimentData['observer_coldef_type']==2]['observer_id']))\n f.write(\"...... # of deutan observers: \"+str(len(observers_deut))+' ; '+str(observers_deut)+'\\n') \n f.close()\n elif exp_type == 'samsem':\n f = open(path_out,'w+')\n f.write(\"SaMSEM meta data\\n\")\n sessions = sorted(set(experimentData['session_id']))\n f.write('... # of sessions: '+str(len(sessions))+' ; '+str(sessions)+'\\n')\n images = sorted(set(experimentData['image_id']))\n f.write('... # of images: '+str(len(images))+' ; '+str(images)+'\\n')\n simulations = sorted(set(experimentData['sim_id']))\n f.write('... # of simulations: '+str(len(simulations))+' ; '+str(simulations)+'\\n')\n observers = sorted(set(experimentData['observer_id']))\n f.write(\"... # of observers: \" +str(len(observers))+' ; '+str(observers)+'\\n')\n observers_norm = sorted(set(experimentData[experimentData['observer_coldef_type']==0]['observer_id']))\n f.write(\"...... # of normal observers: \"+str(len(observers_norm))+' ; '+str(observers_norm)+'\\n')\n observers_prot = sorted(set(experimentData[experimentData['observer_coldef_type']==1]['observer_id']))\n f.write(\"...... # of protan observers: \"+str(len(observers_prot))+' ; '+str(observers_prot)+'\\n')\n observers_deut = sorted(set(experimentData[experimentData['observer_coldef_type']==2]['observer_id']))\n f.write(\"...... # of deutan observers: \"+str(len(observers_deut))+' ; '+str(observers_deut)+'\\n')\n else:\n print(\"Error: No valid experiment format has been chosen. Choose either visdem or samsem.\")\n return", "def load_eigenstrat_genotypes(in_file_prefix='eigenstrat_file_prefix',\n out_file_prefix='hdf5_file_prefix',\n impute_type='mode',\n filter_monomorphic_snps=True,\n missing_val_thr=0.1):\n import h5py\n import scipy as sp\n import os\n import sys\n \n data_file_prefix = '%s_mv%0.2f_imp_%s.' 
% (out_file_prefix, missing_val_thr, impute_type) \n \n genotype_data = {}\n \n # Setting the HDF5 file up\n h5py_file_name = data_file_prefix + 'h5py'\n if os.path.isfile(h5py_file_name):\n print 'Overwriting: %s' % h5py_file_name\n os.remove(h5py_file_name)\n h5py_file = h5py.File(h5py_file_name)\n genotype_data['h5py_file'] = h5py_file_name\n \n \n # Fill out individuals data, if available\n i_filename = '%sind' % (in_file_prefix)\n if os.path.isfile(i_filename):\n iids = []\n phens = []\n genders = []\n with open(i_filename) as f:\n for line in f:\n l = (line.strip()).split()\n iids.append(l[0])\n genders.append(l[1])\n phens.append(l[2])\n ind_group = h5py_file.create_group('indivs')\n ind_group.create_dataset('indiv_ids', data=iids)\n ind_group.create_dataset('sex', data=genders)\n ind_group.create_dataset('phenotype', data=phens)\n else:\n print 'Individual information file not found: %s' % i_filename\n \n tot_num_snps = 0\n tot_num_duplicated_snps_removed = 0\n tot_num_missing_val_snps_removed = 0\n tot_num_monomorphic_snps_removed = 0\n \n \n # Open the genotype files.\n s_filename = '%ssnp' % (in_file_prefix) \n g_filename = '%sgeno' % (in_file_prefix)\n print 'Starting to parse files:\\n\\t %s \\n\\t %s' % (s_filename, g_filename)\n sf = open(s_filename) \n gf = open(g_filename) \n \n\n # Figure out sample size, number of SNPs, etc. \n # Initialize HDF5 file.\n\n # Setting up containers.\n curr_chrom = 1\n curr_hdf5_group = h5py_file.create_group('chrom_%d' % curr_chrom)\n snps_mat = []\n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_duplicated_snps_removed = 0\n\n print 'Starting to parse SNP files'\n for s_line in sf:\n g_line = gf.next()\n sl = s_line.split()\n pos = int(sl[3])\n chrom = int(sl[1])\n sid = sl[0]\n\n if chrom != curr_chrom:\n # Report statistics and store stuff\n print 'Finished with Chromosome %d' % curr_chrom\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of duplicated SNPs removed: %d' % num_duplicated_snps_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n snps = sp.array(snps_mat, dtype='int8')\n curr_hdf5_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_file.flush()\n print 'Raw SNPs stored'\n snps = snps.T\n snps = (snps - sp.mean(snps, 0)) / sp.std(snps, 0)\n curr_hdf5_group.create_dataset('snps', compression='lzf', data=snps.T)\n h5py_file.flush()\n print 'Normalized SNPs stored'\n del snps\n del snps_mat\n curr_hdf5_group.create_dataset('positions', compression='lzf', data=positions)\n curr_hdf5_group.create_dataset('nts', compression='lzf', data=nts_list)\n curr_hdf5_group.create_dataset('nt_counts', compression='lzf', data=sp.array(nt_counts_list))\n curr_hdf5_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n curr_hdf5_group.create_dataset('freqs', compression='lzf', data=freqs)\n curr_hdf5_group.create_dataset('snp_ids', compression='lzf', data=sids) \n h5py_file.flush()\n sys.stdout.flush()\n\n # Reset containers\n curr_chrom = chrom\n curr_hdf5_group = h5py_file.create_group('chrom_%d' % curr_chrom)\n snps_mat = []\n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_duplicated_snps_removed = 0\n \n \n # Debug filter\n \n nt = 
(sl[4], sl[5])\n\n snp = sp.array(map(int, g_line.strip()), dtype='int8')\n num_indiv = len(snp)\n bin_counts = sp.bincount(snp)\n# print bin_counts\n missing_count = bin_counts[-1]\n\n # Filtering SNPs with too many missing values\n if missing_count > missing_val_thr * 2 * num_indiv:\n num_missing_removed += 1\n tot_num_missing_val_snps_removed += 1\n continue\n\n nt_counts = list(bin_counts[:3])\n # Imputing the SNPs roughly by replacing missing values with the mode value.\n if impute_type == 'mode':\n v = sp.argmax(nt_counts)\n snp[snp == 9] = v\n else:\n raise Exception('Imputation type is unknown')\n\n bin_counts = sp.bincount(snp)\n nt_counts = list(bin_counts[:3])\n # Removing monomorphic SNPs\n if max(nt_counts) == sum(nt_counts):\n num_monomorphic_removed += 1\n tot_num_monomorphic_snps_removed += 1\n continue\n if len(nt_counts) == 2:\n nt_counts.append(0)\n \n# assert len(nt_counts) == 3, 'ARrrg' \n\n # Is this position already there?\n if len(positions) > 0 and pos == positions[-1]:\n num_duplicated_snps_removed += 1\n tot_num_duplicated_snps_removed += 1\n continue\n \n freq = sp.mean(snp) / 2.0 \n snps_mat.append(snp)\n positions.append(pos)\n sids.append(sid)\n nts_list.append(nt)\n nt_counts_list.append(nt_counts)\n missing_counts.append(missing_count)\n freqs.append(freq)\n\n tot_num_snps += 1\n \n\n\n # Report statistics and store stuff\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of duplicated SNPs removed: %d' % num_duplicated_snps_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n snps = sp.array(snps_mat, dtype='int8')\n curr_hdf5_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_file.flush()\n print 'Raw SNPs stored'\n snps = snps.T\n snps = (snps - sp.mean(snps, 0)) / sp.std(snps, 0)\n curr_hdf5_group.create_dataset('snps', compression='lzf', data=snps.T)\n h5py_file.flush()\n print 'Normalized SNPs stored'\n del snps\n del snps_mat\n curr_hdf5_group.create_dataset('positions', compression='lzf', data=positions)\n curr_hdf5_group.create_dataset('nts', compression='lzf', data=nts_list)\n curr_hdf5_group.create_dataset('nt_counts', compression='lzf', data=sp.array(nt_counts_list))\n curr_hdf5_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n curr_hdf5_group.create_dataset('freqs', compression='lzf', data=freqs)\n curr_hdf5_group.create_dataset('snp_ids', compression='lzf', data=sids) \n \n \n gf.close()\n sf.close()\n \n print 'Genotypes for %d individuals were parsed.' 
% num_indiv\n print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps\n print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed\n print 'Total number of SNPs removed due to monomorphicity: %d' % tot_num_monomorphic_snps_removed\n print 'Total number of duplicated SNPs removed: %d' % tot_num_duplicated_snps_removed\n h5py_file.close()\n sys.stdout.flush()\n \n print 'Done parsing genotypes.'", "def test_int_data_types(tmp_path):\n test_file = GeneratedFile()\n test_file.add_segment(\n (\"kTocMetaData\", \"kTocRawData\", \"kTocNewObjList\"),\n segment_objects_metadata(\n channel_metadata(\"/'group'/'i8'\", 1, 4),\n channel_metadata(\"/'group'/'u8'\", 5, 4),\n channel_metadata(\"/'group'/'i16'\", 2, 4),\n channel_metadata(\"/'group'/'u16'\", 6, 4),\n channel_metadata(\"/'group'/'i32'\", 3, 4),\n channel_metadata(\"/'group'/'u32'\", 7, 4),\n channel_metadata(\"/'group'/'i64'\", 4, 4),\n channel_metadata(\"/'group'/'u64'\", 8, 4),\n ),\n \"01 02 03 04\"\n \"01 02 03 04\"\n \"01 00 02 00 03 00 04 00\"\n \"01 00 02 00 03 00 04 00\"\n \"01 00 00 00 02 00 00 00 03 00 00 00 04 00 00 00\"\n \"01 00 00 00 02 00 00 00 03 00 00 00 04 00 00 00\"\n \"01 00 00 00 00 00 00 00 02 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00\"\n \"01 00 00 00 00 00 00 00 02 00 00 00 00 00 00 00 03 00 00 00 00 00 00 00 04 00 00 00 00 00 00 00\"\n )\n\n tdms_data = test_file.load()\n h5_path = tmp_path / 'h5_data_test.h5'\n h5 = tdms_data.as_hdf(h5_path)\n\n for chan, expected_dtype in [\n ('i8', np.dtype('int8')),\n ('u8', np.dtype('uint8')),\n ('i16', np.dtype('int16')),\n ('u16', np.dtype('uint16')),\n ('i32', np.dtype('int32')),\n ('u32', np.dtype('uint32')),\n ('i64', np.dtype('int64')),\n ('u64', np.dtype('uint64'))]:\n h5_channel = h5['group'][chan]\n assert h5_channel.dtype == expected_dtype\n np.testing.assert_almost_equal(h5_channel[...], [1, 2, 3, 4])\n h5.close()", "def e5loadhalo(id1, t, idarr=None, dir=0):\n\t\n\tt = str(int(t)) # Make sure it's a string of an integer\n\t\n\tif type(dir) != str:\n\t\tdir = '/Users/astevens/Documents/6-MonthProject/e5/' # Default directory for files\n\t\n\tz, Ntot, Nfiles, boxsize, Omega_M, Omega_L, h = e5snaphead('snapshot_'+t, dir) # Read in header info\n\t\n\tif idarr==None:\n\t\tf = open(dir+'snapshotids_'+t,'rb')\n\t\tf.seek(4*sum(Ntot))\n\t\tidarr = np.fromfile(f, 'i4', sum(Ntot)) # Subhalo IDs\n\t\tf.close()\n\t\n\tprint(t, id1, idarr, len(idarr))\n\tidargs = np.argwhere(idarr==id1) # Arguments where idarr is equal to id1\n\t\n\tcon_g = (idargs < Ntot[0]) # Condition to find the total number of gas particles in the halo\n\tcon_dm = (idargs >= Ntot[0]) * (idargs < sum(Ntot[:2])) # Condition ... dark matter\n\tcon_s = (idargs >= sum(Ntot[:2])) * (idargs < sum(Ntot[:5])) # Condition ... stars\n\tcon_bh = (idargs >= sum(Ntot[:5])) # Condition ... 
black holes\n\t#print con_bh\n\t\n\tNptot = np.array([len(idargs[con_g]), len(idargs[con_dm]), 0, 0, len(idargs[con_s]), len(idargs[con_bh])]) # Total number of particles in the halo in particle type\n\tprint('Nptot is', Nptot)\n\tNaccum, Npaccum = np.zeros(6), np.zeros(6) # Initialize the accumulation of N\n\t\n\t# Initialize arrays that will contain the particle data\n\tpos_g, pos_dm, pos_s, pos_bh = np.zeros((Nptot[0],3)), np.zeros((Nptot[1],3)), np.zeros((Nptot[4],3)), np.zeros((Nptot[5],3))\n\tvel_g, vel_dm, vel_s, vel_bh = np.zeros((Nptot[0],3)), np.zeros((Nptot[1],3)), np.zeros((Nptot[4],3)), np.zeros((Nptot[5],3))\n\tid_g, id_dm, id_s, id_bh = np.zeros(Nptot[0]), np.zeros(Nptot[1]), np.zeros(Nptot[4]), np.zeros(Nptot[5])\n\tmass_g, mass_dm, mass_s, mass_bh = np.zeros(Nptot[0]), np.zeros(Nptot[1]), np.zeros(Nptot[4]), np.zeros(Nptot[5])\n\tu, rho, sfr = np.zeros(Nptot[0]), np.zeros(Nptot[0]), np.zeros(Nptot[0])\n\t\n\tfor fno in range(Nfiles): # fno is file number\n\t\tNbytes = [] # Initialize list that will contain information on the block sizes in each file\n\t\tf = open(dir+'snapshot_'+t+'.'+str(fno), 'rb')\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t# Read header information\n\t\tN = np.fromfile(f, 'u4', 6) # Number of particles for each particle type in this file\n\t\tNsum = sum(N) # Total number of particles in the file\n\t\tmass_pt = np.fromfile(f, 'f8', 6) # Mass of each particle type. If 0 then it varies for each particle of that type\n\t\tNmass = sum(N[np.argwhere(mass_pt==0.0)]) # Number of particles in the file with individual masses to be read in\n\t\ta = np.fromfile(f, 'f8', 1)[0] # Expansion factor (normalised to 1 at z=0)\n\t\tz = np.fromfile(f, 'f8', 1)[0] # Redshift of snapshot\n\t\tflag_sfr = np.fromfile(f, 'i4', 1)[0] # Flag for star formation rate\n\t\tflag_feedback = np.fromfile(f, 'i4', 1)[0] # Flag for feedback\n\t\tNtot = np.fromfile(f, 'u4', 6) # Total number of particles for each particle type in the entire simulation\n\t\tflag_cool = np.fromfile(f, 'i4', 1)[0] # Flag for cooling\n\t\tNfiles = np.fromfile(f, 'i4', 1)[0] # Number of files for each snapshot\n\t\tboxsize = np.fromfile(f, 'f8', 1)[0] # Size of box if periodic boundary conditions are used\n\t\tOmega_M = np.fromfile(f, 'f8', 1)[0] # Omega Matter\n\t\tOmega_L = np.fromfile(f, 'f8', 1)[0] # Omega (Lambda) Dark Energy\n\t\th = np.fromfile(f, 'f8', 1)[0] # Little Hubble h\n\t\tflag_StarAge = np.fromfile(f, 'i4', 1)[0] # Flag for the creation times of stars\n\t\tflag_metals = np.fromfile(f, 'i4', 1)[0] # Flag for metallicity values\n\t\t\n\t\tg_con = (idargs < N[0]+Naccum[0]) * (idargs >= Naccum[0]) # Gas condition to figure out how many particles for the halo are in this file\n\t\tdm_con = (idargs-Ntot[0] < N[1]+Naccum[1]) * (idargs-Ntot[0] >= Naccum[1]) # Dark matter condition...\n\t\ts_con = (idargs-sum(Ntot[:4]) < N[4]+Naccum[4]) * (idargs-sum(Ntot[:4]) >= Naccum[4]) # Star condition...\n\t\tbh_con = (idargs-sum(Ntot[:5]) < N[5]+Naccum[5]) * (idargs-sum(Ntot[:5]) >= Naccum[5]) # Black hole condition...\n\t\t\n\t\tg_args = idargs[g_con] - Naccum[0] # Location of gas particles to extract in the data file\n\t\tdm_args = idargs[dm_con] - Ntot[0] - Naccum[1] # Ditto dark matter\n\t\ts_args = idargs[s_con] - sum(Ntot[:4]) - Naccum[4] # Ditto stars\n\t\tbh_args = idargs[bh_con] - sum(Ntot[:5]) - Naccum[5] # Ditto black holes\n\t\t\n\t\tNparts = np.array([ len(g_args), len(dm_args), 0, 0, len(s_args), len(bh_args) ])\n\t\t\n\t\tNmass_pt = np.zeros(6,dtype='u4')\n\t\tfor i in 
range(6):\n\t\t\tif mass_pt[i]==0: Nmass_pt[i] = N[i]\n\t\t\n\t\t## Extract positions\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + arg*24)\n\t\t\tpos_g[Npaccum[0]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(dm_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + (arg+N[0])*24)\n\t\t\tpos_dm[Npaccum[1]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(s_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + (arg+sum(N[:4]))*24)\n\t\t\tpos_s[Npaccum[4]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(bh_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + (arg+sum(N[:5]))*24)\n\t\t\tpos_bh[Npaccum[5]+i,:] = np.fromfile(f, 'f8', 3)\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*2)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract velocities\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + arg*24)\n\t\t\tvel_g[Npaccum[0]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(dm_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + (arg+N[0])*24)\n\t\t\tvel_dm[Npaccum[1]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(s_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + (arg+sum(N[:4]))*24)\n\t\t\tvel_s[Npaccum[4]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(bh_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + (arg+sum(N[:5]))*24)\n\t\t\tvel_bh[Npaccum[5]+i,:] = np.fromfile(f, 'f8', 3)\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*4)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract IDs\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + arg*8)\n\t\t\tid_g[Npaccum[0]+i] = np.fromfile(f, 'u8', 1)\n\t\tfor i, arg in enumerate(dm_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + (arg+N[0])*8)\n\t\t\tid_dm[Npaccum[1]+i] = np.fromfile(f, 'u8', 1)\n\t\tfor i, arg in enumerate(s_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + (arg+sum(N[:4]))*8)\n\t\t\tid_s[Npaccum[4]+i] = np.fromfile(f, 'u8', 1)\n\t\tfor i, arg in enumerate(bh_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + (arg+sum(N[:5]))*8)\n\t\t\tid_bh[Npaccum[5]+i] = np.fromfile(f, 'u8', 1)\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*6)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract Masses\n\t\tif mass_pt[0]==0:\n\t\t\tfor i, arg in enumerate(g_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + arg*8)\n\t\t\t\tmass_g[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_g[Npaccum[0]:Npaccum[0]+Nparts[0]] = mass_pt[0]*np.ones(Nparts[0])\n\t\t\n\t\tif mass_pt[1]==0:\n\t\t\tfor i, arg in enumerate(dm_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + (arg+Nmass_pt[0])*8)\n\t\t\t\tmass_dm[Npaccum[1]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_dm[Npaccum[1]:Npaccum[1]+Nparts[1]] = mass_pt[1]*np.ones(Nparts[1])\n\t\t\n\t\tif mass_pt[4]==0:\n\t\t\tfor i, arg in enumerate(s_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + (arg+sum(Nmass_pt[:4]))*8)\n\t\t\t\tmass_s[Npaccum[4]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_s[Npaccum[4]:Npaccum[4]+Nparts[4]] = mass_pt[4]*np.ones(Nparts[4])\n\t\t\n\t\tif mass_pt[5]==0:\n\t\t\tfor i, arg in enumerate(bh_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + (arg+sum(Nmass_pt[:5]))*8)\n\t\t\t\tmass_bh[Npaccum[5]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_bh[Npaccum[5]:Npaccum[5]+Nparts[5]] = mass_pt[5]*np.ones(Nparts[5])\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*8)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract gas properties\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*11 + arg*8)\n\t\t\tu[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\t\n\t\tf.seek(sum(Nbytes)+4*10)\n\t\tNbytes += 
list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*13 + arg*8)\n\t\t\trho[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\t\n\t\tf.seek(sum(Nbytes)+4*12)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\tif flag_cool==1:\n\t\t\tf.seek(sum(Nbytes)+4*14)\n\t\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\tf.seek(sum(Nbytes)+4*16)\n\t\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\textra = 16\n\t\telse:\n\t\t\textra = 0\n\n\t\tf.seek(sum(Nbytes) + 4*14 + extra)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*17 + extra + arg*8)\n\t\t\tsfr[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\t##\n\t\t\n\t\tNaccum += N\n\t\tNpaccum += Nparts\n\t\tf.close()\n\t\n\t\n\t## Extract premeasured properties of halo from tab file\n\tNtotgroups, TotNids, Nfiles, NtotSubgroups = e5halotabhead(t=t)\n\tNaccum = 0\n\tfloattype = 'f4'\n\tidtype = 'u8'\n\t\n\tif np.max(idarr) == NtotSubgroups-1: # Determine if the code is getting a group or subhalo\n\t\tfor i in range(Nfiles):\n\t\t\tf = open(dir+'subhalo_tab_'+t+'.'+str(i))\n\t\t\tNgroups = np.fromfile(f, 'i4', 1)[0] # Number of groups in this file\n\t\t\tf.seek(24)\n\t\t\tNsubgroups = np.fromfile(f, 'i4', 1)[0] # Number of subhaloes in this file\n\t\t\t\n\t\t\tif id1<=Naccum+Nsubgroups:\n\t\t\t\tid2 = id1-Naccum # Number of haloes' data to skip (number listed in this file)\n\t\t\t\tf.seek(32 + 19*4*Ngroups)\n\t\t\t\t# Data for subhaloes\n\t\t\t\tNparts = np.fromfile(f, 'i4', Nsubgroups)[id2] # Number of particles in the subhalo\n\t\t\t\tprint('Nparts', Nparts)\n\t\t\t\toffset = np.fromfile(f, 'i4', Nsubgroups)[id2]\n\t\t\t\tparent = np.fromfile(f, 'i4', Nsubgroups)[id2] # Index of parent (neither Yu nor I are sure what this is)\n\t\t\t\tmass = np.fromfile(f, floattype, Nsubgroups)[id2] *1e10/h # Mass of the subhalo in solar masses (sum of all particle types)\n\t\t\t\tpos = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] *1e3/h # Position of the subhalo in pc\n\t\t\t\tvel = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] # Halo velocity in km/s\n\t\t\t\tcom = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] *1e3/h # COM location of the halo (almost identical to halopos)\n\t\t\t\tspin = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] # Spin of the halo (presumably the dimensionless spin parameter?)\n\t\t\t\tveldisp = np.fromfile(f, floattype, Nsubgroups)[id2] # Velocity dispersion of the subhalo\n\t\t\t\tvmax = np.fromfile(f, floattype, Nsubgroups)[id2] # Maximum circular velocity of the subhalo (presumably km/s)\n\t\t\t\tvmaxrad = np.fromfile(f, floattype, Nsubgroups)[id2]*1e3/h # Radius for the maximum circular velocity in pc\n\t\t\t\thalfmassr = np.fromfile(f, floattype, Nsubgroups)[id2]*1e3/h # Radius encompasses half the subhalo's mass in pc\n\t\t\t\tidbp = np.fromfile(f, idtype, Nsubgroups)[id2] # ID of the most bound particle in the subhalo\n\t\t\t\tidgp = np.fromfile(f, 'u4', Nsubgroups)[id2] # Index of group that this subhalo is in.\n\t\t\t\tpartmass = np.fromfile(f, np.dtype(('f4',6)), Nsubgroups)[id2]*1e10/h # Integrated mass of the halo for each individual particle type in solar masses\n\t\t\t\ttabdata = [Nparts, offset, parent, mass, pos, vel, com, spin, veldisp, vmax, vmaxrad, halfmassr, idbp, idgp, partmass]\n\t\t\t\tbreak\n\t\t\tNaccum += Nsubgroups\n\t\n\telif np.max(idarr) == Ntotgroups-1:\n\t\tfor i in range(Nfiles):\n\t\t\tf = open(dir+'subhalo_tab_'+t+'.'+str(i))\n\t\t\tNgroups = 
np.fromfile(f, 'i4', 1)[0] # Number of groups in this file\n\t\t\t\n\t\t\tif id1<=Naccum+Ngroups:\n\t\t\t\tf.seek(32)\n\t\t\t\tid2 = id1-Naccum\n\t\t\t\tNparts = np.fromfile(f, 'i4', Ngroups)[id2] # Number of particles in each group\n\t\t\t\toffset = np.fromfile(f, 'i4', Ngroups)[id2]\n\t\t\t\tmass = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # Mass of each group\n\t\t\t\tpos = np.fromfile(f, np.dtype((floattype,3)), Ngroups)[id2]*1e3/h # COM of the group\n\t\t\t\tmmean200 = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # Mass within radius enclosing an average density 200x that of the mean density of the Universe\n\t\t\t\trmean200 = np.fromfile(f, floattype, Ngroups)[id2]*1e3/h # The radius for the above\n\t\t\t\tmcrit200 = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # As for mmean200 but critical density of the Universe\n\t\t\t\trcrit200 = np.fromfile(f, floattype, Ngroups)[id2]*1e3/h # Radius for the above\n\t\t\t\tmtoph200 = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # As above masses but for a tophat. Not sure about this...\n\t\t\t\trtoph200 = np.fromfile(f, floattype, Ngroups)[id2]*1e3/h\n\t\t\t\tveldispmean200 = np.fromfile(f, floattype, Ngroups)[id2] # This and the next are the velocity dispersions for the various radii\n\t\t\t\tveldispcrit200 = np.fromfile(f, floattype, Ngroups)[id2]\n\t\t\t\tveldisptoph200 = np.fromfile(f, floattype, Ngroups)[id2]\n\t\t\t\tlencontam = np.fromfile(f, 'i4', Ngroups)[id2] # Number of particles in the group that are not associated with a subhalo\n\t\t\t\tmasscontam = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # Summed mass of those above particles\n\t\t\t\tnhalo = np.fromfile(f, 'i4', Ngroups)[id2] # Number of subhaloes within the group\n\t\t\t\tfirsthalo = np.fromfile(f, 'i4', Ngroups)[id2] # Related to concatenation and \"offset\". 
Can probably ignore.\n\t\t\t\ttabdata = [Nparts, offset, mass, pos, mmean200, rmean200, mcrit200, rcrit200, mtoph200, rtoph200, veldispmean200, veldispcrit200, veldisptoph200, lencontam, masscontam, nhalo, firsthalo]\n\t\t\t\tbreak\n\t\t\tNaccum += Ngroups\n\t##\n\treturn [pos_g[:,0]*1e3*a/h, pos_g[:,1]*1e3*a/h, pos_g[:,2]*1e3*a/h, vel_g[:,0], vel_g[:,1], vel_g[:,2], id_g, mass_g*1e10/h, u*1e6, rho*10*h*h/(a**3), sfr], [pos_dm[:,0]*1e3*a/h, pos_dm[:,1]*1e3*a/h, pos_dm[:,2]*1e3*a/h, vel_dm[:,0], vel_dm[:,1], vel_dm[:,2], id_dm, mass_dm*1e10/h], [pos_s[:,0]*1e3*a/h, pos_s[:,1]*1e3*a/h, pos_s[:,2]*1e3*a/h, vel_s[:,0], vel_s[:,1], vel_s[:,2], id_s, mass_s*1e10/h], [pos_bh[:,0]*1e3*a/h, pos_bh[:,1]*1e3*a/h, pos_bh[:,2]*1e3*a/h, vel_bh[:,0], vel_bh[:,1], vel_bh[:,2], id_bh, mass_bh*1e10/h], tabdata", "def test_hdf5_file_input():\n catfile = os.path.join(TEST_DATA_DIR, 'point_sources.cat')\n output_hdf5 = os.path.join(TEST_DATA_DIR, 'all_spectra.hdf5')\n sed_file = os.path.join(TEST_DATA_DIR, 'sed_file_with_normalized_dataset.hdf5')\n sed_catalog = spec.make_all_spectra(catfile, input_spectra_file=sed_file,\n normalizing_mag_column='nircam_f444w_magnitude',\n output_filename=output_hdf5)\n\n comparison = hdf5.open(os.path.join(TEST_DATA_DIR, 'output_spec_from_hdf5_input_including_normalized.hdf5'))\n constructed = hdf5.open(sed_catalog)\n for key in comparison:\n assert key in constructed.keys()\n assert all(comparison[key][\"wavelengths\"].value == constructed[key][\"wavelengths\"].value)\n assert all(comparison[key][\"fluxes\"].value == constructed[key][\"fluxes\"].value)\n assert comparison[key][\"wavelengths\"].unit == constructed[key][\"wavelengths\"].unit\n assert comparison[key][\"fluxes\"].unit == constructed[key][\"fluxes\"].unit\n\n cat_base = catfile.split('.')[0]\n outbase = cat_base + '_with_flambda.cat'\n flambda_output_catalog = os.path.join(TEST_DATA_DIR, outbase)\n os.remove(flambda_output_catalog)\n os.remove(sed_catalog)", "def Output2File(data_array, filebase, format, hdr=None, shape=None):\n # used by 'AIDA_Functions.py'\n \n # below is old\n #if shape is None:\n #\n # shape = data_array.shape\n \n ### EHom (20130625): adding line to shape data_array according to shape input parameter\n ### Should have been here before\n if (shape != None):\n data_array.shape = shape\n \n import matplotlib.pyplot as plt\n #plt.figure()\n #plt.imshow(data_array)\n #plt.title(data_array[0,0])\n #plt.show()\n \n if format == 'm':\n\n Mrc.save(data_array, filebase + '.mrc', ifExists=\"overwrite\")\n \n # below is old way - Mrc.bindArr no longer exists in Priithon\n #rs = ''\n #\n #for i in shape:\n # \n # rs += '%d ' %i\n #\n #dtype = data_array.dtype\n #\n #temp = Mrc.bindArr(filebase + '.mrc', data_array.astype(np.float32))\n ## can only write out as single precision\n #fileheader = temp.Mrc.hdrArray[0]\n #fileheader.setfield('NumTitles',1)\n #fileheader.field('title')[0] = 'Shape: ' + rs\n #temp.Mrc.close()\n ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n \n elif format == 'f':\n\n if os.path.exists(filebase + '.fits') == 1:\n\n os.remove(filebase + '.fits')\n\n # Clement: using astropy.io.fits now\n \n fits_file = iofits.HDUList()\n datahdu = PrimaryHDU()\n datahdu.data = data_array\n \n \n iofits.append(filebase + '.fits',data_array,header=hdr)\n \n elif format == 't':\n if os.path.exists(filebase + '.tiff') == 1:\n\n os.remove(filebase + '.tiff')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tiff')\n \n elif format == 't2':\n if 
os.path.exists(filebase + '.tif') == 1:\n\n os.remove(filebase + '.tif')\n \n img = scipy.misc.toimage(data_array)\n img.save(filebase + '.tif')\n \n# Clement: Old version using pyfits (deprecated)\n# fits_file = pyfits.HDUList()\n# datahdu = pyfits.PrimaryHDU()\n# datahdu.data = data_array\n# \n# ## STILL NEED TO PROVIDE A WAY OF SETTING HEADER INFO FROM INPUT\n# #if type(hdr) is not types.NoneType:\n# #\n# # datahdu.header = hdr\n# # \n# # print hdr\n# \n# # Provide header info from the original fits file.\n# \n# \n# fits_file.append(datahdu)\n# fits_file.writeto(filebase + '.fits')\n \n# else: # format must be .tiff\n# \n# #!!!! TENTATIVE !!!!\n# # make sure orientation of TIFF file matches convention\n# if len(data_array.shape) == 2:\n# \n# U.saveImg(data_array[...,::-1,...], filebase + \".tiff\")\n# elif len(data_array.shape) == 3:\n# \n# U.saveImg_seq(data_array[...,::-1,...], filebase + \".tiff\")\n# else:\n# \n# message = \"\\n'data_array' shape is not 2 or 3! Cannot write \" + \\\n# \"out TIFF file!\"\n# raise ValueError, message\n\n ### EHom (20130616): also output results (if 2D) as an 8-bit JPEG files using PIL\n ### In the division of 255, I hack the addition of a small value to avoid \n ### a divide by zero in a true_divide call\n if len(data_array.shape) == 2:\n\n min = data_array.min()\n max = data_array.max()\n #print data_array.min()\n #print data_array.max()\n #print data_array.mean()\n rescaled = np.where(data_array > min, data_array-min, 0.)\n if ((max - min) == 0):\n message = \"\\nMax Min problem in outputting array! Cannot write JPEG file\\n\"\n print(message)\n else:\n rescaled *= (255.0 / (max - min))\n # Clement: we don't need to save the jpeg\n # im = ImageOps.flip(Image.fromarray(rescaled.astype(np.uint8)))\n # rescale and flip vertically to properly register image with FITS output\n # im.save(filebase + '.jpeg')", "def to_h5(self):\n import time\n from pymicro import __version__ as pymicro_version\n\n print('opening file %s.h5 for writing' % self.name)\n f = h5py.File('%s.h5' % self.name, 'w')\n f.attrs['Pymicro_Version'] = np.string_(pymicro_version)\n f.attrs['HDF5_Version'] = h5py.version.hdf5_version\n f.attrs['h5py_version'] = h5py.version.version\n f.attrs['file_time'] = time.time()\n f.attrs['microstructure_name'] = self.name\n if hasattr(self, 'data_dir'):\n f.attrs['data_dir'] = self.data_dir\n # ensemble data\n ed = f.create_group('EnsembleData')\n cs = ed.create_group('CrystalStructure')\n sym = self.get_lattice().get_symmetry()\n cs.attrs['symmetry'] = sym.to_string()\n lp = cs.create_dataset('LatticeParameters',\n data=np.array(self.get_lattice().get_lattice_parameters(), dtype=np.float32))\n # feature data\n fd = f.create_group('FeatureData')\n grain_ids = fd.create_dataset('grain_ids',\n data=np.array([g.id for g in self.grains], dtype=np.int))\n avg_rods = fd.create_dataset('R_vectors',\n data=np.array([g.orientation.rod for g in self.grains], dtype=np.float32))\n centers = fd.create_dataset('centers',\n data=np.array([g.center for g in self.grains], dtype=np.float32))\n # cell data\n cd = f.create_group('CellData')\n if hasattr(self, 'grain_map') and self.grain_map is not None:\n gm = cd.create_dataset('grain_ids', data=self.grain_map, compression='gzip', compression_opts=9)\n gm.attrs['voxel_size'] = self.voxel_size\n if hasattr(self, 'mask') and self.mask is not None:\n ma = cd.create_dataset('mask', data=self.mask, compression='gzip', compression_opts=9)\n ma.attrs['voxel_size'] = self.voxel_size\n print('done writing')\n f.close()", "def 
save_data(halo_particles):\n mass, pos, vel = halo_particles(N_part=100, seed=42)\n data = np.ndarray([len(mass), 4])\n data[:, 0] = pos[:, 0]\n data[:, 1] = pos[:, 1]\n data[:, 2] = pos[:, 2]\n data[:, 3] = mass\n\n np.savetxt(\"mock_particles.dat\", data, fmt=\"%12.6f\")", "def create_data_tables(table):\n data = mcf.read_data_file(table)\n\n for ent in data:\n if mcf.is_neumeric(ent):\n obsid = ent.strip()\n else:\n atemp = re.split('\\s+', ent)\n obsid = atemp[0]\n\n if mcf.is_neumeric(obsid) == False:\n continue\n\n print(str(obsid))\n\n fits = hcf.run_arc5gl(0, 0, obsid=obsid, level='2', filetype='evt2')\n\n if fits == False:\n write_on_skip_file(obsid)\n print(\"Data is not extracted\")\n continue\n#\n#--- if there are multiple output, use only first one\n#\n if isinstance(fits, list):\n fits = fits[0]\n\n xxx = 999\n #if xxx == 999:\n try:\n out = extract_count_stats(fits)\n #else:\n except:\n cmd = 'rm -f ' + fits + '*'\n os.system(cmd)\n write_on_skip_file(obsid)\n print(\"Analysis Failed\")\n continue\n\n if out[-1] <0:\n cmd = 'rm -f ' + fits + '*'\n os.system(cmd)\n write_on_skip_file(obsid)\n print(\"No Output\")\n continue\n\n line = str(obsid) + '\\t'\n\n if float(obsid) < 1000:\n line = line + '\\t'\n\n line = line + str(fits) + '\\t'\n line = line + out[7] + '\\t'\n line = line + '%2.1f' % round(out[6],1) + '\\t'\n line = line + '%2.2f' % round(out[5],2) + '\\t'\n line = line + '%2.2f' % round(out[8],2) + '\\t'\n line = line + '%2.4f' % round(out[9],4) + '\\n'\n\n if out[-1] == 0:\n outfile = data_dir + 'hrc_s_0_results'\n if out[-1] == 1:\n outfile = data_dir + 'hrc_s_10_results'\n if out[-1] == 2:\n outfile = data_dir + 'hrc_s_25_results'\n if out[-1] == 3:\n outfile = data_dir + 'hrc_s_m10_results'\n if out[-1] == 4:\n outfile = data_dir + 'hrc_s_m25_results'\n\n if out[-1] == 10:\n outfile = data_dir + 'hrc_i_0_results'\n if out[-1] == 11:\n outfile = data_dir + 'hrc_i_10_results'\n if out[-1] == 12:\n outfile = data_dir + 'hrc_i_25_results'\n if out[-1] == 13:\n outfile = data_dir + 'hrc_i_m10_results'\n if out[-1] == 14:\n outfile = data_dir + 'hrc_i_m25_results'\n\n with open(outfile, 'a') as fo:\n fo.write(line)\n\n cmd = 'rm -f *fits*'\n os.system(cmd)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Example particle HDF5 file generated by the dev branch of OSIRIS (May 2021). The associated data types are taken from an example output file.
def make_osiris_dev_particles_hdf(path: Path, data: np.ndarray, name: str):
    # makes sure we have data with a 'charge'
    if "q" not in data.dtype.fields:
        raise ValueError("structured dataset with a field 'q' required")

    with h5.File(path, mode="w") as fp:
        # root attrs
        fp.attrs["NAME"] = np.array([name], dtype="|S256")
        fp.attrs["TYPE"] = np.array(["particles"], dtype="|S9")
        fp.attrs["ITER"] = np.array([12345], dtype="i4")
        fp.attrs["TIME"] = np.array([-321.9], dtype="f4")
        fp.attrs["TIME UNITS"] = np.array([b"time unit"], dtype="|S256")

        data_fields = data.dtype.fields
        fp.attrs["QUANTS"] = np.array([str.encode(f) for f in data_fields])
        fp.attrs["LABELS"] = np.array([str.encode(f"{f} label") for f in data_fields])
        fp.attrs["UNITS"] = np.array([str.encode(f"{f} unit") for f in data_fields])

        # other quantities
        for field in data.dtype.fields:
            d = data[field]
            fp.create_dataset(field, data=d)
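A minimal usage sketch for the document function above (an editor's illustration, not part of the dataset entry). The field names "x1"/"p1", the particle count and the output file name are assumptions chosen only to satisfy the function's requirement of a structured array with a "q" field; numpy, h5py (imported as h5 in the function) and pathlib.Path are assumed available, as in the function itself.

import numpy as np
from pathlib import Path

# hypothetical particle layout: a position, a momentum component and the required charge "q"
dtype = np.dtype([("x1", "f4"), ("p1", "f4"), ("q", "f4")])
particles = np.zeros(16, dtype=dtype)
particles["q"] = -1.0  # assumed per-particle charge, for illustration only

# writes example_particles.h5 with the attribute/dataset layout produced by the function above
make_osiris_dev_particles_hdf(Path("example_particles.h5"), particles, name="electrons")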
[ "def make_osiris_444_particles_hdf(path: Path, data: np.ndarray, name: str):\n # makes sure we have data with a 'charge'\n if \"q\" not in data.dtype.fields:\n raise ValueError(\"structured dataset with a field 'q' required\")\n\n with h5.File(path, mode=\"w\") as fp:\n # root attrs\n fp.attrs[\"NAME\"] = np.array([name], dtype=\"|S256\")\n fp.attrs[\"TYPE\"] = np.array([\"particles\"], dtype=\"|S9\")\n fp.attrs[\"ITER\"] = np.array([12345], dtype=\"i4\")\n fp.attrs[\"TIME\"] = np.array([-321.9], dtype=\"f4\")\n fp.attrs[\"TIME UNITS\"] = np.array([b\"time unit\"], dtype=\"|S256\")\n\n # other quantities\n for field in data.dtype.fields:\n d = data[field]\n quants = fp.create_dataset(field, data=d)\n quants.attrs[\"LONG_NAME\"] = np.array([f\"{field} label\"], dtype=\"|S256\")\n quants.attrs[\"UNITS\"] = np.array([f\"{field} unit\"], dtype=\"|S256\")\n\n # tags\n tags = np.arange(len(data) * 2, dtype=\"i4\").reshape((len(data), 2))\n fp.create_dataset(\"tag\", data=tags)", "def transfer_to_h5part(plot_config):\n run_dir = plot_config[\"run_dir\"]\n tindex = plot_config[\"tframe\"] * plot_config[\"tinterval\"]\n ptl_vel = plot_config[\"ptl_vel\"]\n fname = (run_dir + 'data_' + str(tindex) + '_' +\n str(ptl_vel) + 'c/particle_diagnostics.h5')\n file = h5py.File(fname,'r')\n group = file['/particles_fields']\n dset_ptl = group['particles']\n dset_emf = group['fields']\n sz, = dset_ptl.shape\n tinterval_traj = get_traj_tinterval(run_dir)\n nsteps_tot = get_num_steps(run_dir)\n if nsteps_tot > 1E6:\n nsteps_tot = int(1E6)\n ntraj = nsteps_tot // tinterval_traj + 1\n nptl = sz / ntraj\n fname_out = (run_dir + 'data_' + str(tindex) + '_' +\n str(ptl_vel) + 'c/particle_diagnostics.h5part')\n print(fname_out)\n toffset = 10\n with h5py.File(fname_out, 'w') as fh_out:\n for tf in range(0, ntraj, toffset):\n print(\"Time frame: %d\" % tf)\n x = np.array(dset_ptl['x'][tf::ntraj])\n y = np.array(dset_ptl['y'][tf::ntraj])\n z = np.array(dset_ptl['z'][tf::ntraj])\n ux = np.array(dset_ptl['ux'][tf::ntraj])\n uy = np.array(dset_ptl['uy'][tf::ntraj])\n uz = np.array(dset_ptl['uz'][tf::ntraj])\n gamma = np.sqrt(1.0 + ux**2 + uy**2 + uz**2)\n t = np.array(dset_ptl['t'][tf::ntraj])\n Ex = np.array(dset_emf['Ex'][tf::ntraj])\n Ey = np.array(dset_emf['Ey'][tf::ntraj])\n Ez = np.array(dset_emf['Ez'][tf::ntraj])\n Bx = np.array(dset_emf['Bx'][tf::ntraj])\n By = np.array(dset_emf['By'][tf::ntraj])\n Bz = np.array(dset_emf['Bz'][tf::ntraj])\n grp = fh_out.create_group('Step#' + str(tf//toffset))\n grp.create_dataset('x', (nptl, ), data=x)\n grp.create_dataset('y', (nptl, ), data=y)\n grp.create_dataset('z', (nptl, ), data=z)\n grp.create_dataset('ux', (nptl, ), data=ux)\n grp.create_dataset('uy', (nptl, ), data=uy)\n grp.create_dataset('uz', (nptl, ), data=uz)\n grp.create_dataset('gamma', (nptl, ), data=gamma)\n grp.create_dataset('t', (nptl, ), data=t)\n grp.create_dataset('Ex', (nptl, ), data=Ex)\n grp.create_dataset('Ey', (nptl, ), data=Ey)\n grp.create_dataset('Ez', (nptl, ), data=Ez)\n grp.create_dataset('Bx', (nptl, ), data=Bx)\n grp.create_dataset('By', (nptl, ), data=By)\n grp.create_dataset('Bz', (nptl, ), data=Bz)", "def build_example(n_points=1000):\n\n x=np.linspace(1,3,n_points)\n y=np.linspace(1,3,n_points)\n z=p_known(x,y)\n zerr= np.random.rand(n_points)\n \n data=np.column_stack([x,y,z,zerr])\n \n header='# density Temperature property sigma_property'\n np.savetxt('input_example.dat',data, header=header)", "def temp_emsoft_h5ebsd_file(tmpdir, request):\n f = 
File(tmpdir.join(\"emsoft_h5ebsd_file.h5\"), mode=\"w\")\n\n # Unpack parameters\n map_shape, (dy, dx), example_rotations, n_top_matches, refined = request.param\n ny, nx = map_shape\n map_size = ny * nx\n\n # Create groups used in reader\n ebsd_group = f.create_group(\"Scan 1/EBSD\")\n data_group = ebsd_group.create_group(\"Data\")\n header_group = ebsd_group.create_group(\"Header\")\n phase_group = header_group.create_group(\"Phase/1\") # Always single phase\n\n # Create `header_group` datasets used in reader\n for name, data, dtype in zip(\n [\"nRows\", \"nColumns\", \"Step Y\", \"Step X\"],\n [ny, nx, dy, dx],\n [np.int32, np.int32, np.float32, np.float32],\n ):\n header_group.create_dataset(name, data=np.array([data], dtype=dtype))\n\n # Create `data_group` datasets, mostly quality metrics\n data_group.create_dataset(\"X Position\", data=np.tile(np.arange(nx) * dx, ny))\n # Note that \"Y Position\" is wrongly written to their h5ebsd file by EMsoft\n data_group.create_dataset(\n \"Y Position\",\n data=np.tile(np.arange(nx) * dx, ny), # Wrong\n # data=np.sort(np.tile(np.arange(ny) * dy, nx)), # Correct\n )\n for name, shape, dtype in [\n (\"AvDotProductMap\", map_shape, np.int32),\n (\"CI\", map_size, np.float32),\n (\"CIMap\", map_shape, np.int32),\n (\"IQ\", map_size, np.float32),\n (\"IQMap\", map_shape, np.int32),\n (\"ISM\", map_size, np.float32),\n (\"ISMap\", map_shape, np.int32),\n (\"KAM\", map_shape, np.float32),\n (\"OSM\", map_shape, np.float32),\n (\"Phase\", map_size, np.uint8),\n ]:\n data_group.create_dataset(name, data=np.zeros(shape, dtype=dtype))\n\n # `data_group` with rotations\n # Sample as many rotations from `rotations` as `map_size`\n rot_idx = np.random.choice(np.arange(len(example_rotations)), map_size)\n rot = example_rotations[rot_idx]\n n_sampled_oris = 333227 # Cubic space group with Ncubochoric = 100\n data_group.create_dataset(\"FZcnt\", data=np.array([n_sampled_oris], dtype=np.int32))\n data_group.create_dataset(\n \"TopMatchIndices\",\n data=np.vstack(\n (np.random.choice(np.arange(n_sampled_oris), n_top_matches),) * map_size\n ),\n dtype=np.int32,\n )\n data_group.create_dataset(\n \"TopDotProductList\",\n data=np.vstack((np.random.random(size=n_top_matches),) * map_size),\n dtype=np.float32,\n )\n data_group.create_dataset(\n \"DictionaryEulerAngles\",\n data=np.column_stack(\n (np.random.uniform(low=0, high=2 * np.pi, size=n_sampled_oris),) * 3\n ),\n dtype=np.float32,\n )\n\n if refined:\n data_group.create_dataset(\"RefinedEulerAngles\", data=rot.astype(np.float32))\n data_group.create_dataset(\n \"RefinedDotProducts\", data=np.zeros(map_size, dtype=np.float32)\n )\n\n # Number of top matches kept\n f.create_dataset(\n \"NMLparameters/EBSDIndexingNameListType/nnk\",\n data=np.array([n_top_matches], dtype=np.int32),\n )\n\n # `phase_group`\n for name, data in [\n (\"Point Group\", \"Cubic (Oh) [m3m]\"),\n (\"MaterialName\", \"austenite/austenite\"),\n (\"Lattice Constant a\", \"3.595\"),\n (\"Lattice Constant b\", \"3.595\"),\n (\"Lattice Constant c\", \"3.595\"),\n (\"Lattice Constant alpha\", \"90.000\"),\n (\"Lattice Constant beta\", \"90.000\"),\n (\"Lattice Constant gamma\", \"90.000\"),\n ]:\n phase_group.create_dataset(name, data=np.array([data], dtype=np.dtype(\"S\")))\n\n yield f\n gc.collect()", "def writeHD5():\n global Data1\n\n store = HDFStore('.\\store.h5')\n store['listCrisis'] = Data1\n store.close()", "def example_bed_l2_h5():\n yield h5py.File(\"tests/test_data/example_test_2label.h5\", \"r\")", "def 
parse_single_12tped_to_hdf5(in_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n out_file_prefix='/home/bv25/data/Ls154/Ls154_12',\n impute_type='mode', filter_monomorphic_snps=True,\n missing_val_thr=0.1):\n \n print 'Starting to parse genotypes'\n genotype_data = {}\n h5py_file = h5py.File(out_file_prefix + '.hdf5')\n genotype_data['hdf5p_file'] = h5py_file\n genot_group = h5py_file.create_group('genot_data')\n indiv_group = h5py_file.create_group('indiv_data')\n \n \n tot_num_snps = 0\n tot_num_missing_val_snps_removed = 0\n tot_num_ambiguous_loc_removed = 0\n curr_chrom = 1\n print 'Working on chromosome %d' % curr_chrom\n \n g_filename = '%s.tped' % (in_file_prefix) \n s_filename = '%s.bim' % (in_file_prefix)\n i_filename = '%s.tfam' % (in_file_prefix) \n\n \n \n indiv_ids = []\n phenotypes = [] \n sex = []\n print 'Parsing individuals file: %s' % i_filename\n with open(i_filename) as f:\n for line in f:\n l = line.split()\n iid = l[0]\n indiv_ids.append(iid)\n sex.append(int(l[4]))\n phenotypes.append(float(l[5]))\n tot_num_indiv = len(indiv_ids) \n \n print 'Storing individual data in individ. group'\n indiv_group.create_dataset('indiv_ids', data=indiv_ids)\n indiv_group.create_dataset('sex', data=sex)\n indiv_group.create_dataset('phenotypes', data=phenotypes)\n \n \n \n num_indiv = len(indiv_ids)\n print 'Found %d Individuals' % (num_indiv)\n\n print 'Parsing nucleotide map'\n nt_map = {}\n chromsomoes = []\n curr_chrom = 0\n with open(s_filename) as f:\n for line in f:\n l = line.split()\n chrom = l[0]\n if chrom != curr_chrom:\n chromsomoes.append(chrom)\n curr_chrom = chrom\n nt_map[l[1]] = (l[4], l[5]) \n assert len(chromsomoes) == len(set(chromsomoes)), 'Chromosomes need to be in order.'\n curr_chrom = chromsomoes[0]\n \n position = -1\n # Initializing containers.\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n t0 = time.time()\n\n print 'Starting to parse SNP files'\n gf = open(g_filename)\n for g_line in gf:\n# if random.random() > 0.01:\n# continue\n gl = g_line.split()\n chrom = gl[0]\n if chrom != curr_chrom:\n \n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % curr_chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse 
Chromosome %s.' % (t / 60, t % 60, curr_chrom)\n t0 = time.time()\n\n \n\n # Reset containers\n snps_mat = [] \n positions = []\n sids = []\n nts_list = []\n nt_counts_list = []\n missing_counts = []\n freqs = []\n num_missing_removed = 0\n num_ambiguous = 0\n num_monomorphic_removed = 0\n num_ambiguous_loc_removed = 0\n \n curr_chrom = chrom\n\n sid = gl[1]\n prev_position = position\n position = int(gl[3])\n\n # Skipping unmappable locations\n if position == prev_position:\n num_ambiguous_loc_removed += 1\n continue\n if position == 0:\n num_ambiguous_loc_removed += 1\n continue\n\n nt = nt_map[sid]\n \n snp0 = sp.array(map(int, (g_line.strip()).split()[4:]), 'int8')\n a = sp.arange(tot_num_indiv * 2)\n even_map = a % 2 == 0\n odd_map = a % 2 == 1\n snp = snp0[even_map] + snp0[odd_map] - 2\n snp[snp < 0] = 9\n \n bin_counts = sp.bincount(snp)\n \n\n if len(bin_counts) > 3:\n missing_count = bin_counts[-1]\n # Filtering SNPs with too many missing values\n if missing_count > missing_val_thr * 2 * num_indiv:\n num_missing_removed += 1\n continue\n elif impute_type == 'mode':\n nt_counts = bin_counts[:3] \n v = sp.argmax(nt_counts)\n snp[snp == 9] = v\n bin_counts = sp.bincount(snp)\n else:\n raise Exception('Imputation type is unknown')\n else:\n missing_count = 0\n\n assert len(bin_counts) < 4, 'Issues with nucleotides.'\n nt_counts = bin_counts[:3] \n if len(nt_counts) == 2:\n nt_counts = sp.array([nt_counts[0], nt_counts[1], 0])\n elif len(nt_counts) == 1:\n nt_counts = sp.array([nt_counts[0], 0, 0])\n \n\n # Removing monomorphic SNPs\n if filter_monomorphic_snps:\n if max(nt_counts) == sum(nt_counts):\n num_monomorphic_removed += 1\n continue\n \n freq = sp.mean(snp) / 2.0 \n snps_mat.append(snp)\n positions.append(position)\n sids.append(sid)\n nts_list.append(nt)\n nt_counts_list.append(nt_counts)\n missing_counts.append(missing_count)\n freqs.append(freq) \n\n # Store everything and reset.\n print 'Number of SNPs removed due to too many missing values: %d' % num_missing_removed\n print 'Number of SNPs removed due to ambiguous location: %d' % num_ambiguous_loc_removed\n print 'Number of monomorphic SNPs removed: %d' % num_monomorphic_removed\n print 'Number of SNPs retained: %d' % len(positions)\n print 'Number of individuals: %d' % num_indiv\n snps = sp.array(snps_mat, dtype='int8')\n h5py_chrom_group = genot_group.create_group('chrom_%s' % chrom)\n h5py_chrom_group.create_dataset('raw_snps', compression='lzf', data=snps)\n h5py_chrom_group.create_dataset('positions', compression='lzf', data=positions)\n h5py_chrom_group.create_dataset('nts', compression='lzf', data=nts_list)\n h5py_chrom_group.create_dataset('nt_counts', compression='lzf', data=nt_counts_list)\n h5py_chrom_group.create_dataset('missing_counts', compression='lzf', data=missing_counts)\n h5py_chrom_group.create_dataset('freqs', compression='lzf', data=freqs)\n h5py_chrom_group.create_dataset('snp_ids', compression='lzf', data=sids) \n tot_num_snps += len(positions)\n tot_num_missing_val_snps_removed += num_missing_removed\n tot_num_ambiguous_loc_removed += num_ambiguous_loc_removed\n h5py_file.create_dataset('num_snps', data=sp.array(tot_num_snps))\n h5py_file.flush() \n t1 = time.time()\n t = t1 - t0\n print 'It took %d minutes and %0.2f seconds to parse chromosome %s.' 
% (t / 60, t % 60, chrom)\n\n \n gf.close()\n \n print 'Total number of SNPs parsed successfully was: %d' % tot_num_snps\n print 'Total number of SNPs removed due to too many missing values: %d' % tot_num_missing_val_snps_removed\n print 'Total number of SNPs removed due to ambiguous locations: %d' % tot_num_ambiguous_loc_removed\n h5py_file.close()\n \n print 'Done parsing genotypes.'", "def output_file_setup(model):\n \n filename = model.filename\n\n if os.path.isfile(filename):\n print('\\n'+filename+' already exists, deleting '+filename+'\\n')\n os.remove(filename)\n \n \n \n model.out_file = nc4.Dataset(filename,'w',format='NETCDF4')\n\n model.data_group = model.out_file.createGroup('data')\n model.data_group.createDimension('time',None)\n \n var_dict = model()\n model.save_dict = {}\n sizes = []\n for key in var_dict.keys():\n \n if type(var_dict[key]) in (int,float,np.int64,np.float64):\n s = 1\n elif not isinstance(type(var_dict[key]), (str,np.ndarray)):\n s = len(var_dict[key])\n else:\n pdb.set_trace()\n \n if s not in sizes:\n model.data_group.createDimension(str(s),s)\n \n sizes.append(s)\n \n if s == 1:\n model.save_dict[key] = model.data_group.createVariable(key,'f8',('time','1'))\n else:\n model.save_dict[key] = model.data_group.createVariable(key,'f8',('time',str(s)))\n \n \n \n \n types = (int, float, np.int, np.float, np.ndarray, str)\n \n parameter_group = model.out_file.createGroup('parameters')\n\n for key, value in model.parameters.items():\n if type(value) in types:\n setattr(parameter_group, key, value)\n \n \n return model", "def save_data(halo_particles):\n mass, pos, vel = halo_particles(N_part=100, seed=42)\n data = np.ndarray([len(mass), 4])\n data[:, 0] = pos[:, 0]\n data[:, 1] = pos[:, 1]\n data[:, 2] = pos[:, 2]\n data[:, 3] = mass\n\n np.savetxt(\"mock_particles.dat\", data, fmt=\"%12.6f\")", "def write(s,filename,header=\"Opacity file written by optool.particle.write\"):\n\n if (s.np>1):\n raise TypeError('Writing is not supported for multi-particle objects')\n try:\n wfile = open(filename, 'w')\n except:\n raise RuntimeError('Cannot write to file: '+filename)\n\n headerlines = header.splitlines()\n for i in range(len(headerlines)):\n wfile.write(\"# %s\\n\" % headerlines[i])\n if s.scat:\n wfile.write(' 0\\n')\n wfile.write(' %d\\n' % s.nlam)\n wfile.write(' %d\\n' % s.nang)\n wfile.write('\\n')\n else:\n wfile.write(' 3\\n')\n wfile.write(' %d\\n' % s.nlam)\n \n for i in range(s.nlam):\n # write the lambda grid and the opacities\n wfile.write(' %15.5e %15.5e %15.5e %15.5e\\n' % (s.lam[i],s.kabs[0,i],s.ksca[0,i],s.gsca[0,i]))\n \n if s.scat:\n # we have a scattering matrix\n wfile.write('\\n')\n # Write the angular grid\n for i in range(s.nang):\n wfile.write(\"%9.2f\\n\" % s.scatang[i])\n wfile.write('\\n')\n # Write the scattering matrix\n for il in range(s.nlam):\n for ia in range(s.nang):\n wfile.write(' %15.5e %15.5e %15.5e %15.5e %15.5e %15.5e\\n' %\n (s.f11[0,il,ia],s.f12[0,il,ia],s.f22[0,il,ia],\n s.f33[0,il,ia],s.f34[0,il,ia],s.f44[0,il,ia]))\n wfile.close()", "def test_hdf5_file_input():\n catfile = os.path.join(TEST_DATA_DIR, 'point_sources.cat')\n output_hdf5 = os.path.join(TEST_DATA_DIR, 'all_spectra.hdf5')\n sed_file = os.path.join(TEST_DATA_DIR, 'sed_file_with_normalized_dataset.hdf5')\n sed_catalog = spec.make_all_spectra(catfile, input_spectra_file=sed_file,\n normalizing_mag_column='nircam_f444w_magnitude',\n output_filename=output_hdf5)\n\n comparison = hdf5.open(os.path.join(TEST_DATA_DIR, 
'output_spec_from_hdf5_input_including_normalized.hdf5'))\n constructed = hdf5.open(sed_catalog)\n for key in comparison:\n assert key in constructed.keys()\n assert all(comparison[key][\"wavelengths\"].value == constructed[key][\"wavelengths\"].value)\n assert all(comparison[key][\"fluxes\"].value == constructed[key][\"fluxes\"].value)\n assert comparison[key][\"wavelengths\"].unit == constructed[key][\"wavelengths\"].unit\n assert comparison[key][\"fluxes\"].unit == constructed[key][\"fluxes\"].unit\n\n cat_base = catfile.split('.')[0]\n outbase = cat_base + '_with_flambda.cat'\n flambda_output_catalog = os.path.join(TEST_DATA_DIR, outbase)\n os.remove(flambda_output_catalog)\n os.remove(sed_catalog)", "def to_h5(self):\n import time\n from pymicro import __version__ as pymicro_version\n\n print('opening file %s.h5 for writing' % self.name)\n f = h5py.File('%s.h5' % self.name, 'w')\n f.attrs['Pymicro_Version'] = np.string_(pymicro_version)\n f.attrs['HDF5_Version'] = h5py.version.hdf5_version\n f.attrs['h5py_version'] = h5py.version.version\n f.attrs['file_time'] = time.time()\n f.attrs['microstructure_name'] = self.name\n if hasattr(self, 'data_dir'):\n f.attrs['data_dir'] = self.data_dir\n # ensemble data\n ed = f.create_group('EnsembleData')\n cs = ed.create_group('CrystalStructure')\n sym = self.get_lattice().get_symmetry()\n cs.attrs['symmetry'] = sym.to_string()\n lp = cs.create_dataset('LatticeParameters',\n data=np.array(self.get_lattice().get_lattice_parameters(), dtype=np.float32))\n # feature data\n fd = f.create_group('FeatureData')\n grain_ids = fd.create_dataset('grain_ids',\n data=np.array([g.id for g in self.grains], dtype=np.int))\n avg_rods = fd.create_dataset('R_vectors',\n data=np.array([g.orientation.rod for g in self.grains], dtype=np.float32))\n centers = fd.create_dataset('centers',\n data=np.array([g.center for g in self.grains], dtype=np.float32))\n # cell data\n cd = f.create_group('CellData')\n if hasattr(self, 'grain_map') and self.grain_map is not None:\n gm = cd.create_dataset('grain_ids', data=self.grain_map, compression='gzip', compression_opts=9)\n gm.attrs['voxel_size'] = self.voxel_size\n if hasattr(self, 'mask') and self.mask is not None:\n ma = cd.create_dataset('mask', data=self.mask, compression='gzip', compression_opts=9)\n ma.attrs['voxel_size'] = self.voxel_size\n print('done writing')\n f.close()", "def load_hep_data(self,variables2plot=[]):\n file = uproot.open(self.hep_data)\n data = file[self.treename]\n self.df = data.pandas.df( self.features+['target']+variables2plot )\n #self.df = df.sample(frac=0.2)\n print self.df.dtypes\n\n self.metadata = file['metadata'] # names of samples, target values, etc.\n\n return", "def transfer_to_csv(plot_config):\n run_dir = plot_config[\"run_dir\"]\n tindex = plot_config[\"tframe\"] * plot_config[\"tinterval\"]\n ptl_vel = plot_config[\"ptl_vel\"]\n fname = (run_dir + 'data_' + str(tindex) + '_' +\n str(ptl_vel) + 'c/particle_diagnostics.h5')\n file = h5py.File(fname,'r')\n group = file['/particles_fields']\n dset_ptl = group['particles']\n dset_emf = group['fields']\n sz, = dset_ptl.shape\n tinterval_traj = get_traj_tinterval(run_dir)\n nsteps_tot = get_num_steps(run_dir)\n if nsteps_tot > 1E6:\n nsteps_tot = int(1E6)\n ntraj = nsteps_tot // tinterval_traj + 1\n nptl = sz / ntraj\n fdir = run_dir + 'data_' + str(tindex) + '_' + str(ptl_vel) + 'c/'\n fdir += 'traj_csv/'\n mkdir_p(fdir)\n pdata = np.zeros([14, ntraj])\n # for iptl in range(nptl):\n for iptl in range(2):\n print(iptl)\n ps, pt = ntraj * 
iptl, ntraj * (iptl + 1)\n pdata[0] = np.array(dset_ptl['x'][ps:pt])\n pdata[1] = np.array(dset_ptl['y'][ps:pt])\n pdata[2] = np.array(dset_ptl['z'][ps:pt])\n pdata[3] = np.array(dset_ptl['ux'][ps:pt])\n pdata[4] = np.array(dset_ptl['uy'][ps:pt])\n pdata[5] = np.array(dset_ptl['uz'][ps:pt])\n pdata[6] = np.sqrt(1.0 + np.sum(pdata[3:6]**2, axis=0))\n pdata[7] = np.array(dset_ptl['t'][ps:pt])\n pdata[8] = np.array(dset_emf['Ex'][ps:pt])\n pdata[9] = np.array(dset_emf['Ey'][ps:pt])\n pdata[10] = np.array(dset_emf['Ez'][ps:pt])\n pdata[11] = np.array(dset_emf['Bx'][ps:pt])\n pdata[12] = np.array(dset_emf['By'][ps:pt])\n pdata[13] = np.array(dset_emf['Bz'][ps:pt])\n fname = fdir + 'traj_' + str(iptl) + '.csv'\n # np.savetxt(fname, pdata.T, delimiter=\",\",\n # header=\"x,y,z,ux,uy,uz,gamma,t,Ex,Ey,Ez,Bx,By,Bz\")\n df = pd.DataFrame(pdata.T)\n df.to_csv(fname, mode='w', index=True,\n header=[\"x\", \"y\", \"z\", \"ux\", \"uy\", \"uz\", \"gamma\", \"t\",\n \"Ex\", \"Ey\", \"Ez\", \"Bx\", \"By\", \"Bz\"])", "def read_raw_hdf5_case_and_write_pandas_hdf5(\n hdf5_file,\n root = '' ,\n output_file = '' ,\n serration_angle = 0 ,\n angle_correction = 0 ,\n height_correction = 0 ,\n streamwise_correction = 0 ,\n overwrite = False ,\n time_step_limit = 0 ,\n plot = False ,\n airfoil_normal = False,\n):\n\n #######################################################\n #######################################################\n # IMPORTANT\n #\n # The coordinates coming from the HDF5 file are the\n # vertical freestream coordinates of DaVis.\n #\n # The coordinates used for the local variables are\n # already put to the left-to-right freestream \n # coordinates\n #\n #######################################################\n #######################################################\n\n from progressbar import ProgressBar,Percentage,Bar,ETA,SimpleProgress\n import h5py\n import numpy as np\n import pandas as pd\n from os.path import isfile,join\n\n write_frequency = 150\n\n case = hdf5_file.replace('.hdf5','')\n\n # File related things ######################################################\n if not output_file:\n output_file = case+\".hdf5\"\n\n if airfoil_normal:\n output_file = output_file+\"_AirfoilNormal\"\n\n if not output_file.endswith('.hdf5'):\n output_file = output_file.replace(\".hdf5\",\"\")+\".hdf5\"\n\n if isfile(output_file) and not overwrite:\n print \" Exiting; file exists:\\n{0}\".format(output_file)\n return 0\n # ##########################################################################\n\n h5 = h5py.File(join(root,hdf5_file),'r')\n\n # Read the available times #################################################\n available_times = sorted([int(f[0]) for f in \\\n h5['{0}'.format(case)].iteritems()\\\n if not 'mask' in f and not 'x' in f and not 'y'\\\n in f])\n # ##########################################################################\n\n if time_step_limit:\n available_times = available_times[:time_step_limit]\n\n progress = ProgressBar(\n widgets=[\n Bar(),' ',\n Percentage(),' ',\n ETA(), ' (time step ',\n SimpleProgress(),')'], \n maxval=len(available_times)\n ).start()\n\n t_x_cnt = 0\n cnt = 0\n\n hdf = pd.HDFStore(output_file)\n\n df_dump = pd.DataFrame( columns = ['x','y','u','v','w','time_step'] )\n\n rotation_angle = serration_angle + angle_correction\n if airfoil_normal:\n rotation_angle = rotation_angle - 11.4\n\n for ti in available_times:\n df = pd.DataFrame( data = {\n 'x' : np.array(h5[\"{0}/y\".format(case)].value),\n 'y' : -np.array(h5[\"{0}/x\".format(case)].value),\n 'u' : 
np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vy')].value),\n 'v' : -np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vx')].value),\n 'w' : np.array(h5[\"{0}/{1}/{2}\".format(case,ti,'Vz')].value),\n })\n\n df[ 'time_step' ] = ti\n\n df = correct_flow_plane_df(\n df,\n rotation_angle = rotation_angle,\n height_correction = height_correction,\n streamwise_correction = streamwise_correction,\n )\n\n if plot and ti == 0:\n show_surface_from_df(\n df[df.time_step == ti], \n 'u'\n )\n\n progress.update(ti)\n\n df_dump = df_dump.append(df,ignore_index=True)\n\n if cnt == write_frequency:\n\n if t_x_cnt == cnt:\n hdf.put(\n case, \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n else:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n df_dump = pd.DataFrame( \n columns = ['x','y','u','v','w','time_step'] \n )\n cnt = 0\n\n if ti == available_times[-1]:\n hdf.append(\n case , \n df_dump.convert_objects(), \n format='table', \n data_columns=True\n )\n\n t_x_cnt += 1\n cnt += 1\n\n hdf.close()\n h5.close()\n\n progress.finish()", "def writeMetaDataOfExperiments(experimentData,path,dict):\n \n if 'exp_type' in dict:\n exp_type = dict['exp_type']\n else:\n print(\"Error: No experiment type has been chosen. Choose either ViSDEM or SaMSEM.\")\n return\n \n if 'filename' in dict:\n filename = dict['filename']\n else:\n filename = 'meta-data'\n path_out = os.path.join(path, filename)\n \n if exp_type == \"visdem\":\n f = open(path_out,'w+')\n f.write(\"ViSDEM meta data\\n\")\n sessions = sorted(set(experimentData['session_id']))\n f.write('... # of sessions: '+str(len(sessions))+' ; '+str(sessions)+'\\n')\n sets = sorted(set(experimentData['set_id']))\n f.write('... # of sets: '+str(len(sets))+' ; '+str(sets)+'\\n')\n motives = sorted(set(experimentData['motive_id']))\n f.write('... # of motives: '+str(len(motives))+' ; '+str(motives)+'\\n')\n daltonizations = sorted(set(experimentData['dalt_id']))\n f.write('... # of daltonization ids: '+str(len(daltonizations))+' ; '+str(daltonizations)+'\\n')\n observers = sorted(set(experimentData['observer_id']))\n f.write(\"... # of observers: \" +str(len(observers))+' ; '+str(observers)+'\\n')\n observers_norm = sorted(set(experimentData[experimentData['observer_coldef_type']==0]['observer_id']))\n f.write(\"...... # of normal observers: \"+str(len(observers_norm))+' ; '+str(observers_norm)+'\\n')\n observers_prot = sorted(set(experimentData[experimentData['observer_coldef_type']==1]['observer_id']))\n f.write(\"...... # of protan observers: \"+str(len(observers_prot))+' ; '+str(observers_prot)+'\\n') \n observers_deut = sorted(set(experimentData[experimentData['observer_coldef_type']==2]['observer_id']))\n f.write(\"...... # of deutan observers: \"+str(len(observers_deut))+' ; '+str(observers_deut)+'\\n') \n f.close()\n elif exp_type == 'samsem':\n f = open(path_out,'w+')\n f.write(\"SaMSEM meta data\\n\")\n sessions = sorted(set(experimentData['session_id']))\n f.write('... # of sessions: '+str(len(sessions))+' ; '+str(sessions)+'\\n')\n images = sorted(set(experimentData['image_id']))\n f.write('... # of images: '+str(len(images))+' ; '+str(images)+'\\n')\n simulations = sorted(set(experimentData['sim_id']))\n f.write('... # of simulations: '+str(len(simulations))+' ; '+str(simulations)+'\\n')\n observers = sorted(set(experimentData['observer_id']))\n f.write(\"... 
# of observers: \" +str(len(observers))+' ; '+str(observers)+'\\n')\n observers_norm = sorted(set(experimentData[experimentData['observer_coldef_type']==0]['observer_id']))\n f.write(\"...... # of normal observers: \"+str(len(observers_norm))+' ; '+str(observers_norm)+'\\n')\n observers_prot = sorted(set(experimentData[experimentData['observer_coldef_type']==1]['observer_id']))\n f.write(\"...... # of protan observers: \"+str(len(observers_prot))+' ; '+str(observers_prot)+'\\n')\n observers_deut = sorted(set(experimentData[experimentData['observer_coldef_type']==2]['observer_id']))\n f.write(\"...... # of deutan observers: \"+str(len(observers_deut))+' ; '+str(observers_deut)+'\\n')\n else:\n print(\"Error: No valid experiment format has been chosen. Choose either visdem or samsem.\")\n return", "def e5loadhalo(id1, t, idarr=None, dir=0):\n\t\n\tt = str(int(t)) # Make sure it's a string of an integer\n\t\n\tif type(dir) != str:\n\t\tdir = '/Users/astevens/Documents/6-MonthProject/e5/' # Default directory for files\n\t\n\tz, Ntot, Nfiles, boxsize, Omega_M, Omega_L, h = e5snaphead('snapshot_'+t, dir) # Read in header info\n\t\n\tif idarr==None:\n\t\tf = open(dir+'snapshotids_'+t,'rb')\n\t\tf.seek(4*sum(Ntot))\n\t\tidarr = np.fromfile(f, 'i4', sum(Ntot)) # Subhalo IDs\n\t\tf.close()\n\t\n\tprint(t, id1, idarr, len(idarr))\n\tidargs = np.argwhere(idarr==id1) # Arguments where idarr is equal to id1\n\t\n\tcon_g = (idargs < Ntot[0]) # Condition to find the total number of gas particles in the halo\n\tcon_dm = (idargs >= Ntot[0]) * (idargs < sum(Ntot[:2])) # Condition ... dark matter\n\tcon_s = (idargs >= sum(Ntot[:2])) * (idargs < sum(Ntot[:5])) # Condition ... stars\n\tcon_bh = (idargs >= sum(Ntot[:5])) # Condition ... black holes\n\t#print con_bh\n\t\n\tNptot = np.array([len(idargs[con_g]), len(idargs[con_dm]), 0, 0, len(idargs[con_s]), len(idargs[con_bh])]) # Total number of particles in the halo in particle type\n\tprint('Nptot is', Nptot)\n\tNaccum, Npaccum = np.zeros(6), np.zeros(6) # Initialize the accumulation of N\n\t\n\t# Initialize arrays that will contain the particle data\n\tpos_g, pos_dm, pos_s, pos_bh = np.zeros((Nptot[0],3)), np.zeros((Nptot[1],3)), np.zeros((Nptot[4],3)), np.zeros((Nptot[5],3))\n\tvel_g, vel_dm, vel_s, vel_bh = np.zeros((Nptot[0],3)), np.zeros((Nptot[1],3)), np.zeros((Nptot[4],3)), np.zeros((Nptot[5],3))\n\tid_g, id_dm, id_s, id_bh = np.zeros(Nptot[0]), np.zeros(Nptot[1]), np.zeros(Nptot[4]), np.zeros(Nptot[5])\n\tmass_g, mass_dm, mass_s, mass_bh = np.zeros(Nptot[0]), np.zeros(Nptot[1]), np.zeros(Nptot[4]), np.zeros(Nptot[5])\n\tu, rho, sfr = np.zeros(Nptot[0]), np.zeros(Nptot[0]), np.zeros(Nptot[0])\n\t\n\tfor fno in range(Nfiles): # fno is file number\n\t\tNbytes = [] # Initialize list that will contain information on the block sizes in each file\n\t\tf = open(dir+'snapshot_'+t+'.'+str(fno), 'rb')\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t# Read header information\n\t\tN = np.fromfile(f, 'u4', 6) # Number of particles for each particle type in this file\n\t\tNsum = sum(N) # Total number of particles in the file\n\t\tmass_pt = np.fromfile(f, 'f8', 6) # Mass of each particle type. 
If 0 then it varies for each particle of that type\n\t\tNmass = sum(N[np.argwhere(mass_pt==0.0)]) # Number of particles in the file with individual masses to be read in\n\t\ta = np.fromfile(f, 'f8', 1)[0] # Expansion factor (normalised to 1 at z=0)\n\t\tz = np.fromfile(f, 'f8', 1)[0] # Redshift of snapshot\n\t\tflag_sfr = np.fromfile(f, 'i4', 1)[0] # Flag for star formation rate\n\t\tflag_feedback = np.fromfile(f, 'i4', 1)[0] # Flag for feedback\n\t\tNtot = np.fromfile(f, 'u4', 6) # Total number of particles for each particle type in the entire simulation\n\t\tflag_cool = np.fromfile(f, 'i4', 1)[0] # Flag for cooling\n\t\tNfiles = np.fromfile(f, 'i4', 1)[0] # Number of files for each snapshot\n\t\tboxsize = np.fromfile(f, 'f8', 1)[0] # Size of box if periodic boundary conditions are used\n\t\tOmega_M = np.fromfile(f, 'f8', 1)[0] # Omega Matter\n\t\tOmega_L = np.fromfile(f, 'f8', 1)[0] # Omega (Lambda) Dark Energy\n\t\th = np.fromfile(f, 'f8', 1)[0] # Little Hubble h\n\t\tflag_StarAge = np.fromfile(f, 'i4', 1)[0] # Flag for the creation times of stars\n\t\tflag_metals = np.fromfile(f, 'i4', 1)[0] # Flag for metallicity values\n\t\t\n\t\tg_con = (idargs < N[0]+Naccum[0]) * (idargs >= Naccum[0]) # Gas condition to figure out how many particles for the halo are in this file\n\t\tdm_con = (idargs-Ntot[0] < N[1]+Naccum[1]) * (idargs-Ntot[0] >= Naccum[1]) # Dark matter condition...\n\t\ts_con = (idargs-sum(Ntot[:4]) < N[4]+Naccum[4]) * (idargs-sum(Ntot[:4]) >= Naccum[4]) # Star condition...\n\t\tbh_con = (idargs-sum(Ntot[:5]) < N[5]+Naccum[5]) * (idargs-sum(Ntot[:5]) >= Naccum[5]) # Black hole condition...\n\t\t\n\t\tg_args = idargs[g_con] - Naccum[0] # Location of gas particles to extract in the data file\n\t\tdm_args = idargs[dm_con] - Ntot[0] - Naccum[1] # Ditto dark matter\n\t\ts_args = idargs[s_con] - sum(Ntot[:4]) - Naccum[4] # Ditto stars\n\t\tbh_args = idargs[bh_con] - sum(Ntot[:5]) - Naccum[5] # Ditto black holes\n\t\t\n\t\tNparts = np.array([ len(g_args), len(dm_args), 0, 0, len(s_args), len(bh_args) ])\n\t\t\n\t\tNmass_pt = np.zeros(6,dtype='u4')\n\t\tfor i in range(6):\n\t\t\tif mass_pt[i]==0: Nmass_pt[i] = N[i]\n\t\t\n\t\t## Extract positions\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + arg*24)\n\t\t\tpos_g[Npaccum[0]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(dm_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + (arg+N[0])*24)\n\t\t\tpos_dm[Npaccum[1]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(s_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + (arg+sum(N[:4]))*24)\n\t\t\tpos_s[Npaccum[4]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(bh_args):\n\t\t\tf.seek(sum(Nbytes) + 4*3 + (arg+sum(N[:5]))*24)\n\t\t\tpos_bh[Npaccum[5]+i,:] = np.fromfile(f, 'f8', 3)\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*2)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract velocities\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + arg*24)\n\t\t\tvel_g[Npaccum[0]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(dm_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + (arg+N[0])*24)\n\t\t\tvel_dm[Npaccum[1]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(s_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + (arg+sum(N[:4]))*24)\n\t\t\tvel_s[Npaccum[4]+i,:] = np.fromfile(f, 'f8', 3)\n\t\tfor i, arg in enumerate(bh_args):\n\t\t\tf.seek(sum(Nbytes) + 4*5 + (arg+sum(N[:5]))*24)\n\t\t\tvel_bh[Npaccum[5]+i,:] = np.fromfile(f, 'f8', 3)\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*4)\n\t\tNbytes += list(np.fromfile(f, 'i4', 
1))\n\t\t\n\t\t## Extract IDs\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + arg*8)\n\t\t\tid_g[Npaccum[0]+i] = np.fromfile(f, 'u8', 1)\n\t\tfor i, arg in enumerate(dm_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + (arg+N[0])*8)\n\t\t\tid_dm[Npaccum[1]+i] = np.fromfile(f, 'u8', 1)\n\t\tfor i, arg in enumerate(s_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + (arg+sum(N[:4]))*8)\n\t\t\tid_s[Npaccum[4]+i] = np.fromfile(f, 'u8', 1)\n\t\tfor i, arg in enumerate(bh_args):\n\t\t\tf.seek(sum(Nbytes) + 4*7 + (arg+sum(N[:5]))*8)\n\t\t\tid_bh[Npaccum[5]+i] = np.fromfile(f, 'u8', 1)\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*6)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract Masses\n\t\tif mass_pt[0]==0:\n\t\t\tfor i, arg in enumerate(g_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + arg*8)\n\t\t\t\tmass_g[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_g[Npaccum[0]:Npaccum[0]+Nparts[0]] = mass_pt[0]*np.ones(Nparts[0])\n\t\t\n\t\tif mass_pt[1]==0:\n\t\t\tfor i, arg in enumerate(dm_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + (arg+Nmass_pt[0])*8)\n\t\t\t\tmass_dm[Npaccum[1]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_dm[Npaccum[1]:Npaccum[1]+Nparts[1]] = mass_pt[1]*np.ones(Nparts[1])\n\t\t\n\t\tif mass_pt[4]==0:\n\t\t\tfor i, arg in enumerate(s_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + (arg+sum(Nmass_pt[:4]))*8)\n\t\t\t\tmass_s[Npaccum[4]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_s[Npaccum[4]:Npaccum[4]+Nparts[4]] = mass_pt[4]*np.ones(Nparts[4])\n\t\t\n\t\tif mass_pt[5]==0:\n\t\t\tfor i, arg in enumerate(bh_args):\n\t\t\t\tf.seek(sum(Nbytes) + 4*9 + (arg+sum(Nmass_pt[:5]))*8)\n\t\t\t\tmass_bh[Npaccum[5]+i] = np.fromfile(f, 'f8', 1)\n\t\telse:\n\t\t\tmass_bh[Npaccum[5]:Npaccum[5]+Nparts[5]] = mass_pt[5]*np.ones(Nparts[5])\n\t\t##\n\t\t\n\t\tf.seek(sum(Nbytes)+4*8)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\t## Extract gas properties\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*11 + arg*8)\n\t\t\tu[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\t\n\t\tf.seek(sum(Nbytes)+4*10)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*13 + arg*8)\n\t\t\trho[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\t\n\t\tf.seek(sum(Nbytes)+4*12)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\n\t\tif flag_cool==1:\n\t\t\tf.seek(sum(Nbytes)+4*14)\n\t\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\tf.seek(sum(Nbytes)+4*16)\n\t\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\t\t\textra = 16\n\t\telse:\n\t\t\textra = 0\n\n\t\tf.seek(sum(Nbytes) + 4*14 + extra)\n\t\tNbytes += list(np.fromfile(f, 'i4', 1))\n\n\t\tfor i, arg in enumerate(g_args):\n\t\t\tf.seek(sum(Nbytes) + 4*17 + extra + arg*8)\n\t\t\tsfr[Npaccum[0]+i] = np.fromfile(f, 'f8', 1)\n\t\t##\n\t\t\n\t\tNaccum += N\n\t\tNpaccum += Nparts\n\t\tf.close()\n\t\n\t\n\t## Extract premeasured properties of halo from tab file\n\tNtotgroups, TotNids, Nfiles, NtotSubgroups = e5halotabhead(t=t)\n\tNaccum = 0\n\tfloattype = 'f4'\n\tidtype = 'u8'\n\t\n\tif np.max(idarr) == NtotSubgroups-1: # Determine if the code is getting a group or subhalo\n\t\tfor i in range(Nfiles):\n\t\t\tf = open(dir+'subhalo_tab_'+t+'.'+str(i))\n\t\t\tNgroups = np.fromfile(f, 'i4', 1)[0] # Number of groups in this file\n\t\t\tf.seek(24)\n\t\t\tNsubgroups = np.fromfile(f, 'i4', 1)[0] # Number of subhaloes in this file\n\t\t\t\n\t\t\tif id1<=Naccum+Nsubgroups:\n\t\t\t\tid2 = id1-Naccum # Number of haloes' data to skip (number listed in this 
file)\n\t\t\t\tf.seek(32 + 19*4*Ngroups)\n\t\t\t\t# Data for subhaloes\n\t\t\t\tNparts = np.fromfile(f, 'i4', Nsubgroups)[id2] # Number of particles in the subhalo\n\t\t\t\tprint('Nparts', Nparts)\n\t\t\t\toffset = np.fromfile(f, 'i4', Nsubgroups)[id2]\n\t\t\t\tparent = np.fromfile(f, 'i4', Nsubgroups)[id2] # Index of parent (neither Yu nor I are sure what this is)\n\t\t\t\tmass = np.fromfile(f, floattype, Nsubgroups)[id2] *1e10/h # Mass of the subhalo in solar masses (sum of all particle types)\n\t\t\t\tpos = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] *1e3/h # Position of the subhalo in pc\n\t\t\t\tvel = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] # Halo velocity in km/s\n\t\t\t\tcom = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] *1e3/h # COM location of the halo (almost identical to halopos)\n\t\t\t\tspin = np.fromfile(f, np.dtype((floattype,3)), Nsubgroups)[id2] # Spin of the halo (presumably the dimensionless spin parameter?)\n\t\t\t\tveldisp = np.fromfile(f, floattype, Nsubgroups)[id2] # Velocity dispersion of the subhalo\n\t\t\t\tvmax = np.fromfile(f, floattype, Nsubgroups)[id2] # Maximum circular velocity of the subhalo (presumably km/s)\n\t\t\t\tvmaxrad = np.fromfile(f, floattype, Nsubgroups)[id2]*1e3/h # Radius for the maximum circular velocity in pc\n\t\t\t\thalfmassr = np.fromfile(f, floattype, Nsubgroups)[id2]*1e3/h # Radius encompasses half the subhalo's mass in pc\n\t\t\t\tidbp = np.fromfile(f, idtype, Nsubgroups)[id2] # ID of the most bound particle in the subhalo\n\t\t\t\tidgp = np.fromfile(f, 'u4', Nsubgroups)[id2] # Index of group that this subhalo is in.\n\t\t\t\tpartmass = np.fromfile(f, np.dtype(('f4',6)), Nsubgroups)[id2]*1e10/h # Integrated mass of the halo for each individual particle type in solar masses\n\t\t\t\ttabdata = [Nparts, offset, parent, mass, pos, vel, com, spin, veldisp, vmax, vmaxrad, halfmassr, idbp, idgp, partmass]\n\t\t\t\tbreak\n\t\t\tNaccum += Nsubgroups\n\t\n\telif np.max(idarr) == Ntotgroups-1:\n\t\tfor i in range(Nfiles):\n\t\t\tf = open(dir+'subhalo_tab_'+t+'.'+str(i))\n\t\t\tNgroups = np.fromfile(f, 'i4', 1)[0] # Number of groups in this file\n\t\t\t\n\t\t\tif id1<=Naccum+Ngroups:\n\t\t\t\tf.seek(32)\n\t\t\t\tid2 = id1-Naccum\n\t\t\t\tNparts = np.fromfile(f, 'i4', Ngroups)[id2] # Number of particles in each group\n\t\t\t\toffset = np.fromfile(f, 'i4', Ngroups)[id2]\n\t\t\t\tmass = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # Mass of each group\n\t\t\t\tpos = np.fromfile(f, np.dtype((floattype,3)), Ngroups)[id2]*1e3/h # COM of the group\n\t\t\t\tmmean200 = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # Mass within radius enclosing an average density 200x that of the mean density of the Universe\n\t\t\t\trmean200 = np.fromfile(f, floattype, Ngroups)[id2]*1e3/h # The radius for the above\n\t\t\t\tmcrit200 = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # As for mmean200 but critical density of the Universe\n\t\t\t\trcrit200 = np.fromfile(f, floattype, Ngroups)[id2]*1e3/h # Radius for the above\n\t\t\t\tmtoph200 = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # As above masses but for a tophat. 
Not sure about this...\n\t\t\t\trtoph200 = np.fromfile(f, floattype, Ngroups)[id2]*1e3/h\n\t\t\t\tveldispmean200 = np.fromfile(f, floattype, Ngroups)[id2] # This and the next are the velocity dispersions for the various radii\n\t\t\t\tveldispcrit200 = np.fromfile(f, floattype, Ngroups)[id2]\n\t\t\t\tveldisptoph200 = np.fromfile(f, floattype, Ngroups)[id2]\n\t\t\t\tlencontam = np.fromfile(f, 'i4', Ngroups)[id2] # Number of particles in the group that are not associated with a subhalo\n\t\t\t\tmasscontam = np.fromfile(f, floattype, Ngroups)[id2]*1e10/h # Summed mass of those above particles\n\t\t\t\tnhalo = np.fromfile(f, 'i4', Ngroups)[id2] # Number of subhaloes within the group\n\t\t\t\tfirsthalo = np.fromfile(f, 'i4', Ngroups)[id2] # Related to concatenation and \"offset\". Can probably ignore.\n\t\t\t\ttabdata = [Nparts, offset, mass, pos, mmean200, rmean200, mcrit200, rcrit200, mtoph200, rtoph200, veldispmean200, veldispcrit200, veldisptoph200, lencontam, masscontam, nhalo, firsthalo]\n\t\t\t\tbreak\n\t\t\tNaccum += Ngroups\n\t##\n\treturn [pos_g[:,0]*1e3*a/h, pos_g[:,1]*1e3*a/h, pos_g[:,2]*1e3*a/h, vel_g[:,0], vel_g[:,1], vel_g[:,2], id_g, mass_g*1e10/h, u*1e6, rho*10*h*h/(a**3), sfr], [pos_dm[:,0]*1e3*a/h, pos_dm[:,1]*1e3*a/h, pos_dm[:,2]*1e3*a/h, vel_dm[:,0], vel_dm[:,1], vel_dm[:,2], id_dm, mass_dm*1e10/h], [pos_s[:,0]*1e3*a/h, pos_s[:,1]*1e3*a/h, pos_s[:,2]*1e3*a/h, vel_s[:,0], vel_s[:,1], vel_s[:,2], id_s, mass_s*1e10/h], [pos_bh[:,0]*1e3*a/h, pos_bh[:,1]*1e3*a/h, pos_bh[:,2]*1e3*a/h, vel_bh[:,0], vel_bh[:,1], vel_bh[:,2], id_bh, mass_bh*1e10/h], tabdata", "def touch_result_hdf5_file(target_dir, poe, ds_names, n_realizations,\n n_periods):\n file_name = _HDF5_FILE_NAME_FMT % poe\n full_path = os.path.join(target_dir, file_name)\n\n ds_shape = (n_realizations, n_periods)\n\n with h5py.File(full_path, 'w') as h5_file:\n for name in ds_names:\n h5_file.create_dataset(name, dtype=numpy.float64, shape=ds_shape)\n\n return full_path", "def test_create(self):\n\n smtH5 = ModisSmoothH5(\n rawfile=self.testfile,\n targetdir=\"/tmp/data\",\n )\n\n self.assertFalse(smtH5.exists)\n smtH5.create()\n self.assertTrue(smtH5.exists)\n\n with h5py.File(smtH5.filename, \"r\") as hdf5_file:\n ds = hdf5_file.get(\"data\")\n self.assertTrue(ds)\n\n attrs = ds.attrs\n ysize = attrs[\"RasterYSize\"] * attrs[\"RasterXSize\"]\n self.assertEqual(ds.shape, (ysize, 4))\n self.assertEqual(attrs[\"temporalresolution\"], 8)\n\n dates = hdf5_file.get(\"dates\")\n self.assertTrue(dates)\n\n self.assertNotEqual(smtH5.last_smoothed, \"2002201\")\n self.assertEqual(smtH5.last_smoothed, \"2002209\")\n\n smtH5.filename.unlink()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a granule list file from a file path pattern matching the granules. If a granule has already been ingested with the same md5sum signature, it is not included in this list. When deconstruct_nfs is True, the paths will be shown as viewed on the nfs server and not as they are mounted on the nfs client where the script runs (the default behaviour).
def create_granule_list(file_path_pattern, dataset_ingestion_history_manager, granule_list_file_path, deconstruct_nfs=False, date_from=None, date_to=None, forward_processing=False): file_list = get_file_list(file_path_pattern) logger.info("Create granule list file %s", granule_list_file_path) dir_path = os.path.dirname(granule_list_file_path) logger.info("Granule list file created in directory %s", dir_path) Path(dir_path).mkdir(parents=True, exist_ok=True) if forward_processing: if dataset_ingestion_history_manager: timestamp_from = dataset_ingestion_history_manager.get_latest_ingested_file_update() if dataset_ingestion_history_manager is None or timestamp_from is None: logger.info("No ingestion history available, forward processing ignored") timestamp_from = None timestamp_to = None else: timestamp_from = date_from.timestamp() if date_from else None timestamp_to = date_to.timestamp() if date_to else None if deconstruct_nfs: mount_points = nfs_mount_parse.get_nfs_mount_points() with open(granule_list_file_path, 'w') as file_handle: for file_path in file_list: if is_in_time_range(file_path, timestamp_from, timestamp_to): filename = os.path.basename(file_path) already_ingested = False if dataset_ingestion_history_manager: logger.info(f"is file {file_path} already ingested ?") already_ingested = dataset_ingestion_history_manager.has_valid_cache(file_path) if not already_ingested: logger.info(f"file {filename} not ingested yet, added to the list") if deconstruct_nfs: file_path = nfs_mount_parse.replace_mount_point_with_service_path(file_path, mount_points) file_handle.write(f'{file_path}\n') else: logger.debug(f"file {filename} already ingested with same md5sum") else: logger.debug(f"file {file_path} has not been updated in the targeted time range")
[ "def pdm_gfal_list_file(props, root, result):\n listing = props.copy()\n listing['name'] = os.path.split(root)[1]\n result[os.path.split(root)[0]] = [listing]", "def granules ( path, prod, syn_time, coll='006', nsyn=8 ):\n\n # Determine synoptic time range\n # -----------------------------\n dt = timedelta(seconds = 12. * 60. * 60. / nsyn) # half of time window\n t1, t2 = (syn_time-dt,syn_time+dt) # time window boundaries\n\n # Find MODIS granules in synoptic time range\n # ------------------------------------------\n dtGranule = timedelta(minutes=5)\n t = datetime(t1.year,t1.month,t1.day,t1.hour,0,0)\n Granules = []\n while t < t2:\n if t >= t1:\n doy = t.timetuple()[7]\n basen = \"%s/%s/%04d/%03d/%s_L2.A%04d%03d.%02d%02d.%s.*.hdf\"\\\n %(path,prod,t.year,doy,prod,t.year,doy,t.hour,t.minute,coll)\n try:\n filen = glob(basen)[0]\n Granules += [filen,]\n except:\n pass\n t += dtGranule\n\n if len(Granules) == 0:\n print \"WARNING: no %s collection %s granules found for time\" % (prod, coll), syn_time\n\n return Granules", "def test_make_file_list(self):\n path = \"/eos/uscms/store/user/cmsdas/test\"\n output = RecursiveFileList.make_file_list(path, RecursiveFileList.map_dir(path, path))\n assert isinstance(output, list)\n assert len(output) == 62", "def get_fileList(reanalysis, grid='Nh50km'):\n \n globStr = {\n 'ERAI': '/disks/arctic5_raid/abarrett/ERA_Interim/daily/PRECTOT/*/*/' + \\\n 'era_interim.PRECIP_STATS.??????.month.Nh50km.nc',\n 'CFSR': '/disks/arctic5_raid/abarrett/CFSR*/TOTPREC/????/??/' + \\\n 'CFSR*.*.PRECIP_STATS.??????.month.Nh50km.nc4',\n 'MERRA': '/disks/arctic5_raid/abarrett/MERRA/daily/PRECTOT/*/*/' + \\\n 'MERRA.prod.PRECIP_STATS.assim.tavg1_2d_flx_Nx.??????.month.Nh50km.nc4',\n 'MERRA2': '/disks/arctic5_raid/abarrett/MERRA2/daily/PRECTOT/*/*/' + \\\n 'MERRA2.tavg1_2d_flx_Nx.PRECIP_STATS.??????.month.Nh50km.nc4',\n 'JRA55': '/projects/arctic_scientist_data/Reanalysis/JRA55/daily/TOTPREC/*/*/' + \\\n 'JRA55.fcst_phy2m.PRECIP_STATS.??????.month.Nh50km.nc',\n 'ERA5': '/projects/arctic_scientist_data/Reanalysis/ERA5/daily/TOTPREC/????/??/' + \\\n 'era5.single_level.PRECIP_STATS.??????.month.Nh50km.nc4'\n }\n\n filelist = glob.glob(globStr[reanalysis])\n \n return sorted(filelist)", "def create_fs_rules():\n return [\n RootRule(PathGlobs),\n ]", "def parse_lammps_dumps(file_pattern):\n files = glob.glob(file_pattern)\n if len(files) > 1:\n pattern = r\"%s\" % file_pattern.replace(\"*\", \"([0-9]+)\")\n pattern = pattern.replace(\"\\\\\", \"\\\\\\\\\")\n files = sorted(files, key=lambda f: int(re.match(pattern, f).group(1)))\n\n for fname in files:\n with zopen(fname, \"rt\") as f:\n dump_cache = []\n for line in f:\n if line.startswith(\"ITEM: TIMESTEP\"):\n if len(dump_cache) > 0:\n yield LammpsDump.from_string(\"\".join(dump_cache))\n dump_cache = [line]\n else:\n dump_cache.append(line)\n yield LammpsDump.from_string(\"\".join(dump_cache))", "def get_files_list_and_lock_dirs(fast5s_dir, ignore_locks):\n ignore_locks_mess = (\n 'This set of reads is currently being processed by another ' +\n 'resquiggle command. Multiple resquiggle commands cannot be ' +\n 'run concurrently on a set of reads to avoid corrupting ' +\n 'read files. 
If you are sure this set of reads is not being ' +\n 'processed by another command (usually caused by previous ' +\n 'unexpected exit) set the --ignore-read-locks flag.')\n all_fast5s = []\n lock_fns = []\n try:\n # walk through directory structure searching for fast5 files\n for root, _, fns in os.walk(fast5s_dir):\n lock_fn = get_lock_fn(root)\n if not ignore_locks and os.path.exists(lock_fn):\n clear_tombo_locks(lock_fns)\n error_message_and_exit(ignore_locks_mess)\n lock_fns.append(lock_fn)\n # create empty file indicating this directory is locked\n open(lock_fn, 'w').close()\n\n for fn in fns:\n if not fn.endswith('.fast5'): continue\n all_fast5s.append(os.path.join(root, fn))\n except:\n clear_tombo_locks(lock_fns)\n error_message_and_exit(\n 'Unexpected error during file enumeration. Check that you have ' +\n 'write permission within the specified [fast5_basedir].')\n\n return all_fast5s, lock_fns", "def parse(self, file):\n\n path = self.folder+'/'+file\n new_patterns = []\n logging.info('[>] Parsing all patterns in ' + file)\n \n raw_text = open(path,'r')\n lines = raw_text.readlines()\n patterns = getChunks(lines,4)\n for pattern in patterns:\n vuln = pattern[0]\n entry_points = pattern[1].split(',')\n san_functions = pattern[2].split(',')\n sens_sinks = pattern[3].split(',')\n\n\n new_pattern = Pattern(vuln,entry_points,san_functions,sens_sinks)\n new_patterns.append(new_pattern)\n\n logging.debug(new_pattern)\n\n raw_text.close()\n self.known_patterns += new_patterns", "def create_lauer(f_lauer_file):\n list2 = [[\"Drug_A\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"SupplierA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_A\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"SupplierB\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_A\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"SupplierC\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_B\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"SupplierD\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_B\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"SupplierA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_C\", \"API_C\", \"XXXX\", \"API_C\", \"Drug_C freetext\", \"unit\", \"Supp\", \"SupplierA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_C\", \"API_C\", \"XXXX\", \"API_C\", \"Drug_C freetext\", \"unit\", \"Supp\", \"SupplierA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_C\", \"API_C\", \"XXXX\", \"API_C\", \"Drug_C freetext\", \"unit\", \"Supp\", \"SupplierA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_D\", \"API_D\", \"XXXX\", \"API_D\", \"Drug_D freetext\", \"unit\", \"Supp\", \"SupplierD\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_E\", \"API_E\", \"XXXX\", \"API_E\", \"Drug_E freetext\", \"unit\", \"Supp\", \"SupplierE\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_F\", \"API_F\", \"XXXX\", \"API_F\", \"Drug_F freetext\", \"unit\", \"Supp\", \"SupplierC\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_A\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"ParalleletraderA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_A\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"ParalleletraderB\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_A\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"ParalleletraderC\", 12345678, \"NaN\", 123, 
123, 123, 123],\n [\"Drug_B\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"ParalleletraderA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_B\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"ParalleletraderB\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_B\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"ParalleletraderD\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_C\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"ParalleletraderA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_C\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"ParalleletraderB\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_C\", \"API_A\", \"XXXX\", \"API_A\", \"Drug_A freetext\", \"unit\", \"Supp\", \"ParalleletraderC\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_D\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"ParalleletraderA\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_D\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"ParalleletraderB\", 12345678, \"NaN\", 123, 123, 123, 123],\n [\"Drug_D\", \"API_B\", \"XXXX\", \"API_B\", \"Drug_B freetext\", \"unit\", \"Supp\", \"ParalleletraderD\", 12345678, \"NaN\", 123, 123, 123, 123],\n ]\n sampling = list2\n\n df_l = pd.DataFrame(sampling, columns=[\"Drug\", \"API\", 'Product', \"API_l\", \"Product-Int\", \"Einh.\", \"Supplier_short\", \"Supplier\", \"PZN\", \"V\", \"Gewicht Pack. [g]\", \"Höhe Pack. [mm]\", \"Breite Pack. [mm]\", \"Tiefe Pack. [mm]\"])\n apu = []\n for x in range(len(df_l)):\n apu.append(random.uniform(2, 1500))\n df_l[\"APU / HAP\"]=apu\n df_l[\"PpU (APU exkl. NBR)\"]=[x-6 for x in apu]\n df_l[\"PpU-bas. Taxe-EK\"]=[x-random.randint(2,10) for x in apu]\n df_l[\"PpU-bas. 
Taxe-VK\"]=[x-random.randint(-3,10) for x in apu]\n df_l[\"Amount\"]=[random.randint(1,3000) for x in range(len(df_l))]\n df_l.to_csv(f_lauer_file, index = False)\n return df_l", "def parse_groups_file(path):\n groups = []\n\n def read_line(line):\n group, count = line.split(' +++ ', 1)\n parsed_group = list(ast.literal_eval(group))\n groups.append(parsed_group)\n common.read_file_lines(path, read_line)\n\n return groups", "def processFiles(pathList):\n lettersRegEx = re.compile('^\\D+', re.IGNORECASE)\n files = []\n masterList = []\n \n for i in range(len(pathList)):\n data = map(lambda l: l.strip().split('\\t'), open(pathList[i],'rU'))\n if data[0][0].startswith('#'): data.pop(0) # get rid of headers\n tmpDict = {}\n for i in range(len(data)):\n tmpDict[data[i][0]] = combinePos1and2(data[i][1:])\n \n \n # Add new hitDict to the master list\n masterList.append(tmpDict)\n return masterList", "def import_landmark_files(pattern, max_landmarks=None, shuffle=False,\n as_generator=False, verbose=False):\n return _import_glob_lazy_list(pattern, image_landmark_types,\n max_assets=max_landmarks, shuffle=shuffle,\n as_generator=as_generator, verbose=verbose)", "def test_make_dir_list(self):\n path = \"/eos/uscms/store/user/cmsdas/test\"\n output = RecursiveFileList.make_dir_list(path, RecursiveFileList.map_dir(path, path))\n assert isinstance(output, list)\n assert len(output) == 6", "def _get_glob_patterns(self, username, realm):\n\n for group in [a for a in self.config.sections]:\n groups = self._get_groups(username)\n if group in groups: # User is in a group listed, pull up realm\n for group_realm, glob_patterns in self.config[group].iteritems():\n if group_realm == realm:\n #glob_patterns = to_list(glob_patterns) # Split comma-delimited list \n\n if isinstance(glob_patterns, basestring):\n return [glob_patterns]\n else:\n return glob_patterns\n\n return [] # Nada found", "def test_get_file_list(self):\n path = \"/eos/uscms/store/user/cmsdas/test\"\n output = RecursiveFileList.get_file_list(path)\n assert isinstance(output, list)\n assert len(output) == 62", "def group_tree(root, config):\n files = []\n dirs = []\n if root == \"/dev/null\":\n return []\n for node in os.listdir(root):\n node_path = os.path.join(root, node)\n if node == \"mdf.json\":\n with open(node_path) as f:\n try:\n new_config = json.load(f)\n logger.debug(\"Config updating: \\n{}\".format(new_config))\n except Exception as e:\n logger.warning(\"Error reading config file '{}': {}\".format(node_path, str(e)))\n else:\n config = mdf_toolbox.dict_merge(new_config, config)\n elif os.path.isfile(node_path):\n files.append(node_path)\n elif os.path.isdir(node_path):\n dirs.append(node_path)\n else:\n logger.debug(\"Ignoring non-file, non-dir node '{}'\".format(node_path))\n\n # Group the files\n # list \"groups\" is list of dict, each dict contains actual file list + extractor info/config\n groups = []\n # Group by dir overrides other grouping\n if config.get(\"group_by_dir\"):\n groups.append({\"files\": files,\n \"extractors\": [],\n \"params\": {}})\n else:\n for format_rules in config.get(\"known_formats\", {}).values():\n format_name_list = format_rules[\"files\"]\n format_groups = {}\n # Check each file for rule matching\n # Match to appropriate group (with same pre/post pattern)\n # eg a_[match]_b groups with a_[other match]_b but not c_[other match]_d\n for f in files:\n fname = os.path.basename(f).lower().strip()\n for format_name in format_name_list:\n if format_name in fname:\n pre_post_pattern = 
fname.replace(format_name, \"\")\n if not format_groups.get(pre_post_pattern):\n format_groups[pre_post_pattern] = []\n format_groups[pre_post_pattern].append(f)\n break\n # Remove grouped files from the file list and add groups to the group list\n for g in format_groups.values():\n for f in g:\n files.remove(f)\n group_info = {\n \"files\": g,\n \"extractors\": format_rules[\"extractors\"],\n \"params\": format_rules[\"params\"]\n }\n groups.append(group_info)\n\n # NOTE: Keep this grouping last!\n # Default grouping: Each file is a group\n groups.extend([{\"files\": [f],\n \"extractors\": [],\n \"params\": {}}\n for f in files])\n\n [groups.extend(group_tree(d, config)) for d in dirs]\n\n return groups", "def add_files(path, file_list, mana, step_len, snr):\n for the_file in file_list:\n mana.read_data(path + the_file, step_len=step_len, snr=snr, norm=True)", "def H5List(filename, node='/', list_all=False):\n\n def TraverseNode(obj, path, list_all):\n \"\"\"Traverses a node in HDF5 and builds the catalog.\"\"\"\n catalog = []\n if isinstance(obj, h5py.Group):\n if list_all:\n catalog.append(path)\n for child in obj.iterkeys():\n child_path = os.path.join(path, child)\n catalog += TraverseNode(obj[child], child_path, list_all)\n elif isinstance(obj, h5py.Dataset):\n catalog.append(path)\n return catalog\n\n with contextlib.closing(h5py.File(filename, 'r')) as f:\n if node not in f:\n raise H5IndexError(node, [], 'Cannot index %s' % node)\n else:\n catalog = TraverseNode(f[node], node, list_all)\n return catalog", "def read_montage_table():\n files_dict = {'u':[],'g':[],'r':[],'i':[],'z':[]}\n files = sp.check_output(\"awk '{print $NF}' *.imglist | grep _st\",shell=True).decode(\"UTF-8\").strip().split('\\n')\n for i in files:\n _dict = parse_path(i)\n files_dict[_dict[\"filter\"]].append(_dict['file'])\n\n\n return files_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the configuration and launch the ingestion for the given collection row
def collection_row_callback(collection, collection_config_template, granule_file_list_root_path, dataset_configuration_root_path, history_root_path, deconstruct_nfs=False, **pods_run_kwargs ): dataset_id = collection['id'] netcdf_variable = collection['variable'] netcdf_file_pattern = collection['path'] if 'forward_processing' in collection.keys(): forward_processing = collection['forward_processing'] else: forward_processing = False granule_list_file_path = os.path.join(granule_file_list_root_path, f'{dataset_id}-granules.lst') dataset_ingestion_history_manager = sdap_ingest_manager.history_manager\ .DatasetIngestionHistoryFile(history_root_path, dataset_id, lambda x: str(os.path.getmtime(x))) time_range = {} for time_boundary in {"from", "to"}: if time_boundary in collection.keys() and collection[time_boundary]: # add prefix "from" because is a reserved name which can not be used as function argument time_range[f'date_{time_boundary}'] = datetime.fromisoformat(collection[time_boundary]) logger.info(f"time criteria {time_boundary} is {time_range[f'date_{time_boundary}']}") create_granule_list(netcdf_file_pattern, dataset_ingestion_history_manager, granule_list_file_path, deconstruct_nfs=deconstruct_nfs, **time_range, forward_processing=forward_processing) dataset_configuration_file_path = os.path.join(dataset_configuration_root_path, f'{dataset_id}-config.yml') create_dataset_config(dataset_id, netcdf_variable, collection_config_template, dataset_configuration_file_path) cwd = os.getcwd() # group must match the following regex (([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])? # and when suffixed with uid must not exceed 64 characters prog = re.compile(GROUP_PATTERN) group = prog.match(dataset_id[:19]) if group is None: group_name = GROUP_DEFAULT_NAME else: group_name = group.group(0) pods_run_kwargs['file_list_path'] = granule_list_file_path pods_run_kwargs['job_config'] = dataset_configuration_file_path pods_run_kwargs['job_group'] = group_name pods_run_kwargs['ningester_version'] = '1.1.0' pods_run_kwargs['delete_successful'] = True pods_run_kwargs['hist_manager'] = dataset_ingestion_history_manager def param_to_str_arg(k, v): k_with_dash = k.replace('_', '-') str_k = f'--{k_with_dash}' if type(v) == bool: if v: return [str_k] else: return [] elif isinstance(v, list): return [str_k, ','.join(v)] else: return [str_k, str(v)] pod_launch_options = [param_to_str_arg(k, v) for (k,v) in pods_run_kwargs.items()] flat_pod_launch_options = [item for option in pod_launch_options for item in option] pod_launch_cmd = ['run_granules'] + flat_pod_launch_options logger.info("launch pod with command:\n%s", " ".join(pod_launch_cmd)) sdap_ingest_manager.granule_ingester.create_and_run_jobs(**pods_run_kwargs)
[ "def test_create_ingestion_configuration(self):\n\n\n #------------------------------------------------------------------------\n # Make assertions\n #----------------------------------------------------------------------\n # checking that an ingestion_configuration_id gets successfully generated\n self.assertIsNotNone(self.ingestion_configuration_id, \"Could not generate ingestion_configuration_id.\")\n\n # read the ingestion configuration object and see if it contains what it is supposed to....\n ingestion_configuration = self.ingestion_cli.read_ingestion_configuration(self.ingestion_configuration_id)\n\n self.assertEquals(ingestion_configuration.number_of_workers, self.number_of_workers)\n self.assertEquals(ingestion_configuration.hdf_storage.relative_path, self.hdf_storage.relative_path)\n self.assertEquals(ingestion_configuration.couch_storage.datastore_name, self.couch_storage.datastore_name)", "def test_dataset_config_dict_in_ingestion_worker(self):\n\n\n #------------------------------------------------------------------------\n # Get the ingestion process instances:\n #------------------------------------------------------------------------\n\n transforms = [self.rr_cli.read(assoc.o)\n for assoc in self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)]\n\n proc_1 = self.container.proc_manager.procs[transforms[0].process_id]\n log.info(\"PROCESS 1: %s\" % str(proc_1))\n\n proc_2 = self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n ar_1 = gevent.event.AsyncResult()\n def got_event_1(msg, headers):\n ar_1.set(msg)\n\n ar_2 = gevent.event.AsyncResult()\n def got_event_2(msg, headers):\n ar_2.set(msg)\n\n proc_1.dataset_configs_event_test_hook = got_event_1\n\n proc_2.dataset_configs_event_test_hook = got_event_2\n\n #------------------------------------------------------------------------\n # Create the dataset config\n #------------------------------------------------------------------------\n\n dataset_config_id = self.ingestion_cli.create_dataset_configuration(\n dataset_id = self.input_dataset_id,\n archive_data = True,\n archive_metadata = False,\n ingestion_configuration_id = self.ingestion_configuration_id\n )\n #--------------------------------------------------------------------------------------------------------\n # Do assertions and checks!\n #--------------------------------------------------------------------------------------------------------\n\n ar_1.get(timeout=5)\n ar_2.get(timeout=5)\n\n\n self.assertIn(self.input_stream_id, proc_1.dataset_configs)\n self.assertIn(self.input_stream_id, proc_2.dataset_configs)", "def test_dataset_config_implementation_for_science_data(self):\n\n\n #--------------------------------------------------------------------------------------------------------\n # Get the ingestion process instances:\n #--------------------------------------------------------------------------------------------------------\n\n transforms = [self.rr_cli.read(assoc.o)\n for assoc in self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)]\n\n proc_1 = self.container.proc_manager.procs[transforms[0].process_id]\n log.info(\"PROCESS 1: %s\" % str(proc_1))\n\n proc_2 = self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n #--------------------------------------------------------------------------------------------------------\n # Create a dataset config\n 
#--------------------------------------------------------------------------------------------------------\n\n dataset_config_id = self.ingestion_cli.create_dataset_configuration(\n dataset_id = self.input_dataset_id,\n archive_data = True,\n archive_metadata = True,\n ingestion_configuration_id = self.ingestion_configuration_id\n )\n\n #--------------------------------------------------------------------------------------------------------\n # Set up the gevent event AsyncResult queue\n #--------------------------------------------------------------------------------------------------------\n\n queue=gevent.queue.Queue()\n\n def call_to_persist(packet):\n queue.put(packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Grab the ingestion worker processes\n #--------------------------------------------------------------------------------------------------------\n\n # when persist_immutable() is called, then call_to_persist() is called instead....\n proc_1.persist_immutable = call_to_persist\n proc_2.persist_immutable = call_to_persist\n\n #--------------------------------------------------------------------------------------------------------\n # Create a packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the packets were handled according to the dataset config\n #--------------------------------------------------------------------------------------------------------\n\n # test that the ingestion worker tries to persist the ctd_packet in accordance to the dataset config\n self.assertEquals(queue.get(timeout=10).stream_resource_id, ctd_packet.stream_resource_id)\n\n #--------------------------------------------------------------------------------------------------------\n # Now change the dataset config for the same stream\n #--------------------------------------------------------------------------------------------------------\n\n dataset_config = self.ingestion_cli.read_dataset_config(dataset_config_id)\n\n dataset_config.configuration.archive_metadata = False\n\n self.ingestion_cli.update_dataset_config(dataset_config)\n\n #--------------------------------------------------------------------------------------------------------\n # Create a new packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the packets were handled according to the new dataset config...\n # This time, the packet should not be persisted since archive_metadata is False\n #--------------------------------------------------------------------------------------------------------\n\n with self.assertRaises(gevent.queue.Empty):\n queue.get(timeout=0.25)\n\n #--------------------------------------------------------------------------------------------------------\n # Now just do this thing one more time, with an updated dataset config...\n #\n # Change the dataset config for the same stream for the third time, updating archive_metadata back to 
True\n #--------------------------------------------------------------------------------------------------------\n\n dataset_config = self.ingestion_cli.read_dataset_config(dataset_config_id)\n dataset_config.configuration.archive_metadata = True\n self.ingestion_cli.update_dataset_config(dataset_config)\n\n #--------------------------------------------------------------------------------------------------------\n # Create a new packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the packets were handled according to the new dataset config\n #--------------------------------------------------------------------------------------------------------\n\n self.assertEquals(queue.get(timeout=10).stream_resource_id, ctd_packet.stream_resource_id)\n\n\n #--------------------------------------------------------------------------------------------------------\n #--------------------------------------------------------------------------------------------------------\n # Check that the dataset id is passed properly in the GranuleIngestedEvent\n #--------------------------------------------------------------------------------------------------------\n #--------------------------------------------------------------------------------------------------------\n\n ar = gevent.event.AsyncResult()\n def granule_ingested_hook(msg, headers):\n ar.set(msg)\n\n\n #Start the event subscriber - really - what a mess!\n event_subscriber = EventSubscriber(\n event_type=\"GranuleIngestedEvent\",\n origin=self.input_dataset_id,\n callback=granule_ingested_hook\n )\n\n self.gl.append(spawn(event_subscriber.listen))\n event_subscriber._ready_event.wait(timeout=5)\n self.event_subscribers.append(event_subscriber)\n\n\n\n # Set up the gevent Result queue for the hook in the workers\n\n queue=gevent.queue.Queue()\n\n def config_event_hook(packet, headers):\n queue.put(packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Override the worker processes methods with the gevent event hooks\n #--------------------------------------------------------------------------------------------------------\n\n proc_1.dataset_configs_event_test_hook = config_event_hook\n proc_2.dataset_configs_event_test_hook = config_event_hook\n\n dataset_config = self.ingestion_cli.read_dataset_config(dataset_config_id)\n dataset_config.configuration.archive_metadata = True\n self.ingestion_cli.update_dataset_config(dataset_config)\n\n #--------------------------------------------------------------------------------------------------------\n # Create a new packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the dataset id got from the dataset_config event hook is what it should be in both procs\n #--------------------------------------------------------------------------------------------------------\n\n 
self.assertEquals(queue.get(timeout=10).configuration.dataset_id,self.input_dataset_id)\n self.assertEquals(queue.get(timeout=10).configuration.dataset_id,self.input_dataset_id)\n self.assertTrue(queue.empty())\n\n\n event_msg = ar.get(timeout=10)\n self.assertEquals(event_msg.origin, self.input_dataset_id)\n\n data_stream_id = self.ctd_stream_def.data_stream_id\n element_count_id = self.ctd_stream_def.identifiables[data_stream_id].element_count_id\n record_count = ctd_packet.identifiables[element_count_id].value\n\n self.assertEquals(event_msg.ingest_attributes['number_of_records'], record_count)", "def ingest_import(user_import_id):\n user_import = UserImport.objects.get(pk=user_import_id)\n\n newrelic.agent.add_custom_parameter(\n 'organization_id', user_import.organization.pk)\n newrelic.agent.add_custom_parameter(\n 'user_import_id', user_import_id)\n\n user_import.status = 'ingesting'\n user_import.save()\n\n try:\n filename = load_file(user_import.file)\n\n with open(filename, 'rb') as file_obj:\n reader = unicodecsv.DictReader(file_obj, encoding='utf-8-sig')\n if reader.fieldnames != [u'first_name',\n u'last_name',\n u'email',\n u'address']:\n raise Exception('Invalid fields.')\n\n count = 0\n new_records = []\n for row in reader:\n count += 1\n new_records.append(ImportRecord(\n user_import=user_import,\n first_name=row.get('first_name', ''),\n last_name=row.get('last_name', ''),\n email=row.get('email', ''),\n address=row.get('address', '')))\n if not count % 5000:\n ImportRecord.objects.bulk_create(new_records)\n # Empty the list\n new_records = []\n\n # Import any records not imported in the \"by-5000\" iterator\n ImportRecord.objects.bulk_create(new_records)\n\n user_import.ingested = len(new_records)\n user_import.save()\n trigger_import.delay(user_import.pk)\n except Exception as error:\n user_import.status = 'failed'\n user_import.note = unicode(error)\n user_import.save()\n alert_failed_import.delay(user_import.pk)\n raise", "def generate_and_insert_data( inputs ):\n global gpudb_ingestor\n\n batch_size, num_batches = inputs\n\n my_id = int(random.random() * 100)\n\n null_percentage = 0.1\n alphanum = (string.ascii_letters + string.digits)\n\n # Nested loop\n # Outer loop controls how many batches of records are added to the ingestor\n for i in range(0, num_batches):\n print (\"thread {_id:>5} outer loop: {i:>5}\".format( _id = my_id, i = i ))\n records = []\n # Inner loop generated records for this batch\n for j in range(0, batch_size):\n _i_plus_j = (i + j)\n record = collections.OrderedDict()\n record[ \"i1\" ] = i * j\n record[ \"i2\" ] = random.randint( -_i_plus_j, _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"i8\" ] = random.randint( -128, 127 ) if (random.random() >= null_percentage) else None\n record[ \"i16\" ] = random.randint( -32768, 32767 ) if (random.random() >= null_percentage) else None\n record[ \"d1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"f1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"l1\" ] = (random.randint( 0,_i_plus_j ) * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"timestamp\" ] = random.randint( -30610239758979, 29379542399999 ) if (random.random() >= null_percentage) else None\n record[ \"s1\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 2, 200 ) )] )\n record[ \"date\" ] = None if (random.random() 
< null_percentage) \\\n else strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" )\n record[ \"datetime\" ] = None if (random.random() < null_percentage) \\\n else ( strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" ) \\\n + \" \"\n + ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) )\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"decimal\" ] = None if (random.random() < null_percentage) \\\n else ( str( random.randint( -922337203685477, 922337203685477 ) )\n + \".\" + str( random.randint( 0, 9999 ) ) )\n record[ \"ipv4\" ] = None if (random.random() < null_percentage) \\\n else '.'.join( [ str( random.randint( 0, 255 ) ) for n in range(0, 4)] )\n record[ \"time\" ] = None if (random.random() < null_percentage) \\\n else ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) \\\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"c1\" ] = None if (random.random() < null_percentage) \\\n else random.choice( alphanum )\n record[ \"c2\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 2 ) )] )\n record[ \"c4\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 4 ) )] )\n record[ \"c8\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 8 ) )] )\n record[ \"c16\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 16 ) )] )\n record[ \"c32\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 32 ) )] )\n record[ \"c64\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 64 ) )] )\n record[ \"c128\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 128 ) )] )\n record[ \"c256\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 256 ) )] )\n\n # Add the record to the list of records\n records.append( record )\n # end for loop\n\n # Add the records to the ingestor\n gpudb_ingestor.insert_records( records )\n # end generating data\n\n\n # Need to flush here since the gpudb_ingestor of the parent\n # thread won't get this child thread's state\n gpudb_ingestor.flush()", "def load_file(client, config, data_filename):\n\n db=client[config.db]\n col=db[config.collection]\n\n def post_bulk(posts):\n \"\"\"ship a bulk to MongoDB for insert\"\"\"\n\n # Bulk insert\n ###############################\n result=col.insert_many(posts) #\n ###############################\n\n\n mongo_posts=[]\n sys.stderr.write( \"Starting loading data from %s\\n\" % data_filename )\n\n # opening data file\n with gzip.open(data_filename) as f:\n\n item_nb=0 # item number in the file\n batch_nb=0 # number of the batch within the file\n\n # bulk variables reset\n 
bulk_action={}\n bulk_action[\"create\"]={}\n elastic_post=\"\"\n bulk_buffer_size=0 # item number in the batch\n time1=datetime.datetime.now() # recording time before reading/decoding\n\n # Reading the file line by line\n for line in f:\n \n # decode the CSV line - If we don't manage to read the line we just ignore it...\n try:\n item_nb+=1\n\n # reading/decoding line\n ######################################\n input=config.decode_input_line(line) #\n ######################################\n bulk_buffer_size+=1\n \n # dataFile field is not really useful for analytics \n # but it allows keeping track (and potentially cleaning) what was loaded\n input[\"dataFile\"]=os.path.basename(data_filename)\n # itemNb allow to find back which line of the file corresponds to which document in the database\n input[\"itemNb\"]=item_nb-1\n \n # convert into JSON\n input_json=json.dumps(input)\n \n # Add in the buffer: \n ###########################\n mongo_posts.append(input) #\n ###########################\n\n except Exception, e: # If we don't manage to read the line we just ignore it...\n sys.stderr.write( \"WARNING: unable to decode line: %s \\n\" %line)\n sys.stderr.write( str(e) )\n \n # When the buffer reaches the max size (config) then we load\n if bulk_buffer_size >= config.bulk_buffer_size_max:\n\n batch_nb+=1\n sys.stderr.write( \" Loading the batch of data #%d (%d items)\\n\" % (batch_nb,bulk_buffer_size))\n time2=datetime.datetime.now() # recording time before inserting\n read_time=int((time2-time1).total_seconds()*1000) # reading/decoding time\n\n ########################\n post_bulk(mongo_posts) #\n ########################\n\n time3=datetime.datetime.now()# recording time after inserting\n post_time=int((time3-time2).total_seconds()*1000) # insert time\n\n # print statistics in stdout\n #################################################################\n print \"LOAD: %d,%d,%d\" % (bulk_buffer_size,read_time,post_time) #\n #################################################################\n\n # bulk variable reset\n mongo_posts=[]\n bulk_buffer_size=0\n time1=datetime.datetime.now()\n\n\n # If there are remaining unloaded items in the buffer we load them\n if bulk_buffer_size>0:\n\n batch_nb+=1\n sys.stderr.write( \" Loading the batch of data #%d (%d items)\\n\" % (batch_nb,bulk_buffer_size))\n time2=datetime.datetime.now() # recording time before inserting\n read_time=int((time2-time1).total_seconds()*1000) # reading/decoding time\n \n ########################\n post_bulk(mongo_posts) #\n ########################\n \n time3=datetime.datetime.now()# recording time after inserting\n post_time=int((time3-time2).total_seconds()*1000) # insert time\n \n # print statistics in stdout\n #################################################################\n print \"LOAD: %d,%d,%d\" % (bulk_buffer_size,read_time,post_time) #\n #################################################################\n \n sys.stderr.write( \"Load from %s finished sucessfully \" % data_filename)\n sys.stderr.write( \" (%d items in %d batches)\\n\" % (item_nb,batch_nb))\n\n return", "def setUp(self):\n self.spark, self.log, *_ = start_spark(\n app_name='my_etl_job')\n\n self.config = json.loads(\"\"\"{\n \"extract\" : {\"uri\": \"tests/test_data/udf_test_data/recipes_negative.json\",\n \"clean\": \"True\",\n \"temptable\": \"recipes\"},\n \"transform\": {\"sql_path\": \"configs/transformation.sql\",\n \"udfs_required\":[\"tominutes\"]},\n \"load\" : {\"database\": \"hellofresh\",\n \"tablename\": \"recipes\",\n \"load_path\": 
\"user/hive/warehouse/hellofresh.db/recipes\",\n \"partition_cols\": {\"difficulty\": \"string\"}\n },\n \"impala\" : {\"impala_host\": \"localhost\"}\n }\n\n \"\"\")", "def _read_data_create_collection(data):\n src_csv = DATA_PATH / data\n src_json = str(DATA_PATH / data).replace(\".csv\", '.json')\n logger.info(f\"Reading csv: {data}:\")\n coll_csv = pd.read_csv(src_csv, encoding='ISO-8859-1')\n len_csv = int(coll_csv.iloc[:, 0].count())\n coll_csv.to_json(src_json,\n orient='records')\n logger.info(f\"Opening json: {src_json}:\")\n coll_json = open(src_json).read()\n coll_json = json.loads(coll_json)\n coll = db[data[:-4]]\n source = coll_json\n start_count = coll.count_documents({})\n logger.info(f\"Inserting data into : {coll}:\")\n result = coll.insert_many(source)\n logger.info(f\"Process time was {time.thread_time()}:\")\n gc.collect()\n record_count = coll.count_documents({})\n result_tuple = (len_csv, start_count, record_count, time.thread_time())\n PROCESS_RESULT.append(result_tuple)", "def load_userRuns():\n lines = [line.rstrip('\\n') for line in open(\"seed_data/user_runs.csv\")] \n for line in lines: \n column_data = line.split(\",\")\n line = UserRun(run_id=column_data[0], user1=column_data[1], active_status=column_data[2], lat_coordinates=column_data[3], lon_coordinates=column_data[4], scheduled=column_data[5])\n db.session.add(line)\n db.session.commit()", "def test_ingestion_workers_creation(self):\n\n\n #------------------------------------------------------------------------\n # Check that the two ingestion workers are running\n #------------------------------------------------------------------------\n\n print (\"self.rr_cli.find_associations(ingestion_configuration_id, PRED.hasTransform) : %s\" % self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform))\n print (\"type : %s\" % type(self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)))\n\n\n transforms = [self.rr_cli.read(assoc.o)\n for assoc in self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)]\n\n for transform in transforms:\n self.assertTrue(self.container.proc_manager.procs[transform.process_id])", "def run(self):\n path = \"{date}-kxp\".format(date=self.yesterday.strftime(\"%y%m%d\"))\n for index in os.listdir(path):\n # doing several enrichment things before indexing the data\n for f in os.listdir(path+\"/\"+index):\n cmd = \"esbulk -z -verbose -server {host} -w {workers} -index slub-{index} -type schemaorg -id identifier {fd}\".format(\n **self.config, index=index, fd=path+\"/\"+index+\"/\"+f)\n shellout(cmd)\n newconfig = None\n with open('lodkxp_config.json') as data_file:\n newconfig = json.load(data_file)\n newconfig[\"lastupdate\"] = str(self.yesterday.strftime(\"%y%m%d\"))\n with open('lodkxp_config.json', 'w') as data_file:\n json.dump(newconfig, data_file)", "def populate(run_path):\n call([\"BanzaiDB\", \"populate\", \"mapping\", run_path])", "def test_create_dataset_config_and_event_subscriber(self):\n\n\n #------------------------------------------------------------------------\n # Get the ingestion process instances:\n #------------------------------------------------------------------------\n\n transforms = [self.rr_cli.read(assoc.o)\n for assoc in self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)]\n\n proc_1 = self.container.proc_manager.procs[transforms[0].process_id]\n log.info(\"PROCESS 1: %s\" % str(proc_1))\n\n proc_2 = 
self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n #------------------------------------------------------------------------\n # Set up the gevent events\n #------------------------------------------------------------------------\n\n # Over ride the call back for the event subscriber\n ar_1 = gevent.event.AsyncResult()\n def message_received_1(message, headers):\n ar_1.set(message)\n\n proc_1.event_subscriber._callback = message_received_1\n\n # Over ride the call back for the event subscriber\n proc_2 = self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n ar_2 = gevent.event.AsyncResult()\n def message_received_2(message, headers):\n ar_2.set(message)\n\n proc_2.event_subscriber._callback = message_received_2\n\n\n # Create a dataset ingestion config which sends an event\n\n dataset_config_id = self.ingestion_cli.create_dataset_configuration(\n dataset_id = self.input_dataset_id,\n archive_data = True,\n archive_metadata = False,\n ingestion_configuration_id = self.ingestion_configuration_id\n )\n\n\n dataset_config = self.rr_cli.read(dataset_config_id)\n\n #--------------------------------------------------------------------------------------------------------\n # Do assertions!\n #--------------------------------------------------------------------------------------------------------\n\n self.assertEquals(dataset_config.configuration.stream_id, self.input_stream_id)\n self.assertEquals(dataset_config.configuration.archive_data, True)\n self.assertEquals(dataset_config.configuration.archive_metadata, False)\n\n\n self.assertEqual(ar_1.get(timeout=10).configuration.dataset_id,self.input_dataset_id)\n self.assertEqual(ar_2.get(timeout=10).configuration.dataset_id,self.input_dataset_id)", "async def async_step_import(self, user_input): # pylint: disable=unused-argument\n\t\tif self._async_current_entries():\n\t\t\treturn self.async_abort(reason=\"single_instance_allowed\")\n\n\t\treturn self.async_create_entry(title=\"configuration.yaml\", data={})", "def run(self):\n self.call(ClienteTableSeeder)\n self.call(ProductoTableSeeder)\n self.call(PedidoTableSeeder)", "def _load_configs(self, conn):\n\t\tfor (x,y), chip in self.chips.iteritems():\n\t\t\t# Select the chip to load the data into\n\t\t\tconn.selected_cpu_coords = (x,y,0)\n\t\t\t\n\t\t\t# Generate the chip's routing table (only loaded by a single core)\n\t\t\tnum_router_entries, router_entries = \\\n\t\t\t\tspinn_route.table_gen.spin1_table_gen(chip.router)\n\t\t\t\n\t\t\t# Ensure we don't have too many routing entries\n\t\t\tif num_router_entries > spinnaker_app.MAX_ROUTES_PER_CORE:\n\t\t\t\traise Exception(\"Too many router entries on a single core: %d (max %d)\"%(\n\t\t\t\t\tnum_router_entries, spinnaker_app.MAX_ROUTES_PER_CORE\n\t\t\t\t))\n\t\t\t\n\t\t\tfor index, core in enumerate(chip.cores.itervalues()):\n\t\t\t\t# Arbitarily choose one core to load the routing tables\n\t\t\t\tloads_router_enties = index == 0\n\t\t\t\t\n\t\t\t\t# Ensure we don't have too many sources/sinks\n\t\t\t\tif len(self.core_generators[core]) > spinnaker_app.MAX_SOURCES_PER_CORE:\n\t\t\t\t\traise Exception(\"Too many sources on a single core: %d (max %d)\"%(\n\t\t\t\t\t\tlen(self.core_generators[core]), spinnaker_app.MAX_SOURCES_PER_CORE\n\t\t\t\t\t))\n\t\t\t\tif len(self.core_consumers[core]) > spinnaker_app.MAX_SINKS_PER_CORE:\n\t\t\t\t\traise Exception(\"Too many sinks on a single core: %d (max 
%d)\"%(\n\t\t\t\t\t\tlen(self.core_consumers[core]), spinnaker_app.MAX_SINKS_PER_CORE\n\t\t\t\t\t))\n\t\t\t\t\n\t\t\t\t# The root block of the configuration \n\t\t\t\tconfig_root = spinnaker_app.config_root_t.pack(*spinnaker_app.config_root_tuple(\n\t\t\t\t\tcompletion_state = spinnaker_app.COMPLETION_STATE_RUNNING,\n\t\t\t\t\tseed = random.getrandbits(32),\n\t\t\t\t\ttick_microseconds = self._tick_period,\n\t\t\t\t\twarmup_duration = int(self._warmup/self.tick_period),\n\t\t\t\t\tduration = int(self._duration/self.tick_period),\n\t\t\t\t\trtr_drop_e = self._router_timeout_e,\n\t\t\t\t\trtr_drop_m = self._router_timeout_m,\n\t\t\t\t\tresult_dropped_packets = 0,\n\t\t\t\t\tresult_forwarded_packets = 0,\n\t\t\t\t\tnum_sources = len(self.core_generators[core]),\n\t\t\t\t\tnum_sinks = len(self.core_consumers[core]),\n\t\t\t\t\tnum_router_entries = num_router_entries if loads_router_enties else 0,\n\t\t\t\t))\n\t\t\t\t\n\t\t\t\t# Define the packet sources\n\t\t\t\tconfig_sources = \"\"\n\t\t\t\tfor route, gen in self.core_generators[core].iteritems():\n\t\t\t\t\t# Encode packet generator data\n\t\t\t\t\tif type(gen) is BernoulliGeneration:\n\t\t\t\t\t\ttemporal_dist = spinnaker_app.TEMPORAL_DIST_BERNOULLI\n\t\t\t\t\t\ttemporal_dist_data = spinnaker_app.bernoulli_packet_prob_t.pack(\n\t\t\t\t\t\t\t*spinnaker_app.bernoulli_packet_prob_tuple(\n\t\t\t\t\t\t\t\tbernoulli_packet_prob = gen.probability\n\t\t\t\t\t\t\t)\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"Unknown packet generator %s.\"%repr(gen))\n\t\t\t\t\t\n\t\t\t\t\t# Encode this source\n\t\t\t\t\tconfig_sources += spinnaker_app.config_source_t.pack(*spinnaker_app.config_source_tuple(\n\t\t\t\t\t\trouting_key = route.key,\n\t\t\t\t\t\ttemporal_dist = temporal_dist,\n\t\t\t\t\t\ttemporal_dist_data = temporal_dist_data,\n\t\t\t\t\t\tresult_packets_generated = 0,\n\t\t\t\t\t\tresult_packets_sent = 0,\n\t\t\t\t\t))\n\t\t\t\t\n\t\t\t\t# Define the packet sinks (which must be supplied in ascending order of\n\t\t\t\t# routing key)\n\t\t\t\tconfig_sinks = \"\"\n\t\t\t\tfor route in sorted(self.core_consumers[core]):\n\t\t\t\t\tcon = self.core_consumers[core][route]\n\t\t\t\t\t\n\t\t\t\t\t# Encode packet generator data\n\t\t\t\t\tif type(con) is not InstantConsumption:\n\t\t\t\t\t\traise Exception(\"Unknown packet consumer %s.\"%repr(con))\n\t\t\t\t\t\n\t\t\t\t\t# Encode this sink\n\t\t\t\t\tconfig_sinks += spinnaker_app.config_sink_t.pack(*spinnaker_app.config_sink_tuple(\n\t\t\t\t\t\trouting_key = route.key,\n\t\t\t\t\t\tresult_packets_arrived = 0,\n\t\t\t\t\t))\n\t\t\t\t\n\t\t\t\t# Put all the configuration blocks together\n\t\t\t\tconfig = config_root + config_sources + config_sinks\n\t\t\t\t\n\t\t\t\tif loads_router_enties:\n\t\t\t\t\tconfig += router_entries\n\t\t\t\t\n\t\t\t\t# Load this core's configuration\n\t\t\t\taddr = spinnaker_app.config_root_sdram_addr(core.core_id)\n\t\t\t\tdata = config\n\t\t\t\tself._write_mem_with_retry(conn, addr, scp.TYPE_BYTE, data)", "def test_ingestion(self):\n directory = DataSetTestConfig().ingestion_directory\n runtime = DataSetTestConfig().ingestion_runtime\n\n sleeptime = 600\n to = None\n\n if runtime:\n sleeptime = int(runtime)\n to = gevent.Timeout(sleeptime)\n to.start()\n\n try:\n # Now start the agent up and just hang out.\n self.assert_initialize()\n\n while True:\n log.debug(\"In our event sleep loop. 
just resting for a bit.\")\n gevent.sleep(sleeptime)\n\n except Timeout:\n log.info(\"Finished ingestion test as runtime has been exceeded\")\n\n finally:\n if runtime:\n to.cancel()", "async def async_step_import(self, user_input):\r\n for entry in self._async_current_entries():\r\n if entry.source == \"import\":\r\n return self.async_abort(reason=\"single_instance_allowed\")\r\n\r\n return self.async_create_entry(title=\"configuration.yaml\",\r\n data=user_input)", "def process():\n db = DataParser.get_connection()\n cursor = db.cursor()\n DataParser.set_up_database(cursor)\n config = DataParser.get_config()\n cursor.execute(\"use %s\" % config[\"database\"][\"database_name\"])\n DataParser.import_articles(cursor)\n DataParser.import_citations(cursor)\n DataParser.import_words(cursor)\n DataParser.import_users(cursor)\n DataParser.clean_up(db, cursor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load all places from places.csv and arrange each place's attributes
def load_places(self): file_read = open('places_backup.csv', 'r') for place in file_read: place_string = place.split(",") self.places.append( [Place(place_string[0], place_string[1], int(place_string[2]), place_string[3].strip())]) file_read.close()
[ "def load_places():\r\n places_list = []\r\n file_open = csv.reader(open(\"places.csv\"))\r\n for row in file_open:\r\n places_list.append(row)\r\n places_list = sorted(places_list, key=lambda places_list: places_list[2])\r\n return places_list", "def parse_places_from_csv(file: IO) -> Iterator[PlaceTuple]:\n data_reader = csv.reader(file, delimiter=\"\\t\")\n for row in data_reader:\n if row[0] and row[1] and row[4] and row[5] and row[7]:\n yield PlaceTuple(\n data_source=\"geonames\",\n source_id=int(row[0]),\n name=row[1],\n country=row[8],\n latitude=float(row[4]),\n longitude=float(row[5]),\n place_type=row[7],\n altitude=float(row[14]),\n srid=4326,\n )", "def copy_places():\r\n\r\n cities = []\r\n countries = []\r\n states = []\r\n subregions = []\r\n regions = []\r\n for place in Region.objects.all():\r\n plural_names, additional_keywords, abbreviations= same_attributes(place)\r\n array = {\r\n \"name\": place.name,\r\n \"plural_names\": plural_names,\r\n \"additional_keywords\": additional_keywords,\r\n \"abbreviations\": abbreviations,\r\n \"place_id\": place.place_id\r\n }\r\n regions.append(array)\r\n for place in Subregion.objects.all():\r\n plural_names, additional_keywords, abbreviations= same_attributes(place)\r\n array = {\r\n \"name\": place.name,\r\n \"plural_names\": plural_names,\r\n \"additional_keywords\": additional_keywords,\r\n \"abbreviations\": abbreviations,\r\n \"place_id\": place.place_id,\r\n \"region\": place.region.name\r\n }\r\n subregions.append(array)\r\n for place in Country.objects.all():\r\n plural_names, additional_keywords, abbreviations= same_attributes(place)\r\n array = {\r\n \"name\": place.name,\r\n \"plural_names\": plural_names,\r\n \"additional_keywords\": additional_keywords,\r\n \"abbreviations\": abbreviations,\r\n \"place_id\": place.place_id,\r\n \"region\": place.region.name\r\n }\r\n if place.subregion:\r\n array[\"subregion\"] = place.subregion.name\r\n countries.append(array)\r\n for place in State.objects.all():\r\n plural_names, additional_keywords, abbreviations= same_attributes(place)\r\n array = {\r\n \"name\": place.name,\r\n \"plural_names\": plural_names,\r\n \"additional_keywords\": additional_keywords,\r\n \"abbreviations\": abbreviations,\r\n \"place_id\": place.place_id,\r\n \"region\": place.region.name,\r\n \"country\": place.country.name\r\n }\r\n if place.subregion:\r\n array[\"subregion\"] = place.subregion.name\r\n states.append(array)\r\n for place in City.objects.all():\r\n plural_names, additional_keywords, abbreviations= same_attributes(place)\r\n array = {\r\n \"name\": place.name,\r\n \"plural_names\": plural_names,\r\n \"additional_keywords\": additional_keywords,\r\n \"abbreviations\": abbreviations,\r\n \"place_id\": place.place_id,\r\n \"region\": place.region.name,\r\n \"country\": place.country.name\r\n }\r\n if place.subregion:\r\n array[\"subregion\"] = place.subregion.name\r\n if place.state:\r\n array[\"state\"] = place.state.name\r\n if place.iata:\r\n array[\"iata\"] = place.iata\r\n else:\r\n array[\"iata\"] = []\r\n cities.append(array)\r\n mass_array = {\r\n \"regions\": regions,\r\n \"subregions\": subregions,\r\n \"countries\": countries,\r\n \"states\": states,\r\n \"cities\": cities}\r\n with open('places.JSON','w') as fi:\r\n json.dump(mass_array, fi)\r\n return mass_array", "def load_places():\r\n\r\n with open('places.JSON', 'r') as f:\r\n places = json.load(f)\r\n for place in places.get(\"regions\"):\r\n new_region = Region(\r\n name=place.get(\"name\"),\r\n 
plural_names=place.get(\"plural_names\"),\r\n additional_keywords=place.get(\"additional_keywords\"),\r\n abbreviations=place.get(\"abbreviations\"),\r\n place_id=place.get(\"place_id\"))\r\n new_region.save()\r\n for place in places.get(\"subregions\"):\r\n new_subregion = Subregion(\r\n name=place.get(\"name\"),\r\n plural_names=place.get(\"plural_names\"),\r\n additional_keywords=place.get(\"additional_keywords\"),\r\n abbreviations=place.get(\"abbreviations\"),\r\n place_id=place.get(\"place_id\"),\r\n region=Region.objects.get(name=place.get(\"region\")))\r\n new_subregion.save()\r\n for place in places.get(\"countries\"):\r\n new_country = Country(\r\n name=place.get(\"name\"),\r\n plural_names=place.get(\"plural_names\"),\r\n additional_keywords=place.get(\"additional_keywords\"),\r\n abbreviations=place.get(\"abbreviations\"),\r\n place_id=place.get(\"place_id\"),\r\n region=Region.objects.get(name=place.get(\"region\")))\r\n if place.get(\"subregion\"):\r\n new_country.subregion =\\\r\n Subregion.objects.get(name=place.get(\"subregion\"))\r\n new_country.save()\r\n for place in places.get(\"states\"):\r\n new_state = State(\r\n name=place.get(\"name\"),\r\n plural_names=place.get(\"plural_names\"),\r\n additional_keywords=place.get(\"additional_keywords\"),\r\n abbreviations=place.get(\"abbreviations\"),\r\n place_id=place.get(\"place_id\"),\r\n region=Region.objects.get(name=place.get(\"region\")),\r\n country=Country.objects.get(name=place.get(\"country\")))\r\n if place.get(\"subregion\"):\r\n new_state.subregion =\\\r\n Subregion.objects.get(name=place.get(\"subregion\"))\r\n new_state.save()\r\n for place in places.get(\"cities\"):\r\n new_city = City(\r\n name=place.get(\"name\"),\r\n plural_names=place.get(\"plural_names\"),\r\n additional_keywords=place.get(\"additional_keywords\"),\r\n abbreviations=place.get(\"abbreviations\"),\r\n place_id=place.get(\"place_id\"),\r\n iata=place.get(\"iata\"),\r\n region=Region.objects.get(name=place.get(\"region\")),\r\n country=Country.objects.get(name=place.get(\"country\")))\r\n if place.get(\"subregion\"):\r\n new_city.subregion =\\\r\n Subregion.objects.get(name=place.get(\"subregion\"))\r\n if place.get(\"state\"):\r\n new_city.state =\\\r\n State.objects.get(name=place.get(\"state\"))\r\n new_city.save()\r\n return", "def load_maps():\n\n #reads the csv and inserts the data in the table\n csvfile = open('csvs/maps_table.csv')\n data = csv.reader(csvfile)\n next(data, None) # skip the headers\n\n for row in data:\n row = [element if len(element) > 0 else None for element in row]\n maps = StateMapNames(state_id=row[0],\n map_name=row[1])\n\n db.session.add(maps)\n db.session.commit()", "def sort_places(stat_table, route):\n # Work in progress! 
Waiting for translation list.\n # Or should this be part of the data instead??\n place_translations = {\n \"Göteborg\": \"Gothenburg\"\n }\n\n if \"place\" in route.rule:\n lang = \"en\"\n else:\n lang = \"sv\"\n\n if lang == \"en\":\n for d in stat_table:\n d[\"display_name\"] = place_translations.get(d[\"name\"], d[\"name\"])\n else:\n for d in stat_table:\n d[\"display_name\"] = d[\"name\"]\n\n stat_table.sort(key=lambda x: x.get(\"name\").strip())\n return stat_table", "def load_states():\n\n #reads the csv and inserts the data in the table\n csvfile = open('csvs/states_table.csv')\n data = csv.reader(csvfile)\n next(data, None) # skip the headers\n\n for row in data:\n row = [element if len(element) > 0 else None for element in row]\n state = State(state_id=row[0],\n name=row[1],\n latitude=row[2],\n longitude=row[3],\n zoom=row[4])\n\n db.session.add(state)\n db.session.commit()", "def get_all_places():\n places = {'places': []}\n\n conn = sqlite3.connect('places.db')\n cursor = conn.cursor()\n\n sql = 'SELECT local_name, full_address, latitude, longitude, place_id FROM places'\n\n for place in cursor.execute(sql):\n place_modal = {}\n place_modal['local_name'] = place[0]\n place_modal['full_address'] = place[1]\n place_modal['latitude'] = place[2]\n place_modal['longitude'] = place[3]\n place_modal['place_id'] = place[4]\n places['places'].append(place_modal)\n\n conn.close()\n return places", "def load_herd_areas():\n\n # HerdArea.query.delete() # deletes rows before adding so that data is not duplicated\n\n #reads the csv and inserts the data in the table\n csvfile = open('csvs/herd_names.csv')\n data = csv.reader(csvfile)\n next(data, None) #skip the header row\n\n for row in data:\n row = [element if len(element) > 0 else None for element in row]\n herds = HerdArea(herd_id=row[0],\n state_id=row[2],\n herd_name=row[1].title(),\n gis_data=row[3],)\n\n db.session.add(herds)\n db.session.commit()", "def places(self, data):\n self._places = data", "def LoadHouses(filePath):\n\n # Makes list to return to be filled with csv data\n houses = []\n\n with open(filePath, \"r\") as f:\n\n # Skips header\n csvreader = csv.reader(f)\n next (csvreader, None)\n\n # Reads the lines\n for line in f:\n\n houseData = []\n\n for element in line.split(\",\"):\n\n # Reads out numbers\n houseData.append(element)\n\n # Appends to list\n newHouse = House(int(houseData[0]), int(houseData[1]), float(houseData[2]))\n houses.append(newHouse)\n\n return houses", "def get_all_places() -> List[str]:\n with DBPlaces() as db:\n places = db.get_places(columns='name')\n return [place_sample[0] for place_sample in places]", "def load_locations():\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Location.query.delete()\n\n with open('seed_data/location.csv', 'r') as f:\n reader = csv.reader(f)\n location_list = list(reader)\n del location_list[0]\n\n # Read location list and insert data\n\n d = {}\n\n for row in location_list:\n location_id, district_id, state_name = row\n\n d[location_id] = [district_id, state_name]\n\n if district_id == '':\n loc = Location(location_id=location_id,\n district_id=None,\n state_name=state_name)\n else:\n loc = Location(location_id=location_id,\n district_id=district_id,\n state_name=state_name)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(loc)\n\n # Once we're done, we should commit our work\n db.session.commit()", "def location_tags(path):\n items = os.listdir(path)\n 
print(items)\n os.system('mkdir loc')\n for csvfile in items:\n with open(path + '/' + csvfile, 'r') as fin, open('loc/new_'+csvfile, 'w') as fout:\n reader = csv.reader(fin, lineterminator='\\n')\n\n writer = csv.writer(fout, lineterminator='\\n')\n\n read = list(reader)\n titles = read[0]\n\n location = int(titles.index('location'))\n has_public_page = int(titles.index('location.has_public_page'))\n loc_id = int(titles.index('location.id'))\n name = int(titles.index('location.name'))\n slug = int(titles.index('location.slug'))\n\n writer.writerow(titles)\n for row in read:\n loc = row[location]\n try:\n locate = json.loads(row[location])\n except:\n locate = {}\n\n if len(locate) > 0:\n row[has_public_page] = locate['has_public_page']\n row[loc_id] = locate['id']\n row[name] = locate['name']\n row[slug] = locate['slug']\n\n writer.writerow(row)", "def import_data():\n PROJECT_DIR = path.dirname( path.dirname( path.dirname( __file__ ) ) )\n \n geoplaces = pd.read_csv( \n filepath_or_buffer = path.join( PROJECT_DIR, 'data', 'geoplaces2.csv' ), \n usecols = ['placeID','smoking_area', 'dress_code', 'accessibility', 'price', 'other_services'], \n error_bad_lines = False \n ).dropna()\n \n parking = pd.read_csv( \n filepath_or_buffer = path.join( PROJECT_DIR, 'data', 'chefmozparking.csv' ), \n usecols = ['placeID','parking_lot'], \n error_bad_lines = False \n ).dropna()\n \n rating = pd.read_csv( \n filepath_or_buffer = path.join( PROJECT_DIR, 'data', 'rating_final.csv' ), \n usecols = ['placeID', 'userID', 'rating'], \n error_bad_lines = False \n ).dropna()\n \n # Remove duplicate ratings from the same user about the same restaurant if any and drop userID\n rating = rating.drop_duplicates( ['placeID', 'userID'] ).drop( 'userID', axis=1 )\n \n # INNER JOIN tables on placeID to make a duplicate row for each client rating and parking type\n data = pd.merge( pd.merge( geoplaces, parking, on = 'placeID' ), rating, on = 'placeID' )\n \n return data.drop( 'placeID', axis=1 )", "def load_csv(apps, url, state_mapping):\n Area = apps.get_model('model_api', 'Area')\n Covid19PredictionDataPoint = apps.get_model(\n 'model_api', 'Covid19PredictionDataPoint')\n\n try:\n f = io.StringIO(urllib.request.urlopen(url).read().decode('utf-8'))\n reader = csv.reader(f)\n header = next(reader, None)\n\n # Because different csv files have different column arrangements,\n # find out the index the columns containing different data fields first.\n location_col = -1\n date_col = -1\n target_col = -1\n type_col = -1\n value_col = -1\n\n for i in range(0, len(header)):\n if (header[i] == \"location\"):\n location_col = i\n elif (header[i] == \"target_end_date\"):\n date_col = i\n elif (header[i] == \"target\"):\n target_col = i\n elif (header[i] == \"type\"):\n type_col = i\n elif (header[i] == \"value\"):\n value_col = i\n\n data = []\n\n for row in reader:\n # Skip the row of quantile-type prediction or not cumulative type.\n if (row[type_col] != \"point\" or \"cum death\" not in row[target_col]):\n continue\n\n area = None\n state = \"\"\n country = \"\"\n\n if row[location_col] == \"US\":\n country = \"US\"\n else:\n country = \"US\"\n state_id = int(row[location_col])\n state = state_mapping[state_id]\n\n # Try to find the corresponding area.\n try:\n area = Area.objects.get(country=country, state=state)\n except Area.DoesNotExist:\n msg = \"Could not find the area for country '{0}'\".format(\n country)\n if state:\n msg += \" and state '{0}'\".format(state)\n msg += ' in model_api_area. 
Skip this area.'\n print(msg)\n continue\n\n except Area.MultipleObjectsReturned:\n msg = \"Found multiple areas for country '{0}'\".format(\n country)\n if state:\n msg += \" and state '{0}'\".format(state)\n msg += ' in model_api_area. Skip this area.'\n print(msg)\n continue\n \n raw_date = row[date_col]\n date = datetime.datetime(*[int(item) for item in raw_date.split('-')])\n\n # Skip invalid values.\n raw_val = row[value_col]\n if raw_val in ['NaN', '-Inf', 'Inf']:\n continue\n \n # Skip negative values.\n val = int(float(raw_val))\n if val < 0:\n continue\n \n data.append(Covid19PredictionDataPoint(\n area=area,\n date=date,\n val=val\n ))\n\n return data\n\n except urllib.error.HTTPError as httpe:\n print(\"A HttpError is found when loading data from\" + url)\n return []\n except urllib.error.URLError as urle:\n print(\"A URLError is found when loading data from\" + url)\n return []", "def load_airports(self):\n\n print(f'Loading airport data from {self.path_to_airport_csv}')\n\n # try:\n self.airports = {} # Creates an empty dictionary to store airport objects\n\n with open(self.path_to_airport_csv) as fp:\n read_csv = csv.reader(fp, delimiter=',')\n for row in read_csv:\n key = row[4]\n self.airports[key] = Airport(airport_name=row[2],\n country=row[3],\n code=row[4],\n latitude=float(row[6]),\n longitude=float(row[7]))\n # except IOError as (errno, strerror):\n # print(\"I/O error({0}): {1}\".format(errno, strerror))\n return self.airports", "def read_zips_csv(self, zips_csv):\n # Create a map from a zipcode to a set of rate areas.\n self.rate_areas = defaultdict(set)\n # Process data from the zips_csv file.\n for data in self.read_csv(zips_csv, self.zips_schema):\n # Update the map.\n self.rate_areas[data.zipcode].add((data.state, data.rate_area))", "def all_amenities_places(place_id):\n obj_places = storage.get(Place, place_id)\n list_amenities = []\n if obj_places is None:\n abort(404)\n if getenv('HBNB_TYPE_STORAGE') == 'db':\n amenity_obj = obj_places.amenities\n else:\n amenity_obj = obj_places.amenity_ids\n for amenity in amenity_obj:\n list_amenities.append(amenity.to_dict())\n return jsonify(list_amenities)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
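Illustrative note (not a dataset row): the load_places document above, and the place-counting and saving documents in the next few rows, all assume a Place class and a self.places list that the rows never define. A minimal sketch of that assumed class, with attribute names inferred from how the snippets access them:

class Place:
    # Hypothetical reconstruction: the snippets read place[0].name, .country,
    # .priority and .status, so those are the only fields sketched here.
    def __init__(self, name, country, priority, status):
        self.name = name
        self.country = country
        self.priority = int(priority)
        self.status = status  # 'y'/'n' flag checked by the counting methods


# Each entry in the places list is a single-element list, hence the place[0] access.
places = [[Place("Sydney", "Australia", 1, "n")], [Place("Kyoto", "Japan", 2, "y")]]
print(sum(1 for place in places if place[0].status == 'y'))  # prints 1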
Check the place list and count the number of places that still need to be visited
def count_unvisited_places(self):
    unvisited_places = 0
    for place in self.places:
        if place[0].status == 'y':
            unvisited_places += 1
    return unvisited_places
[ "def count_visited_places(self):\r\n visited_places = 0\r\n for place in self.places:\r\n if place[0].status == 'n':\r\n visited_places += 1\r\n return visited_places", "def display_visited_places(places_list):\r\n visited_list = []\r\n for place in range(0, len(places_list)):\r\n if places_list[place][3] == 'v':\r\n visited_list.append(places_list[place])\r\n if len(visited_list) == 0:\r\n print(\"No visited places\")\r\n else:\r\n for number in range(0, len(visited_list)):\r\n print(\"{} {} in {} (Priority {})\".format(number, visited_list[number][0], visited_list[number][1], visited_list[number][2]))\r\n return visited_list", "def display_unvisited_places(places_list):\r\n unvisited_list = []\r\n for un_list in range(0, len(places_list)):\r\n if places_list[un_list][3] == 'n':\r\n unvisited_list.append(places_list[un_list])\r\n if len(unvisited_list) == 0:\r\n print(\"No places to visit\")\r\n else:\r\n for unvisited in range(0, len(unvisited_list)):\r\n print(\"{} {} in {} (Priority {})\".format(unvisited, unvisited_list[unvisited][0],\r\n unvisited_list[unvisited][1], unvisited_list[unvisited][2]))\r\n return unvisited_list", "def all_places(places_list):\r\n full_list = []\r\n for place in range(0, len(places_list)):\r\n full_list.append(places_list[place])\r\n if len(full_list) == 0:\r\n print(\"No places in list\")\r\n else:\r\n for number in range(0, len(full_list)):\r\n print(\"{} {} in {} (Priority {})\".format(number, full_list[number][0], full_list[number][1],\r\n full_list[number][2]))\r\n return full_list", "def set_country_count(self,places):\n self.__NPOINTS = len(places);\n for p in places:\n try:\n index = self.__countries.index(p.country)\n self.__country_val[index] += 1\n except:\n self.__countries.append(p.country)\n self.__country_val.append(1)", "def count_islands(grid):\n\tvisited = grid.copy() # copy the grid in order not to lose the real information.\n\tM = len(grid)\n\tN = len(grid[0])\n\tc = 0\n\tfor k in range(M):\n\t\tfor l in range(N):\n\t\t\tif visited[k][l]:\n\t\t\t\tc += 1 # found a new island\n\t\t\t\tvisit_island(visited, k, l, M, N) # visit the connected pieces\n\treturn c", "def _count_holes(cls, list):\n return len([i for i in list if cls._only_below_fermi(i)])", "def island_count(*grid: str) -> int:\n unvisited = {(x, y)\n for (y, row) in enumerate(grid)\n for (x, char) in enumerate(row)\n if bool(int(char))}\n\n number_of_islands = 0\n while unvisited:\n explore_island(next(iter(unvisited)), unvisited)\n number_of_islands += 1\n return number_of_islands", "def count_islands(rows,columns,tiles):\n numOfIslands = 0\n for i in range(0,rows):\n for j in range(0,columns):\n if tiles[i][j] == True:\n numOfIslands += 1\n find_all_parts_of_island(rows,columns,i,j,tiles)\n return numOfIslands", "def test_total_neighbors(st: SpaceTime):\n # This is actually only true if the space_time is large enough. 
WHen it is small enough one node may be two different neighors reducing the total number of neighbors.\n for n in events(st):\n assert len(n.neighbors) >= 4", "def add_new_place(places_list):\r\n new_place = [0, '0', 0, 0]\r\n new_place[3] = 'n'\r\n new_place[0] = str(input(\"Place name: \").strip())\r\n new_place[1] = str(input(\"Country: \").strip())\r\n while new_place[0] == \"\":\r\n print(\"Input cannot be blank\")\r\n new_place[0] = str(input(\"Place name: \").strip())\r\n while new_place[1] == \"\":\r\n print(\"Input cannot be blank\")\r\n new_place[1] = str(input(\"Country: \").strip())\r\n else:\r\n while True:\r\n try:\r\n new_place[2] = str(input(\"Priority: \"))\r\n if int(new_place[2]) >= 1 and int(new_place[2]) <= 3:\r\n break\r\n else:\r\n print(\"Priority must be 1, 2 or 3\")\r\n except ValueError:\r\n print(\"Invalid input; enter a valid number\")\r\n print(\"{} in {} (priority {}) added to unvisited list\".format(new_place[0], new_place[1], new_place[2]))\r\n places_list.append(new_place)\r\n places_list = sorted(places_list, key=lambda places_list: places_list[2])\r\n return places_list", "def number_bites_resolved(self) -> int:\n return len(\n set([\n row['bite'] for row in self.rows if row['completed'] == 'True'\n ]))", "def globalNeighbors (listAtom, count):\n\n for atom in listAtom:\n nbNeighbor = numberNeigthbor(atom[\"neighbors\"])\n for neighbor in atom[\"neighbors\"]:\n # print count\n neighbor_classif = structure.classificationATOM(neighbor)\n count[\"allNumberNeighbors\"][neighbor_classif] = count[\"allNumberNeighbors\"][neighbor_classif] + 1\n if not nbNeighbor in count.keys():\n count[nbNeighbor] = structure.countClassificationAtoms()\n\n if neighbor_classif in count[nbNeighbor].keys():\n count[nbNeighbor][neighbor_classif] = count[nbNeighbor][neighbor_classif] + 1\n\n else:\n count[nbNeighbor][\"others\"] = count[nbNeighbor][\"others\"] + 1", "def orders_placed(self):\n return len(self.orders)", "def obstacle_count(self):\n # Gotten from the discord server\n # do a scan of the area in front of the robot\n self.scan()\n # FIGURE OUT HOW MANY OBSTACLES THERE WERE\n see_an_object = False\n count = 0\n\n for angle in self.scan_data:\n dist = self.scan_data[angle]\n if dist < self.SAFE_DISTANCE and not see_an_object:\n see_an_object = True\n count += 1\n print(\"~~~~ I SEE SOMETHING!!! 
~~~~~\")\n elif dist > self.SAFE_DISTANCE and see_an_object:\n see_an_object = False\n print(\"I guess the object ended\")\n\n print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n print(\"\\nI saw %d objects\" % count)", "def free_locations(self) -> int:\n return len(list(filter(lambda x: x[\"content\"] is None, self.data[\"locations\"])))", "def visit(nbrs, atom, visited):\n visited[atom] = 1\n result = 1 # To be returned.\n for nbr in nbrs[atom]:\n if visited[nbr] > 0:\n continue\n result += visit(nbrs, nbr, visited)\n\n return result", "def numIslands(self, grid):\r\n \r\n if not grid:\r\n return 0\r\n \r\n rows, cols = len(grid), len(grid[0])\r\n visited = set()\r\n res = 0\r\n \r\n def bfs(r, c):\r\n q = []\r\n visited.add( (r,c) )\r\n q.append( (r,c) )\r\n while q:\r\n row, col = q.pop(0)\r\n directions = [ [1,0], [-1,0], [0,1], [0,-1] ]\r\n for dr, dc in directions:\r\n r, c = row + dr, col + dc\r\n if r in range(rows) and c in range(cols) and grid[r][c] == \"1\" and (r,c) not in visited:\r\n q.append( (r,c) )\r\n visited.add( (r,c) )\r\n \r\n for r in range(rows):\r\n for c in range(cols):\r\n if grid[r][c] == \"1\" and (r,c) not in visited:\r\n bfs(r,c)\r\n res += 1\r\n return res", "def test_returns_zero_if_list_is_empty(self):\n result = island_counter([])\n self.assertEqual(result, 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the place list and count the number of visited places
def count_visited_places(self):
    visited_places = 0
    for place in self.places:
        if place[0].status == 'n':
            visited_places += 1
    return visited_places
[ "def count_unvisited_places(self):\r\n unvisited_places = 0\r\n for place in self.places:\r\n if place[0].status == 'y':\r\n unvisited_places += 1\r\n return unvisited_places", "def display_visited_places(places_list):\r\n visited_list = []\r\n for place in range(0, len(places_list)):\r\n if places_list[place][3] == 'v':\r\n visited_list.append(places_list[place])\r\n if len(visited_list) == 0:\r\n print(\"No visited places\")\r\n else:\r\n for number in range(0, len(visited_list)):\r\n print(\"{} {} in {} (Priority {})\".format(number, visited_list[number][0], visited_list[number][1], visited_list[number][2]))\r\n return visited_list", "def display_unvisited_places(places_list):\r\n unvisited_list = []\r\n for un_list in range(0, len(places_list)):\r\n if places_list[un_list][3] == 'n':\r\n unvisited_list.append(places_list[un_list])\r\n if len(unvisited_list) == 0:\r\n print(\"No places to visit\")\r\n else:\r\n for unvisited in range(0, len(unvisited_list)):\r\n print(\"{} {} in {} (Priority {})\".format(unvisited, unvisited_list[unvisited][0],\r\n unvisited_list[unvisited][1], unvisited_list[unvisited][2]))\r\n return unvisited_list", "def count_islands(grid):\n\tvisited = grid.copy() # copy the grid in order not to lose the real information.\n\tM = len(grid)\n\tN = len(grid[0])\n\tc = 0\n\tfor k in range(M):\n\t\tfor l in range(N):\n\t\t\tif visited[k][l]:\n\t\t\t\tc += 1 # found a new island\n\t\t\t\tvisit_island(visited, k, l, M, N) # visit the connected pieces\n\treturn c", "def set_country_count(self,places):\n self.__NPOINTS = len(places);\n for p in places:\n try:\n index = self.__countries.index(p.country)\n self.__country_val[index] += 1\n except:\n self.__countries.append(p.country)\n self.__country_val.append(1)", "def all_places(places_list):\r\n full_list = []\r\n for place in range(0, len(places_list)):\r\n full_list.append(places_list[place])\r\n if len(full_list) == 0:\r\n print(\"No places in list\")\r\n else:\r\n for number in range(0, len(full_list)):\r\n print(\"{} {} in {} (Priority {})\".format(number, full_list[number][0], full_list[number][1],\r\n full_list[number][2]))\r\n return full_list", "def island_count(*grid: str) -> int:\n unvisited = {(x, y)\n for (y, row) in enumerate(grid)\n for (x, char) in enumerate(row)\n if bool(int(char))}\n\n number_of_islands = 0\n while unvisited:\n explore_island(next(iter(unvisited)), unvisited)\n number_of_islands += 1\n return number_of_islands", "def globalNeighbors (listAtom, count):\n\n for atom in listAtom:\n nbNeighbor = numberNeigthbor(atom[\"neighbors\"])\n for neighbor in atom[\"neighbors\"]:\n # print count\n neighbor_classif = structure.classificationATOM(neighbor)\n count[\"allNumberNeighbors\"][neighbor_classif] = count[\"allNumberNeighbors\"][neighbor_classif] + 1\n if not nbNeighbor in count.keys():\n count[nbNeighbor] = structure.countClassificationAtoms()\n\n if neighbor_classif in count[nbNeighbor].keys():\n count[nbNeighbor][neighbor_classif] = count[nbNeighbor][neighbor_classif] + 1\n\n else:\n count[nbNeighbor][\"others\"] = count[nbNeighbor][\"others\"] + 1", "def count_islands(rows,columns,tiles):\n numOfIslands = 0\n for i in range(0,rows):\n for j in range(0,columns):\n if tiles[i][j] == True:\n numOfIslands += 1\n find_all_parts_of_island(rows,columns,i,j,tiles)\n return numOfIslands", "def test_total_neighbors(st: SpaceTime):\n # This is actually only true if the space_time is large enough. 
WHen it is small enough one node may be two different neighors reducing the total number of neighbors.\n for n in events(st):\n assert len(n.neighbors) >= 4", "def visit(nbrs, atom, visited):\n visited[atom] = 1\n result = 1 # To be returned.\n for nbr in nbrs[atom]:\n if visited[nbr] > 0:\n continue\n result += visit(nbrs, nbr, visited)\n\n return result", "def _count_neighbours(self):\n for point in self._points:\n self._neighbour_counter[point] += len(point.cluster.points)", "def numIslands(self, grid):\r\n \r\n if not grid:\r\n return 0\r\n \r\n rows, cols = len(grid), len(grid[0])\r\n visited = set()\r\n res = 0\r\n \r\n def bfs(r, c):\r\n q = []\r\n visited.add( (r,c) )\r\n q.append( (r,c) )\r\n while q:\r\n row, col = q.pop(0)\r\n directions = [ [1,0], [-1,0], [0,1], [0,-1] ]\r\n for dr, dc in directions:\r\n r, c = row + dr, col + dc\r\n if r in range(rows) and c in range(cols) and grid[r][c] == \"1\" and (r,c) not in visited:\r\n q.append( (r,c) )\r\n visited.add( (r,c) )\r\n \r\n for r in range(rows):\r\n for c in range(cols):\r\n if grid[r][c] == \"1\" and (r,c) not in visited:\r\n bfs(r,c)\r\n res += 1\r\n return res", "def _count_holes(cls, list):\n return len([i for i in list if cls._only_below_fermi(i)])", "def add_new_place(places_list):\r\n new_place = [0, '0', 0, 0]\r\n new_place[3] = 'n'\r\n new_place[0] = str(input(\"Place name: \").strip())\r\n new_place[1] = str(input(\"Country: \").strip())\r\n while new_place[0] == \"\":\r\n print(\"Input cannot be blank\")\r\n new_place[0] = str(input(\"Place name: \").strip())\r\n while new_place[1] == \"\":\r\n print(\"Input cannot be blank\")\r\n new_place[1] = str(input(\"Country: \").strip())\r\n else:\r\n while True:\r\n try:\r\n new_place[2] = str(input(\"Priority: \"))\r\n if int(new_place[2]) >= 1 and int(new_place[2]) <= 3:\r\n break\r\n else:\r\n print(\"Priority must be 1, 2 or 3\")\r\n except ValueError:\r\n print(\"Invalid input; enter a valid number\")\r\n print(\"{} in {} (priority {}) added to unvisited list\".format(new_place[0], new_place[1], new_place[2]))\r\n places_list.append(new_place)\r\n places_list = sorted(places_list, key=lambda places_list: places_list[2])\r\n return places_list", "def test_returns_zero_if_list_is_empty(self):\n result = island_counter([])\n self.assertEqual(result, 0)", "def obstacle_count(self):\n # Gotten from the discord server\n # do a scan of the area in front of the robot\n self.scan()\n # FIGURE OUT HOW MANY OBSTACLES THERE WERE\n see_an_object = False\n count = 0\n\n for angle in self.scan_data:\n dist = self.scan_data[angle]\n if dist < self.SAFE_DISTANCE and not see_an_object:\n see_an_object = True\n count += 1\n print(\"~~~~ I SEE SOMETHING!!! 
~~~~~\")\n elif dist > self.SAFE_DISTANCE and see_an_object:\n see_an_object = False\n print(\"I guess the object ended\")\n\n print(\"ANGLE: %d | DIST: %d\" % (angle, dist))\n print(\"\\nI saw %d objects\" % count)", "def qubits_within(self, depth: int, qubit: cirq.GridQubit,\n qubit_list: Iterable[cirq.GridQubit],\n visited: Set[cirq.GridQubit]) -> int:\n if qubit not in qubit_list:\n return 0\n if qubit in visited:\n return 0\n visited.add(qubit)\n if depth <= 0:\n return 1\n c = 1\n for diff in ADJACENCY:\n c += self.qubits_within(depth - 1, qubit + diff, qubit_list, visited)\n return c", "def count_alive(self):\n\n # set the alive count to 0\n num_alive = 0\n\n # run through all cells in the map array, and if \n # the cell is a space cell, increment the alive \n # value.\n for j in range(0, self.height):\n for i in range(0, self.width):\n if (self.map[j][i] == self.space_val):\n num_alive += 1\n\n # check the map contents, and return the alive val.\n self.assert_array_size('count_alive', self.map)\n return num_alive" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save all the changes from the user to places.csv
def save_file(self):
    file_write = open('places_backup.csv', 'w')
    for place in self.places:
        file_write.write(
            place[0].name + "," + place[0].country + "," + str(place[0].priority) + "," + place[0].status + "\n")
    file_write.close()
[ "def _saveCSV( self ):", "def save_rewards(self):\n with open(self.rewards_path, 'w') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow(self.global_rewards)", "async def save(self):\r\n\r\n # Loads all inactive users from the old file and adds to the new file\r\n with open(\"data/orbs.csv\", mode=\"r\", newline=\"\") as file:\r\n all_users_list = []\r\n reader = csv.reader(file, delimiter=\";\")\r\n for line in reader:\r\n if line[0] not in self._active_users:\r\n all_users_list.append(list(line))\r\n print(all_users_list)\r\n \r\n # Adds the active users to the list of users\r\n for user in self._active_users:\r\n all_users_list.append(user.generate_line())\r\n\r\n # Writes all the saved data to the file\r\n with open(\"data/orbs.csv\", mode=\"w\", newline=\"\") as file:\r\n writer = csv.writer(file, delimiter=\";\", quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n for user in all_users_list:\r\n writer.writerow(user)\r\n\r\n self._active_users = []", "def save_place(place):\n conn = sqlite3.connect('places.db')\n cursor = conn.cursor()\n\n sql = 'INSERT INTO places(local_name, full_address, latitude, longitude, place_id)' \\\n 'VALUES(?, ?, ?, ?, ?)'\n\n cursor.execute(sql, (place['local_name'],\n place['full_address'],\n place['latitude'],\n place['longitude'],\n place['place_id']\n )\n )\n conn.commit()\n conn.close()", "def save(users):\n with open('users.csv', 'w+') as f:\n writer = csv.writer(f)\n columns = ['name', 'messages_sent', 'likes_given', 'self_likes', 'likes_received', 'words_sent']\n writer.writerow(columns)\n for key in users:\n writer.writerow([users[key][column] for column in columns])", "def save_csv(self, filename):\n redditors = set(self.submitters.keys()).union(self.commenters.keys())\n mapping = dict((x.lower(), x) for x in redditors)\n with codecs.open(filename, 'w', encoding='utf-8') as outfile:\n outfile.write('username, type, permalink, score\\n')\n for _, redditor in sorted(mapping.items()):\n for submission in self.submitters.get(redditor, []):\n outfile.write(u'{0}, submission, {1}, {2}\\n'\n .format(redditor, submission.permalink,\n submission.score))\n for comment in self.commenters.get(redditor, []):\n outfile.write(u'{0}, comment, {1}, {2}\\n'\n .format(redditor, comment.permalink,\n comment.score))", "def save_data(self, path=\"\"):\n with open(path, \"w\", encoding=\"utf-8\") as fp:\n writer = csv.writer(fp, dialect=csv.unix_dialect)\n for row in self._data:\n writer.writerow(row)\n #writer.writerows(self._data)", "def load_places(self):\r\n file_read = open('places_backup.csv', 'r')\r\n for place in file_read:\r\n place_string = place.split(\",\")\r\n self.places.append(\r\n [Place(place_string[0], place_string[1], int(place_string[2]), place_string[3].strip())])\r\n\r\n file_read.close()", "def on_toCsv_clicked(self):\n pth = self.path.text()\n print(pth)\n\n # Save data as .csv\n conn = sqlite3.connect('stock.db')\n stockModel = dm.StockModel(conn)\n stockModel.to_csv(pth)", "def saveToCSV(office, sport, matches, driver):\r\n # fill .csv file with the scraped matches (rewrites the existing values in provided datafile with fresh ones)\r\n with open(office + '/' + sport + '.csv', 'w', newline='', encoding='utf-8') as f:\r\n write = csv.writer(f)\r\n write.writerow(['name', 'date', 'home', 'draw', 'away'])\r\n write.writerows(matches)\r\n # close chrome driver\r\n driver.quit()", "def save(self, name=None):\n if name is None:\n name = self.file_path\n ids = name+'.ids.csv'\n print('saving ids to {:s}'.format(ids))\n 
lists2csv(self.ids, ids, delimiter=' ')\n dictionary = name+'.dict.csv'\n print('saving dictionary to {:s}'.format(dictionary))\n lists2csv([[word, i] for word, i in self.word_to_id.items()], dictionary, \" \")", "def save_part_assignments(self):\n csvfile = os.path.join(self.path, \"jlcpcb\", \"part_assignments.csv\")\n with open(csvfile, \"w\", newline=\"\", encoding=\"utf-8\") as f:\n writer = csv.writer(f, delimiter=\",\", quotechar='\"')\n for part, values in self.parts.items():\n fp = get_footprint_by_ref(self.board, part)\n bom = get_exclude_from_bom(fp)\n pos = get_exclude_from_pos(fp)\n writer.writerow([part, values[\"lcsc\"], int(bom), int(pos)])", "def updateIntoCsv(self,filename,where):\n\t\tpass", "def save_item(self):\n self.df_selected = self.df.query(\"title == @self.food_names_dropdown.get()\")\n self.expire = self.entry_date + datetime.timedelta(days=int(self.df_selected[\"expiration (d)\"]))\n self.notify = self.expire - datetime.timedelta(days=int(self.df_selected[\"notify (d)\"]))\n self.new_row = {\"title\":self.food_names_dropdown.get(), \"type\":self.food_type_dropdown.get(), \"amount\":self.servings_dropdown.get(), \"entry date\":self.entry_date, \"notify (days)\": self.notify, \"expiration (days)\": self.expire}\n\n self.df_user = self.df_user.append(self.new_row, ignore_index=True)\n self.df_user.to_csv('user_items.csv', mode=\"w+\", index=False)\n \n self.update_treeview()\n self.clear_all()", "def save(self, name=None):\n if name is None:\n name = self.file_path\n ids = name + '.ids.csv'\n print('saving ids to {:s}'.format(ids))\n lists2csv(self.ids, ids, delimiter=' ')\n dictionary = name + '.dict.csv'\n print('saving dictionary to {:s}'.format(dictionary))\n lists2csv([[word, i] for word, i in self.word_to_id.items()], dictionary, \" \", encoding='utf-8')", "def save(self):\n self.init_file()\n fields = self._get_attributes()\n values = []\n for field in fields:\n if isinstance(getattr(self, field), CSVModel):\n value = str(getattr(self, field).id)\n elif isinstance(getattr(self, field), list):\n value = []\n for i in getattr(self, field):\n value.append(i.id)\n value = \",\".join(value)\n else:\n value = getattr(self, field)\n values.append(value)\n\n open(self.file_path(), \"a\").write(\"|\".join(values) + \"\\n\")", "def handle_save_table(self): \n\n # Generates a dialog to get the file's name \n filename = unicode(QFileDialog.getSaveFileName(self, 'Save Table', '', \".csv(*.csv)\")) \n \n fcsv = file(filename, 'w')\n \n for currentRow in range(self.tablefrequency.rowCount()):\n \n a_0 = str(self.tablefrequency.item(currentRow, 0).text())\n a_1 = str(self.tablefrequency.item(currentRow, 1).text())\n a_2 = str(self.tablefrequency.item(currentRow, 2).text())\n a_3 = str(self.tablefrequency.item(currentRow, 3).text())\n \n fcsv.write(\"{0}, {1}, {2}, {3}\\n\".format(a_0, a_1, a_2, a_3))\n\n fcsv.close()", "def saveCommit(commitRow,path):\n exportRowCsv(path,commitRow)", "def save_coordinates(self,tweet):\n if tweet['coordinates']:\n with open(self.tweets_geo_file, \"ab\") as output:\n i=1\n for c in tweet['coordinates']:\n output.write(tweet['id']+','+tweet['country']+','+tweet['city']+','+tweet['province']+','+str(i)+', '+str(c[0])+', '+str(c[1])+'\\n')\n i+=1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
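Illustrative sketch (not a dataset row) of the one-line-per-place CSV format that the load_places and save_file documents above both assume; the file name matches the snippets, but the sample rows are made up:

rows = [("Sydney", "Australia", 1, "n"), ("Kyoto", "Japan", 2, "y")]

with open("places_backup.csv", "w") as file_write:
    for name, country, priority, status in rows:
        file_write.write(name + "," + country + "," + str(priority) + "," + status + "\n")

with open("places_backup.csv") as file_read:
    parsed = [line.strip().split(",") for line in file_read]

print(parsed)  # [['Sydney', 'Australia', '1', 'n'], ['Kyoto', 'Japan', '2', 'y']]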
Returns true if the profile is pure
def is_pure_profile(game, prof):
    # For an asymmetric game, this will always return false, but then it
    # shouldn't be an issue, because pure strategy regret will be more
    # informative.
    pure = np.any(np.add.reduceat(prof, game.role_starts) > 1.5)
    utils.check(
        game.is_profile(np.asarray(prof, int)) if pure else game.is_mixture(prof),
        "profile must be valid",
    )
    return pure
[ "def isPure(self):\n return self.pure", "def _profile_flag(self, group):\n if self.core.metadata.groups[group].is_profile:\n return 'yes'\n else:\n return 'no'", "def verify_profile_availability(self, profile):\n pass", "def is_pure(self):\n return \"py3\" in self.python_versions and \"none\" in self.abis and \"any\" in self.platforms", "def has_profile_loaded(self):\n if self.browsermodel:\n return True\n else:\n return False", "def is_pure(self):\n dims = [face.dimension() for face in self._facets]\n return max(dims) == min(dims)", "def hasManual(self) -> bool:\n ...", "def Explicit(self) -> bool:", "def test_user_has_profile_is_hirable_by_default(self):\n this_user = self.users[0]\n this_user.save()\n self.assertTrue(self.users[0].profile.hireable is True)", "def isProfileSetting(name):\n\tglobal settingsDictionary\n\tif name in settingsDictionary and settingsDictionary[name].isProfile():\n\t\treturn True\n\treturn False", "def return_True():\n return True", "def test_user_has_profile_is_not_hirable(self):\n this_user = self.users[0]\n this_user.profile.hireable = False\n this_user.save()\n self.assertTrue(self.users[0].profile.hireable is False)", "def justEvaluated(self) -> bool:\r\n return False", "def has_hm(self) -> bool:\n return self.halo_profile is not None", "def check_profile(profile, remote):\n\n return profile in get_profiles(remote)", "def is_fireproof(self) -> bool:\n pass", "def is_probability(cube: Cube) -> bool:\n try:\n find_threshold_coordinate(cube)\n except CoordinateNotFoundError:\n return False\n return True", "def professors_full():\n return np.sum(prof_avail) == 0", "def check_profile(profile=None):\n from conf.profiles import getAllProfilesObjects\n from core.exceptions import ProfileDoesNotExist\n\n if not profile:\n return False\n\n profile_available = []\n [profile_available.append(p.name) for p in getAllProfilesObjects()]\n try:\n for p in profile:\n if p not in profile_available: # Check profile exist\n raise ProfileDoesNotExist(\"Profile %s doesnt exist !\" % profile)\n else:\n return True\n except ProfileDoesNotExist as pne:\n print pne\n exit(pne.code)", "def test_TrivialProfile():\n\tprofile_model = hpc.TrivialProfile()\n\n\tassert hasattr(profile_model, 'cosmology')\n\tassert isinstance(profile_model.cosmology, cosmology.FlatLambdaCDM)\n\n\tassert type(profile_model.cumu_inv_func_table) == np.ndarray\n\tassert list(profile_model.cumu_inv_func_table) == []\n\n\tassert profile_model.cumu_inv_func_table_dict == {}\n\n\tassert type(profile_model.cumu_inv_param_table) == np.ndarray\n\tassert list(profile_model.cumu_inv_param_table) == []\n\n\tassert profile_model.cumu_inv_param_table_dict == {}\n\n\tassert profile_model.halo_prof_func_dict == {}\n\n\tassert profile_model.haloprop_key_dict == {}\n\n\tprofile_model.build_inv_cumu_lookup_table()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
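For illustration only (not a dataset row): np.add.reduceat sums the profile within each role, which is how the is_pure_profile document above tells an integer count profile from a probability mixture; the role boundaries below are invented for the example:

import numpy as np

role_starts = np.array([0, 2])                 # hypothetical: role 0 = strategies 0-1, role 1 = strategies 2-4
pure = np.array([2, 0, 1, 0, 0])               # player counts per strategy
mixture = np.array([0.5, 0.5, 1.0, 0.0, 0.0])  # probabilities that sum to 1 within each role

print(np.add.reduceat(pure, role_starts))      # [2 1]   -> a role total exceeds 1.5
print(np.add.reduceat(mixture, role_starts))   # [1. 1.] -> no role total exceeds 1.5
print(np.any(np.add.reduceat(pure, role_starts) > 1.5))     # True: treated as a pure profile
print(np.any(np.add.reduceat(mixture, role_starts) > 1.5))  # False: treated as a mixture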
get the welfare of a profile or mixture
def welfare(game, prof):
    if is_pure_profile(game, prof):  # pylint: disable=no-else-return
        return regret.pure_social_welfare(game, np.asarray(prof, int)).item()
    else:
        return regret.mixed_social_welfare(game, prof).item()
[ "def social_welfare(game, profile, role=None):\n\tif is_pure_profile(profile):\n\t\tvalues = (game.values[game[profile]] * game.counts[game[profile]])\n\telif is_mixture_array(profile):\n\t\tplayers = np.array([game.players[r] for r in game.roles])\n\t\tvalues = (game.getExpectedPayoff(profile) * players)\n\telif is_profile_array(profile):\n\t\treturn social_welfare(game, game.toProfile(profile))\n\telif is_mixed_profile(profile):\n\t\treturn social_welfare(game, game.toArray(profile))\n\telse:\n\t\traise TypeError(\"unrecognized profile type: \" + str(profile))\n\tif role == None:\n\t\treturn values.sum()\n\telse:\n\t\treturn values[game.index(role)].sum()", "def calc_windProfile(inputSpeed,windUnits,inputHeight,outputHeight,canopyHeight,heightUnits,crownRatio,surface,model):\n \n \n #Set up the wind profile \n wind=wp.windProfile()\n wind.set_paths(sDataFile,canopyFlowPath)\n wind.useMutliProc=True\n wind.set_InputWindSpeed(float(inputSpeed),windUnits)\n wind.set_InputWindHeight(float(inputHeight),heightUnits)\n wind.set_OutputWindHeight(float(outputHeight),heightUnits)\n wind.set_CanopyHeight(float(canopyHeight),heightUnits)\n wind.crownRatio=float(crownRatio)\n wind.set_surface(surface)\n \n\n if(model==\"Massman\"):\n wind.cf_uz()\n outputWindSpeed = [wind.get_OutputWindSpeed(windUnits),\"NAN\"]\n outDataFile = [wind.PlotDataFile,\"NAN\"] \n \n if(model==\"Albini\"):\n wind.a_uz()\n outputWindSpeed = [\"NAN\",wind.get_aOutputWindSpeed(windUnits)]\n outDataFile = [\"NAN\",wind.a_PlotDataFile] \n\n if(model==\"Both\"):\n wind.cf_uz()\n wind.a_uz()\n \n outputWindSpeed = [wind.get_OutputWindSpeed(windUnits),\n wind.get_aOutputWindSpeed(windUnits)]\n outDataFile = [wind.PlotDataFile,wind.a_PlotDataFile]\n# else:\n# outputWindSpeed = [\"NAN\",\"NAN\"]\n# outDataFile = [\"NAN\",\"NAN\"]\n \n writeLogFile(wind.writeLogText()) \n return outputWindSpeed,outDataFile", "def advapi32_GetCurrentHwProfile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpHwProfileInfo\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _get_profile_type(self):\n return self.__profile_type", "def extract_significance(properties, tool, default):\n if properties is None:\n return default\n significance = codesonar_significance(properties)\n if significance is not None:\n return significance\n if tool[:5] == \"Julia\":\n return julia_significance(properties, default)\n return default", "def get_terror_waves_info(self):", "def get_profiles(self):\n return self.get_matrix_profile(), self.get_index_profile()", "def get_reactant_specie(self, specie):\n for s in self.reactants:\n if s.specie == specie:\n return s\n return None", "def get_relevant_leverage():\n position_leverage = get_leverage()\n margin_leverage = get_margin_leverage()\n if position_leverage is None:\n return margin_leverage\n if margin_leverage is None:\n return position_leverage\n # a position leverage of 100 means cross (bitmex)\n return position_leverage if 100 > position_leverage > margin_leverage else margin_leverage", "def get_posterior(self, w1: str, w2: str) -> float:\n return self._bigram_model[w1][w2][\"proba\"]", "def get_fitted_profile(self):\n return (self.wavelengths, self.profile_multiple(self.stddev_new, self.means_new, self.amplitudes_new))", "def get_we_weightunit():\n \n return ['lb', 'kg', 'bodyweight']", "def GetLastUsedWiredNetwork(self):\n profileList = self.config.sections()\n for profile in profileList:\n if misc.to_bool(self.config.get(profile, 
\"lastused\")):\n return profile\n return None", "def calc_profile(self, err=sys.stderr, pdf=None):\n\t\tlargest = 0\n\t\tfor bac in self.bacteria:\n\t\t\tif self.bacteria[bac].calc_mic(\"{}_{}_{}_\".format(pdf, str(self), str(bac))) > largest:\n\t\t\t\tlargest = self.bacteria[bac].mic\n\t\tlargestnmic = 0\n\t\tfor bac in self.bacteria:\n\t\t\tdil = self.bacteria[bac]\n\t\t\t\n\t\t\tif dil.mic == -1:\n\t\t\t\terr.write(\"Sample: {}; Organism: {} - Poor Fit.\\n\".format(str(self), str(bac)))\n\t\t\telif dil.mic == -2:\n\t\t\t\terr.write(\"Sample: {}; Organism: {} - Too dilute. Never reached MIC.\\n\".format(str(self), str(bac)))\n\t\t\telif dil.mic == -3:\n\t\t\t\terr.write(\"Sample: {}; Organism: {} - Too concentrated. Dilute and re-screen.\\n\".format(str(self), str(bac)))\n\t\t\t\n\t\t\tif dil.mic < 0:\n\t\t\t\tdil.nmic = 0\n\t\t\telse:\n\t\t\t\tdil.nmic = math.log10(10 * ((dil.mic / largest) ** (-1)))\n\t\t\t\n\t\t\tif dil.nmic > largestnmic:\n\t\t\t\tlargestnmic = dil.nmic\n\t\t\t\t\n\t\tfor bac in self.bacteria:\n\t\t\tif largestnmic == 0:\n\t\t\t\tbreak\n\t\t\tself.bacteria[bac].nmic /= largestnmic", "def GetDefaultWiredNetwork(self):\n profileList = self.config.sections()\n for profile in profileList:\n if misc.to_bool(self.config.get(profile, \"default\")):\n return profile\n return None", "def getProfileSetting(name):\n\tif name in tempOverride:\n\t\treturn tempOverride[name]\n\tglobal settingsDictionary\n\tif name in settingsDictionary and settingsDictionary[name].isProfile():\n\t\treturn settingsDictionary[name].getValue()\n\ttraceback.print_stack()\n\tsys.stderr.write('Error: \"%s\" not found in profile settings\\n' % (name))\n\treturn ''", "def _get_test_profile(self):\n return self.__test_profile", "def get_normalized_dope_profile(self):\n from modeller.selection import selection\n import normalized_dope\n import physical\n sel = selection(self)\n edat = sel.get_dope_energy_data()\n oldgprsr = self.group_restraints\n self.group_restraints = sel.get_dope_potential()\n try:\n profile = sel.get_energy_profile(edat, physical.nonbond_spline)\n finally:\n self.group_restraints = oldgprsr\n scorer = normalized_dope.DOPEScorer(self)\n return scorer.get_profile(profile)", "def get_profile():\n # Get the netCDF file\n nc = test_sbm.make_ctd_file()\n\n # Create profile object\n profile = ambient.Profile(nc, chem_names='all')\n \n # Add crossflow\n z = profile.interp_ds.coords['z'].values\n ua = np.zeros(len(z))\n for i in range(len(z)):\n ua[i] = 0.15\n\n # Add this crossflow profile to the Profile dataset\n data = np.vstack((z, ua)).transpose()\n symbols = ['z', 'ua']\n units = ['m', 'm/s']\n comments = ['measured', 'synthetic']\n profile.append(data, symbols, units, comments, 0)\n \n # Close the netCDF dataset\n profile.close_nc()\n \n # Return a profile object\n return profile" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add parser for payoff calculation
def add_parser(subparsers):
    parser = subparsers.add_parser(
        "payoffs",
        aliases=["pay"],
        help="""Compute payoffs""",
        description="""Compute payoff relative information in input game of specified profiles.""",
    )
    parser.add_argument(
        "--input",
        "-i",
        metavar="<input-file>",
        default=sys.stdin,
        type=argparse.FileType("r"),
        help="""Input file for script. (default: stdin)""",
    )
    parser.add_argument(
        "--output",
        "-o",
        metavar="<output-file>",
        default=sys.stdout,
        type=argparse.FileType("w"),
        help="""Output file for script. (default: stdout)""",
    )
    parser.add_argument(
        "profiles",
        metavar="<profile>",
        nargs="+",
        help="""File or string with json profiles from input games for which payoffs should be calculated. This file can be to be a list or a single profile""",
    )
    parser.add_argument(
        "-t",
        "--type",
        metavar="type",
        default="payoffs",
        choices=TYPE,
        help="""What to return: {} (default: %(default)s)""".format(TYPE_HELP),
    )
    return parser
[ "def _parse_price_original(self, response, add_xpath=None):\n xpathes = '//*[@id=\"price\"]/.//*[contains(@class, \"a-text-strike\")]' \\\n '/text()'\n\n if add_xpath:\n xpathes += ' |' + add_xpath\n\n price_original = self._is_empty(\n response.xpath(xpathes).extract()\n )\n\n if price_original:\n price_original = self._is_empty(\n re.findall(\n FLOATING_POINT_RGEX,\n price_original\n ), 0.00\n )\n try:\n price_original = float(price_original)\n except ValueError:\n price_original = None\n\n return price_original", "def get_payment_amount(): \r\n ask_price = request.args.get('asking_price')\r\n down_pay = request.args.get('down_payment')\r\n payment_schedule = request.args.get('payment_schedule')\r\n amor_period = request.args.get('amor_period')\r\n # check input\r\n if not ask_price or not down_pay or not payment_schedule or not amor_period:\r\n results = \"check input\"\r\n data = {\"Error\": results}\r\n response = app.response_class(response=json.dumps(data), status=400)\r\n return response\r\n\r\n # call function\r\n results = mc.get_payment_amount(ask_price, down_pay, payment_schedule, amor_period)\r\n\r\n # handle results\r\n if results != -1:\r\n data = {\"payment_amount\": results}\r\n response = app.response_class(response=json.dumps(data), status=200)\r\n else:\r\n results = \"check down_pay >= 5% for 1st 500k, 10% for rest, also atBor > down_pay\"\r\n data = {\"Error\": results}\r\n response = app.response_class(response=json.dumps(data), status=400)\r\n return response", "def parse_retirement_paragraph(soup):\n ret_age = int(soup.find(id='ret_age').string.split(' ')[0])\n ret_date = int(soup.find(id='ret_date').string.strip(' '))\n ret_amount = float(soup.find(id='ret_amount').string.strip(' ').replace(',', ''))\n return ret_age, ret_date, ret_amount", "def final_payoff_calculator(self):\n if self.treatment == 1:\n self.final_payoff = self.accumulated_payoff * Constants.shock\n self.final_payoff_cop = int(self.accumulated_payoff * Constants.shock * Constants.cop_per_ume)\n elif self.treatment == 0:\n self.final_payoff = self.accumulated_payoff\n self.final_payoff_cop = self.accumulated_payoff * Constants.cop_per_ume\n print(\"[[ APP_1_TRANSCRIPTION ]] - PLAYER - final_payoff().............round_number: \",self.round_number)\n print(\"[[ APP_1_TRANSCRIPTION ]] - PLAYER - final_payoff().............final_payoff: \",self.final_payoff)\n print(\"[[ APP_1_TRANSCRIPTION ]] - PLAYER - final_payoff().............final_payoff: \",self.final_payoff_cop)\n print(\"[[ APP_1_TRANSCRIPTION ]] - PLAYER - final_payoff().............treatment: \",self.treatment)", "def parse(self):\r\n for line in self.trade_text:\r\n print(f\"TEsting {line}\")\r\n if self.is_forex_symbol(line):\r\n self.symbol.append(line)\r\n if self.is_price(line):", "def add_calculated_price(data: dict, rate_exchange: float) -> None:\n data['Price'] = int(data['Q2-KM']) * (date.today() - date(day=1, month=1, year=data['Q5-ModelData']['year'])).days * rate_exchange", "def calculate_pay(self) -> float:\n return self.salary + self.age * 2", "def parse(cls, input):", "def parse_offer(self, response):", "def parse_input():\n user_input = input(\"Enter equation: \")\n user_input = user_input.split(\" \")\n calculator(float(user_input[0]), float(user_input[2]), user_input[1])", "def parsePriceTargetTag(tag: bs4.element.Tag) -> float:\n pt = tag.text.split(u\"\\u279D\")[-1]\n price_target = float(pt.replace('$', '').replace(',', '').strip())\n return price_target", "def _parse(self, data):\n for i, d in enumerate(data):\n c = 
self.calculations.get(i, False)\n if c is not False and i not in range(81, 91):\n c.value = c._to(d)\n continue\n elif c is not False and i in range(81, 91):\n c.value = c._to(data[i : i + 9])\n continue\n if c is False and i not in range(81, 91):\n LOGGER.warn(f\"Calculation '{i}' not in list of calculationss\")", "def parse_input():\n raw = input()\n split1 = raw.split(\" \")\n calculator(split1[2], split1[4], split1[3])", "def date_parser():", "def FindPay():\n ratev = float(rate.get())\n yearsv = float(years.get())\n amountv = float(amount.get())\n r = ratev/100/12\n p = 12*yearsv\n payment.insert(0,str(\"{0:.2f}\".format((r*amountv)/(1-(1+r)**-p))))\n payment.configure(state='readonly')", "def new_price_estimate() -> PriceEstimate:\n return PriceEstimate(\n product_id='',\n currency_code='',\n display_name='',\n estimate='')", "def __price(symbol: str, callback: function):\n pass", "def donationParser(inLoc, header=True):\n donations=[]\n file =open(inLoc, 'r', encoding='latin1')\n for line in file:\n if header:\n header = False\n continue\n front = line.find(\"\\\"$\")\n back = line.find(\".\", front)\n loc = line.find(\",\", front, back)\n if loc:\n line = list(line)\n line[loc] = \"\"\n line = \"\".join(line)\n\n temp = []\n line=line.rstrip().split(',')\n\n try:\n temp.append(dateutil.parser.parse(line[0]).astimezone())\n\n temp.append(line[1])\n temp.append(line[2])\n temp.append(Decimal(sub(r'[^\\d.]', '', line[3])))\n temp.append(\"\".join(line[4:]))\n donations.append(temp)\n except:\n donations[-1][4]+=\" \".join(line)\n return donations", "def parse_self_consumption(xml_text):\n\n if not xml_text: return None\n soup = BeautifulSoup(xml_text, 'html.parser')\n res = {}\n for timeseries in soup.find_all('timeseries'):\n is_consumption = len(timeseries.find_all('outBiddingZone_Domain.mRID'.lower())) > 0\n if not is_consumption: continue\n psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]\n if psr_type in ENTSOE_STORAGE_PARAMETERS: continue\n resolution = timeseries.find_all('resolution')[0].contents[0]\n datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])\n\n for entry in timeseries.find_all('point'):\n quantity = float(entry.find_all('quantity')[0].contents[0])\n if quantity == 0: continue\n position = int(entry.find_all('position')[0].contents[0])\n datetime = datetime_from_position(datetime_start, position, resolution)\n res[datetime] = res[datetime] + quantity if datetime in res else quantity\n return res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
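An illustrative sketch (not a dataset row) of how a subcommand parser like the add_parser document above is normally wired into argparse; TYPE and TYPE_HELP here are stand-in placeholders for the module-level constants the snippet references:

import argparse
import sys

TYPE = ("payoffs", "welfare")   # placeholder for the real TYPE choices
TYPE_HELP = ", ".join(TYPE)     # placeholder for the real TYPE_HELP string

main_parser = argparse.ArgumentParser(prog="ga")
subparsers = main_parser.add_subparsers(dest="command")

parser = subparsers.add_parser("payoffs", aliases=["pay"], help="Compute payoffs")
parser.add_argument("--input", "-i", default=sys.stdin, type=argparse.FileType("r"))
parser.add_argument("profiles", nargs="+")
parser.add_argument("-t", "--type", default="payoffs", choices=TYPE,
                    help="What to return: {}".format(TYPE_HELP))

args = main_parser.parse_args(["pay", "profiles.json", "-t", "payoffs"])
print(args.command, args.profiles, args.type)   # pay ['profiles.json'] payoffs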
Add each entity as Alarm Control Panel.
def async_add_alarm_entity(config: dict):
    entity_id = "{}.{}".format(PLATFORM, slugify(config["name"]))
    alarm_entity = AlarmoAreaEntity(
        hass=hass,
        entity_id=entity_id,
        name=config["name"],
        area_id=config["area_id"],
    )
    hass.data[const.DOMAIN]["areas"][config["area_id"]] = alarm_entity
    async_add_devices([alarm_entity])
[ "def list_alarms(self, entity):\r\n uri = \"/%s/%s/alarms\" % (self.uri_base, utils.get_id(entity))\r\n resp, resp_body = self.api.method_get(uri)\r\n return [CloudMonitorAlarm(self, dct, entity)\r\n for dct in resp_body[\"values\"]]", "def spinup_alarms(self, database_class):\n logging.debug(\"Configuring Cloudwatch alarms \")\n disco_alarm_config = DiscoAlarmsConfig(self.vpc_name)\n disco_alarm = DiscoAlarm()\n instance_alarms = disco_alarm_config.get_alarms(database_class)\n disco_alarm.create_alarms(instance_alarms)", "def display_alarms(self):\n # And finally, display the alarms below the header\n all_alarms = self.alarm_mgr.get_all_alarms()\n print('All Alarms:\\n' + AlarmCli.dashes_line)\n if not all_alarms:\n print('\\tThere are not saved alarms.')\n else:\n for alarm in all_alarms:\n print(alarm)\n print('\\n') # Empty line for visual spacing", "def AddAppointments( self, Appointments ):\n\t\tfor App in Appointments:\n\t\t\tevent = Event()\n\t\t\tif App.has_key( 'Class' ): \n\t\t\t\tevent.add('summary', App['Subject']+\" - \"+App['Class'])\n\t\t\telse:\n\t\t\t\tevent.add('summary', App['Subject'])\n\t\t\tevent.add('dtstart', App['Hours'][0])\n\t\t\tevent.add('dtend', App['Hours'][1])\n\t\t\t\n\t\t\tif App.has_key( 'Location' ): event.add( 'location', App['Location'] )\n\t\t\t\n\t\t\tself.cal.add_component(event)\n\t\t\t# print \"Event added\", App", "def show_agenda_items():", "def make_hourgroups(self):\r\n all_activities = Activity.objects.all()\r\n for activity in all_activities:\r\n hg = HourGroup.objects.create(name=activity.name)\r\n hg.activities.add(activity)", "def _attachToBlinkyAlarms (self, blinkyAlarms, alarmManager):\n self._log(\"attach-to-blinky-alarms\").debug3(\"attaching\")\n\n # regular container functors \n blinkyAlarms.setValueSetFunctor (self._createFunctorAlarmsValueSet(alarmManager))\n blinkyAlarms.setNotifyTrxProgressFunctor (self._createFunctorTrxProgress(alarmManager), True) # TODO (shmulika): check what the True is for...\n blinkyAlarms.setDestroySelfFunctor (self._createFunctorAlarmsDestroySelf(blinkyAlarms, alarmManager)) \n\n # contained elements functors\n blinkyAlarms.setCreateThresholdsFunctor (self._createFunctorCreateThresholds(alarmManager))\n #blinkyAlarms.setDeleteThresholdsFunctor (self.createFunctorDeleteThresholds) # TODO(shmulika): is necessary?\n\n blinkyAlarms.setCreateSimulateListFunctor(self._createFunctorCreateSimulateList(alarmManager))\n #blinkyAlarms.setDeleteSimulateListFunctor(self.create) # TODO(shmulika): is necessary?\n \n blinkyAlarms.setDoActionFunctor (self._createFunctorDoAction(alarmManager))\n\n # error message functor \n alarmManager.setConfigMsgFunctor(lambda msgStr: blinkyAlarms.setConfigErrorStr(msgStr))\n\n # active the blinky node\n rc = blinkyAlarms.activate()\n if rc != ReturnCodes.kOk:\n self._log(\"attach-to-blinky-alarms-failed-activating\").error(\"failed to activate\")\n return ReturnCodes.kGeneralError \n\n # attach oper elements\n rc = self._attachToBlinkyList (self._operDomain, blinkyAlarms, alarmManager)\n if rc != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError \n\n rc = self._attachToBlinkySummary (self._operDomain, blinkyAlarms, alarmManager)\n if rc != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError \n\n rc = self._attachToBlinkyCounters (self._operDomain, blinkyAlarms, alarmManager)\n if rc != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError \n\n rc = self._attachToBlinkyDecalred (self._operDomain, blinkyAlarms, alarmManager)\n if rc != ReturnCodes.kOk:\n return 
ReturnCodes.kGeneralError \n\n rc = self._attachToBlinkyAlarm (self._operDomain, blinkyAlarms, alarmManager)\n if rc != ReturnCodes.kOk:\n return ReturnCodes.kGeneralError \n\n self._log(\"attach-to-blinky-alarms-activated\").debug2(\"attached and activated\")\n return ReturnCodes.kOk", "def make_hourgroups(self):\n all_activities = Activity.objects.all()\n for activity in all_activities:\n hg = HourGroup.objects.create(name=activity.name)\n hg.activities.add(activity)", "def make_arms():\n f1 = box(pos=(-22,2.5,58), axis=(1,0,0),\n length=35, width=5, height=2, color=color.green)\n f2 = box(pos=(22,2.5,58), axis=(1,0,0),\n length=35, width=5, height=2, color=color.green)\n list_of_arms = [ f1, f2 ]\n return list_of_arms", "def new_controls_rest():\n controls = ControlsService().create(count=batch.BATTERY)\n ObjectsOwnersService().create(objs=controls)\n yield controls", "def add_steering_panel(self):", "def __get_all_addons(self, obj):\n from ..utils import ProgressMeter\n pm = ProgressMeter(_(\"Install all Addons\"), _(\"Installing...\"), message_area=True)\n pm.set_pass(total=len(self.addon_model))\n for row in self.addon_model:\n pm.step()\n (help_name, name, ptype, image, desc, use, rating, contact, \n download, url) = row\n load_addon_file(url, callback=pm.append_message)\n self.uistate.viewmanager.do_reg_plugins(self.dbstate, self.uistate)\n pm.message_area_ok.set_sensitive(True)\n self.__rebuild_load_list()\n self.__rebuild_reg_list()", "def add_all_activity(self, today_activities):\n # Destroying existing content\n for child in self.today_activity_frame.winfo_children():\n child.destroy()\n\n # Creating the content\n today_total_activity = int(today_activities[\"metadata\"][\"total_activity\"])\n\n for i in range(today_total_activity):\n activity = today_activities[\"ACTIVITY\"][f\"activity_{i}\"]\n\n self.add_one_row(activity[\"category\"], activity[\"subcategory\"], activity[\"description\"], activity[\"spent_time\"], activity[\"start_time\"], i)", "def createDomainAndBlinkyAlarmAndAttach (self, agent, alarmManager):\n priority=a.sys.blinky.domain_priority.DomainPriority.kDefault\n self._configDomain = agent.createConfigDomain(\"config-alarms\", priority)\n self._operDomain = agent.createConfigDomain(\"oper-alarms\", priority)\n self._maapiDomain = agent.createMaapiDomain(\"maapi-alarms\")\n\n self.blinkyAlarms = BlinkyAlarms.s_create(self._log, self._configDomain)\n self._attachToBlinkyAlarms(self.blinkyAlarms, alarmManager)\n \n # Do registertion\n self._operDomain.registrationDone() # TODO(shmulika): naamas might change this interface later for the oper-domain.\n self._configDomain.registerNode(self.blinkyAlarms) \n self._configDomain.registrationDone()\n self._configDomain.triggerSubscriptions()", "def list_alarms(self, entity):\r\n return self._entity_manager.list_alarms(entity)", "def migrate_component_indicators_data(apps, schema_editor):\n Component = apps.get_model('goals', 'Component')\n\n for component in Component.objects.select_related('indicator').iterator():\n component.indicators.add(component.indicator)", "def imprime_agenda(self):\n for persona in self.lista:\n persona.imprime_persona()\n print(\"****************************\")", "def instance_manage_alarms(instance):\n def last_event(ev_id):\n return event_log.instance_last_event(instance, ev_id)\n\n def alarm_raised(al_type):\n if instance.alarms:\n if any(x.alarm_type == al_type for x in instance.alarms):\n return True\n return False\n\n alarm_type = None\n alarm_timestamp = None\n additional_text = ''\n\n 
if instance.is_locked():\n alarm_type = alarm.ALARM_TYPE.INSTANCE_STOPPED\n\n elif instance.is_failed():\n if instance.host_name is None or '' == instance.host_name:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_SCHEDULING_FAILED\n else:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_FAILED\n\n elif instance.is_paused():\n # When nova launches an instance it sometimes puts the instance in the\n # paused state temporarily. Customers don't like seeing an alarm in\n # this case and it is too hard to fix nova, so we will hold off on\n # raising the alarm for 10 seconds. If the alarm is raised, we will\n # use the timestamp from when the paused state was entered.\n if instance.elapsed_time_in_state >= 10:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_PAUSED\n alarm_timestamp = instance.last_state_change_datetime.strftime(\n \"%Y-%m-%d %H:%M:%S.%f\")\n\n elif instance.is_suspended():\n alarm_type = alarm.ALARM_TYPE.INSTANCE_SUSPENDED\n\n elif instance.is_rebooting():\n alarm_type = alarm.ALARM_TYPE.INSTANCE_REBOOTING\n\n elif instance.is_rebuilding():\n if last_event(event_log.EVENT_ID.INSTANCE_EVACUATE_BEGIN):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_EVACUATING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_EVACUATING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_EVACUATING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_EVACUATING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_EVACUATING\n\n else:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_REBUILDING\n\n elif instance.is_migrating():\n alarm_type = alarm.ALARM_TYPE.INSTANCE_LIVE_MIGRATING\n\n elif instance.is_resizing():\n if last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATE_BEGIN):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATE_REVERT_BEGIN):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATE_REVERTING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_RESIZE_BEGIN):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_RESIZING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_RESIZING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZING\n\n else:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZE_REVERTING\n\n elif instance.is_resized():\n if last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATE_BEGIN):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATING):\n if instance.action_data.initiated_from_cli():\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATED\n additional_text = \"waiting for confirmation\"\n else:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING):\n if instance.action_data.initiated_from_cli():\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATED\n additional_text = \"waiting for confirmation\"\n else:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATE_REVERT_BEGIN):\n alarm_type = 
alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_COLD_MIGRATE_REVERTING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATE_REVERTING\n\n elif alarm_raised(alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATED):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_COLD_MIGRATED\n additional_text = \"waiting for confirmation\"\n\n elif last_event(event_log.EVENT_ID.INSTANCE_RESIZE_REVERT_BEGIN):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZE_REVERTING\n\n elif last_event(event_log.EVENT_ID.INSTANCE_RESIZE_REVERTING):\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZE_REVERTING\n\n else:\n alarm_type = alarm.ALARM_TYPE.INSTANCE_RESIZED\n\n if alarm_type is not None:\n if not alarm_raised(alarm_type):\n instance_clear_alarm(instance.alarms)\n instance.alarms = instance_raise_alarm(instance, alarm_type,\n additional_text=additional_text,\n alarm_timestamp=alarm_timestamp)\n else:\n instance_clear_alarm(instance.alarms)\n instance.alarms = list()", "def set_alarm_time(times):\n alarm_time.append(times)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a unique ID to use for this entity.
def unique_id(self): return f"{self.entity_id}"
[ "def unique_id(self):\n return self.id", "def UniqueEntityId(self) -> str:", "def id(self):\r\n if not hasattr(self, '_id'):\r\n raise MissingID\r\n return self._id", "def id(self):\n return id(self._getobj_())", "def UniqueId(self) -> str:", "def get_entity_id(self):\n\n\t\treturn self.__entity_id", "def get_id(self) -> str:\r\n return self.resource_id", "def uuid(self):\n return self.raw.Id", "def unique_id(self):\n return \"{}.{}\".format(self.__class__, self.wink.deviceId())", "def entity_id(self) -> Optional[Text]:\n return self._entity_id", "def _get_id(self) -> \"std::string\" :\n return _core.Property__get_id(self)", "def new_id(self) -> str:\n return uuid.uuid4().hex", "def get_id(self):\n return self.data[self.system_idx][\"id\"]", "def id(self) -> int:\n return self._context.id", "def get_id(self):\n return self.data['id']", "def data_id(self) -> str:\n return self.entity_description.data_id", "def unique_id() -> str:\n return \"unique-id\"", "def get_uuid(self):\n return self._model_uuid", "def entity_id(self):\n return f\"sensor.{self._entity_id}\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the arm mode.
def arm_mode(self): return self._arm_mode
[ "def getRSelMode(self,targetDevice):\n if (targetDevice in self.adc_based_acquisition):\n return \"e5x\"\n elif (targetDevice in [\"SAML22\"]):\n return \"l22\"\n elif (targetDevice in [\"PIC32CZCA80\", \"PIC32CZCA90\"]):\n return \"pic32cz\"\n else:\n return \"std\"", "def get_arm(self, lr):\n if lr == \"l\":\n return self.larm\n else:\n return self.rarm", "def get_mode(self):\n return self.mode", "def get_mode(self,):\n return self.current_mode", "def robo_mode(self) -> str:\n return pulumi.get(self, \"robo_mode\")", "def _get_manufacturing_mode(self):\n try:\n if 'manufacturing_mode' in self.facts:\n return self.facts['manufacturing_mode']\n response = self.config(command_list=[\"show chassis\"]).response()\n fpc_search = re.search('fpc', response)\n manuf_search = re.search('boot -h -m manufacturing', response)\n self.facts['manufacturing_mode'] = bool(response and(fpc_search and manuf_search))\n return self.facts['manufacturing_mode']\n except Exception as exp:\n self.log(level='WARN', message=exp)\n self.log(level='WARN', message=\"Unable to set manufacturing mode attribute\")\n return None", "def mode(self):\n # type: () -> SrtMode\n return self._mode", "def _get_appearance_mode(self) -> str:\n if self.__appearance_mode == 0:\n return \"light\"\n else:\n return \"dark\"", "def get_screen_mode(self):\n\n\t\treturn(self.header[0x40] & 0x03)", "def get_mode_2f(self):\n return int(self.query(\"HARM?\"))", "def rnn_mode(self):\n return self._rnn_mode", "def mode(self) -> GameMode:\n return self._game.mode", "def getBoardMode(self):\n scpiQuery = ':BMOD:SLOT%d:PGRP:MODE? PGRP1' % (self._slotNo, )\n result = self._processQuery(scpiQuery, 'getBoardMode():', self._ontRemote.timeout)\n # remove '_MODE'\n offset = result.find(self._postfix)\n return result[:offset]", "def get_tune_mode(self, json_info):\n tune_mode = json_info[\"SocInfo\"][\"autoTilingMode\"]\n if self.offline_tune:\n tune_mode = \"RL\"\n return tune_mode", "def arm_config(self):\n return self._arm_config", "def get_arm_id(self):\n return self.__arm_id", "def preset_mode(self) -> str | None:\n if self.preset_modes is None:\n return None\n if not self._api.state.is_on:\n return None\n return self._api.state.fan_preset", "def get_cinder_rbd_mirroring_mode(cinder_ceph_app_name='cinder-ceph'):\n rbd_mirroring_mode_config = zaza.model.get_application_config(\n cinder_ceph_app_name).get('rbd-mirroring-mode')\n if rbd_mirroring_mode_config:\n rbd_mirroring_mode = rbd_mirroring_mode_config.get(\n 'value', DEFAULT_CINDER_RBD_MIRRORING_MODE).lower()\n else:\n rbd_mirroring_mode = DEFAULT_CINDER_RBD_MIRRORING_MODE\n\n return rbd_mirroring_mode", "def current_lantern_mode_image(self):\n if self.current_level.outdoors:\n return self.hud_map[HUD_LANTERN_MODE_SUNLIT]\n lantern = self.get_lantern()\n if not lantern or not lantern.oil_meter[0]:\n return self.hud_map[HUD_LANTERN_MODE_NONE]\n mode_name = LANTERN_MODE_MAP[lantern.mode]\n return self.hud_map[\"hud_lantern_mode_\" + mode_name]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send arm custom_bypass command.
async def async_alarm_arm_custom_bypass(self, code=None, skip_code=False): _LOGGER.debug("alarm_arm_custom_bypass") await self.async_handle_arm_request(STATE_ALARM_ARMED_CUSTOM_BYPASS, code=code, skip_code=skip_code)
[ "def disableProtection(self):\n self.write(\"PROT:OVP 0\")\n self.write(\"PROT:OCP 0\")\n self.write(\"PROT:OPP 0\")", "def _iac_dont(self, option):\n logger.debug('send IAC DONT %s', name_option(option))\n self.send_str(bytes(''.join((IAC, DONT, option))))", "def send_negative_action(self) -> None:\n cmd = 2 # ANCS_CMD_PERFORM_NOTIFICATION_ACTION,\n uid = self.id\n action_id = 1 # ANCS_ACTION_NEGATIVE\n buffer = struct.pack(\"<BIB\", cmd, uid, action_id)\n self.control_point.write(buffer)", "def hxRaw(self, cmd):\n \n cmdKeys = cmd.cmd.keywords\n ctrl = self.controller\n\n rawCmd = cmdKeys['raw'].values[0]\n cmd.fail('text=\"not implemented\"')", "def forward_proxy_bypass_default_action(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"forward_proxy_bypass_default_action\")", "async def antiraid(self, ctx):\n\n try:\n await ctx.message.delete()\n except discord.errors.NotFound:\n pass\n\n channel = discord.utils.get(ctx.guild.channels, name=self.lobby_channel)\n\n overwrite = dict(channel.overwrites)[ctx.guild.default_role]\n overwrite.send_messages = False\n\n await channel.set_permissions(ctx.guild.default_role, overwrite=overwrite, reason='AntiRaid Enabled')\n await ctx.send('AntiRaid enabled.')", "def clearProtection(self):\n self.write(\"PROT:CLE\")", "def disableMotors(self):\n self.doCommand('EM,0,0\\r')", "def skip_disable_keys(self):\n return \"\"\"--skip-disable-keys\"\"\"", "def send_disable_forwarding(self):\n pass", "async def permit_join_device_disable(self):\n data = '{\"cmdId\":' + str(Command.CANCEL_INCREACE_EQUIPMENT.value) + '}'\n run = self.construct_message(data)\n logging.info(f\"Disable join device with: {run}\")\n await self.send_data(run)", "def test_deny_skipped(self):\n\n requirement = self.tool_basic_requirement()\n requirement.save()\n\n requirement2 = self.tool_basic_requirement()\n requirement2.action = constants.REQ_ACTION_DENY\n requirement2.sender = \"IGNORED\"\n requirement2.save()\n\n helper = self.tool_get_helper()\n\n helper.connect(\"\", \"\", \"1.1.1.1\", \"\", {})\n helper.mail_from(\"test@company.com\", {})\n\n self.assertTrue(\n helper.enabled,\n \"Helper was unexpectedly disabled\"\n )", "async def defcon_disable_alias(self, ctx: Context) -> None:\n await self.invoke(ctx, \"defcon disable\")", "def hardwarebypass(self, element_id, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/elements/{}/hardwarebypass\".format(api_version,\n tenant_id,\n element_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"get\")", "def powerOff(self):\n self.instr.write(\"OUTP OFF\")", "async def hard_mode(self, ctx):\n self.hard_mode = not self.hard_mode\n #self.auto_hint = not self.hard_mode #disable auto hint in hard mode\n await ctx.channel.send(f\"Hard mode has been set to: {self.hard_mode}\")", "def handle_disable_app(self, hermes, intent_message):\n self.chmod_app(hermes, intent_message, i18n.RESULT_DISABLE_APP, 0o644)", "def trust(self, mac):\n\n self.log.info( f'Device being trusted: {mac}' ) # Log info\n self.sendCMD( f'trust {mac}' ) # Trust the device", "def setRemoteControl(self):\n self.write(\"SYS:REM USB\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Arm the alarm or switch between arm modes.
async def async_arm(self, arm_mode, **kwargs): skip_delay = kwargs.get("skip_delay", False) bypass_open_sensors = kwargs.get("bypass_open_sensors", False) self._arm_mode = arm_mode self._bypass_mode = bypass_open_sensors leave_delay = self._config[const.ATTR_MODES][arm_mode]["exit_time"] if ( self._state != STATE_ALARM_DISARMED or skip_delay or not leave_delay ): # immediate arm event (open_sensors, bypassed_sensors) = self.hass.data[const.DOMAIN]["sensor_handler"].validate_event( area_id=self.area_id, event=const.EVENT_ARM, bypass_open_sensors=bypass_open_sensors, arm_mode=arm_mode ) if open_sensors: # there where errors -> abort the arm _LOGGER.info( "Cannot transition from state {} to state {}, there are open sensors".format(self._state, arm_mode) ) await self.async_arm_failure(open_sensors) return False else: # proceed the arm if bypassed_sensors: self.bypassed_sensors = bypassed_sensors self.open_sensors = None if self.changed_by: _LOGGER.info("Alarm is armed ({}) by {}.".format(arm_mode, self.changed_by)) else: _LOGGER.info("Alarm is armed ({}).".format(arm_mode)) if self._state and self._state != STATE_ALARM_ARMING: async_dispatcher_send( self.hass, "alarmo_event", const.EVENT_ARM, self.area_id, { "arm_mode": arm_mode, "delay": 0 } ) await self.async_update_state(arm_mode) return True else: # normal arm event (from disarmed via arming) (open_sensors, _bypassed_sensors) = self.hass.data[const.DOMAIN]["sensor_handler"].validate_event( area_id=self.area_id, event=const.EVENT_LEAVE, bypass_open_sensors=bypass_open_sensors, arm_mode=arm_mode ) if open_sensors: # there where errors -> abort the arm _LOGGER.info("Cannot arm right now, there are open sensors") await self.async_arm_failure(open_sensors) return False else: # proceed the arm _LOGGER.info( "Alarm is now arming. Waiting for {} seconds.".format(leave_delay) ) async_dispatcher_send( self.hass, "alarmo_event", const.EVENT_ARM, self.area_id, { "arm_mode": arm_mode, "delay": leave_delay } ) self.delay = leave_delay self.open_sensors = None await self.async_update_state(STATE_ALARM_ARMING) @callback async def async_leave_timer_finished(now): """Update state at a scheduled point in time.""" _LOGGER.debug("async_leave_timer_finished") await self.async_arm(self.arm_mode, bypass_open_sensors=bypass_open_sensors) self.async_set_timer(leave_delay, async_leave_timer_finished) return True
[ "def do_arm(self, unused_line): # pylint: disable=invalid-name\n self._CheckState([actuator_types.kActuatorStateInit])\n self._CheckServosSelected()\n set_state_msg = pack_avionics_messages.ServoSetStateMessage()\n (set_state_msg\n .state_command) = actuator_types.kActuatorStateCommandArm\n (set_state_msg\n .servo_arming_signal) = safety_codes.SERVO_ARMING_SIGNAL\n print 'Arming.'\n\n for _ in xrange(self._NUM_RETRIES):\n set_state_msg.selected_servos = ServosAsBits(\n self._listener.GetUnarmedServos())\n self._set_state_aio_client.Send(\n set_state_msg, 'kMessageTypeServoSetState', OPERATOR)\n time.sleep(0.1)\n if self._listener.AllServosArmed():\n print 'Successfully armed.'\n return\n\n raise ServoClientError('Failed to arm.')", "def arm_away(self):\n self.armable.arm(ArmType.AWAY)", "def activate_arm(self):\n if not self._should_arm_activate():\n logging.info('Not activating arm; within timeout')\n return\n self._cycle_arm()", "def arm_drone(self):\n\n self.take_control()\n time.sleep(1) # To make sure we are in guided mode before arming\n self.arm()\n\n # Set the current global position as the home position\n self.set_home_as_current_position()\n\n if self.armed:\n self.state = States.ARMED\n else:\n print(\"Drone can not arm right now. \"\n \"Releasing control...\")\n #self.release_control()", "def arm_stay(self):\n self.armable.arm(ArmType.STAY)", "async def async_alarm_disarm(self, code=None):\n await self._device.set_disarmed_mode()", "def toggle_alarm(self):\r\n global alarm\r\n alarm = not alarm\r\n if(alarm):\r\n self._alarm_switch.config(text = \"Alarm: ON\")\r\n\r\n self._al_hour.config(state = 'enabled')\r\n self._al_min.config(state = 'enabled')\r\n self._al_set.config(state = 'enabled')\r\n self._al_am_pm_option.config(state = 'active')\r\n else:\r\n self._alarm_switch.config(text = \"Alarm: OFF\")\r\n\r\n self._al_hour.config(state = 'disabled')\r\n self._al_min.config(state = 'disabled')\r\n self._al_set.config(state = 'disabled')\r\n self._al_am_pm_option.config(state = 'disabled')", "def EnableAlarm(self, alarm,config):\t\t\t\t#dest = RTCC_ALM0/RTCC_ALM1\n\t\tcontrol = self.readRegister(CTRL)\n\t\tif (alarm == Alarm.ZERO):\n\t\t\tALARMREG = (control | ALM_0)\n\t\telse:\n\t\t\tALARMREG = (control | ALM_1)\n\t\t\t\n\t\tself.writeRegister(CTRL,control)\t\t\t\t#enable alarm control bit\t\t\n\t\tday = self.readRegister(control)\t\t\t\t#Set address to the alarm config/day register \n\t\tAlmarmCfg = ((day & 0x07) | (config & 0xF0))\n\t\tself.writeRegister(ALARMREG,AlmarmCfg)", "def arm_mode(self):\n return self._arm_mode", "def arm_and_takeoff(aTargetAltitude):\r\n\r\n print(\"Basic pre-arm checks\")\r\n # Don't let the user try to arm until autopilot is ready\r\n while not vehicle.is_armable:\r\n print(\" Waiting for vehicle to initialise...\")\r\n time.sleep(1)\r\n\r\n \r\n print(\"Arming motors\")\r\n # Copter should arm in GUIDED mode\r\n vehicle.mode = VehicleMode(\"GUIDED\")\r\n vehicle.armed = True\r\n\r\n while not vehicle.armed: \r\n print(\" Waiting for arming...\")\r\n time.sleep(1)\r\n\r\n print(\"Taking off!\")\r\n vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude\r\n\r\n # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command \r\n # after Vehicle.simple_takeoff will execute immediately).\r\n while True:\r\n print(\" Altitude: \", vehicle.location.global_relative_frame.alt) \r\n if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.\r\n print(\"Reached 
target altitude\")\r\n break\r\n time.sleep(1)", "def start_alarm(self):\n if self.state is \"off\":\n self.off_to_on()", "def arm_and_takeoff(aTargetAltitude):\n\n print \"Basic pre-arm checks\"\n # Don't let the user try to arm until vehicle is ready\n while not vehicle.is_armable:\n print \" Waiting for vehicle to initialise...\"\n time.sleep(1)\n\n print \"Arming motors\"\n # Copter should arm in GUIDED mode\n vehicle.mode = VehicleMode(\"GUIDED\")\n vehicle.armed = True\n\n while not vehicle.armed: \n print \" Waiting for arming...\"\n time.sleep(1)\n\n print \"Taking off!\"\n vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude\n\n # Wait until the vehicle reaches a safe height before processing the goto (otherwise the command after Vehicle.simple_takeoff will execute immediately).\n while True:\n print \" Altitude: \", vehicle.location.global_relative_frame.alt \n if vehicle.location.global_relative_frame.alt>=aTargetAltitude*0.95: #Trigger just below target alt.\n print \"Reached target altitude\"\n break\n time.sleep(1)", "def execAlarm(self):\n self._alarm = None\n self.onAlarm()", "def arm(self) -> None:\n self.log.info('Arm the MIRcat QCL system.')\n at_temperature = ctypes.c_bool(False)\n is_armed = ctypes.c_bool(False)\n self._execute('MIRcatSDK_IsLaserArmed', [ctypes.byref(is_armed)])\n if not is_armed.value:\n self._execute('MIRcatSDK_ArmDisarmLaser')\n\n while not is_armed.value:\n self._execute('MIRcatSDK_IsLaserArmed', [ctypes.byref(is_armed)])\n time.sleep(1)\n\n self._execute('MIRcatSDK_AreTECsAtSetTemperature',\n [ctypes.byref(at_temperature)])\n tec_current = ctypes.c_uint16(0)\n qcl_temp = ctypes.c_float(0)\n num_qcl = ctypes.c_uint8()\n self._execute('MIRcatSDK_GetNumInstalledQcls', [ctypes.byref(num_qcl)])\n\n while not at_temperature.value:\n for i in range(0, num_qcl.value):\n self._execute('MIRcatSDK_GetQCLTemperature',\n [ctypes.c_uint8(i+1), ctypes.byref(qcl_temp)])\n self._execute('MIRcatSDK_GetTecCurrent',\n [ctypes.c_uint8(i+1), ctypes.byref(tec_current)])\n self._execute('MIRcatSDK_AreTECsAtSetTemperature',\n [ctypes.byref(at_temperature)])\n time.sleep(.1)", "async def test_switch_change_alarm_state(hass, utcnow):\n helper = await setup_test_component(hass, create_security_system_service)\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_home\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 0\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_away\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 1\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_arm_night\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 2\n\n await hass.services.async_call(\n \"alarm_control_panel\",\n \"alarm_disarm\",\n {\"entity_id\": \"alarm_control_panel.testdevice\"},\n blocking=True,\n )\n assert helper.characteristics[TARGET_STATE].value == 3", "def enable_arms(self):\n\n rospy.loginfo(\"Attempting to enabling robot.\")\n rs = baxter_interface.RobotEnable(baxter_interface.CHECK_VERSION)\n\n try:\n rs.enable()\n except Exception, e:\n rospy.logerr(e.strerror)\n rospy.logerr(\"Failed to enable arms.\")\n return False\n\n rospy.loginfo(\"Successfully enabled robot.\")\n return True", "def testAppArmedVehicleEKFBrake(self):\n 
self.mgr.shotMgr.isAppConnected.return_value = True\n self.mgr.isButtonConnected = Mock(return_value=True)\n self.mgr.shotMgr.currentShot = shots.APP_SHOT_NONE\n self.v.armed = True\n self.v.ekf_ok = True\n self.mgr.setButtonMappings()\n call1 = call(btn_msg.ButtonLoiter, shots.APP_SHOT_NONE, btn_msg.ARTOO_BITMASK_ENABLED, \"\\0\")\n self.mgr.setArtooButton.assert_has_calls( [call1] )", "def get_arm(self, lr):\n if lr == \"l\":\n return self.larm\n else:\n return self.rarm", "async def _alarm_room(self):\n await self._remind(alarm=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }