Dataset schema (one row per example):
  query      string  (length 9 to 9.05k)
  document   string  (length 10 to 222k)
  negatives  list    (19 to 20 items)
  metadata   dict
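A minimal sketch of how rows with this schema could be read, assuming the examples are stored as JSON Lines with one {"query", "document", "negatives", "metadata"} object per line; the file name "triplets.jsonl" is hypothetical.

import json

with open("triplets.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        query = row["query"]          # natural-language description of the code
        document = row["document"]    # the matching (positive) code snippet
        negatives = row["negatives"]  # 19-20 non-matching code snippets
        # the metadata objective lists which columns form the training triplet
        triplet_columns = row["metadata"]["objective"]["triplet"]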
Make a DataFrame from blocks; each column is a different field, each row is for a different file.
def make_dataframe(block_name, blocks):
    names = {}     # store names corresponding to column ids
    all_rows = []  # store list of dicts of column_id: value
    for k, v in blocks.iteritems():
        # to hold table info for this file
        info = {}
        for line in v:
            # split around the #. parts[0] is the contents, parts[1] is the column header
            # (but note programs use diff conventions...)
            parts = [p.strip() for p in line.split('#')]
            data, comment = parts
            # for most blocks, we use the first part of parts[0] to ID what the row means
            # BUT this doesn't work for all e.g. DCINFO
            id_not_first_blocks = ["DCINFO"]
            if block_name in id_not_first_blocks:
                pass
            else:
                col_id, contents = data.split()
                names[col_id] = comment
                info[col_id] = contents
        all_rows.append(info)
    # make a DataFrame for this block
    df = pd.DataFrame(all_rows, index=blocks.keys())
    # convert column IDs to string names
    df.rename(columns=names, inplace=True)
    df.reindex_axis(sorted(df.columns), axis=1)
    df.sort_index(inplace=True)
    print df
    return df
[ "def _parse_block(self,idx):\n block_tmp = self._block_list[idx]\n blocktype = self._paragraph_or_table[idx]\n paragraph_count = sum(self._paragraph_or_table[:idx+1])\n table_count = idx + 1 - paragraph_count\n df = DataFrame()\n # paragraph\n if blocktype==1:\n l_runText = [r.text for r in block_tmp.runs]\n l_runID = arange(len(l_runText))\n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['paragraph_ID'] = paragraph_count - 1 # 0-starting index \n # table\n if blocktype==0:\n row_count = 0\n for row in block_tmp.rows:\n cell_count = 0\n for cell in row.cells:\n cell_para_count = 0\n for p in cell.paragraphs:\n l_runText = [r.text for r in p.runs]\n l_runID = arange(len(l_runText)) \n df = DataFrame({'string':l_runText,\n 'run_ID':l_runID},index=l_runID)\n df['table_ID'] = table_count - 1 # 0-starting index\n df['row_ID'] = row_count\n df['cell_ID'] = cell_count\n df['paragraph_ID'] = cell_para_count \n cell_para_count += 1\n cell_count += 1\n row_count += 1\n df['block_ID'] = idx\n self._block_dataframe_list[idx] = df", "def file_to_dataframe(file):\n # ottengo i campi del file\n fields = FileFieldName.objects.filter(file=file).all()\n\n data_frame = pd.DataFrame()\n for field in fields:\n # ottengo il campo in forma di serie\n serie = field_as_series(field)\n # modifico l'indice della serie in base a offset e stretching\n serie = apply_offset_stretching(serie, file.offset, file.stretching)\n # cambio il nome alla serie\n serie_name = get_column_name(file, field)\n serie = serie.rename(serie_name)\n\n # aggiungo la serie al dataframe\n data_frame = data_frame.join(serie, how='outer')\n\n return data_frame", "def extract_data_frame(self):\n data_frame = []\n data_block = self.extract_data_block()\n n = 0\n for block in data_block:\n int_block = []\n grid_refs = list(map(int, block[0].split(',')))\n int_block.append(grid_refs)\n for i in range(0, len(block[1:])):\n try:\n block_split = [int(x) for x in block[1:][i].split()]\n int_block.append(block_split)\n except:\n break\n data_frame.append(int_block)\n n += 1\n return data_frame", "def fill_chunks(url, bboxes):\n dframe = pd.DataFrame([bbox.astuple() for bbox in bboxes])\n dframe.columns = [cn.chunk_bx, cn.chunk_by, cn.chunk_bz,\n cn.chunk_ex, cn.chunk_ey, cn.chunk_ez]\n\n tags = [io.fname_chunk_tag(bbox) for bbox in bboxes]\n dframe[cn.chunk_tag] = tags\n\n dframe.index.name = \"id\"\n\n io.write_db_dframe(dframe, url, \"chunks\")", "def df_from_files(self):\n print('Creating dataframe...')\n num = len([name for name in os.listdir(self.raw) if not name[0] == '.'])\n files = os.path.join(self.raw, '~.info.json') # This is a weird hack\n files = files.replace('~', '{:05d}') # It allows path joining to work on Windows\n data = [json.load(open(files.format(i))) for i in range(1, num + 1)]\n\n columns = ['formats', 'tags', 'categories', 'thumbnails']\n lists = [[], [], [], []]\n deletes = {k: v for k, v in zip(columns, lists)}\n for dt in data:\n for col, ls in deletes.items():\n ls.append(dt[col])\n del dt[col]\n\n self.df = pd.DataFrame(data)\n self.df['upload_date'] = pd.to_datetime(self.df['upload_date'], format='%Y%m%d')\n self.df.to_csv(os.path.join(self.ran, 'df.csv'))\n\n self.tags = deletes['tags']\n pickle.dump(self.tags, open(os.path.join(self.ran, 'tags.txt'), 'wb'))", "def create_random_blocks(data):\n data = data.copy()\n data.reset_index(drop=True)\n\n # Threshold number of interesting points in block to be considered interesting\n thres = int(threshold_fraction * block_length)\n\n # Sets seed 
number at 42 to produce same selection of indices every run\n random.seed(42)\n\n blocks = []\n\n ran_indices = random.sample(range(len(data[data_columns[0]]) - block_length), n)\n\n # Slices DataFrame into blocks\n for i in ran_indices:\n block_slice = data[i: (i + block_length)]\n\n # Assume block label is False initially\n label = False\n\n # Finds 2 most common labels of the block\n mode = Counter(block_slice['LABELS']).most_common()\n\n if len(mode) > 1:\n # If mode is not False, mode must be a classification\n if mode[0][0] is not False:\n # If more than the threshold value of the block is the mode, label block as that mode\n if mode[0][1] > thres:\n label = mode[0][0]\n\n # If mode is False, the 2nd mode may be a classification that reaches threshold\n if mode[0][0] is False:\n # The 2nd mode must be a classification label. If it reaches threshold, label block as such\n if mode[1][1] > thres:\n label = mode[1][0]\n # Else, label block as False\n else:\n label = False\n else:\n label = mode[0][0]\n\n block = []\n\n for k in data_columns:\n channel = np.array(block_slice['%s' % k])\n block.append(channel)\n if len(channel) != block_length:\n print('%s: %s' % (k, len(channel)))\n\n # Adds tuple of the first index of the block, and the block\n blocks.append((i, label, np.array(block)))\n\n return blocks", "def as_data_frame(self, **kwargs):\n try:\n import pandas as pd\n except ImportError:\n raise ImportError(\"What are you doing trying to export a Layout \"\n \"as a pandas DataFrame when you don't have \"\n \"pandas installed? Eh? Eh?\")\n if kwargs:\n files = self.get(return_type='file', **kwargs)\n else:\n files = self.files.values()\n data = pd.DataFrame.from_records([f.entities for f in files])\n data.insert(0, 'path', [f.path for f in files])\n return data", "def read_blocks_from_csv(file):\n blockgroups_df = pandas.read_csv(file, header=None, sep=\"\\t\")\n all_blocks = []\n for blockgroup in blockgroups_df.iterrows():\n text = blockgroup[1][0]\n blocks = text.split('\\n\\n')\n all_blocks += blocks\n\n return all_blocks", "def createDataFrame():\n\n # first CSV spans time frame 7/1/18 - 3/11/19\n # second CSV spans time frame 3/12/19 - 6/30/19\n library_stats1 = pd.read_csv(\"library_stats_page1.csv\", nrows=24)\n library_stats2 = pd.read_csv(\"library_stats_page2.csv\")\n\n frames = [library_stats1, library_stats2]\n df = pd.concat(frames, axis=1)\n\n del df['Time']\n\n return df", "def produce_df(data, rows, columns, row_names=None, column_names=None):\r\n row_index = pd.MultiIndex.from_product(rows, names=row_names)\r\n col_index = [i for i in range(1,len(columns[0])+1)]\r\n return pd.DataFrame(data, index=row_index, columns=col_index)", "def create_df(path_or_buffer, v='2'):\r\n column_names = load_column_names(v=v)\r\n return pd.read_csv(\r\n path_or_buffer, sep=\"\\t\", header=None, usecols=range(len(column_names)),\r\n names=column_names, index_col=0, dtype={'EventCode': 'object'}, encoding='utf-8'\r\n )", "def merge_chunked(filepath, leftframe, sep='\\t', csize=500000, encoding='iso8859-1'):\r\n chunks = pd.read_csv(filepath, sep=sep, chunksize=csize, encoding=encoding)\r\n out_df = pd.DataFrame()\r\n for chunk in chunks:\r\n logger.debug(\"Currently reading row %8d...\", chunk.index[0])\r\n chunk_filter = leftframe.merge(chunk, how='inner')\r\n out_df = pd.concat([out_df, chunk_filter], axis=0)\r\n return out_df", "def asDataFrame(vectorizer=None):\n ret = []\n if vectorizer is None:\n cols = [\"num\", \"file\", \"tag\"]\n else:\n cols = [\"num\", \"vector\", \"tag\"]\n 
for fname in listFiles():\n if fname.find(\"-\")<0:\n continue\n tag, ind = fname.split(\"-\", 1)\n if vectorizer is None:\n ret.append((int(ind.split(\".\", 1)[0]), fname, tag))\n else:\n ret.append((int(ind.split(\".\", 1)[0]), vectorizer(readFile(fname)), tag))\n return pd.DataFrame(ret, columns=cols).set_index(\"num\")", "def _create_edges_df(edge_file, left_len, right_len):\n outer_index = ['left']*left_len + ['right']*right_len\n inner_index = range(left_len) + range(right_len)\n index = pd.MultiIndex.from_arrays([outer_index, inner_index])\n\n edges = pd.read_csv(edge_file, header=None)\n edges.columns = index\n edges.index = index\n return edges", "def new_file_from_blocks(self, path, blocks=[], rewrite_timestamps=False,\n begin=1, end=None):\n blocks = sorted(blocks) #make sure they're in ascending order\n\n with open(path, \"w\") as output:\n header = self.get_header()\n if blocks:\n blocks = self.get_conv_blocks(select=blocks)\n else:\n blocks = self.get_conv_blocks(begin=begin, end=end)\n\n for line in header:\n if not line.is_end_header:\n output.write(line.line)\n\n for line in blocks.line_map:\n output.write(line.line)\n\n output.write(self.end_tag)", "def data_frame_creator(self):\n\n rgb_dir = self.dataset_dir + \"/color/\"\n rgb_data = [\n rgb_dir + rgb for rgb in os.listdir(rgb_dir)\n ]\n\n segmentation_dir = self.dataset_dir + \"/mask/\"\n segmentation_data = [\n segmentation_dir + segmentation\n for segmentation in os.listdir(segmentation_dir)\n ]\n\n dataset = {\n 'RGB': rgb_data,\n 'SEGMENTATION': segmentation_data\n }\n\n if self.shuffle:\n return pd.DataFrame(dataset).sample(frac=1, random_state=123)\n\n return pd.DataFrame(dataset)", "def from_csv(cls, filepath_or_buffer): \n records = pd.read_csv(filepath_or_buffer)\n\n return cls(records)\n\n # ------------------------------------------------------------------\n # Old implementation kept for future use:\n\n # # Read the data from the csv file, assuming the third column of the\n # # file represents timestamp and parsing it as a datetime.\n # records = pd.read_csv(\n # filepath,\n # index_col=[0, 1],\n # header=[0, 1], \n # parse_dates=[2]\n # )\n\n # # Convert the index's 'offset' level to TimedeltaIndex.\n # records.index = records.index.set_levels(\n # pd.TimedeltaIndex(data.index.get_level_values('offset')),\n # level='offset')\n\n # # Fix column level values, an artifact of blank level values in a\n # # .csv file.\n # fields = data.columns.get_level_values('field')\n\n # #srcs = data.columns.get_level_values('source').str.replace('Un.*', 'device')\n # srcs = data.columns.get_level_values('elev_source').str.replace('Un.*', 'device')\n \n # col_tups = [(field, src) for field, src in zip(fields, srcs)]\n # data.columns = pandas.MultiIndex.from_tuples(col_tups,\n # names=['field', 'source'])\n # data['time', 'device'] = \\\n # (data['timestamp', 'device'] \\\n # - data['timestamp', 'device'].iloc[0]).dt.total_seconds()\n\n # ------------------------------------------------------------------", "def load_unprocessed_dataframe(self, file_paths: List[str]) -> pd.DataFrame:\n dataset_paths = _glob_multiple(_list_of_strings(self.config.dataset_filenames), root_dir=self.raw_dataset_dir)\n train_paths = _glob_multiple(_list_of_strings(self.config.train_filenames), root_dir=self.raw_dataset_dir)\n validation_paths = _glob_multiple(\n _list_of_strings(self.config.validation_filenames), root_dir=self.raw_dataset_dir\n )\n test_paths = _glob_multiple(_list_of_strings(self.config.test_filenames), 
root_dir=self.raw_dataset_dir)\n dataframes = []\n if len(train_paths) > 0:\n train_df = self.load_files_to_dataframe(train_paths)\n train_df[SPLIT] = 0\n dataframes.append(train_df)\n if len(validation_paths) > 0:\n validation_df = self.load_files_to_dataframe(validation_paths)\n validation_df[SPLIT] = 1\n dataframes.append(validation_df)\n if len(test_paths) > 0:\n test_df = self.load_files_to_dataframe(test_paths)\n test_df[SPLIT] = 2\n dataframes.append(test_df)\n # If we have neither train/validation/test files nor dataset_paths in the config, use data files in root dir.\n if len(dataset_paths) == len(dataframes) == 0:\n dataset_paths = file_paths\n if len(dataset_paths) > 0:\n dataframes.append(self.load_files_to_dataframe(dataset_paths))\n return pd.concat(dataframes, ignore_index=True)", "def load_all_records(file_list: list, columns: list) -> pd.DataFrame: \n dfs = [pd.read_csv(f, usecols=columns) for f in file_list]\n combined_df = pd.concat(dfs)\n \n return combined_df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Donate the item(s) / send to NPO
def donate(self):
[ "async def donate(self):\n await self.bot.say(\"You can donate to me here:\\n<https://www.paypal.me/avrae>\\n\\u2764\")", "async def donate(self, ctx, amount: CoinConverter):\n await self.transfer(ctx.author.id, ctx.guild.id, amount)\n await ctx.send(f'\\N{MONEY WITH WINGS} `{ctx.author!s}` > '\n f'`{amount}JC` > `{ctx.guild!s}` \\N{MONEY BAG}')", "async def buy(self, ctx, item, nb = 1):\n ID = ctx.author.id\n param = dict()\n param[\"ID\"] = ID\n # param[\"IDGuild\"] = ctx.guild.id\n param[\"nb\"] = nb\n param[\"item\"] = item\n\n ge.socket.send_string(gg.std_send_command(\"buy\", ID, ge.name_pl, param))\n desc = GF.msg_recv()\n lang = desc[1]\n if desc[0] == \"OK\":\n msg = discord.Embed(title = lang_P.forge_msg(lang, \"stats\", None, False, 1), color= 13752280, description = desc[2])\n msg.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.channel.send(embed = msg)\n else:\n await ctx.channel.send(desc[2])", "def send_thank_you():\n\n # Get name of donor\n donor_name = name_prompt()\n\n # Display list of donors when user types \"list\"\n while donor_name.lower() == \"list\":\n donations_list.get_formatted_list_of_donors()\n donor_name = name_prompt()\n\n # Get donation amount\n amt_input = donation_prompt()\n\n donations_list.add_donation(donor_name, float(amt_input))\n\n print(send_email(donations_list.get_last_donation(donor_name)))", "async def purchase(self, ctx, *, factory_name):\n author = ctx.author\n for item in self._factories[\"factory\"]:\n if item[\"name\"].lower() == factory_name.lower():\n for item2 in list(set(self.settings[\"user\"][str(author.id)][\"items\"])): \n itemamount = self.settings[\"user\"][str(author.id)][\"items\"].count(item2) \n if item[\"item\"] == item2:\n if item[\"price\"] <= itemamount:\n await ctx.send(\"You just bought a `{}`\".format(item[\"name\"]))\n for x in range(item[\"price\"]):\n self.settings[\"user\"][str(author.id)][\"items\"].remove(item2)\n self.settings[\"user\"][str(author.id)][\"items\"].append(item[\"name\"])\n dataIO.save_json(self._factories_file, self._factories)\n dataIO.save_json(self.location, self.settings)\n else:\n await ctx.send(\"You don't have enough `{}` to buy this :no_entry:\".format(item2))", "def donate_eqs(server, ids, receiver_id, session=\"\"):\r\n URL = f\"https://{server}.e-sim.org/\"\r\n if not session:\r\n session = login(server)\r\n results = []\r\n for ID in ids.split(\",\"):\r\n payload = {\"equipmentId\": ID.strip(), \"id\": receiver_id, \"reason\": \" \", \"submit\": \"Donate\"}\r\n send_eq = session.post(URL + \"donateEquipment.html\", data=payload)\r\n results.append(f\"ID {ID} - {send_eq.url}\\n\")\r\n print(\"\".join(results))\r\n return session", "async def sell(self, ctx, item, nb = 1):\n ID = ctx.author.id\n param = dict()\n param[\"ID\"] = ID\n # param[\"IDGuild\"] = ctx.guild.id\n param[\"nb\"] = nb\n param[\"item\"] = item\n\n ge.socket.send_string(gg.std_send_command(\"sell\", ID, ge.name_pl, param))\n desc = GF.msg_recv()\n lang = desc[1]\n if desc[0] == \"OK\":\n msg = discord.Embed(title = lang_P.forge_msg(lang, \"stats\", None, False, 2), color= 13752280, description = desc[2])\n msg.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n await ctx.channel.send(embed = msg)\n else:\n await ctx.channel.send(desc[2])", "def send_a_thank_you():\n\n # Prompt for donor's full name\n new_donor = prompt_for_donor()\n\n # Prompt for the donation amount\n prompt_for_donation_amount(new_donor)\n\n # Add donor to collection\n donor_list.add_donor(new_donor)\n\n 
# Print out a letter customized for the donor and amount\n print(new_donor.format_thank_you())", "def Donate(self, energy, wesenid):\n if(self.dead):\n return False\n o = self.worldObjects[wesenid]\n if((o.objectType == \"wesen\") and\n (o.position == self.position)):\n if(self._UseTime(\"donate\")):\n if(energy > self.energy):\n energy = self.energy\n if(not energy <= 0):\n o.energy += energy\n self.energy -= energy\n self._EnergyCheck()\n return True\n return False", "def buy(self, item):\n self.pay( item.get_cost() )\n self.add_item(item)", "def send_messages_to_donors(state):\n completed = 0\n total = 0\n success = True\n for donor in _Donors.values():\n message = generate_message(donor)\n state[\"name\"] = donor_name(donor)\n state[\"message\"] = message\n total += 1\n if (send_message_to_donor(state)):\n completed += 1\n else:\n success = False\n if (success):\n print((\"\\nThank-you messages transmitted to all donors for their\"\n \" latest donation or to bug them to donate in the future\"))\n else:\n success_factor = (100.0 * completed)/total\n print((\"{:.2f}% of the donor thank-you \"\n \"messages were transmitted\").format(success_factor))\n state = make_state()\n return success", "def buy_item(self, item):\n try:\n self.lock(item)\n num_left = self.validate_purchase(item)\n except InvalidItemType:\n print(\"Sorry, we don't sell {}\".format(str(item)))\n except OutOfStock:\n print(\"Sorry, the item is out of stock.\")\n else:\n print(\"Purchase complete. There are {} {}s left\".format(num_left, item.get_name()))\n finally:\n self.unlock(item)", "def post_purchase(cost, name, net_worths):\r\n person_row = net_worths[net_worths['personName'] == name].to_dict('records')[0]\r\n sen1 = f'The total cost of the items selected is ${cost}.'\r\n diff = person_row['finalWorth'] * billion - cost\r\n sen2 = f'If {name} bought all of the items selected at those quantities he would have ${diff} left.'\r\n quot = round(diff / POP_US, 2)\r\n sen3 = f'That is enough left over to pay everyone in the United States ${quot}.'\r\n return _add_strings(sen1, sen2, sen3)", "def thank_you_all():\n donor_dict.email_all()", "async def buy(self, ctx, *, auction_item: str):\n author = ctx.author\n await self._set_bank(author)\n i = 0;\n items = [item for item in self._shop[\"picitems\"] if item[\"name\"] in self.settings[\"user\"][str(author.id)][\"items\"]]\n for item2 in self._shop[\"picitems\"]:\n if item2[\"name\"].lower() == auction_item.lower():\n for item in items:\n i = i + 1\n if i >= 1:\n await ctx.send(\"You already own a pickaxe, sell your pickaxe and try again :no_entry:\")\n return\n filtered = filter(lambda x: x[\"name\"].lower() == auction_item.lower(), self._auction[\"items\"]) \n filtered = sorted(filtered, key=lambda x: x[\"price\"])\n if not filtered:\n await ctx.send(\"There is no `{}` on the auction house :no_entry:\".format(auction_item.title()))\n return\n server = ctx.guild\n channel = ctx.channel\n author = ctx.author\n \n if server.id not in PagedResultData.paged_results:\n PagedResultData.paged_results[server.id] = dict()\n \n if channel.id not in PagedResultData.paged_results[server.id]:\n PagedResultData.paged_results[server.id][channel.id] = dict()\n \n paged_result = PagedResult(filtered, lambda item: \"\\n**Name:** \" + item[\"name\"] + \"\\n**Price:** \" + str(item[\"price\"]) + \"\\n\" + (\"**Durability:** \" + str(item[\"durability\"]) + \"\\n\" if \"durability\" in item else \"\") + (\"**Amount:** \" + str(item[\"amount\"]) + \"\\n\" if \"amount\" in item else 
\"**Amount:** 1\"))\n paged_result.list_indexes = True\n paged_result.selectable = True\n \n async def selected(event):\n item = event.entry\n if item not in self._auction[\"items\"]:\n await channel.send(\"That item was recently bought :no_entry:\")\n return\n owner = discord.utils.get(self.bot.get_all_members(), id=int(item[\"ownerid\"]))\n if owner == ctx.message.author:\n await channel.send(\"You can't buy your own items :no_entry:\")\n return\n if item[\"price\"] > self.settings[\"user\"][str(author.id)][\"balance\"]:\n await channel.send(\"You don't have enough money for that item :no_entry:\")\n return\n self._auction[\"items\"].remove(item)\n \n self.settings[\"user\"][str(author.id)][\"balance\"] -= item[\"price\"]\n self.settings[\"user\"][str(owner.id)][\"balance\"] += item[\"price\"]\n \n try:\n if item[\"durability\"]:\n self.settings[\"user\"][str(author.id)][\"pickdur\"] = item[\"durability\"]\n except:\n pass\n \n try:\n if item[\"amount\"]:\n pass\n except:\n item[\"amount\"] = 1\n \n for x in range(0, item[\"amount\"]):\n self.settings[\"user\"][str(author.id)][\"items\"].append(item[\"name\"].title())\n try:\n await channel.send(\"You just bought `{} {}` for **${:,}** :tada:\".format(item[\"amount\"], item[\"name\"], item[\"price\"]))\n except:\n await channel.send(\"You just bought `1 {}` for **${:,}** :tada:\".format(item[\"name\"], item[\"price\"]))\n try:\n await owner.send(\"Your `{}` just got bought on the auction house, it was sold for **${:,}** :tada:\".format(item[\"name\"], item[\"price\"]))\n except:\n pass\n \n dataIO.save_json(self._auction_file, self._auction)\n dataIO.save_json(self.location, self.settings)\n \n paged_result.on_select = selected\n\n message = await channel.send(embed=paged_result.get_current_page_embed())\n\n paged_result.message_id = message.id\n\n PagedResultData.paged_results[server.id][channel.id][author.id] = paged_result", "def ticktodoitem(itemid, todoid):\n\n # Update \"todoitem\" table to reflect item as completed\n tick_todo_item(itemid)\n\n # Render edittodo page after an item is updated\n return redirect(url_for(\"edittodo\", todoid=todoid))", "def send_money(self):\n pass", "async def send_item(\n request: Request,\n body: ItemSendRequest = Body(...),\n me: User = Depends(get_user),\n items: ItemRepository = Depends(),\n users: UserRepository = Depends(),\n):\n item = await items.get(_id=ObjectId(body.item_id), user_id=ObjectId(me.id))\n if not item:\n raise HTTPException(status_code=status.NOT_FOUND, detail=\"Item not found\")\n user = await users.get(email=body.email)\n if not user:\n raise HTTPException(status_code=status.NOT_FOUND, detail=\"User not found\")\n key = encode_share_key(body.item_id, user.id)\n url = f\"{request.url_for('receive_item')}?key={key}\"\n return ItemSendResponse(url=url)", "async def doubleornothing(self, ctx):\n author = ctx.author\n if self.settings[\"user\"][str(author.id)][\"balance\"] <= 0:\n await ctx.send(\"You don't have enough money to do double or nothing :no_entry:\")\n ctx.command.reset_cooldown(ctx)\n return\n msg = await ctx.send(\"This will bet **${:,}**, are you sure you want to bet this?\\nYes or No\".format(self.settings[\"user\"][str(author.id)][\"balance\"]))\n try:\n def don(m):\n return m.author == ctx.author\n response = await self.bot.wait_for(\"message\", check=don, timeout=30)\n except asyncio.TimeoutError:\n await msg.delete()\n await ctx.send(\"The bet has been canceled.\")\n ctx.command.reset_cooldown(ctx)\n return\n if \"yes\" in response.content.lower():\n await 
msg.delete()\n else:\n await msg.delete()\n await ctx.send(\"The bet has been canceled.\")\n ctx.command.reset_cooldown(ctx)\n return\n number = randint(0, 1)\n message = await ctx.send(\"You just put **${:,}** on the line and...\".format(self.settings[\"user\"][str(author.id)][\"balance\"]))\n await asyncio.sleep(2)\n if number == 0:\n await message.edit(content=\"You lost it all! **-${:,}**\".format(self.settings[\"user\"][str(author.id)][\"balance\"]))\n self.settings[\"user\"][str(author.id)][\"winnings\"] -= self.settings[\"user\"][str(author.id)][\"balance\"]\n self.settings[\"user\"][str(author.id)][\"balance\"] = 0\n if number == 1:\n await message.edit(content=\"You double your money! **+${:,}**\".format(self.settings[\"user\"][str(author.id)][\"balance\"]))\n self.settings[\"user\"][str(author.id)][\"winnings\"] += self.settings[\"user\"][str(author.id)][\"balance\"]\n self.settings[\"user\"][str(author.id)][\"balance\"] *= 2\n dataIO.save_json(self.location, self.settings) \n ctx.command.reset_cooldown(ctx)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When an IconScore receives some coins and calldata is None, the fallback function is called.
def _fallback(context: 'IconScoreContext', score_address: 'Address'):
    icon_score = IconScoreEngine._get_icon_score(context, score_address)
    score_func = getattr(icon_score, ATTR_SCORE_CALL)
    score_func(STR_FALLBACK)
[ "def default_callback(self):\n raise ScoreExceededError(\"Score has been exceeded!\")", "def pickup_coin(self, x, y):\n \n # ADD CODE HERE\n if self.coins[y][x] > 0:\n coins = self.coins[y][x]\n self.coins[y][x] = 0\n return coins\n else:\n return 0", "def _collectCoin(self):\n coinHit = pygame.sprite.spritecollide(self._Player, \\\n self._coinList, True)\n self._score += (5 * len(coinHit))", "def zeroPriority(goneCost, heuristicCost):\n return 0", "def _bind_low (self, value):\n # put your own code in subclass\n # score should *NOT* become negative\n return max(0, value)", "def on_critical_amount_reached(cls, fluid_stack):", "def onUnknown(self, data):\n return CONTINUE", "def noHit (dataSet, threshold=None, state=None, rank=None):\n\tfor std in dataSet.getStandards (state):\n\t\tif rank and std.rank > rank:\n\t\t\tcontinue\n\t\tif std.hasHit(): return 0\n\treturn 1", "def typebonus(self):\n if self.rank == 1:\n self.hitpoints *= 1000\n return None\n elif self.rank == 2:\n self.hitpoints *= 100\n return None\n elif self.rank == 3:\n self.hitpoints *= 10\n return None\n elif self.rank == 4:\n self.hitpoints *= 2\n return None\n else:\n self.hitpoints *= 1\n return None", "def handle_compare_scores(self):\n if self.ai_hand.is_playing:\n if self.ai_hand.is_blackjack():\n self.handle_player_lost('You lost! AI has 21! New Deal?')\n elif self.ai_hand.is_bust():\n self.handle_player_win('You won! AI busted! New Deal?')\n elif max(0, min(21, self.player_hand.get_value())) > max(0, min(21, self.ai_hand.get_value())):\n self.handle_player_win('You won! New Deal?')\n else:\n self.handle_player_lost('You lost! New Deal?')", "def hook_card_cost(self, game, player, card):\n if self in player.piles[Piles.PLAYED]:\n return -1\n return 0", "def on_residual(self, pokemon, foe, battle):", "def custom_score(game, player):\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.is_loser(player):\n return float(\"-inf\")\n if (not game.height % 2 or not game.width %2) :\n # for odd moves, examine if symmetry could be broken\n if (game.move_count % 2 == 1 and # second player's turn\n game.__active_player__ == player): # we're up - we went second\n # for future moves, consider if we can copy\n for move in game.get_legal_moves(game.__active_player__):\n if is_symmetric(game.forecast_move(move)):\n # symmetry can be maintained, this is a good state for 2nd player\n return 100\n\n # return 100 if we're second and can copy the opponent's move\n if (game.move_count % 2 == 0 and # our move followed our opponent\n game.__active_player__ != player and # it's the opponent's move\n is_symmetric(game)): # we made the board symmetric\n return 100\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n own_pos = game.get_player_location(player)\n opp_pos = game.get_player_location(game.get_opponent(player))\n dist = (own_pos[0] - opp_pos[0])^2 + (own_pos[1] - opp_pos[1])^2\n\n return float(0.1 * own_moves - 0.9 * opp_moves - .01 * dist)", "def notice_best(self, best_score, pop):\n\t\tself.__notice(\"Best\", best_score, pop)\n\t\treturn best_score", "def test_call_best(self):\n self._test_call(ranks=numpy.ones(shape=(self.num_ranks,)), num_candidates=self.num_candidates)", "def test_for_no_spares_or_strikes(self):\n game = BowlingGame()\n game.throw(0)\n game.throw(1)\n game.throw(1)\n game.throw(2)\n game.throw(2)\n game.throw(3)\n game.throw(3)\n game.throw(4)\n game.throw(4)\n game.throw(5)\n game.throw(5)\n game.throw(4)\n game.throw(4)\n 
game.throw(3)\n game.throw(3)\n game.throw(2)\n game.throw(2)\n game.throw(1)\n game.throw(1)\n game.throw(0)\n game.calculate_score()\n self.assertEqual(game.score, 50)", "def test_metric_default_return():\n\n from foreshadow.metrics import MetricWrapper\n\n def test(X):\n raise Exception\n\n metric_wrapper = MetricWrapper(test, 0)\n assert 0 == metric_wrapper.calculate([1, 2, 3])", "def callback(self, action: 'SoCallbackAction') -> \"void\":\n return _coin.SoScale_callback(self, action)", "def notice_score(self, score, pop):\n\t\tself.__notice(\"Score\", score, pop)\n\t\treturn score" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If progress has increased sufficiently, log it to ``logger``. If ``new_ratio``, rounded to ``decimals``, differs from ``old_ratio``, log to ``logger`` at INFO level and return the rounded ``new_ratio``. Else return the unmodified ``old_ratio``.
def _log_progress(new_ratio, old_ratio, logger, decimals=2):
    new_ratio = round(new_ratio, decimals)
    if new_ratio != old_ratio:
        logger.info('%s', '{}%'.format(new_ratio * 100))
        return new_ratio
    else:
        return old_ratio
[ "def likelihood_ratio(self, new_dist_info, old_dist_info):\n LL_old = old_dist_info[0]\n LL_new = new_dist_info[0]\n LR = torch.exp(LL_new - LL_old)\n return LR", "def equalRatio(self):\n self.userPkmn.battleDelegate.stats[self.stat] = self.lvl\n self.targetPkmn.battleDelegate.stats[self.stat] = self.lvl\n ratio = self.delegate.getStatRatio(self.user, self.target)\n \n assert ratio == 1, \"Ratio should be 1\"", "def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):\n # print(x_var)\n # print(new_dist_info_vars)\n logli_new = self.log_likelihood_sym(x_var, new_dist_info_vars)\n logli_old = self.log_likelihood_sym(x_var, old_dist_info_vars)\n\n return tf.exp(logli_new - logli_old)", "def __calculate_percentage(self, ratio, curve):\n\n return math.pow(\n 10,\n ((math.log(ratio) - curve[1]) / curve[2]) + curve[0]\n )", "def percent_difference_fraction_log(a, b):\n import numpy as np\n\n return (np.log10(a) - np.log10(b)) / np.log10(a) * 100", "def get_new_volume_percent(self):\r\n\r\n volume_difference = 0\r\n if self.valve_in_status == True:\r\n volume_difference += self.inflow_rate\r\n if self.valve_out_status == True:\r\n volume_difference -= self.outflow_rate\r\n self.current_volume += volume_difference\r\n\r\n # Check that the volume doesn't break the logical limits\r\n if self.current_volume > self.total_volume:\r\n self.current_volume = self.total_volume\r\n elif self.current_volume < 0:\r\n self.current_volume = 0\r\n\r\n # Calculate the level percent from the volume\r\n percent = self.current_volume*100/self.total_volume\r\n return percent", "def _compute_update_outlier_ratio(self):\n metrics_d = self._metrics_dict\n\n numerator = metrics_d['f0_ground_truth_untrackable_pitch_count']\n numerator += metrics_d['f0_gen_pitch_outlier_count']\n numerator += metrics_d['f0_gen_untrackable_pitch_count']\n\n denominator = copy.copy(numerator)\n denominator += metrics_d['f0_gen_trackable_pitch_count']\n\n if denominator == 0:\n outlier_ratio = np.nan\n else:\n outlier_ratio = numerator / denominator\n metrics_d['f0_outlier_ratio'] = outlier_ratio\n return outlier_ratio", "def fdf_changed(self, value):\n self.good_match_fraction_new = float(value) / 100.", "def progress(self):\n return min(1.0, self.n_steps / self.max_steps)", "def progress(self):\n perc = self.learning_coeff / DESIRED_LEARNING_COEFF * 100\n return max(min(perc, 100.0), 0.0)", "def evaluate_log_likelihood_ratio(self, *args, **kwargs):\n raise NotImplementedError", "def prepare_frac(ratio):\n if ratio.__class__ is Ratio:\n return ratio.fraction\n return ratio", "def _expected_agreement( numerator, denominator ):\n return '%.02f' % ( int ( 10000.0 * numerator / denominator ) / 100.0 )", "def getReductionRatio(self) -> retval:\n ...", "def percent(self) -> float:\n return (1.0 / self.maximum) * self.current", "def _log_progress_at_interval(self):\n if time.time() >= self._last_log_time + self._log_interval_sec:\n self._log_progress()", "def relative_water_level(self): # Task 2B new method\n levelratio = None # defaults output to none\n if isinstance(self.latest_level, float) and (\n self.typical_range_consistent()):\n # ^checks if data exists and is consistent\n levelratio = ((self.latest_level - self.typical_range[0]) / (\n self.typical_range[1] - self.typical_range[0]))\n # sets level ratio as output\n return levelratio # returns output", "def pct_change():\n original_value = bank_of_rick.original_value\n current_total_value = sum(total_value())\n return 100 * (current_total_value - original_value) / 
original_value", "def _update_vol_step(self, cur_vol):\n with self.lock:\n if cur_vol >= .25:\n self.vol_step = .05\n elif cur_vol < .05:\n self.vol_step = .01\n elif cur_vol < .15:\n self.vol_step = .02\n else:\n self.vol_step = .03" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log all uncaught exceptions in non-interactive mode. All Python exceptions are handled by the function stored in ``sys.excepthook``. By rewriting the default implementation, we can modify the handling of all uncaught exceptions.
def _log_all_uncaught_exceptions(exc_type, exc_value, exc_traceback):
    # ignore KeyboardInterrupt
    if not issubclass(exc_type, KeyboardInterrupt):
        ROOT_LOGGER.error("", exc_info=(exc_type, exc_value, exc_traceback))
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
    return
[ "def setup_exceptionhook():\n\n def _pdb_excepthook(type, value, tb):\n if is_interactive():\n import traceback\n import pdb\n traceback.print_exception(type, value, tb)\n print()\n pdb.post_mortem(tb)\n else:\n lgr.warn(\"We cannot setup exception hook since not in interactive mode\")\n _sys_excepthook(type, value, tb)\n\n sys.excepthook = _pdb_excepthook", "def disable():\n sys.excepthook = sys.__excepthook__", "def noExceptions(self) :\n inhibitExceptions = True", "def unhook_exception_ipdb():\n assert hasattr(_custom_exception_hook, 'origin_hook')\n sys.excepthook = _custom_exception_hook.origin_hook", "def set_ipython_custom_exc(func):\n # https://mail.scipy.org/pipermail/ipython-dev/2012-April/008945.html\n # http://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook\n try:\n get_ipython().set_custom_exc((Exception,), func)\n except NameError:\n pass # get_ipython does not exist - ignore", "def hook_exception_ipdb():\n if not hasattr(_custom_exception_hook, 'origin_hook'):\n _custom_exception_hook.origin_hook = sys.excepthook\n sys.excepthook = _custom_exception_hook", "def use_custom_exception_handler(): # pragma: no cover\n sys.excepthook = _my_exception_handler", "def multiexit_except_hook(exctype, value, traceback):\n log.critical(\n 'Uncaught exception',\n exc_info=(exctype, value, traceback)\n )\n run_exitfuncs(1)", "def _handle_exception(self, exc_type, exc_value, exc_traceback):\n if issubclass(exc_type, KeyboardInterrupt):\n sys.__excepthook__(exc_type, exc_value, exc_traceback)\n return\n\n self.logger.error(\n \"Uncaught exception\", exc_info=(exc_type, exc_value, exc_traceback)\n )", "def _patch_ipython_excepthook(**kwargs):\n global ipy_tb\n\n blacklist = kwargs.get('suppressed_paths', [])\n blacklist.append('site-packages/IPython/')\n kwargs['suppressed_paths'] = blacklist\n\n if 'file' in kwargs:\n del kwargs['file']\n\n def format_tb(*exc_tuple, **__):\n unstructured_tb = format(exc_tuple, **kwargs)\n structured_tb = [unstructured_tb] # \\*coughs*\n return structured_tb\n\n import IPython\n shell = IPython.get_ipython()\n if ipy_tb is None:\n ipy_tb = shell.InteractiveTB.structured_traceback\n shell.InteractiveTB.structured_traceback = format_tb", "def _global_except_hook(exctype, value, traceback):\n try:\n if _orig_except_hook:\n _orig_except_hook(exctype, value, traceback)\n else:\n sys.__excepthook__(exctype, value, traceback)\n\n finally:\n import mpi4py.MPI\n rank = mpi4py.MPI.COMM_WORLD.Get_rank()\n sys.stderr.write('\\n')\n sys.stderr.write('******************************************\\n')\n sys.stderr.write('ChainerMN: \\n')\n sys.stderr.write(' Uncaught exception on rank {}. 
\\n'.format(rank))\n sys.stderr.write(' Calling MPI_Abort() to shut down MPI...\\n')\n sys.stderr.write('******************************************\\n')\n sys.stderr.write('\\n\\n')\n sys.stderr.flush()\n\n try:\n import mpi4py.MPI\n mpi4py.MPI.COMM_WORLD.Abort(1)\n except Exception as e:\n # Something is completely broken...\n # There's nothing we can do any more\n sys.stderr.write(\n 'Sorry, failed to stop MPI and the process may hang.\\n')\n sys.stderr.flush()\n raise e", "def uninstall_excepthook():\n\tsys.excepthook = sys.__excepthook__", "def log_uncaught_exceptions(ex_cls, ex, tb):\n # standard exception formatting to stdout\n traceback.print_tb(tb)\n \n # also send to logging framework\n stack = traceback.extract_tb(tb)\n filename, line_num = 'unknown', 0\n lines = []\n # If the exception is a CheckFailed exception, remove two layers of stack \n # so trace starts at the call site of the failed check.\n if isinstance(ex, FailedCheckException):\n stack = stack[0:-2]\n lines.insert(0, 'Stacktrace of failed check:')\n lines.insert(0, '%s' % (ex))\n else:\n lines.insert(0, 'Uncaught exception: %s' % (ex))\n if stack:\n filename, line_num, _, _ = stack[0]\n lines.extend(format_stacktrace(stack))\n for line in lines:\n if line.strip() != '':\n log_record = logger.makeRecord('FATAL', 50, filename, line_num, line, None, None)\n logger.handle(log_record)", "def add_hook():\n global _orig_except_hook\n\n if _orig_except_hook is not None:\n warnings.warn('chainermn.global_except_hook.add_hook() '\n 'seems to be called multiple times. Ignoring.',\n stacklevel=2)\n return\n\n _orig_except_hook = sys.excepthook\n sys.excepthook = _global_except_hook", "def doException(etype, eval, etrace):\n if hasattr(sys, 'ps1') or not sys.stderr.isatty():\n # we are in interactive mode or we don't have a tty-like\n # device, so we call the default hook\n sys.__excepthook__(etype, eval, etrace)\n else:\n import traceback, pdb\n # we are NOT in interactive mode, print the exception...\n traceback.print_exception(etype, eval, etrace, limit=2, file=sys.stdout)\n print\n # ...then start the debugger in post-mortem mode.\n pdb.pm()", "def trap():\n try:\n yield\n except Exception as e:\n Logger.log_exception(e)", "def exception_hook(except_type, except_value, traceback_obj):\n # logging.error(\"Caught an unhandled exception in Trawl Analyzer.\")\n # error_ret_value = 1\n # log_filename = \"trawl_analzyer_debug.log\"\n # log_filepath = os.path.join(os.getcwd(), log_filename)\n # notice = f\"An unhandled exception occurred and is captured in the log file\\n{log_filepath}\\n\"\n #\n # tbinfofile = io.StringIO()\n # traceback.print_tb(traceback_obj, None, tbinfofile)\n # tbinfofile.seek(0)\n # tbinfo = tbinfofile.read()\n #\n # except_summary = f\"Exception Summary: {except_type}: {except_value}\"\n #\n # time_str = arrow.now().format('MM/DD/YYYY, HH:mm:ss')\n\n # First, to the log file:\n # try:\n # logging.error(f\"Exception occurred at: {time_str}\")\n # logging.error(f\"{except_summary}\")\n # logging.error(f\"Exception Trace:\\n{tbinfo}\")\n # # logging.error(version_info)\n # except IOError:\n # pass\n\n if QApplication.instance():\n app = QApplication.instance()\n app.unhandledExceptionCaught.emit(except_type, except_value, traceback_obj)\n # msgbox = app.findChild(QObject, \"dlgUnhandledException\")\n # msgbox.show()\n # app.aboutToQuit.emit()\n # app.exit(error_ret_value)\n\n # Now to a message box\n # msg = f\"{time_str}\\n{except_summary}\\n\\nHit OK to exit Trawl Analyzer\"\n # msg = f\"{notice}\\n{msg}\"\n 
# errorbox = QMessageBox()\n # errorbox.setIcon(QMessageBox.Critical)\n # errorbox.setText(msg)\n # errorbox.exec_()\n\n\n else:\n logging.info(\"not a QApplication\")\n\n\n # Tell PyQt to exit with an error value\n # QCoreApplication.exit(error_ret_value)", "def log_die(self, fmt, *args, exception=None, exc_arg=\"\", end=os.linesep): \n self.log(fmt, *args, levels='unconditional', end=end)\n if exception is not None:\n raise exception(exc_arg)\n exit()", "def inhibit_os_error_reporting():\n global _OS_ERROR_REPORTING_INHIBITED\n if not _OS_ERROR_REPORTING_INHIBITED:\n _OS_ERROR_REPORTING_INHIBITED = True\n if sys.platform == 'win32':\n # Windows has a bad habit of opening a dialog when a console program\n # crashes, rather than just letting it crash. Therefore, when a program\n # crashes on Windows, we don't find out until the build step times out.\n # This code prevents the dialog from appearing, so that we find out\n # immediately and don't waste time waiting for a user to close the dialog.\n # https://msdn.microsoft.com/en-us/library/windows/desktop/ms680621.aspx\n SEM_FAILCRITICALERRORS = 1\n SEM_NOGPFAULTERRORBOX = 2\n SEM_NOALIGNMENTFAULTEXCEPT = 0x8000\n ctypes.windll.kernel32.SetErrorMode(\n SEM_FAILCRITICALERRORS|SEM_NOGPFAULTERRORBOX|\n SEM_NOALIGNMENTFAULTEXCEPT)\n # TODO(maruel): Other OSes.\n # - OSX, need to figure out a way to make the following process tree local:\n # defaults write com.apple.CrashReporter UseUNC 1\n # defaults write com.apple.CrashReporter DialogType none\n # - Ubuntu, disable apport if needed." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test band structure calculation by NaCl.
def test_band_structure(ph_nacl):
    ph_nacl.run_band_structure(
        _get_band_qpoints(), with_group_velocities=False, is_band_connection=False
    )
    ph_nacl.get_band_structure_dict()
[ "def test_band_structure_bc(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=False, is_band_connection=True\n )\n ph_nacl.get_band_structure_dict()", "def test_band_structure_gv(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=True, is_band_connection=False\n )\n ph_nacl.get_band_structure_dict()", "def test_bands_get(self):\n pass", "def test_negative_binomial(self):\n P = np.array([[0.5,0.4,0.8],\n [0.5,0.3,0.7],\n [0.5,0.3,0.9]])\n R = np.array([[1.,8.,10.],\n [2.,8.,24],\n [3.,6.,30.]])\n data, labels = simulation.generate_nb_data(P, R, 100)\n data = data.astype(float)\n #data += 1e-8\n ll = nb_ll(data, P, R)\n self.assertEqual(ll.shape, (100,3))\n self.assertFalse(np.isnan(ll).any())\n self.assertFalse(np.isinf(ll).any())\n # test derivative\n # test nb cluster\n # how to test the results... they're often not good...\n a,p,r = nb_cluster(data,3)\n self.assertEqual(p.shape, P.shape)\n self.assertEqual(r.shape, R.shape)\n p_nans = np.isnan(p)\n r_nans = np.isnan(r)\n self.assertFalse(p_nans.any())\n self.assertFalse(r_nans.any())\n # assert that all the points aren't being put into\n # the same cluster.\n self.assertTrue(purity(labels, a) > 0.8)\n self.assertFalse((a==a[0]).all())", "def test_landband_data(self):\n result = GenOrogMasks().gen_orography_masks(\n self.orography, self.landmask, self.land_threshold\n )\n self.assertArrayAlmostEqual(result.data, self.exp_landmask)", "def test_stats_nom(self):\n # stats is sum in quadrature of those provided\n true = (\n self.builder._sources[0]._stats**2 +\n self.builder._sources[1]._stats**2\n )\n # Should get the same spectrum using central parameters\n np.testing.assert_array_almost_equal(true, self.spec.stats)", "def test_modules():\n\n test_cell = LeakyCell(279.3, 14., 145., 16., 145., 100., 3., 100., 3., 115., 148., 115., 148., 1, 1, 1, 1, 0, 0, 0, 0, 2)\n\n assert round(test_cell.nernst_potential(1., 400., 20.), 4) == -0.0721\n\n assert round(test_cell.conductivity_k(test_cell.D_Na, test_cell.Z_Na, 3.2, test_cell.Na_si, test_cell.Na_di), 4) == 0.0078\n\n assert test_cell.total_charge( \\\n [test_cell.Na_si, test_cell.K_si, test_cell.Cl_si, 0, 0], 1e-1) == -9648\n \n assert test_cell.total_charge( \\\n [10, 10, 20, 0, 0], 10) == 0 \n\n assert test_cell.total_charge( \\\n [10, 10, 10, 5, 20], 10) == 0", "def test_not_binarize_bw(self):\n with Image.new('1', (1000,1000)) as im:\n self.assertEqual(im, nlbin(im))", "def test_SMEB():\n testing_function('sme', bilinear=True)", "def test_n(self):\n self.assertAlmostEqual(self.surfarr.n.value_si, self.n, 6)", "def test500(self):\n npix=17\n res=sdgrid(infiles=self.rawfile,gridfunction='BOX',npix=npix,cell='20arcsec',outfile=self.outfile,plot=False)\n self.assertEqual(res,None,\n msg='Any error occurred during gridding')\n self.getdata()\n\n # center is only nonzero pixel\n npol=2\n width=1\n nonzeropix_ref=self.generateNonzeroPix(npol,npix,width)\n nonzeropix=self.data.nonzero()[1]\n self.nonzero(nonzeropix_ref,nonzeropix)\n\n pol0=self.data[0,nonzeropix[0]]\n #self.check(0.625,pol0)\n #self.check(0.5,pol0)\n self.check(0.6666666667,pol0)\n \n pol1=self.data[0,nonzeropix[1]]\n #self.check(0.0625,pol1)\n #self.check(0.05,pol1)\n self.check(0.06666666667,pol1)", "def test_LabelObjectStatisticsBasic(self):\n\n self.delayDisplay(\"Starting test_LabelObjectStatisticsBasic\")\n #\n # first, get some data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n mrHead = sampleDataLogic.downloadMRHead()\n 
ctChest = sampleDataLogic.downloadCTChest()\n self.delayDisplay('Two data sets loaded')\n\n volumesLogic = slicer.modules.volumes.logic()\n\n mrHeadLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, mrHead, \"mrHead-label\" )\n\n warnings = volumesLogic.CheckForLabelVolumeValidity(ctChest, mrHeadLabel)\n\n self.delayDisplay(\"Warnings for mismatch:\\n%s\" % warnings)\n\n self.assertTrue( warnings != \"\" )\n\n warnings = volumesLogic.CheckForLabelVolumeValidity(mrHead, mrHeadLabel)\n\n self.delayDisplay(\"Warnings for match:\\n%s\" % warnings)\n\n self.assertTrue( warnings == \"\" )\n\n self.delayDisplay('test_LabelObjectStatisticsBasic passed!')", "def test_complex_conversion_to_nnf(self):\n\t\tformula = bf.And([bf.Var(\"a\"), bf.And([bf.Var(\"b\"), bf.Var(\"a\")]), bf.Or([bf.Var(\"c\"), bf.Var(\"b\")])])\n\t\tresult = bf.And([bf.Var(\"a\"), bf.And([bf.Var(\"b\"), bf.Var(\"a\")]), bf.Or([bf.Var(\"c\"), bf.Var(\"b\")])])\n\t\tself.assertEqual(result, au.nnf(formula), \"Invalid formula, expected the same as specified by result.\")\n\n\t\tformula = bf.And([bf.Not(bf.Var(\"a\")), bf.Or([bf.And([bf.Var(\"a\"), bf.Var(\"b\"), bf.Var(\"c\")]), bf.Not(bf.Var(\"b\"))])])\n\t\tresult = bf.And([bf.Not(bf.Var(\"a\")), bf.Or([bf.And([bf.Var(\"a\"), bf.Var(\"b\"), bf.Var(\"c\")]), bf.Not(bf.Var(\"b\"))])])\n\t\tself.assertEqual(result, au.nnf(formula), \"Invalid formula, expected the same as specified by result.\")\n\n\t\tformula = bf.And([bf.Not(bf.Var(\"a\")),bf.Or([bf.And([bf.Var(\"b\"), bf.Var(\"c\"), bf.Var(\"d\")]), bf.Not(bf.Var(\"c\"))]), bf.Var(\"a\")])\n\t\tresult = bf.And([bf.Not(bf.Var(\"a\")),bf.Or([bf.And([bf.Var(\"b\"), bf.Var(\"c\"), bf.Var(\"d\")]), bf.Not(bf.Var(\"c\"))]), bf.Var(\"a\")])\n\t\tself.assertEqual(result, au.nnf(formula), \"Invalid formula, expected the same as specified by result.\")", "def test_n(self):\n self.assertAlmostEqual(self.stick.n.value_si, self.n, 6)", "def test_init(self):\n\n # first, filename\n r = gr.Raster(datasets.get_path(\"landsat_B4\"))\n assert isinstance(r, gr.Raster)\n\n # second, passing a Raster itself (points back to Raster passed)\n r2 = gr.Raster(r)\n assert isinstance(r2, gr.Raster)\n\n # third, rio.Dataset\n ds = rio.open(datasets.get_path(\"landsat_B4\"))\n r3 = gr.Raster(ds)\n assert isinstance(r3, gr.Raster)\n assert r3.filename is not None\n\n # finally, as memoryfile\n memfile = rio.MemoryFile(open(datasets.get_path(\"landsat_B4\"), 'rb'))\n r4 = gr.Raster(memfile)\n assert isinstance(r4, gr.Raster)\n\n assert np.logical_and.reduce((np.array_equal(r.data, r2.data, equal_nan=True),\n np.array_equal(r2.data, r3.data, equal_nan=True),\n np.array_equal(r3.data, r4.data, equal_nan=True)))\n\n assert np.logical_and.reduce((np.all(r.data.mask == r2.data.mask),\n np.all(r2.data.mask == r3.data.mask),\n np.all(r3.data.mask == r4.data.mask)))\n\n # the data will not be copied, immutable objects will\n r.data[0, 0, 0] += 5\n assert r2.data[0, 0, 0] == r.data[0, 0, 0]\n\n r.nbands = 2\n assert r.nbands != r2.nbands", "def check_bcc():\n from pylada.crystal.cppwrappers import Structure, neighbors\n structure = Structure([[-0.5,0.5,0.5],[0.5,-0.5,0.5],[0.5,0.5,-0.5]])\\\n .add_atom(0,0,0,\"Mo\")\n print neighbors(structure, 12, [0,0,0])", "def test_brickarea_scalar(self):\n b = B.Bricks()\n barea = b.brickarea(0, 0)\n self.assertEqual(barea, np.array([0.0624999515], dtype='<f4')[0])", "def test_nonzero_landband_data(self):\n result = GenOrogMasks().gen_orography_masks(\n self.orography, self.landmask, self.nonzero_land_threshold\n )\n 
self.assertArrayAlmostEqual(result.data, self.exp_nonzero_landmask)", "def test_read_gdal_raster_stats_with_subdatasets_in_netcdf():\n netcdf_path = get_test_data_file(\"binary/stac_proj_extension/netcdf/multiple_bands.nc\")\n\n raster_metadata: AssetRasterMetadata = read_gdal_raster_metadata(str(netcdf_path))\n\n assert len(raster_metadata.statistics) == 13\n expected_band_names = {\n \"B02\",\n \"B03\",\n \"B04\",\n \"B05\",\n \"B06\",\n \"B07\",\n \"B08\",\n \"B11\",\n \"B12\",\n \"DEM\",\n \"temperature_mean\",\n \"VH\",\n \"VV\",\n }\n assert set(raster_metadata.statistics.keys()) == expected_band_names\n for band_name, band_stats in raster_metadata.statistics.items():\n assert band_stats.minimum is not None\n assert band_stats.maximum is not None\n assert band_stats.mean is not None\n assert band_stats.stddev is not None\n\n # valid_percent can be None though. gdalinfo does not always give us a value for this.\n if band_stats.valid_percent is None:\n logging.warning(f\"band:{band_name} has no value for valid_percent: {band_stats.valid_percent=}\")\n\n assert raster_metadata.projection == {\n \"proj:epsg\": 4326,\n # For some reason gdalinfo reports the bounds in the wrong order here.\n # I think the reason might be that the pixels are south-up instead of\n # north-up, i.e. the scale for the Y-axis of the pixel is negative.\n # Upper Left corner is BELOW Lower Left corner, which is unexpected.\n # gdalinfo reports that CRS is EPSG:4326, X=lon, Y=lat.\n #\n # From gdalinfo:\n # Corner Coordinates:\n # Upper Left ( 0.0, 0.0)\n # Lower Left ( 0.0, 3.0)\n # Upper Right ( 49.0, 0.0)\n # Lower Right ( 49.0, 3.0)\n # Center ( 24.5, 1.5)\n #\n # Would expect this proj:bbox value with the normal order of the corners:\n # \"proj:bbox\": approx([0.0, 0.0, 49.0, 3.O]),\n \"proj:bbox\": approx([0.0, 3.0, 49.0, 0.0]),\n \"proj:shape\": [49, 3],\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test band structure calculation with group velocity by NaCl.
def test_band_structure_gv(ph_nacl):
    ph_nacl.run_band_structure(
        _get_band_qpoints(), with_group_velocities=True, is_band_connection=False
    )
    ph_nacl.get_band_structure_dict()
[ "def test_band_structure_bc(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=False, is_band_connection=True\n )\n ph_nacl.get_band_structure_dict()", "def test_band_structure(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=False, is_band_connection=False\n )\n ph_nacl.get_band_structure_dict()", "def test_compare_alpha_diversities(self):\n # test 'Dose' at 480 inputs\n category = 'Dose'\n depth = 480\n test_type = 'parametric'\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(self.rarefaction_file,\n self.mapping_file, category=category, depth=depth, \n test_type=test_type)\n \n # hardcoded order of the terms in the keys otherwise would comps fail\n exp_tcomps = \\\n {('Control','2xDose'): (1.1746048668554037, 0.44899351189030801),\n ('1xDose','2xDose'): (1.7650193854830403, 0.17574514418562981),\n ('Control','1xDose'): (0.43618805086434992, 0.7052689260099092)}\n \n # test each key in expected results -- this won't catch if \n # obs_tcomps has extra entries, but test that via the next call\n for k in exp_tcomps:\n self.assertFloatEqual(exp_tcomps[k],obs_tcomps[k])\n self.assertEqual(set(exp_tcomps.keys()),set(obs_tcomps.keys()))\n\n # test that returned alpha diversity averages are correct\n # dose\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'], \n # Control = ['Sam5']\n exp_ad_avgs = {'1xDose':(3.2511951575216664, 0.18664627928763661),\n '2xDose':(2.7539647172550001, 0.30099438035250015),\n 'Control':(3.3663303519925001, 0.0)}\n for k in exp_ad_avgs:\n self.assertFloatEqual(exp_ad_avgs[k],obs_ad_avgs[k])\n\n\n # test 'Dose' at 480 inputs with nonparametric test\n seed(0) # set the seed to reproduce random MC pvals\n category = 'Dose'\n depth = 480\n test_type = 'nonparametric'\n num_permutations = 100\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(self.rarefaction_file,\n self.mapping_file, category=category, depth=depth, \n test_type=test_type, num_permutations=num_permutations)\n exp_tcomps = {('1xDose', '2xDose'): (1.7650193854830403, 0.13), \n ('Control', '1xDose'): (0.43618805086434992, 0.83), ('Control', \n '2xDose'): (1.1746048668554037, 0.62)}\n # test each key in expected results -- this won't catch if \n # obs_tcomps has extra entries, but test that via the next call\n for k in exp_tcomps:\n self.assertFloatEqual(exp_tcomps[k],obs_tcomps[k])\n self.assertEqual(set(exp_tcomps.keys()),set(obs_tcomps.keys()))\n\n # test that returned alpha diversity averages are correct\n # dose\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'], \n # Control = ['Sam5']\n exp_ad_avgs = {'Control': (3.3663303519925001, 0.0), '1xDose': (3.2511951575216664, 0.18664627928763661), '2xDose': (2.7539647172550001, 0.30099438035250015)}\n\n for k in exp_ad_avgs:\n self.assertFloatEqual(exp_ad_avgs[k],obs_ad_avgs[k])\n\n\n # test it works with NA values\n # test 'Dose' at 500 inputs with paramteric test\n category = 'Dose'\n depth = 500\n test_type = 'parametric'\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(self.rarefaction_file,\n self.mapping_file, category=category, depth=depth, \n test_type=test_type)\n exp_tcomps = \\\n {('Control','2xDose'): (-0.63668873339963239, 0.63906168713487699), \n ('1xDose','2xDose'): (None,None), \n ('Control','1xDose'): (None,None)}\n self.assertFloatEqual(obs_tcomps, exp_tcomps)\n # test that it works with nonparametric test - this was erroring.\n seed(0)\n test_type = 'nonparametric'\n exp_tcomps = \\\n {('Control','2xDose'): (-0.63668873339963239, 
0.672), \n ('1xDose','2xDose'): (None,None), \n ('Control','1xDose'): (None,None)}\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(self.rarefaction_file,\n self.mapping_file, category=category, depth=depth, \n test_type=test_type)\n self.assertFloatEqual(obs_tcomps, exp_tcomps)\n\n # test that returned alpha diversity averages are correct\n # dose\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'], \n # Control = ['Sam5']\n # will fail on nan comparison so avoid this\n exp_ad_avgs = {'1xDose':(nan, nan),\n '2xDose':(3.1955144893699998, 0.84206819489000018),\n 'Control':(2.2669008538500002, 0.0)}\n for k in exp_ad_avgs:\n if k!='1xDose':\n self.assertFloatEqual(exp_ad_avgs[k],obs_ad_avgs[k])\n if k=='1xDose':\n self.assertTrue(all(map(isnan,obs_ad_avgs[k])))\n\n\n # test that it works when no depth is passed\n category = 'Dose'\n depth = None #should return depth = 910\n test_type = 'parametric'\n obs_tcomps, obs_ad_avgs = compare_alpha_diversities(self.rarefaction_file,\n self.mapping_file, category=category, depth=depth, \n test_type=test_type)\n\n # hardcoded order of the terms in the keys otherwise would comps fail\n exp_tcomps = \\\n {('Control','2xDose'): (3.3159701868634883, 0.1864642327553255),\n ('1xDose','2xDose'): (-0.48227871733885291, 0.66260803238173183),\n ('Control','1xDose'): (0.83283756452373126, 0.49255115337550748)}\n self.assertFloatEqual(obs_tcomps, exp_tcomps)\n\n # test that returned alpha diversity averages are correct\n # dose\n # 1xDose = ['Sam1','Sam2','Sam6'], 2xDose = ['Sam3','Sam4'], \n # Control = ['Sam5']\n exp_ad_avgs = {'1xDose':(2.6763340901916668, 0.36025734786901326),\n '2xDose':(2.8358041871949999, 0.04611264137749993),\n 'Control':(3.1006488615725001, 0.0)}\n for k in exp_ad_avgs:\n self.assertFloatEqual(exp_ad_avgs[k],obs_ad_avgs[k])", "def test_boyd_9(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n culvert_slope=1 # Downward\n\n inlet_depth=1.800\n outlet_depth=0.80\n inlet_velocity=1.0\n outlet_velocity=0.5\n \n culvert_length=10.0\n culvert_width=3.60\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n z_in = 0.0\n z_out = z_in-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n\n\n Q_expected = 13.554\n v_expected = 3.329\n d_expected = 1.131\n \n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n\n\n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss)\n\n if verbose:\n print ('%s,%.2f'%('SPEC_E = ',inlet_specific_energy))\n print 
('%s,%.2f'%('Delta E = ',delta_total_energy))\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST03 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def test_boyd_12(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n culvert_slope=1 # Downward\n\n inlet_depth=1.50\n inlet_velocity= 4.0\n outlet_depth=0.8\n outlet_velocity=4.0\n \n culvert_length=10.0\n culvert_width=3.60\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n z_in = 10.0\n z_out = 10.0-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n\n\n Q_expected = 13.546\n v_expected = 3.136\n d_expected = 1.20\n \n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n\n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss) \n if verbose:\n print ('%s,%.3f'%('SPEC_E = ',inlet_specific_energy))\n print ('%s,%.3f'%('Delta E = ',delta_total_energy))\n \n print ('%s,%.3f,%.3f,%.3f' %('ANUGAcalcsTEST06 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def get_NPVs( self,\r\n leg,\r\n value_date,\r\n instrument,\r\n disc_cv_details ):\r\n \"\"\" Step one cal of leg1 \r\n leg1 = { \"currency\":...,\r\n \"balance_tb\":...,\r\n \"acc_cpn_detail\":...,\r\n \"pay_convention\":....,\r\n \"day_convention\":....,}\r\n \"\"\"\r\n leg1 = instrument[leg]\r\n Day_Counter = Day_Count.Day_Counter(leg1[\"day_convention\"])\r\n currency = leg1[\"currency\"]\r\n convention = self.convention[currency]\r\n cv_instrument = self.curve_instrument[currency]\r\n fx_instrument = self.cv_fx_instrument[currency]\r\n \"\"\" Discounting Curve settings below\r\n \"\"\"\r\n if disc_cv_details[\"type\"].upper() == \"XCS\":\r\n \"\"\" For XCS calculation we have to use \r\n dual curves method libor curve for \r\n coupon calculation and basis adjusted\r\n curve for discounting \r\n \"\"\"\r\n cv_dis = self.gen_swap_curve( value_date,\r\n convention,\r\n fx_instrument, \r\n disc_cv_details,\r\n Day_Counter )\r\n disc_cv_details[\"type\"] = \"SWAP\"\r\n cv_fwd = 
self.gen_swap_curve( value_date,\r\n convention,\r\n cv_instrument, \r\n disc_cv_details,\r\n Day_Counter )\r\n disc_cv_details[\"type\"] = \"XCS\"\r\n else:\r\n Day_Counter.set_convention_by_ccy(currency)\r\n cv_fwd = self.gen_swap_curve( value_date,\r\n convention,\r\n cv_instrument, \r\n disc_cv_details,\r\n Day_Counter )\r\n \r\n cv_dis = cv_fwd\r\n cf_tb = CF_Gen( leg1, \r\n cv_fwd,\r\n self.cv_keeper,\r\n Day_Counter )\r\n INT_flow = [[ele[\"End_Time\"],ele[\"Interests\"]] for ele in cf_tb]\r\n NPV_INT = Tools.cal_npv( INT_flow, cv_dis, Day_Counter )\r\n PRI_flow = [[ele[\"End_Time\"],ele[\"Principal\"]] for ele in cf_tb]\r\n NPV_PRI = Tools.cal_npv( PRI_flow, cv_dis, Day_Counter )\r\n return NPV_INT,NPV_PRI", "def test_02(self): \n # ------------------------------------------------------------------------\n # Test: vol_halfspace_unitcube\n # ------------------------------------------------------------------------\n block = BlockFunction(np.array([0,1]), lambda x: 1, lambda x: 0)\n \n # 2D\n w = np.array([-2,-1])\n z = -2\n v = block.vol_halfspace_unitcube(w,z)\n assert abs(v-0.25) < 1e-12, 'Volume should be 1/4.'\n \n v = block.vol_halfspace_unitcube(-w,-z)\n assert abs(v-0.75) < 1e-12, 'Volumes should add up to 1'\n \n # 3D\n w = np.array([0,1,0])\n z = 1\n v = block.vol_halfspace_unitcube(w,z)\n assert v is None, 'Degeneracy, answer should be None.'\n \n w = np.array([1,1,1])\n z = 1\n v = block.vol_halfspace_unitcube(w,z)\n assert abs(v-1/6) < 1e-12, 'Volume should be 1/6.'\n \n # ------------------------------------------------------------------------\n # Test slab_vol\n # ------------------------------------------------------------------------\n \n # Horizontal hyperplane: unit hypercube\n f1 = lambda x: x[1]\n bnd = np.array([[0,0,0],[1,1,1]]).transpose()\n df1_dx = lambda x: np.array([0,1,0]).transpose()\n bf = BlockFunction(bnd, f1, df1_dx)\n v = bf.slab_vol(0.5,1)\n assert abs(v-0.5)< 1e-12, 'Volume should be 1/2.'\n \n # Horizontal hyperplane, nonstandard hypercube\n bnd = np.array([[0,1,0],[0.5,2,2]]).transpose()\n bf = BlockFunction(bnd, f1, df1_dx)\n assert abs(bf.slab_vol(-1,1)) < 1e-12, 'Volume should be 0'\n assert abs(bf.slab_vol(1,4)-1) < 1e-12, 'Volume should be 1' \n \n # Skew hyperplane\n f2 = lambda x: x[0] + x[1] - 2\n df2_dx = lambda x: np.array([1,1])\n bnd = np.array([[1,1],[2,4]]).transpose()\n bf = BlockFunction(bnd, f2, df2_dx)\n assert abs(bf.slab_vol(0.5,3.5)-2.75)<1e-12, 'Volume should be 2.75'\n \n # 1d function\n f3 = lambda x: x**2\n bnd = np.array([0,1])\n df3_dx = lambda x: 2*x\n bf = BlockFunction(bnd, f3, df3_dx)\n assert abs(bf.slab_vol(0,1)-0.75) < 1e-12\n assert abs(bf.slab_vol(0.5,1)-0.25) < 1e-12", "def test_boyd_7(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n\n\n inlet_depth=0.150\n outlet_depth=0.15\n inlet_velocity=1.00\n outlet_velocity=0.5\n \n culvert_length=10.0\n culvert_width=3.6\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n culvert_slope=1 # % Downward\n z_in = 10.0\n z_out = z_in-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n\n\n Q_expected = 0.5526\n v_expected = 1.146\n d_expected = 0.1339\n \n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth 
',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n \n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss)\n if verbose:\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST01 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def test_boyd_8(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n culvert_slope=1 # Downward\n\n inlet_depth=0.500\n outlet_depth=0.700\n inlet_velocity=1.50\n outlet_velocity=0.50\n \n culvert_length=10.0\n culvert_width=3.60\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n z_in = 0.0\n z_out = z_in-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n \n \n Q_expected = 0.224\n v_expected = 0.152\n d_expected = 0.409\n\n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n\n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss)\n if verbose:\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST02 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def test_boundary_nondimensional_end_separation(self):\r\n rgn = np.random.rand()\r\n model = random_crack_model(varepsilon=800)\r\n compare = model.v_0(rgn, [1, 1])[0]\r\n self.assertAlmostEqual(\r\n model.v(rgn)[0], compare, delta=np.abs(1e-1*compare)\r\n )\r\n model = 
random_crack_model(N=100, varepsilon=800)\r\n compare = 1 + model.N**3/3/model.kappa*rgn\r\n self.assertAlmostEqual(\r\n model.v(rgn)[0], compare, delta=np.abs(1e-1*compare)\r\n )", "def test_boyd_10(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n culvert_slope=1 # Downward\n\n inlet_depth=1.00\n outlet_depth=0.8\n inlet_velocity=1.0\n outlet_velocity=0.5\n \n culvert_length=10.0\n culvert_width=3.60\n culvert_height=1.20\n culvert_blockage = 0.0\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n z_in = 10.0\n z_out = 10.0-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n\n\n\n Q_expected = 5.164\n v_expected = 2.047\n d_expected = 0.70\n \n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n\n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss) \n\n if verbose:\n print ('%s,%.2f'%('SPEC_E = ',inlet_specific_energy))\n print ('%s,%.2f'%('Delta E = ',delta_total_energy))\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST04 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v ", "def pipeline():\n f = Dataset('/Users/akapadia/Scratch/SpaceApps/data/cru_vap_clim_1991-2000.nc', 'r')\n time = f.variables['time'][:]\n lons = f.variables['longitude'][:]\n lats = f.variables['latitude'][:]\n vap = numpy.array(f.variables['vap'][:])\n units = f.variables['vap'].units\n \n num_of_months = vap.shape[0]\n # for month in xrange(num_of_months):\n # print vap[month]\n \n JAN = 0\n lon = 11.58\n lat = 48.15\n lonIndex = getGeographicIndex(lon , lons)\n latIndex = getGeographicIndex(lat, lats)\n\n print vap[0][latIndex][lonIndex], units", "def test_LabelObjectStatisticsBasic(self):\n\n self.delayDisplay(\"Starting test_LabelObjectStatisticsBasic\")\n #\n # first, get some data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n mrHead = sampleDataLogic.downloadMRHead()\n ctChest = sampleDataLogic.downloadCTChest()\n self.delayDisplay('Two data sets loaded')\n\n volumesLogic = slicer.modules.volumes.logic()\n\n mrHeadLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, mrHead, \"mrHead-label\" )\n\n warnings = volumesLogic.CheckForLabelVolumeValidity(ctChest, mrHeadLabel)\n\n self.delayDisplay(\"Warnings for mismatch:\\n%s\" % warnings)\n\n self.assertTrue( warnings != \"\" 
)\n\n warnings = volumesLogic.CheckForLabelVolumeValidity(mrHead, mrHeadLabel)\n\n self.delayDisplay(\"Warnings for match:\\n%s\" % warnings)\n\n self.assertTrue( warnings == \"\" )\n\n self.delayDisplay('test_LabelObjectStatisticsBasic passed!')", "def checkIfAGNfluxinR500c(pixel_no, frac_r500c, hd_agn, idx_cluster, idx_agn, d2d,\\\n min_flux_agn= 5e-15, redshift_limit=2, frac_cp_agn=0.03):\n hd_clu_params = getCluParams(pixel_no)\n log_Lx = hd_clu_params['CLUSTER_LX_soft_RF']\n \n # counts the clusters with changed flux\n count_change = 0\n\n # get the bkg agn flux \n bkg_agn_flux_px = getAGNbkgFlux(pixel_no, min_flux_agn=min_flux_agn)\n \n # arr whose Lx values are changed based on wether or not an AGN is within it\n scaled_LX_soft_RF_agn = hd_clu_params['CLUSTER_LX_soft_RF'] \n \n # get the r500c in degrees for the clusters with agn neighbours\n r500c = hd_clu_params[idx_cluster]['R500c_arcmin'].to(u.degree)\n print('Scaling flux if AGN exist inside %.1f times R_500c'%(frac_r500c))\n \n # if agn is within frac*R500c bin of the cluster : frac_r500c = 0-0.2, .2-0.5, 0.5-1, etc \n cond_for_agn_in_r500c = (d2d <= frac_r500c*r500c)\n agn_within_fr500c = np.where( cond_for_agn_in_r500c )\n\n idx_clu_w_agn = idx_cluster[agn_within_fr500c]\n idx_clu_unique_w_agn = np.unique(idx_clu_w_agn)\n\n idx_agn_in_clu = idx_agn[agn_within_fr500c]\n agn_flux = hd_agn[idx_agn_in_clu]['FX_soft']\n print('%.1f percent clusters have AGN neighbours'%(100*len(idx_clu_unique_w_agn)/len(hd_clu_params)))\n\n # get the fraction of agn flux wrt the original cluster flux\n cluster_flux = hd_clu_params[idx_clu_w_agn]['CLUSTER_FX_soft']\n \n for idx_unique in idx_clu_unique_w_agn:\n sum_over_idx = np.where(idx_unique == idx_clu_w_agn)\n \n r500x_clu = (hd_clu_params[idx_unique]['R500c_arcmin']*u.arcmin).to(u.degree)\n r500x_clu = r500x_clu/u.deg\n\n # get the contribution of the AGN with subtracted background AGN flux\n bkg_agn_flux = bkg_agn_flux_px*(np.pi)*(frac_r500c*r500x_clu)**2\n total_agn_flux = np.sum(agn_flux[sum_over_idx])\n frac_up = (total_agn_flux - bkg_agn_flux)/hd_clu_params[idx_unique]['CLUSTER_FX_soft']\n \n # scaling the cluster rest frame luminosity by this factor\n f_Lx_scale_up = (1 + frac_up) \n if f_Lx_scale_up > 1:\n count_change += 1\n scaled_LX_soft_RF_agn[idx_unique] = log_Lx[idx_unique] + np.log10(f_Lx_scale_up)\n else:\n scaled_LX_soft_RF_agn[idx_unique] = log_Lx[idx_unique]\n return scaled_LX_soft_RF_agn, count_change", "def __init__(self, name, size=50, sector=0, cadence=None):\n super(Source_cut_pseudo, self).__init__()\n if cadence is None:\n cadence = []\n self.name = name\n self.size = size\n self.sector = sector\n self.camera = 0\n self.ccd = 0\n self.wcs = []\n self.time = np.arange(10)\n self.flux = 20 * np.ones((100, 50, 50)) + np.random.random(size=(100, 50, 50))\n star_flux = np.random.random(100) * 1000 + 200\n star_x = np.random.random(100) * 50 - 0.5\n star_y = np.random.random(100) * 50 - 0.5\n star_x_round = np.round(star_x)\n star_y_round = np.round(star_y)\n for j in range(100):\n for i in range(100):\n self.flux[j, int(star_y_round[i]), int(star_x_round[i])] += star_flux[i]\n try:\n self.flux[j, int(star_y_round[i]), int(star_x_round[i]) + 1] += star_flux[i]\n except:\n continue\n self.flux_err = []\n self.gaia = []\n self.cadence = cadence\n self.quality = []\n self.mask = np.ones(np.shape(self.flux[0]))\n\n # t_tic = Table()\n # t_tic[f'tic'] = tic_id[in_frame]\n t = Table()\n t[f'tess_mag'] = - star_flux\n t[f'tess_flux'] = star_flux\n t[f'tess_flux_ratio'] = 
star_flux / np.max(star_flux)\n t[f'sector_{self.sector}_x'] = star_x\n t[f'sector_{self.sector}_y'] = star_y\n gaia_targets = t # TODO: sorting not sorting all columns\n gaia_targets.sort('tess_mag')\n self.gaia = gaia_targets", "def test_vlan_groups_partial_update(self):\n pass", "def test_check_ncs_group_list(self):\n phil_groups = ncs_group_master_phil.fetch(\n iotbx.phil.parse(phil_str)).extract()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=test_pdb_str_2)\n ncs_obj_phil = ncs.input(\n hierarchy=pdb_inp.construct_hierarchy(),\n ncs_phil_groups=phil_groups.ncs_group)\n nrgl = ncs_obj_phil.get_ncs_restraints_group_list()\n pdb_inp = iotbx.pdb.input(lines=test_pdb_str_2,source_info=None)\n ph = pdb_inp.construct_hierarchy()\n # passing test\n self.assertTrue(nu.check_ncs_group_list(nrgl,ph,chain_max_rmsd=1))\n # make sure test fails when it suppose to\n nrgl[0].copies[1].t = matrix.col([100, -89.7668, 5.8996])\n self.assertFalse(nu.check_ncs_group_list(nrgl,ph,chain_max_rmsd=1))", "def test_box_scores_v_simulation(self):\n pass", "def test_block_specific_SVDs(self):\n U, D, V = self.ajive.blocks['x'].joint.get_UDV()\n rank = 1\n n, d = self.X.shape\n checks = svd_checker(U, D, V, n, d, rank)\n self.assertTrue(all(checks.values()))\n\n U, D, V = self.ajive.blocks['x'].individual.get_UDV()\n rank = 1\n n, d = self.X.shape\n checks = svd_checker(U, D, V, n, d, rank)\n self.assertTrue(all(checks.values()))\n\n U, D, V = self.ajive.blocks['y'].joint.get_UDV()\n rank = 1\n n, d = self.Y.shape\n checks = svd_checker(U, D, V, n, d, rank)\n self.assertTrue(all(checks.values()))\n\n U, D, V = self.ajive.blocks['y'].individual.get_UDV()\n rank = 2\n n, d = self.Y.shape\n checks = svd_checker(U, D, V, n, d, rank)\n self.assertTrue(all(checks.values()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test band structure calculation with band connection by NaCl.
def test_band_structure_bc(ph_nacl): ph_nacl.run_band_structure( _get_band_qpoints(), with_group_velocities=False, is_band_connection=True ) ph_nacl.get_band_structure_dict()
[ "def test_band_structure(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=False, is_band_connection=False\n )\n ph_nacl.get_band_structure_dict()", "def test_band_structure_gv(ph_nacl):\n ph_nacl.run_band_structure(\n _get_band_qpoints(), with_group_velocities=True, is_band_connection=False\n )\n ph_nacl.get_band_structure_dict()", "def test_bands_get(self):\n pass", "def test_landband_data(self):\n result = GenOrogMasks().gen_orography_masks(\n self.orography, self.landmask, self.land_threshold\n )\n self.assertArrayAlmostEqual(result.data, self.exp_landmask)", "def test_read_gdal_raster_stats_with_subdatasets_in_netcdf():\n netcdf_path = get_test_data_file(\"binary/stac_proj_extension/netcdf/multiple_bands.nc\")\n\n raster_metadata: AssetRasterMetadata = read_gdal_raster_metadata(str(netcdf_path))\n\n assert len(raster_metadata.statistics) == 13\n expected_band_names = {\n \"B02\",\n \"B03\",\n \"B04\",\n \"B05\",\n \"B06\",\n \"B07\",\n \"B08\",\n \"B11\",\n \"B12\",\n \"DEM\",\n \"temperature_mean\",\n \"VH\",\n \"VV\",\n }\n assert set(raster_metadata.statistics.keys()) == expected_band_names\n for band_name, band_stats in raster_metadata.statistics.items():\n assert band_stats.minimum is not None\n assert band_stats.maximum is not None\n assert band_stats.mean is not None\n assert band_stats.stddev is not None\n\n # valid_percent can be None though. gdalinfo does not always give us a value for this.\n if band_stats.valid_percent is None:\n logging.warning(f\"band:{band_name} has no value for valid_percent: {band_stats.valid_percent=}\")\n\n assert raster_metadata.projection == {\n \"proj:epsg\": 4326,\n # For some reason gdalinfo reports the bounds in the wrong order here.\n # I think the reason might be that the pixels are south-up instead of\n # north-up, i.e. 
the scale for the Y-axis of the pixel is negative.\n # Upper Left corner is BELOW Lower Left corner, which is unexpected.\n # gdalinfo reports that CRS is EPSG:4326, X=lon, Y=lat.\n #\n # From gdalinfo:\n # Corner Coordinates:\n # Upper Left ( 0.0, 0.0)\n # Lower Left ( 0.0, 3.0)\n # Upper Right ( 49.0, 0.0)\n # Lower Right ( 49.0, 3.0)\n # Center ( 24.5, 1.5)\n #\n # Would expect this proj:bbox value with the normal order of the corners:\n # \"proj:bbox\": approx([0.0, 0.0, 49.0, 3.O]),\n \"proj:bbox\": approx([0.0, 3.0, 49.0, 0.0]),\n \"proj:shape\": [49, 3],\n }", "def test_init(self):\n\n # first, filename\n r = gr.Raster(datasets.get_path(\"landsat_B4\"))\n assert isinstance(r, gr.Raster)\n\n # second, passing a Raster itself (points back to Raster passed)\n r2 = gr.Raster(r)\n assert isinstance(r2, gr.Raster)\n\n # third, rio.Dataset\n ds = rio.open(datasets.get_path(\"landsat_B4\"))\n r3 = gr.Raster(ds)\n assert isinstance(r3, gr.Raster)\n assert r3.filename is not None\n\n # finally, as memoryfile\n memfile = rio.MemoryFile(open(datasets.get_path(\"landsat_B4\"), 'rb'))\n r4 = gr.Raster(memfile)\n assert isinstance(r4, gr.Raster)\n\n assert np.logical_and.reduce((np.array_equal(r.data, r2.data, equal_nan=True),\n np.array_equal(r2.data, r3.data, equal_nan=True),\n np.array_equal(r3.data, r4.data, equal_nan=True)))\n\n assert np.logical_and.reduce((np.all(r.data.mask == r2.data.mask),\n np.all(r2.data.mask == r3.data.mask),\n np.all(r3.data.mask == r4.data.mask)))\n\n # the data will not be copied, immutable objects will\n r.data[0, 0, 0] += 5\n assert r2.data[0, 0, 0] == r.data[0, 0, 0]\n\n r.nbands = 2\n assert r.nbands != r2.nbands", "def test_simulate_rir_ism_multi_band(self, channel):\n room_dim = torch.rand(3, dtype=self.dtype, device=self.device) + 5\n mic_array = torch.rand(channel, 3, dtype=self.dtype, device=self.device) + 1\n source = torch.rand(3, dtype=self.dtype, device=self.device) + 4\n max_order = 3\n # absorption is set as a Tensor with dimensions `(7, 6)` indicating there are\n # 6 walls and each wall has 7 absorption coefficients corresponds to 7 octave bands, respectively.\n absorption = torch.rand(7, 6, dtype=self.dtype, device=self.device)\n walls = [\"west\", \"east\", \"south\", \"north\", \"floor\", \"ceiling\"]\n room = pra.ShoeBox(\n room_dim.detach().numpy(),\n fs=16000,\n materials={\n walls[i]: pra.Material(\n {\n \"coeffs\": absorption[:, i]\n .reshape(\n -1,\n )\n .detach()\n .numpy(),\n \"center_freqs\": [125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0],\n }\n )\n for i in range(len(walls))\n },\n max_order=max_order,\n ray_tracing=False,\n air_absorption=False,\n )\n # mic_locs is a numpy array of dimension `(D, channel)`.\n mic_locs = mic_array.transpose(0, 1).double().detach().numpy()\n room.add_microphone_array(mic_locs)\n room.add_source(source.tolist())\n room.compute_rir()\n max_len = max([room.rir[i][0].shape[0] for i in range(channel)])\n expected = torch.zeros(channel, max_len, dtype=self.dtype, device=self.device)\n for i in range(channel):\n expected[i, 0 : room.rir[i][0].shape[0]] = torch.from_numpy(room.rir[i][0])\n actual = F.simulate_rir_ism(room_dim, source, mic_array, max_order, absorption)\n self.assertEqual(expected, actual, atol=1e-3, rtol=1e-3)", "def check_bcc():\n from pylada.crystal.cppwrappers import Structure, neighbors\n structure = Structure([[-0.5,0.5,0.5],[0.5,-0.5,0.5],[0.5,0.5,-0.5]])\\\n .add_atom(0,0,0,\"Mo\")\n print neighbors(structure, 12, [0,0,0])", "def calcNDBI(img, bandName = 'ndbi'):\n ndbi = 
img.expression('(swir1 - nir)/(swir1 + nir)', {\n 'swir1': img.select('swir1'),\n 'nir': img.select('nir'),\n })\n\n return img.addBands(ndbi.rename([bandName]))", "def calcBCI(img, geometry, bandName = 'bci'):\n\n b = img.select('brightness').reduceRegion(\n reducer = ee.Reducer.minMax(),\n geometry = geometry,\n scale = 30,\n maxPixels = 1e13\n )\n b = b.getInfo()['brightness_min'], b.getInfo()['brightness_max']\n\n g = img.select('greenness').reduceRegion(\n reducer = ee.Reducer.minMax(),\n geometry = geometry,\n scale = 30,\n maxPixels = 1e13\n )\n g = g.getInfo()['greenness_min'], g.getInfo()['greenness_max']\n \n w = img.select('wetness').reduceRegion(\n reducer = ee.Reducer.minMax(),\n geometry = geometry,\n scale = 30,\n maxPixels = 1e13\n )\n w = w.getInfo()['wetness_min'], w.getInfo()['wetness_max']\n \n H = img.expression('(brightness - min)/(max - min)',{\n 'brightness': img.select('brightness'),\n 'min': b[0],\n 'max': b[1]\n }).rename('H')\n \n V = img.expression('(greenness - min)/(max - min)',{\n 'greenness': img.select('greenness'),\n 'min': g[0],\n 'max': g[1]\n }) .rename('V')\n\n L = img.expression('(wetness - min)/(max - min)',{\n 'wetness': img.select('wetness'),\n 'min': w[0],\n 'max': w[1]\n }).rename('L')\n\n HVL = ee.Image(H).addBands(V).addBands(L)\n\n BCI = HVL.expression('(0.5 * (H + L) - V)/(0.5* (H + L) + V)', {\n 'H': HVL.select('H'),\n 'V': HVL.select('V'),\n 'L': HVL.select('L')\n })\n \n return (img.addBands(\n HVL.expression('(0.5 * (H + L) - V)/(0.5* (H + L) + V)', {\n 'H': HVL.select('H'),\n 'V': HVL.select('V'),\n 'L': HVL.select('L')\n }).rename([bandName])))", "def filt_test(self, band, z):\n\n #use the SNCosmo function for extracting the bandpass\n b = sncosmo.get_bandpass(band)\n \n #obtain the wavelength and transmission values as python readable arrays\n wv = b.wave\n trans = b.trans\n\n #redshifted wavelength for the rest frame filter \n wv_red = wv*(1+z)\n\n #integrate the total flux in the region of the redshifted filter\n tran_int = simps(trans, wv_red)\n \n #define array for filling the filters that have any wavelength overlap\n\n overlap_array = []\n print \"Checking the filter list\", self.flist\n\n for i in self.flist:\n \n #extract the bandpass for NIRcam\n bp = sncosmo.get_bandpass(i)\n \n wv_obs= bp.wave\n tran_obs = bp.trans\n\n \n if wv_red[0] > wv_obs[-1]:\n print \"The filter being tested is\", i\n print \"The redshifted filter is very very red\"\n\n elif wv_red[-1] < wv_obs[0]:\n print \"The filter being tested is\", i\n print \"The redshifted filter is not red enough\"\n\n else:\n print \"There is some wavelength overlap with filter\", i\n overlap_array.append(i)\n\n print \"The NIRcam filters which overlap with the redshifted filter are: \", overlap_array\n \n overlap_percent=[]\n for j in overlap_array:\n bp = sncosmo.get_bandpass(j)\n \n wv_obs = bp.wave\n\n cond = (wv_red > wv_obs[0] ) & (wv_red < wv_obs[-1])\n \n overlap_int=simps(trans[cond], wv_red[cond])\n\n overlap_percent.append([j, overlap_int*100/tran_int])\n\n #store the overlap percentage\n overlap_percent=np.array(overlap_percent)\n\n\n print \"The percentages of the overlap are\", overlap_percent\n return overlap_percent[overlap_percent[:,1].astype('float32')==max(overlap_percent[:,1].astype('float32')) ][0]", "def test_obs_waveform_get(external_getter, code):\n net, sta, loc, cha = code.split('.')\n\n st = external_getter.obs_waveform_get(code)\n assert(len(st) == 3)\n\n stats = st.select(component=\"Z\")[0].stats\n assert stats.network == net\n assert 
stats.station == sta", "def test_brickq_scalar(self):\n b = B.Bricks()\n bq = b.brickq(0, -90)\n self.assertEqual(bq, 1)\n bq = b.brickq(0, 90)\n self.assertEqual(bq, 0)\n bq = b.brickq(0, 0)\n self.assertEqual(bq, 0)", "def test_SMEB():\n testing_function('sme', bilinear=True)", "def test_brickarea_scalar(self):\n b = B.Bricks()\n barea = b.brickarea(0, 0)\n self.assertEqual(barea, np.array([0.0624999515], dtype='<f4')[0])", "def test_brickq_array(self):\n b = B.Bricks()\n bqs = b.brickq(self.ra, self.dec)\n self.assertEqual(len(bqs), len(self.ra))\n self.assertTrue((bqs == self.brickqs).all())", "def test_layout_with_classical_bits(self):\n qc = QuantumCircuit.from_qasm_str(\n \"\"\"\nOPENQASM 2.0;\ninclude \"qelib1.inc\";\nqreg q4833[1];\nqreg q4834[6];\nqreg q4835[7];\ncreg c982[2];\ncreg c983[2];\ncreg c984[2];\nrzz(0) q4833[0],q4834[4];\ncu(0,-6.1035156e-05,0,1e-05) q4834[1],q4835[2];\nswap q4834[0],q4834[2];\ncu(-1.1920929e-07,0,-0.33333333,0) q4833[0],q4834[2];\nccx q4835[2],q4834[5],q4835[4];\nmeasure q4835[4] -> c984[0];\nccx q4835[2],q4835[5],q4833[0];\nmeasure q4835[5] -> c984[1];\nmeasure q4834[0] -> c982[1];\nu(10*pi,0,1.9) q4834[5];\nmeasure q4834[3] -> c984[1];\nmeasure q4835[0] -> c982[0];\nrz(0) q4835[1];\n\"\"\"\n )\n res = transpile(qc, FakeKolkata(), layout_method=\"sabre\", seed_transpiler=1234)\n self.assertIsInstance(res, QuantumCircuit)\n layout = res._layout.initial_layout\n self.assertEqual(\n [layout[q] for q in qc.qubits], [13, 10, 11, 12, 17, 14, 22, 26, 5, 16, 25, 19, 7, 8]\n )", "async def _get_expected(self):\n # Convert to complex64 from pairs of real and imag int\n vis = (self._data[..., 0] + self._data[..., 1] * 1j).astype(np.complex64)\n # Scaling\n vis /= self.cbf_attr['n_accs']\n # Time averaging\n time_ratio = int(np.round(await self._telstate['sdp_l0_int_time']\n / self.cbf_attr['int_time']))\n batch_edges = np.arange(0, vis.shape[0], time_ratio)\n batch_sizes = np.minimum(batch_edges + time_ratio, vis.shape[0]) - batch_edges\n vis = np.add.reduceat(vis, batch_edges, axis=0)\n vis /= batch_sizes[:, np.newaxis, np.newaxis]\n timestamps = self._timestamps[::time_ratio] / self.cbf_attr['scale_factor_timestamp'] \\\n + 0.5 * (await self._telstate['sdp_l0_int_time'])\n # Baseline permutation\n bls = BaselineOrdering(self.cbf_attr['bls_ordering'], self.user_args.antenna_mask)\n inv_permutation = np.empty(len(bls.sdp_bls_ordering), np.int)\n for i, p in enumerate(bls.permutation):\n if p != -1:\n inv_permutation[p] = i\n vis = vis[..., inv_permutation]\n # Sanity check that we've constructed inv_permutation correctly\n np.testing.assert_array_equal(\n await self._telstate['sdp_l0_bls_ordering'],\n self.cbf_attr['bls_ordering'][inv_permutation])\n flags = np.empty(vis.shape, np.uint8)\n channel_mask = self.fake_channel_mask()\n channel_mask[820:840] = True # Merge in band mask\n channel_data_suspect = self.fake_channel_data_suspect()[np.newaxis, :, np.newaxis]\n flags[:] = channel_data_suspect * np.uint8(CAM)\n for i, (a, b) in enumerate(bls.sdp_bls_ordering):\n if a.startswith('m091') or b.startswith('m091'):\n # data suspect sensor is True\n flags[:, :, i] |= CAM\n if a == 'm090v' or b == 'm090v':\n # input_data_suspect is True\n flags[:, :, i] |= CAM\n flags[:, :, i] |= channel_mask * np.uint8(STATIC)\n if a[:-1] != b[:-1]:\n # RFI model, which doesn't apply to auto-correlations\n flags[:, 1024, i] |= np.uint8(STATIC)\n if a.startswith('m093') == b.startswith('m093'):\n # Short baseline\n flags[:, 852:857, i] |= np.uint8(STATIC)\n return vis, flags, 
timestamps", "def test_boyd_8(self):\n # FIXME(Ole): This test fails (20 Feb 2009)\n\n g=9.81\n culvert_slope=1 # Downward\n\n inlet_depth=0.500\n outlet_depth=0.700\n inlet_velocity=1.50\n outlet_velocity=0.50\n \n culvert_length=10.0\n culvert_width=3.60\n culvert_height=1.20\n culvert_blockage = 0.00\n culvert_barrels = 1.0\n \n culvert_type='box'\n manning=0.013\n sum_loss=1.5\n\n inlet_specific_energy=inlet_depth + 0.5*inlet_velocity**2/g \n z_in = 0.0\n z_out = z_in-culvert_length*culvert_slope/100\n E_in = z_in+inlet_depth + 0.5*inlet_velocity**2/g\n E_out = z_out+outlet_depth + 0.5*outlet_velocity**2/g\n delta_total_energy = E_in-E_out\n \n \n Q_expected = 0.224\n v_expected = 0.152\n d_expected = 0.409\n\n if verbose:\n print(50*'=')\n print('width ',culvert_width)\n print('depth ',culvert_height)\n print('blockage',culvert_blockage)\n print('flow_width ',culvert_width)\n print('length ' ,culvert_length)\n print('driving_energy ',inlet_specific_energy)\n print('delta_total_energy ',delta_total_energy)\n print('outlet_enquiry_depth ',outlet_depth)\n print('sum_loss ',sum_loss)\n print('manning ',manning)\n \n Q, v, d, flow_area, case= boyd_box_function(culvert_width, \n culvert_height,\n culvert_blockage, \n culvert_barrels, \n culvert_width, \n culvert_length, \n inlet_specific_energy, \n delta_total_energy, \n outlet_depth, \n sum_loss,\n manning)\n\n# Q, v, d = boyd_generalised_culvert_model(inlet_depth,\n# outlet_depth,\n# inlet_velocity,\n# outlet_velocity,\n# inlet_specific_energy, \n# delta_total_energy, \n# g,\n# culvert_length,\n# culvert_width,\n# culvert_height,\n# culvert_type,\n# manning,\n# sum_loss)\n if verbose:\n print ('%s,%.2f,%.2f,%.2f' %('ANUGAcalcsTEST02 Q-v-d',Q,v,d))\n print('%s,%.2f,%.2f,%.2f' %('Spreadsheet_Boydcalcs', Q_expected, v_expected, d_expected))\n\n assert numpy.allclose(Q, Q_expected, rtol=1.0e-1) #inflow\n assert numpy.allclose(v, v_expected, rtol=1.0e-1) #outflow velocity\n assert numpy.allclose(d, d_expected, rtol=1.0e-1) #depth at outlet used to calc v " ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
parse the request packet & create trip
def trip_get(reqdata): pass
[ "def _ParseProtoPayloadRequest(\n self,\n request: Dict[str, Any],\n timesketch_record: Dict[str, Any]) -> None:\n request_attributes = [\n 'name', 'description', 'direction', 'member', 'targetTags', 'email',\n 'account_id'\n ]\n for attribute in request_attributes:\n if attribute in request:\n timesketch_attribute = 'request_{0:s}'.format(attribute)\n timesketch_record[timesketch_attribute] = request[attribute]\n\n # Firewall specific attributes.\n if 'sourceRanges' in request:\n source_ranges = ', '.join(request['sourceRanges'])\n timesketch_record['source_ranges'] = source_ranges\n\n if 'alloweds' in request:\n for allowed in request['alloweds']:\n attribute_name = 'allowed_{0:s}_ports'.format(allowed['IPProtocol'])\n if 'ports' in allowed:\n timesketch_record[attribute_name] = allowed['ports']\n else:\n timesketch_record[attribute_name] = 'all'\n\n if 'denieds' in request:\n for denied in request['denieds']:\n attribute_name = 'denied_{0:s}_ports'.format(denied['IPProtocol'])\n if 'ports' in denied:\n timesketch_record[attribute_name] = denied['ports']\n else:\n timesketch_record[attribute_name] = 'all'\n\n # Service account specific attributes\n if 'service_account' in request:\n service_account_name = request['service_account'].get('display_name')\n timesketch_record['service_account_display_name'] = service_account_name", "def test_parse_trip_str_basic(self):\n trip = trip_packet.trip.parse_trip_str(\n datetime.datetime(2019, 6, 1), TRIP_BASIC)\n \n self.assertEqual(trip.trip_str, TRIP_BASIC)\n self.assertEqual(trip.pairing_number, 'E0004')\n self.assertEqual(trip.days_available, [\n datetime.datetime(2019, 6, 1),\n datetime.datetime(2019, 6, 2),\n datetime.datetime(2019, 6, 5)])\n \n self.assertEqual(\n trip.total_credit, datetime.timedelta(hours=7, minutes=20))\n self.assertEqual(\n trip.total_block, datetime.timedelta(hours=7, minutes=20))\n self.assertEqual(\n trip.total_deadhead, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(\n trip.total_tafb, datetime.timedelta(hours=10, minutes=15))\n self.assertEqual(\n trip.total_rig, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(trip.total_per_diem, '23.06')\n\n self.assertEqual(len(trip.turns), 1)\n self.assertEqual(\n trip.turns[0].report_time, datetime.time(hour=6, minute=48))\n self.assertEqual(trip.turns[0].hotel_name, None)\n self.assertEqual(trip.turns[0].hotel_phone, None)\n self.assertEqual(trip.turns[0].shuttle_name, None)\n self.assertEqual(trip.turns[0].shuttle_phone, None)\n self.assertEqual(\n trip.turns[0].time_block, datetime.timedelta(hours=7, minutes=20))\n self.assertEqual(\n trip.turns[0].time_afb, datetime.timedelta(hours=10, minutes=15))\n\n self.assertEqual(len(trip.turns[0].flights), 2)\n flight = trip.turns[0].flights[0]\n self.assertEqual(flight.aircraft_type, '76C')\n self.assertEqual(flight.flight_number, '1886')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'EWR')\n self.assertEqual(flight.airport_arrive, 'IAH')\n self.assertEqual(flight.time_depart, datetime.time(hour=8, minute=3))\n self.assertEqual(flight.time_arrive, datetime.time(hour=10, minute=50))\n self.assertEqual(flight.time_total, datetime.timedelta(hours=3, minutes=47))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=1, minutes=25))\n\n flight = trip.turns[0].flights[1]\n self.assertEqual(flight.aircraft_type, '76C')\n self.assertEqual(flight.flight_number, '748')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'IAH')\n 
self.assertEqual(flight.airport_arrive, 'EWR')\n self.assertEqual(flight.time_depart, datetime.time(hour=12, minute=15))\n self.assertEqual(flight.time_arrive, datetime.time(hour=16, minute=48))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=3, minutes=33))\n self.assertEqual(\n flight.time_layover, None)", "def formRequestPacket(request):\r\n magicNumber = 0x497E\r\n packetType = 0x0001\r\n #Assign the appropriate request type\r\n #Checks already conducted in input phase\r\n if request == \"date\":\r\n requestType = 0x0001\r\n elif request == \"time\":\r\n requestType = 0x0002\r\n \r\n #Create and fill out the bytearray\r\n requestPacket = bytearray(6)\r\n requestPacket[0:2] = magicNumber.to_bytes(2, byteorder=\"big\")\r\n requestPacket[2:4] = packetType.to_bytes(2, byteorder=\"big\")\r\n requestPacket[4:6] = requestType.to_bytes(2, byteorder=\"big\")\r\n return requestPacket", "def _parse_request(self) -> None:\n action_and_name_from_request = self._request.split(PROTOCOL)\n self._action_from_request = action_and_name_from_request[0].split()[0]\n self._name_from_request = ' '.join(action_and_name_from_request[0].split()[1:])\n self._phone_from_request = self._request.split('\\r\\n')[1]", "def processRtspRequest(self, data):\n\t\t# Get the request type\n\t\trequest = data.split('\\n')\n\t\tline1 = request[0].split(' ')\n\t\trequestType = line1[0]\n\t\t\n\t\t# Get the media file name\n\t\tfilename = line1[1]\n\t\t\n\t\t# Get the RTSP sequence number \n\t\tseq = request[1].split(' ')\n\n\t\t# Process SETUP request\n\t\tprint(requestType)\n\t\tif requestType == self.SETUP:\n\t\t\tif self.state == self.INIT:\n\t\t\t\t# Update state\n\t\t\t\tprint(\"processing SETUP\\n\")\n\t\t\t\ttry:\n\t\t\t\t\t#print(filename)\n\t\t\t\t\tself.clientInfo['videoStream'] = VideoStream(filename)\n\t\t\t\t\tself.state = self.READY\n\t\t\t\t\tself.clientInfo['videoStream'].totalFrame(filename)\n\t\t\t\t\t\n\t\t\t\texcept IOError:\n\t\t\t\t\tself.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])\n\t\t\t\t\n\t\t\t\t# Generate a randomized RTSP session ID\n\t\t\t\tself.clientInfo['session'] = randint(100000, 999999)\n\t\t\t\t# Send RTSP reply\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\ttotalTime = (\"tt\" + ' ' + str(self.clientInfo['videoStream'].totalTime()) + ' ' + str(self.clientInfo['videoStream'].getFPS())).encode()\n\t\t\t\tself.clientInfo['rtspSocket'][0].send(totalTime)\n\t\t\t\t# Get the RTP/UDP port from the last line\n\n\t\t\t\tself.clientInfo['rtpPort'] = request[2].split(' ')[3]\n\t\t\n\t\t# Process PLAY request \t\t\n\t\telif requestType == self.PLAY:\n\t\t\tif self.state == self.READY or self.state == self.SWITCH:\n\t\t\t\tprint(\"processing PLAY\\n\")\n\t\t\t\tself.state = self.PLAYING\n\t\t\t\t\n\t\t\t\t# Create a new socket for RTP/UDP\n\t\t\t\tself.clientInfo[\"rtpSocket\"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\n\t\t\t\t# Create a new thread and start sending RTP packets\n\t\t\t\tself.clientInfo['event'] = threading.Event()\n\t\t\t\tself.clientInfo['event'].clear()\n\t\t\t\tself.clientInfo['worker'] = threading.Thread(target=self.sendRtp) \n\t\t\t\tself.clientInfo['worker'].start()\n\t\t\n\t\t# Process PAUSE request\n\t\telif requestType == self.PAUSE:\n\t\t\tif self.state == self.PLAYING:\n\t\t\t\tprint(\"processing PAUSE\\n\")\n\t\t\t\tself.state = self.READY\n\t\t\t\t\n\t\t\t\tself.clientInfo['event'].set()\n\t\t\t\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\n\t\t# Process TEARDOWN request\n\t\telif 
requestType == self.TEARDOWN:\n\t\t\t\tprint(\"processing TEARDOWN\\n\")\n\t\t\t\ttry:\n\t\t\t\t\tself.clientInfo['event'].set()\n\t\t\t\texcept: pass\n\t\t\t\tself.state = self.INIT\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\t# Close the RTP socket\n\t\t\t\ttry:\n\t\t\t\t\tself.clientInfo['rtpSocket'].close()\n\t\t\t\texcept: pass\n\t\t\n\t\t# Process FORWARD request\n\t\telif requestType == self.FORWARD:\n\t\t\tif self.state == self.PLAYING or self.state == self.READY:\n\t\t\t\tprint(\"processing FORWARD\\n\")\n\t\t\t\tself.clientInfo['videoStream'].moveForward()\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\n\t\t# Process BACKWARDvrequest\n\t\telif requestType == self.BACKWARD:\n\t\t\tif self.state == self.PLAYING or self.state == self.READY:\n\t\t\t\tprint(\"processing BACKWARD\\n\")\n\t\t\t\tself.clientInfo['videoStream'].moveBackward()\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\telif requestType == self.DESCRIBE:\n\t\t\t\tprint(\"processing DESCRIBE\\n\")\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\t\n\t\t\t\tv = 0 #protocol version\n\t\t\t\ts = 'Video streaming by using RTP and RTSP protocol'\n\t\t\t\tt = datetime.now()\n\t\t\t\tm = 'video ' + str(self.clientInfo['rtpPort']) + ' RTP/UDP'\n\t\t\t\ta = 'control:streamid=' + str(self.clientInfo['session']) + '\\na=mimetype:string;\\\"video/MJPEG\\\"'\n\t\t\t\tsdp1 ='\\n\\nv=' + str(v) + '\\ns=' + s + '\\nt=' + str(t) +'\\nm=' + m + '\\na=' + a\n\t\t\t\tsdp = 'cc' + 'Content-Base:' + filename + '\\nContent-Type:application/sdp' + '\\nContent-Length:' + str(len(sdp1)) + sdp1\n\t\t\t\tself.clientInfo['rtspSocket'][0].send(sdp.encode())\t\t\t\t \n\t\telif requestType == self.GETLIST:\n\t\t\t\ttry:\n\t\t\t\t\tself.clientInfo['event'].set()\n\t\t\t\texcept: pass\n\t\t\t\tself.state = self.SWITCH\n\t\t\t\tprint(\"processing GETLIST\\n\")\n\t\t\t\tjsonFile = open(\"videoList.txt\",\"r\")\n\t\t\t\toutput = ''\n\t\t\t\tfor line in jsonFile.readlines():\n\t\t\t\t\toutput += line\n\t\t\t\treply = 'RTSP/1.0 200 OK\\nCSeq: ' + seq[1] + '\\nSession: ' + str(self.clientInfo['session']) + '\\n' + output\n\t\t\t\tconnSocket = self.clientInfo['rtspSocket'][0] ## because this is RTSP/TCP, unlike the rpt sender,\n\t\t\t\tconnSocket.send(reply.encode())", "def test_parse_trip_str_hotel(self):\n trip = trip_packet.trip.parse_trip_str(\n datetime.datetime(2019, 6, 1), TRIP_HOTEL)\n \n self.assertEqual(trip.trip_str, TRIP_HOTEL)\n self.assertEqual(trip.pairing_number, 'E0008')\n self.assertEqual(trip.days_available, [\n datetime.datetime(2019, 6, 1),\n datetime.datetime(2019, 6, 4)])\n \n self.assertEqual(\n trip.total_credit, datetime.timedelta(hours=10, minutes=54))\n self.assertEqual(\n trip.total_block, datetime.timedelta(hours=10, minutes=54))\n self.assertEqual(\n trip.total_deadhead, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(\n trip.total_tafb, datetime.timedelta(hours=25, minutes=50))\n self.assertEqual(\n trip.total_rig, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(trip.total_per_diem, '58.11')\n\n self.assertEqual(len(trip.turns), 2)\n self.assertEqual(\n trip.turns[0].report_time, datetime.time(hour=5, minute=45))\n self.assertEqual(trip.turns[0].hotel_name, 'HYATT REGENCY AP')\n self.assertEqual(trip.turns[0].hotel_phone, '424-702-1234')\n self.assertEqual(trip.turns[0].shuttle_name, None)\n self.assertEqual(trip.turns[0].shuttle_phone, None)\n self.assertEqual(\n trip.turns[0].time_block, datetime.timedelta(hours=5, minutes=49))\n self.assertEqual(\n trip.turns[0].time_afb, 
datetime.timedelta(hours=7, minutes=19))\n\n self.assertEqual(\n trip.turns[1].report_time, datetime.time(hour=22, minute=15))\n self.assertEqual(trip.turns[1].hotel_name, None)\n self.assertEqual(trip.turns[1].hotel_phone, None)\n self.assertEqual(trip.turns[1].shuttle_name, None)\n self.assertEqual(trip.turns[1].shuttle_phone, None)\n self.assertEqual(\n trip.turns[1].time_block, datetime.timedelta(hours=5, minutes=5))\n self.assertEqual(\n trip.turns[1].time_afb, datetime.timedelta(hours=6, minutes=20))\n\n self.assertEqual(len(trip.turns[0].flights), 1)\n self.assertEqual(len(trip.turns[1].flights), 1)\n flight = trip.turns[0].flights[0]\n self.assertEqual(flight.aircraft_type, '78J')\n self.assertEqual(flight.flight_number, '240')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'EWR')\n self.assertEqual(flight.airport_arrive, 'LAX')\n self.assertEqual(flight.time_depart, datetime.time(hour=7, minute=0))\n self.assertEqual(flight.time_arrive, datetime.time(hour=9, minute=49))\n self.assertEqual(flight.time_total, datetime.timedelta(hours=5, minutes=49))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=13, minutes=26))\n\n flight = trip.turns[1].flights[0]\n self.assertEqual(flight.aircraft_type, '77G')\n self.assertEqual(flight.flight_number, '415')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'LAX')\n self.assertEqual(flight.airport_arrive, 'EWR')\n self.assertEqual(flight.time_depart, datetime.time(hour=23, minute=15))\n self.assertEqual(flight.time_arrive, datetime.time(hour=7, minute=20))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=5, minutes=5))\n self.assertEqual(\n flight.time_layover, None)", "def test_parse_trip_str_no(self):\n trip = trip_packet.trip.parse_trip_str(\n datetime.datetime(2019, 6, 1), TRIP_NO_HOUR)\n \n self.assertEqual(trip.trip_str, TRIP_NO_HOUR)\n self.assertEqual(trip.pairing_number, 'E0501')\n self.assertEqual(trip.days_available, [\n datetime.datetime(2019, 6, 5),\n datetime.datetime(2019, 6, 8)])\n \n self.assertEqual(\n trip.total_credit, datetime.timedelta(hours=10, minutes=15))\n self.assertEqual(\n trip.total_block, datetime.timedelta(hours=10, minutes=15))\n self.assertEqual(\n trip.total_deadhead, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(\n trip.total_tafb, datetime.timedelta(hours=12, minutes=40))\n self.assertEqual(\n trip.total_rig, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(trip.total_per_diem, '28.48')\n\n self.assertEqual(len(trip.turns), 1)\n self.assertEqual(\n trip.turns[0].report_time, datetime.time(hour=7, minute=30))\n self.assertEqual(trip.turns[0].hotel_name, None)\n self.assertEqual(trip.turns[0].hotel_phone, None)\n self.assertEqual(trip.turns[0].shuttle_name, None)\n self.assertEqual(trip.turns[0].shuttle_phone, None)\n self.assertEqual(\n trip.turns[0].time_block, datetime.timedelta(hours=10, minutes=15))\n self.assertEqual(\n trip.turns[0].time_afb, datetime.timedelta(hours=12, minutes=40))\n\n self.assertEqual(len(trip.turns[0].flights), 2)\n flight = trip.turns[0].flights[0]\n self.assertEqual(flight.aircraft_type, '73G')\n self.assertEqual(flight.flight_number, '1063')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'EWR')\n self.assertEqual(flight.airport_arrive, 'MEX')\n self.assertEqual(flight.time_depart, datetime.time(hour=8, minute=30))\n self.assertEqual(flight.time_arrive, datetime.time(hour=12, minute=50))\n 
self.assertEqual(flight.time_total, datetime.timedelta(hours=5, minutes=20))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=0, minutes=55))\n\n flight = trip.turns[0].flights[1]\n self.assertEqual(flight.aircraft_type, '73G')\n self.assertEqual(flight.flight_number, '1066')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'MEX')\n self.assertEqual(flight.airport_arrive, 'EWR')\n self.assertEqual(flight.time_depart, datetime.time(hour=13, minute=45))\n self.assertEqual(flight.time_arrive, datetime.time(hour=19, minute=40))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=4, minutes=55))\n self.assertEqual(\n flight.time_layover, None)", "def putRequest(self,tripRequest):\n\t\t\"\"\"Cannot modify a trip request after the deadlne and can only modify the time range + mode\"\"\"\n\t\tparser-reqparse.RequestParser()\n\t\tparser.add_argument(\"id\")\n\t\targs=parser.parse_args()\n\n\t\tif TripRequest[id] in TripRequests:\n\t\t\treturn \"That trip does not exist\", 404\n\n\t\tt=datetime.datetime.now()\n\n\t\tif t.hour > 15 :\n\t\t \treturn 'The deadline to modify a trip for today has passed', 404\n\t\telif t.hour > 21:\n\t\t \treturn ' The deadline to modiy a trip for tomorrow AM has passed', 404\n\t\telse:\t\t\n\t\t\ttripRequest[id] = {\n\t\t\tmode: args[mode],\n\t\t\tselectedTimeRange: args[DateTimeRange]\n\t\t\t}\n\t\treturn tripRequest, 200", "def build_pkt(pkt):\n\n def serialize(layers):\n \"\"\"Concatenate packet layers and serialize.\"\"\"\n result = packet.Packet()\n for layer in reversed(layers):\n result.add_protocol(layer)\n result.serialize()\n return result\n\n layers = []\n assert \"eth_dst\" in pkt and \"eth_src\" in pkt\n ethertype = None\n if \"arp_source_ip\" in pkt and \"arp_target_ip\" in pkt:\n ethertype = ether.ETH_TYPE_ARP\n arp_code = pkt.get(\"arp_code\", arp.ARP_REQUEST)\n layers.append(\n arp.arp(\n src_ip=pkt[\"arp_source_ip\"],\n dst_ip=pkt[\"arp_target_ip\"],\n opcode=arp_code,\n )\n )\n elif \"ipv6_src\" in pkt and \"ipv6_dst\" in pkt:\n ethertype = ether.ETH_TYPE_IPV6\n if \"router_solicit_ip\" in pkt:\n layers.append(icmpv6.icmpv6(type_=icmpv6.ND_ROUTER_SOLICIT))\n elif \"neighbor_advert_ip\" in pkt:\n layers.append(\n icmpv6.icmpv6(\n type_=icmpv6.ND_NEIGHBOR_ADVERT,\n data=icmpv6.nd_neighbor(\n dst=pkt[\"neighbor_advert_ip\"],\n option=icmpv6.nd_option_sla(hw_src=pkt[\"eth_src\"]),\n ),\n )\n )\n elif \"neighbor_solicit_ip\" in pkt:\n layers.append(\n icmpv6.icmpv6(\n type_=icmpv6.ND_NEIGHBOR_SOLICIT,\n data=icmpv6.nd_neighbor(\n dst=pkt[\"neighbor_solicit_ip\"],\n option=icmpv6.nd_option_sla(hw_src=pkt[\"eth_src\"]),\n ),\n )\n )\n elif \"echo_request_data\" in pkt:\n layers.append(\n icmpv6.icmpv6(\n type_=icmpv6.ICMPV6_ECHO_REQUEST,\n data=icmpv6.echo(id_=1, seq=1, data=pkt[\"echo_request_data\"]),\n )\n )\n layers.append(\n ipv6.ipv6(src=pkt[\"ipv6_src\"], dst=pkt[\"ipv6_dst\"], nxt=inet.IPPROTO_ICMPV6)\n )\n elif \"ipv4_src\" in pkt and \"ipv4_dst\" in pkt:\n ethertype = ether.ETH_TYPE_IP\n proto = inet.IPPROTO_IP\n if \"echo_request_data\" in pkt:\n echo = icmp.echo(id_=1, seq=1, data=pkt[\"echo_request_data\"])\n layers.append(icmp.icmp(type_=icmp.ICMP_ECHO_REQUEST, data=echo))\n proto = inet.IPPROTO_ICMP\n net = ipv4.ipv4(src=pkt[\"ipv4_src\"], dst=pkt[\"ipv4_dst\"], proto=proto)\n layers.append(net)\n elif \"actor_system\" in pkt and \"partner_system\" in pkt:\n ethertype = ether.ETH_TYPE_SLOW\n layers.append(\n slow.lacp(\n version=1,\n actor_system=pkt[\"actor_system\"],\n 
actor_port=1,\n partner_system=pkt[\"partner_system\"],\n partner_port=1,\n actor_key=1,\n partner_key=1,\n actor_system_priority=65535,\n partner_system_priority=1,\n actor_port_priority=255,\n partner_port_priority=255,\n actor_state_defaulted=0,\n partner_state_defaulted=0,\n actor_state_expired=0,\n partner_state_expired=0,\n actor_state_timeout=1,\n partner_state_timeout=1,\n actor_state_collecting=1,\n partner_state_collecting=1,\n actor_state_distributing=1,\n partner_state_distributing=1,\n actor_state_aggregation=1,\n partner_state_aggregation=1,\n actor_state_synchronization=pkt[\"actor_state_synchronization\"],\n partner_state_synchronization=1,\n actor_state_activity=0,\n partner_state_activity=0,\n )\n )\n elif \"chassis_id\" in pkt and \"port_id\" in pkt:\n ethertype = ether.ETH_TYPE_LLDP\n return valve_packet.lldp_beacon(\n pkt[\"eth_src\"],\n pkt[\"chassis_id\"],\n str(pkt[\"port_id\"]),\n 1,\n org_tlvs=pkt.get(\"org_tlvs\", None),\n system_name=pkt.get(\"system_name\", None),\n )\n assert ethertype is not None, pkt\n if \"vid\" in pkt:\n tpid = ether.ETH_TYPE_8021Q\n layers.append(vlan.vlan(vid=pkt[\"vid\"], ethertype=ethertype))\n else:\n tpid = ethertype\n eth = ethernet.ethernet(dst=pkt[\"eth_dst\"], src=pkt[\"eth_src\"], ethertype=tpid)\n layers.append(eth)\n result = serialize(layers)\n return result", "def parse_request(self):\n self.method, self.location, self.http_version = \\\n self.request_line.decode(\"utf-8\").split()", "def prepare_pnr_data_map(self, request=None, response=None):\n\n try:\n\n response_root = response[\"body\"][\"responseData\"].get(\"CreatePassengerNameRecordRS\", {})\n request_root = request.get(\"RequestBody\", {})\n\n # -- PNR Number\n pnr_no = response_root[\"ItineraryRef\"].get(\"ID\", None)\n # -- ROUTE -> ONE (1)\n route_1_origin_location_code = request_root[\"OriginDestinationInformation\"][0][\"OriginLocation\"].get(\n \"LocationCode\", None\n )\n route_1_destination_location_code = request_root[\"OriginDestinationInformation\"][0][\"DestinationLocation\"].get(\n \"LocationCode\", None\n )\n route_1_departure_date = parse(\n request_root[\"OriginDestinationInformation\"][0].get(\n \"DepartureDateTime\", None\n )\n )\n # -- ROUTE -> TWO (2)\n if len(request_root[\"OriginDestinationInformation\"]) >= 2:\n route_2_origin_location_code = request_root[\"OriginDestinationInformation\"][1][\"OriginLocation\"].get(\n \"LocationCode\", \"\"\n )\n route_2_destination_location_code = request_root[\"OriginDestinationInformation\"][1][\"DestinationLocation\"].get(\n \"LocationCode\", \"\"\n )\n route_2_departure_date = parse(\n request_root[\"OriginDestinationInformation\"][1].get(\n \"DepartureDateTime\", \"\"\n )\n )\n else:\n route_2_origin_location_code, route_2_destination_location_code, route_2_departure_date = None, None, None\n # -- ROUTE -> THREE (3)\n if len(request_root[\"OriginDestinationInformation\"]) >= 3:\n route_3_origin_location_code = request_root[\"OriginDestinationInformation\"][2][\"OriginLocation\"].get(\n \"LocationCode\", \"\"\n )\n route_3_destination_location_code = request_root[\"OriginDestinationInformation\"][2][\"DestinationLocation\"].get(\n \"LocationCode\", \"\"\n )\n route_3_departure_date = parse(\n request_root[\"OriginDestinationInformation\"][2].get(\n \"DepartureDateTime\", \"\"\n )\n )\n else:\n route_3_origin_location_code, route_3_destination_location_code, route_3_departure_date = None, None, None\n # -- ROUTE -> FOUR (4)\n if len(request_root[\"OriginDestinationInformation\"]) >= 4:\n 
route_4_origin_location_code = request_root[\"OriginDestinationInformation\"][3][\"OriginLocation\"].get(\n \"LocationCode\", \"\"\n )\n route_4_destination_location_code = request_root[\"OriginDestinationInformation\"][3][\"DestinationLocation\"].get(\n \"LocationCode\", \"\"\n )\n route_4_departure_date = parse(\n request_root[\"OriginDestinationInformation\"][3].get(\n \"DepartureDateTime\", \"\"\n )\n )\n else:\n route_4_origin_location_code, route_4_destination_location_code, route_4_departure_date = None, None, None\n # -- Carrier Code\n carrier_code = request_root[\"TargetItinerary\"][\"ScheduleDescription\"][0][\"Carrier\"].get(\n \"OperatingCarrierCode\", None\n )\n # -- Flight Number\n flight_number = request_root[\"TargetItinerary\"][\"ScheduleDescription\"][0][\"Carrier\"].get(\n \"OperatingFlightNumber\", None\n )\n # -- Cabin Class\n cabin_class = request_root[\"TargetItinerary\"][\"PassengerInfo\"][0][\"FareDescription\"][0][\"Segment\"][0].get(\n \"CabinClass\", None\n )\n # -- Total Amount\n total_amount = request_root[\"TargetItinerary\"][\"TotalFare\"].get(\n \"TotalFare\", None\n )\n # -- Currency\n currency = request_root[\"TargetItinerary\"][\"TotalFare\"].get(\n \"FareCurrency\", None\n )\n\n # prepare data dictionary\n data_map = {\n # -- PNR Number\n \"pnr_no\": pnr_no,\n # -- ROUTE -> (1)\n \"route_1_origin_location_code\": route_1_origin_location_code,\n \"route_1_destination_location_code\": route_1_destination_location_code,\n \"route_1_departure_date\": route_1_departure_date,\n # -- ROUTE -> (2)\n \"route_2_origin_location_code\": route_2_origin_location_code,\n \"route_2_destination_location_code\": route_2_destination_location_code,\n \"route_2_departure_date\": route_2_departure_date,\n # -- ROUTE -> (3)\n \"route_3_origin_location_code\": route_3_origin_location_code,\n \"route_3_destination_location_code\": route_3_destination_location_code,\n \"route_3_departure_date\": route_3_departure_date,\n # -- ROUTE -> (4)\n \"route_4_origin_location_code\": route_4_origin_location_code,\n \"route_4_destination_location_code\": route_4_destination_location_code,\n \"route_4_departure_date\": route_4_departure_date,\n \"carrier_code\": carrier_code,\n \"flight_number\": flight_number,\n \"cabin_class\": cabin_class,\n \"total_amount\": total_amount,\n \"currency\": currency,\n }\n\n # return the data map\n return data_map\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to prepare data map for PNR Database!\"\n )\n )", "def create(self, request, action):\n try:\n put_data = None\n xml_skip = r\"^data=*\"\n try:\n put_data = request.POST['data']\n except KeyError:\n put_data = request.read()\n\n request_type = request.META.get(\"HTTP_HTTP_ACCEPT\",\n request.META.get(\n 'HTTP_ACCEPT','application/xml'))\n err_response = {\n \"clientOrderID\": \"\",\n \"errorCode\": \"\",\n \"errorMessage\": \"\",\n }\n\n if action == \"createOrder\":\n #based on http accept type of request parse over the\n #requested encoded string. 
If accept doesnt match return error.\n if request_type == \"application/json\":\n json_data = json.loads(put_data)\n elif request_type == \"application/xml\":\n put_data = re.sub(xml_skip, '', put_data)\n put_data = unidecode(put_data)\n put_data = urllib.unquote_plus(put_data)\n xml_to_json = xml2json(urllib.unquote(put_data))\n if not xml_to_json:\n err_response[\"errorCode\"] = 4003\n err_response[\"errorMessage\"] = \"Invalid XML structure.\"\n return {\"createOrderResponse\": err_response}\n json_data =\\\n json.loads(xml_to_json)[\"transaction\"]\n else:\n err_response[\"errorCode\"] = 4003\n err_response[\"errorMessage\"] =\\\n \"HTTP accept must be application/json or application/xml.\"\n return {\"createOrderResponse\": err_response}\n #this condition created the feeder structure from the above\n #converted xml/json - json_data\n post_dict = dict()\n prd_description = None\n post_dict[\"shipments\"] = []\n client_name = request.user.userprofile.client.name\n #pushing hard coded pickup location for CMU to handle.\n #(cmu breaks for missing pickup_location)\n\n od = json_data.get(\"orderDetails\", {})\n cust_dict = json_data.get(\"customerDetails\", {})\n\n #this is to dump extra product details or sub order details\n #in serialised form in key- etc\n product_details = od.get(\"productDetails\", None)\n etc = unicode(unidecode(cPickle.dumps(product_details)))\n if type(product_details) is dict:\n prd_description = product_details.get(\"productDescription\", None)\n client_oid = od.get(\"clientOrderID\", None)\n if not client_oid:\n err_response[\"errorCode\"] = 3011\n err_response[\"errorMessage\"] =\\\n \"clientOrderID is required.\"\n return {\"createOrderResponse\": err_response}\n #feeder dict structure for cmu\n consignee_name = \"%s\" % ((\" \").join([\n unicode(cust_dict.get(\"prefix\", \"\")),\n unicode(cust_dict.get(\"firstName\", \"\")),\n unicode(cust_dict.get(\"lastName\", \"\"))]))\n feed_dict = {\n \"order\": client_oid,\n \"client\": client_name,\n \"name\": unidecode(consignee_name),\n \"order_date\": od.get(\"deliveryDate\", None),\n \"total_amount\": od.get(\"orderAmount\", None),\n \"cod_amount\": od.get(\"orderAmount\", None),\n \"pin\": od.get(\"pincode\", None),\n \"add\": unidecode(cust_dict.get(\"address\", None)),\n \"phone\": cust_dict.get(\"contactNo\", None),\n \"email\": cust_dict.get(\"email\", None),\n \"products_desc\": prd_description,\n \"payment_mode\": \"Cash\",\n \"invoice_url\": od.get(\"InvoiceUrl\", None),\n \"extra_parameters\": etc,\n }\n post_dict[\"shipments\"].append(feed_dict)\n #by now the feeder dict is ready and is part of post_dict\n res = gharpay_cmu_feeder(post_dict, err_response, request)\n return res\n\n if action == \"cancelOrder\":\n \"\"\"\n This action type handles cancelation or orders.\n \"\"\"\n cancel_response = {\n \"cancelOrderResponse\": {\n \"result\": \"false\",\n \"orderID\": \"\"\n }}\n #based on http accept type of request parse over the\n #requested encoded string. 
If accept doesnt match return error.\n if request_type == \"application/json\":\n json_data = json.loads(put_data)\n elif request_type == \"application/xml\":\n json_data =\\\n json.loads(xml2json(put_data))\n else:\n return cancel_response\n cancel_order_data = json_data.get(\"cancelOrder\", {})\n oid = cancel_order_data.get(\"orderID\", None)\n #if oid not send in request, return error response\n if not oid:\n return cancel_response\n cancel_response[\"cancelOrderResponse\"][\"orderID\"] = oid\n pkg = find_by_refnum(oid)\n if (not pkg) or pkg.get(\"fail\"):\n return cancel_response\n #If pakcage in Open/Scheduled states, allow cancellation of pkg\n if pkg[\"success\"].get(\"cs\").get(\"ss\", None) in \\\n [\"Open\", \"Scheduled\"]:\n #Queue task and send a success response\n add_status.delay(\n pkg[\"success\"][\"wbn\"], \"Canceled\", request.user.username,\n remarks=\"Marked as cancelled by client\")\n cancel_response[\"cancelOrderResponse\"][\"result\"] = \"true\"\n return cancel_response\n #Should never reach here. return false if ss not present.\n return cancel_response\n err_response[\"errorCode\"] = 4003\n err_response[\"errorMessage\"] =\\\n \"Action requested is invalid.\"\n return {\"createOrderResponse\": err_response}\n except Exception as e:\n err_response = dict()\n err_response[\"errorCode\"] = 4004\n err_response[\"errorMessage\"] =\"Package creation API error.\" \\\n \"Please contact tech.admin@delhivery.com \" \\\n \"and quote this error - {0}\".format(e.message)\n return {\"createOrderResponse\": err_response}", "def get_triplets(self, input_file_path, output_file_path):\n\n data = self.get_content(input_file_path)\n\n triplets = []\n\n print('Extracting triplets...')\n for sent in data:# where each sent is a tuple of the form (sent_id, list of data-per-line-lists)\n sent_id = sent[0]\n #print('Working on Sent no:', sent_id)\n\n A0 = []\n A1 = []\n V = []\n A0_1 = []\n A1_1 = []\n V_1 = []\n A0_2 = []\n A1_2 = []\n V_2 = []\n A0_3 = []\n A1_3 = []\n V_3 = []\n\n for line_list in sent[1]:# where each line_list is the data on one line in input file:\n #print(line_list)\n if len(line_list) >= 6:\n if line_list[5].endswith('A0') and line_list[1] not in {'PRP', 'WDT', 'PRP$'}:\n A0.append(line_list[0])\n elif line_list[5].endswith('A1') and line_list[1] not in {'PRP', 'WDT', 'PRP$'}:\n A1.append(line_list[0])\n elif line_list[5].endswith('V') and line_list[1] in {'VB','VBD','VBG','VBN','VBP','VBZ', 'IN'}:\n V.append(line_list[0])\n\n if len(line_list) >= 7:\n if line_list[6].endswith('A0') and line_list[1] not in {'PRP', 'WDT', 'PRP$', 'WP'}:\n A0_1.append(line_list[0])\n elif line_list[6].endswith('A1') and line_list[1] not in {'PRP', 'WDT', 'PRP$', 'WP'}:\n A1_1.append(line_list[0])\n elif line_list[6].endswith('V') and line_list[1] in {'VB','VBD','VBG','VBN','VBP','VBZ', 'IN'}:\n V_1.append(line_list[0])\n\n if len(line_list) >= 8:\n if line_list[7].endswith('A0') and line_list[1] not in {'PRP', 'WDT', 'PRP$', 'WP'}:\n A0_2.append(line_list[0])\n elif line_list[7].endswith('A1') and line_list[1] not in {'PRP', 'WDT', 'PRP$', 'WP'}:\n A1_2.append(line_list[0])\n elif line_list[7].endswith('V') and line_list[1] in {'VB','VBD','VBG','VBN','VBP','VBZ', 'IN'}:\n V_2.append(line_list[0])\n\n if len(line_list) >= 9:\n if line_list[8].endswith('A0') and line_list[1] not in {'PRP', 'WDT', 'PRP$', 'WP'}:\n A0_3.append(line_list[0])\n elif line_list[8].endswith('A1') and line_list[1] not in {'PRP', 'WDT', 'PRP$', 'WP'}:\n A1_3.append(line_list[0])\n elif line_list[8].endswith('V') 
and line_list[1] in {'VB','VBD','VBG','VBN','VBP','VBZ', 'IN'}:\n V_3.append(line_list[0])\n\n # triplets found as 4-tuples (including sent_id)\n # if triplet has all four components, keep it\n '''\n filtering constraints: max length for subjects and objects: 8 words.\n max length for verbs: 3 words.\n '''\n\n if 9 > len(A0) > 0 and 4 > len(V) > 0 and 9 > len(A1) > 0:\n triplets.append((sent_id, A0, V, A1))\n if 9 > len(A0_1) > 0 and 4 > len(V_1) > 0 and 9 > len(A1_1) > 0:\n triplets.append((sent_id, A0_1, V_1, A1_1))\n if 9 > len(A0_2) > 0 and 4 > len(V_2) > 0 and 9 > len(A1_2) > 0:\n triplets.append((sent_id, A0_2, V_2, A1_2))\n if 9 > len(A0_3) > 0 and 4 > len(V_3) > 0 and 9 > len(A1_3) > 0:\n triplets.append((sent_id, A0_3, V_3, A1_3))\n\n print('Outputting triplets ...')\n\n with open(output_file_path, 'w', encoding='utf-8') as fo:\n for trip in triplets:\n print('Sentence:', trip[0], file=fo)\n print('A0:', file = fo)\n for word in trip[1]:\n print(word, file=fo)\n print('V:', file = fo)\n for word in trip[2]:\n print(word, file=fo)\n print('A1:', file = fo)\n for word in trip[3]:\n print(word, file=fo)\n print(\"\", file=fo)\n\n print('Done')\n return", "def create_request(self):\n date_time = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')\n present_time = date_time[0:-3] + 'Z'\n # Using the web service post() method to create request\n response = requests.post(url=bid_url, headers={'Authorization': self.api_key}, json={\n \"type\": self.bid_type.get(),\n \"initiatorId\": self.current_user.id,\n \"dateCreated\": present_time,\n \"subjectId\": Subject().get_id_by_name(self.subject.get()),\n \"additionalInfo\": {\"competency\": self.competency.get(), \"hours_per_week\": self.hours_per_session.get(),\n \"sessions_per_week\": self.sessions_per_week.get(),\n \"rate_per_session\": self.rate_per_session.get()}\n }\n )\n json_data = response.json()\n # Destroying current window and jumping to next screen by calling the main() method from the NewRequestDetails \n # class\n self.window.destroy()\n NewRequestDetails(json_data).main()", "def test_parse_trip_str_deadhead(self):\n trip = trip_packet.trip.parse_trip_str(\n datetime.datetime(2019, 6, 1), TRIP_DH)\n \n self.assertEqual(trip.trip_str, TRIP_DH)\n self.assertEqual(trip.pairing_number, 'E0022')\n self.assertEqual(trip.days_available, [\n datetime.datetime(2019, 6, 20)])\n \n self.assertEqual(\n trip.total_credit, datetime.timedelta(hours=12, minutes=34))\n self.assertEqual(\n trip.total_block, datetime.timedelta(hours=11, minutes=0))\n self.assertEqual(\n trip.total_deadhead, datetime.timedelta(hours=1, minutes=34))\n self.assertEqual(\n trip.total_tafb, datetime.timedelta(hours=42, minutes=20))\n self.assertEqual(\n trip.total_rig, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(trip.total_per_diem, '95.24')\n\n self.assertEqual(len(trip.turns), 2)\n self.assertEqual(\n trip.turns[0].report_time, datetime.time(hour=13, minute=45))\n self.assertEqual(trip.turns[0].hotel_name, 'HOLIDAY INN GATEWAY')\n self.assertEqual(trip.turns[0].hotel_phone, '415-441-4000')\n self.assertEqual(trip.turns[0].shuttle_name, 'AIRLINE COACH SERVICE')\n self.assertEqual(trip.turns[0].shuttle_phone, '650-697-7733')\n self.assertEqual(\n trip.turns[0].time_block, datetime.timedelta(hours=5, minutes=40))\n self.assertEqual(\n trip.turns[0].time_afb, datetime.timedelta(hours=9, minutes=55))\n\n self.assertEqual(\n trip.turns[1].report_time, datetime.time(hour=22, minute=30))\n self.assertEqual(trip.turns[1].hotel_name, None)\n 
self.assertEqual(trip.turns[1].hotel_phone, None)\n self.assertEqual(trip.turns[1].shuttle_name, None)\n self.assertEqual(trip.turns[1].shuttle_phone, None)\n self.assertEqual(\n trip.turns[1].time_block, datetime.timedelta(hours=5, minutes=20))\n self.assertEqual(\n trip.turns[1].time_afb, datetime.timedelta(hours=6, minutes=35))\n\n self.assertEqual(len(trip.turns[0].flights), 2)\n self.assertEqual(len(trip.turns[1].flights), 1)\n flight = trip.turns[0].flights[0]\n self.assertEqual(flight.aircraft_type, '20S')\n self.assertEqual(flight.flight_number, '1992')\n self.assertEqual(flight.dead_head, True)\n self.assertEqual(flight.airport_depart, 'EWR')\n self.assertEqual(flight.airport_arrive, 'IAD')\n self.assertEqual(flight.time_depart, datetime.time(hour=14, minute=30))\n self.assertEqual(flight.time_arrive, datetime.time(hour=16, minute=4))\n self.assertEqual(flight.time_total, datetime.timedelta(hours=1, minutes=34))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=1, minutes=41))\n\n flight = trip.turns[0].flights[1]\n self.assertEqual(flight.aircraft_type, '77U')\n self.assertEqual(flight.flight_number, '340')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'IAD')\n self.assertEqual(flight.airport_arrive, 'SFO')\n self.assertEqual(flight.time_depart, datetime.time(hour=17, minute=45))\n self.assertEqual(flight.time_arrive, datetime.time(hour=20, minute=25))\n self.assertEqual(flight.time_total, datetime.timedelta(hours=5, minutes=40))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=27, minutes=5))\n\n flight = trip.turns[1].flights[0]\n self.assertEqual(flight.aircraft_type, '77G')\n self.assertEqual(flight.flight_number, '1796')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'SFO')\n self.assertEqual(flight.airport_arrive, 'EWR')\n self.assertEqual(flight.time_depart, datetime.time(hour=23, minute=30))\n self.assertEqual(flight.time_arrive, datetime.time(hour=7, minute=50))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=5, minutes=20))\n self.assertEqual(\n flight.time_layover, None)", "def create_discover_payload(self):\n discoverRequest = ET.Element(\"discoverRequest\")\n type = ET.SubElement(discoverRequest, \"type\")\n type.text = self._module.paramgram[\"type\"]\n if self._module.paramgram[\"root_ip\"] and self._module.paramgram[\"type\"] == \"SmartScan\":\n rootIP = ET.SubElement(discoverRequest, \"rootIP\")\n rootIP.text = self._module.paramgram[\"root_ip\"]\n includeRange = ET.SubElement(discoverRequest, \"includeRange\")\n includeRange.text = self._module.paramgram[\"include_range\"]\n excludeRange = ET.SubElement(discoverRequest, \"excludeRange\")\n excludeRange.text = self._module.paramgram[\"exclude_range\"]\n # PROCESS OPTIONS\n noPing = ET.SubElement(discoverRequest, \"noPing\")\n noPing.text = str(self._module.paramgram[\"no_ping\"]).lower()\n onlyPing = ET.SubElement(discoverRequest, \"onlyPing\")\n onlyPing.text = str(self._module.paramgram[\"only_ping\"]).lower()\n\n delta = ET.SubElement(discoverRequest, \"delta\")\n delta.text = str(self._module.paramgram[\"delta\"]).lower()\n\n vmOff = ET.SubElement(discoverRequest, \"vmOff\")\n vmOff.text = str(self._module.paramgram[\"vm_off\"]).lower()\n\n vmTemplate = ET.SubElement(discoverRequest, \"vmTemplate\")\n vmTemplate.text = str(self._module.paramgram[\"vm_templates\"]).lower()\n\n discoverRoute = ET.SubElement(discoverRequest, \"discoverRoute\")\n discoverRoute.text = 
str(self._module.paramgram[\"discover_routes\"]).lower()\n\n winexeBased = ET.SubElement(discoverRequest, \"winexeBased\")\n winexeBased.text = str(self._module.paramgram[\"winexe_based\"]).lower()\n\n unmanaged = ET.SubElement(discoverRequest, \"unmanaged\")\n unmanaged.text = str(self._module.paramgram[\"unmanaged\"]).lower()\n\n monitorWinEvents = ET.SubElement(discoverRequest, \"monitorWinEvents\")\n monitorWinEvents.text = str(self._module.paramgram[\"monitor_win_events\"]).lower()\n\n monitorWinPatch = ET.SubElement(discoverRequest, \"monitorWinPatch\")\n monitorWinPatch.text = str(self._module.paramgram[\"monitor_win_patches\"]).lower()\n\n monitorInstSw = ET.SubElement(discoverRequest, \"monitorInstSw\")\n monitorInstSw.text = str(self._module.paramgram[\"monitor_installed_sw\"]).lower()\n\n nameResolutionDnsFirst = ET.SubElement(discoverRequest, \"nameResolutionDnsFirst\")\n nameResolutionDnsFirst.text = str(self._module.paramgram[\"name_resolution_dns_first\"]).lower()\n\n xmlstr = ET.tostring(discoverRequest, 'utf-8')\n return xmlstr", "def parseRequest(self):\n\t\tself.rBuf = self.rBuf.replace('\\x0c','')", "def test_parse_trip_str_three_days(self):\n trip = trip_packet.trip.parse_trip_str(\n datetime.datetime(2019, 6, 1), TRIP_THREE_DAYS)\n \n self.assertEqual(trip.trip_str, TRIP_THREE_DAYS)\n self.assertEqual(trip.pairing_number, 'E0032')\n self.assertEqual(trip.days_available, [\n datetime.datetime(2019, 6, 7),\n datetime.datetime(2019, 6, 14)])\n \n self.assertEqual(\n trip.total_credit, datetime.timedelta(hours=15, minutes=0))\n self.assertEqual(\n trip.total_block, datetime.timedelta(hours=13, minutes=37))\n self.assertEqual(\n trip.total_deadhead, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(\n trip.total_tafb, datetime.timedelta(hours=52, minutes=10))\n self.assertEqual(\n trip.total_rig, datetime.timedelta(hours=1, minutes=23))\n self.assertEqual(trip.total_per_diem, '117.36')\n\n self.assertEqual(len(trip.turns), 3)\n self.assertEqual(\n trip.turns[0].report_time, datetime.time(hour=15, minute=45))\n self.assertEqual(trip.turns[0].hotel_name, 'MARRIOTT AIRPORT')\n self.assertEqual(trip.turns[0].hotel_phone, '281-443-2310')\n self.assertEqual(trip.turns[0].shuttle_name, None)\n self.assertEqual(trip.turns[0].shuttle_phone, None)\n self.assertEqual(\n trip.turns[0].time_block, datetime.timedelta(hours=3, minutes=44))\n self.assertEqual(\n trip.turns[0].time_afb, datetime.timedelta(hours=5, minutes=14))\n\n self.assertEqual(\n trip.turns[1].report_time, datetime.time(hour=9, minute=20))\n self.assertEqual(trip.turns[1].hotel_name, 'MARRIOTT AIRPORT')\n self.assertEqual(trip.turns[1].hotel_phone, '281-443-2310')\n self.assertEqual(trip.turns[1].shuttle_name, None)\n self.assertEqual(trip.turns[1].shuttle_phone, None)\n self.assertEqual(\n trip.turns[1].time_block, datetime.timedelta(hours=4, minutes=50))\n self.assertEqual(\n trip.turns[1].time_afb, datetime.timedelta(hours=7, minutes=48))\n\n self.assertEqual(\n trip.turns[2].report_time, datetime.time(hour=9, minute=2))\n self.assertEqual(trip.turns[2].hotel_name, None)\n self.assertEqual(trip.turns[2].hotel_phone, None)\n self.assertEqual(trip.turns[2].shuttle_name, None)\n self.assertEqual(trip.turns[2].shuttle_phone, None)\n self.assertEqual(\n trip.turns[2].time_block, datetime.timedelta(hours=5, minutes=3))\n self.assertEqual(\n trip.turns[2].time_afb, datetime.timedelta(hours=9, minutes=53))\n\n self.assertEqual(len(trip.turns[0].flights), 1)\n self.assertEqual(len(trip.turns[1].flights), 2)\n 
self.assertEqual(len(trip.turns[2].flights), 2)\n\n flight = trip.turns[0].flights[0]\n self.assertEqual(flight.aircraft_type, '77Y')\n self.assertEqual(flight.flight_number, '598')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'EWR')\n self.assertEqual(flight.airport_arrive, 'IAH')\n self.assertEqual(flight.time_depart, datetime.time(hour=17, minute=0))\n self.assertEqual(flight.time_arrive, datetime.time(hour=19, minute=44))\n self.assertEqual(flight.time_total, datetime.timedelta(hours=3, minutes=44))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=14, minutes=36))\n\n flight = trip.turns[1].flights[0]\n self.assertEqual(flight.aircraft_type, '77G')\n self.assertEqual(flight.flight_number, '1101')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'IAH')\n self.assertEqual(flight.airport_arrive, 'DEN')\n self.assertEqual(flight.time_depart, datetime.time(hour=10, minute=20))\n self.assertEqual(flight.time_arrive, datetime.time(hour=11, minute=47))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=2, minutes=27))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=1, minutes=43))\n\n flight = trip.turns[1].flights[1]\n self.assertEqual(flight.aircraft_type, '77G')\n self.assertEqual(flight.flight_number, '313')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'DEN')\n self.assertEqual(flight.airport_arrive, 'IAH')\n self.assertEqual(flight.time_depart, datetime.time(hour=13, minute=30))\n self.assertEqual(flight.time_arrive, datetime.time(hour=16, minute=53))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=2, minutes=23))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=17, minutes=9))\n\n flight = trip.turns[2].flights[0]\n self.assertEqual(flight.aircraft_type, '76C')\n self.assertEqual(flight.flight_number, '1403')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'IAH')\n self.assertEqual(flight.airport_arrive, 'ORD')\n self.assertEqual(flight.time_depart, datetime.time(hour=10, minute=2))\n self.assertEqual(flight.time_arrive, datetime.time(hour=12, minute=40))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=2, minutes=38))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=3, minutes=35))\n\n flight = trip.turns[2].flights[1]\n self.assertEqual(flight.aircraft_type, '77Y')\n self.assertEqual(flight.flight_number, '1995')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'ORD')\n self.assertEqual(flight.airport_arrive, 'EWR')\n self.assertEqual(flight.time_depart, datetime.time(hour=16, minute=15))\n self.assertEqual(flight.time_arrive, datetime.time(hour=19, minute=40))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=2, minutes=25))\n self.assertEqual(flight.time_layover, None)", "def test_parse_trip_str_full_calendar(self):\n trip = trip_packet.trip.parse_trip_str(\n datetime.datetime(2019, 6, 1), TRIP_FULL_CALENDAR)\n \n self.assertEqual(trip.trip_str, TRIP_FULL_CALENDAR)\n self.assertEqual(trip.pairing_number, 'E0007')\n self.assertEqual(trip.days_available, [\n datetime.datetime(2019, 6, x) for x in range(6, 31)])\n \n self.assertEqual(\n trip.total_credit, datetime.timedelta(hours=8, minutes=0))\n self.assertEqual(\n trip.total_block, datetime.timedelta(hours=8, minutes=0))\n self.assertEqual(\n trip.total_deadhead, datetime.timedelta(hours=0, minutes=0))\n 
self.assertEqual(\n trip.total_tafb, datetime.timedelta(hours=10, minutes=54))\n self.assertEqual(\n trip.total_rig, datetime.timedelta(hours=0, minutes=0))\n self.assertEqual(trip.total_per_diem, '24.52')\n\n self.assertEqual(len(trip.turns), 1)\n self.assertEqual(\n trip.turns[0].report_time, datetime.time(hour=7, minute=30))\n self.assertEqual(trip.turns[0].hotel_name, None)\n self.assertEqual(trip.turns[0].hotel_phone, None)\n self.assertEqual(trip.turns[0].shuttle_name, None)\n self.assertEqual(trip.turns[0].shuttle_phone, None)\n self.assertEqual(\n trip.turns[0].time_block, datetime.timedelta(hours=8, minutes=0))\n self.assertEqual(\n trip.turns[0].time_afb, datetime.timedelta(hours=10, minutes=54))\n\n self.assertEqual(len(trip.turns[0].flights), 2)\n flight = trip.turns[0].flights[0]\n self.assertEqual(flight.aircraft_type, '76S')\n self.assertEqual(flight.flight_number, '1523')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'EWR')\n self.assertEqual(flight.airport_arrive, 'SJU')\n self.assertEqual(flight.time_depart, datetime.time(hour=8, minute=45))\n self.assertEqual(flight.time_arrive, datetime.time(hour=12, minute=41))\n self.assertEqual(flight.time_total, datetime.timedelta(hours=3, minutes=56))\n self.assertEqual(\n flight.time_layover, datetime.timedelta(hours=1, minutes=24))\n\n flight = trip.turns[0].flights[1]\n self.assertEqual(flight.aircraft_type, '76S')\n self.assertEqual(flight.flight_number, '1173')\n self.assertEqual(flight.dead_head, False)\n self.assertEqual(flight.airport_depart, 'SJU')\n self.assertEqual(flight.airport_arrive, 'EWR')\n self.assertEqual(flight.time_depart, datetime.time(hour=14, minute=5))\n self.assertEqual(flight.time_arrive, datetime.time(hour=18, minute=9))\n self.assertEqual(\n flight.time_total, datetime.timedelta(hours=4, minutes=4))\n self.assertEqual(\n flight.time_layover, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a MessagePack file and yield the individual messages.
def read_file(filename):
    with open(filename, 'rb') as file:
        unpacker = msgpack.Unpacker(file, raw=False)
        for msg in unpacker:
            yield msg
[ "def read_msgpack(path: PathType) -> Any:\n\n with copen(path, \"rb\") as fr:\n return unpack(fr, use_list=False, raw=False, strict_map_key=False, ext_hook=ext_hook)", "def read_messages (file_of_messages):\n line = file_of_messages.readline()\n collection_of_messages = []\n while (line != \"\"):\n collection_of_messages.append(line.strip())\n line = file_of_messages.readline()\n return (collection_of_messages)", "def read_message(self):\n while True:\n if self.next_chunk_size is None:\n chunk_size = self.wire.read(2)\n self.next_chunk_size, = struct_unpack(\">H\", chunk_size)\n if self.next_chunk_size:\n chunk_data = self.wire.read(self.next_chunk_size)\n self.next_chunk_size = None\n self.data_buffer.append(chunk_data)\n else:\n self.next_chunk_size = None\n break\n buffer = UnpackableBuffer(b\"\".join(self.data_buffer))\n self.data_buffer = []\n unpacker = Unpacker(buffer, self.packstream_version)\n return unpacker.unpack_message()", "def load_message(filename):\n with open(filename, 'r') as f:\n return f.read()", "def read_message(message_file):\n chars = []\n\n with open(message_file) as f:\n for line in f:\n for char in line:\n chars.append(char)\n\n return chars", "def read_messages(input_file):\n messages = []\n while True:\n line = input_file.readline()\n if line == '':\n break\n length = int(line)\n message_data = input_file.read(length)\n input_file.readline()\n message = email.message_from_string(message_data)\n messages.append(message)\n\n return messages", "def unpack(filename):\n with open(filename, 'rb') as infile:\n unpacked = msgpack.unpack(infile)\n return msgpack_transform(unpacked[1], unpacked[0])", "def msgpack_loader(mp_path):\n tic = time.time()\n with open(mp_path, \"rb\") as f:\n buffer = f.read()\n print(f\"[I/O: {time.time() - tic:.1f}s]\", end=\" \")\n tic = time.time()\n ## super danger! yang :utf-8 ==> latin\n data = msgpack_np.unpackb(buffer, object_hook=msgpack_np.decode, encoding=\"latin\") \n print(f\"[deserialisation: {time.time() - tic:.1f}s]\", end=\" \")\n return data", "def read_messages(self):\n\n msg = \"\"\n try:\n while True:\n line = self.message_queue.get_nowait()\n # msg += line.decode(\"utf-8\")\n msg += line\n except Empty:\n pass # finished\n return msg", "def parse_file(self, message):\n try:\n self.model.parse_file(message.data)\n \n except Exception as exception:\n self.view.show_exception('Error reading file', 'The following error happened while reading the file:\\n%s' % str(exception))", "def parse_message(text_file: str) -> str:\n f = open(text_file, \"r\")\n return \"\".join(f.readlines())", "def get_bag_file_msg_by_type(bag_file_path, msg_type):\n bag_file = rosbag.Bag(bag_file_path)\n bag_topics = bag_file.get_type_and_topic_info()[1]\n messages = []\n for topic, msg, t in bag_file.read_messages():\n if topic not in bag_topics or bag_topics[topic].msg_type != msg_type:\n continue\n # serialize and deserialize message to get rid of bag file type\n msg_string = to_cpp(msg)\n msg = from_cpp(msg_string, get_message_class(msg_type))\n messages.append(msg)\n bag_file.close()\n return messages", "def load_messages(filepath, start=0, limit=None, verbose=False):\n return __load_data(filepath, message.parse_csv_row, start, limit, verbose)", "def load_messages():\n with open('messages.txt', 'r') as messages:\n messageList = []\n for line in messages:\n # Adds explanatory line for each reply message\n messageList.append(line.strip() + \"\\n*****\\n^(This comment was made by a bot! 
PM me if you would like to suggest a positive message (:)\")\n return messageList", "def read_messages(self) -> List[DltMessage]:\r\n return [message for message in self.__iter__()]", "def read_mo(fileobj: SupportsRead[bytes]) -> Catalog:\n catalog = Catalog()\n headers = {}\n\n filename = getattr(fileobj, 'name', '')\n\n buf = fileobj.read()\n buflen = len(buf)\n unpack = struct.unpack\n\n # Parse the .mo file header, which consists of 5 little endian 32\n # bit words.\n magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?\n if magic == LE_MAGIC:\n version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])\n ii = '<II'\n elif magic == BE_MAGIC:\n version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])\n ii = '>II'\n else:\n raise OSError(0, 'Bad magic number', filename)\n\n # Now put all messages from the .mo file buffer into the catalog\n # dictionary\n for _i in range(msgcount):\n mlen, moff = unpack(ii, buf[origidx:origidx + 8])\n mend = moff + mlen\n tlen, toff = unpack(ii, buf[transidx:transidx + 8])\n tend = toff + tlen\n if mend < buflen and tend < buflen:\n msg = buf[moff:mend]\n tmsg = buf[toff:tend]\n else:\n raise OSError(0, 'File is corrupt', filename)\n\n # See if we're looking at GNU .mo conventions for metadata\n if mlen == 0:\n # Catalog description\n lastkey = key = None\n for item in tmsg.splitlines():\n item = item.strip()\n if not item:\n continue\n if b':' in item:\n key, value = item.split(b':', 1)\n lastkey = key = key.strip().lower()\n headers[key] = value.strip()\n elif lastkey:\n headers[lastkey] += b'\\n' + item\n\n if b'\\x04' in msg: # context\n ctxt, msg = msg.split(b'\\x04')\n else:\n ctxt = None\n\n if b'\\x00' in msg: # plural forms\n msg = msg.split(b'\\x00')\n tmsg = tmsg.split(b'\\x00')\n if catalog.charset:\n msg = [x.decode(catalog.charset) for x in msg]\n tmsg = [x.decode(catalog.charset) for x in tmsg]\n else:\n if catalog.charset:\n msg = msg.decode(catalog.charset)\n tmsg = tmsg.decode(catalog.charset)\n catalog[msg] = Message(msg, tmsg, context=ctxt)\n\n # advance to next entry in the seek tables\n origidx += 8\n transidx += 8\n\n catalog.mime_headers = headers.items()\n return catalog", "def load_message(message_id):\n\n # Create the directory from which _load_message will open the file.\n file_name = os.path.join('messages/', '{}.json'.format(message_id))\n\n # Use _load_message to get that file's contents into a dict.\n msg_dict = _load_message(file_name)\n\n # Assuming it's supposed to return the dict and NOT a list.\n return msg_dict", "def read_po(file):\n converted = ''\n for line in file:\n line = line.rstrip('\\n')\n m = re.search(r'^(msgid|msgstr) \"(.+)\"$', line)\n if m:\n converted += '%s \"\"\\n' % m.group(1)\n converted += '\"%s\"\\n' % m.group(2)\n else:\n converted += '%s\\n' % line\n return pofile.read_po(StringIO.StringIO(converted))", "def msgpack_loader(mp_path: Path, verbose: bool):\n tic = time.time()\n with open(mp_path, \"rb\") as f:\n buffer = f.read()\n if verbose:\n print(f\"[I/O: {time.time() - tic:.1f}s]\", end=\" \")\n tic = time.time()\n data = msgpack_np.unpackb(buffer, raw=False)\n if verbose:\n print(f\"[deserialisation: {time.time() - tic:.1f}s]\", end=\" \")\n return data", "def getMessageFeatures(msgfeatures):\n features = []\n with open(msgfeatures, \"r\") as f:\n for line in f:\n line = line.strip()\n if line != \"\":\n features.append(line)\n return features" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get pointcloud data from msg.
def get_pointclouds(msg):
    return msg['pointclouds']
[ "def process_cloud(self, msg):\n with self.m:\n self.actualP = msg #Receive pointcloud", "def point_cloud_msg(self, points, stamp):\n ros_dtype = sensor_msgs.msg.PointField.FLOAT32\n dtype = np.float32\n itemsize = np.dtype(dtype).itemsize\n\n data = points.astype(dtype).tobytes()\n\n fields = [sensor_msgs.msg.PointField(\n name=n, offset=i*itemsize, datatype=ros_dtype, count=1)\n for i, n in enumerate('xyzrgb')]\n\n header = Header(frame_id=\"camera_frame\", stamp=stamp)\n\n return PointCloud2(\n header=header,\n height=1,\n width=points.shape[0],\n is_dense=False,\n is_bigendian=False,\n fields=fields,\n point_step=(itemsize * 6),\n row_step=(itemsize * 6 * points.shape[0]),\n data=data\n )", "def unpack_point_msg(msg, stamped=False):\n if stamped:\n p = msg.point\n else:\n p = msg\n\n return p.x, p.y, p.z", "def callback_points(self, data):\n\t\t\n\t\tcloud_msg = pc2.read_points(data, field_names = (\"x\", \"y\", \"z\"), skip_nans=False)\n\t\tcloud_data = []\n\t\tfor p in cloud_msg:\n\t\t cloud_data.append([p[0],p[1],p[2]])\n\t\tcloud_data = np.array(cloud_data)\n\t\tself.cloud2 = np.reshape(cloud_data, [640, 480,3], order='F')", "def get_pointcloud(gp, point_fc, point_ident, zcoord):\n pointcloud = {}\n point_desc = gp.describe(point_fc)\n\n row = gp.SearchCursor(point_fc)\n for item in nens.gp.gp_iterator(row):\n feat = item.GetValue(point_desc.ShapeFieldName)\n item_id = item.GetValue(point_ident)\n\n point_x, point_y = calculate_xy(gp, feat)\n pnt_xyz = (point_x,\n point_y,\n float(item.GetValue(zcoord)))\n\n if item_id not in pointcloud:\n pointcloud[item_id] = [pnt_xyz]\n else:\n pointcloud[item_id].append(pnt_xyz)\n\n return pointcloud", "def crop_cloud_to_xyz(cloud_msg, bounding_box):\n return crop_cloud_msg_to_ndarray(cloud_msg, bounding_box, fields=['x', 'y', 'z'])", "def cloud_msg_to_ndarray(cloud_msg, fields=['x', 'y', 'z', 'r', 'g', 'b']):\n assert isinstance(cloud_msg, PointCloud2)\n cloud_record = ros_numpy.numpify(cloud_msg)\n cloud_record = ros_numpy.point_cloud2.split_rgb_field(cloud_record)\n cloud_array = np.zeros((*cloud_record.shape, len(fields)))\n index = 0\n for field in fields:\n cloud_array[:, :, index] = cloud_record[field]\n index += 1\n return cloud_array", "def extract_point_cloud(self):\n tsdf_vol = self._tsdf_vol.cpu().numpy()\n color_vol = self._color_vol.cpu().numpy()\n vol_origin = self._vol_origin.cpu().numpy()\n\n # Marching cubes\n verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]\n verts_ind = np.round(verts).astype(int)\n verts = verts*self._voxel_size + vol_origin\n\n # Get vertex colors\n rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]\n colors_b = np.floor(rgb_vals / self._const)\n colors_g = np.floor((rgb_vals - colors_b*self._const) / 256)\n colors_r = rgb_vals - colors_b*self._const - colors_g*256\n colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T\n colors = colors.astype(np.uint8)\n\n pc = np.hstack([verts, colors])\n return pc", "def pointcloud_import(self, pcd_fname):\n\t\tprint('pointcloud filename:')\n\t\tprint(pcd_fname)\n\t\tpc = pypcd.PointCloud.from_path(pcd_fname)\n\n\t\t# flatten into vector\n\t\ttemp = []\n\t\ttemp.append(pc.pc_data['x'][~np.isnan(pc.pc_data['x'])])\n\t\ttemp.append(pc.pc_data['y'][~np.isnan(pc.pc_data['x'])])\n\t\ttemp.append(pc.pc_data['z'][~np.isnan(pc.pc_data['x'])])\n\t\ttemp = np.array(temp)\n\t\tprint(temp.shape)\n\t\tobs_pc = temp.flatten('F') #flattened column wise, [x0, y0, z0, x1, y1, z1, x2, y2, ...]\n\n\t\treturn obs_pc", "def 
crop_cloud_msg_to_ndarray(cloud_msg, bounding_box, fields=['x', 'y', 'z', 'r', 'g', 'b'], offset=0):\n assert isinstance(bounding_box, BoundingBox2D)\n\n # fit box to cloud dimensions\n bounding_box = fit_box_to_image((cloud_msg.width, cloud_msg.height), bounding_box, offset)\n\n cloud_array = cloud_msg_to_ndarray(cloud_msg, fields=fields)\n cloud_array = cloud_array[\n bounding_box.x: bounding_box.x + bounding_box.height,\n bounding_box.y: bounding_box.y + bounding_box.width, :]\n return cloud_array", "def load_point_cloud(name, down_sample=False):\n plydata = PlyData.read(name)\n pcd = o3d.io.read_point_cloud(name)\n if down_sample:\n downpcd = pcd.voxel_down_sample(voxel_size=down_sample)\n pcd_tree = o3d.geometry.KDTreeFlann(pcd)\n try:\n return np.asarray(pcd.points), np.asarray(plydata.elements[0].data['class']), pcd_tree\n except:\n return np.asarray(pcd.points), pcd_tree", "def extract_array_from_pcd_obj(pcd):\n x = np.array(pcd.pc_data[\"x\"]).reshape(-1, 1)\n y = np.array(pcd.pc_data[\"y\"]).reshape(-1, 1)\n z = np.array(pcd.pc_data[\"z\"]).reshape(-1, 1)\n intensity = np.array(pcd.pc_data[\"intensity\"]).reshape(-1, 1)\n data = np.hstack([x, y, z, intensity])\n return data", "def construct_pointcloud(points):\n\n pc = PointCloud()\n pc.points = Vector3dVector(np.asanyarray(points))\n\n return pc", "def unpack_vector_msg(msg, stamped=False):\n if stamped:\n v = msg.vector\n else:\n v = msg\n\n return v.x, v.y, v.z", "def getArrayFromPointData(data_outVTK, field_name):\n\n # function display \n print '---- DAEPy::getArrayFromPointData ----'\n\n coord = np.array(\n [data_outVTK.GetPoint(i) for i in range(data_outVTK.GetNumberOfPoints())],\n dtype=np.float32)\n\n print '--> extract fields', [f for f in field_name]\n data_arr = [vtk_to_numpy(data_outVTK.GetPointData().GetArray(f)) for f in field_name]\n\n\n print ''\n return [coord] + data_arr", "def pc2_to_list(point_cloud2):\n\n # print str(point_cloud2.fields)\n\n # print len(list(pc2.read_points(point_cloud2, field_names=(\"x\",\"y\",\"z\"))))\n # print len(list(pc2.read_points(point_cloud2, field_names=(\"x\",\"y\",\"z\", \"rgb\"))))\n cloud_gen = pc2.read_points(point_cloud2, field_names=(\"x\",\"y\",\"z\", \"rgb\"))\n return list(cloud_gen)", "def ReadGCSData(self, tosend):\n debug('GCSCommands.ReadGCSData(tosend=%r)', tosend)\n checksize((1,), tosend)\n answer = self.__msgs.read(tosend, gcsdata=None)\n answer = getgcsheader(answer)\n debug('GCSCommands.ReadGCSData = %r', answer)\n return answer", "def point_data(self):\r\n return self._point_data", "def test_point_cloud():\n mesh = pv.Plane()\n cloud = pv.PolyData(mesh.points)\n assert point_cloud(cloud)", "def _pointGroupsCallback(self, msg : PointGroups) -> None:\n\n self.pointGroupsDict = []\n for group in msg.groups: \n tmp_dict = [{'map_pos' : [tmp for tmp in group.map_pos], \n 'group_id' : group.group_id,\n 'map_origin' : [group.map_origin[0], group.map_origin[1]],\n 'map_dims' : [group.map_dims[0], group.map_dims[1]],\n 'map_resol' : group.map_resolution,\n 'assoc_fl' : group.associated_file}]\n \n self.pointGroupsDict.append(tmp_dict)\n \n self.pointGroupsReceived = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create test input tensor.
def create_test_input(batch_size, height, width, channels):
    if None in [batch_size, height, width, channels]:
        return tf.placeholder(tf.float32, (batch_size, height, width, channels))
    else:
        return tf.to_float(
            np.tile(
                np.reshape(
                    np.reshape(np.arange(height), [height, 1]) +
                    np.reshape(np.arange(width), [1, width]),
                    [1, height, width, 1]),
                [batch_size, 1, 1, channels]))
[ "def gen_test_tensor_cpd():\n return TensorCPD(*gen_test_data())", "def _create_dummy_input(func_graph, template_tensor):\n with func_graph.as_default():\n return array_ops.placeholder(\n template_tensor.dtype, shape=template_tensor.shape)", "def new_tensor(data, is_batch=False, is_sequence=False, name=None):\n if hasattr(data, \"dtype\"):\n if data.dtype.name.startswith(\"float\"):\n data = np.array(data, FLOAT_TYPE)\n elif data.dtype.name.startswith(\"int\"):\n data = np.array(data, INT_TYPE)\n else:\n data = np.array(data, FLOAT_TYPE)\n shape = list(data.shape)\n if is_batch:\n shape[0] = None\n if is_sequence:\n shape[1] = None\n return placeholder_with_default(data, tuple(shape), name)", "def read_tensor_from_test_data(data_path, batch_size):\n print(\"Getting data tensor for test case...\")\n data = pd.read_csv(data_path, header = None, delimiter = ' ')\n data = np.array(data, dtype = 'float32')\n data_tensor_slice = tf.data.Dataset.from_tensor_slices(data)\n print(\"data tensor before batch\", data_tensor_slice)\n print(\"batch_size:\", batch_size)\n data_tensor_slice = data_tensor_slice.batch(batch_size, drop_remainder=True)\n #iterator = data_tensor_slice.make_one_shot_iterator()\n iterator = tf.data.Iterator.from_structure(data_tensor_slice.output_types, data_tensor_slice.output_shapes)\n data_tensor = iterator.get_next()\n pred_init_op = iterator.make_initializer(data_tensor_slice)\n print(\"Data_tensor:\",data_tensor)\n return data_tensor, pred_init_op", "def test_numpy_input_fn(self):\n label_dimension = 2\n batch_size = 10\n train_input_fn, eval_input_fn, predict_input_fn = self._create_input_fn(\n label_dimension, batch_size)\n\n self._test_complete_flow(\n train_input_fn=train_input_fn,\n eval_input_fn=eval_input_fn,\n predict_input_fn=predict_input_fn,\n input_dimension=label_dimension,\n label_dimension=label_dimension,\n batch_size=batch_size)", "def test_init(self):\n # ------ tests for basic properties of a tensor with default mode names\n true_shape = (2, 4, 8)\n true_size = reduce(lambda x, y: x * y, true_shape)\n true_order = len(true_shape)\n true_data = np.ones(true_size).reshape(true_shape)\n true_default_mode_names = ['mode-0', 'mode-1', 'mode-2']\n true_default_state = State(normal_shape=true_shape)\n tensor = Tensor(array=true_data)\n np.testing.assert_array_equal(tensor.data, true_data)\n assert (tensor.frob_norm == 8.0)\n assert (tensor.shape == true_shape)\n assert (tensor.ft_shape == true_shape)\n assert (tensor.order == true_order)\n assert (tensor.size == true_size)\n assert (tensor.mode_names == true_default_mode_names)\n assert (tensor._state == true_default_state)\n assert (tensor._data is not true_data) # check that is not a reference\n\n # ------ tests for creating a Tensor object with custom mode names\n true_custom_mode_names = ['time', 'frequency', 'channel']\n tensor = Tensor(array=true_data, mode_names=true_custom_mode_names)\n assert (tensor.mode_names == true_custom_mode_names)\n\n # ------ tests for creating a Tensor object in custom state\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J*K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n true_mode_names = [\"mode-0\", \"mode-1_mode-2\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_mode_names)\n\n # ------ tests for creating a Tensor object with in custom state and with 
custom mode names\n I, J, K = 2, 4, 8\n true_data = np.ones(I * J * K).reshape(I, J * K)\n true_ft_shape = (I, J, K)\n true_mode_order = ([0], [1, 2])\n custom_mode_names = [\"time\", \"frequency\", \"channel\"]\n true_custom_mode_names = [\"time\", \"frequency_channel\"]\n custom_state = dict(normal_shape=true_ft_shape,\n mode_order=true_mode_order,\n rtype=\"T\")\n tensor = Tensor(array=true_data, custom_state=custom_state, mode_names=custom_mode_names)\n assert (tensor.ft_shape == true_ft_shape)\n assert (tensor.mode_names == true_custom_mode_names)", "def create_test_taskgen(\n ds_test,\n n_samples=800,\n batch_size=4,\n nways=5,\n kquery=15,\n kshots = (5, 1),\n):\n\n return _generic_eval_taskgen([('test', ds_test)], None, n_samples, batch_size, nways, kquery, kshots)", "def set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0]['index']\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image", "def __init_input(self, im_shape):\n op_name = 'input'\n with tf.variable_scope(op_name):\n self.input_tensor = tf.placeholder(\n dtype=tf.float32, shape=(None,*im_shape))\n self.tensors[op_name] = self.input_tensor\n self.__inshape = self.input_tensor.get_shape().as_list()", "def input_tensors(self):\n pass", "def create_test_set(inputfile):\n testset = TrainTestSets(\"-t %s\" % inputfile)\n\n return testset", "def create_tensor(dtype, name=None, persistable=False):\n check_dtype(\n dtype,\n 'dtype',\n [\n 'bool',\n 'float16',\n 'float32',\n 'float64',\n 'int8',\n 'int32',\n 'int32',\n 'int64',\n ],\n 'create_tensor',\n )\n helper = LayerHelper(\"create_tensor\", **locals())\n return helper.create_variable(\n name=helper.name, dtype=dtype, persistable=persistable\n )", "def _get_input_tensor_name(): # TODO: only for OID API pretrained\n return 'image_tensor:0'", "def __init_tensors(self, im_shape):\n self.__init_tensor_register()\n self.__init_input(im_shape)", "def input_fn():\n serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,\n shape=[default_batch_size],\n name='input_example_tensor')\n inputs = {'examples': serialized_tf_example}\n features = parsing_ops.parse_example(serialized_tf_example, feature_spec)\n labels = None # these are not known in serving!\n return InputFnOps(features, labels, inputs)", "def create_input_fn(split, batch_size):\n\n def input_fn():\n \"\"\"input_fn for tf.estimator.Estimator.\"\"\"\n\n indir = FLAGS.input_dir\n tfrecord = 'train_data*.tfrecord' if split == 'train' else 'validation_data.tfrecord'\n\n def parser(serialized_example):\n\n features_ = {'img': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.string)}\n\n if split != 'train':\n features_['cl_live'] = tf.FixedLenFeature([], tf.string)\n features_['cl_mem'] = tf.FixedLenFeature([], tf.string)\n\n fs = tf.parse_single_example(\n serialized_example,\n features=features_\n )\n\n fs['img'] = tf.reshape(tf.cast(tf.decode_raw(fs['img'], tf.uint8),\n tf.float32) / 255.0, [__vh, __vw, 3])\n fs['label'] = tf.reshape(tf.decode_raw(fs['label'], tf.uint8), [__vh, __vw])\n fs['label'] = tf.cast(tf.one_hot(fs['label'], N_CLASSES), tf.float32)\n if split != 'train':\n fs['cl_live'] = tf.reshape(tf.cast(tf.decode_raw(fs['cl_live'], tf.uint8),\n tf.float32) / 255.0, [__vh, __vw, 3])\n fs['cl_mem'] = tf.reshape(tf.cast(tf.decode_raw(fs['cl_mem'], tf.uint8),\n tf.float32) / 255.0, [__vh, __vw, 3])\n fs['cl_live'] = tf.reshape(tf.image.resize_images(fs['cl_live'],\n (vh, vw)), [vh, vw, 3])\n fs['cl_mem'] = 
tf.reshape(tf.image.resize_images(fs['cl_mem'],\n (vh, vw)), [vh, vw, 3])\n\n return fs\n\n if split == 'train':\n files = tf.data.Dataset.list_files(indir + tfrecord, shuffle=True,\n seed=np.int64(time()))\n else:\n files = [indir + tfrecord]\n\n dataset = tf.data.TFRecordDataset(files)\n dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(400, seed=np.int64(time())))\n # dataset.shuffle(\n # 400, seed=np.int64(time()), reshuffle_each_iteration=True).repeat()\n # dataset.map(parser, num_parallel_calls=n_cpus() // 2).batch(batch_size if split == 'train' else batch_size // 3)\n # dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(400, seed=np.int64(time())))\n dataset = dataset.apply(tf.data.experimental.map_and_batch(parser,\n batch_size if split == 'train' else batch_size // 3,\n num_parallel_calls=n_cpus() // 2))\n\n dataset = dataset.prefetch(buffer_size=2)\n\n return dataset\n\n return input_fn", "def _create_input_spec(self, input_shape):\n dim = input_shape[self.axis]\n self.input_spec = tf.keras.layers.InputSpec(\n ndim=len(input_shape), axes={self.axis: dim})", "def gen_random_input(self):\n self.data = np.random.random(size=self.data_desc[\"shape\"]).astype(\n \"float32\"\n )", "def test_one_hot_encode_input():\n pass", "def assert_is_model_tensor(self, x: TensorLike) -> None:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the hairy graph vector space.
def __init__(self, n_vertices, n_loops, n_hairs, even_edges):
    self.n_vertices = n_vertices
    self.n_loops = n_loops
    self.n_hairs = n_hairs
    self.even_edges = even_edges
    self.sub_type = "even_edges" if even_edges else "odd_edges"
    # we count only the internal edges
    self.n_edges = self.n_loops + self.n_vertices - 1
    super(CHairyGraphVS, self).__init__()
    self.ogvs = OrdinaryGraphComplex.OrdinaryGVS(
        self.n_vertices + self.n_hairs, self.n_loops, even_edges)
[ "def __init_graph(self) -> None:\n self.graph = Graph()", "def __init__(self):\n self.g = nx.DiGraph()", "def zero(self):\n v = np.zeros(self.get_dimension())\n self.set_vector(v)", "def __init__(self, **kwargs):\n if not hasattr(self, \"graph\"):\n self.graph = -np.ones(self.keypoints_shape[0])\n if not hasattr(self, \"swap_index\"):\n self.swap_index = -np.ones(self.keypoints_shape[0])\n return", "def __init__(self, graph=None, **kwargs):\n super(HierS, self).__init__(graph, MurckoRingSystemFragmenter(), 'hiers')", "def _initialize_layer_stack(self):\n self.layer_stack = [(l, (pl, nl)) if self.pdk[nl]['Direction'] == 'h' else (l, (nl, pl)) \\\n for l, (pl, nl) in self.pdk.get_via_stack() if l.startswith('V')]", "def __init__(self):\n # Define vector displacement from COM to Sun\n self.r_s = np.array([-constants.MASS_JUPITER * constants.R / (constants.MASS_JUPITER + constants.MASS_SUN), 0,\n 0])\n\n # Define vector displacement from COM to Jupiter\n self.r_j = np.array([constants.MASS_SUN * constants.R / (constants.MASS_JUPITER + constants.MASS_SUN), 0,\n 0])", "def _init_h(self):\n self.H = np.random.random((self._num_bases, self._num_samples)) + 0.2", "def __init__(self, v):\n if v < 0:\n raise ValueError('Number of vertices must be non-negative')\n self.V = v\n self.E = 0\n self._indegree = [0 for _ in range(v)]\n self.adj = defaultdict(Bag)\n for v in range(v):\n self.adj[v] = Bag()", "def _initial_state_vector(hmodel: Model):\n isv = InitialVector()\n logger.debug(\"Building initial state vector\")\n for s_id, s in hmodel.states.items():\n if isinstance(s, Init) or isinstance(s, ShallowHistory):\n t = s.transitions[0]\n nca = nearest_common_ancestor(t.source, t.dest)\n parent_states = find_parent_states(t.dest, nca)\n # TODO: This will produce redundant assignments, states could possibly\n # be a set\n isv.states += parent_states\n isv.states.append(t.dest)\n\n _initial_state_vector_inner(hmodel, hmodel.root, isv, [])\n\n logger.debug(\"Initial state vector done\")\n return isv", "def initialize_computational_space(self, **kwargs):\n x_start = kwargs.pop('x_start', 0)\n x_end = kwargs.pop('x_end', 1)\n x_step = kwargs.pop('x_step', 0.1)\n\n y_start = kwargs.pop('y_start', 0)\n y_end = kwargs.pop('y_end', 1)\n z_start = kwargs.pop('z_start', 0)\n z_end = kwargs.pop('z_end', 1)\n yz_step = kwargs.pop('yz_step', 0.1)\n\n self.x_range = np.arange(x_start, x_end + x_step, x_step)\n self.y_range = np.arange(y_start, y_end + yz_step, yz_step)\n self.z_range = np.arange(z_start, z_end + yz_step, yz_step)", "def __init__(self, sum_vector_space):\n super(DeleteEdgesD, self).__init__(sum_vector_space,\n DeleteEdgesGO.generate_op_matrix_list(sum_vector_space))", "def __init__(self, vectors):\n self._clusterAttribution = {}\n self._centers = {}\n self._vectors = vectors\n self._hasChanged = False\n self._boxDims = {}\n self._boxSpacing = 15", "def _initialize_state_vector(self):\n np.random.seed(self.seed)\n self.initial_state = [0.0] * self.num_state_variables", "def __init__(self, hgrid: Hgrid, vgrid: Vgrid, fgrid: Fgrid):\n self._hgrid = hgrid\n self._vgrid = vgrid\n self._fgrid = fgrid\n self._open_boundaries = OpenBoundaries(hgrid)\n self._ncor = Coriolis.AUTO\n self._nws: Union[NWS, None] = None\n self._hydrology: List[Hydrology] = []", "def __init__(self, x=0., y=0.):\n if hasattr(x, \"__getitem__\"):\n x, y = x\n self._v = [float(x), float(y)]\n else:\n self._v = [float(x), float(y)]", "def space():\n space = Space()\n categories = {'asdfa': 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = 
Categorical('yolo', categories, shape=2)\n space.register(dim)\n dim = Integer('yolo2', 'uniform', -3, 6)\n space.register(dim)\n dim = Real('yolo3', 'alpha', 0.9)\n space.register(dim)\n return space", "def init_hessian(cls, x):\n\n x = numpy.ravel(x)\n\n # generate directions\n N = x.size\n M = (N*(N+1))/2\n L = (N*(N-1))/2\n S = numpy.zeros((N,M), dtype=x.dtype)\n\n s = 0\n i = 0\n for n in range(1,N+1):\n S[-n:,s:s+n] = numpy.eye(n)\n S[-n,s:s+n] = numpy.ones(n)\n s+=n\n i+=1\n S = S[::-1].T\n\n data = numpy.zeros(numpy.hstack([3,S.shape]), dtype=x.dtype)\n data[0] = x\n data[1] = S\n return cls(data)", "def __init__(self, p, verbose = 0):\n super(HadamardOpXi64, self).__init__(p, 0, verbose)\n # Next set sizes for Hadamard-like matrices\n self.set_sizes()\n # Next set the default position for method expand_hadamard \n self.free_cy_pos = -1 \n # Next set positions where Hadamard operation muse be done\n self.hadamard_operations = 3 + (15 << self.LOG_INT_FIELDS)\n # make tables and directives for code generation\n self.tables.update(self.make_tables())\n self.directives.update(self.make_directives())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce all connected hairy graphs with nhairs hairs, which are the last vertices in the ordering. Graphs can have multiple hairs, but not tadpoles or multiple edges.
def get_hairy_graphs(self, nvertices, nloops, nhairs, include_novertgraph=false):
    # Idea: produce all bipartite graphs, the second color being either of degree 1 or 2.
    # Degree 1 vertices are hairs, degree 2 vertices are edges and are removed later.
    nedges = nloops + nvertices - 1  # number of internal edges
    n_vertices_1 = nvertices
    n_vertices_2 = nhairs + nedges
    n_edges_bip = nhairs + 2 * nedges
    deg_range_1 = (3, n_edges_bip + 1)
    deg_range_2 = (1, 2)

    # check if valid
    unordered = []
    if (nvertices >= 1 and nloops >= 0 and nhairs >= 0
            and n_edges_bip >= n_vertices_2
            and n_edges_bip <= 2*n_vertices_2
            and n_edges_bip >= 3 * n_vertices_1
            and n_edges_bip <= n_vertices_1 * n_vertices_2):
        bipartite_graphs = NautyInterface.list_bipartite_graphs2(
            n_vertices_1, n_vertices_2, deg_range_1, deg_range_2, n_edges_bip)
        unordered = [self._bip_to_ordinary(
            G, nvertices, nedges, nhairs) for G in bipartite_graphs]
    # Produce all permutations of the hairs
    # all_perm = [ range(0,nvertices) + p for p in Permutations(range(nvertices, nvertices+nhairs)) ]
    # return [G.relabel(p, inplace=False) for p in all_perm ]
    if include_novertgraph and nvertices == 0 and nhairs == 2 and nloops == 0:
        unordered.append(Graph([(0, 1)]))
    return unordered
[ "def complete_to_chordal_graph(G):\n H = G.copy()\n alpha = {node: 0 for node in H}\n if nx.is_chordal(H):\n return H, alpha\n chords = set()\n weight = {node: 0 for node in H.nodes()}\n unnumbered_nodes = list(H.nodes())\n for i in range(len(H.nodes()), 0, -1):\n # get the node in unnumbered_nodes with the maximum weight\n z = max(unnumbered_nodes, key=lambda node: weight[node])\n unnumbered_nodes.remove(z)\n alpha[z] = i\n update_nodes = []\n for y in unnumbered_nodes:\n if G.has_edge(y, z):\n update_nodes.append(y)\n else:\n # y_weight will be bigger than node weights between y and z\n y_weight = weight[y]\n lower_nodes = [\n node for node in unnumbered_nodes if weight[node] < y_weight\n ]\n if nx.has_path(H.subgraph(lower_nodes + [z, y]), y, z):\n update_nodes.append(y)\n chords.add((z, y))\n # during calculation of paths the weights should not be updated\n for node in update_nodes:\n weight[node] += 1\n H.add_edges_from(chords)\n return H, alpha", "def hamiltonianPath(self):\n for path in itertools.permutations(sorted(self.index.values())):\n for i in xrange(len(path)-1):\n if ((path[i],path[i+1]) not in self.edge):\n break\n else:\n return [self.vertex[i] for i in path]\n return []", "def all_paths(graph):\n all_paths = []\n # uses the paths_len_2 function to get started\n paths_n = paths_len_2(graph)\n\n all_paths += paths_n\n\n # this loop will do all the >2 length paths\n for i in range(3, len(graph)+1):\n paths_len_n = []\n for path in paths_n:\n t_connections = graph[path[-1]]\n for n in t_connections:\n if not n in path:\n paths_len_n.append(path + [n])\n all_paths += paths_len_n\n paths_n = paths_len_n\n\n return all_paths", "def generate_graph(self):\n\n for i, (h1, h2, d) in enumerate(self.helix_graph.graph.edges(data=True)): # noqa: E501\n # It is easier to just add in all the nodes first, and then remove\n # the 'start' and 'end' dummy nodes at the end.\n self.graph.add_node(f'H{h1}',\n nucleotides=self.helix_graph.graph.node[h2]['nucleotides'], # noqa: E501\n bipartite='helix')\n self.graph.add_node(f'H{h2}',\n nucleotides=self.helix_graph.graph.node[h2]['nucleotides'], # noqa: E501\n bipartite='helix')\n\n # Add in the bulge nodes. 
These are what were the edges in the\n # helix_graph.\n self.graph.add_node(f'B{i}',\n nucleotides=d['nucleotides'],\n bipartite='bulge')\n\n self.graph.add_edge(f'H{h1}', f'B{i}')\n self.graph.add_edge(f'H{h2}', f'B{i}')\n\n self.graph.remove_node(f'Hstart')\n self.graph.remove_node(f'Hend')", "def _chordal_graph_cliques(G):\n if not is_chordal(G):\n raise nx.NetworkXError(\"Input graph is not chordal.\")\n\n for C in (G.subgraph(c).copy() for c in connected_components(G)):\n if C.number_of_nodes() == 1:\n yield frozenset(C.nodes())\n else:\n unnumbered = set(C.nodes())\n v = arbitrary_element(C)\n unnumbered.remove(v)\n numbered = {v}\n clique_wanna_be = {v}\n while unnumbered:\n v = _max_cardinality_node(C, unnumbered, numbered)\n unnumbered.remove(v)\n numbered.add(v)\n new_clique_wanna_be = set(C.neighbors(v)) & numbered\n sg = C.subgraph(clique_wanna_be)\n if _is_complete_graph(sg):\n new_clique_wanna_be.add(v)\n if not new_clique_wanna_be >= clique_wanna_be:\n yield frozenset(clique_wanna_be)\n clique_wanna_be = new_clique_wanna_be\n else:\n raise nx.NetworkXError(\"Input graph is not chordal.\")\n yield frozenset(clique_wanna_be)", "def gen_maze_longer_paths():\n G = nx.grid_graph(DIM)\n tree = nx.Graph()\n old_node = choice(list(G))\n tree.add_node(old_node)\n all_neighbors = Setch(*G.neighbors(old_node))\n while tree.order() < G.order():\n neighbors = [node for node in G.neighbors(old_node) \\\n if node not in tree]\n try:\n new_node = choice(neighbors)\n neighbors.remove(new_node)\n except IndexError: # Dead-end\n new_node = all_neighbors.choose()\n nodes_in_tree, neighbors = [], []\n for node in G.neighbors(new_node):\n (nodes_in_tree if node in tree else neighbors).append(node)\n old_node = choice(nodes_in_tree)\n all_neighbors.remove(new_node)\n tree.add_edge(old_node, new_node)\n all_neighbors += neighbors\n old_node = new_node\n return tree", "def get_bottom_blocks(tower):\n g = nx.subgraph(tower.graph.copy(), np.arange(1, len(tower) + 1))\n ids = []\n for i in tower.ordered_blocks:\n if len(g.pred[i]) == 0:\n ids.append(i)\n return ids", "def complete_reverse_graph(gph):\n\n revgph = {n: set() for n in gph}\n for n, e in gph.items():\n for n2 in e:\n n2_edges = revgph.setdefault(n2, set())\n n2_edges.add(n)\n\n gph_missing_n = revgph.keys() - gph.keys()\n gph = {**{k: set(v) for k, v in gph.items()}, **{n: set() for n in gph_missing_n}}\n return gph, revgph", "def moralize(self):\n moral_graph = self.to_undirected()\n\n for node in super(DynamicBayesianNetwork, self).nodes():\n moral_graph.add_edges_from(combinations(self.get_parents(node), 2))\n\n return moral_graph", "def hedgehog_graph(nvertices):\n G = Graph(2*nvertices)\n for j in range(nvertices):\n G.add_edge(j, j+nvertices)\n G.add_edge(j, (j+1) % nvertices)\n return G", "def A(G):\n partition_list = []\n # for i in set(combinations(G.nodes, 2)) - set(G.edges): # set of tuples\n for i in set(nx.non_edges(G)):\n partition = []\n partition.append(set(i))\n for v in set(G.nodes) - set(i):\n partition.append({v})\n partition_list.append(partition)\n return partition_list", "def clique_graph(g):\r\n cg = networkx.MultiGraph()\r\n for v in nodes(g):\r\n cg.add_node(v, **g.node[v])\r\n for e in edges(g):\r\n if isinstance(e, Hyperedge):\r\n eh = e.h\r\n else:\r\n eh = e\r\n for u in eh:\r\n for v in eh:\r\n if v != u:\r\n cg.add_edge(u, v, **edge(g, e))\r\n return cg", "def make_path(n: int) -> Graph:\n assert(n >= 0)\n\n g = Graph(n)\n for i in range(n-1):\n g.add_edge(i, i+1)\n \n return g", "def 
make_complete_graph(N):\n G = nx.Graph()\n for i in range(N):\n for j in range(N):\n if i == j:\n pass\n else:\n if (i, j) in G.edges():\n pass\n else:\n G.add_edge(i, j)\n labels = {}\n for node in G.nodes():\n labels[node] = str(node)\n return G", "def generate_connected_K(nb):\n\n graph = nx.Graph()\n graph = nx.disjoint_union(graph, nx.complete_graph(nb))\n graph = nx.disjoint_union(graph, nx.complete_graph(nb))\n\n for i in range(nb - 2):\n graph.add_edge(i, nb + i)\n return graph", "def __create_graph(self):\n self.clear() \n self.__ordered_network()\n self.__create_new_random_connections()", "def HouseGraph():\n pos_dict = {0:(-1,0),1:(1,0),2:(-1,1),3:(1,1),4:(0,2)}\n edges = [(0, 1), (0, 2), (1, 3), (2, 3), (2, 4), (3, 4)]\n return graph.Graph(edges, pos=pos_dict, name=\"House Graph\")", "def build_graph(iterations):\n graph = BASE_GRAPH\n outer_list = BASE_OUTER_LIST\n next_node_number = len(graph)\n for i in range(iterations):\n graph_copy = copy.deepcopy(graph)\n outer_list_copy = list(outer_list)\n outer_list = []\n for j in range(4):\n new_graph = copy_graph(graph_copy, outer_list_copy, next_node_number)\n # add edges to and from 0 node\n for node in new_graph[1]:\n new_graph[0][node].add(0)\n graph[0].add(node)\n next_node_number += len(new_graph[0])\n graph.update(new_graph[0])\n outer_list.extend(new_graph[1])\n return graph", "def makeGraph(self):\n r = self.get_rows()\n c = self.get_cols()\n\n #first of all... initializing the knights and storing them as initial nodes of the graph\n for k in self._knights:\n kgt = self.setGraph().insertNode(k.get_position(), k)\n self._knights_nodes.append(kgt) #storing the list of knights' nodes\n #node with a knight: knight_position + knight_weight\n k.completeTour(r, c) #calculating the complete tour for every knight\n for knight in self._knights:\n for step in knight.getMoves():\n move_from = step[0]\n move_to = step[1]\n node = self.setGraph().insertNode(move_from)\n moveNode = self.setGraph().insertNode(move_to)\n self.setGraph().linkNode(node, moveNode)\n knight.refreshBuffer() #just to free some memory..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the contract edges differential with the underlying sum vector space.
def __init__(self, sum_vector_space): super(ContractEdgesD, self).__init__(sum_vector_space, ContractEdgesGO.generate_op_matrix_list(sum_vector_space))
[ "def __init__(self, sum_vector_space):\n super(DeleteEdgesD, self).__init__(sum_vector_space,\n DeleteEdgesGO.generate_op_matrix_list(sum_vector_space))", "def _init_distance_vector(self):\r\n for router in [self.sourceRouter]+list(self.neighbours.keys()):\r\n self.routingTable[router] = {}\r\n self.routingTable[router][router] = {}\r\n self.routingTable[router][router]['distance'] = 0\r\n self.routingTable[router][router]['nextHopRouter'] = router\r\n\r\n for neighbourRouter, routerAddress in self.neighbours.items():\r\n sourceDV = self.routingTable[self.sourceRouter]\r\n neighbourDV = self.routingTable[neighbourRouter]\r\n\r\n sourceDV[neighbourRouter] = {}\r\n sourceDV[neighbourRouter]['distance'] = routerAddress['link_cost']\r\n sourceDV[neighbourRouter]['nextHopRouter'] = neighbourRouter\r\n\r\n neighbourDV[self.sourceRouter] = {}\r\n neighbourDV[self.sourceRouter]['distance'] = routerAddress['link_cost']\r\n neighbourDV[self.sourceRouter]['nextHopRouter'] = self.sourceRouter", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorD_swiginit(self,_vnl_vectorPython.new_vnl_vectorD(*args))", "def dt_allocate(self): \n self.DT = Constrained_Delaunay_triangulation_2()", "def __init__(self, d=8.5):\n self.l = 0.\n self.b = 0.\n self.d = d\n self.r = np.zeros(3) # r unit vector in cartesian coordinates\n self.t = np.zeros(3) # theta unit vector in cartesian coordinates\n self.p = np.zeros(3) # phi unit vector in cartesian coordinates\n\n self.u1 = np.zeros(3) # new base vector 1\n self.u2 = np.zeros(3) # new base vector 2\n self.u3 = np.zeros(3) # new base vector 3\n return", "def newDihedralConstraint(self, **attrlinks):\n return DihedralConstraint(self, **attrlinks)", "def construct_d_vine(nodes):", "def __init__(self, *args):\n _vnl_vectorPython.vnl_vectorLD_swiginit(self,_vnl_vectorPython.new_vnl_vectorLD(*args))", "def contract_exchange_descriptors(desc):\n # desc[0:6] = rho_data\n # desc[6:7] = g0\n # desc[7:10] = g1\n # desc[10:15] = g2\n # desc[15] = g0-r^2\n # g1 order: x, y, z\n # g2 order: xy, yz, z^2, xz, x^2-y^2\n\n N = desc.shape[1]\n res = np.zeros((12,N))\n rho_data = desc[:6]\n\n rho, s, alpha, tau_w, tau_unif = get_dft_input2(desc[:6])\n sprefac = 2 * (3 * np.pi * np.pi)**(1.0/3)\n n43 = rho**(4.0/3)\n svec = desc[1:4] / (sprefac * n43 + 1e-16)\n\n res[0] = rho\n res[1] = s**2\n res[2] = alpha\n\n # other setup\n g0 = desc[6]\n g1 = desc[7:10]\n g2 = desc[10:15]\n\n # g1_norm and 1d dot product\n g1_norm = np.linalg.norm(g1, axis=0)**2\n dot1 = np.einsum('an,an->n', svec, g1)\n\n # Clebsch Gordan https://en.wikipedia.org/wiki/Table_of_Clebsch%E2%80%93Gordan_coefficients\n g2_norm = 0\n for i in range(5):\n g2_norm += g2[i] * g2[i]\n g2_norm /= np.sqrt(5)\n\n res[3] = g0\n res[4] = g1_norm\n res[5] = dot1\n res[6] = g2_norm\n\n sgc = contract21(g2, svec)\n sgg = contract21(g2, g1)\n\n res[7] = np.einsum('pn,pn->n', sgc, svec)\n res[8] = np.einsum('pn,pn->n', sgc, g1)\n res[9] = np.einsum('pn,pn->n', sgg, g1)\n\n res[10] = desc[15]\n res[11] = desc[16]\n\n # res\n # 0: rho\n # 1: s\n # 2: alpha\n # 3: g0\n # 4: norm(g1)**2\n # 5: g1 dot svec\n # 6: norm(g2)**2\n # 7: svec dot g2 dot svec\n # 8: g1 dot g2 dot svec\n # 9: g1 dot g2 dot g1\n # 10: g0-r^2\n # 11: g0-r^4\n return res", "def __init__(self, vectors):\n self._clusterAttribution = {}\n self._centers = {}\n self._vectors = vectors\n self._hasChanged = False\n self._boxDims = {}\n self._boxSpacing = 15", "def init(self, init_edge):\n self.t = 0\n self.tendrils.insert(init_edge, 1)\n self.status[init_edge] = 
BOUNDARY\n self.Z = 2", "def __init__(self, deg, even_edges):\n self.even_edges = even_edges\n self.sub_type = OrdinaryGraphComplex.sub_types.get(even_edges)\n super(VertexLoopDegSlice, self).__init__(\n [OrdinaryGraphComplex.OrdinaryGVS(v, deg - v, self.even_edges) for v in range(0, deg + 1)], deg)", "def __init__(self, unary, pairwise):\n super(Energy, self).__init__()\n\n self.unary = unary\n self.pairwise = pairwise", "def __init__(self, span, lhs, rhs, dot=0, bindings=None):\n if bindings is None: bindings = {}\n \n # If the edge is complete, then substitute in the bindings,\n # and then throw them away. (If we didn't throw them away, we\n # might think that 2 complete edges are different just because\n # they have different bindings, even though all bindings have\n # already been applied.)\n if dot == len(rhs) and bindings:\n lhs = self._bind(lhs, bindings)\n rhs = [self._bind(elt, bindings) for elt in rhs]\n bindings = {}\n\n # Initialize the edge.\n TreeEdge.__init__(self, span, lhs, rhs, dot)\n self._bindings = bindings", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoCreaseAngleElement_init(self, state)", "def __init__(self, eigvec, data, weights):\n self.eigvec = eigvec\n self.ncomp = eigvec.shape[0]\n \n self.set_data(data, weights)", "def _initialize_state_vector(self):\n np.random.seed(self.seed)\n self.initial_state = [0.0] * self.num_state_variables", "def _init_dynamics(self):\n pass", "def __init__(self):\n # Define vector displacement from COM to Sun\n self.r_s = np.array([-constants.MASS_JUPITER * constants.R / (constants.MASS_JUPITER + constants.MASS_SUN), 0,\n 0])\n\n # Define vector displacement from COM to Jupiter\n self.r_j = np.array([constants.MASS_SUN * constants.R / (constants.MASS_JUPITER + constants.MASS_SUN), 0,\n 0])", "def init_d2v(self):\n \n feather = self.dd['feather']\n lr = self.dd['lr']\n mid = self.dd['mid']\n\n # remove feather idx from lr\n body_lr = []\n for (l, r) in lr:\n if l in feather or r in feather:\n continue\n else:\n body_lr.append([l, r])\n\n # d2vert\n body_lr = torch.tensor(body_lr)\n mid = torch.tensor(mid)\n num_v = len(self.dd['V'])\n num_d = len(body_lr) + len(mid)\n\n d2vert = torch.zeros([num_v, num_d]).float()\n for i in range(len(body_lr)):\n ml, mr = body_lr[i]\n d2vert[ml, i] = 1\n d2vert[mr, i] = 1\n for i in range(len(mid)):\n m = mid[i]\n d2vert[m, i+len(body_lr)] = 1\n \n # d2vert_inv\n d2vert_inv = torch.zeros([num_v, num_d]).float()\n for i in range(len(body_lr)):\n ml, mr = body_lr[i]\n d2vert_inv[ml, i] = 1\n d2vert_inv[mr, i] = -1\n for i in range(len(mid)):\n m = mid[i]\n d2vert_inv[m, i+len(body_lr)] = 1\n\n\n d2v = torch.stack([d2vert_inv, d2vert])\n d2v = d2v[:, None, :, :]\n\n return d2v" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Await and return the message or None on timeout.
def waitForMsg(self): rospy.loginfo('Waiting for message...') if self.timeout is not None: timeout_time = rospy.Time.now() + rospy.Duration.from_sec(self.timeout) while self.timeout is None or rospy.Time.now() < timeout_time: self.mutex.acquire() if self.msg is not None: rospy.loginfo('Got message.') message = self.msg if not self.latch: self.msg = None self.mutex.release() return message self.mutex.release() if self.preempt_requested(): self.service_preempt() rospy.loginfo('waitForMsg is preempted!') return 'preempted' rospy.sleep(.1) # TODO: maybe convert ROSInterruptException into valid outcome rospy.loginfo('Timeout on waiting for message!') return None
[ "def recv(self, timeout=None):\n\n if message := self.recv_message(timeout):\n return message.payload\n return None", "async def send_command_and_await(self, message: str, timeout: int = None) -> DroneResponse:\n pass", "def recv_message(self, timeout=None):\n\n if self.subscriber.poll(timeout):\n channel, timestamp, payload = self.subscriber.recv_multipart()\n return Message(channel.decode(\"utf-8\"), msgpack.unpackb(timestamp), msgpack.unpackb(payload))\n return None", "def call(self, message, timeout=None):\n self.send(message.pack())\n return self.recv(timeout=timeout)", "def wait_for_message(self):\n self._mutex.acquire()\n return self.topic_data", "def receive_message(self):\n # TODO - figure out best way to add blocking/nonblocking\n # For now - only non-blocking version which returns None\n msg = self._inc_mq.get_message()\n if msg:\n msg = msg.payload, msg.address[0], msg.address[1]\n return msg", "def get(self, timeout=None):\n\n try:\n res = self._q.get(timeout=timeout)\n except Queue.Empty:\n raise multiprocessing.TimeoutError(\"Timed out\")\n\n if isinstance(res, Exception):\n raise res\n return res", "async def async_receive(self):\n return await self.input_queue.get()", "def wait(self, timeout=None):\n if hasattr(self, '_result'):\n return\n try:\n self.get(timeout)\n except Exception:\n pass", "async def get_private_text_input(self, timeout: int) -> typing.Optional[str]:\n\n try:\n text = await self.bot.wait_for('message',\n check=self.bot.checks.wait_for_message_check(self.ctx),\n timeout=timeout)\n except asyncio.TimeoutError:\n await self.ctx.send(\":zzz: You took too long to reply.\")\n return None\n\n if not text.content:\n await self.ctx.send(\":x: You didn't reply with text.\")\n return None\n\n else:\n try:\n await text.delete()\n except Exception:\n pass\n\n return text.content", "def read(self, timeout_sec=None):\n # try:\n # return self._queue.get(timeout=timeout_sec)\n # except Queue.Empty:\n # # Timeout exceeded, return None to signify no data received.\n # return None", "def wait(self, timeout=None):\n\n try:\n res = self._q.get(timeout=timeout)\n except Queue.Empty:\n pass\n else:\n self._q.put(res)", "async def wait_for_response(self) -> discord.Message:\n\n def check(msg: discord.Message):\n if isinstance(msg.channel, discord.DMChannel):\n # Accept any message, because we are in a DM.\n return True\n return msg.channel == self.channel and msg.author == self.author\n\n return await self.bot.wait_for(\"message\", check=check)", "def result(self, message, timeout=None):\n if not isinstance(message, Message):\n message = Message.from_dict(self, message)\n self.logger.debug('result_message', extra=message.to_dict())\n return self.unit.result(message, timeout)", "def read(self, source, timeout=1):\n q = self._recv_queue[source]\n try:\n value = q.get(timeout=timeout)\n except queue.Empty:\n return None\n q.task_done()\n return value", "async def _get_response(self) -> str:\n resp = await self.ser.read_until_async(expected=b\";\", size=100)\n log.debug(\" got %s\", resp)\n if resp is None:\n raise TimeoutError(\"Timed out in _get_response\")\n return resp.decode()", "def get_message(inner_self, block=True, timeout=0.1, get_partition_info=None):\n messages = inner_self.get_messages(\n count=1,\n block=block,\n timeout=timeout\n )\n message = messages[0] if messages else None\n\n if get_partition_info or (get_partition_info is None and inner_self._partition_info):\n fake_partition_info = 0\n else:\n fake_partition_info = None\n\n if fake_partition_info is not None 
and message is not None:\n return fake_partition_info, message\n else:\n return message", "def wait_for_response(self, target_id):\n try:\n out = ''\n while self.is_connected or self.connect():\n try:\n msg = self.sock.recv(1024)\n if self.debug:\n print (msg) # pragma: no cover\n except socket.timeout: # pragma: no cover\n print (\"socket timed out\") # pragma: no cover\n self.is_connected = False # pragma: no cover\n continue # pragma: no cover\n except socket.error: # pragma: no cover\n traceback.print_exc(file=sys.stdout) # pragma: no cover\n raise # pragma: no cover\n\n out += msg\n if msg == '':\n self.is_connected = False # pragma: no cover\n\n # get the list of messages by splitting on newline\n raw_messages = out.split(\"\\n\")\n\n # the last one isn't complete\n out = raw_messages.pop()\n for raw_message in raw_messages:\n message = json.loads(raw_message)\n\n id = message.get('id')\n error = message.get('error')\n result = message.get('result')\n\n if id == target_id:\n if error:\n raise Exception( # pragma: no cover\n \"received error %s\" % message) # pragma: no cover\n else:\n return result\n else:\n # just print it for now\n print (message) # pragma: no cover\n except: # pragma: no cover\n traceback.print_exc(file=sys.stdout) # pragma: no cover\n\n self.is_connected = False # pragma: no cover", "def wait_for_message_start(self):\n\n byte_read = None\n while byte_read != MSG_START:\n byte_read = self._read1()\n if byte_read == '':\n # Timeout\n return None\n self.logger.debug_verbose(\"Wait for message start, byte_read=%r\" % byte_read)\n if byte_read in CTRL_CHARS:\n self.control_char_cb(byte_read)\n # Discard the unrecognized character\n\n return MSG_START", "async def wait_for_plm_command(plm, cmd, loop):\n try:\n with async_timeout.timeout(10, loop=loop):\n while not plm.transport.lastmessage == cmd.hex:\n await asyncio.sleep(.1, loop=loop)\n _LOGGER.info('Expected message sent %s', cmd)\n return True\n except asyncio.TimeoutError:\n _LOGGER.error('Expected message not sent %s', cmd)\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Formats the message for launchalert (doesn't include a mention)
async def launchalertformatter(launch): launchtime_tz = launch.net utc = datetime.now(timezone.utc) T_minus = chop_microseconds(launchtime_tz - utc) T_plus = timedelta(0) T = T_minus if T_minus < timedelta(0): T_plus = chop_microseconds(utc - launchtime_tz) T = T_plus T_str = "T+ {0}".format(T_plus) else: T_str = "T- {0}".format(T_minus) launchname = launch.name tz = launchtime_tz.tzname() launchtime = launchtime_tz.replace(tzinfo=None) probability = launch.probability launchstatus = launch.get_status() if probability == -1: probabilitystr = " not available" else: probabilitystr = '{0}%'.format(probability) msg = '' msg += '**__{0}__**\nNET {1} {2}\nWeather probability: {3}\n{4}\nStatus: {5}\n' msg = msg.format(launchname, launchtime, tz, probabilitystr, T_str, launchstatus.description) for formatter in (description, videourl): msg = formatter(msg, launch) return msg
[ "def get_message_alert(alert):\n measurement = get_measurement_from_rule(alert.alert)\n return \"At site %s \\n %s is %s %s%s\" % (alert.site, get_measurement_verbose_name(measurement),\n alert.alert.get_operator_display(), alert.alert.value,\n get_measurement_unit(measurement))", "def format_message(self):\n return ''.join(self.format_message_parts())", "def format_alert(alert: dict) -> dict:\n return {\n 'name': alert.get('name'),\n 'kind': alert.get('kind'),\n **alert.get('properties', {})\n }", "def get_slack_alert_msg(subject, alert):\n msg = ''\n data_point, value = get_alert_check_value(alert.alert)\n\n msg += subject\n msg += '\\n'\n msg += 'rule: ' + str(alert.alert)\n msg += '\\n'\n msg += 'found: ' + str(value)\n msg += '\\n'\n msg += 'At site: ' + str(alert.site)\n\n return msg", "def event_message() -> str:\n\n return \"Automation for the people!\"", "def format(self, event):\n return self.getMessage(event)", "def create_notification_line(msg):\n local_time = util.format_date(msg[\"time\"])\n message_line = click.style(\"{} : {} from {}\\n\".format(local_time, msg[\"type\"],\n msg[\"from\"]),\n fg=\"cyan\")\n message_line += \"{}\\n\".format(msg[\"content\"])\n return message_line", "def respond_to_shelter_alert():\n \n response = MessagingResponse()\n\n # Based on users response, message is returned\n inbound_message = request.form.get(\"Body\")\n\n # Respond to the user \n if inbound_message == \"Yes\":\n response.message(\"Contact us for an appointment.\")\n else:\n response.message(\"Check PAWS Finder for updates.\")\n\n return str(response)", "def format_message(self, op_name) -> str: # type: ignore[override]\n return self.message_default_template.format(op_name=op_name)", "def message(self): \n if self.content != '':\n return self.content\n else:\n return self.get_notification_type_display()", "def format_msg(msg):\n return re.sub(\"\\n\\s+\", \" \", msg)", "def alert(self, msg, title=u'Alert'):\n self._msgBox(str(msg), title, MESSAGEBOX, MSG_BUTTONS.BUTTONS_OK)", "def confirm_alert(self, kind, alert_content: dict, message: Message):\n alert_time = alert_content.get(\"alert_time\")\n name = alert_content.get(\"name\")\n file = alert_content.get(\"audio_file\")\n repeat = alert_content.get(\"repeat_frequency\", alert_content.get(\"repeat_days\"))\n final = alert_content.get(\"end_repeat\")\n script = alert_content.get(\"script_filename\")\n priority = alert_content.get(\"priority\")\n utterance = message.data.get(\"utterance\")\n\n # No Time Extracted\n if not alert_time:\n if kind == 'timer':\n self.speak_dialog('ErrorHowLong', private=True)\n else:\n self.speak_dialog(\"ErrorNoTime\", {\"kind\": kind}, private=True)\n # TODO: Get time and create event DM\n return\n # This shouldn't be possible...\n if not alert_time.tzinfo:\n LOG.warning(f\"Alert without tzinfo! 
{alert_time}\")\n if self.server:\n hint = \"Please make sure your location is set in your profile and try again\"\n else:\n hint = \"Please tell me your location and try again\"\n self.speak_dialog(\"ErrorScheduling\", {\"kind\": kind, \"hint\": hint}, private=True)\n return\n\n # LOG.debug(\">>>>>\" + str(alert_time))\n spoken_time_remaining = self._get_spoken_time_remaining(alert_time, message)\n spoken_alert_time = nice_time(alert_time, use_24hour=self.preference_unit(message)['time'] == 24)\n\n if isinstance(repeat, list):\n repeat = [int(r) for r in repeat]\n\n data = {'user': self.get_utterance_user(message),\n 'name': name,\n 'time': str(alert_time),\n 'kind': kind,\n 'file': file,\n 'script': script,\n 'priority': priority,\n 'repeat': repeat,\n 'final': str(final),\n 'utterance': utterance,\n 'context': message.context}\n\n self._write_event_to_schedule(data)\n\n if self.request_from_mobile(message):\n self._create_mobile_alert(kind, data, message)\n return\n if kind == \"timer\":\n self.speak_dialog('ConfirmTimer', {'duration': spoken_time_remaining}, private=True)\n if self.gui_enabled:\n self._display_timer_status(name, alert_time)\n return\n if not repeat:\n if data['file']:\n self.speak_dialog(\"ConfirmPlayback\", {'name': name,\n 'time': spoken_alert_time,\n 'duration': spoken_time_remaining}, private=True)\n elif data.get('script'):\n self.speak_dialog(\"ConfirmScript\", {'name': name,\n 'time': spoken_alert_time,\n 'duration': spoken_time_remaining}, private=True)\n else:\n self.speak_dialog('ConfirmSet', {'kind': kind,\n 'time': spoken_alert_time,\n 'duration': spoken_time_remaining}, private=True)\n else:\n if isinstance(repeat, int):\n repeat_interval = \"every \"\n repeat_interval += self._get_spoken_time_remaining(datetime.now(self._get_user_tz(message)) +\n timedelta(seconds=repeat), message)\n elif len(repeat) == 7:\n repeat_interval = 'every day'\n else:\n repeat_interval = \"every \" + \", \".join([WEEKDAY_NAMES[day] for day in repeat])\n if data['file']:\n self.speak_dialog('RecurringPlayback', {'name': name,\n 'time': spoken_alert_time,\n 'days': repeat_interval}, private=True)\n elif data.get('script'):\n self.speak_dialog('RecurringPlayback', {'name': name,\n 'time': spoken_alert_time,\n 'days': repeat_interval}, private=True)\n else:\n self.speak_dialog('ConfirmRecurring', {'kind': kind,\n 'time': spoken_alert_time,\n 'days': repeat_interval}, private=True)", "def send_notification():\n\n title = \"Sucessfully Joined Zoom Meeting!\"\n message = f'The following zoom meeting password is copied to your clipboard: {zoom_details[PASSWORD]}'\n command = f'''\n osascript -e 'display notification \"{message}\" with title \"{title}\"'\n '''\n os.system(command)", "def _get_formatted_message(label, context):\r\n current_language = get_language()\r\n\r\n # Setting the environment to the default language\r\n activate(settings.LANGUAGE_CODE)\r\n\r\n c = Context(context)\r\n template = 'notification/%s/notice.html' % label\r\n try:\r\n msg = loader.get_template(template).render(c)\r\n except TemplateDoesNotExist:\r\n logger.error(\"Template '%s' doesn't exist.\" % template)\r\n msg = None\r\n\r\n # Reset environment to original language\r\n activate(current_language)\r\n\r\n return msg", "def alert():\n showinfo(\"A propos\", \"Jeu crée par Sanjeevan et Enrick\\n\\nProjet M1106 - Année 2019/2020\")", "def display(self, message: str, scope: str = None, action: str = None):\n level = \"info\"\n if self.code.startswith(\"E\"):\n level = \"error\"\n elif 
self.code.startswith(\"W\"):\n level = \"warning\"\n\n return _etl_message(level,\n scope or self.scope,\n action or self.action,\n self.code,\n message)", "def _construct_msg(self) -> str:\n return '\\n'.join([\n self._formatted_filename(), self._err_description()])", "def alertdialog(self, title, description):\n\t\treturn self._message({'type':'alertdialog', 'title':title, 'description':description})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells information about next launch. -n Notifies launch notify group. -id Includes launch ID. -d Includes mission description. -v Includes video URL.
async def nextlaunch(self, ctx, *args): if not can_answer(ctx): return launches = launchlibrary.Launch.next(api, 1) if launches: launch = launches[0] launchname = launch.name launchtime_tz = launch.net utc = datetime.now(timezone.utc) tz = launchtime_tz.tzname() T = chop_microseconds(launchtime_tz - utc) launchtime = launchtime_tz.replace(tzinfo=None) probability = launch.probability if probability == -1: probabilitystr = "not available" else: probabilitystr = '{0}%'.format(probability) msg = '' if '-n' in args: if can_notify: msg = notify(msg, ctx) else: msg = "Notifying disabled. " msg += '**__{0}__**\nNET {1} {2}\nWeather probability: {3}\nT- {4}\n' msg = msg.format(launchname, launchtime, tz, probabilitystr, T) for arg, formatter in (('-id', id), ('-d', description), ('-v', videourl)): if arg in args: msg = formatter(msg, launch) await send(ctx, msg, args)
[ "async def launchbyid(self, ctx, *args):\n if not can_answer(ctx):\n return\n launchid = False\n for arg in args:\n if str(arg).isdigit():\n launchid = int(arg)\n if launchid:\n launch = launchlibrary.Launch.fetch(api, id=launchid)[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No ID provided.\"\n await send(ctx, msg, args)", "async def launchbyname(self, ctx, name, *args):\n if not can_answer(ctx):\n return\n for arg in args:\n if arg.startswith('-'):\n break\n else:\n name = name + ' ' + arg\n launches = launchlibrary.Launch.fetch(api, name=name)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-id', id), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No launch found with name provided.\"\n await send(ctx, msg, args)", "def start_launch(self, context, **kwargs):\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerunOf=self._cfg.rerun_of,\n **kwargs\n )\n if not self._skip_analytics:\n send_event(self.agent_name, self.agent_version)", "async def spacex(self, context):\n url = \"https://api.spacexdata.com/v4/launches/next\"\n async with aiohttp.ClientSession() as session:\n raw_response = await session.get(url)\n response = await raw_response.text()\n response = json.loads(response)\n launchpadurl = f\"https://api.spacexdata.com/v4/launchpads/{response['launchpad']}\"\n raw_response = await session.get(launchpadurl)\n launchpadresponse = await raw_response.text()\n launchpadresponse = json.loads(launchpadresponse)\n launchtime = response['date_unix']\n launchtime = datetime.fromtimestamp(launchtime, tz.UTC)\n now = datetime.now(tz=tz.tzutc())\n countdown = relativedelta.relativedelta(launchtime, now)\n launchtime = launchtime.strftime(\"%Y-%m-%d %H:%M:%S UTC\")\n cd = \"L- \"\n if countdown.days > 0:\n cd += f\"{countdown.days} days, \"\n if countdown.hours > 0:\n cd += f\"{countdown.hours} hours, \\n\"\n if countdown.minutes > 0:\n cd += f\"{countdown.minutes} mins, \"\n cd += f\"{countdown.seconds} secs\"\n embed = discord.Embed(\n title=\"Next SpaceX launch:\",\n color=0x00FF00\n )\n embed.add_field(name=\"Name:\", value=f\"{response['name']}\", inline=False)\n if not str(response['links']['patch']['small']).startswith(\"https\"):\n embed.set_thumbnail(url=\"https://cdn.iconscout.com/icon/free/png-256/spacex-282142.png\")\n else:\n embed.set_thumbnail(url=response['links']['patch']['small'])\n if str(response['links']['wikipedia']).startswith(\"https\"):\n embed.add_field(name=\"Wikipedia:\", value=f\"[{response['name']} page]({response['links']['wikipedia']})\", inline=False)\n embed.add_field(name=\"Launch time:\", value=launchtime, 
inline=True)\n embed.add_field(name=\"Launches in:\", value=cd, inline=True)\n embed.add_field(name=\"Launches From:\", value=f\"{launchpadresponse['full_name']}, {launchpadresponse['region']}\", inline=False)\n embed.add_field(name=\"Details:\", value=response['details'])\n if not isinstance(context.message.channel, discord.channel.DMChannel):\n webhook = await context.channel.create_webhook(name=\"lidstuff\")\n await webhook.send(embed=embed, username=context.message.author.display_name, avatar_url=context.message.author.avatar_url)\n await webhook.delete()\n await context.message.delete()\n else:\n await context.send(embed=embed)", "def get_launch_description(self, idx):\n return self.results[idx][\"mission\"][\"description\"]", "def onNewEpisode(self):\n self.consoleMsg('---> Starting Episode {}/{} <---'.format(self.currentEpisode,self.nEpisodes),topBorder=True)\n if self.resetEnvOnNewEpisode: self.envReset()\n self.done = False\n self.episodeStartTime = time.time()\n self.onNewEpisode_user()", "def announceTestStart(self, test, number, result=None):", "def launch(pipeline, id, revision, command_only, params_in, params_out, save_all, show_hidden, url):\n from nf_core.launch import Launch\n\n launcher = Launch(pipeline, revision, command_only, params_in, params_out, save_all, show_hidden, url, id)\n if not launcher.launch_pipeline():\n sys.exit(1)", "def go_for_launch(self, go_for_launch):\n\n\n self._go_for_launch = go_for_launch", "def propose_experiment_paused(self, it):\n if self.verbose:\n print 'step {0:s}'.format(str(it).zfill(len(str(self.max_it))))\n #print '\\t> training surrogates'\n #self.train_surrogates()\n # Are we drawing new design points or not?\n if isinstance(self.X_design, int):\n num_design = self.X_design\n X_design = design.latin_center(num_design, self.num_dim)\n else:\n X_design = self.X_design\n if self.verbose:\n print '\\t> done'\n print '\\t> computing expected improvement'\n ei = self.compute_expected_improvement(X_design)\n if self.verbose:\n print '\\t> done'\n i = np.argmax(ei)\n ei_max = ei[i]\n self.ei_values.append(ei_max)\n rel_ei_max = ei_max / self.ei_values[0]\n if self.verbose:\n print '\\t> rel_ei_max = {0:1.3f}'.format(rel_ei_max)\n if it >= self.add_at_least and rel_ei_max < self.rtol:\n if self.verbose:\n print '*** Converged (rel_ei_max = {0:1.7f} < rtol = {1:1.2e})'.format(rel_ei_max, self.rtol)\n print '\\t> writing final status'\n self.plot_status(it,final=True)\n return\n if self.verbose:\n print '\\t> adding design point', i\n print '\\t> X_d[i, :]', X_design[i, :]\n print '\\t> starting simulation'\n #print self.Y_pareto\n k = self.active_cells\n #for k in k:\n #print k\n lplus = self.active_cells_lplus\n #for lplus in lplus:\n #print lplus\n #y = self.obj_funcs(X_design[i,:])\n #print \"Run the experiment/code at the following design\"+str(X_design[i,:])\n self.response = \"Run the experiment/code at the following design\"+str(X_design[i,:])\n self.X_design_paused = X_design\n self.i_paused = i", "def get_launch(self, launch_id, count=1):\n assert count == 1\n record = self.launches.get(launch_id)\n if record:\n ret = json.loads(record)\n else:\n ret = None\n return defer.succeed(ret)", "def get_launch_name(self, idx):\n return self.results[idx][\"name\"]", "def execute_launch(self):\n self.ignition()\n self.ascent()\n self.setup_circularization()\n return", "def generate_launch_description():\n\n robot_parameters_file = os.path.join(\n get_package_share_directory('pr_bringup'),\n 'config',\n 'pr_config_params.yaml'\n )\n\n 
controller_params_file = os.path.join(\n get_package_share_directory('pr_bringup'),\n 'config',\n 'pr_pdg.yaml'\n )\n\n mocap_config = os.path.join(\n get_package_share_directory('pr_bringup'),\n 'config',\n 'mocap_server.yaml'\n )\n\n robot_yaml_file = open(robot_parameters_file)\n pr_params = yaml.load(robot_yaml_file)\n\n controller_yaml_file = open(controller_params_file)\n controller_params = yaml.load(controller_yaml_file)\n\n mocap_yaml_file = open(mocap_config)\n mocap_params = yaml.load(mocap_yaml_file)\n\n robot = controller_params['robot']['robot_name']\n robot_config = controller_params['robot']['config'] \n\n pr_config_params = pr_params[robot]['config'][robot_config]\n pr_physical_properties = pr_params[robot]['physical_properties']\n\n ref_file_q = controller_params['ref_path']['q']\n ref_file_x = controller_params['ref_path']['x']\n\n with open(ref_file_q, 'r') as f:\n first_reference_q = fromstring(f.readline(), dtype=float, sep=\" \").tolist()\n \n with open(ref_file_x, 'r') as f:\n first_reference_x = fromstring(f.readline(), dtype=float, sep=\" \").tolist()\n\n pr_pdg = ComposableNodeContainer(\n node_name='pr_container',\n node_namespace='',\n package='rclcpp_components',\n node_executable='component_container',\n composable_node_descriptions=[\n ComposableNode(\n package='pr_sensors_actuators',\n node_plugin='pr_sensors_actuators::Motor',\n node_name='motor_0',\n remappings=[\n (\"control_action\", \"control_action\"),\n (\"end_flag\", \"end_flag\")\n ],\n parameters=[\n {\"vp_conversion\": controller_params['actuators']['vp_conversion'][0]},\n {\"max_v\": controller_params['actuators']['v_sat']}\n ]\n ),\n ComposableNode(\n package='pr_sensors_actuators',\n node_plugin='pr_sensors_actuators::Motor',\n node_name='motor_1',\n remappings=[\n (\"control_action\", \"control_action\"),\n (\"end_flag\", \"end_flag\")\n ],\n parameters=[\n {\"vp_conversion\": controller_params['actuators']['vp_conversion'][1]},\n {\"max_v\": controller_params['actuators']['v_sat']}\n ]\n ),\n ComposableNode(\n package='pr_sensors_actuators',\n node_plugin='pr_sensors_actuators::Motor',\n node_name='motor_2',\n remappings=[\n (\"control_action\", \"control_action\"),\n (\"end_flag\", \"end_flag\")\n ],\n parameters=[\n {\"vp_conversion\": controller_params['actuators']['vp_conversion'][2]},\n {\"max_v\": controller_params['actuators']['v_sat']}\n ]\n ),\n ComposableNode(\n package='pr_sensors_actuators',\n node_plugin='pr_sensors_actuators::Motor',\n node_name='motor_3',\n remappings=[\n (\"control_action\", \"control_action\"),\n (\"end_flag\", \"end_flag\")\n ],\n parameters=[\n {\"vp_conversion\": controller_params['actuators']['vp_conversion'][3]},\n {\"max_v\": controller_params['actuators']['v_sat']}\n ]\n ),\n ComposableNode(\n package='pr_aux',\n node_plugin='pr_aux::Derivator',\n node_name='derivator',\n remappings=[\n (\"joint_position\", \"joint_position\"),\n (\"joint_velocity\", \"joint_velocity\")\n ],\n parameters=[\n {\"initial_value\": first_reference_q},\n {\"ts\": controller_params['ts']}\n ]\n ),\n ComposableNode(\n package='pr_ref_gen',\n node_plugin='pr_ref_gen::RefPose',\n node_name='ref_pose_gen',\n remappings=[\n (\"ref_pose\", \"ref_pose\"),\n (\"end_flag\", \"end_flag\"),\n (\"joint_position\", \"joint_position\")\n ],\n parameters=[\n {\"ref_path\": ref_file_q},\n {\"is_cart\": False},\n {\"robot_config_params\": pr_config_params}\n ]\n ),\n\n ComposableNode(\n package='pr_modelling',\n node_plugin='pr_modelling::ForwardKinematics',\n node_name='for_kin',\n 
remappings=[\n (\"joint_position\", \"joint_position\"),\n (\"x_coord\", \"x_coord\"),\n ],\n parameters=[\n {\"robot_config_params\": pr_config_params},\n {\"initial_position\": first_reference_x},\n {\"tol\": controller_params['dir_kin']['tol']},\n {\"iter\": controller_params['dir_kin']['iter']},\n ]\n ),\n\n ComposableNode(\n package='pr_modelling',\n node_plugin='pr_modelling::InverseKinematics',\n node_name='inv_kin',\n remappings=[\n (\"x_coord\", \"x_coord\"),\n (\"q_sol\", \"q_sol\"),\n ],\n parameters=[\n {\"robot_config_params\": pr_config_params},\n ]\n ),\n\n ComposableNode(\n package='pr_modelling',\n node_plugin='pr_modelling::IndependentJacobian',\n node_name='ind_jac',\n remappings=[\n (\"q_sol\", \"q_sol\"),\n (\"ind_jac\", \"ind_jac\"),\n ],\n parameters=[\n ]\n ),\n\n ComposableNode(\n package='pr_modelling',\n node_plugin='pr_modelling::DependentJacobian',\n node_name='dep_jac',\n remappings=[\n (\"x_coord\", \"x_coord\"),\n (\"q_sol\", \"q_sol\"),\n (\"dep_jac\", \"dep_jac\")\n ],\n parameters=[\n {\"robot_config_params\": pr_config_params}\n ]\n ),\n\n ComposableNode(\n package='pr_modelling',\n node_plugin='pr_modelling::RastT',\n node_name='rast_t',\n remappings=[\n (\"dep_jac\", \"dep_jac\"),\n (\"ind_jac\", \"ind_jac\"),\n (\"rast_t\", \"rast_t\")\n ],\n parameters=[\n ]\n ),\n\n ComposableNode(\n package='pr_modelling',\n node_plugin='pr_modelling::QGrav',\n node_name='q_grav',\n remappings=[\n (\"x_coord\", \"x_coord\"),\n (\"q_sol\", \"q_sol\"),\n (\"rast_t\", \"rast_t\")\n ],\n parameters=[\n {\"p11\": pr_physical_properties['p11']},\n {\"p12\": pr_physical_properties['p12']},\n {\"p21\": pr_physical_properties['p21']},\n {\"p22\": pr_physical_properties['p22']},\n {\"p31\": pr_physical_properties['p31']},\n {\"p32\": pr_physical_properties['p32']},\n {\"p41\": pr_physical_properties['p41']},\n {\"p42\": pr_physical_properties['p42']},\n {\"pm\": pr_physical_properties['pm']},\n ]\n ),\n\n ComposableNode(\n package='pr_controllers',\n node_plugin='pr_controllers::PDGController',\n node_name='controller',\n remappings=[\n (\"ref_pose\", \"ref_pose\"),\n (\"joint_position\", \"joint_position\"),\n (\"joint_velocity\", \"joint_velocity\"),\n (\"q_grav\", \"q_grav\")\n ],\n parameters=[\n {\"kp_gain\": controller_params['controller']['kp']},\n {\"kv_gain\": controller_params['controller']['kv']},\n ]\n ),\n \n ComposableNode(\n package='pr_sensors_actuators',\n node_plugin='pr_sensors_actuators::Encoders',\n node_name='position_sensors',\n remappings=[\n (\"joint_position\", \"joint_position\")\n ],\n parameters=[\n {\"ts_ms\": controller_params['ts']*1000},\n {\"initial_position\": first_reference_q}\n ]\n ), \n ComposableNode(\n package='pr_mocap',\n node_plugin='pr_mocap::PRXMocap',\n node_name='mocap',\n remappings=[\n (\"x_coord_mocap\", \"x_coord_mocap\")\n ],\n parameters=[\n {\"server_address\": mocap_params[\"server_address\"]},\n {\"server_command_port\": mocap_params[\"server_command_port\"]},\n {\"server_data_port\": mocap_params[\"server_data_port\"]},\n {\"marker_names\": mocap_params[\"marker_names\"][robot]},\n {\"robot_5p\": robot==\"robot_5p\"},\n ]\n ),\n ComposableNode(\n package='pr_mocap',\n node_plugin='pr_mocap::ErrorModel',\n node_name='model_error',\n remappings=[\n (\"x_mocap_error\", \"x_mocap_error\")\n ],\n parameters=[\n {\"tol\": 0.01}\n ]\n ), \n ],\n output='screen',\n )\n\n return launch.LaunchDescription([pr_pdg])", "def launch_next(self, task=None, result=None):\n if task:\n next_task = self.next(task)\n if next_task:\n # 
noinspection PyUnresolvedReferences\n return next_task.send(result=result)\n else:\n return self.set_status(task.status, result)\n elif len(self.tasks) > 0:\n return self.tasks[0].send(result=result)\n else:\n return Result(retcode=1, stderr=\"Nothing to do, empty operation !\")", "def new_episode(self):\n pass", "async def listbyname(self, ctx, name, *args):\n if not can_answer(ctx):\n return\n num = 5\n for arg in args:\n if arg.startswith('-'):\n break\n else:\n name = name + ' ' + arg\n for arg in args:\n if arg[1:].isdigit() and arg.startswith('-'):\n num = int(arg[1:])\n launches = launchlibrary.Launch.fetch(api, name=name)\n msg = discord.Embed(title=\"Listing launches found with {0}:\\n\".format(name))\n if launches:\n for launch in launches[:num]:\n net = launch.net\n value = \"Date: {0}\".format(net.date())\n if net.time() != datetime(2000, 1, 1, 0).time(): # check if time is set to 0\n value += \", Time: {0}\".format(net.time())\n if \"-s\" in args:\n value += \", Status: {0}\".format(launch.get_status().name)\n if \"-id\" in args:\n value += \", ID: {0}\".format(launch.id)\n msg.add_field(name=launch.name, value=value, inline=False)\n await ctx.send(embed=msg)\n else:\n msg = \"No launches found with provided name.\"\n await send(ctx, msg, args)", "def next_activity(self, id):\n assert id in self.activities()\n A = self.activitylist()\n k = [k for (k,a) in enumerate(A) if a.id() == id][0]\n return A[k+1] if k<len(A)-1 else None", "def runEpisode(self):\n self.mainUpdate()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Enables launch alerts until next shutdown. Only authorities can use this. [int] Minutes before launch to alert. (default = 15, max = 99)
async def launchalert(self, ctx, alerttime='15'): author = ctx.author if author.guild_permissions.administrator or author.id in authorities: await ctx.send("Command currently disabled") # if len(alerttime) < 2: # alerttime = int(alerttime) # msg = "Launch alerts enabled. Alerts at T- {0}minutes".format(alerttime) # await ctx.send(msg) # else: # await ctx.send("You sure would like to know early.")
[ "def do_something_every_hour():\n sleep(5)", "def on_enable_notifier(self, widget):\n try:\n if widget.get_active():\n subprocess.run([\"systemctl\", \"--user\", \"enable\", \"mintupdate-automation-notifier.timer\"])\n else:\n subprocess.run([\"systemctl\", \"--user\", \"disable\", \"mintupdate-automation-notifier.timer\"])\n except:\n self.application.logger.write(f\"Exception in on_enable_notifier:\\n{traceback.format_exc()}\")", "def catch_alarm():\n comm_time_to_call_heart_beat = True", "def manage():\n\t\tif Awake.refreshCounter[0] % 10 == 0:\n\t\t\tif Awake.config.isChanged():\n\t\t\t\tAwake.config.load()\n\t\tAwake.refreshCounter[0] += 1\n\n\t\tif Awake.config.activated:\n\t\t\tAwake.awakeCounter[0] -= 1\n\t\t\tif Awake.awakeCounter[0] <= 0:\n\t\t\t\tuseful.syslog(\"Sleep %d s\"%Awake.config.sleepDuration)\n\n\t\t\t\t# Set the wake up on PIR detection\n\t\t\t\tAwake.setPinWakeUp()\n\t\t\t\tmachine.deepsleep(Awake.config.sleepDuration*1000)", "def _thread_sleep(self) -> None:\n local_jm_interval = 2\n if isinstance(self._launcher, (LocalLauncher)):\n time.sleep(local_jm_interval)\n else:\n time.sleep(CONFIG.jm_interval)", "def setcheckinterval(interval):\n\tpass", "def TriggerEnableScheduledDailyActivities(self):\n pass", "def _cron(self):\n while True:\n self.check_update()\n sleep(60)", "def set_SleepTimeEnabled(self, value):\n super(UpdateAccountSettingsInputSet, self)._set_input('SleepTimeEnabled', value)", "def ramp_up_minutes_to_wait_on_logoff(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"ramp_up_minutes_to_wait_on_logoff\")", "def wait_for_alarm_start(raiden_apps):\n while True:\n if len([\n True for app in raiden_apps\n if app.raiden.alarm.last_block_number is None\n ]) == 0:\n return\n gevent.sleep(0.5)", "def once_per_hour():\n log.info('2 minutes passed')", "def set_other_update_automation_level(self, widget):\n index = widget.get_active()\n model = self.combobox_other_updates.get_model()\n # the second column is the update interval days\n days = model[index][1]\n self.settings.set_int(\"regular-auto-launch-interval\", days)", "def cron_hourly(self):\n self.poll() # Performs a /status/scan\n self.show_messages() # Display any messages (in real client this could be handled various ways\n self.expire_data()", "def logSleep(self, timeSlept):\n self.slept = timeSlept/3600", "def wake_up(self):\n self.send_action(\"wake_up\")", "def ramp_up_minutes_to_wait_on_logoff(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"ramp_up_minutes_to_wait_on_logoff\")", "def setStartupMessage():\n pass", "def activate_powerup(self, instance, value):\n\n # Unschedule remove powerup so that full length of additional pellets is experienced\n Clock.unschedule(self.__remove_powerup)\n Clock.unschedule(self.__indicate_powerup_end)\n self.game.sounds['power_up'].play()\n self.powered_up = True\n Clock.schedule_once(self.__remove_powerup, self.game.powerup_length)\n Clock.schedule_once(self.__indicate_powerup_end, self.game.powerup_length - POWERUP_END_WARNING_TIME)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells information about launch with provided ID. [int] ID of the launch. -r Includes holdreason and failreason. -v Includes video URL. -d Includes mission description.
async def launchbyid(self, ctx, *args): if not can_answer(ctx): return launchid = False for arg in args: if str(arg).isdigit(): launchid = int(arg) if launchid: launch = launchlibrary.Launch.fetch(api, id=launchid)[0] launchname = launch.name launchstatus = launch.get_status().description launchtime_tz = launch.net tz = launchtime_tz.tzname() launchtime = launchtime_tz.replace(tzinfo=None) msg = '**__{0}__**\n{1}\nNET {2} {3}\n' msg = msg.format(launchname, launchstatus, launchtime, tz) for arg, formatter in (('-r', reasons), ('-d', description), ('-v', videourl)): if arg in args: msg = formatter(msg, launch) else: msg = "No ID provided." await send(ctx, msg, args)
[ "def get_launch(self, launch_id, count=1):\n assert count == 1\n record = self.launches.get(launch_id)\n if record:\n ret = json.loads(record)\n else:\n ret = None\n return defer.succeed(ret)", "def get_launch(self, launch_id, count=1):\n return self._get_record(launch_id, self._launch_column_family, count)", "async def launchbyname(self, ctx, name, *args):\n if not can_answer(ctx):\n return\n for arg in args:\n if arg.startswith('-'):\n break\n else:\n name = name + ' ' + arg\n launches = launchlibrary.Launch.fetch(api, name=name)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-id', id), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No launch found with name provided.\"\n await send(ctx, msg, args)", "async def nextlaunch(self, ctx, *args):\n if not can_answer(ctx):\n return\n launches = launchlibrary.Launch.next(api, 1)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchtime_tz = launch.net\n utc = datetime.now(timezone.utc)\n tz = launchtime_tz.tzname()\n T = chop_microseconds(launchtime_tz - utc)\n launchtime = launchtime_tz.replace(tzinfo=None)\n probability = launch.probability\n if probability == -1:\n probabilitystr = \"not available\"\n else:\n probabilitystr = '{0}%'.format(probability)\n msg = ''\n if '-n' in args:\n if can_notify:\n msg = notify(msg, ctx)\n else:\n msg = \"Notifying disabled. \"\n msg += '**__{0}__**\\nNET {1} {2}\\nWeather probability: {3}\\nT- {4}\\n'\n msg = msg.format(launchname, launchtime, tz, probabilitystr, T)\n for arg, formatter in (('-id', id), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n await send(ctx, msg, args)", "def get_launch_description(self, idx):\n return self.results[idx][\"mission\"][\"description\"]", "def print_activity_run_details(activity_run):\r\n print(\"\\n\\tActivity run details\\n\")\r\n print(\"\\tActivity run status: {}\".format(activity_run.status))\r\n if activity_run.status == 'Succeeded':\r\n print(\"\\tNumber of bytes read: {}\".format(activity_run.output['dataRead']))\r\n print(\"\\tNumber of bytes written: {}\".format(activity_run.output['dataWritten']))\r\n print(\"\\tCopy duration: {}\".format(activity_run.output['copyDuration']))\r\n else:\r\n print(\"\\tErrors: {}\".format(activity_run.error['message']))", "def detail_report(id: int):\n\n # Load the peak data.\n db = Persistence()\n if not (activity := db.load_by_id(id)):\n print(f\"Cannot find activity #{id}\")\n return\n\n # Calculate transient data\n calculate_transient_values(activity)\n\n # Print our data\n _print_basic_data(activity)\n _print_power(activity)\n _print_heart(activity)\n\n # Finish off\n if activity.aerobic_decoupling:\n _print_aerobic_decoupling(activity)\n _print_peaks(activity)\n\n # Done\n print()", "def video_details(self, id=None):\n\n\t\tdetails = None\n\n\t\tif id is None:\n\t\t\tcontent_list = self.get_control('Content List')\n\t\t\tpos = content_list.getSelectedPosition()\n\n\t\t\tdesc, id = self.data[pos]\n\n\t\tdlg = xbmcgui.DialogProgress()\n\t\tdlg.create('YouTube', 'Getting video details')\n\n\t\tself.yt.set_report_hook(self.progress_handler, dlg)\n\n\t\ttry:\n\t\t\tdetails = 
self.yt.get_video_details(id)\n\t\texcept DownloadAbort, e:\n\t\t\t# Just fall through as return value defaults to None\n\t\t\tpass\n\t\texcept DownloadError, e:\n\t\t\terr_dlg = xbmcgui.Dialog()\n\t\t\terr_dlg.ok('YouTube', 'There was an error.', e.value)\n\n\t\tdlg.close()\n\n\t\treturn details", "def print_activity_run_details(activity_run):\n now = datetime.utcnow()\n\n print(f\"{now} - Activity run status: {activity_run.status}\")\n if activity_run.status in ['Succeeded', 'InProgress']:\n print(f\"{now} - activity_run: {activity_run}\")\n else:\n print(f\"{now} - Errors: {activity_run.error}\")", "def game_details(id):\n game = Game.query.filter(\n Game.api_id == id).first()\n\n if not game:\n game = add_game_to_db(id)\n\n collection_api_ids = get_collection_api_ids(g.user)\n\n return render_template('game_detail.html', game=game, collection_api_ids=collection_api_ids)", "async def raffle_desc(self, ctx, id: int, *, description):\n if id == 0:\n await ctx.message.delete()\n return\n else:\n raffle = await self.bot.mdb['raffle'].find_one({'id': int(id)})\n if raffle is not None:\n await ctx.message.delete()\n raffle['description'] = description\n totalTickets = raffle.get('totalTickets', 0)\n embed = self.createRaffleEmbed(raffle['title'], raffle['description'], raffle['id'], totalTickets, raffle['cost'])\n guild = ctx.guild\n ch = guild.get_channel(int(raffle['channelId']))\n message = await ch.fetch_message(int(raffle['msgId']))\n await message.edit(content=None, embed=embed)\n await self.bot.mdb['raffle'].replace_one({\"id\": raffle['id']}, raffle)\n await ctx.send(f\"Description of raffle `{id}` was changed to: `{description}`.\")\n else:\n await ctx.send(f\"Raffle with Id `{id}` not found.\")\n await ctx.message.delete()", "def launch(pipeline, id, revision, command_only, params_in, params_out, save_all, show_hidden, url):\n from nf_core.launch import Launch\n\n launcher = Launch(pipeline, revision, command_only, params_in, params_out, save_all, show_hidden, url, id)\n if not launcher.launch_pipeline():\n sys.exit(1)", "def _get_irida_sistr_run_info(self):\n info = OrderedDict()\n info['appname'] = self.appname\n info['version'] = __version__\n info['command_line'] = self.command_line\n info['irida_url'] = self.irida_url\n info['username'] = self.username\n info['app_run_date'] = datetime.now()\n\n if self.sample_created_min_date:\n info['sample_created_min_date'] = self.sample_created_min_date\n\n return info", "def start_launch(self, context, **kwargs):\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerunOf=self._cfg.rerun_of,\n **kwargs\n )\n if not self._skip_analytics:\n send_event(self.agent_name, self.agent_version)", "def view(job_id: str, debug: bool) -> None:\n try:\n fdp_hist.show_job_log(os.getcwd(), job_id)\n except fdp_exc.FAIRCLIException as e:\n e.err_print()\n if e.level.lower() == \"error\":\n sys.exit(e.exit_code)", "def run(self, imap_obj: imaplib.IMAP4):\n typ, data = imap_obj.xatom(\n 'ID',\n '(\"name\" \"{}\" \"version\" \"{}\")'.format(__description__, __version__)\n )\n self.check_response(typ, data)\n data = self.parse_string_pair(\n self.untagged_value(imap_obj, 'ID', '')\n )\n return from_id_command(data, imap_obj.host, imap_obj.port)", "def _get_lti_launch_debug_values(self):\n ai = self._application_instance\n\n return {\n \"Organization ID\": 
ai.organization.public_id if ai.organization else None,\n \"Application Instance ID\": ai.id,\n \"LTI version\": ai.lti_version,\n }", "async def listbyname(self, ctx, name, *args):\n if not can_answer(ctx):\n return\n num = 5\n for arg in args:\n if arg.startswith('-'):\n break\n else:\n name = name + ' ' + arg\n for arg in args:\n if arg[1:].isdigit() and arg.startswith('-'):\n num = int(arg[1:])\n launches = launchlibrary.Launch.fetch(api, name=name)\n msg = discord.Embed(title=\"Listing launches found with {0}:\\n\".format(name))\n if launches:\n for launch in launches[:num]:\n net = launch.net\n value = \"Date: {0}\".format(net.date())\n if net.time() != datetime(2000, 1, 1, 0).time(): # check if time is set to 0\n value += \", Time: {0}\".format(net.time())\n if \"-s\" in args:\n value += \", Status: {0}\".format(launch.get_status().name)\n if \"-id\" in args:\n value += \", ID: {0}\".format(launch.id)\n msg.add_field(name=launch.name, value=value, inline=False)\n await ctx.send(embed=msg)\n else:\n msg = \"No launches found with provided name.\"\n await send(ctx, msg, args)", "def vip_details(request, id):\n template = loader.get_template('vip/vip_details.html')\n\n try:\n vip = Vip.objects.get(id=id)\n\n lessons = Lesson.objects.filter(lecturer_id=id)\n\n context = {\n 'vip': vip,\n 'lessons': lessons\n }\n\n except Vip.DoesNotExist:\n context = None\n\n return CustomHttpResponse.send(template, context, request)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells information about launch with provided name. "str" Name of the launch. (always first) -id Includes id of the launch. -r Includes holdreason and failreason. -v Includes video URL. -d Includes mission description.
async def launchbyname(self, ctx, name, *args): if not can_answer(ctx): return for arg in args: if arg.startswith('-'): break else: name = name + ' ' + arg launches = launchlibrary.Launch.fetch(api, name=name) if launches: launch = launches[0] launchname = launch.name launchstatus = launch.get_status().description launchtime_tz = launch.net tz = launchtime_tz.tzname() launchtime = launchtime_tz.replace(tzinfo=None) msg = '**__{0}__**\n{1}\nNET {2} {3}\n' msg = msg.format(launchname, launchstatus, launchtime, tz) for arg, formatter in (('-r', reasons), ('-id', id), ('-d', description), ('-v', videourl)): if arg in args: msg = formatter(msg, launch) else: msg = "No launch found with name provided." await send(ctx, msg, args)
[ "async def launchbyid(self, ctx, *args):\n if not can_answer(ctx):\n return\n launchid = False\n for arg in args:\n if str(arg).isdigit():\n launchid = int(arg)\n if launchid:\n launch = launchlibrary.Launch.fetch(api, id=launchid)[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No ID provided.\"\n await send(ctx, msg, args)", "async def listbyname(self, ctx, name, *args):\n if not can_answer(ctx):\n return\n num = 5\n for arg in args:\n if arg.startswith('-'):\n break\n else:\n name = name + ' ' + arg\n for arg in args:\n if arg[1:].isdigit() and arg.startswith('-'):\n num = int(arg[1:])\n launches = launchlibrary.Launch.fetch(api, name=name)\n msg = discord.Embed(title=\"Listing launches found with {0}:\\n\".format(name))\n if launches:\n for launch in launches[:num]:\n net = launch.net\n value = \"Date: {0}\".format(net.date())\n if net.time() != datetime(2000, 1, 1, 0).time(): # check if time is set to 0\n value += \", Time: {0}\".format(net.time())\n if \"-s\" in args:\n value += \", Status: {0}\".format(launch.get_status().name)\n if \"-id\" in args:\n value += \", ID: {0}\".format(launch.id)\n msg.add_field(name=launch.name, value=value, inline=False)\n await ctx.send(embed=msg)\n else:\n msg = \"No launches found with provided name.\"\n await send(ctx, msg, args)", "async def nextlaunch(self, ctx, *args):\n if not can_answer(ctx):\n return\n launches = launchlibrary.Launch.next(api, 1)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchtime_tz = launch.net\n utc = datetime.now(timezone.utc)\n tz = launchtime_tz.tzname()\n T = chop_microseconds(launchtime_tz - utc)\n launchtime = launchtime_tz.replace(tzinfo=None)\n probability = launch.probability\n if probability == -1:\n probabilitystr = \"not available\"\n else:\n probabilitystr = '{0}%'.format(probability)\n msg = ''\n if '-n' in args:\n if can_notify:\n msg = notify(msg, ctx)\n else:\n msg = \"Notifying disabled. 
\"\n msg += '**__{0}__**\\nNET {1} {2}\\nWeather probability: {3}\\nT- {4}\\n'\n msg = msg.format(launchname, launchtime, tz, probabilitystr, T)\n for arg, formatter in (('-id', id), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n await send(ctx, msg, args)", "def _get_launch_data(self, name):\n fw = self.partial_fw # assure stage 1\n if not self._launches[name]:\n launch_ids = self._lids[name]\n if launch_ids:\n data = self._lc.find({'launch_id': {\"$in\": launch_ids}})\n result = list(map(Launch.from_dict, data))\n else:\n result = []\n setattr(fw, name, result) # put into real FireWork obj\n self._launches[name] = True\n return getattr(fw, name)", "def get_launch_name(self, idx):\n return self.results[idx][\"name\"]", "def get_launch(self, launch_id, count=1):\n assert count == 1\n record = self.launches.get(launch_id)\n if record:\n ret = json.loads(record)\n else:\n ret = None\n return defer.succeed(ret)", "def get_launch_description(self, idx):\n return self.results[idx][\"mission\"][\"description\"]", "def get_launch(self, launch_id, count=1):\n return self._get_record(launch_id, self._launch_column_family, count)", "def _get_irida_sistr_run_info(self):\n info = OrderedDict()\n info['appname'] = self.appname\n info['version'] = __version__\n info['command_line'] = self.command_line\n info['irida_url'] = self.irida_url\n info['username'] = self.username\n info['app_run_date'] = datetime.now()\n\n if self.sample_created_min_date:\n info['sample_created_min_date'] = self.sample_created_min_date\n\n return info", "def get_studio_info_from_name(str):\r\n\r\n url = f\"https://api.themoviedb.org/3/search/company?api_key={TMDB_API_KEY}&language=en-US&page=1&include_adult=false&query='{str}'\"\r\n\r\n response = requests.get(url)\r\n\r\n search_results = json.loads(response.text)\r\n\r\n recommendations = dict()\r\n count = 1\r\n \r\n for index, title in enumerate(search_results['results']):\r\n recommendations[title.get('id')] = title\r\n\r\n return recommendations", "def print_activity_run_details(activity_run):\r\n print(\"\\n\\tActivity run details\\n\")\r\n print(\"\\tActivity run status: {}\".format(activity_run.status))\r\n if activity_run.status == 'Succeeded':\r\n print(\"\\tNumber of bytes read: {}\".format(activity_run.output['dataRead']))\r\n print(\"\\tNumber of bytes written: {}\".format(activity_run.output['dataWritten']))\r\n print(\"\\tCopy duration: {}\".format(activity_run.output['copyDuration']))\r\n else:\r\n print(\"\\tErrors: {}\".format(activity_run.error['message']))", "def print_activity_run_details(activity_run):\n now = datetime.utcnow()\n\n print(f\"{now} - Activity run status: {activity_run.status}\")\n if activity_run.status in ['Succeeded', 'InProgress']:\n print(f\"{now} - activity_run: {activity_run}\")\n else:\n print(f\"{now} - Errors: {activity_run.error}\")", "def start_launch(self, context, **kwargs):\n self._launch_id = self._rp.launch_id or self._rp.start_launch(\n name=self._cfg.launch_name,\n start_time=timestamp(),\n attributes=self._get_launch_attributes(),\n description=self._cfg.launch_description,\n rerun=self._cfg.rerun,\n rerunOf=self._cfg.rerun_of,\n **kwargs\n )\n if not self._skip_analytics:\n send_event(self.agent_name, self.agent_version)", "def fetch_launch_plan(\n self, project: str = None, domain: str = None, name: str = None, version: str = None\n ) -> FlyteLaunchPlan:\n if name is None:\n raise user_exceptions.FlyteAssertion(\"the 'name' argument must be specified.\")\n launch_plan_id = 
_get_entity_identifier(\n self.client.list_launch_plans_paginated,\n ResourceType.LAUNCH_PLAN,\n project or self.default_project,\n domain or self.default_domain,\n name,\n version,\n )\n admin_launch_plan = self.client.get_launch_plan(launch_plan_id)\n flyte_launch_plan = FlyteLaunchPlan.promote_from_model(launch_plan_id, admin_launch_plan.spec)\n\n wf_id = flyte_launch_plan.workflow_id\n workflow = self.fetch_workflow(wf_id.project, wf_id.domain, wf_id.name, wf_id.version)\n flyte_launch_plan._interface = workflow.interface\n flyte_launch_plan.guessed_python_interface = Interface(\n inputs=TypeEngine.guess_python_types(flyte_launch_plan.interface.inputs),\n outputs=TypeEngine.guess_python_types(flyte_launch_plan.interface.outputs),\n )\n return flyte_launch_plan", "def help_launch(self):\n _launch_parser.print_help()", "async def get_info(self, name: str, build_id: int) -> dict:\n response = await self.jenkins._request(\n 'GET',\n f'/job/{name}/{build_id}/api/json'\n )\n\n return await response.json()", "def put_launch(self, launch):\n launch_id = launch['launch_id']\n state = launch['state']\n value = json.dumps(launch)\n return self.client.insert(launch_id, self._launch_column_family,\n value, column=state)", "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n autoscale_headrooms: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecAutoscaleHeadroomArgs']]]]] = None,\n autoscale_headrooms_automatics: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecAutoscaleHeadroomsAutomaticArgs']]]]] = None,\n instance_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n labels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecLabelArgs']]]]] = None,\n metadatas: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecMetadataArgs']]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecNetworkInterfaceArgs']]]]] = None,\n node_pool_name: Optional[pulumi.Input[str]] = None,\n ocean_id: Optional[pulumi.Input[str]] = None,\n resource_limits: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecResourceLimitsArgs']]] = None,\n restrict_scale_down: Optional[pulumi.Input[bool]] = None,\n root_volume_size: Optional[pulumi.Input[int]] = None,\n root_volume_type: Optional[pulumi.Input[str]] = None,\n scheduling_tasks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecSchedulingTaskArgs']]]]] = None,\n service_account: Optional[pulumi.Input[str]] = None,\n shielded_instance_config: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecShieldedInstanceConfigArgs']]] = None,\n source_image: Optional[pulumi.Input[str]] = None,\n storage: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecStorageArgs']]] = None,\n strategies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecStrategyArgs']]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n taints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OceanLaunchSpecTaintArgs']]]]] = None,\n update_policy: Optional[pulumi.Input[pulumi.InputType['OceanLaunchSpecUpdatePolicyArgs']]] = None) -> 'OceanLaunchSpec':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _OceanLaunchSpecState.__new__(_OceanLaunchSpecState)\n\n __props__.__dict__[\"autoscale_headrooms\"] = 
autoscale_headrooms\n __props__.__dict__[\"autoscale_headrooms_automatics\"] = autoscale_headrooms_automatics\n __props__.__dict__[\"instance_types\"] = instance_types\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"metadatas\"] = metadatas\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"network_interfaces\"] = network_interfaces\n __props__.__dict__[\"node_pool_name\"] = node_pool_name\n __props__.__dict__[\"ocean_id\"] = ocean_id\n __props__.__dict__[\"resource_limits\"] = resource_limits\n __props__.__dict__[\"restrict_scale_down\"] = restrict_scale_down\n __props__.__dict__[\"root_volume_size\"] = root_volume_size\n __props__.__dict__[\"root_volume_type\"] = root_volume_type\n __props__.__dict__[\"scheduling_tasks\"] = scheduling_tasks\n __props__.__dict__[\"service_account\"] = service_account\n __props__.__dict__[\"shielded_instance_config\"] = shielded_instance_config\n __props__.__dict__[\"source_image\"] = source_image\n __props__.__dict__[\"storage\"] = storage\n __props__.__dict__[\"strategies\"] = strategies\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"taints\"] = taints\n __props__.__dict__[\"update_policy\"] = update_policy\n return OceanLaunchSpec(resource_name, opts=opts, __props__=__props__)", "def _make_run_description(args):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Lists launches with the provided name. -[int] The number of launches listed. Default is 5, max 10. -s Include launch status. -id Include the IDs of the launches.
async def listbyname(self, ctx, name, *args):
    if not can_answer(ctx):
        return
    num = 5
    for arg in args:
        if arg.startswith('-'):
            break
        else:
            name = name + ' ' + arg
    for arg in args:
        if arg[1:].isdigit() and arg.startswith('-'):
            num = int(arg[1:])
    launches = launchlibrary.Launch.fetch(api, name=name)
    msg = discord.Embed(title="Listing launches found with {0}:\n".format(name))
    if launches:
        for launch in launches[:num]:
            net = launch.net
            value = "Date: {0}".format(net.date())
            if net.time() != datetime(2000, 1, 1, 0).time():  # check if time is set to 0
                value += ", Time: {0}".format(net.time())
            if "-s" in args:
                value += ", Status: {0}".format(launch.get_status().name)
            if "-id" in args:
                value += ", ID: {0}".format(launch.id)
            msg.add_field(name=launch.name, value=value, inline=False)
        await ctx.send(embed=msg)
    else:
        msg = "No launches found with provided name."
        await send(ctx, msg, args)
[ "async def launchbyname(self, ctx, name, *args):\n if not can_answer(ctx):\n return\n for arg in args:\n if arg.startswith('-'):\n break\n else:\n name = name + ' ' + arg\n launches = launchlibrary.Launch.fetch(api, name=name)\n if launches:\n launch = launches[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-id', id), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No launch found with name provided.\"\n await send(ctx, msg, args)", "def _get_launch_data(self, name):\n fw = self.partial_fw # assure stage 1\n if not self._launches[name]:\n launch_ids = self._lids[name]\n if launch_ids:\n data = self._lc.find({'launch_id': {\"$in\": launch_ids}})\n result = list(map(Launch.from_dict, data))\n else:\n result = []\n setattr(fw, name, result) # put into real FireWork obj\n self._launches[name] = True\n return getattr(fw, name)", "async def launchbyid(self, ctx, *args):\n if not can_answer(ctx):\n return\n launchid = False\n for arg in args:\n if str(arg).isdigit():\n launchid = int(arg)\n if launchid:\n launch = launchlibrary.Launch.fetch(api, id=launchid)[0]\n launchname = launch.name\n launchstatus = launch.get_status().description\n launchtime_tz = launch.net\n tz = launchtime_tz.tzname()\n launchtime = launchtime_tz.replace(tzinfo=None)\n msg = '**__{0}__**\\n{1}\\nNET {2} {3}\\n'\n msg = msg.format(launchname, launchstatus, launchtime, tz)\n for arg, formatter in (('-r', reasons), ('-d', description), ('-v', videourl)):\n if arg in args:\n msg = formatter(msg, launch)\n else:\n msg = \"No ID provided.\"\n await send(ctx, msg, args)", "def get_launches(self, state=None, min_state=None, max_state=None):\n records = self._get_records(self.launches, state, min_state, max_state)\n return defer.succeed(records)", "def get_launches(self, state=None, min_state=None, max_state=None):\n return self._get_records(self._launch_column_family,\n state=state,\n min_state=min_state,\n max_state=max_state)", "def get_launch(self, launch_id, count=1):\n assert count == 1\n record = self.launches.get(launch_id)\n if record:\n ret = json.loads(record)\n else:\n ret = None\n return defer.succeed(ret)", "def lunch_list(request):\n\t\n\tobject_list = Lunch.objects.filter(employee_id_id=request.user.id).order_by('-id')[:1]\n\treturn render(request, 'lunch/lunch_list.html', {'object_list':object_list})", "def list_jobs(state='ALL',workflow='ALL',app='ALL',name=''):\n from balsam.launcher.dag import BalsamJob as Job\n from balsam.core.models import ApplicationDefinition as App\n jobs = Job.objects.all()\n print(f'Total number of jobs: {len(jobs)}')\n if state != 'ALL':\n jobs = jobs.filter(state=state)\n if workflow != 'ALL':\n jobs = jobs.filter(workflow=workflow)\n if app != 'ALL':\n jobs = jobs.filter(application=app)\n if name:\n jobs = jobs.filter(name__icontains=name)\n print(f'Selected number of jobs: {len(jobs)}')\n if len(jobs) > 0: \n t = '{:<20}'.format('Name')\n t += ' {:>8}'.format('Nodes')\n t += ' {:>12}'.format('Ranks')\n t += ' {:^8}'.format('ID')\n if state =='JOB_FINISHED':\n t += '{:>12}'.format('Runtime')\n elif state =='ALL':\n t += '{:>15}'.format('State')\n print(t)\n for job in jobs:\n s = '{:<20.15}'.format(job.name)\n s += ' 
{:>8}'.format(job.num_nodes)\n s += ' {:>12}'.format(job.num_ranks)\n s += ' {:>8}'.format(str(job.job_id).split('-')[0]) \n\n if state =='JOB_FINISHED':\n s += '{:>12.3f}'.format(job.runtime_seconds)\n elif state =='ALL':\n s += '{:>15}'.format(job.state)\n print(s)\n return", "def list_runs(self, project, pipeline_id):\n route_values = {}\n if project is not None:\n route_values['project'] = self._serialize.url('project', project, 'str')\n if pipeline_id is not None:\n route_values['pipelineId'] = self._serialize.url('pipeline_id', pipeline_id, 'int')\n response = self._send(http_method='GET',\n location_id='7859261e-d2e9-4a68-b820-a5d84cc5bb3d',\n version='6.0-preview.1',\n route_values=route_values)\n return self._deserialize('[Run]', self._unwrap_collection(response))", "def get_launch(self, launch_id, count=1):\n return self._get_record(launch_id, self._launch_column_family, count)", "def show_runs():\n # return render_template(\"runs.html\", runs=data.runs(), type=type)\n return render_template(\"runs.html\", runs=[], type=type)", "def list_runs(self):\n res = self.api_client.ListRuns()\n return res.response().result", "def list(args: argparse.Namespace):\n for submission_id, submission_name in ssds.Staging().list():\n print(submission_id, submission_name)", "def list_instances(name):\n\titems = []\n\ttry:\n\t\tif len(name) <2:\n\t\t\titems.append(alp.Item(\n\t\t\t\ttitle='Searching',\n\t\t\t\tsubtitle='Please type more then one character to start searching',\n\t\t\t\tvalid=False\n\t\t\t))\n\t\telse:\n\t\t\tec2 = boto.connect_ec2()\n\t\t\tfor r in ec2.get_all_instances():\n\t\t\t\tgroups = ';'.join([g.name or g.id for g in r.groups])\n\t\t\t\tfor instance in r.instances:\n\t\t\t\t\tinstance_name = instance.tags.get('Name', instance.tags.get('name', ''))\n\t\t\t\t\tif not name.lower() in instance_name.lower():\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif instance.public_dns_name:\n\t\t\t\t\t\targ = 'ssh ~/.ssh/%s.pem %s\\n' % (instance.key_name, instance.public_dns_name)\n\t\t\t\t\telse:\n\t\t\t\t\t\targ = 'ssh vpc\\nssh %s\\n' % instance.private_ip_address\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\titems.append(alp.Item(\n\t\t\t\t\t\ttitle=instance_name,\n\t\t\t\t\t\tsubtitle='[%s]: %s' % (instance.id, groups),\n\t\t\t\t\t\tvalid=True,\n\t\t\t\t\t\targ=arg\n\t\t\t\t\t))\n\t\t\t\t\n\t\tif len(items) == 0:\n\t\t\titems.append(alp.Item(\n\t\t\t\ttitle='No Results Found',\n\t\t\t\tsubtitle='Please refine your search and try again'\n\t\t\t))\n\texcept Exception, e:\n\t\talp.log(str(e))\n\t\titems = [alp.Item(\n\t\t\ttitle='Problem Searching',\n\t\t\tsubtitle='%s' % str(e).replace(\"'\", ''),\n\t\t\tvalid=False\n\t\t)]\n\t\talp.log(items[0].get())\n\talp.feedback(items)", "def listjobsbyrun(request, runtag):\n\n # Put the column headers into a dict form the template can understand.\n colheads = [ ( \"Job Name\", \"jobname\" ),\n ( \"Start Time\", \"start_time\" ),\n ( \"Wall Time\", \"walltime\" ),\n ( \"Exit Stage\", \"exit_status__stage_fail\" ),\n ( \"Status\", \"exit_status__description\" ), ]\n\n # Sort out any other filters we might have specified.\n allowed_filters = [attr for (name, attr) in colheads]\n allowed_filters += ['walltime__gt', 'walltime__lt']\n jobfilters = dict([(key, val) for (key, val) in request.GET.items()\n if key in allowed_filters])\n\n # Get all the jobs associated with a particular run, and use the\n # \"order_by\" field to order the QuerySet accordingly.\n sort_headers = SortHeaders(request, colheads,\n default_order_field=0,\n default_order_type='asc',\n 
additional_params=jobfilters)\n run = js.PipelineRun.objects.get(runtag=runtag)\n job_list = run.pipelinejob_set.select_related('exit_status')\n if len(jobfilters) < 1: job_list = job_list.all()\n else: job_list = job_list.filter(**jobfilters)\n job_list = job_list.order_by(sort_headers.get_order_by())\n\n # Put the data themselves into a dict form the template can understand.\n rows = [ ({ 'html': \"class=nowrap\", 'data': job.jobname, },\n { 'html': \"class=nowrap\", 'data': job.start_time, },\n { 'html': \"class=nowrap\", 'data': \"{0:.1f} sec\".format(job.walltime), },\n { 'html': \"class=nowrap\", 'data': job.exit_status.stage_fail, },\n { 'data': job.exit_status.description }) for job in job_list ]\n\n # Add some useful links to the data in the columns.\n for r in rows:\n # The user can click on a job name to view the log file.\n r[0]['url'] = reverse('joblogs',args=(r[0]['data'],))\n # The user can filter on failure conditions just by clicking on them.\n for i in (3, 4):\n tmpdict = dict(request.GET.items())\n tmpdict.update({ allowed_filters[i]: r[i]['data'] })\n r[i]['url'] = '?{0}'.format('&amp;'.join(\n ['{0}={1}'.format(key, val) for key, val in tmpdict.items()]))\n\n # Render the page\n return render_to_response(\"listjobsby_run.html\",\n { \"colheads\": colheads, \"rows\": rows, \"runtag\": runtag,\n \"headers\": list(sort_headers.headers()),\n \"jobfilters\" : jobfilters },\n context_instance=RequestContext(request))", "def _list_runs(api_call):\n\n xml_string = _perform_api_call(api_call)\n\n runs_dict = xmltodict.parse(xml_string)\n # Minimalistic check if the XML is useful\n if 'oml:runs' not in runs_dict:\n raise ValueError('Error in return XML, does not contain \"oml:runs\": %s'\n % str(runs_dict))\n elif '@xmlns:oml' not in runs_dict['oml:runs']:\n raise ValueError('Error in return XML, does not contain '\n '\"oml:runs\"/@xmlns:oml: %s'\n % str(runs_dict))\n elif runs_dict['oml:runs']['@xmlns:oml'] != 'http://openml.org/openml':\n raise ValueError('Error in return XML, value of '\n '\"oml:runs\"/@xmlns:oml is not '\n '\"http://openml.org/openml\": %s'\n % str(runs_dict))\n\n if isinstance(runs_dict['oml:runs']['oml:run'], list):\n runs_list = runs_dict['oml:runs']['oml:run']\n elif isinstance(runs_dict['oml:runs']['oml:run'], dict):\n runs_list = [runs_dict['oml:runs']['oml:run']]\n else:\n raise TypeError()\n\n runs = dict()\n for run_ in runs_list:\n run_id = int(run_['oml:run_id'])\n run = {'run_id': run_id,\n 'task_id': int(run_['oml:task_id']),\n 'setup_id': int(run_['oml:setup_id']),\n 'flow_id': int(run_['oml:flow_id']),\n 'uploader': int(run_['oml:uploader'])}\n\n runs[run_id] = run\n\n return runs", "def listRuns(self, minrun=-1, maxrun=-1, logical_file_name=\"\", block_name=\"\", dataset=\"\"):\n\ttry:\n\t\tconn = self.dbi.connection()\n\t\ttran=False\n\t\tret=self.runlist.execute(conn, minrun, maxrun, logical_file_name, block_name,\n\t\tdataset, tran)\n\t\treturn ret\n\n\texcept Exception, ex:\n\t\traise ex\n\t\t\n\tfinally:\n\t\tconn.close()", "def splits(request, id_):\n\n activity = get_object_or_404(Activity, pk=id_, user=request.user)\n\n template = {\n 'activity': activity,\n }\n\n return render(request, 'activities/splits.html', template)", "def get_workflow_runs_by_name(self, workflow_name):\n variables = {\n 'name': workflow_name\n }\n\n return self.query(\"\"\"\n query workflowRunsByNameQuery($name: String!) 
{\n workflowRunsByName(name: $name) {\n id\n name\n createdBy {\n id\n firstName\n lastName\n email\n }\n deleted\n deletedAt\n updatedAt\n createdAt\n }\n }\n \"\"\",\n variables=variables\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells information about the rocket with the provided name. "str" Name of the rocket (always first). -id Includes the id of the rocket. -fid Includes the rocket family id. -aid Includes the agency id. -p Includes pad ids. -w Includes the Wikipedia URL.
async def rocketbyname(self, ctx, name, *args):
    if not can_answer(ctx):
        return
    rockets = launchlibrary.Rocket.fetch(api, name=name)
    if rockets:
        rocket = rockets[0]
        rocketname = rocket.name
        msg = '**__{0}__**\n'
        msg = msg.format(rocketname)
        for arg, formatter in (('-id', id), ('-fid', familyid), ('-aid', agencyid), ('-p', padids), ('-w', rocketwikiurl)):
            if arg in args:
                msg = formatter(msg, rocket)
    else:
        msg = "No rocket found with name provided."
    await send(ctx, msg, args)
[ "def id_standard( obj_name ):\n # name, side (1=blue, 2=red)\n standards = {'feige34': 1, 'feige110' : 1, 'hz44' : 1, \n 'bd284211' : 1, 'g191b2b' : 1, 'bd174708' : 2,\n 'bd262606' : 2, 'hd84937' : 2, 'hd19445' : 2}\n \n # cleanup the input object name\n obj_name = obj_name.lower().replace(' ','')\n \n # get the best match\n try:\n std_name = get_close_matches( obj_name, standards, 1 )[0]\n except IndexError:\n # print 'no match for',obj_name\n return None\n print 'identified',obj_name,'as',std_name\n return std_name, standards[std_name]", "def getRatName(bot, ratid):\n if (str(ratid) is not '0') and str(ratid) in savedratnames.keys():\n return savedratnames.get(ratid)['name'], savedratnames.get(ratid)['platform']\n if str(ratid) == 'None':\n return 'unknown', 'unknown'\n try:\n result = callapi(bot=bot, method='GET', uri='/rats/' + str(ratid))\n except ratlib.api.http.APIError:\n print('got Api error during api call')\n return 'unknown', 'unknown'\n try:\n data = result['data'][0]['attributes']\n name = data['name']\n platform = data['platform']\n ret = name, platform\n except:\n print('Couldn\\'t parse Ratname from api response for ratid' + str(ratid))\n ret = 'unknown', 'unknown'\n # print('returning '+str(ret)+' as name for '+ratid)\n return ret", "def ReachName(self, riv, rch):\n geo = self._geometry\n res = geo.ReachName(riv, rch)\n\n reach_name, river_id, river_reach = res\n\n return reach_name", "def __init__(self, race, name):\r\n self.race = race\r\n self.name = name", "def get_studio_info_from_name(str):\r\n\r\n url = f\"https://api.themoviedb.org/3/search/company?api_key={TMDB_API_KEY}&language=en-US&page=1&include_adult=false&query='{str}'\"\r\n\r\n response = requests.get(url)\r\n\r\n search_results = json.loads(response.text)\r\n\r\n recommendations = dict()\r\n count = 1\r\n \r\n for index, title in enumerate(search_results['results']):\r\n recommendations[title.get('id')] = title\r\n\r\n return recommendations", "def rocket(rocket,parameters='',timeOut=1):\n requestUrl = urldata.Domain.main + urldata.Domain.main_rockets + \"/\" + rocket\n return utils.makeRequest(requestUrl,timeOut)", "def name2tank_str(self, name: str) -> str:\n try:\n return self.tanks[\"tankStr\"][name]\n except:\n debug('Tank ' + name + ' not found')\n return name", "def add_card(self, name, api_url='https://api.scryfall.com'):\n\n if name not in self.db:\n print(\"Fetching {}\".format(name))\n query = \"{}/cards/named?exact={}\".format(api_url, name)\n time.sleep(self.rate_limit)\n r = requests.get(query)\n if r.status_code == 200:\n self.db[name] = json.loads(r.text)\n # https://scryfall.com/docs/api/layouts\n # Split cards should be handled differently - unclear how.\n # Currently, just ignore other card faces, but if layout\n # is 'split', this doesn't make sense.\n\n if 'card_faces' in self.db[name]:\n # Use the global cmc; other card details from front face\n cmc = self.db[name].get('cmc', 0)\n self.db[name] = self.db[name]['card_faces'][0]\n self.db[name]['cmc'] = cmc\n\n # Compatibility with previous Deckbrew format\n self.db[name]['cost'] = self.db[name]['mana_cost']\n\n # Parse typeline\n typeline = self.db[name]['type_line'].lower()\n\n # Parse card type into types (including supertype) and creature type\n # A set might make more logical sense, but for now easier\n # to use lists with default JSON encoder.\n # Cover unlikely event of an empty typeline\n self.db[name]['types'] = list()\n\n if '—' in typeline:\n self.db[name]['types'] = typeline.split('—', 1)[0].strip().split(' ')\n 
self.db[name]['subtypes'] = typeline.split('—', 1)[1].strip().split(' ')\n else:\n self.db[name]['types'] = typeline.split(' ')\n self.db[name]['subtypes'] = list()\n\n logging.debug(\"%s\\n\\tCost: %s\\n\\t%s\\n\\t%s %s\", name,\n self.db[name]['cost'],\n Faction.who_can_play(self.db[name]['cost']),\n self.db[name]['types'],\n self.db[name]['subtypes'])\n\n r.connection.close()\n\n # Too many requests: wait, decrease rate, try again\n elif r.status_code == 429:\n logging.warning(\"Rate limit exceeded, throttling...\")\n self.rate_limit += .1\n time.sleep(2)\n self.add_card(name)\n\n # Otherwise fail\n elif r.status_code == 404:\n r.connection.close()\n exit(\"Card {} not found in API!\".format(name))\n\n else:\n r.connection.close()\n exit(\"Error communicating with API: status code {}\".\n format(r.status_code))", "def stereotype(name):", "def RiverName(self, riv):\n geo = self._geometry\n res = geo.RiverName(riv)\n\n river_name, river_id = res\n\n return river_name", "def racial(self):\r\n if race == \"Orc\":\r\n return \"Rage\"", "def registergamer(name):\n\n connection = connect()\n cursor = connection.cursor()\n bleached_name = bleach.clean(name, strip=True)\n cursor.execute(\"insert into gamer (gamer_name) values (%s)\", (bleached_name,))\n connection.commit()\n connection.close()", "def get_summoner_rank(name: str):\n\n # We need to get id\n user = watcher.summoner.by_name(MY_REGION, name)\n\n # First check if we have existing record for given summoner name\n summoner_cached = check_cached(user[\"name\"], Summoners, Summoners.summoner_name)\n\n # If data exists, form data and return here.\n if summoner_cached:\n return create_summoner_profile_data(summoner_cached[\"dict\"])\n\n # Cached value doesn't exist; Grab data from API.\n ranked_stat = watcher.league.by_summoner(MY_REGION, user[\"id\"])\n\n # Init 'profile_data' to contain all data needed in one place.\n profile_data = {}\n\n # Format keys and save into 'profile_data'\n profile_data[\"summoner_name\"] = user[\"name\"]\n profile_data[\"summoner_level\"] = user[\"summonerLevel\"]\n profile_data[\"puuid\"] = user[\"puuid\"]\n\n # Get summoner Icon Image\n profileiconid = user[\"profileIconId\"]\n\n version = watcher.data_dragon.versions_for_region(MY_REGION)[\"v\"]\n profile_data[\"summoner_icon_image_url\"] = (\n \"http://ddragon.leagueoflegends.com/\"\n + f\"cdn/{version}/img/profileicon/{profileiconid}.png\"\n )\n\n # Find solo queue data.\n solo_rank_stat = pydash.find(ranked_stat, {\"queueType\": \"RANKED_SOLO_5x5\"})\n if solo_rank_stat:\n profile_data[\"tier_division\"] = solo_rank_stat[\"tier\"]\n profile_data[\"tier_rank\"] = solo_rank_stat[\"rank\"]\n profile_data[\"solo_win\"] = solo_rank_stat[\"wins\"]\n profile_data[\"solo_loss\"] = solo_rank_stat[\"losses\"]\n profile_data[\"league_points\"] = solo_rank_stat[\"leaguePoints\"]\n\n # If summoner does not have any rank information\n else:\n profile_data[\"tier_division\"] = \"UNRANKED\"\n profile_data[\"tier_rank\"] = \"I\"\n profile_data[\"solo_win\"] = 0\n profile_data[\"solo_loss\"] = 0\n profile_data[\"league_points\"] = 0\n\n summoner_profile = create_summoner_profile_data(profile_data)\n\n summoner_data = Summoners(summoner_profile)\n summoner_data.create()\n\n return summoner_profile", "def add_rocket(self, rocket):\r\n\t\tself.rockets.append(rocket)", "def add_name(self):\n self.curr_iden = self.curr_word\n self.curr_obj.insert_attr_name(self.curr_word)", "def set_name(self, room_name):\n self.name = room_name", "def name(self) -> str | None:\n return 
self.door.name", "def __str__(self):\n return \"{song} rated with: {rate}\".format(song=str(self.rated_song), rate=self.rating)", "async def randname(self, ctx, race=None, option=None):\n if race is None:\n return await ctx.send(f\"Your random name: {self.old_name_gen()}\")\n\n embed = EmbedWithAuthor(ctx)\n race_names = await search_and_select(ctx, compendium.names, race, lambda e: e['race'])\n if option is None:\n table = await get_selection(ctx, [(t['name'], t) for t in race_names['tables']])\n else:\n table = await search_and_select(ctx, race_names['tables'], option, lambda e: e['name'])\n embed.title = f\"{table['name']} {race_names['race']} Name\"\n embed.description = random.choice(table['choices'])\n await ctx.send(embed=embed)", "def name_from_id(id):\n if id in EXISTING:\n return EXISTING[id]\n\n h = int(id, 16)\n i = h % len(ADJS)\n adj = ADJS[i]\n\n h = int(''.join(list(reversed(str(h)[1:]))))\n nouns = [n for n in NOUNS if n[:3] != adj[:3]] # try to filter out redundancies\n i = h % len(nouns)\n noun = nouns[i]\n\n return 'The Planet of {} {}'.format(adj, noun)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tells information about the rocket with the provided ID. [int] ID of the rocket. -fid Includes the rocket family id. -aid Includes the agency id. -p Includes pad ids. -w Includes the Wikipedia URL.
async def rocketbyid(self, ctx, *args):
    if not can_answer(ctx):
        return
    for arg in args:
        if arg.isdigit():
            id = int(arg)
    rockets = launchlibrary.Rocket.fetch(api, id=id)
    if rockets:
        rocket = rockets[0]
        rocketname = rocket.name
        msg = '**__{0}__**\n'
        msg = msg.format(rocketname)
        for arg, formatter in (('-fid', familyid), ('-aid', agencyid), ('-p', padids), ('-w', rocketwikiurl)):
            if arg in args:
                msg = formatter(msg, rocket)
    else:
        msg = "No ID provided."
    await send(ctx, msg, args)
[ "def get(self, redflag_id):\n redflag = RedFlagModel.find_redflag(redflag_id, DB)\n if redflag is not None:\n return {'status': 200, 'data': redflag}, 200\n return not_found('That redflag cannot be found')", "def lookup_rival(self, rival_id):\n uri = 'https://p.eagate.573.jp/game/ddr/ddra20/p/rival/kensaku.html?mode=6&slot=&code=%i' % rival_id\n\n html = self.eagate.get_page(uri)\n\n rival_parser = DDRRivalTableParser.DDRRivalTableParser()\n rival_parser.feed(html)\n if len(rival_parser.rivals) == 0:\n return None\n else:\n return rival_parser.rivals[0]", "def read(self, id):", "def print_info(recid, docid, info):\n print '%i:%i:%s' % (recid, docid, info)", "def get_nfinfo_by_id(netflixid):\r\n url = \"https://unogs-unogs-v1.p.rapidapi.com/aaapi.cgi\"\r\n\r\n querystring = {\"t\":\"loadvideo\",\"q\":f\"{netflixid}\"}\r\n\r\n headers = {\r\n 'x-rapidapi-key': \"\",\r\n 'x-rapidapi-host': \"unogs-unogs-v1.p.rapidapi.com\"\r\n }\r\n headers['x-rapidapi-key'] = os.environ.get('API_TOKEN_1')\r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n\r\n n_list = []\r\n n_payload = json.loads(response.text)\r\n n_list.append(n_payload)\r\n\r\n n_dictionary = n_list[0].values()\r\n new_list = list(n_dictionary)\r\n n_str_result = new_list[0]\r\n\r\n nfinfo = n_str_result['nfinfo']\r\n\r\n return nfinfo", "def printIDVoteHistory(bioguide_id):\n\tprint bioguide_id + \":\", vote_dict[bioguide_id][\"name\"], vote_dict[bioguide_id][\"party\"]\n\tvote_history = getIDVoteHistory(bioguide_id)\n\tfor vote in sorted(vote_history):\n\t\tprint \"\\t\" + vote + \":\", vote_history[vote]", "def bullfighters_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=BULLFIGHTER_TYPE_URI,\n rdf_type_name=BULLFIGHTER_TYPE_NAME, \n kls=Bullfighter)", "async def raffle_title(self, ctx, id: int, *, title):\n if id == 0:\n await ctx.message.delete()\n return\n else:\n raffle = await self.bot.mdb['raffle'].find_one({'id': int(id)})\n if raffle is not None:\n await ctx.message.delete()\n raffle['title'] = title\n totalTickets = raffle.get('totalTickets', 0)\n embed = self.createRaffleEmbed(raffle['title'], raffle['description'], raffle['id'], totalTickets, raffle['cost'])\n guild = ctx.guild\n ch = guild.get_channel(int(raffle['channelId']))\n message = await ch.fetch_message(int(raffle['msgId']))\n await message.edit(content=None, embed=embed)\n await self.bot.mdb['raffle'].replace_one({\"id\": raffle['id']}, raffle)\n await ctx.send(f\"Title of raffle `{id}` was changed to: `{title}`.\")\n else:\n await ctx.send(f\"Raffle with Id `{id}` not found.\")\n await ctx.message.delete()", "def entry(self, id, frmt):\n self.devtools.check_param_in_list(frmt, self.format_entry)\n url = self.version + \"/ws/reaction/%s/%s\" % (frmt, id)\n\n if frmt==\"rxn\":\n response = self.http_get(url, frmt=frmt)\n else:\n response = self.http_get(url, frmt=\"xml\")\n response = self.easyXML(response)\n return response", "def soccerleagues_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=SOCCERLEAGUE_TYPE_URI,\n rdf_type_name=SOCCERLEAGUE_TYPE_NAME, \n kls=SoccerLeague)", "def ride_info(request, ride_id):\n context_instance = RequestContext(request)\n user = context_instance['user']\n ride = Ride.objects.get(pk=ride_id).filled_out()\n\n # If they have submitted a request and it is in bounds of the ride, let them\n # see this ride.\n # Next, check if they are part of this ride. 
If they are, let them see it.\n # Otherwise, don't let them see it\n user_sr = ride.get_sr_for_user(user)\n if not user_sr:\n if '_search_request' in request.session:\n # user is attempting to add this ride\n sr_post = request.session['_search_request']\n rr_form = RideRequestForm(sr_post)\n user_sr = rr_form.save(commit = False)\n user_sr.user = context_instance['user']\n ride.update_with_sr(user_sr)\n if not ride.in_bounds():\n messages.add_message(request, messages.ERROR,\n \"Error: This ride is out of your bounds.\" +\n \" You do not have access to this ride.\")\n return redirect('/rides/search/')\n else:\n messages.add_message(request, messages.ERROR,\n \"Error: You do not have access to this ride.\")\n return redirect('/')\n else:\n \tride.update_with_sr(user_sr)\n \t\n # encrypt the ride id and user_name\n enc_name = 'not implemented'\n enc_ride_id = 0\n\n data = {\n 'subtitle': 'Ride Details',\n 'ride': ride,\n 'enc_name': enc_name,\n 'enc_ride_id': enc_ride_id,\n 'user_names': ','.join(['\"%s %s\"' % (rider.first_name, rider.last_name) for rider in ride.riders]),\n 'start_latlongs': ['new google.maps.LatLng(%f, %f)' % (req.start_lat, req.start_long) for req in ride.requests],\n 'end_latlongs': [ 'new google.maps.LatLng(%f, %f)' % (req.end_lat, req.end_long) for req in ride.requests],\n 'user_in_ride': user in ride.riders}\n \n return render_to_response('detail.html', data,\n context_instance=context_instance)", "def getRatName(bot, ratid):\n if (str(ratid) is not '0') and str(ratid) in savedratnames.keys():\n return savedratnames.get(ratid)['name'], savedratnames.get(ratid)['platform']\n if str(ratid) == 'None':\n return 'unknown', 'unknown'\n try:\n result = callapi(bot=bot, method='GET', uri='/rats/' + str(ratid))\n except ratlib.api.http.APIError:\n print('got Api error during api call')\n return 'unknown', 'unknown'\n try:\n data = result['data'][0]['attributes']\n name = data['name']\n platform = data['platform']\n ret = name, platform\n except:\n print('Couldn\\'t parse Ratname from api response for ratid' + str(ratid))\n ret = 'unknown', 'unknown'\n # print('returning '+str(ret)+' as name for '+ratid)\n return ret", "def show_star(star_id):\n\n star, consts = get_star_info(star_id)\n\n return render_template(\"star_info.html\",\n star=star,\n constellations=consts)", "def get_gif_route(gif_id: int):\n gif = Gif.get_by_id(gif_id)\n if not gif:\n return {\"error\": f\"Gif with the id {gif_id} not found\"}, HTTPStatus.NOT_FOUND\n token_username: str = current_token.sub # type: ignore\n if (\n not bool(current_token.scope[\"admin\"]) # type: ignore\n and gif.owner.username != token_username\n ):\n return {\n \"error\": \"unable to access gif owned by another user\"\n }, HTTPStatus.FORBIDDEN\n return gif.to_json(), HTTPStatus.OK", "def get(self, ride_id):\n request = Ride.read(ride_id)\n return {'status':'success', 'message': 'Fetch successful', 'data': request}", "def get_guide(id):\n r = urllib2.urlopen('http://snapguide.com/api/v1/guide/' + id)\n return r.read()", "def post_gif_route(gif_id: int): # pylint: disable=too-many-return-statements\n gif = Gif.get_by_id(gif_id)\n if not gif:\n return {\"error\": f\"Gif with the id {gif_id} not found\"}, HTTPStatus.NOT_FOUND\n token_username: str = current_token.sub # type: ignore\n if (\n not bool(current_token.scope[\"admin\"]) # type: ignore\n and gif.owner.username != token_username\n ):\n return {\n \"error\": \"unable to modify gif owned by another user\"\n }, HTTPStatus.FORBIDDEN\n req_json: t.Optional[dict] = 
request.get_json()\n if not req_json:\n return {\"error\": \"missing request body\"}, HTTPStatus.BAD_REQUEST\n if \"name\" in req_json:\n gif_name = req_json[\"name\"]\n for user_gif in gif.owner.gifs:\n if user_gif.name == gif_name:\n return {\"error\": \"duplicate gif name\"}, HTTPStatus.BAD_REQUEST\n gif.name = gif_name\n if \"beats_per_loop\" in req_json:\n beats_per_loop = req_json[\"beats_per_loop\"]\n if not isinstance(beats_per_loop, (int, float)):\n return {\"error\": \"beats_per_loop must be a number\"}, HTTPStatus.BAD_REQUEST\n gif.beats_per_loop = beats_per_loop\n if \"custom_tempo\" in req_json:\n custom_tempo = req_json[\"custom_tempo\"]\n if not isinstance(custom_tempo, (int, float)):\n return {\"error\": \"custom_tempo must be a number\"}, HTTPStatus.BAD_REQUEST\n gif.custom_tempo = custom_tempo\n db.session.commit()\n return gif.to_json(), HTTPStatus.OK", "def name_from_id(id):\n if id in EXISTING:\n return EXISTING[id]\n\n h = int(id, 16)\n i = h % len(ADJS)\n adj = ADJS[i]\n\n h = int(''.join(list(reversed(str(h)[1:]))))\n nouns = [n for n in NOUNS if n[:3] != adj[:3]] # try to filter out redundancies\n i = h % len(nouns)\n noun = nouns[i]\n\n return 'The Planet of {} {}'.format(adj, noun)", "def get_drink(id): # noqa: E501\n return 'do some magic!'", "def get_imdb_details_with_imdb_id(imdb_id):\r\n\r\n url = \"https://movie-database-imdb-alternative.p.rapidapi.com/\"\r\n\r\n querystring = {\"i\":f\"{imdb_id}\",\"r\":\"json\"}\r\n\r\n headers = {\r\n 'x-rapidapi-key': \"\",\r\n 'x-rapidapi-host': \"movie-database-imdb-alternative.p.rapidapi.com\"\r\n }\r\n headers['x-rapidapi-key'] = os.environ.get('API_TOKEN_1')\r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n\r\n search_results = json.loads(response.text)\r\n\r\n ratings = {}\r\n\r\n for rating in search_results['Ratings']:\r\n key_name = str(rating['Source']).lower().replace(\" \", \"_\")\r\n search_results[\"ratings_\"+key_name] = rating\r\n\r\n return search_results" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all the docs on the basis of a list of MACs and a time frame.
def let_the_docs_out(self, post_data):
    doc_list = []
    mac_list = post_data['mac']
    if 'time' in post_data and post_data['time']:
        time_frame = post_data['time']
        start_time = time_frame[0]
        end_time = time_frame[1]
    else:
        utc_1970 = datetime.datetime(1970, 1, 1)
        utc_now = datetime.datetime.utcnow()
        offset = utc_now - datetime.timedelta(minutes=30)
        start_time = int((offset - utc_1970).total_seconds())
        end_time = int((utc_now - utc_1970).total_seconds())
    for mac in mac_list:
        if not DB.devices.find({"lower_snum": mac.lower()}).count():
            continue
        cursor = DB.devices.find({"lower_snum": mac.lower(),
                                  "timestamp": {"$gt": start_time, "$lt": end_time}})
        for doc in cursor:
            doc_list.append(doc)
    return doc_list
[ "def test_long_doc_lst(self):\n\n # Long document list - created manually for a unique test\n doc_lst = [\n {\n \"_id\": \"test1\",\n \"chebi\": \"CHEBI:1391\",\n },\n {\n \"_id\": \"test2\",\n \"pubchem\": \"178014\",\n },\n {\n # this test document should still be returned\n \"_id\": \"test3\",\n },\n {\n \"_id\": \"test4\",\n \"drugbank\": \"DB11940\",\n },\n {\n \"_id\": \"test5\",\n \"chebi\": \"CHEBI:28689\",\n },\n {\n \"_id\": \"test6\",\n \"pubchem\": \"164045\",\n },\n {\"_id\": \"test7\", \"drugbank\": \"DB01076\"},\n {\n \"_id\": \"test8\",\n \"drugbank\": \"DB03510\",\n },\n {\n \"_id\": \"test9\",\n \"pubchem\": \"40467070\",\n },\n {\n \"_id\": \"test10\",\n \"chebi\": \"CHEBI:135847\",\n },\n {\n \"_id\": \"test11\",\n \"pubchem\": \"10484732\",\n },\n {\n \"_id\": \"test12\",\n \"pubchem\": \"23305354\",\n },\n ]\n\n answers = [\n \"SHXWCVYOXRDMCX-UHFFFAOYSA-N\",\n \"CXHDSLQCNYLQND-XQRIHRDZSA-N\",\n \"test3\",\n \"XMYKNCNAZKMVQN-NYYWCZLTSA-N\",\n \"FMGSKLZLMKYGDP-USOAJAOKSA-N\",\n \"YAFGHMIAFYQSCF-UHFFFAOYSA-N\",\n \"XUKUURHRXDUEBC-KAYWLYCHSA-N\",\n \"RXRZOKQPANIEDW-KQYNXXCUSA-N\",\n \"BNQDCRGUHNALGH-ZCFIWIBFSA-N\",\n \"CGVWPQOFHSAKRR-NDEPHWFRSA-N\",\n \"PCZHWPSNPWAQNF-LMOVPXPDSA-N\",\n \"FABUFPQFXZVHFB-CFWQTKTJSA-N\",\n ]\n\n # Test a list being passed with 12 documents\n @KeyLookup(graph_mychem, [(\"chebi\", \"chebi\"), (\"drugbank\", \"drugbank\"), (\"pubchem\", \"pubchem\")], [\"inchikey\"])\n def load_document(data_folder):\n for d in doc_lst:\n yield d\n\n res_lst = load_document(\"data/folder/\")\n res_cnt = 0\n for res in res_lst:\n res_cnt += 1\n if not res[\"_id\"] in answers:\n print(res)\n self.assertTrue(res[\"_id\"] in answers)\n self.assertEqual(res_cnt, 12)", "def search_results_by_date(self, documents):\n newlist = sorted(documents, key=itemgetter('metadata_created_date'))\n return newlist", "def getDocs():\n docs = dict()\n double_transcripts=0\n for file in glob.glob(INPUT_FOLDER+\"*\"):\n # get RG number\n rg_number = file.split('/')[-1].split(\"_\")[0]\n\n # find last occurrence of '.' 
and replace it with '*' \n k = rg_number.rfind(\".\")\n mongo_rg = rg_number[:k] + \"*\" + rg_number[k+1:]\n\n\n # add it to dictionary\n\n if mongo_rg in docs.keys():\n docs[mongo_rg].append(file.split('/')[-1])\n else:\n docs[mongo_rg] = [file.split('/')[-1]]\n \n \n \n # return\n return docs", "def get_documents(file_type: str, args):\n\n # To debug if start and end of records works well in the loop for.\n # listTmp = [i for i in range(100)]\n # print(listTmp[start:end])\n\n # Getting valid page number and page length , by calling a function form utils.\n page, per_page, start, end = utils.get_valid_pagination_args(args)\n\n file_type = file_type.strip()\n\n mongo_documents = list(mongo.db.documents.find(\n {\"dataType\": file_type}).sort(\"name\", 1))\n total_records = len(mongo_documents)\n error = None\n documents = mongo_documents[start:end]\n for document in documents:\n link = None\n if document[\"format\"] == \"link\":\n\n link = document[\"link\"]\n\n clinicalCases = list(mongo.db.clinicalCases.find(\n {\"sourceId\": document[\"_id\"]}, {\"locationId\": 0}\n ))\n\n for case in clinicalCases:\n case.update({\"_id\": str(case[\"_id\"]),\n \"sourceId\": str(case[\"sourceId\"]),\n })\n try:\n for version in case[\"versions\"]:\n version.pop('locationId', None)\n except:\n pass\n\n document.update({\"_id\": str(document[\"_id\"]),\n \"link\": link,\n \"clinicalCases\": clinicalCases})\n\n data = {\n \"documents\": documents,\n \"totalRecords\": total_records,\n \"currentPage\": page,\n \"perPage\": per_page,\n \"error\": error,\n }\n\n return data", "def get_docs():\n fields = ['Title', 'Description', 'OperatingSystem']\n all_docs = {}\n\n connection = Connection()\n db = connection.linux_laptops\n docs = db.docs\n\n for f in fields:\n all_docs.update(_get_docs(docs, f))\n return all_docs", "def list_retentionTime_MS2(data, id_list_ms2):\n rt_list_ms2 = []\n\n for i in id_list_ms2:\n rt_list_ms2.append(data[i].get('retentionTime')) #syntax for accessing one particular key of one particular dictionary in the mzxml file\n\n return rt_list_ms2", "def _listResults(self, metadataPrefix, set=None, from_=None, until=None):\n if until and until < self.earliestDatestamp:\n raise BadArgumentError('until argument value is earlier than earliestDatestamp.')\n if not from_:\n from_ = self.earliestDatestamp\n if not until:\n until = datetime.datetime.now()\n #(from_ < self.earliestDatestamp)\n if (until < from_):\n raise BadArgumentError('until argument value is earlier than from argument value.')\n q = cqlparse('rec.lastModificationDate > \"%s\" and rec.lastModificationDate < \"%s\"' % (from_, until))\n # actually need datestamp values as well as results - interact with indexes directly for efficiency\n pm = self.db.get_path(session, 'protocolMap') # get CQL ProtocolMap\n idx = pm.resolveIndex(session, q.leftOperand)\n q.config = pm\n res = {}\n for src in idx.sources[u'data']:\n res.update(src[1].process(session, [[str(from_)]]))\n res.update(src[1].process(session, [[str(until)]]))\n from_ = min(res.keys())\n until = max(res.keys())\n # tweak until value to make it inclusive\n until = until[:-1] + chr(ord(until[-1])+1)\n termList = idx.fetch_termList(session, from_, 0, '>=', end=until)\n # create list of datestamp, resultSet tuples\n tuples = []\n for t in termList:\n try:\n tuples.append((datetime.datetime.strptime(t[0], u'%Y-%m-%dT%H:%M:%S'), idx.construct_resultSet(session, t[1])))\n except ValueError:\n tuples.append((datetime.datetime.strptime(t[0], u'%Y-%m-%d %H:%M:%S'), 
idx.construct_resultSet(session, t[1])))\n return tuples", "def query(self, query_str: str)->list:\n url_dict = {} #stores data of end urls \n urls_tf_idf_total = {}#used to keep track of tf.idf for the queries\n result_list = [] #used to store the results\n json_data = json.load(open(BOOKKEPING_LOC))\n split_query = query_str.split()\n counter = 0\n for query in split_query: #iterate through query by splitting with space\n result = self._collection.find({\"_id\": query})\n try:\n token_value = result.next()\n docs_dict = token_value[\"Doc_info\"]\n results_count = 0 #potentially have to take out if want all queries for selecting\n for doc_id, attributes in sorted(docs_dict.items(), key=get_tfidf, reverse=True):\n #keeping track of updates. those with more updates = matched more queries = higher priority\n #even if lower tf.idf\n if(json_data[doc_id] in urls_tf_idf_total):\n urls_tf_idf_total[json_data[doc_id]][0] += 1\n urls_tf_idf_total[json_data[doc_id]][1] += docs_dict[doc_id][\"tf-idf\"]\n else:\n urls_tf_idf_total[json_data[doc_id]] = [1,docs_dict[doc_id][\"tf-idf\"]]\n results_count += 1\n if (results_count == 10):\n break\n except StopIteration:#could not find query\n pass\n #search for urls that match the most words and continues until 10 queries are reached\n #or if there are no more urls to retrieve\n counter = len(split_query)\n while(1):\n if(len(url_dict) >= 10 or counter == 0): \n break\n for url,tf_idf in list(urls_tf_idf_total.items()):#list part necessary in python3\n if( tf_idf[0] == counter): #iterates through ALL the words matching. Stopping prematurely\n #will result in queries being missed before moving to the next best match.\n url_dict[url] = tf_idf\n counter -= 1 #used to keep track of how many queries are matching.\n #higher priority towards queries with more words matching\n #return urls sorted by tf_idf\n sorted_values = sorted(url_dict.items(), key=lambda x: (x[1][0],x[1][1]), reverse = True)\n #return 10 top urls from sorted_values\n for url,tf_idf in sorted_values:\n if(len(result_list) < 10):\n result_list.append((url,tf_idf))\n else:\n break\n return result_list", "def srcdocs(self, i=1):\n res = []\n db = self.srcdb(i=i)\n for did in db:\n res += [dict(db[did])]\n return res", "def get_docs(self, query):\n data = {}\n tot_docs = Doc.objects().count()\n for word in query:\n ind = Index.objects(key=word).first()\n if not ind:\n continue\n data[word] = {\n \"idf\": math.log(\n tot_docs / len(ind.documents), 10\n ), # calculate idf of the query word\n \"docs\": ind.documents, # Documents which contain word\n }\n return data", "def get_docs(db_vectors):\n docs = []\n categories = []\n for vector in db_vectors:\n data = vector.data\n label = vector.lbl.assigned_id if vector.lbl else None\n doc = json.loads(data)\n docs.append(doc)\n categories.append(label)\n return docs, categories", "def get_doc_objects(self, flat_log):\n logger.info('Recovering image_ids, drawing_ids, and suggestions')\n image_ids = set()\n drawing_ids = []\n suggestions = {}\n\n for line in flat_log:\n try:\n i = line.index('{')\n line_dict = json.loads(line[i:])\n except ValueError:\n pass # either chunked or changelog header without dict, no action needed\n else:\n if has_element(line_dict):\n elem_dict = line_dict['epm']['ee_eo']\n if has_img(elem_dict):\n image_ids.add(elem_dict['img_cosmoId'])\n elif has_drawing(elem_dict, drawing_ids):\n drawing_ids.append(new_drawing(elem_dict))\n elif 'type' in line_dict:\n if is_insert_suggestion(line_dict):\n sug_id = line_dict['sug_id']\n if 
sug_id in suggestions:\n suggestions[sug_id] = ins_sugg_text(line_dict, suggestions[sug_id])\n else:\n suggestions[sug_id] = new_suggestion(line_dict)\n elif is_delete_suggestion(line_dict):\n suggestion = find_sugg_by_index(line_dict, suggestions)\n if suggestion:\n suggestions[suggestion.sug_id] = rm_sugg_text(line_dict, suggestion)\n\n sugg_obj = self.KumoObj(filename='suggestions.txt', content=json.dumps(suggestions, ensure_ascii=False))\n return image_ids, drawing_ids, sugg_obj", "def _index_ek80(self):\n ind2 = []\n with open(self.fn ,'rb') as bin_file:\n #bin_file.seek(7)\n position = bin_file.tell()\n \n raw = bin_file.read(self.BLOCK_SIZE)\n while len(raw) > 4:\n \n for match in re.finditer(b'NME0|XML0|RAW3|TAG0|self.mru0|FIL1', raw):\n \n if match:\n if match.span()[0] >= 4:\n l = unpack('i', raw[match.span()[0]-self.LENGTH_SIZE : match.span()[0]])[0]\n ind2.append([match.group(), position + match.span()[0],l])\n else:\n bin_file.seek(position + self.BLOCK_SIZE - 4)\n position = bin_file.tell()\n # Read the next block for regex search\n bin_file.seek(position - 4)\n position = position - 4\n raw = bin_file.read(self.BLOCK_SIZE)\n idx = pd.DataFrame(ind2, columns=['datagram','start','length'])\n return(idx)", "def find(self, where_dict):\n matching_list = []\n for document in self.documents:\n if self.check_document(document, where_dict):\n matching_list.append(document)\n print(matching_list)\n return matching_list", "def mongo_samples_by_date(config: Config, start_datetime: datetime, end_datetime: datetime) -> List[SampleDoc]:\n with create_mongo_client(config) as client:\n mongo_db = get_mongo_db(config, client)\n samples_collection = get_mongo_collection(mongo_db, COLLECTION_SAMPLES)\n return list(\n samples_collection.find(\n {\n FIELD_CREATED_AT: {\"$gte\": start_datetime, \"$lt\": end_datetime},\n }\n )\n )", "def lines_from_doc(doc): # doc: list of AMRGraph objects\n lines = []\n for g in doc:\n print(g.id)\n lines.extend(g.meta.split('\\n'))\n # lines.extend(lines_from_sent(g,n_min,n_max))\n lines.extend(lines_from_sent(g))\n return lines", "def get_lm_matched_docs(query, searcher, qparser, topk=2000):\n #did_dict = {}\n dids = []\n scores = []\n query = qparser.parse(query)\n # searcher.setSimilarity(LMDirichletSimilarity())\n scoreDocs = searcher.search(query, topk).scoreDocs\n # print(\"Found %d document(s) that matched query '%s':\" % (len(scoreDocs), query))\n\n for scoreDoc in scoreDocs:\n if len(dids) > 1000:\n break\n\n doc = searcher.doc(scoreDoc.doc)\n did = doc.get(\"id\")\n\n if check_if_spam(did):\n continue\n #text = doc.get(\"raw\")\n #did_dict[did] = {}\n #did_dict[did]['text'] = text\n #did_dict[did]['score'] = scoreDoc.score\n dids.append(did)\n scores.append(scoreDoc.score)\n\n return dids, scores", "def get_found_documents(self, document_names_list):\n documents = CouchDocument.view(\n 'dmscouch/all',\n keys=document_names_list,\n include_docs=True)\n # Converting documents to omit couchdb ViewResults iteration bug\n results = []\n for doc in documents:\n results.append(doc)\n return results", "def parse_docs(self):\n self.docs = []\n self.fstoi = {UNK_TOKEN: 0}\n self.fitos = {0: UNK_TOKEN}\n self.idx2multi = {}\n self.multi2idx = {}\n for line in self.text:\n line = line.strip()\n if self.byte_fmt:\n line = line.decode(\"utf-8\")\n doc_words = []\n doc_feats = []\n doc_multifeats = []\n for tok in line.split(\" \"):\n word, *feats = tok.split(\"|\")\n word_n = self.stoi.get(word.lower(), self.stoi[\"UNK\"])\n feats = dict(zip(self.cnames, feats))\n 
feats_p = []\n multifeats_p = []\n for fn, f in feats.items():\n if self.is_multi(fn):\n fs = f.split(\";\")\n fs_n = []\n for f in fs:\n # First assign global feature id\n f = f\"{fn}:{f}\"\n if f not in self.fstoi:\n new_n = len(self.fstoi)\n self.fstoi[f] = new_n\n self.fitos[new_n] = f\n f_n = self.fstoi[f]\n\n # Next map it to a one hot index\n if f_n not in self.multi2idx:\n new_n = len(self.multi2idx)\n self.multi2idx[f_n] = new_n\n self.idx2multi[new_n] = f\n\n fs_n.append(f_n)\n self.cnames2fis[fn].add(f_n)\n multifeats_p.append(fs_n)\n else:\n if fn == \"lemma\":\n # Lowercase lemmas\n f = f.lower()\n if not f:\n f = UNK_TOKEN\n else:\n f = f\"{fn}:{f}\"\n if f not in self.fstoi:\n new_n = len(self.fstoi)\n self.fstoi[f] = new_n\n self.fitos[new_n] = f\n f_n = self.fstoi[f]\n feats_p.append(f_n)\n # Update feature name\n self.cnames2fis[fn].add(f_n)\n doc_words.append(word_n)\n doc_feats.append(feats_p)\n doc_multifeats.append(multifeats_p)\n self.docs.append((doc_words, doc_feats, doc_multifeats))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets a wall at grid[row][col]. Can't set a wall on top of the source/target square.
def setWall(self, row, col):
    if self.grid[row][col] != 2 and self.grid[row][col] != 3:
        self.grid[row][col] = 1
        #print("Wall set at (", row, ", ", col, ")")
[ "def make_wall(width, height, wall_cell):\n # add your code here", "def create_wall(window, grid, mouse, wall, empty, start, end):\n\n x = int(mouse.x / (window.getWidth() / grid.get_length()))\n y = int(mouse.y / (window.getHeight() / grid.get_height()))\n if (x != start[0] or y != start[1]) and (x != end[0] or y != end[1]):\n if grid.value_at(x, y) == wall:\n print(\"Wall destroyed at (\" + str(x) + \", \" + str(y), end=\").\")\n grid.modify_tile(x, y, empty)\n grid.tile_at(x, y).update_colour()\n grid.tile_at(x, y).draw(window)\n else:\n print(\"Wall placed at (\" + str(x) + \", \" + str(y), end=\"). \")\n grid.modify_tile(x, y, wall)\n grid.tile_at(x, y).update_colour()\n grid.tile_at(x, y).draw(window)", "def add_wall(self, start_row, end_row, start_col, end_col):\n return _wall(self, start_row, end_row, start_col, end_col)", "def _change_wall(self,):\n \n pass", "def break_wall(self, other):\n if self.i == other.i:\n if self.j == other.j - 1:\n self.walls['right'] = False\n other.walls['left'] = False\n elif self.j == other.j + 1:\n self.walls['left'] = False\n other.walls['right'] = False\n else:\n raise ValueError('Can break a wall only between two neighboring cells')\n elif self.j == other.j:\n if self.i == other.i - 1:\n self.walls['bottom'] = False\n other.walls['top'] = False\n elif self.i == other.i + 1:\n self.walls['top'] = False\n other.walls['bottom'] = False\n else:\n raise ValueError('Can break a wall only between two neighboring cells')\n else:\n raise ValueError('Can break a wall only between two neighboring cells')", "def create_wall(event):\n x, y = square_clicked(event.x, event.y)\n for node in nodes:\n if node.x == x and node.y == y and not node.wall:\n node.wall = True\n c.create_rectangle(x, y, x + NODE_SIZE, y + NODE_SIZE, fill=\"grey\")\n break", "def create_wall(\n grid: np.ndarray,\n thickness: float,\n height: float,\n origin: Sequence[float] = (0, 0, 0), # (x, y, theta)\n) -> bpy.types.Object:\n # create the wall object and add it to the scene\n h, w = grid.shape\n verts, faces = [], []\n for y in range(h + 1):\n for x in range(w + 1):\n verts.append((x, y, 0))\n if y < h and x < w and grid[y][x]:\n bottom_left = x + (w + 1) * y\n top_left = bottom_left + w + 1\n top_right, bottom_right = top_left + 1, bottom_left + 1\n faces.append([bottom_left, bottom_right, top_right, top_left])\n\n mesh = bpy.data.meshes.new(name=\"Wall\")\n mesh.from_pydata(verts, [], faces)\n obj = bpy.data.objects.new(mesh.name, mesh)\n collection = bpy.data.collections.get('Collection')\n collection.objects.link(obj)\n\n # activate the object for following operations\n obj.select_set(True)\n bpy.context.view_layer.objects.active = obj\n\n # remove redundant geometry\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.delete_loose()\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.dissolve_limited()\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # model transformation (according to origin, thickness, and height)\n bpy.ops.transform.resize(value=(thickness, thickness, 1))\n origin_x, origin_y, origin_theta = origin\n if origin_x or origin_y:\n bpy.ops.transform.translate(value=(origin_x, origin_y, 0))\n if origin_theta:\n bpy.context.scene.tool_settings.transform_pivot_point = 'CURSOR'\n bpy.ops.transform.rotate(value=origin_theta, orient_axis='Z')\n\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.extrude_region_move(TRANSFORM_OT_translate={\"value\": (0, 0, height)})\n bpy.ops.object.mode_set(mode='OBJECT')\n\n return obj", "def setWall(self, x1_y1, x2_y2):\n x1, y1 
= x1_y1\n x2, y2 = x2_y2\n if x1 > x2: # make sure x1 < x2\n (x1,y1,x2,y2) = (x2,y2,x1,y1)\n if x2 - x1 == 0:\n x1 -= 0.001\n dx = (x2 - x1)\n dy = (y2 - y1)\n m = dy / dx # slope\n b = y1 - x1 * m\n x = x1\n (lx,ly) = (x1,x2)\n step = dx / math.sqrt(dx * dx + dy * dy)\n while x < x2:\n y = x * m + b\n blockx = math.floor(x + 0.5)\n blocky = math.floor(y + 0.5)\n self.occupied.add( (blockx, blocky) )\n if x != x1 and lx != blockx and ly != blocky:\n self.occupied.add( (blockx-1, blocky) )\n (lx, ly) = (blockx, blocky)\n x +=step\n # Remove these walls from dirt\n self.dirt = self.dirt - self.occupied\n self.dirtStarting = self.dirtStarting - self.occupied", "def set_walls(self, walls):\n for agent in self.agent_maps:\n if self.agent_maps[agent].walls == []:\n self.agent_maps[agent].walls = walls\n self.agent_maps[agent].normalize()", "def update_grid(window, grid, c_path, p_path, empty, wall, path):\n\n for i in p_path:\n if grid.value_at(i[0], i[1]) != wall:\n grid.modify_tile(i[0], i[1], empty)\n grid.tile_at(i[0], i[1]).update_colour()\n grid.tile_at(i[0], i[1]).draw(window)\n\n for i in c_path:\n grid.modify_tile(i[0], i[1], path)\n grid.tile_at(i[0], i[1]).update_colour()\n grid.tile_at(i[0], i[1]).draw(window)", "def is_wall(self, row, col):\n \n return self.maze[row][col] == WALL", "def set_patch(patch_top_left_corner, patch_dims, grid, value=np.True_):\n x = patch_top_left_corner[0]\n y = patch_top_left_corner[1]\n x_end = x + patch_dims[0]\n y_end = y + patch_dims[1]\n\n grid[x:x_end, y:y_end] = value\n\n return None", "def draw_walls(self):\n\t\twall_keys = list(self.gridworld.wall_map.keys())\n\t\tfor i in range(0, len(wall_keys)):\n\t\t\twall_loc = eval(wall_keys[i])\n\t\t\t#top left triangle\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, wall_loc[1]*self.cell_height) #top left of cell\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom left of cell\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, wall_loc[1]*self.cell_height) #top right of cell\n\t\t\t#bottom right triangle\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom right of cell\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom left of cell\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, wall_loc[1]*self.cell_height) #top right of cell", "def make_cross_wall(self):\n if self.orient == \"e\":\n self.cross_wall = self.coord1.wall_south\n if self.orient == \"s\":\n self.cross_wall = self.coord1.wall_east", "def set_square(self, x, y):\n self.board[y][x] = 1", "def walls(self):", "def random_walls():\n scale_walls = get_bool_opt(\"random_blackwall_scale\")\n texture_lock = get_bool_opt('tile_texture_lock', True)\n for solid in VMF.iter_wbrushes(world=True, detail=True):\n for face in solid:\n orient = get_face_orient(face)\n # Only modify black walls and ceilings\n if (scale_walls and\n face.mat.casefold() in BLACK_PAN and\n orient is not ORIENT.floor):\n\n random.seed(face_seed(face) + '_SCALE_VAL')\n # randomly scale textures to achieve the P1 multi-sized\n # black tile look without custom textues\n scale = random.choice(get_grid_sizes(face))\n split = face.uaxis.split()\n split[-1] = scale\n face.uaxis = \" \".join(split)\n\n split = face.vaxis.split()\n split[-1] = scale\n face.vaxis = \" \".join(split)\n alter_mat(face, face_seed(face), texture_lock)", "def __init__(self, width, height, walls=[]):\n self.width = width\n self.height = 
height\n self.action_to_pos = {\n 'North': (1, 0),\n 'South': (-1, 0),\n 'East': (0, 1),\n 'West': (0, -1),\n 'Stop': (0, 0),\n }\n self._walls = walls\n self.cells = self.generate_cells()\n self.normalize()", "def add_wall(self, x, y):\n\t\twall = Wall(self, x, y)\n\t\tself.add_tile(wall, x, y)\n\t\twall.update()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggles the Source Tile. If the source is not set, sets it. If the source is set, removes it if Source == (row, col), or moves it to (row, col) otherwise.
def toggleSource(self, row, col): # if the source is not set, set it if self.getSource() == (None, None): self.setSource(row, col) # if the source is set else: # if the source is the current square, remove it if self.grid[row][col] == 2: self.removeSource() # if the source is not the current square, remove it, and make current square the source elif self.grid[row][col] == 0 or self.grid[row][col] == 1: self.removeSource() self.setSource(row, col)
[ "def toggleTarget(self, row, col):\n # if the target is not set, set it\n if self.getTarget() == (None, None):\n self.setTarget(row, col)\n # if the target is set\n else:\n # if the target is the current square, remove it\n if self.grid[row][col] == 3:\n self.removeTarget()\n # if the target is not the current square, remove it, and make current square the target\n elif self.grid[row][col] == 0 or self.grid[row][col] == 1: \n self.removeTarget()\n self.setTarget(row, col)", "def test_grid_layer_update_grid_source(self):\n pass", "def shuffle(self) -> None:\n combined = list(zip(self.source, self.target))\n random.shuffle(combined)\n self.source[:], self.target[:] = zip(*combined)", "def _remember_source(self, src_key: str, dest_key: str) -> None:\n\n src_map = self.source_map.setdefault(src_key, {})\n src_map[dest_key] = True", "def fill_src(self):\n src = self.wavelet_generator()\n\n nstot = self.src_pos.shape[1]\n self.src = np.stack([src] * nstot, 1)", "def scatter(self):\n if self._moving_between_tiles:\n self.__move_between_tiles()\n else:\n check_next_coord, jump = self._calculate_new_coord()\n self.__update_target_tile_scatter(check_next_coord)\n if self.__coord_dict.get(check_next_coord).is_wall() or self.__check_neighbours():\n path = self.astar.find_path(self._coord, self.__target_tile)\n self._direction = self.astar.dictionary[path[0]]\n if jump:\n self._set_on_opposite_side()\n self._moving_between_tiles = True\n self._draw_character(self._coord, self.__image)", "def tile_shift_up(self):\n self._tile_set.modified=True\n self._tile_set[self.current_tile_num].shift_up()\n self._ui.update_tile(self._tlayer, self._tile_set,\n self.current_tile_num, self.current_pal)", "def remove_source(src):\n src.stop()\n try:\n src.data.release_data_flag = 1\n src.cell_scalars_name = ''\n src.cell_tensors_name = ''\n src.cell_vectors_name = ''\n src.point_scalars_name = ''\n src.point_tensors_name = ''\n src.point_vectors_name = ''\n except AttributeError:\n pass\n src.start()\n src.stop()\n src.remove()", "def movetarget(self):\n x, y = self.target[0], self.target[1]\n neigh = [(nx, ny) for nx in [x - 1, x, x + 1] for ny in [y - 1, y, y + 1] if (nx, ny) != (x, y) if\n (nx, ny) in self.cells]\n nextstep = neigh[randint(0, len(neigh) - 1)]\n self.target = nextstep", "def insert(self, source):\n i = int(source.getIxx()*self._xSize/self._xMax + 0.5)\n j = int(source.getIyy()*self._ySize/self._yMax + 0.5)\n if i in range(0, self._xSize) and j in range(0, self._ySize):\n if i == 0 and j == 0:\n return\n\n self._psfImage.set(i, j, self._psfImage.get(i, j) + 1)\n\n if False:\n print \"Inserting %d at (%d, %d)\" % (source.getId(), i, j),\n print \"(%d, %d) (flux = %.0f), (%.1f %.1f)\" % (source.getXAstrom(), source.getYAstrom(),\n source.getPsfFlux(),\n source.getIxx(), source.getIyy())", "def drop_piece(self):\r\n self.last_update = time.time()\r\n self.pieces[0].x, self.pieces[0].y = self.pieces[3].x, self.pieces[3].y", "def _ensure_source(self, conf, source):\n if source not in conf[K_SOURCES]:\n conf[K_SOURCES][source] = deepcopy(self._sources_schema)", "def set_first_pixel_destination(self, dest):\n if dest.lower() == 'bl':\n self._first_pixel = self.__bottom_left\n return self\n elif dest.lower() == 'br':\n self._first_pixel = self.__bottom_right\n return self\n elif dest.lower() == 'tl':\n self._first_pixel = self.__top_left\n return self\n elif dest.lower() == 'tr':\n self._first_pixel = self.__top_right\n return self\n else:\n raise ImageError(\n \"'{0}' is not a valid pixel 
destination\".format(dest),\n 'pixel_dest_position'\n )", "def _reset_source(self):", "def toggle_snap_to_vertex():\r\n pass", "def toggle_snap_to_terrain():\r\n pass", "def clear(row):\n for i in range(self.cols):\n self.blocks.remove(Point(i,row))\n for j in range(row+1, self.rows):\n for i in range(self.cols):\n target = Point(i,j)\n destination = Point(i,j-1)\n if target in self.blocks:\n self.blocks.remove(target)\n self.blocks.add(destination)", "def test_grid_layer_get_grid_source(self):\n pass", "def next_source(self):\n was_playing = self._playing\n self.pause()\n self._timer.reset()\n\n if self._source:\n # Reset source to the beginning\n self.seek(0.0)\n self.source.is_player_source = False\n\n playlists = self._playlists\n if not playlists:\n return\n\n try:\n new_source = next(playlists[0])\n except StopIteration:\n self._playlists.popleft()\n if not self._playlists:\n new_source = None\n else:\n # Could someone queue an iterator which is empty??\n new_source = next(self._playlists[0])\n\n if new_source is None:\n self._source = None\n self.delete()\n self.dispatch_event('on_player_eos')\n else:\n old_audio_format = self._source.audio_format\n old_video_format = self._source.video_format\n self._source = new_source.get_queue_source()\n\n if self._audio_player:\n if old_audio_format == self._source.audio_format:\n self._audio_player.clear()\n self._audio_player.source = self._source\n else:\n self._audio_player.delete()\n self._audio_player = None\n if old_video_format != self._source.video_format:\n self._texture = None\n pyglet.clock.unschedule(self.update_texture)\n\n self._set_playing(was_playing)\n self.dispatch_event('on_player_next_source')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Toggles the Target Tile. If the target is not set, sets it. If the target is set, removes it if Target == (row, col), or moves it to (row, col) otherwise.
def toggleTarget(self, row, col): # if the target is not set, set it if self.getTarget() == (None, None): self.setTarget(row, col) # if the target is set else: # if the target is the current square, remove it if self.grid[row][col] == 3: self.removeTarget() # if the target is not the current square, remove it, and make current square the target elif self.grid[row][col] == 0 or self.grid[row][col] == 1: self.removeTarget() self.setTarget(row, col)
[ "def movetarget(self):\n x, y = self.target[0], self.target[1]\n neigh = [(nx, ny) for nx in [x - 1, x, x + 1] for ny in [y - 1, y, y + 1] if (nx, ny) != (x, y) if\n (nx, ny) in self.cells]\n nextstep = neigh[randint(0, len(neigh) - 1)]\n self.target = nextstep", "def move_target(self):\n old_loc = self.target\n new_locations = TARGET_MOVE_VECTORS + np.array(self.target)\n np.random.shuffle(new_locations)\n new_locations = list(map(tuple, new_locations))\n for new_loc in new_locations:\n if self.in_bounds(new_loc):\n self.target = new_loc\n self.last_move = (old_loc, new_loc)\n debug_print(f'[LANDSCAPE]: Target moved: {self.last_move}', 7)\n break", "def set_target(self, target, slew_only=False):\n target = katpoint.Target(target)\n target.body.compute(self.observer)\n return target", "def toggleSource(self, row, col):\n # if the source is not set, set it\n if self.getSource() == (None, None):\n self.setSource(row, col)\n # if the source is set\n else:\n # if the source is the current square, remove it\n if self.grid[row][col] == 2:\n self.removeSource()\n # if the source is not the current square, remove it, and make current square the source\n elif self.grid[row][col] == 0 or self.grid[row][col] == 1:\n self.removeSource()\n self.setSource(row, col)", "def select_target(self):\n # If there's a selected unit, outline it\n if self.sel_unit:\n pygame.gfxdraw.rectangle(\n self.screen,\n self.sel_unit.rect,\n SELECT_COLOR)\n \n # Mark potential targets\n for tile_pos in self._attackable_tiles:\n screen_pos = self.map.screen_coords(tile_pos)\n self.draw_reticle(screen_pos)", "def configureTargetXY(self, xTarget, yTarget, xTable, yTable, x1Table, y1Table):\r\n Li = (y1Table - yTable)\r\n\r\n x = abs(xTarget - xTable)\r\n y = abs(yTarget - yTable)\r\n\r\n ptx = x/Li\r\n pty = y/Li\r\n\r\n if ptx >= 2:\r\n ptx = 2\r\n\r\n if pty > 1:\r\n pty = 1\r\n \r\n\r\n self.game.target.targetPosX = self.game.SizeOfTable * ptx\r\n self.game.target.targetPosY = self.game.SizeOfTable * pty", "def turn_towards(self, target=None):\n\n # If no target, use opponent turtle's position\n if target == None:\n target = self.other_position\n\n # If given a specific heading, generate a coordinate to turn towards\n if type(target) == int or type(target) == float:\n target = (int(self.x + 10000*math.cos(math.radians(target))),\n int(self.y - 10000*math.sin(math.radians(target))))\n\n # Turn towards coordinates\n if type(target) == tuple:\n # Find relative heading to target\n turn = self.relative_heading_towards(target)\n turn = min(max(turn, -self.max_turn_speed), self.max_turn_speed)\n turn /= self.max_turn_speed\n\n # Turn in the needed direction\n self.left(turn)", "def mark_hit(self):\n self._board_object.update_tile(self._row, self._column, 'h')", "def setTextureTarget(self, target: 'int') -> \"void\":\n return _coin.SoGLDisplayList_setTextureTarget(self, target)", "def solve_row1_tile(self, target_col):\n assert self.row1_invariant(target_col), \"input error\"\n current_pos = self.current_position(1, target_col)\n # move to the position of target tile (0|X)\n solve_string = \"l\" * (target_col - current_pos[1]) + \"u\" * (1 - current_pos[0])\n if current_pos[0] == 0:\n solve_string += \"rdlur\" * (target_col - current_pos[1])\n else:\n solve_string += RIGHTWARD_UP * (target_col - current_pos[1] - 1)\n solve_string += \"ur\"\n\n #print solve_string\n self.update_puzzle(solve_string)\n assert self.row0_invariant(target_col), \"solver error\"\n return solve_string", "def adjust_move_target(self,idx,loc):\n 
self.crowd.adjustMoveTarget(idx,pyrecast.uintp_getitem(loc[0],0),loc[1])", "def drive_toward_target(self, target): # type: (Pose2D) -> None\n while True:\n if self.bumper_pressed is not None:\n raise CollisionException(self.bumper_pressed)\n dp, dtheta = self.get_deltas_to_target(target)\n if np.linalg.norm(dp) < self.position_threshold:\n return\n if np.linalg.norm(dp) < self.speed:\n return\n v = dp / np.linalg.norm(dp) * self.speed\n vtheta = dtheta if dtheta < self.angular_speed else np.sign(dtheta) * self.angular_speed\n self.publish_twist(v, vtheta)\n self.rate.sleep()", "def move_to(self, target):\n left = min(p.x for p in self)\n top = min(p.y for p in self)\n dx = target.x - left\n dy = target.y - top\n self.move(Point(dx,dy))", "def __clearTargets(self):\n log(\"MState __clearTargets\",4)\n for p in self.players:\n p.target = None\n self.mafia_target = None", "def move_target(self, target_position=None):\r\n\r\n current_x, current_y = self._get_target_location()\r\n\r\n # Move north with probability 0.25\r\n # Otherwise move in other directions with probabiliy 0.75\r\n if target_position is None:\r\n new_x, new_y = self.move_maze(1, position=(current_x, current_y), noise=0.75)\r\n else:\r\n new_x, new_y = target_position\r\n # Update rewards with new target location\r\n self.rewards[current_x, current_y] = 0\r\n self.rewards[new_x, new_y] = 1", "def draw(self, target):\n for offset in np.transpose(np.where(self.brush)):\n offset -= np.array(self.brush.shape) // 2\n coord = target + offset\n if coord[0] < self.board.shape[0] and coord[1] < self.board.shape[1]:\n self.board[coord[0], coord[1]] = 1", "def target(self,p,target_option):\n log(\"MState target\",3)\n if not self.time == \"Night\":\n log(\"{} couldn't target {}: Not Night\".format(p,target_option))\n return False\n\n # Check if the player is represented as an object or a string\n try:\n player = self.getPlayer(p)\n except Exception as e:\n log(\"Couldn't find target from {}: {}\".format(p,e))\n return False\n try:\n target_number = ord(target_option)-ord('A')\n if target_number == len(self.players):\n target = self.null\n elif target_number == None:\n target = None\n else:\n target = self.players[target_number]\n player.target = target\n except Exception as e:\n log(\"{} failed to target {}: {}\".format(player.id, target_option, e))\n return False\n\n if player.role == \"MILKY\" and player.target == player:\n self.mainComm.send(\"Ewwww please don't milk yourself in front of me\", player.id)\n player.target = None\n return True\n\t\t\t\n self.mainComm.send(\"It is done, targeted {}\".format(target_option),player.id)\n\n if type(target) == Player:\n target_id = target.id\n target_role = target.role\n else:\n target_id = \"_\"\n target_role = \"_\"\n\n self.record(' '.join([\"TARGET\",player.id,player.role,target_id,target_role]))\n # Check if Night is over\n self.__checkToDay()\n return True", "def replan_move_target(self,idx,loc):\n self.crowd.requestMoveTargetReplan(idx,pyrecast.uintp_getitem(loc[0],0),loc[1])", "def query_tile(self, coord):\n if self.target == coord:\n\n roll = random.uniform(0, 1)\n if roll > self.prob_map[coord]:\n targ_ter = self.terrain_names[int(self.t_id_map[self.target])]\n debug_print((f'[LANDSCAPE]: found the target in {targ_ter} '\n f'after {self.misses} searches in the tile'), 6)\n self.misses = 0\n return True\n self.misses += 1\n # Moves the target if not found and if enabled\n if self.is_target_moving:\n self.move_target()\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if Node = (row, col) is the target, else returns false.
def isTarget(self, node): return (node == self.target)
[ "def toBeLabeled(self, row, col):\n\n #must check to see if the square is black. If the square is black than\n #it cannot be labeled\n if self._grid[row][col].isBlack():\n return False\n\n #must check to see if the square is furthest left or on the top row,\n #which will always be white\n if row == 0 or col == 0:\n return True\n\n #must check to see if there are black squares above or below\n if self._grid[row - 1][col].isBlack() or\\\n self._grid[row][col - 1].isBlack():\n return True\n\n #If the second conditions are not met we return false\n return False", "def row1_invariant(self, target_col):\n if self.current_position(0, 0) != (1, target_col):\n return False\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.current_position(row, col) != (row, col):\n print 'lower rows not satisify the solved position'\n return False\n for col in range(target_col + 1, self.get_width()):\n if self.current_position(1, col) != (1, col) or self.current_position(0, col) != (0, col):\n print 'right tiles at the target row not satisfy the solved position'\n return False\n return True", "def check_on_target(self, bx, by):\n on_target = False\n for (ty, tx) in self.tgt_positions:\n # if the box is on a target, this is fine\n if (bx, by) == (tx, ty):\n on_target = True\n return on_target", "def row0_invariant(self, target_col):\n if self.current_position(0, 0) != (0, target_col):\n return False\n for row in range(2, self.get_height()):\n for col in range(self.get_width()):\n if self.current_position(row, col) != (row, col):\n print 'lower rows not satisify the solved position'\n return False\n for col in range(target_col, self.get_width()):\n if self.current_position(1, col) != (1, col):\n print 'right tiles at the row 1 not satisfy the solved position'\n return False\n for col in range(target_col+1, self.get_width()):\n if self.current_position(1, col) != (1, col):\n print 'right tiles at the row 0 not satisfy the solved position'\n return False\n return True", "def check_match(self, row, col, a_terrain):\n terrain = self.get_coord(row, col)\n if terrain is None:\n return False\n else:\n return terrain[0] == a_terrain", "def isMineAt(board, row, col):\n if board[row][col] == \"X\":\n return True\n else:\n return False", "def in_maze(self,node):\r\n return (0 <= node[0] < self.size) and (0 <= node[1] < self.size)", "def row1_invariant(self, target_col):\r\n # Generates solved puzzle\r\n solved_puzzle = [[col + self.get_width() * row\r\n for col in range(self.get_width())]\r\n for row in range(self.get_height())] \r\n # Check whether tile zero is at (1,j)\r\n zero_row, zero_col = self.current_position(0, 0)\r\n if (zero_row != 1) or (zero_col != target_col): \r\n return False\r\n # Check that all tiles in row 1 to the right of position (1, j) are positioned\r\n # at their solved location\r\n if (target_col < self.get_width()):\r\n for col in range(target_col + 1, self.get_width()):\r\n if solved_puzzle[1][col] != self.get_number(1, col):\r\n return False\r\n # Check that all tiles in rows > 1 are positioned at their solved location\r\n if (target_col != 1): \r\n for row in range(2, self.get_height()):\r\n for col in range(self.get_width()):\r\n if solved_puzzle[row][col] != self.get_number(row, col):\r\n return False \r\n return True", "def is_out(board: Board, row: int, col: int) -> bool:\n return row < 0 or row >= board.row or col < 0 or col >= board.col", "def check_visited_position(self):\n return (self.cur_i, self.cur_j) in self.visited_positions", "def 
checkCell(board, i, j):\n\n move_i = []\n move_j = []\n board_size = len(board)\n if i > 0:\n move_i.append(-1)\n move_j.append(0)\n if i < (board_size - 1):\n move_i.append(1)\n move_j.append(0)\n if j > 0:\n move_j.append(-1)\n move_i.append(0)\n if j < (board_size - 1):\n move_j.append(1)\n move_i.append(0)\n for k in range(len(move_i)):\n if board[i + move_i[k]][j + move_j[k]] == board[i][j]:\n return True\n return False", "def check_color_tile(self, inp_row, inp_col, inp_color):\n if self.list_tiles[inp_row][inp_col] is not None:\n tile = self.list_tiles[inp_row][inp_col]\n if tile.color == inp_color:\n return True\n return False", "def _exit_found(self, row: int, col: int):\n return row == self._exit.row and col == self._exit.col", "def has_move(self, x, y):\n origin = x, y\n return any(self.get_color(x, y) == EMPTY for x, y in self.edge_neighbours(origin))", "def on_board(s, (x,y)):\n\t\tif x < 0 or y < 0 or x > 7 or y > 7:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def check_if_next_tile_is_hit(self):\n board = self._board_object.get_board()\n if self._direction == 'down':\n if board[self._row + 1][self._column] == 'a' or board[self._row + 1][self._column] == 'h':\n return True\n if self._direction == 'up':\n if board[self._row - 1][self._column] == 'a' or board[self._row - 1][self._column] == 'h':\n return True\n if self._direction == 'right':\n if board[self._row][self._column + 1] == 'a' or board[self._row][self._column + 1] == 'h':\n return True\n if self._direction == 'left':\n if board[self._row][self._column - 1] == 'a' or board[self._row][self._column - 1] == 'h':\n return True\n return False", "def test_cell(puzzle, i, j, val):\n for x, y in all_mates(i, j):\n if puzzle[x][y] == val:\n return False\n return True", "def movable(self, target):\n if target in [self.up, self.right, self.down, self.left]:\n return True\n return False", "def adjacent_enemy(inp, rowI, colI, enemy):\n if any(\n x[0] == enemy\n for x in [\n inp[rowI + 1][colI],\n inp[rowI - 1][colI],\n inp[rowI][colI + 1],\n inp[rowI][colI - 1],\n ]\n ):\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a matrix of integers from a matrix of Nodes. Used after the backtracking algorithm is done to return a more common format of the sudoku board.
def node_to_arr(node_matrix): sol = [] for y in range(9): row = [] for x in range(9): row.append(node_matrix[y][x].value) sol.append(row) return sol
[ "def matrix_adjacency_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for i in nodes:\r\n row = []\r\n for j in nodes:\r\n if [i, j] in graph:\r\n row.append(1)\r\n else:\r\n row.append(0)\r\n matrix.append(row)\r\n\r\n return matrix", "def matrix_incidence_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for node in nodes:\r\n row = []\r\n for j in graph:\r\n if len(edge) > 1:\r\n if node == edge[0] and node == edge[1]:\r\n row.append(2)\r\n elif node == edge[0]:\r\n row.append(1)\r\n elif node == edge[1]:\r\n row.append(-1)\r\n else:\r\n row.append(0)\r\n else:\r\n row.append(0)\r\n\r\n matrix.append(row)\r\n\r\n return matrix", "def get_adjacency_matrix(self):\n \n #initialize an empty 2D list\n length = len(self.nodes)\n matrix = [x[:] for x in [[0]*length]*length]\n for edge in self.edges:\n fromIndex = self.nodes.index(edge.node_from)\n toIndex = self.nodes.index(edge.node_to)\n matrix[fromIndex][toIndex] = edge.value\n return matrix", "def board_to_matrix(board, rows, columns):\n return np.array(board).reshape(rows, columns)", "def constructNodeConnectivityMatrix(edges):\n\n # First get a list of nodes in graph\n nodes = []\n for edge in edges:\n for node in range(2):\n if edge[node] not in nodes:\n nodes.append(edge[node])\n\n\n # Initialise empty connectivity matrix\n connectivity_matrix = []\n for row in range(len(nodes)):\n connectivity_matrix.append([0] * len(nodes))\n\n # Iterate over each edge. Add edge to matrix\n for edge in edges:\n connectivity_matrix[edge[0]][edge[1]] = 1\n connectivity_matrix[edge[1]][edge[0]] = 1\n\n return connectivity_matrix", "def generateMatrix(self, n: int) -> List[List[int]]:\n i, j, curr = 0, 0, 1\n direction = (0, 0)\n visited = set()\n matrix = [[1] * n for _ in range(n)]\n\n while len(visited) < n ** 2:\n y, x = i + direction[0], j + direction[1]\n if 0 <= y < n and 0 <= x < n and (y, x) not in visited:\n matrix[y][x] = curr\n visited.add((y, x))\n i, j = y, x\n curr += 1\n else:\n if direction == (0, 1):\n direction = (1, 0)\n elif direction == (1, 0):\n direction = (0, -1)\n elif direction == (0, -1):\n direction = (-1, 0)\n else:\n direction = (0, 1)\n\n if direction == (0, 0):\n direction = (0, 1)\n return matrix", "def get_adjacency_matrix(self):\n l = len(self.nodes) + 1\n edgeArray = np.zeros( (l,l), dtype=np.int)\n #print edgeArray\n for edge in self.edges:\n edgeArray[edge.node_from.value][edge.node_to.value] = edge.value\n return edgeArray.tolist()", "def _get_matrix(self):\n for row in self.active_sheet.rows:\n row_container = []\n for cell in row:\n row_container.append(cell.value)\n self.matrix.append(tuple(row_container))", "def _create_nodes(self):\n nodes = []\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[i])):\n value = self.matrix[i][j]\n\n # Nothing to do\n if value == 0:\n continue\n\n node = None\n\n # convert all 1's to DancingNode\n if value == 1:\n node = DancingNode(value)\n\n # convert all column headers to ColumnNode\n if value != 1 and value != 0:\n node = ColumnNode(value)\n\n node.row_id = i\n node.column_id = j\n nodes.append(node)\n self.matrix[i][j] = node\n\n return nodes", "def adjlist2adjmat(a):\n n = array(sum(a + [[]])).max() + 1\n m = zeros([n, n], int)\n for i, r in enumerate(a):\n for l in r:\n m[i, l] = m[i, l] + 1\n return m", "def matrix_to_relations(matrix):\n relations = []\n for row_id, row in enumerate(matrix):\n for col_id, column in enumerate(row):\n if column == 1:\n relations.append((row_id, col_id))\n return relations", 
"def _initialize(self, matrix: List[List[int]], column_names: Optional[Iterable[AnyStr]] = None) -> None:\n if not matrix:\n return\n\n if column_names is None:\n num_columns = len(matrix[0])\n if num_columns <= 26:\n column_names = (chr(ord('A') + i) for i in range(num_columns))\n else:\n column_names = (str(i + 1) for i in range(num_columns))\n\n # create the column list headers\n prev_column = self.root\n for column_name in column_names:\n column = Column(name=column_name, left=prev_column)\n prev_column.right = column\n prev_column = column\n prev_column.right = self.root\n self.root.left = prev_column\n\n # create the nodes\n prev_row_nodes = {column: column for column in self.traverse_right(self.root)}\n for i, row in enumerate(matrix):\n node = None\n prev_col_node = None\n for column, value in zip(self.traverse_right(self.root), row):\n if value == 1:\n node = Node(column)\n prev_row_node = prev_row_nodes[column]\n node.up = prev_row_node\n prev_row_node.down = node\n prev_row_nodes[column] = node\n if prev_col_node is not None:\n node.left = prev_col_node\n prev_col_node.right = node\n prev_col_node = node\n if node is not None:\n if node.left is None:\n first = node\n else:\n first = node.left\n while first.left is not None:\n first = first.left\n node.right = first\n first.left = node\n\n for column, node in prev_row_nodes.items():\n node.down = column\n column.up = node", "def cria_matriz(numero_linhas, numero_colunas):\n\n matriz = [] # lista vazia\n for i in range(numero_linhas):\n #cria a linha i\n linha = [] #lista vazia\n for j in range(numero_colunas):\n valor = int(input(\"Digite o elemento [\" + str(i) + \"][\" + str(j) + \"]: \"))\n linha.append(valor)\n\n #adicona linha à matrix\n matriz.append(linha)\n \n return matriz", "def ceros_forma1(m, n):\n matriz = list()\n for r in range(m):\n renglon = list()\n for i in range(n):\n renglon.append(0)\n matriz.append(renglon)\n return matriz", "def matrix_to_board(matrix):\n return matrix.reshape(1, -1).tolist()[0]", "def make_number(self, matrix):\n for key, val in matrix.items():\n number = val[0, 0]\n break\n return number", "def identity_matrix(n):\n m = np.zeros((n,n), dtype=np.int32)\n for i in range(n): m[i,i] = 1\n return m", "def map_to_graph(matrix):\n graph = {element: unit_neighbors(element, matrix) for element in unit_elements(matrix)}\n\n return graph", "def generate_cnk_matrix(self):\r\n total = self.rator_number\r\n cnk_matrix = np.zeros((total - 1, total))\r\n\r\n for column in range(total):\r\n cnk_matrix[:, column] = np.concatenate((np.where(self.combination_list[:, 0] == column)[0],\r\n np.where(self.combination_list[:, 1] == column)[0]))\r\n\r\n return cnk_matrix.astype(int)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the Nei & Gojobori (1986) section of the resuls. Nei_Gojobori results are organized in a lower triangular mattrix, with the sequence names labeling
def parse_ng86(lines, results): sequences = [] for line in lines: # Find all floating point numbers in this line line_floats_res = re.findall("-*\d+\.\d+", line) line_floats = [float(val) for val in line_floats_res] matrix_row_res = re.match("(.+)\s{5,15}",line) if matrix_row_res is not None: seq_name = matrix_row_res.group(1).strip() sequences.append(seq_name) results[seq_name] = {} for i in range(0, len(line_floats), 3): NG86 = {} NG86["omega"] = line_floats[i] NG86["dN"] = line_floats[i+1] NG86["dS"] = line_floats[i+2] results[seq_name][sequences[i//3]] = {"NG86":NG86} results[sequences[i//3]][seq_name] = {"NG86":NG86} return (results, sequences)
[ "def parse_others(lines, results, sequences):\n # Example:\n # 2 (Pan_troglo) vs. 1 (Homo_sapie)\n\n # L(i): 143.0 51.0 28.0 sum= 222.0\n # Ns(i): 0.0000 1.0000 0.0000 sum= 1.0000\n # Nv(i): 0.0000 0.0000 0.0000 sum= 0.0000\n # A(i): 0.0000 0.0200 0.0000\n # B(i): -0.0000 -0.0000 -0.0000\n # LWL85: dS = 0.0227 dN = 0.0000 w = 0.0000 S = 45.0 N = 177.0\n # LWL85m: dS = -nan dN = -nan w = -nan S = -nan N = -nan (rho = -nan)\n # LPB93: dS = 0.0129 dN = 0.0000 w = 0.0000\n seq_name1 = None\n seq_name2 = None\n for line in lines:\n comp_res = re.match(\"\\d+ \\((.+)\\) vs. \\d+ \\((.+)\\)\", line)\n if comp_res is not None:\n seq_name1 = comp_res.group(1)\n seq_name2 = comp_res.group(2)\n elif seq_name1 is not None and seq_name2 is not None:\n if \"dS =\" in line:\n stats = {}\n line_stats = line.split(\":\")[1].strip()\n stats_split = line_stats.split()\n for i in range(0, len(stats_split), 3):\n stat = stats_split[i].strip(\"()\")\n if stat == \"w\":\n stat = \"omega\"\n value = stats_split[i+2].strip(\"()\")\n try:\n stats[stat] = float(value)\n except:\n stats[stat] = None\n if \"LWL85:\" in line:\n results[seq_name1][seq_name2][\"LWL85\"] = stats\n results[seq_name2][seq_name1][\"LWL85\"] = stats\n elif \"LWL85m\" in line:\n results[seq_name1][seq_name2][\"LWL85m\"] = stats\n results[seq_name2][seq_name1][\"LWL85m\"] = stats\n elif \"LPB93\" in line:\n results[seq_name1][seq_name2][\"LPB93\"] = stats\n results[seq_name2][seq_name1][\"LPB93\"] = stats\n return results", "def double_helix_parser(input_file, output_file, helicies_length = 6, helix_gap = 3, pro_eitherside = 3):\n res_no_l = [] # for residue names \n res_name_l = [] # for amino acid names\n sec_str_l = [] # for sec structure prediction\n\n two_helix_l = [] # contains a list aminoacids (also a list)\n\n # Extracts the residue no, amino acid and secstr and signs to variables\n rx_seq = re.compile(r\"^(\\w+?)\\s+?(\\w+?)\\s+?(\\S)\", re.MULTILINE)\n text = fileread(input_file)\n\n\n # assign the matched groups in the text to the res_no_l, res_name_l and sec_str_str\n for match in rx_seq.finditer(text):\n res_no, res_name, sec_str = match.groups()\n\n res_no_l.append(res_no)\n res_name_l.append(res_name)\n sec_str_l += sec_str\n\n\n # creates dictionaries for each with the chain as the key\n chains_sec_str_d = keychain_value_str(res_no_l, sec_str_l)\n chains_res_no_d = keychain_value_list(res_no_l, res_no_l)\n chains_res_name_d = keychain_value_list(res_no_l, res_name_l)\n\n\n\n # which a Pro is found a in the res_name_d[chain] its secstr in sec_str_d is replaced with a P\n # We will then search for this P later on \n\n counter = 0 \n for chain in chains_res_name_d:\n #print(chains_res_name_d[chain])\n counter = 0 \n for residue in chains_res_name_d[chain]:\n #print(chains_res_name_d[chain][counter])\n if residue == 'PRO':\n chains_sec_str_d[chain] = chains_sec_str_d[chain][:counter] + 'P' + chains_sec_str_d[chain][counter + 1:]\n #print(chains_res_no_d[chain][counter])\n counter += 1 \n\n # only adds if a proline is found in the gap\n # contains 2 groups, the 1st group being the whole helix and group 2 being the gap\n for x in chains_sec_str_d:\n \n regex = \"([h|H]{6,}(?:.?){1}(P)(?:.?){1}[h|H]{6,})\"\n p = re.compile(r\"\" +regex +\"\")\n\n # if one is found it prints out the residues numbers of that helix\n for match in p.finditer(chains_sec_str_d[x]):\n # adjusted to check for Proline around the gap 1 before and 1 after\n two_helix_l += [chains_res_no_d[x][ (match.start(1)) : (match.end(1)) ]]\n match_groups 
=(match.groups())\n\n # finds the location of the proline for mutation using mutmod\n pro_res = (x + str(match.start(2)))\n print(pro_res + \" :\" + match.group(2))\n\n\n tempstr = \"\"\n\n for protein in two_helix_l:\n for residue in protein:\n tempstr += (residue + \"\\n\")\n tempstr +=(\"\\n\")\n\n\n output = open(output_file, 'w')\n output.write(tempstr)\n output.close()\n #print('#####################')\n #print(tempstr)\n #print('#####################')", "def parse_rsa_data(rsa_outfile, ignore_hets=True):\n\n naccess_rel_dict = OrderedDict()\n\n with open(rsa_outfile, 'r') as f:\n for line in f:\n if line.startswith('RES'):\n res_name = line[4:7]\n chain_id = line[8]\n resseq = int(line[9:13])\n icode = line[13]\n res_id = (' ', resseq, icode)\n all_atoms_abs = line[16:22].strip()\n all_atoms_rel = line[23:28].strip()\n side_chain_abs = line[29:35].strip()\n side_chain_rel = line[36:41].strip()\n main_chain_abs = line[42:48].strip()\n main_chain_rel = line[49:54].strip()\n non_polar_abs = line[55:61].strip()\n non_polar_rel = line[62:67].strip()\n all_polar_abs = line[68:74].strip()\n all_polar_rel = line[75:80].strip()\n\n if all_atoms_rel =='N/A' and main_chain_rel =='N/A' and all_polar_rel =='N/A' and non_polar_rel =='N/A' and side_chain_rel =='N/A' and ignore_hets:\n continue\n\n naccess_rel_dict[(chain_id, res_id)] = {\n 'res_name' : res_name,\n 'all_atoms_abs' : ssbio.utils.conv_to_float(all_atoms_abs, inf_str='N/A'),\n 'all_atoms_rel' : ssbio.utils.conv_to_float(all_atoms_rel, inf_str='N/A'),\n 'side_chain_abs': ssbio.utils.conv_to_float(side_chain_abs, inf_str='N/A'),\n 'side_chain_rel': ssbio.utils.conv_to_float(side_chain_rel, inf_str='N/A'),\n 'main_chain_abs': ssbio.utils.conv_to_float(main_chain_abs, inf_str='N/A'),\n 'main_chain_rel': ssbio.utils.conv_to_float(main_chain_rel, inf_str='N/A'),\n 'non_polar_abs' : ssbio.utils.conv_to_float(non_polar_abs, inf_str='N/A'),\n 'non_polar_rel' : ssbio.utils.conv_to_float(non_polar_rel, inf_str='N/A'),\n 'all_polar_abs' : ssbio.utils.conv_to_float(all_polar_abs, inf_str='N/A'),\n 'all_polar_rel' : ssbio.utils.conv_to_float(all_polar_rel, inf_str='N/A')}\n\n return naccess_rel_dict", "def CMfinderParser(lines):\n for info, alignment, struct in RfamParser(lines,strict=False):\n struct = wuss_to_vienna(struct)\n pairs = struct.toPairs()\n return [alignment, pairs]", "def obtain_rel_dicts(result,numbers,chain_name,current_class,seq_pos,seq_pos_n,gpcr_pdb,gpcr_aa,gnum_classes_rel,multiple_chains,simplified=False,add_aa=False,seq_pdb=False,all_struc_num=False):\n chain_nm_seq_pos=\"\"\n rs_by_seg={1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: [], 10: [], 11: [], 12: [], 13: [], 14: [], 15: [], 16: [], 17: []}\n if multiple_chains:\n chain_nm_seq_pos=\":\"+chain_name\n pos_gnum = numbers[current_class]\n for pos in result:\n if pos[0] != \"-\": #Consider only num in the pdb\n db_pos=pos[1][1]\n if db_pos:\n gnum_or_nth=\"\"\n this_gnum = pos_gnum[db_pos][1]\n if this_gnum: #If exist GPCR num for this position\n if simplified:\n (chain_num,bw,gpcrdb)=re.split('\\.|x', this_gnum)\n this_gnum=chain_num+\"x\"+gpcrdb\n if add_aa:\n this_pdb=str(pos[0][1])+\"-\"+chain_name+\"-\"+pos_gnum[db_pos][0]\n else:\n this_pdb=str(pos[0][1])+\"-\"+chain_name\n else:\n if add_aa:\n this_pdb=[pos[0][1],chain_name,pos_gnum[db_pos][0]]\n else:\n this_pdb=[pos[0][1],chain_name]\n gpcr_pdb[this_gnum]=this_pdb\n gpcr_aa[this_gnum]=[pos_gnum[db_pos][0], chain_name]\n gnum_or_nth=this_gnum\n 
rs_by_seg[pos_gnum[db_pos][2]].append(pos[0][1]+chain_nm_seq_pos) #Chain!!\n if type(seq_pdb)==dict:\n seq_pdb[db_pos]={\"pdb\":[pos[0][1],chain_name],\"gnum\":gnum_or_nth}\n seq_pos[seq_pos_n][2]=gnum_or_nth\n seq_pos_n+=1\n #######\n seg_li=[]\n for seg in range(2,17):\n slen=len(rs_by_seg[seg])\n if slen==0:\n seg_li.append([])\n elif slen==1:\n seg_li.append([rs_by_seg[seg][0]])\n else:\n seg_li.append([rs_by_seg[seg][0],rs_by_seg[seg][-1]])\n #######\n other_classes=list({\"A\",\"B\",\"C\",\"F\"} - set(current_class))\n other_classes_ok=[]\n for name in other_classes:\n if numbers[name]:\n other_classes_ok.append(name)\n gnum_classes_rel[name]={}\n for pos, (res,gnum,segm) in pos_gnum.items():\n if gnum:\n for class_name in other_classes_ok:\n gnum_altclass=numbers[class_name][pos][1]\n if gnum_altclass:\n if all_struc_num:\n (chain_num,bw,gpcrdb)=re.split('\\.|x',gnum)\n my_num=chain_num+\"x\"+gpcrdb\n (achain_num,abw,agpcrdb)=re.split('\\.|x',gnum_altclass)\n alt_gnum=achain_num+\"x\"+agpcrdb\n else:\n my_num=gnum.split(\"x\")[0]\n alt_gnum=gnum_altclass.split(\"x\")[0]\n gnum_classes_rel[class_name][alt_gnum]=my_num\n if type(seq_pdb)==dict:\n return(gpcr_pdb,gpcr_aa,gnum_classes_rel,other_classes_ok,seq_pos,seq_pos_n,seg_li,seq_pdb)\n else:\n return(gpcr_pdb,gpcr_aa,gnum_classes_rel,other_classes_ok,seq_pos,seq_pos_n,seg_li)", "def _parse_residue(self, residue):\n \n # Filter Element Nodes\n childs = [ child for child in residue.childNodes if child.nodeType == child.ELEMENT_NODE ]\n \n # Parse info out\n resi = int(childs[0].firstChild.data.strip())\n resn = childs[1].firstChild.data.strip()\n icode = childs[3].firstChild.data\n chain = childs[4].firstChild.data.strip()\n model = int(childs[5].firstChild.data.strip())\n atoms = childs[6:]\n \n # Output\n \n return {'name': resn, 'number': resi,\n 'icode': icode, 'chain': chain, \n 'model': model, 'atoms': atoms}", "def _parse_molecule(lines, file_extension):\n if file_extension == '.pdb':\n #Extract residue information and assign column\n i = 0\n column_for_res = {}\n res_for_column = {}\n name_for_res = {}\n atoms_in_res = {}\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. 
\" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n if atom_name in ['CA', 'CB', 'C', 'N', 'O']:\n altloc = line[16]\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n resname = line[17:20]\n if resname in list(CONVERT_RES_NAMES):\n resname = CONVERT_RES_NAMES[resname]\n if res not in list(column_for_res):\n column_for_res[res] = i\n res_for_column[i] = res\n name_for_res[res] = resname\n atoms_in_res[res] = set()\n i += 1\n atoms_in_res[res].add(atom_name)\n\n #Extract coordinates and atoms information\n alphas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n betas = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n carbons = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n nitrogens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n oxygens = [[0.0, 0.0, 0.0] for i in range(0, len(list(column_for_res)))]\n side_chains = []\n coords_array = [] #For calculate grid size\n\n for line in lines:\n record_type = line[0:6]\n if record_type == \"ATOM \":\n atom_fullname = line[12:16]\n # get rid of whitespace in atom names\n split_list = atom_fullname.split()\n if len(split_list) != 1:\n # atom name has internal spaces, e.g. \" N B \", so\n # we do not strip spaces\n atom_name = atom_fullname\n else:\n # atom name is like \" CA \", so we can strip spaces\n atom_name = split_list[0]\n\n chainid = line[21]\n resid = line[22:26].split()[0]\n res = str(resid) + \":\" + str(chainid)\n\n # atomic coordinates\n try:\n x = float(line[30:38])\n y = float(line[38:46])\n z = float(line[46:54])\n except Exception:\n raise Exception(\"Invalid or missing coordinate(s) at \\\n residue %s, atom %s\" % (res, name))\n coord = [x, y, z]\n\n if atom_name == \"CA\":\n # Coordinates for the grid\n coords_array.append(coord)\n # Coordinates for searching sites\n alphas[column_for_res[res]] = coord\n elif atom_name == \"CB\":\n # Coordinates for searching sites\n betas[column_for_res[res]] = coord\n elif atom_name == \"C\":\n # Coordinates for searching sites\n carbons[column_for_res[res]] = coord\n elif atom_name == \"N\":\n # Coordinates for searching sites\n nitrogens[column_for_res[res]] = coord\n elif atom_name == \"O\":\n # Coordinates for searching sites\n oxygens[column_for_res[res]] = coord\n else: # Atom belongs to a side-chain\n # Coordinates for discarding clashes\n side_chains.append(coord)\n\n coords_array = np.array(coords_array)\n centroid = np.mean(coords_array, axis=0)\n max_distance = np.max(np.linalg.norm(coords_array - centroid, axis=1)) \\\n + DIST_PROBE_ALPHA['ALL'][1]\n\n alphas = np.array(alphas)\n betas = np.array(betas)\n carbons = np.array(carbons)\n nitrogens = np.array(nitrogens)\n oxygens = np.array(oxygens)\n side_chains = np.array(side_chains)\n return centroid, max_distance, alphas, betas, carbons, nitrogens, \\\n oxygens, column_for_res, res_for_column, name_for_res, \\\n atoms_in_res, side_chains", "def getMRnaCodon(i, sequence):\n return self.getMRna(sequence)[i * 3:i * 3 + 3]", "def enumerate_primers(target_file_name,\n fwd_starts,\n rev_starts,\n hairpin_tm_max = 30, \n primer_conc = 200, \n homo_tm_max = 30,\n kmer_sizes = [18, 19, 20, 21, 23, 24, 25, 26, 27, 28],\n tm_min = 55,\n tm_max = 60,\n min_occurrence = 10,\n no_3_T = True,\n no_poly_3_GC = True,\n no_poly_run = True,\n max_degen = 60,\n mv_conc=50, \n dv_conc=1.5,\n look_ahead = 50):\n records = 
read_fasta(target_file_name)\n seqs_all = [str(record.seq) for record in records]\n # Enumerate oligos\n fwd_unique = enumerate_oligos(starts = fwd_starts,\n kmer_sizes = kmer_sizes, \n seqs_all = seqs_all,\n look_ahead = look_ahead)\n rev_unique = enumerate_oligos(starts = rev_starts,\n kmer_sizes = kmer_sizes, \n seqs_all = seqs_all,\n look_ahead = look_ahead)\n # Filter oligos\n fwd_oligos = filter_oligos(fwd_unique, \n rev = False, \n hairpin_tm_max = hairpin_tm_max, \n homo_tm_max = homo_tm_max, \n tm_max = tm_max, \n tm_min = tm_min,\n min_occurrence = min_occurrence, \n primer_conc = primer_conc,\n no_3_T = no_3_T, \n no_poly_3_GC = no_poly_3_GC,\n no_poly_run = no_poly_run,\n max_degen = max_degen,\n mv_conc = mv_conc, \n dv_conc = dv_conc)\n rev_oligos = filter_oligos(rev_unique, \n rev = True, \n hairpin_tm_max = hairpin_tm_max, \n homo_tm_max = homo_tm_max, \n tm_max = tm_max, \n tm_min = tm_min,\n min_occurrence = min_occurrence, \n primer_conc = primer_conc,\n no_3_T = no_3_T, \n no_poly_3_GC = no_poly_3_GC, \n no_poly_run = no_poly_run,\n max_degen = max_degen,\n mv_conc = mv_conc, \n dv_conc = dv_conc)\n # Remove redundancy\n fwd_unique = pandas.Series({oligo:fwd_unique[oligo] for oligo in fwd_oligos})\n fwd_reduced = discard_redundant(fwd_unique,\n max_degen = max_degen, \n rev = False, \n primer_conc = primer_conc, \n verbose = False,\n min_diff = 0,\n mv_conc = mv_conc,\n dv_conc = dv_conc)\n fwd_unique = fwd_reduced[\"oligo_series\"]\n fwd_cover = fwd_unique.map(len).sort(inplace = False, ascending = False)\n rev_unique = pandas.Series({oligo:rev_unique[oligo] for oligo in rev_oligos})\n rev_reduced = discard_redundant(rev_unique,\n max_degen = max_degen, \n rev = True, \n primer_conc = primer_conc, \n verbose = False,\n min_diff = 0,\n mv_conc = mv_conc,\n dv_conc = dv_conc)\n redundancy = {\"rev\":rev_reduced[\"redundant\"], \"fwd\":fwd_reduced[\"redundant\"]}\n # Calculate coverage\n rev_unique = rev_reduced[\"oligo_series\"]\n rev_cover = rev_unique.map(len).sort(inplace = False, ascending = False)\n # Make all possible combinations of primers as pairs\n pairs = enumerate_pairs(fwd_unique, rev_unique)\n if not pairs:\n return\n all_pairs = set()\n for pair in pairs.values():\n all_pairs = all_pairs.union(pair)\n all_fwd = set()\n for fwd in fwd_unique.values:\n all_fwd = all_fwd.union(fwd)\n all_rev = set()\n for rev in rev_unique.values:\n all_rev = all_rev.union(rev)\n seqs_detected = all_fwd.intersection(all_rev)\n # Report results\n print \"Coverage of all fwd: %d \" % len(all_fwd)\n print \"Coverage of all rev: %d \" % len(all_rev)\n print \"Joint coverage of fwd and rev: %d\" % len(all_fwd.intersection(all_rev))\n print \"Max possible coverage: %d\" % len(all_pairs)\n print \"Number of Foward Oligos: %d\" % len(fwd_unique)\n print \"Number of Reverse Oligos: %d\" % len(rev_unique)\n return (fwd_unique, rev_unique, redundancy, pairs, seqs_detected)", "def generate_read_list(reads, MUML): \n name_list = []\n read_list = []\n tempstr = \"\"\n for i in range(len(reads)):\n if \"#SequenceCount\" in reads[i]:\n sc = int(reads[i].split()[1])\n if reads[i][0] == \"#\" or reads[i][0] == \"=\":\n continue\n if reads[i][0] == \">\":\n name_list.append(reads[i][:-1])\n # print(reads[i])\n elif i == len(reads) - 2:\n tempstr = tempstr+reads[i][:-1]\n read_list.append([i for i in re.split(\"A|C|T|G|-\",tempstr) if i] )\n tempstr = \"\"\n elif reads[i+1][0] == \">\":\n tempstr = tempstr+reads[i][:-1]\n read_list.append([i for i in re.split(\"A|C|T|G|-\",tempstr) if i] )\n 
tempstr = \"\"\n elif reads[i+1][0] == \"=\":\n tempstr = tempstr+reads[i][:-1]\n read_list.append([i for i in re.split(\"A|C|T|G|-\",tempstr) if i] )\n tempstr = \"\"\n else:\n tempstr = tempstr+reads[i][:-1]\n final_reads = []\n for i in range(len(read_list)):\n if i % sc == 0:\n final_reads+=read_list[i]\n clusters = {}\n for i in range(len(final_reads)):\n if len(final_reads[i]) > MUML:\n clusters.update({\"cluster\"+str(i):final_reads[i]})\n return clusters", "def to_mrna(seq):\n start_codon = \"AUG\"\n stop = [\"UAG\", \"UGA\", \"UAA\"]\n start_positions = []\n final_mrnas = []\n i = 0\n while i < len(seq) - 2:\n if seq[i:i+3] == start_codon: # At start codon\n start_positions.append(i)\n i += 3\n\n for pos in start_positions:\n mrna = \"\"\n i = pos\n is_orf = True\n while i < (len(seq)-2) and is_orf:\n if seq[i:i+3] in stop: # Stop codon reached\n is_orf = False\n final_mrnas.append(mrna)\n else:\n mrna += seq[i:i+3]\n i += 3\n\n return final_mrnas", "def renumera_atomos(res,total_provisional):\n\t\n\tupdated_total = total_provisional + 1\n\tres_renum = ''\n\t\n\tfor atomo in res.split(\"\\n\"):\n\t\tif(atomo == ''): continue\n\t\tres_renum += \"ATOM %4d %s\\n\" % (updated_total,atomo[12:])\n\t\tupdated_total += 1\n\treturn (res_renum,updated_total)", "def parse_nk(content):\n ret = defaultdict()\n for line in content.split('\\n')[:-1]:\n values = line.split(' ')\n try:\n ret[str(values[0])][values[1]] = long(values[2])\n except KeyError:\n ret[values[0]] = {}\n ret[values[0]][values[1]] = long(values[2])\n except IndexError:\n pass\n return ret", "def parse_cmm_data(file_path):\n\toutput = {'specimen_code':'',\n\t\t\t 'datum_flatness':0,\n\t\t\t 'col_1_dist':0,\n\t\t\t 'col_1_flatness':0,\n\t\t\t 'col_1_parallelism':0,\n\t\t\t 'col_2_dist':0,\n\t\t\t 'col_2_flatness':0,\n\t\t\t 'col_2_parallelism':0,\n\t\t\t 'col_3_dist':0,\n\t\t\t 'col_3_flatness':0,\n\t\t\t 'col_3_parallelism':0,\n\t\t\t 'col_4_dist':0,\n\t\t\t 'col_4_flatness':0,\n\t\t\t 'col_4_parallelism':0}\n\n\tif not type(file_path) == str:\n\t\traise TypeError('Expecting argument of type string.')\n\n\twith open(file_path) as fs:\n\t\toutput['specimen_code'] = re.search('(\\d-\\d{6}-\\d).txt',fs.name).group(1)\n\t\tfile_contents = fs.read()\n\t \n\tpat_1 = r'==> Plane \\(1\\)\\n\\.\\.: Flatness\\nFlatness\\s+(\\d\\.\\d+)'\n\tpat_3 = r'==> Plane \\(3\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\tpat_4 = r'==> Plane \\(4\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\tpat_5 = r'==> Plane \\(5\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\tpat_6 = r'==> Plane \\(6\\)\\n\\.\\.: Plane\\nZ\\s+(\\d\\.\\d+)\\nFlatness\\s+(\\d\\.\\d+)\\n\\n\\.\\.: Parallelism\\n.*\\nParallelism\\s+(\\d\\.\\d+)'\n\n\tre_1 = re.compile(pat_1)\n\tre_3 = re.compile(pat_3)\n\tre_4 = re.compile(pat_4)\n\tre_5 = re.compile(pat_5)\n\tre_6 = re.compile(pat_6)\n\t\n\tmatch_obj = re.search(re_1,file_contents)\n\toutput['datum_flatness'] = match_obj.group(1)\n\n\tmatch_obj = re.search(re_3,file_contents)\n\toutput['col_1_dist'] = match_obj.group(1)\n\toutput['col_1_flatness'] = match_obj.group(2)\n\toutput['col_1_parallelism'] = match_obj.group(3)\n\n\tmatch_obj = re.search(re_4,file_contents)\n\toutput['col_2_dist'] = match_obj.group(1)\n\toutput['col_2_flatness'] = 
match_obj.group(2)\n\toutput['col_2_parallelism'] =match_obj.group(3)\n\n\tmatch_obj = re.search(re_5,file_contents)\n\toutput['col_3_dist'] = match_obj.group(1)\n\toutput['col_3_flatness'] = match_obj.group(2)\n\toutput['col_3_parallelism'] = match_obj.group(3)\n\n\tmatch_obj = re.search(re_6,file_contents)\n\toutput['col_4_dist'] = match_obj.group(1)\n\toutput['col_4_flatness'] = match_obj.group(2)\n\toutput['col_4_parallelism'] = match_obj.group(3)\n\n\treturn output", "def findResIndexes(self):\n self.reactant['solvated']={}\n self.reactant['solvated']['pdb']=self.simdir+'/reactant/solvated/'+self.id+'.premin.pdb'\n nres=162 #protein residues + DHP + NPD + energy-sink\n pdbf=self.reactant['solvated']['pdb']\n ptin=open(pdbf,'r'); l=ptin.readline()\n resindexes=[]; currires=None; catalytic=False\n while l:\n if l[0:5]=='ATOM ':\n iat=int(l[6:11])-1; ires=l[22:26]; resname=l[17:20]\n if not currires:\n currires=ires #initialize residue index\n group=[] #initialize list of atom residues\n if ires!=currires:\n resindexes.append(group)\n currires=ires\n group=[]\n if resname=='Na+' and not catalytic:\n \"\"\"introduce the catalytic site residue\"\"\"\n for index in self.hot_spot['indexes']:\n group.append(index)\n resindexes.append(group); catalytic=True; group=[];\n if iat not in self.hot_spot['indexes']: group.append(iat)\n l=ptin.readline()\n resindexes.append(group) #enter last group\n self.resinfo={'indexes':resindexes,'nres':nres}\n pdb.set_trace()\n return True", "def motifs(m):\n with open(m, 'r') as mot:\n\n motif = []\n motife = []\n for line in mot:\n line = line.strip()\n motif.append(line)\n line = line.lower()\n motife.append('(?=('+(''.join(iupac.get(ch, ch) for ch in line))+'))')\n # creates a regex for variable IUPAC codes and adds a lookahead to find consecutive motifs\n return motif, motife", "def parse_atlas_residue(line):\n tokens = line.split()\n morph_id = tokens[0]\n residue_idx = int(tokens[1])\n residue_type = tokens[2]\n is_hinge = (tokens[3].strip() == '1')\n\n return AtlasResidueInfo(morph_id, residue_idx, residue_type, is_hinge)", "def read_intron_pos3(file_intron_exon):\n \n all_intron_poss = []\n mRNA2exons = collections.defaultdict(list) \n for l in open(file_intron_exon):\n #AT1G01010.1_exon1\n #AT1G01010.1_intron1\n d = l.rstrip(\"\\n\").split(\"\\t\")\n exon_or_intron_name = d[3]\n chr_name = d[0]\n start, end = int(d[1])+1, int(d[2])\n strand = d[5]\n mRNA, exon_or_intron_id = exon_or_intron_name.split(\"_\")\n if exon_or_intron_id.startswith(\"i\"):\n all_intron_poss.append((exon_or_intron_name, mRNA, start, end))\n else:\n mRNA2exons[mRNA].append((chr_name, start, end, strand))\n mRNA_pos = {}\n for mRNA, mRNA_exons in mRNA2exons.items():\n total_exon_num = len(mRNA_exons)\n chr_name = mRNA_exons[0][0]\n strand = mRNA_exons[0][3]\n mRNA_start = min([i[1] for i in mRNA_exons])\n mRNA_end = max([i[2] for i in mRNA_exons])\n mRNA_pos[mRNA] = [chr_name, mRNA_start, mRNA_end, strand]\n intron_rel_poss = collections.defaultdict(list)\n for intron_id, mRNA, intron_start, intron_end in all_intron_poss:\n chr_name, mRNA_start, mRNA_end, strand = mRNA_pos[mRNA]\n if strand == \"+\":\n pos3 = int(intron_end) - mRNA_end\n else:\n pos3 = mRNA_start - int(intron_start)\n intron_rel_poss[mRNA].append([pos3, intron_id])\n for mRNA, d in intron_rel_poss.items():\n d.sort(key=lambda x: x[0])\n return intron_rel_poss", "def rej_infile(self):\n\n rejfilename = self.filebase + \"-rej.txt\"\n\n rejfile = open(rejfilename, 'w')\n rejfile.write(\"/--Data\\n\")\n 
rejfile.write(\"Vnaught 0\\n\\n\")\n rejfile.write(\"Loci\\tSNP\\n\")\n rejfile.write(\"Ancestral\\t-1\\n\")\n rejfile.write(\"RecombRt\\t0\\n\")\n rejfile.write(\"NumLoci\\t\")\n rejfile.write(str(len(self.sequence[0].seq)))\n rejfile.write(\"\\n\")\n rejfile.write(\"Length\\t1\\n\")\n rejfile.write(\"\\n\")\n rejfile.write(\"\\n\")\n\n rejfile.write(\"Tag\\t\")\n rejfile.write(\"Population\\n\")\n\n outseq = {}\n for seq in self.sequence:\n outseq[seq.id] = str(seq.seq)\n for x in sorted(outseq.keys()):\n rejfile.write(str(x))\n rejfile.write(\"\\t\")\n rejfile.write(\"X\")\n rejfile.write(\"\\t\")\n for y in list(outseq[x]):\n rejfile.write(y)\n rejfile.write(\"\\t\")\n rejfile.write(\"\\n\")\n\n rejfile.close()", "def parse_jgi_annotation(jgi_file):\n\n import re\n\n cog_number = {}\n cog_category = {}\n product_name = {}\n pfam_number = {}\n\n description_cogs = {}\n description_pfams = {}\n\n input_file = open(jgi_file, 'r')\n\n for line in input_file:\n line = line.rstrip('\\n')\n if not line.startswith(\"gene_oid\"):\n\n gene_oid, locus_tag, source, cluster_information, gene_information, evalue = line.split(\"\\t\")\n\n search_cog_number = re.match('(COG\\d+)', source)\n\n if source.startswith(\"COG_category\"):\n cog_category[gene_oid] = cluster_information\n\n if search_cog_number:\n cog_number[gene_oid] = source\n description_cogs[source] = cluster_information\n\n if source.startswith(\"pfam\"):\n pfam_number[gene_oid] = source\n\n description_pfams[source] = cluster_information\n\n if source.startswith(\"Product_name\"):\n product_name[gene_oid] = gene_information\n\n return cog_number, cog_category, product_name, pfam_number, description_cogs, description_pfams" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the Yang & Nielsen (2000) part of the results. Yang & Nielsen results are organized in a table with each row comprising one pairwise species comparison. Rows are labeled by spequence number rather than by sequence name.
def parse_yn00(lines, results, sequences): # Example (header row and first table row): # seq. seq. S N t kappa omega dN +- SE dS +- SE # 2 1 67.3 154.7 0.0136 3.6564 0.0000 -0.0000 +- 0.0000 0.0150 # +- 0.0151 for line in lines: # Find all floating point numbers in this line line_floats_res = re.findall("-*\d+\.\d+", line) line_floats = [float(val) for val in line_floats_res] row_res = re.match("\s+(\d+)\s+(\d+)", line) if row_res is not None: seq1 = int(row_res.group(1)) seq2 = int(row_res.group(2)) seq_name1 = sequences[seq1-1] seq_name2 = sequences[seq2-1] YN00 = {} YN00["S"] = line_floats[0] YN00["N"] = line_floats[1] YN00["t"] = line_floats[2] YN00["kappa"] = line_floats[3] YN00["omega"] = line_floats[4] YN00["dN"] = line_floats[5] YN00["dN SE"] = line_floats[6] YN00["dS"] = line_floats[7] YN00["dS SE"] = line_floats[8] results[seq_name1][seq_name2]["YN00"] = YN00 results[seq_name2][seq_name1]["YN00"] = YN00 seq_name1 = None seq_name2 = None return results
[ "def parse_others(lines, results, sequences):\n # Example:\n # 2 (Pan_troglo) vs. 1 (Homo_sapie)\n\n # L(i): 143.0 51.0 28.0 sum= 222.0\n # Ns(i): 0.0000 1.0000 0.0000 sum= 1.0000\n # Nv(i): 0.0000 0.0000 0.0000 sum= 0.0000\n # A(i): 0.0000 0.0200 0.0000\n # B(i): -0.0000 -0.0000 -0.0000\n # LWL85: dS = 0.0227 dN = 0.0000 w = 0.0000 S = 45.0 N = 177.0\n # LWL85m: dS = -nan dN = -nan w = -nan S = -nan N = -nan (rho = -nan)\n # LPB93: dS = 0.0129 dN = 0.0000 w = 0.0000\n seq_name1 = None\n seq_name2 = None\n for line in lines:\n comp_res = re.match(\"\\d+ \\((.+)\\) vs. \\d+ \\((.+)\\)\", line)\n if comp_res is not None:\n seq_name1 = comp_res.group(1)\n seq_name2 = comp_res.group(2)\n elif seq_name1 is not None and seq_name2 is not None:\n if \"dS =\" in line:\n stats = {}\n line_stats = line.split(\":\")[1].strip()\n stats_split = line_stats.split()\n for i in range(0, len(stats_split), 3):\n stat = stats_split[i].strip(\"()\")\n if stat == \"w\":\n stat = \"omega\"\n value = stats_split[i+2].strip(\"()\")\n try:\n stats[stat] = float(value)\n except:\n stats[stat] = None\n if \"LWL85:\" in line:\n results[seq_name1][seq_name2][\"LWL85\"] = stats\n results[seq_name2][seq_name1][\"LWL85\"] = stats\n elif \"LWL85m\" in line:\n results[seq_name1][seq_name2][\"LWL85m\"] = stats\n results[seq_name2][seq_name1][\"LWL85m\"] = stats\n elif \"LPB93\" in line:\n results[seq_name1][seq_name2][\"LPB93\"] = stats\n results[seq_name2][seq_name1][\"LPB93\"] = stats\n return results", "def obtain_parse_wiki_snp500():\n\n # stores the current time, for the created_at record\n now = datetime.now()\n\n # get the symbols from website\n url = 'http://en.wikipedia.org/wiki/List_of_S%26P_500_companies'\n hp = HTMLTableParser()\n stock_table = hp.parse_url(url)[0] # select the first table\n stock_table = stock_table.replace('\\n', '', regex=True) # delete '\\n'\n stock_table.columns = ['Symbol', 'Security', 'SEC filings', 'GICS Sector', 'GICS Sub Industry',\n 'Headquarters location', 'Date first added', 'CIK', 'Founded']\n df_symbol = stock_table[['Symbol', 'Security', 'GICS Sector']]\n df_symbol = df_symbol.rename(columns={'Symbol': 'ticker', 'Security': 'name', 'GICS Sector': 'sector'})\n df_symbol['instrument'] = 'stock'\n df_symbol['currency'] = 'USD'\n df_symbol['created_date'] = now\n df_symbol['last_updated_date'] = now\n return df_symbol", "def double_helix_parser(input_file, output_file, helicies_length = 6, helix_gap = 3, pro_eitherside = 3):\n res_no_l = [] # for residue names \n res_name_l = [] # for amino acid names\n sec_str_l = [] # for sec structure prediction\n\n two_helix_l = [] # contains a list aminoacids (also a list)\n\n # Extracts the residue no, amino acid and secstr and signs to variables\n rx_seq = re.compile(r\"^(\\w+?)\\s+?(\\w+?)\\s+?(\\S)\", re.MULTILINE)\n text = fileread(input_file)\n\n\n # assign the matched groups in the text to the res_no_l, res_name_l and sec_str_str\n for match in rx_seq.finditer(text):\n res_no, res_name, sec_str = match.groups()\n\n res_no_l.append(res_no)\n res_name_l.append(res_name)\n sec_str_l += sec_str\n\n\n # creates dictionaries for each with the chain as the key\n chains_sec_str_d = keychain_value_str(res_no_l, sec_str_l)\n chains_res_no_d = keychain_value_list(res_no_l, res_no_l)\n chains_res_name_d = keychain_value_list(res_no_l, res_name_l)\n\n\n\n # which a Pro is found a in the res_name_d[chain] its secstr in sec_str_d is replaced with a P\n # We will then search for this P later on \n\n counter = 0 \n for chain in chains_res_name_d:\n 
#print(chains_res_name_d[chain])\n counter = 0 \n for residue in chains_res_name_d[chain]:\n #print(chains_res_name_d[chain][counter])\n if residue == 'PRO':\n chains_sec_str_d[chain] = chains_sec_str_d[chain][:counter] + 'P' + chains_sec_str_d[chain][counter + 1:]\n #print(chains_res_no_d[chain][counter])\n counter += 1 \n\n # only adds if a proline is found in the gap\n # contains 2 groups, the 1st group being the whole helix and group 2 being the gap\n for x in chains_sec_str_d:\n \n regex = \"([h|H]{6,}(?:.?){1}(P)(?:.?){1}[h|H]{6,})\"\n p = re.compile(r\"\" +regex +\"\")\n\n # if one is found it prints out the residues numbers of that helix\n for match in p.finditer(chains_sec_str_d[x]):\n # adjusted to check for Proline around the gap 1 before and 1 after\n two_helix_l += [chains_res_no_d[x][ (match.start(1)) : (match.end(1)) ]]\n match_groups =(match.groups())\n\n # finds the location of the proline for mutation using mutmod\n pro_res = (x + str(match.start(2)))\n print(pro_res + \" :\" + match.group(2))\n\n\n tempstr = \"\"\n\n for protein in two_helix_l:\n for residue in protein:\n tempstr += (residue + \"\\n\")\n tempstr +=(\"\\n\")\n\n\n output = open(output_file, 'w')\n output.write(tempstr)\n output.close()\n #print('#####################')\n #print(tempstr)\n #print('#####################')", "def parse_nhmmer(nhmmertbl):\n df_tbl = pd.read_table(\n nhmmertbl,\n comment=\"#\",\n engine=\"python\",\n sep=r\"\\s*|-\\n\",\n index_col=False,\n header=None,\n usecols=range(14)\n )\n columns_dict = {0: \"target\",\n 1: \"t_accession\",\n 2: \"query\",\n 3: \"q_accession\",\n 4: \"hmmfrom\",\n 5: \"hmmto\",\n 6: \"alifrom\",\n 7: \"alito\",\n 8: \"envfrom\",\n 9: \"envto\",\n 10: \"sqlen\",\n 11: \"strand\",\n 12: \"e_value\",\n 13: \"score\",\n 14: \"bias\"}\n df_tbl = df_tbl.rename(columns=columns_dict)\n return df_tbl", "def obtain_parse_wiki_snp500():\n\n # Stores the current time, for the created_at record\n now = datetime.datetime.utcnow()\n\n # Use libxml to download the list of S&P500 companies and obtain the symbol\n url = 'http://en.wikipedia.org/wiki/List_of_S%26P_500_companies'\n page = urlopen(url).read()\n soup = bs.BeautifulSoup(page, 'html.parser')\n table = soup.find('table',{'class':'wikitable sortable'}).tbody\n rows = table.find_all('tr')\n\n # Obtain the symbol information for each row in the S&P500 table\n symbols = []\n for symbol in range(1,len(rows)):\n tds = rows[symbol].find_all('td')\n sd = {'ticker': tds[0].text.replace('\\n', ''),\n 'name': tds[1].text,\n 'sector': tds[3].text}\n\n # Create a tuple (for the DB format) and append to the grand list\n symbols.append((sd['ticker'], 'stock', sd['name'],\n sd['sector'], 'USD', now, now))\n\n return symbols", "def _extract_named_tables_from_gerald_xml(self, tree):\n # using the function to convert to lower instead of just writing it\n # makes the tag easier to read (IMO)\n useful_tables = ['LaneResultsSummary'.lower(),]\n\n tables ={}\n for child in tree.getchildren():\n if child.tag.lower() in useful_tables:\n read_tree = child.find('Read')\n # we want 0 based.\n read = int(read_tree.find('readNumber').text)-1\n for element in read_tree.getchildren():\n if element.tag.lower() == \"lane\":\n lrs = LaneResultSummaryGA()\n lrs.set_elements_from_gerald_xml(read, element)\n self.lane_results[lrs.end][lrs.lane] = lrs\n # probably not useful\n return tables", "def parse_results(self, html):\r\n soup = BeautifulSoup(html, 'html.parser')\r\n\r\n info = [tag.getText().strip() for tag in soup.find_all('span', {'class': 
'nobr'})]\r\n name, matnr, degree, semester = info[1], info[3], info[5], info[7]\r\n student = Student(name, matnr, degree, semester)\r\n\r\n elements = [tag.getText().strip() for tag in soup.find_all('th', {'class': 'Konto'})]\r\n no_elems = len(elements)\r\n\r\n raw_results = [\r\n unicodedata.normalize(\"NFKD\", tag.getText().strip())\r\n for tag in soup.find_all('td', {'class': 'posrecords'})\r\n ]\r\n res_tuples = list(group(raw_results, no_elems))\r\n results = []\r\n for tup in res_tuples:\r\n grade_lst = list(tup)\r\n seme = grade_lst[elements.index('Semester')]\r\n exam = grade_lst[elements.index('Prüfungsname')]\r\n grad = float(grade_lst[elements.index('Note')].replace(',', '.'))\r\n ects = int(parse_ects(grade_lst[elements.index('ECTS')]))\r\n pasd = grade_lst[elements.index('Status')]\r\n results.append(Result(seme, exam, grad, ects, pasd))\r\n return results, student", "def extract_results(page, semester=None):\n\t# Guess the semester if it isn't provided\n\tif semester is None:\n\t\tsemester = guess_semester()\n\n\t# Unpack the semester tuple\n\tyear, semester = semester\n\n\t# Create a BeautifulSoup object to allow HTML parsing\n\tsoup = BeautifulSoup(page)\n\n\t# Find the heading that precedes the year's results\n\tyear_heading = soup.find(text=\"Results for Academic Year: {:d}\".format(year))\n\n\tif year_heading is None:\n\t\tprint(\"Couldn't find results for year {:d}.\".format(year))\n\t\tprint(\"This could indicate a download error.\")\n\t\treturn []\n\n\t# Find the table that contains this heading\n\tyear_table = year_heading.find_parent(\"table\")\n\n\t# Find the semester block, which should be a sibling of the year block\n\tdef correct_semester(tag):\n\t\tdesired_text = \"Semester {:d}\".format(semester)\n\t\tif tag.find(text=desired_text):\n\t\t\treturn True\n\t\treturn False\n\n\tresult_block = year_table.find_next_sibling(correct_semester)\n\n\t# Get a list in the form [MATH, 2969, Graph Theory, 74.0, Credit..]\n\traw_results = result_block.find_all('td', 'instructions')\n\n\t# Convert each subject into a sensible dictionary\n\tresults = []\n\tnsubjects = len(raw_results) // 5\n\tfor i in range(nsubjects):\n\t\tresult = {}\n\t\tresult[\"subject\"] = raw_results[5*i].string\n\t\tresult[\"subject\"] += raw_results[5*i + 1].string\n\t\tmark = raw_results[5*i + 3].string\n\t\tmark = int(float(mark)) if (mark != None) else None\n\t\tresult['mark'] = mark\n\t\tresult[\"grade\"] = raw_results[5*i + 4].string\n\n\t\t# If this subject hasn't been dropped, add it\n\t\tif result[\"grade\"] != \"Withdrawn\":\n\t\t\tresults.append(result)\n\n\treturn results", "def extract_names(filename):\n\n # The list [year, name_and_rank, name_and_rank, ...] 
we'll eventually return.\n names = []\n\n # Open and read the file.\n f = open(filename, 'rU')\n text = f.read()\n # Could process the file line-by-line, but regex on the whole text\n # at once is even easier.\n\n # Get the year.\n year_match = re.search(r'Popularity\\sin\\s(\\d\\d\\d\\d)', text)\n if not year_match:\n # We didn't find a year, so we'll exit with an error message.\n sys.stderr.write('Couldn\\'t find the year!\\n')\n sys.exit(1)\n year = year_match.group(1)\n names.append(year)\n\n # Extract all the data tuples with a findall()\n # each tuple is: (rank, boy-name, girl-name)\n tuples = re.findall(r'<td>(\\d+)</td><td>(\\w+)</td>\\<td>(\\w+)</td>', text)\n # print(tuples)\n\n # Store data into a dict using each name as a key and that\n # name's rank number as the value.\n # (if the name is already in there, don't add it, since\n # this new rank will be bigger than the previous rank).\n names_to_rank = {}\n for rank_tuple in tuples:\n (rank, boyname, girlname) = rank_tuple # unpack the tuple into 3 vars\n if boyname not in names_to_rank:\n names_to_rank[boyname] = rank\n if girlname not in names_to_rank:\n names_to_rank[girlname] = rank\n # You can also write:\n # for rank, boyname, girlname in tuples:\n # ...\n # To unpack the tuples inside a for-loop.\n\n # Get the names, sorted in the right order\n sorted_names = sorted(names_to_rank.keys())\n\n # Build up result list, one element per line\n for name in sorted_names:\n names.append(name + \" \" + names_to_rank[name])\n\n return names", "def test_2000(self):\n\t\tdf = self.obj.raw_data[self.obj.raw_data['year'] == 2000]\n\t\t#-Number of Observations-#\n\t\tassert df.shape[0] == 857189, \"Total Number of observations: %s != 857189\" % (df.shape[0])\n\t\tnum_obs_117100 = len(df[df['icode'] == '117100'])\n\t\t#-Number of 117100 observations-#\n\t\tassert num_obs_117100 == 8732, \"number of icode: 117100 observations (%s) != 8732\" % (num_obs_117100)\n\t\t#- Number of Fiji observations-#\n\t\tnum_obs_Fiji = len(df[df['importer'] == 'Fiji'])\n\t\tassert num_obs_Fiji == 1088, \"number of 'Fiji' observations (%s) != 1088\" % (num_obs_Fiji)\n\t\t#-Test Some Product Codes-#\n\t\tnum_obs_0023 = len(df[df['sitc4'] == '0023'])\n\t\tassert num_obs_0023 == 0, \"number of SITC4: 0023 observations (%s) != 0\" % (num_obs_0023)\n\t\tnum_obs_0013 = len(df[df['sitc4'] == '0013'])\n\t\tassert num_obs_0013 == 201, \"number of SITC4: 0013 observations (%s) != 201\" % (num_obs_0013)\n\t\tnum_sitc4_codes = len(df['sitc4'].unique())\n\t\tassert num_sitc4_codes == 1288, \"number of SITC4 codes (%s) != 1288\" % (num_sitc4_codes)", "def scrap_yahoo_jp(soup, filename):\n names = soup.findAll('h4')\n errornames = []\n for name in names:\n n = ''.join(name.findAll(text=True))\n n = n.encode('utf8')\n errornames.append('_'.join(n.split()))\n print errornames\n # Number of tables to scrap for errornames\n span = [1,1,1,5,2,1,1,3,3]\n tables = soup.findAll('table')\n start = 0\n i = 0\n alldata = []\n # Scrap data from each table\n for d in span:\n end = start+d\n dataset = []\n for table in tables[start:end]:\n rows = table.findAll('tr')\n data = parse_yjp_rows(rows[1:])\n dataset.append(data)\n dataset = reduce(lambda x,y: x+y, dataset)\n #dataset = remove_dup(dataset)\n alldata.append(dataset)\n outfile = filename+str(errornames[i])\n write_data_csv(outfile, dataset)\n start = end\n i = i+1\n alldata = reduce(lambda x,y: x+y, alldata)\n write_data_csv(filename+'yjp_all_dump', alldata)", "def parse_ugshee_style2():\n\n d_sheet = BOOK.sheet_by_name(D_SHEET)\n 
u_sheet = BOOK.sheet_by_name(U_SHEET)\n g_sheet = BOOK.sheet_by_name(G_SHEET)\n\n val = []\n\n h = \"\"\n for row in range(DSTARTROW,d_sheet.nrows):\n # h = u_sheet.cell_value(row, 1)\n if(d_sheet.cell_value(row, 9) != \"\"):\n h = conv_subject(d_sheet.cell_value(row, 9).encode('utf8', 'ignore'))\n print h\n val.append([YEAR,HIJRI,h,'male','diploma', 'local', d_sheet.cell_value(row, 8)])\n val.append([YEAR,HIJRI,h,'female','diploma', 'local', d_sheet.cell_value(row, 7)])\n\n\n #undergrade data has the following order [um, uf, um , uf ..... etc]\n #undergrad_data = []\n h = \"\"\n for row in range(USTARTROW,u_sheet.nrows):\n # h = u_sheet.cell_value(row, 1)\n if(u_sheet.cell_value(row, 10) != \"\"):\n h = conv_subject(u_sheet.cell_value(row, 10).encode('utf8', 'ignore'))\n print h\n\n jumlah = u_sheet.cell_value(row, 9)\n if(jumlah.strip() == u'\\u062c\\u0645\\u0644\\u0629'):\n #undergrad_data.append(u_sheet.cell_value(row, 3))\n #undergrad_data.append(u_sheet.cell_value(row, 4))\n val.append([YEAR,HIJRI,h,'male','undergrad', 'local', u_sheet.cell_value(row, 8)])\n val.append([YEAR,HIJRI,h,'female','undergrad', 'local', u_sheet.cell_value(row, 7)])\n\n #print undergrad_data\n #print len(undergrad_data)\n\n #grad_data = []\n h = \"\"\n mPhD = 0\n fPhD = 0\n for row in range(GSTARTROW,g_sheet.nrows):\n #h = g_sheet.cell_value(row, 1)\n\n if(g_sheet.cell_value(row, 10) != \"\"):\n h = conv_subject(g_sheet.cell_value(row, 10).encode('utf8', 'ignore'))\n print h\n\n level = g_sheet.cell_value(row, 9).strip()\n print 'l'\n print level\n\n if(level == u'\\u062F\\u0643\\u062A\\u0648\\u0631\\u0627\\u0647'):\n print ' inside phd'\n mPhD = g_sheet.cell_value(row, 8)\n fPhD = g_sheet.cell_value(row, 7)\n elif (level == u'\\u0645\\u0627\\u062C\\u0633\\u062A\\u064A\\u0631'):\n #grad_data.append(g_sheet.cell_value(row, 3))\n #grad_data.append(g_sheet.cell_value(row, 4))\n val.append([YEAR,HIJRI,h,'male','grad', 'local', g_sheet.cell_value(row, 8)+ mPhD ])\n val.append([YEAR,HIJRI,h,'female','grad', 'local', g_sheet.cell_value(row, 7)+ fPhD ])\n\n #print grad_data\n #print len(grad_data)\n\n #data = []\n #for i in range(0, len(grad_data), 2):\n # mu\n # data.append(undergrad_data[i])\n # mg\n # data.append(grad_data[i])\n # fu\n # data.append(undergrad_data[i+1])\n # fg\n # data.append(grad_data[i+1])\n\n print val\n print len(val)\n\n write_data(val)", "def yaronunasike(self):\n [self.Isthana2,self.Iyatna2] = identify(self.Linary[self.Index + 1])\n # PMS: if ( set_memberP(Linary[Index - 1],Yar) && (Index + 2 < linmax) then\n # PMS: if (Iyatna2 eq isparsa5) || (Linary[Index + 2] == sktanunasika ) {\n # PMS: we won't exercise the nasalized option for the semivowels \n # y, v && l; just for the stops\n if (set_memberP(self.Linary[self.Index - 1], Jhay)) and (self.Iyatna2 == isparsa5):\n [self.Isthana1,self.Iyatna1] = identify(self.Linary[self.Index - 1])\n self.Linary[self.Index - 1] = Soundary[self.Isthana1][isparsa5]", "def parse_experiment_result(\n outstr: str,\n prior_counts: Optional[Sequence[float]] = None,\n n_dirichlet_samples: int = 1000000,\n score_scale: float = 4.0,\n random_state: Union[int, RandomState, None] = None,\n **kwargs: Any,\n) -> Tuple[float, float, float]:\n wdl_strings = re.findall(r\"Score of.*:\\s*([0-9]+\\s-\\s[0-9]+\\s-\\s[0-9]+)\", outstr)\n array = np.array(\n [np.array([int(y) for y in re.findall(r\"[0-9]+\", x)]) for x in wdl_strings]\n )\n diffs = np.diff(array, axis=0, prepend=np.array([[0, 0, 0]]))\n\n # Parse order of finished games to be able to compute the correct 
pentanomial scores\n finished = np.array(\n [int(x) - 1 for x in re.findall(r\"Finished game ([0-9]+)\", outstr)]\n )\n diffs = diffs[np.argsort(finished)]\n\n counts = {\"WW\": 0, \"WD\": 0, \"WL/DD\": 0, \"LD\": 0, \"LL\": 0}\n DD = 0 # Track DD separately to compute draw rate\n for i in range(0, len(diffs) - 1, 2):\n match = diffs[i] + diffs[i + 1]\n if match[0] == 2:\n counts[\"WW\"] += 1\n elif match[0] == 1:\n if match[1] == 1:\n counts[\"WL/DD\"] += 1\n else:\n counts[\"WD\"] += 1\n elif match[1] == 1:\n counts[\"LD\"] += 1\n elif match[2] == 2:\n counts[\"WL/DD\"] += 1\n DD += 1\n else:\n counts[\"LL\"] += 1\n counts_array = np.array(list(counts.values()))\n score, error = counts_to_penta(\n counts=counts_array,\n prior_counts=prior_counts,\n n_dirichlet_samples=n_dirichlet_samples,\n score_scale=score_scale,\n random_state=random_state,\n **kwargs,\n )\n draw_rate = (DD + 0.5 * counts[\"WD\"] + 0.5 * counts[\"LD\"] + 1.0) / (\n counts_array.sum() + 3.0\n )\n return score, error, draw_rate", "def _gather_broken_taxa_info(broken_response, label_format):\n broken_dict = {}\n relabel = {}\n relabel_ott_ids = {}\n for taxon in broken_response:\n remap = broken_response[taxon] # Where on the tree is that taxon now?\n ott_id = taxon.strip('ott')\n tax_inf = OT.taxon_info(ott_id=ott_id).response_dict\n tax_inf['tax_url'] = \"https://tree.opentreeoflife.org/taxonomy/browse?id={}\".format(ott_id)\n tax_inf['synth_url'] = \"https://tree.opentreeoflife.org/opentree/argus/ottol@{}\".format(ott_id)\n tax_inf['MRCA_location_in_synth'] = remap\n taxon_name = tax_inf.get('name', taxon)\n if label_format == 'name':\n taxon_label = \"{}\".format(taxon_name)\n elif label_format == 'name_and_id':\n taxon_label = \"{}_{}\".format(taxon_name, taxon)\n else:\n taxon_label = taxon\n if remap not in relabel:\n relabel[remap] = [] #Sometimes multiple taxa map to the same node or id\n relabel_ott_ids[remap] = []\n relabel[remap].append(\"{}\".format(taxon_label))\n relabel_ott_ids[remap].append(ott_id)\n tax_inf['broken_taxa_mapping_to_same_node'] = relabel[remap]\n broken_dict[ott_id] = tax_inf\n return relabel, relabel_ott_ids, broken_dict", "def extract_names(filename):\n #a = open('baby1990.html').read()\n a=open(filename).read()\n\n\n ## 1. Extract all the text from the file and print it\n\n # <td>1</td><td>Michael</td><td>Jessica</td>\n # <td>(.*?)</td> is the regular expression to locate the target lines\n pattern = re.compile(r'<td>(.*?)</td>',re.S)\n items = re.findall(pattern, a)\n # modify the first item\n items[0]='1'\n print(items)\n\n\n # 2. Find and extract the year and print it\n\n # <h3 align=\"center\">Popularity in 1990</h3>\n # <h3 align=\"center\">(.*?)</h3> is the regular expression to locate the target lines\n pattern2 = re.compile(r'<h3 align=\"center\">(.*?)</h3>',re.S)\n year = re.findall(pattern2, a)\n m=str(year)\n # extract the year\n m=m[16:20]\n print(m)\n\n\n # 3. Extract the names and rank numbers and print them\n\n # create the new list\n name_list=[]\n i=0\n while i <=(len (items) -3):\n name_list.append(str(items[i+1]+' '+items[i]))\n name_list.append(str(items[i+2]+' '+items[i]))\n i=i+3\n \n print(name_list)\n\n\n # 4. 
Get the names data into a dict and print it\n # choose whichever number is smaller for a name appears more than once\n tuples = re.findall(r'<td>(\\d+)</td><td>(\\w+)</td>\\<td>(\\w+)</td>', a)\n\n # creat a dict to store name and number\n # each name is a key\n # so if a name is already stored, it will not be added\n \n name_rank = {}\n for rank_tuple in tuples:\n (rank, boyname, girlname) = rank_tuple # unpack the tuple into 3 vars\n if boyname not in name_rank:\n name_rank[boyname] = rank\n if girlname not in name_rank:\n name_rank[girlname] = rank\n # print the dict\n print(name_rank)\n\n # 4. Build the [year, 'name rank', ... ] list and print it\n\n # sort the dict based on key\n sorted_name = sorted(name_rank.keys())\n\n # creat the result list\n name_result_list=[m]\n for name in sorted_name:\n name_result_list.append(name + \" \" + name_rank[name])\n \n # print the new list\n #print(name_result_list)\n\n\n \n\n return name_result_list", "def test_parser(self):\n atab = Tabular(str(DATA_DIR / \"test.atab\"))\n self.assertEqual(len(atab.hits), 2, 'Read 2 hits')\n\n for i, hits in enumerate(zip(atab.hits, self._EXPECTED_HITS)):\n with self.subTest(\"Checking hit against known data\", hit=i):\n self._test_hits_equal(hits[0], hits[1])", "def getdata(data,vers='dr2',posn_match=30,verbose=True) :\n\n tab=Table()\n tab.add_column(Column(data['APOGEE_ID'],name='twomass'))\n tab.add_column(Column(data['RA'],name='apogee_ra'))\n tab.add_column(Column(data['DEC'],name='apogee_dec'))\n #if type(data['APOGEE_ID'][0]) is str or type(data['APOGEE_ID'][0]) is np.str_ : \n try:\n j=np.where(np.core.defchararray.find(data['APOGEE_ID'],'2M') == 0)[0]\n out,ind=np.unique(np.core.defchararray.replace(data['APOGEE_ID'][j],'2M',''),return_index=True)\n except :\n j=np.where(np.core.defchararray.find(data['APOGEE_ID'],b'2M') == 0)[0]\n out,ind=np.unique(np.core.defchararray.replace(data['APOGEE_ID'][j],b'2M',b''),return_index=True)\n tab['twomass'][ind] = out\n #tab.add_column(Column(out,name='twomass'))\n #tab.add_column(Column(data['RA'][ind],name='apogee_ra'))\n #tab.add_column(Column(data['DEC'][ind],name='apogee_dec'))\n xmlfilename= tempfile.mktemp('.xml',dir=os.getcwd())\n tab.write(xmlfilename,format='votable',overwrite=True)\n if vers == 'dr2' :\n try :\n job= Gaia.launch_job_async(\n \"\"\"SELECT tmass_match.original_ext_source_id, g.source_id, g.ra, g.dec, g.parallax, g.parallax_error, \n g.pmra, g.pmra_error, g.pmdec, g.pmdec_error, g.ref_epoch,\n g.phot_g_mean_mag, g.phot_bp_mean_mag, g.phot_rp_mean_mag, \n g.radial_velocity, g.radial_velocity_error, g.a_g_val, g.e_bp_min_rp_val, \n dist.r_est, dist.r_lo, dist.r_hi\n FROM gaiadr2.gaia_source AS g\n INNER JOIN gaiadr2.tmass_best_neighbour AS tmass_match ON tmass_match.source_id = g.source_id\n INNER JOIN tap_upload.my_table as ids on ids.twomass = tmass_match.original_ext_source_id\n LEFT OUTER JOIN external.gaiadr2_geometric_distance as dist ON g.source_id = dist.source_id\"\"\",\n upload_resource=xmlfilename,upload_table_name='my_table',verbose=verbose)\n twomass_gaia = job.get_results()\n except:\n print(\"error with gaia 2mass search\")\n twomass_gaia = None\n else : twomass_gaia = None\n\n try: \n if vers == 'dr2' :\n job= Gaia.launch_job_async(\n \"\"\"SELECT g.source_id, g.ra, g.dec, g.parallax, g.parallax_error, \n g.pmra, g.pmra_error, g.pmdec, g.pmdec_error, g.ref_epoch,\n g.phot_g_mean_mag, g.phot_bp_mean_mag, g.phot_rp_mean_mag, \n g.radial_velocity, g.radial_velocity_error, g.a_g_val, g.e_bp_min_rp_val, \n dist.r_est, dist.r_lo, 
dist.r_hi,\n distance(\n point('', ids.apogee_ra, ids.apogee_dec),\n point('', g.ra, g.dec)\n ) * 3600 as dist_arcsec\n FROM gaiadr2.gaia_source as g\n JOIN tap_upload.my_table as ids on 1 = contains(\n point('', ids.apogee_ra, ids.apogee_dec),\n circle('', g.ra, g.dec, {:f})\n )\n LEFT OUTER JOIN external.gaiadr2_geometric_distance as dist ON g.source_id = dist.source_id\"\"\".format(posn_match/3600.),\n upload_resource=xmlfilename,upload_table_name='my_table',verbose=verbose)\n posn_gaia = job.get_results()\n print('returned ', len(posn_gaia))\n elif vers == 'edr3' :\n service = pyvo.dal.TAPService(\"https://dc.zah.uni-heidelberg.de/tap\")\n # this used to work but stopped\n #posn_gaia = service.search(\n # \"\"\"SELECT * FROM gedr3dist.litewithdist as g\n # JOIN TAP_UPLOAD.coords as coords \n # ON contains(POINT('ICRS', g.ra, g.dec),CIRCLE('ICRS',coords.apogee_ra, coords.apogee_dec,{:f})) = 1\"\"\".format(posn_match/3600.),\n # uploads={'coords' : tab})\n # Markus at GAVO recommended:\n posn_gaia = service.search(\n \"\"\"WITH withpar AS (\n SELECT *\n FROM gaia.edr3lite AS db\n JOIN TAP_UPLOAD.coords AS coords\n ON distance(db.ra, db.dec, coords.apogee_ra, coords.apogee_dec)< {:f}) \n SELECT * from withpar\n JOIN gedr3dist.main as dist using (source_id)\n \"\"\".format(posn_match/3600.), uploads={'coords' : tab},maxrec=1000000)\n print('pyvo returned: ',len(posn_gaia))\n\n #m1,m2=match.match(posn_gaia_archive['source_id'],posn_gaia['source_id'])\n #print(len(m1),len(m2))\n\n except: \n print(\"error with gaia position search\")\n posn_gaia = None\n finally: os.remove(xmlfilename)\n\n return twomass_gaia, posn_gaia", "def parse_lookup_table(lookup_table_file):\n labels_dict = dict()\n with open(lookup_table_file, \"r\") as lookup_table:\n for line in lookup_table:\n\n # parse line for label code\n row = line.split(\" \")\n for i in range(row.count(\"\")):\n row.remove(\"\")\n code = row[0]\n\n # continue if the code is a number\n if code.isalnum():\n name = row[1]\n\n # determine hemisphere\n if \"Left\" in name or \"lh\" in name:\n hemisphere = \"lh\"\n elif \"Right\" in name or \"rh\" in name:\n hemisphere = \"rh\"\n else:\n hemisphere = \"N/A\"\n\n # determine location\n # set location to None. 
Then update it depending on the name.\n location = None\n\n if \"wm\" in name:\n location = \"wm\"\n elif \"ctx\" in name or \"gyrus\" in name:\n location = \"gm\"\n elif \"CC\" in name:\n location = \"cc\"\n elif \"Ventricle\" in name:\n location = \"ventricle\"\n\n cerebellum_names = [\n \"Cbm\",\n \"Cerebellum\",\n \"Cerebellum\",\n \"Cerebellar\",\n \"4th-Ventricle\",\n \"Brain-Stem\",\n \"VentralDC\",\n ]\n subcortical_names = [\n \"Thalamus\",\n \"Caudate\",\n \"Putamen\",\n \"Pallidum\",\n \"Hippocampus\",\n \"Amygdala\",\n \"Accumbens\",\n \"Inf-Lat-Vent\",\n ]\n\n for designated_name, list_of_locations in [\n (\"cerebellum\", cerebellum_names),\n (\"subcortical\", subcortical_names),\n ]:\n for location_name in list_of_locations:\n if location_name in name:\n location = designated_name\n\n if not location:\n location = \"UNKNOWN\"\n\n labels_dict[code] = dict(\n name=name, hemisphere=hemisphere, location=location\n )\n\n return labels_dict", "def extract_names(filename):\n # +++your code here+++\n lines = utils_lines(filename)\n # print('lines', lines)\n\n # Extract the year and print it\n m = re.search('(?<=Popularity in )\\d{4}', lines)\n year = m.group()\n # print(year)\n\n # Extract the names and rank numbers and just print them\n names_and_rank = re.findall('(?<=<td>)\\w+', lines)\n names_data = dict()\n for i in range(0, len(names_and_rank), 3):\n # print(names_and_rank[i:i+3])\n # Get the names data into a dict and print it\n names_data[names_and_rank[i]] = names_and_rank[i + 1:i + 3]\n # print(names_data)\n\n # Build the [year, 'name rank', ... ] list and print it\n baby_names = [year]\n for k, v in names_data.items():\n name_rank_str = v[0] + ' ' + str(k)\n baby_names.append(name_rank_str)\n\n # print(baby_names)\n # print(sorted(baby_names))\n\n return sorted(baby_names)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the results from the other methods. The remaining methods are grouped together. Statistics for all three are listed for each of the pairwise species comparisons, with each method's results on its own line. The stats in this section must be handled differently due to the possible presence of NaN values, which won't get caught by my typical "line_floats" method used above.
def parse_others(lines, results, sequences):
    # Example:
    # 2 (Pan_troglo) vs. 1 (Homo_sapie)

    # L(i):      143.0      51.0      28.0  sum=    222.0
    # Ns(i):    0.0000    1.0000    0.0000  sum=   1.0000
    # Nv(i):    0.0000    0.0000    0.0000  sum=   0.0000
    # A(i):     0.0000    0.0200    0.0000
    # B(i):    -0.0000   -0.0000   -0.0000
    # LWL85:  dS =  0.0227 dN =  0.0000 w = 0.0000 S =   45.0 N =  177.0
    # LWL85m: dS = -nan dN = -nan w = -nan S = -nan N = -nan (rho = -nan)
    # LPB93:  dS =  0.0129 dN =  0.0000 w = 0.0000
    seq_name1 = None
    seq_name2 = None
    for line in lines:
        comp_res = re.match("\d+ \((.+)\) vs. \d+ \((.+)\)", line)
        if comp_res is not None:
            seq_name1 = comp_res.group(1)
            seq_name2 = comp_res.group(2)
        elif seq_name1 is not None and seq_name2 is not None:
            if "dS =" in line:
                stats = {}
                line_stats = line.split(":")[1].strip()
                stats_split = line_stats.split()
                for i in range(0, len(stats_split), 3):
                    stat = stats_split[i].strip("()")
                    if stat == "w":
                        stat = "omega"
                    value = stats_split[i+2].strip("()")
                    try:
                        stats[stat] = float(value)
                    except:
                        stats[stat] = None
                if "LWL85:" in line:
                    results[seq_name1][seq_name2]["LWL85"] = stats
                    results[seq_name2][seq_name1]["LWL85"] = stats
                elif "LWL85m" in line:
                    results[seq_name1][seq_name2]["LWL85m"] = stats
                    results[seq_name2][seq_name1]["LWL85m"] = stats
                elif "LPB93" in line:
                    results[seq_name1][seq_name2]["LPB93"] = stats
                    results[seq_name2][seq_name1]["LPB93"] = stats
    return results
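A rough sketch of the triple-wise splitting described in the query above, applied to two invented lines shaped like the yn00 "other methods" section. The None fallback only triggers when float() cannot parse the value token at all (the exact NaN spelling PAML emits may vary by platform):

import re

# Invented lines shaped like the yn00 output; values are illustrative only.
header = "2 (Pan_troglo) vs. 1 (Homo_sapie)"
stats_line = "LWL85:  dS =  0.0227 dN =  0.0000 w = 0.0000 S =   45.0 N =  177.0"

# The comparison header yields the two sequence names being compared.
m = re.match(r"\d+ \((.+)\) vs. \d+ \((.+)\)", header)
print(m.group(1), m.group(2))  # Pan_troglo Homo_sapie

# Everything after the method label is read in (name, "=", value) triples;
# "w" is renamed to "omega", and values float() rejects fall back to None.
tokens = stats_line.split(":")[1].strip().split()
stats = {}
for i in range(0, len(tokens), 3):
    name = tokens[i].strip("()")
    if name == "w":
        name = "omega"
    try:
        stats[name] = float(tokens[i + 2].strip("()"))
    except ValueError:
        stats[name] = None
print(stats)
# {'dS': 0.0227, 'dN': 0.0, 'omega': 0.0, 'S': 45.0, 'N': 177.0}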
[ "def parse_summarized_results(lines):\n result = BenchSummary([], [], [], [], [], [], [], [], [])\n # Begin iterating over lines\n for line in lines:\n if not line or line.startswith('#'):\n continue\n line = line.strip()\n values = line.split('\\t')\n result.label.append(values[0])\n result.wall_mean.append(float(values[1]))\n result.wall_stdev.append(float(values[2]))\n result.user_mean.append(float(values[3]))\n result.user_stdev.append(float(values[4]))\n result.kernel_mean.append(float(values[5]))\n result.kernel_stdev.append(float(values[6]))\n result.mem_mean.append(float(values[7]))\n result.mem_stdev.append(float(values[8]))\n return result", "def _parse_result(self, result_str):\n result_str_replaced = (\n result_str\n .replace(\" \", \"\") # Whitespaces\n .replace(\"\\u2009\", \"\") # Thin whitespaces\n .replace(\"\\u2212\", \"-\") # All possible symbols for 'minus'\n .replace(\"\\u2013\", \"-\")\n .replace(\"\\uFE63\", \"-\")\n .replace(\"\\u002D\", \"-\")\n .replace(\"\\uFF0D\", \"-\")\n )\n match = self._REGEX_STAT_TEST.match(result_str_replaced) # See regex above\n if match is None: # Statistical test not recognized\n raise ValueError(f\"The input {result_str} is not recognized. Please correct it\")\n\n test_props = match.groupdict() # Test recognized, accessing properties\n test_type = test_props[\"testtype\"]\n df1_raw = test_props[\"df1\"]\n df2_raw = test_props[\"df2\"]\n stat_raw = test_props[\"stat\"]\n\n # Testing that degrees of freedom are correctly entered\n if (test_type == \"F\") and ((test_props[\"df1\"] is None) or (test_props[\"df2\"] is None)):\n raise ValueError(\n f\"Error in {result_str}: The test statistics {test_type} requires you to specify the numerator \\\n and denominator degrees of freedom.\")\n if (test_type not in [\"z\", \"p\"]) and (test_props[\"df1\"] is None):\n raise ValueError(\n f\"Error in {result_str}: The test statistics {test_type} requires you to specify the degrees of \\\n freedom.\")\n\n stat_raw = float(stat_raw)\n if test_type == \"F\":\n family = \"F\"\n df1 = float(df1_raw)\n df2 = float(df2_raw)\n stat = stat_raw\n elif test_type == \"t\":\n family = \"F\"\n df1 = 1\n df2 = float(df1_raw)\n stat = stat_raw ** 2\n elif test_type == \"r\":\n family = \"F\"\n df1 = 1\n df2 = float(df1_raw)\n stat = (stat_raw / (np.sqrt((1 - stat_raw ** 2) / df2))) ** 2\n elif test_type == \"chi2\":\n family = \"Chi2\"\n df1 = float(df1_raw)\n df2 = None\n stat = stat_raw\n elif test_type == \"z\":\n family = \"Chi2\"\n df1 = 1\n df2 = None\n stat = stat_raw ** 2\n else:\n family = \"Chi2\"\n df1 = 1\n df2 = None\n stat = norm.ppf(1 - stat_raw / 2) ** 2\n\n return result_str_replaced, family, df1, df2, stat", "def parse_results(self):\n with open(self.mts_result_csv_file, encoding='utf-8') as stream_:\n self.__logger.info(\"Parsing file : %s\", self.mts_result_csv_file)\n reader = csv.reader(stream_, delimiter=';')\n rownum = 0\n _tests_data = []\n msg = prettytable.PrettyTable(\n header_style='upper', padding_width=5,\n field_names=['MTS test', 'MTS test case',\n 'status'])\n for row in reader:\n _test_dict = {}\n nb_values = len(row)\n if rownum > 0:\n # If there's only one delimiter,\n # it is the name of the <test> elt\n if nb_values == 2:\n test_name = row[0]\n _test_dict['parent'] = test_name\n elif nb_values == 3:\n testcase_name = row[0].lstrip()\n testcase_status = row[2]\n self.total_tests += 1\n if testcase_status == 'OK':\n self.pass_tests += 1\n elif testcase_status == 'Failed':\n self.fail_tests += 1\n elif testcase_status == '?':\n 
self.skip_tests += 1\n _test_dict['status'] = testcase_status\n _test_dict['name'] = testcase_name\n msg.add_row(\n [test_name,\n _test_dict['name'],\n _test_dict['status']])\n rownum += 1\n _tests_data.append(_test_dict)\n try:\n self.result = 100 * (\n self.pass_tests / self.total_tests)\n except ZeroDivisionError:\n self.__logger.error(\"No test has been run\")\n self.__logger.info(\"MTS Test result:\\n\\n%s\\n\", msg.get_string())\n self.details = {}\n self.details['description'] = \"Execution of some MTS tests\"\n self.details['total_tests'] = self.total_tests\n self.details['pass_tests'] = self.pass_tests\n self.details['fail_tests'] = self.fail_tests\n self.details['skip_tests'] = self.skip_tests\n self.details['tests'] = _tests_data", "def _DisplayResults(self):\n print\n print '=' * 78\n print 'DIAGNOSTIC RESULTS'.center(78)\n print '=' * 78\n\n if 'latency' in self.results:\n print\n print '-' * 78\n print 'Latency'.center(78)\n print '-' * 78\n print ('Operation Size Trials Mean (ms) Std Dev (ms) '\n 'Median (ms) 90th % (ms)')\n print ('========= ========= ====== ========= ============ '\n '=========== ===========')\n for key in sorted(self.results['latency']):\n trials = sorted(self.results['latency'][key])\n op, numbytes = key.split('_')\n numbytes = int(numbytes)\n if op == 'METADATA':\n print 'Metadata'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'DOWNLOAD':\n print 'Download'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'UPLOAD':\n print 'Upload'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n if op == 'DELETE':\n print 'Delete'.rjust(9), '',\n print MakeHumanReadable(numbytes).rjust(9), '',\n self._DisplayStats(trials)\n\n if 'write_throughput' in self.results:\n print\n print '-' * 78\n print 'Write Throughput'.center(78)\n print '-' * 78\n write_thru = self.results['write_throughput']\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\n MakeHumanReadable(write_thru['file_size']),\n write_thru['num_copies'],\n MakeHumanReadable(write_thru['total_bytes_copied']))\n print 'Write throughput: %s/s.' % (\n MakeBitsHumanReadable(write_thru['bytes_per_second'] * 8))\n\n if 'read_throughput' in self.results:\n print\n print '-' * 78\n print 'Read Throughput'.center(78)\n print '-' * 78\n read_thru = self.results['read_throughput']\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\n MakeHumanReadable(read_thru['file_size']),\n read_thru['num_times'],\n MakeHumanReadable(read_thru['total_bytes_copied']))\n print 'Read throughput: %s/s.' 
% (\n MakeBitsHumanReadable(read_thru['bytes_per_second'] * 8))\n\n if 'listing' in self.results:\n print\n print '-' * 78\n print 'Listing'.center(78)\n print '-' * 78\n\n listing = self.results['listing']\n insert = listing['insert']\n delete = listing['delete']\n print 'After inserting %s objects:' % listing['num_files']\n print (' Total time for objects to appear: %.2g seconds' %\n insert['time_took'])\n print ' Number of listing calls made: %s' % insert['num_listing_calls']\n print (' Individual listing call latencies: [%s]' %\n ', '.join('%.2gs' % lat for lat in insert['list_latencies']))\n print (' Files reflected after each call: [%s]' %\n ', '.join(map(str, insert['files_seen_after_listing'])))\n\n print 'After deleting %s objects:' % listing['num_files']\n print (' Total time for objects to appear: %.2g seconds' %\n delete['time_took'])\n print ' Number of listing calls made: %s' % delete['num_listing_calls']\n print (' Individual listing call latencies: [%s]' %\n ', '.join('%.2gs' % lat for lat in delete['list_latencies']))\n print (' Files reflected after each call: [%s]' %\n ', '.join(map(str, delete['files_seen_after_listing'])))\n\n if 'sysinfo' in self.results:\n print\n print '-' * 78\n print 'System Information'.center(78)\n print '-' * 78\n info = self.results['sysinfo']\n print 'IP Address: \\n %s' % info['ip_address']\n print 'Temporary Directory: \\n %s' % info['tempdir']\n print 'Bucket URI: \\n %s' % self.results['bucket_uri']\n print 'gsutil Version: \\n %s' % self.results.get('gsutil_version',\n 'Unknown')\n print 'boto Version: \\n %s' % self.results.get('boto_version', 'Unknown')\n\n if 'gmt_timestamp' in info:\n ts_string = info['gmt_timestamp']\n timetuple = None\n try:\n # Convert RFC 2822 string to Linux timestamp.\n timetuple = time.strptime(ts_string, '%a, %d %b %Y %H:%M:%S +0000')\n except ValueError:\n pass\n\n if timetuple:\n # Converts the GMT time tuple to local Linux timestamp.\n localtime = calendar.timegm(timetuple)\n localdt = datetime.datetime.fromtimestamp(localtime)\n print 'Measurement time: \\n %s' % localdt.strftime(\n '%Y-%m-%d %I:%M:%S %p %Z')\n\n print 'Google Server: \\n %s' % info['googserv_route']\n print ('Google Server IP Addresses: \\n %s' %\n ('\\n '.join(info['googserv_ips'])))\n print ('Google Server Hostnames: \\n %s' %\n ('\\n '.join(info['googserv_hostnames'])))\n print 'Google DNS thinks your IP is: \\n %s' % info['dns_o-o_ip']\n print 'CPU Count: \\n %s' % info['cpu_count']\n print 'CPU Load Average: \\n %s' % info['load_avg']\n try:\n print ('Total Memory: \\n %s' %\n MakeHumanReadable(info['meminfo']['mem_total']))\n # Free memory is really MemFree + Buffers + Cached.\n print 'Free Memory: \\n %s' % MakeHumanReadable(\n info['meminfo']['mem_free'] +\n info['meminfo']['mem_buffers'] +\n info['meminfo']['mem_cached'])\n except TypeError:\n pass\n\n if 'netstat_end' in info and 'netstat_start' in info:\n netstat_after = info['netstat_end']\n netstat_before = info['netstat_start']\n for tcp_type in ('sent', 'received', 'retransmit'):\n try:\n delta = (netstat_after['tcp_%s' % tcp_type] -\n netstat_before['tcp_%s' % tcp_type])\n print 'TCP segments %s during test:\\n %d' % (tcp_type, delta)\n except TypeError:\n pass\n else:\n print ('TCP segment counts not available because \"netstat\" was not '\n 'found during test runs')\n\n if 'disk_counters_end' in info and 'disk_counters_start' in info:\n print 'Disk Counter Deltas:\\n',\n disk_after = info['disk_counters_end']\n disk_before = info['disk_counters_start']\n print '', 
'disk'.rjust(6),\n for colname in ['reads', 'writes', 'rbytes', 'wbytes', 'rtime',\n 'wtime']:\n print colname.rjust(8),\n print\n for diskname in sorted(disk_after):\n before = disk_before[diskname]\n after = disk_after[diskname]\n (reads1, writes1, rbytes1, wbytes1, rtime1, wtime1) = before\n (reads2, writes2, rbytes2, wbytes2, rtime2, wtime2) = after\n print '', diskname.rjust(6),\n deltas = [reads2-reads1, writes2-writes1, rbytes2-rbytes1,\n wbytes2-wbytes1, rtime2-rtime1, wtime2-wtime1]\n for delta in deltas:\n print str(delta).rjust(8),\n print\n\n if 'tcp_proc_values' in info:\n print 'TCP /proc values:\\n',\n for item in info['tcp_proc_values'].iteritems():\n print ' %s = %s' % item\n\n if 'boto_https_enabled' in info:\n print 'Boto HTTPS Enabled: \\n %s' % info['boto_https_enabled']\n\n if 'using_proxy' in info:\n print 'Requests routed through proxy: \\n %s' % info['using_proxy']\n\n if 'google_host_dns_latency' in info:\n print ('Latency of the DNS lookup for Google Storage server (ms): '\n '\\n %.1f' % (info['google_host_dns_latency'] * 1000.0))\n\n if 'google_host_connect_latencies' in info:\n print 'Latencies connecting to Google Storage server IPs (ms):'\n for ip, latency in info['google_host_connect_latencies'].iteritems():\n print ' %s = %.1f' % (ip, latency * 1000.0)\n\n if 'proxy_dns_latency' in info:\n print ('Latency of the DNS lookup for the configured proxy (ms): '\n '\\n %.1f' % (info['proxy_dns_latency'] * 1000.0))\n\n if 'proxy_host_connect_latency' in info:\n print ('Latency connecting to the configured proxy (ms): \\n %.1f' %\n (info['proxy_host_connect_latency'] * 1000.0))\n\n if 'request_errors' in self.results and 'total_requests' in self.results:\n print\n print '-' * 78\n print 'In-Process HTTP Statistics'.center(78)\n print '-' * 78\n total = int(self.results['total_requests'])\n numerrors = int(self.results['request_errors'])\n numbreaks = int(self.results['connection_breaks'])\n availability = (((total - numerrors) / float(total)) * 100\n if total > 0 else 100)\n print 'Total HTTP requests made: %d' % total\n print 'HTTP 5xx errors: %d' % numerrors\n print 'HTTP connections broken: %d' % numbreaks\n print 'Availability: %.7g%%' % availability\n if 'error_responses_by_code' in self.results:\n sorted_codes = sorted(\n self.results['error_responses_by_code'].iteritems())\n if sorted_codes:\n print 'Error responses by code:'\n print '\\n'.join(' %s: %s' % c for c in sorted_codes)\n\n if self.output_file:\n with open(self.output_file, 'w') as f:\n json.dump(self.results, f, indent=2)\n print\n print \"Output file written to '%s'.\" % self.output_file\n\n print", "def generate_main_results():\n\n # Generate results \n results = generate_results()\n\n # Calculate results\n mean_sem_scores = sort_tbl(generate_mean_std_tbl(*calculate_mean_sem_scores(results)), ovrs_order=OVERSAMPLERS_NAMES, clfs_order=CLASSIFIERS_NAMES)\n keys = mean_sem_scores[['Classifier', 'Metric']]\n mean_sem_perc_diff_scores = []\n for oversampler in ('SMOTE', 'K-MEANS SMOTE', 'SOMO', 'G-SMOTE'):\n perc_diff_scores = sort_tbl(generate_mean_std_tbl(*calculate_mean_sem_perc_diff_scores(results, [oversampler, 'G-SOMO'])), ovrs_order=OVERSAMPLERS_NAMES, clfs_order=CLASSIFIERS_NAMES)\n perc_diff_scores = perc_diff_scores.rename(columns={'Difference': oversampler}).drop(columns=['Classifier', 'Metric'])\n mean_sem_perc_diff_scores.append(perc_diff_scores)\n mean_sem_perc_diff_scores = pd.concat([keys, pd.concat(mean_sem_perc_diff_scores, axis=1)], axis=1)\n mean_sem_ranking = 
sort_tbl(generate_mean_std_tbl(*calculate_mean_sem_ranking(results)), ovrs_order=OVERSAMPLERS_NAMES, clfs_order=CLASSIFIERS_NAMES)\n\n # Generate main results\n main_results_names = ('mean_sem_scores', 'mean_sem_perc_diff_scores', 'mean_sem_ranking')\n main_results = zip(main_results_names, (mean_sem_scores, mean_sem_perc_diff_scores, mean_sem_ranking))\n \n return main_results", "def extractNNBenchResults(lines):\n nnbench_results = {}\n SUCCESSFUL_FILE_OPS = \"Successful file operations\"\n MAPS_MISSED_BARRIER = \"maps that missed the barrier\"\n TPS_OPEN_READ = \"TPS: Open/Read\"\n AVG_EXEC_TIME_OPEN_READ = \"Avg Exec time (ms): Open/Read\"\n AVG_LAT_OPEN = \"Avg Lat (ms): Open\"\n RAW_DATA_AL_TOTAL_1 = \"RAW DATA: AL Total #1\"\n RAW_DATA_AL_TOTAL_2 = \"RAW DATA: AL Total #2\"\n RAW_DATA_TPS_TOTAL = \"RAW DATA: TPS Total (ms)\"\n RAW_DATA_LONGEST_MAP_TIME = \"RAW DATA: Longest Map Time (ms)\"\n RAW_DATA_LATE_MAPS = \"RAW DATA: Late maps\"\n RAW_DATA_EXCEPTIONS = \"RAW DATA: # of exceptions\"\n\n for line in lines:\n if SUCCESSFUL_FILE_OPS in line:\n addResult(nnbench_results, SUCCESSFUL_FILE_OPS, line)\n if MAPS_MISSED_BARRIER in line:\n addResult(nnbench_results, MAPS_MISSED_BARRIER, line)\n if TPS_OPEN_READ in line:\n addResult(nnbench_results, TPS_OPEN_READ, line)\n if AVG_EXEC_TIME_OPEN_READ in line:\n addResult(nnbench_results, AVG_EXEC_TIME_OPEN_READ, line)\n if AVG_LAT_OPEN in line:\n addResult(nnbench_results, AVG_LAT_OPEN, line)\n if RAW_DATA_AL_TOTAL_1 in line:\n addResult(nnbench_results, RAW_DATA_AL_TOTAL_1, line)\n if RAW_DATA_AL_TOTAL_2 in line:\n addResult(nnbench_results, RAW_DATA_AL_TOTAL_2, line)\n if RAW_DATA_TPS_TOTAL in line:\n addResult(nnbench_results, RAW_DATA_TPS_TOTAL, line)\n if RAW_DATA_LONGEST_MAP_TIME in line:\n addResult(nnbench_results, RAW_DATA_LONGEST_MAP_TIME, line)\n if RAW_DATA_LATE_MAPS in line:\n addResult(nnbench_results, RAW_DATA_LATE_MAPS, line)\n if RAW_DATA_EXCEPTIONS in line:\n addResult(nnbench_results, RAW_DATA_EXCEPTIONS, line)\n\n return nnbench_results", "def summariseResult(self, test):", "def parse_results(self, html):\r\n soup = BeautifulSoup(html, 'html.parser')\r\n\r\n info = [tag.getText().strip() for tag in soup.find_all('span', {'class': 'nobr'})]\r\n name, matnr, degree, semester = info[1], info[3], info[5], info[7]\r\n student = Student(name, matnr, degree, semester)\r\n\r\n elements = [tag.getText().strip() for tag in soup.find_all('th', {'class': 'Konto'})]\r\n no_elems = len(elements)\r\n\r\n raw_results = [\r\n unicodedata.normalize(\"NFKD\", tag.getText().strip())\r\n for tag in soup.find_all('td', {'class': 'posrecords'})\r\n ]\r\n res_tuples = list(group(raw_results, no_elems))\r\n results = []\r\n for tup in res_tuples:\r\n grade_lst = list(tup)\r\n seme = grade_lst[elements.index('Semester')]\r\n exam = grade_lst[elements.index('Prüfungsname')]\r\n grad = float(grade_lst[elements.index('Note')].replace(',', '.'))\r\n ects = int(parse_ects(grade_lst[elements.index('ECTS')]))\r\n pasd = grade_lst[elements.index('Status')]\r\n results.append(Result(seme, exam, grad, ects, pasd))\r\n return results, student", "def summarize_results(res_dir):\n\n print(\"Gathering Results...\")\n\n with open(\"simulations.csv\", 'a') as f:\n f.write('Datetime $ N $ M $ d $ parts $ ttype $ method $ instance $ time_c $ time_i $ n_iter $ '\n 'inter_s $ inter_e $ intra_s $ intra_e $ total $ true_inter $ clustering\\n')\n\n results = {}\n for k in CLUSTER_SIZE:\n results[k] = {}\n for l in DATAFOLDER.values():\n results[k][l] = {}\n for m in 
['G', 'R', 'S', 'BCS', 'BCG', 'BCR', 'RSR', 'RSG', 'RSS', 'NN']:\n results[k][l][m] = {'N': [], 'avg_cl': [], 'max_cl': [], 'min_cl': [], 'sd_cl': [],\n 'avg_ii': [], 'max_ii': [], 'min_ii': [], 'sd_ii': [], 'avg_tot': [],\n 'max_tot': [], 'min_tot': [], 'sd_tot': [], 'avg_iter': [], 'min_iter': [],\n 'max_iter': [], 'sd_iter': [], 'avg_optgap': [], 'min_optgap': [], 'max_optgap': [],\n 'sd_optgap': [], 'avg_optper': [], 'min_optper': [], 'max_optper': [],\n 'sd_optper': [],\n 'avg_impper': [], 'min_impper': [], 'max_impper': [], 'sd_impper': []}\n\n log_dir = os.path.join(os.getcwd(), \"..\", \"Logs\", res_dir, \"*.log\")\n logs = glob.glob(log_dir)\n\n for f in logs:\n ttype, method, N, M, d, parts = get_info(f)\n print(get_info(f))\n info = get_results(f)\n\n res = results[d][DATAFOLDER[int(ttype)]][method]\n res['N'].append(N if int(N) >= 64 else str(N) + '_' + str(M))\n res['avg_cl'].append(info[0])\n res['max_cl'].append(info[1])\n res['min_cl'].append(info[2])\n res['sd_cl'].append(info[3])\n res['avg_ii'].append(info[4])\n res['max_ii'].append(info[5])\n res['min_ii'].append(info[6])\n res['sd_ii'].append(info[7])\n res['avg_tot'].append(info[8])\n res['max_tot'].append(info[9])\n res['min_tot'].append(info[10])\n res['sd_tot'].append(info[11])\n res['avg_iter'].append(info[12])\n res['min_iter'].append(info[13])\n res['max_iter'].append(info[14])\n res['sd_iter'].append(info[15])\n res['avg_optgap'].append(info[16])\n res['min_optgap'].append(info[17])\n res['max_optgap'].append(info[18])\n res['sd_optgap'].append(info[19])\n res['avg_optper'].append(info[20])\n res['min_optper'].append(info[21])\n res['max_optper'].append(info[22])\n res['sd_optper'].append(info[23])\n res['avg_impper'].append(info[24])\n res['min_impper'].append(info[25])\n res['max_impper'].append(info[26])\n res['sd_impper'].append(info[27])\n\n res_filename = os.path.join(os.getcwd(), RES_PATH, res_dir + \"__\" + str(date.today()))\n fjson = res_filename + \".json\"\n fcsv = res_filename + \".csv\"\n print(\"Saving Results in : \" + res_filename)\n\n with open(fjson, 'w') as fp:\n dump(results, fp, indent=4)\n\n if os.path.exists(\"simulations.csv\"):\n os.rename(\"simulations.csv\", fcsv)", "def generate_csv(method):\n k_sorted = {}\n r_sorted = {}\n runned_methods = {}\n runned_methods[method] = []\n\n # csv headers\n csv_k_sorted_header = [\n ['Sorted by K values'],\n ['']\n ]\n\n csv_recommended_sorted_header = [\n ['Sorted by recommended artist values'],\n ['']\n ]\n\n sub_header = ['', 'MAP', 'MAR', 'F1 score']\n all_jsons = sorted(glob.glob(OUTPUT_DIR + '/*.json'), key=os.path.getmtime)\n\n for one_json in all_jsons:\n with open(one_json) as data_file:\n data = json.load(data_file)\n\n runned_methods[method].append(data)\n\n for result_obj in runned_methods[method]:\n data_neighbors = [\n result_obj['neighbors'],\n result_obj['avg_prec'],\n result_obj['avg_rec'],\n result_obj['f1_score']\n ]\n\n data_recommended_artists = [\n result_obj['recommended_artists'],\n result_obj['avg_prec'],\n result_obj['avg_rec'],\n result_obj['f1_score']\n ]\n\n try:\n k_sorted[result_obj['neighbors']].append(data_recommended_artists)\n except:\n k_sorted[result_obj['neighbors']] = []\n k_sorted[result_obj['neighbors']].append(sub_header)\n k_sorted[result_obj['neighbors']].append(data_recommended_artists)\n\n try:\n r_sorted[result_obj['recommended_artists']].append(data_neighbors)\n except:\n r_sorted[result_obj['recommended_artists']] = []\n r_sorted[result_obj['recommended_artists']].append(sub_header)\n 
r_sorted[result_obj['recommended_artists']].append(data_neighbors)\n\n # sort items by K or R to compare values and get the best\n for key, value in r_sorted.items():\n # fill with meta info\n csv_recommended_sorted_header.append([''])\n csv_recommended_sorted_header.append([str(key) + ' recommended artists. '])\n\n for data in value:\n csv_recommended_sorted_header.append(data)\n\n for key, value in k_sorted.items():\n # fill with meta info\n csv_k_sorted_header.append([''])\n csv_k_sorted_header.append([str(key) + ' neighbors. '])\n\n for data in value:\n csv_k_sorted_header.append(data)\n\n b = open(OUTPUT_DIR + '/sorted_neighbors.csv', 'w')\n a = csv.writer(b)\n\n a.writerows(csv_k_sorted_header)\n b.close()\n\n b = open(OUTPUT_DIR + '/sorted_recommender.csv', 'w')\n a = csv.writer(b)\n\n a.writerows(csv_recommended_sorted_header)\n b.close()", "def summarize_srm_results(results):\n\n def estimate_srm_stats(results, var_name, tailed=1):\n estimate = results[var_name].mean()\n standardized = (results[var_name] / results[\"total_variance\"]).mean()\n se = results[var_name].std() / np.sqrt(len(results[var_name]))\n t = estimate / se\n if tailed == 1:\n p = 1 - stats.t.cdf(t, len(results[var_name]) - 1)\n elif tailed == 2:\n p = 2 * (1 - stats.t.cdf(t, len(results[var_name]) - 1))\n else:\n raise ValueError(\"tailed can only be [1,2]\")\n return (estimate, standardized, se, t, p)\n\n def print_srm_stats(results, var_name, tailed=1):\n estimate, standardized, se, t, p = estimate_srm_stats(\n results, var_name, tailed\n )\n print(\n f\"{var_name:<40} {estimate:^10.2f}{standardized:^10.2f} {se:^10.2f} {t:^10.2f} {p:^10.4f}\"\n )\n\n def print_single_group_srm_stats(results, var_name):\n estimate = results[var_name].mean()\n standardized = (results[var_name] / results[\"total_variance\"]).mean()\n print(\n f\"{var_name:<40} {estimate:^10.2f}{standardized:^10.2f} {np.nan:^10.2f} {np.nan:^10.2f} {np.nan:^10.4f}\"\n )\n\n def print_srm_covariances(results, var_name):\n estimate, _, se, t, p = estimate_srm_stats(\n results, f\"{var_name}_covariance\", tailed=2\n )\n standardized = results[f\"{var_name}_correlation\"].mean()\n print(\n f\"{var_name:<40} {estimate:^10.2f}{standardized:^10.2f} {se:^10.2f} {t:^10.2f} {p:^10.4f}\"\n )\n\n def print_single_srm_covariances(results, var_name):\n estimate = results[f\"{var_name}_covariance\"].mean()\n standardized = results[f\"{var_name}_correlation\"].mean()\n print(\n f\"{var_name:<40} {estimate:^10.2f}{standardized:^10.2f} {np.nan:^10.2f} {np.nan:^10.2f} {np.nan:^10.4f}\"\n )\n\n if isinstance(results, pd.Series):\n n_groups = 1\n group_size = results[\"actor_effect\"].shape[0]\n elif isinstance(results, pd.DataFrame):\n n_groups = len(results)\n group_size = np.mean([x.shape for x in results[\"actor_effect\"]])\n\n print(\"Social Relations Model: Results\")\n print(\"\\n\")\n print(f\"Number of Groups: {n_groups:<20}\")\n print(f\"Average Group Size: {group_size:<20}\")\n print(\"\\n\")\n print(\n f\"{'':<40} {'Estimate':<10} {'Standardized':<10} {'se':<10} {'t':<10} {'p':<10}\"\n )\n if isinstance(results, pd.Series):\n print_single_group_srm_stats(results, \"actor_variance\")\n print_single_group_srm_stats(results, \"partner_variance\")\n print_single_group_srm_stats(results, \"relationship_variance\")\n print_single_srm_covariances(results, \"actor_partner\")\n print_single_srm_covariances(results, \"dyadic_reciprocity\")\n elif isinstance(results, pd.DataFrame):\n print_srm_stats(results, \"actor_variance\")\n print_srm_stats(results, 
\"partner_variance\")\n print_srm_stats(results, \"relationship_variance\")\n print_srm_covariances(results, \"actor_partner\")\n print_srm_covariances(results, \"dyadic_reciprocity\")\n print(\"\\n\")\n print(\n f\"{'Actor Reliability':<20} {results['actor_reliability'].mean():^20.2f}\"\n )\n print(\n f\"{'Partner Reliability':<20} {results['partner_reliability'].mean():^20.2f}\"\n )\n print(\"\\n\")", "def _parse_result(self, elem):\n logger.debug('Parsing sample level results data from message...')\n results = namedtuple('Result', 'run_id assay_id sample_role sample_type sample_id result units result_status '\n 'username flags cntrl_cts comments dwp_id mwp_id mwp_position start_ts end_ts')\n sample_role = elem.spm.spm_11.spm_11_1.value\n if sample_role == 'P':\n sample_type = elem.spm.spm_4.spm_4_1.value\n else:\n sample_type = elem.oul_r22_container[0].inv.inv_1.inv_1_1.value\n sample_id = elem.spm.spm_2.eip_1.ei_1.to_er7()\n # TODO implement parsing results (log10 numeric, TND, <LOD\n raw_result = elem.oul_r22_order[0].oul_r22_obxtcdsidnte_suppgrp[1].obx.obx_5.value\n result = self.process_result(raw_result)\n units = elem.oul_r22_order[0].oul_r22_obxtcdsidnte_suppgrp[1].obx.obx_6.obx_6_1.value\n result_status = elem.oul_r22_order[0].oul_r22_obxtcdsidnte_suppgrp[1].obx.obx_11.value\n username = elem.oul_r22_order[0].oul_r22_obxtcdsidnte_suppgrp[1].obx.obx_16.value\n flags_raw = elem.oul_r22_order[1].nte.nte_3.value\n flags_raw = flags_raw[2:].split(',')\n if flags_raw[0] == 'NONE' and len(flags_raw) == 1:\n flags = [\"\"]\n else:\n flags = flags_raw\n if sample_role == 'Q' and sample_type != 'NEGCONTROL':\n cts_raw = elem.oul_r22_order[1].nte[1].nte_3.value\n cntrl_cts = C4800.parse_cntrl_ct(cts_raw)\n else:\n cntrl_cts = json.dumps({\"\":\"\"})\n if sample_role == 'P':\n comments = elem.oul_r22_order[1].nte[2].nte_3.value\n else:\n comments = ''\n dwp_id = elem.oul_r22_container[2].inv.inv_5.inv_5_1.value\n mwp_id = elem.oul_r22_container[1].inv.inv_5.inv_5_1.value\n mwp_position = elem.oul_r22_container[1].inv.inv_6.inv_6_1.value\n start_ts = datetime.strptime(elem.oul_r22_order[0].oul_r22_obxtcdsidnte_suppgrp[0]\n .obx.obx_5.obx_5_1.value,\n '%Y%m%d%H%M%S')\n end_ts = datetime.strptime(elem.oul_r22_order[0].oul_r22_obxtcdsidnte_suppgrp[0]\n .obx.obx_5.obx_5_2.value,\n '%Y%m%d%H%M%S')\n return results(self.run_info.id, self.get_assay_info().id, sample_role, sample_type, sample_id, result, units,\n result_status, username, flags, cntrl_cts, comments, dwp_id, mwp_id, mwp_position, start_ts,\n end_ts)", "def _extract_lane_results_for_end(self, tables, table_name, end):\n # parse lane result summary\n lane_summary = tables[table_name]\n # this is version 1 of the summary file\n if len(lane_summary[-1]) == 8:\n # strip header\n headers = lane_summary[0]\n # grab the lane by lane data\n lane_summary = lane_summary[1:]\n\n # len(lane_summary[-1] = 10 is version 2 of the summary file\n # = 9 is version 3 of the Summary.htm file\n elif len(lane_summary[-1]) in (9, 10):\n # lane_summary[0] is a different less specific header row\n headers = lane_summary[1]\n lane_summary = lane_summary[2:10]\n # after the last lane, there's a set of chip wide averages\n\n # append an extra dictionary if needed\n if len(self.lane_results) < (end + 1):\n self.lane_results.append({})\n\n for r in lane_summary:\n lrs = LaneResultSummaryGA(html=r)\n lrs.end = end\n self.lane_results[lrs.end][lrs.lane] = lrs", "def generate_statistical_results():\n \n # Generate results \n results = generate_results()\n\n # 
Calculate statistical results\n friedman_test = sort_tbl(generate_pvalues_tbl(apply_friedman_test(results)), ovrs_order=OVERSAMPLERS_NAMES, clfs_order=CLASSIFIERS_NAMES)\n holms_test = sort_tbl(generate_pvalues_tbl(apply_holms_test(results, control_oversampler='G-SOMO')), ovrs_order=OVERSAMPLERS_NAMES[:-1], clfs_order=CLASSIFIERS_NAMES)\n \n # Generate statistical results\n statistical_results_names = ('friedman_test', 'holms_test')\n statistical_results = zip(statistical_results_names, (friedman_test, holms_test))\n\n return statistical_results", "def multi_results(benchmark):\n # Read in results\n tensat_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n taso_root = os.path.join(os.path.dirname(tensat_root), \"TASO\")\n\n taso_benchmark_name = benchmark\n if benchmark == 'nasneta':\n taso_benchmark_name = 'nasnet_a'\n elif benchmark == 'vgg':\n taso_benchmark_name = 'vgg19-7'\n taso_runtime_file = os.path.join(taso_root, \"examples/{}_time.txt\".format(taso_benchmark_name))\n\n with open(taso_runtime_file, 'r') as f:\n content = f.readlines()\n\n orig_runtimes = []\n for line in content[-5:]:\n times = line.split('\\t')\n orig_runtimes.append(float(times[0]))\n orig_mean = np.mean(orig_runtimes)\n\n\n # iter=0\n mean_iter_0, mean_sat_iter_0, mean_ext_iter_0, mean_nodes_iter_0 = get_iter_stats(benchmark, tensat_root, iter=0)\n\n # iter=1\n mean_iter_1, mean_sat_iter_1, mean_ext_iter_1, mean_nodes_iter_1 = get_iter_stats(benchmark, tensat_root, iter=1)\n\n # iter=2\n mean_iter_2, mean_sat_iter_2, mean_ext_iter_2, mean_nodes_iter_2 = get_iter_stats(benchmark, tensat_root, iter=2)\n\n # iter=3\n mean_iter_3, mean_sat_iter_3, mean_ext_iter_3, mean_nodes_iter_3 = get_iter_stats(benchmark, tensat_root, iter=3)\n\n # Plot runtime & optimizer time v.s. 
iter\n speedup = [orig_mean/mean_iter_0, orig_mean/mean_iter_1, orig_mean/mean_iter_2]\n optimizer_time = [mean_sat_iter_0+mean_ext_iter_0, mean_sat_iter_1+mean_ext_iter_1, mean_sat_iter_2+mean_ext_iter_2]\n if mean_iter_3 > 0:\n speedup.append(orig_mean/mean_iter_3)\n optimizer_time.append(mean_sat_iter_3+mean_ext_iter_3)\n\n speedup = [(i-1)*100 for i in speedup]\n\n nodes = [mean_nodes_iter_0, mean_nodes_iter_1, mean_nodes_iter_2, mean_nodes_iter_3]\n\n result = {}\n result['speedup'] = speedup\n result['optimizer'] = optimizer_time\n result['nodes'] = nodes\n\n return result", "def parse_results(filename):\n with open(filename) as f:\n line_list = f.readlines()\n\n vs_lines = [line for line in line_list if ' vs ' in line]\n\n results = []\n\n for line in vs_lines:\n m = re.search(r'(.+) vs (.+) \\(result: ([0-9]+)-([0-9]+)\\)', line)\n if m is not None:\n result = MatchResult(\n blue=m.group(1),\n orange=m.group(2),\n blue_goals=int(m.group(3)),\n orange_goals=int(m.group(4)),\n blue_shots=0,\n orange_shots=0,\n blue_saves=0,\n orange_saves=0,\n blue_points=0,\n orange_points=0)\n\n results.append(result)\n\n return results", "def test_QueryTaxResult_build_summarized_result_2():\n # make mini taxonomy\n gA_tax = (\"gA\", \"a;b\")\n gB_tax = (\"gB\", \"a;c\")\n taxD = make_mini_taxonomy([gA_tax, gB_tax])\n # make gather results\n gather_results = [{'query_name': 'queryA', 'name': 'gA', 'f_unique_weighted': 0.5,'f_unique_to_query': 0.5,'unique_intersect_bp': 50}, \n {'query_name': 'queryA', \"name\": 'gB', 'f_unique_weighted': 0.4,'f_unique_to_query': 0.3,'unique_intersect_bp': 30},\n {'query_name': 'queryB', \"name\": 'gB', 'f_unique_weighted': 0.3,'f_unique_to_query': 0.3,'unique_intersect_bp': 30}]\n gres = make_QueryTaxResults(gather_info=gather_results, taxD=taxD)\n \n for query_name, q_res in gres.items():\n q_res.build_summarized_result() # summarize and build result\n sk = q_res.summarized_lineage_results['superkingdom']\n phy = q_res.summarized_lineage_results['phylum']\n assert len(sk) == 2\n assert sk[0].lineage == RankLineageInfo(lineage_str=\"a\")\n print(phy)\n if query_name == 'queryA':\n # check superkingdom results\n assert sk[0].fraction == approx(0.8)\n assert sk[0].f_weighted_at_rank == approx(0.9)\n assert sk[0].bp_match_at_rank == 80\n assert sk[1].fraction == approx(0.2)\n assert sk[1].f_weighted_at_rank == approx(0.1)\n assert sk[1].bp_match_at_rank == 20\n assert sk[1].lineage == RankLineageInfo()\n # check phylum results\n assert len(phy) == 3\n assert phy[0].fraction == approx(0.5)\n assert phy[0].f_weighted_at_rank == approx(0.5)\n assert phy[0].bp_match_at_rank == 50\n assert phy[0].lineage == RankLineageInfo(lineage_str=\"a;b\")\n assert phy[1].fraction == approx(0.3)\n assert phy[1].f_weighted_at_rank == approx(0.4)\n assert phy[1].bp_match_at_rank == 30\n assert phy[1].lineage == RankLineageInfo(lineage_str=\"a;c\")\n assert phy[2].fraction == approx(0.2)\n assert phy[2].f_weighted_at_rank == approx(0.1)\n assert phy[2].bp_match_at_rank == 20\n assert phy[2].lineage == RankLineageInfo()\n if query_name == 'queryB':\n # check superkingdom results\n assert sk[0].fraction == approx(0.3)\n assert sk[0].f_weighted_at_rank == approx(0.3)\n assert sk[0].bp_match_at_rank == 30\n assert sk[1].fraction == approx(0.7)\n assert sk[1].f_weighted_at_rank == approx(0.7)\n assert sk[1].bp_match_at_rank == 70\n assert sk[1].lineage == RankLineageInfo()\n # check phylum results\n assert len(phy) == 2\n assert phy[0].fraction == approx(0.3)\n assert phy[0].f_weighted_at_rank 
== approx(0.3)\n assert phy[0].bp_match_at_rank == 30\n assert phy[0].lineage == RankLineageInfo(lineage_str=\"a;c\")\n assert phy[1].fraction == approx(0.7)\n assert phy[1].f_weighted_at_rank == approx(0.7)\n assert phy[1].bp_match_at_rank == 70\n assert phy[1].lineage == RankLineageInfo()", "def get_results(filename):\n\n ttype, method, N, M, d, parts = get_info(filename)\n\n with open(filename) as f:\n time_c, time_i, iter, inter_s, inter_e, intra_s, intra_e, tot, tinter, tintra, f = read_simulation(f, ttype,\n method)\n time_c = parse_time(time_c)\n time_i = parse_time(time_i)\n iter = parse_int(iter)\n inter_s = parse_float(inter_s)\n inter_e = parse_float(inter_e)\n intra_s = parse_float(intra_s)\n intra_e = parse_float(intra_e)\n tot = parse_float(tot)\n tinter = tinter if None in tinter else parse_float(tinter)\n tintra = tintra if None in tintra else parse_float(tintra)\n\n instances = len(time_c)\n\n avg_cl = get_avg_time(time_c)\n min_cl = min(time_c)\n max_cl = max(time_c)\n sd_cl = sqrt(sum((t - float(avg_cl)) ** 2 for t in time_c) / instances)\n\n avg_ii = get_avg_time(time_i)\n min_ii = min(time_i)\n max_ii = max(time_i)\n sd_ii = sqrt(sum((t - float(avg_ii)) ** 2 for t in time_i) / instances)\n\n time_tot = time_c + time_i\n avg_tot = get_avg_time(time_tot)\n min_tot = min(time_tot)\n max_tot = max(time_tot)\n sd_tot = sqrt(sum((t - float(avg_tot)) ** 2 for t in time_tot) / instances)\n\n avg_iter = sum(iter) / instances\n min_iter = min(iter)\n max_iter = max(iter)\n sd_iter = sqrt(sum((t - float(avg_iter)) ** 2 for t in iter) / instances)\n\n if None in tinter:\n avg_optgap = min_optgap = max_optgap = sd_optgap = None\n else:\n opt_gap = []\n for i in range(len(intra_e)):\n opt_gap.append(abs(float(tintra[i]) - intra_e[i]) / intra_e[i])\n\n avg_optgap = sum(opt_gap) / instances\n min_optgap = min(opt_gap)\n max_optgap = max(opt_gap)\n sd_optgap = sqrt(sum((t - float(avg_optgap)) ** 2 for t in opt_gap) / instances)\n\n if None in tinter:\n avg_optper = min_optper = max_optper = sd_optper = None\n else:\n opt_per = []\n for i in range(len(intra_e)):\n opt_per.append(abs(float(tintra[i]) - intra_e[i]) / float(tintra[i]))\n\n avg_optper = sum(opt_per) / instances\n min_optper = min(opt_per)\n max_optper = max(opt_per)\n sd_optper = sqrt(sum((t - float(avg_optper)) ** 2 for t in opt_per) / instances)\n\n improvement_per = []\n for i in range(len(intra_e)):\n improvement_per.append(abs(intra_s[i] - intra_e[i]) / intra_s[i])\n\n avg_impper = sum(improvement_per) / instances\n min_impper = min(improvement_per)\n max_impper = max(improvement_per)\n sd_impper = sqrt(sum((t - float(avg_impper)) ** 2 for t in improvement_per) / instances)\n\n return avg_cl, max_cl, min_cl, sd_cl, \\\n avg_ii, max_ii, min_ii, sd_ii, avg_tot, max_tot, min_tot, sd_tot, avg_iter, min_iter, \\\n max_iter, sd_iter, avg_optgap, min_optgap, max_optgap, sd_optgap, avg_optper, min_optper, \\\n max_optper, sd_optper, avg_impper, min_impper, max_impper, sd_impper", "def parse_ratios(self, data):\n try:\n csv_reader = csv.reader(data)\n for row in csv_reader:\n if row:\n self.ratios_data.append(row)\n if not len(self.ratios_data):\n logging.error('No Morningstar rtios data')\n return False\n self.roic = extract_float_data_for_key(self.ratios_data, 'Return on Invested Capital %')\n self.roic_averages = compute_averages_for_data(self.roic)\n if not self.roic_averages:\n logging.error('Failed to parse ROIC')\n self.long_term_debt = extract_float_data_for_key(self.ratios_data, 'Long-Term Debt')\n 
self.sales_growth_rate_averages = extract_averages_from_data_for_key(self.ratios_data, 'Revenue %')\n if not self.sales_growth_rate_averages:\n logging.error('Failed to parse Sales Averages')\n self.eps_growth_rate_averages = extract_averages_from_data_for_key(self.ratios_data, 'EPS %')\n if not self.eps_growth_rate_averages:\n logging.error('Failed to parse EPS averages.')\n debt_equity = extract_float_data_for_key(self.ratios_data, 'Debt/Equity')\n if not debt_equity or not len(debt_equity):\n logging.error('Failed to parse Debt-to-Equity ratio.')\n else:\n self.debt_equity_ratio = debt_equity[-1]\n except Exception as e:\n logging.error(traceback.format_exc())\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs a hill climber with two randomly chosen houses. It takes two randomly chosen houses, checks whether it is possible to switch them, and whether the costs would be lower.
def hillclimber(map):
    for i in range(40000):
        house1 = map.houses[random.randrange(150)]
        house2 = map.houses[random.randrange(150)]
        battery1 = house1.connected
        battery2 = house2.connected
        if battery1 is not None and battery2 is not None:
            if battery1.id == battery2.id:
                pass
            elif battery1.power + house1.output - house2.output < 0:
                pass
            elif battery2.power + house2.output - house1.output < 0:
                pass
            elif (distance(house1, battery2) + distance(house2, battery1)) < (distance(house1, battery1) + distance(house2, battery2)):
                map.swap(house1, house2)
        elif battery1 is None:
            check(house1, map.batteries)
        else:
            check(house2, map.batteries)
[ "def hill_climber(self, iterations):\n\n random_houses = list(self.houses.values())\n random_houses_2 = list(self.houses.values())\n iterations = int(iterations)\n count = 0\n misses = -iterations\n prices = []\n\n # Do untill we have <iterations> succesfull configurations\n while count < iterations:\n disconnect(self)\n # connect random houses to the closest option within the constraints\n # While one or more batteries are over their capacity or not every\n # house is linked to a battery\n while check_linked(self) is False or check_full(self) is True:\n # print(misses)\n misses += 1\n\n # shuffle order of houses\n random.shuffle(random_houses)\n # remove connections, if any\n disconnect(self)\n\n # for every house find closest battery to connect to provided\n # that this house wont over-cap the battery\n for house in random_houses:\n\n for i in range(4):\n if house.output + self.batteries[list(house.diffs)[i]].filled() <= self.batteries[list(house.diffs)[i]].capacity:\n house.link = self.batteries[list(house.diffs)[i]]\n self.batteries[list(house.diffs)[i]].linked_houses.append(house)\n break\n base_copy = copy.copy([self.houses, self.batteries])\n base_cost = calculate_cost(self)\n\n print(\"Start Hillclimb\")\n step_back_cost = base_cost\n step_back = base_copy\n\n climbs = 0\n hillcount = 0\n alt_directions = 150 * 150\n\n random.shuffle(random_houses)\n random.shuffle(random_houses_2)\n\n # shorten time by checking calbe diff instead of whole gridcost\n while hillcount < alt_directions:\n # loop while the new step is inefficient\n for house_1 in random_houses:\n for house_2 in random_houses_2:\n # take a step if not the same batteries\n if not (house_1.link == house_2.link):\n switch_houses(self, house_1, house_2)\n step_cost = calculate_cost(self)\n if (step_cost < step_back_cost) and (check_full(self) is False):\n climbs += 1\n step_back = copy.copy([self.houses, self.batteries])\n step_back_cost = step_cost\n hillcount = 0\n else:\n switch_houses(self, house_1, house_2)\n #self.houses, self.batteries = step_back[0], step_back[1]\n hillcount += 1\n\n print(f\"bc={base_cost}, hilltop = {step_cost}\")\n time_var = time.strftime(\"%d%m%Y\")\n prices.append(step_cost)\n\n if step_cost is min(prices):\n house_batt = [self.houses, self.batteries]\n with open(f\"hill_climber_batt_lowest_WIJK{self.input}_{time_var}.dat\", \"wb\") as f:\n pickle.dump(house_batt, f)\n with open(f\"sequence_lowest_WIJK{self.input}_{time_var}.dat\", \"wb\") as f:\n pickle.dump(random_houses, f)\n count += 1\n print(count)\n\n print(f\"min: {min(prices)}\")\n print(f\"max: {max(prices)}\")\n print(f\"mean: {np.mean(prices)}\")\n print(f\"unsuccesfull iterations: {misses}\")", "def switch_houses(self, new_grid):\n\n # Get two unique random batteries\n random_battery = random.choice(list(new_grid.batteries.values())) \n random_battery_2 = random.choice(list(new_grid.batteries.values()))\n\n while random_battery == random_battery_2:\n random_battery_2 = random.choice(list(new_grid.batteries.values()))\n\n # Get a random house from each battery\n random_house = random.choice(random_battery.connect)\n random_house_2 = random.choice(random_battery_2.connect)\n \n # Check if switching houses doesn't exceed battery capacity constraint\n if self.compare_output(random_house, random_house_2, random_battery, random_battery_2):\n \n # Check if switching houses results in shorter distances\n if self.compare_distance(random_house, random_house_2, random_battery, random_battery_2):\n \n # Switch houses\n 
random_battery.disconnect_house(random_house)\n random_battery_2.disconnect_house(random_house_2)\n \n random_battery.set_connection(random_house_2)\n random_battery_2.set_connection(random_house)\n return True\n return False", "def switch_house(self, houseA, houseB):\n\n # check if switched houses coud validly be placed within\n # grid after switching\n switchValidityHouseA = self.house_inside_grid_check(houseA)\n switchValidityHouseB = self.house_inside_grid_check(houseB)\n if ((switchValidityHouseA is True and\n switchValidityHouseB is True)):\n\n # backup houses' coordination\n backUpHouseAX = houseA.x\n backUpHouseAY = houseA.y\n backUpHouseBX = houseB.x\n backUpHouseBY = houseB.y\n\n # remove houses from grid\n self.remove_house(houseA)\n self.remove_house(houseB)\n\n # switch both houses' coordinates\n houseA.x = backUpHouseBX\n houseA.y = backUpHouseBY\n houseB.x = backUpHouseAX\n houseB.y = backUpHouseAY\n\n # place houses on new locations\n aSucces = self.place_house(houseA, houseA.x, houseA.y)\n bSucces = self.place_house(houseB, houseB.x, houseB.y)\n\n # check if houses are placed succesful,\n # if not remove two houses\n if any([(aSucces is False and bSucces is True),\n (bSucces is False and aSucces is True)]):\n if bSucces is False:\n self.remove_house(houseA)\n\n if aSucces is False:\n self.remove_house(houseB)\n\n # place house back at orignal location\n self.place_house(houseA, backUpHouseAX, backUpHouseAY)\n self.place_house(houseB, backUpHouseBX, backUpHouseBY)\n\n if any([aSucces is False and bSucces is False]):\n # place house back at orignal location\n self.place_house(houseA, backUpHouseAX, backUpHouseAY)\n self.place_house(houseB, backUpHouseBX, backUpHouseBY)", "def monty_hall(trials = 1000):\r\n #List for win or not results if you switch\r\n change_result = []\r\n #List for proportion of current number of wins and losses\r\n change_probs = []\r\n #similar lists as above assuming door choice is not switched\r\n stay_result = []\r\n stay_probs = []\r\n \r\n counter = 0\r\n \r\n while counter < trials:\r\n #reset the doors so all are blank and then assign one at random to take a value of 1\r\n #1 indicates a win, 0 indicates a loss\r\n doors = [0,0,0]\r\n rand_no = random.randint(0,2)\r\n doors[rand_no] = 1\r\n \r\n #Pick a door at random - at this stage you have 1/3 probability of being right so random is fine\r\n selected_door = random.randint(0,2)\r\n \r\n #Monty hall reveals a door that's not a winner\r\n #The code does this by cycling through the two remaning doors and picking one that's not a winner\r\n if doors[selected_door-1] != 1:\r\n opened_door = selected_door-1\r\n elif doors[selected_door-2] !=1:\r\n opened_door = selected_door-2\r\n \r\n #Now remove the two doors to leave only one result\r\n #Track what the result is if you stayed\r\n initial_door_result = doors.pop(selected_door) \r\n doors.pop(opened_door)\r\n #There's only one door left\r\n new_door_result = doors[0]\r\n \r\n #add results of changing door selection\r\n change_result.append(new_door_result)\r\n #calculate the percentage of times the correct door was selected\r\n current_percent = sum(change_result)/len(change_result)\r\n change_probs.append(current_percent)\r\n \r\n #add results of not changing door selection\r\n stay_result.append(initial_door_result)\r\n stay_percent = sum(stay_result)/len(stay_result) \r\n stay_probs.append(stay_percent)\r\n \r\n \r\n counter+=1\r\n \r\n #create graph showing results\r\n charted = plot_data(change_probs, stay_probs)\r\n \r\n return(change_probs, 
\r\n stay_probs, \r\n charted)", "def single_cost(self, coordinates):\n\n # set default price of a single family home\n self.single = 285000 \n\n # calculate percentage of extra housing worth per extra square meter space\n self.percentage_single = self.single * 0.03\n\n # make default total price of all single family houses\n self.total_single = self.single_amount * self.single\n\n # retrieve coordinates\n coordinateslist = coordinates\n\n # check the outline of every single family house for extra free space\n for coordinate in coordinateslist:\n single = 1 \n bungalow = 2 \n maison = 3 \n\n # free space is calculated by checking the distance between house and its surroundings.\n distance = 3 \n\n x_coordinaat = coordinate[0]\n y_coordinaat = coordinate[1]\n\n # check for free space around house until another house is found\n check = True\n while check == True:\n\n # the distance between house and its surroundings is increased by one for each run to check for more free space\n # the free space around a house is checked on each side (up, down, left and right)\n x = x_coordinaat - distance\n x_ver = x_coordinaat + 8 + distance\n y = y_coordinaat - distance\n y_ver = y_coordinaat + 8 + distance\n\n # reset coordinates when out of boundary, because extra free space is able to 'go over' the boundaries\n if x < 0:\n x = 0 \n\n if x_ver > 160:\n x_ver = 160\n\n if y < 0:\n y = 0\n\n if y_ver > 180:\n y_ver = 180\n\n # remove current house from the gridmap\n self.neighbourhood[(y_coordinaat - 2):(y_coordinaat + 10),(x_coordinaat - 2):(x_coordinaat + 10)] = 0\n \n # check for other house in given range of free space\n try:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n\n # when no house is found, recalculate total price and add one extra meter of free space\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1\n \n # check for IndexError to be sure a coordinate does not go out of range\n except IndexError:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1\n\n # redraw house on the gridmap\n self.neighbourhood[(y_coordinaat - 2):(y_coordinaat + 10),(x_coordinaat - 2):(x_coordinaat + 10)] = 5\n self.neighbourhood[y_coordinaat:(y_coordinaat + 8),x_coordinaat:(x_coordinaat + 8)] = 1\n\n return self.total_single", "def greedy(grid):\n # repeat untill all houses are connected meaning a solution is found\n while grid.unconnected_houses != []:\n # disconnect 3 random houses if solution is not found, this\n # prevents that no solution will be found and makes outcome worse\n if len(grid.unconnected_houses) < 3:\n for i in range(3):\n grid.disconnect(random.randint(0, 150))\n\n\n # find min and max output value of all houses\n all_outputs = [house.max_output for house in grid.houses]\n min_out = min(all_outputs)\n max_out = max(all_outputs)\n\n # calculate factor, this represent the average amount allowed leftover capacity\n leftover_when_all_connected = sum(battery.max_capacity for battery in grid.batteries) - sum(all_outputs)\n factor = leftover_when_all_connected / len(grid.batteries)\n\n\n # for each battery loop over houses find current closest house and connect\n # when leftover capacity is under the max output a house can possibly have\n 
# but over 5 the algorithm tries to find a better fit\n for battery in grid.batteries:\n for counter in range(len(grid.unconnected_houses)):\n\n closest_house = battery.find_closest_house(grid.unconnected_houses)\n\n # input check\n if closest_house == None:\n break\n\n house_id_connect = closest_house.id\n\n # leftover capcity after adding current house that will be connected\n leftover_cap = battery.current_capacity - closest_house.max_output\n\n if max_out > leftover_cap > factor:\n # find better option to connect if present\n for house in grid.unconnected_houses:\n # difference of current loop house\n difference_current = battery.current_capacity - house.max_output\n # see if current house is a better fit\n if difference_current < leftover_cap and difference_current > 0:\n # set house_id and difference to new option\n house_id_connect = house.id\n leftover_cap = battery.current_capacity - house.max_output\n\n # check if a combination of two houses can fit the leftover capacity better\n if leftover_cap > factor * 2:\n current_best = float('inf')\n for house1 in grid.unconnected_houses:\n for house2 in grid.unconnected_houses:\n combi = house1.max_output + house2.max_output - battery.current_capacity\n if 0 < combi < factor and combi < current_best:\n house_id_connect = house1.id\n current_best = combi\n\n # connect house that is best option according to heursitics\n grid.connect(house_id_connect, battery.id)\n\n return grid", "def two_step(self):\n\n if random() <= self.luck:\n print(\"You two-step like a Texan! Fun +10\")\n person.fun(self, 10, 10)\n elif random() >= (1 - self.luck):\n print(\"The dance floor is too crowded for you to show your moves. \"\n + \"Fun -5\")\n person.fun(self, -5, 10)\n else:\n print(\"You dance with someone who gives you a free drink. 
Fun +5, \"\n + \"BAC +0.03\")\n person.drink(self, 1)\n person.fun(self, 5, 10)", "def same_hailstone(a, b):\n n = a\n while n != 1:\n if n == b:\n return True\n if n % 2 == 0:\n n = n/2\n else:\n n = n * 3 + 1\n\n # Then try b\n n = b\n while n != 1:\n if n == a:\n return True\n if n % 2 == 0:\n n = n/2\n else:\n n = n * 3 + 1\n \n return False", "def random_algorithm():\n\n count = 0\n battery_price = 5000\n tot_rand_costs = 5 * battery_price\n temp_houses = copy.deepcopy(houses)\n random.shuffle(temp_houses)\n\n for battery in batteries:\n house_list = []\n \n\n for i in range(30):\n\n house = random.choice(temp_houses)\n\n temp_houses.remove(house)\n\n battery.connect_house(house)\n house.route_calc(battery)\n house.add_costs(battery)\n tot_rand_costs = tot_rand_costs + house.costs\n \n house.connect_to_battery(battery)\n\n house_list.append(house.id)\n\n if battery.capacity < 0:\n battery.remove_house(house)\n battery.connect_house(random.choice(houses))\n\n\n return tot_rand_costs \n \n\n # print(f\"kosten!:{tot_rand_costs}\")", "def add_missing_houses(self, results):\n # Assign all values\n total_distance = results[0]\n connections = results[1]\n missing_houses = results[2]\n max = 150 - len(missing_houses)\n\n # Find spot for every missing house\n for house in missing_houses:\n #Find battery with lowest current capacity\n lowest_cap = 1510\n lowest_bat = 0\n for battery in self.batteries:\n if self.batteries[battery].currentCapacity <= lowest_cap:\n lowest_cap = self.batteries[battery].currentCapacity\n lowest_bat = battery\n\n # Calculate free space needed in lowest battery to place missing house\n missing_output = self.houses[house].max_output\n space_needed = (lowest_cap + missing_output) - self.batteries[lowest_bat].capacity\n\n # Iterate over all connections and find connections with lowest_bat\n last_connection1 = max - 1\n space_found = 0\n for connection1 in range(max):\n last_connection2 = max - 1\n if connections[last_connection1]['battery'] == lowest_bat:\n houseN1 = connections[last_connection1]['house']\n max_output1 = connections[last_connection1]['max_output_house']\n battery1 = connections[last_connection1]['battery']\n\n # Go over all connections that not with lowest_bat\n for connection2 in range(max):\n battery2 = connections[last_connection2]['battery']\n if battery2 != lowest_bat:\n houseN2 = connections[last_connection2]['house']\n max_output2 = connections[last_connection2]['max_output_house']\n if max_output2 < max_output1:\n\n # Check if max capacity not reached with switch\n new_cap1 = self.batteries[battery1].currentCapacity - max_output1 + max_output2\n new_cap2 = self.batteries[battery2].currentCapacity - max_output2 + max_output1\n if new_cap1 <= self.batteries[battery1].capacity or new_cap2 <= self.batteries[battery2].capacity:\n\n # Switch batteries\n # Adapt current capacity batteries\n self.batteries[battery1].currentCapacity -= max_output1\n self.batteries[battery2].currentCapacity -= max_output2\n\n self.batteries[battery1].currentCapacity += max_output2\n self.batteries[battery2].currentCapacity += max_output1\n\n # Adapt total distance\n distance1 = connections[last_connection1]['distance']\n distance2 = connections[last_connection2]['distance']\n total_distance -= (distance1 + distance2)\n\n # Add new distances\n new_distance1 = self.distances[houseN1 - 1][battery2 - 1]\n new_distance2 = self.distances[houseN2 - 1][battery1 - 1]\n\n total_distance += (new_distance1 + new_distance2)\n\n #Adapt connections\n 
connections[last_connection1]['battery'] = battery2\n connections[last_connection1]['distance'] = new_distance1\n connections[last_connection2]['battery'] = battery1\n connections[last_connection2]['distance'] = new_distance2\n\n #Calculate space found\n space_found += (max_output1 - max_output2)\n\n break\n\n last_connection2 -= 1\n\n # Check if enough space found for last house\n if space_found >= space_needed:\n if (space_found + self.batteries[lowest_bat].currentCapacity) <= self.batteries[lowest_bat].capacity:\n break\n\n last_connection1 -= 1\n\n #Place missing house\n missing_distance = self.distances[house -1][lowest_bat - 1]\n missing_ouput = self.houses[house].max_output\n self.batteries[lowest_bat].currentCapacity += missing_ouput\n total_distance += missing_distance\n connections.append({'house': house, 'battery': lowest_bat,\n 'distance': missing_distance, 'max_output_house': missing_ouput})\n\n return [total_distance, connections]", "def pool(self):\n\n if random() <= self.luck:\n print(\"You made every ball in! Fun +10\")\n person.fun(self, 10, 10)\n elif random() >= (1 - self.luck):\n print(\"You scratched on the 8 ball and lost the game. Fun -10\")\n person.fun(self, -10)\n else:\n print(\"You enjoyed playing a good game. Fun +5\")\n person.fun(self, 5, 10)", "def bungalow_cost(self, coordinates):\n\n # set default price of a bungalow\n self.bungalow = 399000\n\n # calculate percentage of extra housing worth per extra square meter space\n self.percentage_bungalow = self.bungalow * 0.04\n\n # make default total price of all bungalows\n self.total_bungalow = self.bungalow_amount * self.bungalow\n\n # retrieve coordinates\n coordinateslist = coordinates\n\n # check the outline of every bungalow for extra free space\n for coordinate in coordinateslist:\n\n single = 1 \n bungalow = 2 \n maison = 3\n \n # free space is calculated by checking the distance between house and its surroundings.\n distance = 4\n\n x_coordinaat = coordinate[0]\n y_coordinaat = coordinate[1]\n \n # check for free space around house until another house is found\n check = True\n while check == True:\n\n # distance between house and its surroundings is increased by one for each run to check for more free space\n # free space around a house is checked on each side (up, down, left and right)\n x = x_coordinaat - distance\n x_ver = x_coordinaat + 11 + distance\n y = y_coordinaat - distance\n y_ver = y_coordinaat + 7 + distance\n\n # reset coordinates when out of boundary, because extra free space is able to 'go over' the boundaries\n if x < 0:\n x = 0 \n\n if x_ver > 160:\n x_ver=160\n\n if y < 0:\n y = 0\n \n if y_ver > 180:\n y_ver = 180\n \n # remove current house from the gridmap\n self.neighbourhood[(y_coordinaat - 3):(y_coordinaat + 10),(x_coordinaat - 3):(x_coordinaat + 14)] = 0\n\n # check for other house in given range of free space\n try: \n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n\n # when no house is found, recalculate total price and add one extra meter of free space\n else:\n self.total_single = self.total_single + self.percentage_single\n distance += 1 \n\n # check for IndexError to be sure a coordinate does not go out of range\n except IndexError:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n else:\n self.total_single = self.total_single + 
self.percentage_single\n distance += 1 \n\n # redraw house on the gridmap\n self.neighbourhood[(y_coordinaat - 3):(y_coordinaat + 10),(x_coordinaat - 3):(x_coordinaat + 14)] = 5\n self.neighbourhood[y_coordinaat:(y_coordinaat + 7),x_coordinaat:(x_coordinaat + 11)] = 2\n\n return self.total_bungalow", "def hotcold(a1,a2):\n \n # Initialize player and clue locations locally.\n x,x2=a1[0],a2[0]\n y,y2=a1[1],a2[1]\n\n # Generate distance between player and clue.\n rad = ((x2-x)**2 + (y2-y)**2)**0.5\n\n if rad<37 and rad>18:\n print(\"COLD\")\n\n elif rad<=18 and rad>7:\n print(\"WARM\")\n\n else:\n print(\"HOT\")", "def maison_cost(self, coordinates):\n \n # set default price of a maison\n self.maison = 610000\n\n # calculate percentage of extra housing worth per extra square meter space\n self.percentage_maison = self.maison * 0.06\n\n # make default total price of all maisons\n self.total_maison = self.maison_amount * self.maison\n\n # getting the coordinates from placing class\n coordinateslist = coordinates\n\n # check the outline of every maison\n for coordinate in coordinateslist:\n\n # free space is calculated by checking the distance between house and its surroundings.\n distance = 7\n\n x_coordinaat = coordinate[0]\n y_coordinaat = coordinate[1]\n \n single = 1 \n bungalow = 2 \n maison = 3 \n \n # check for free space around house until another house is found\n check = True\n while check == True:\n\n # distance between house and its surroundings is increased by one for each run to check for more free space\n # free space around a house is checked on each side (up, down, left and right)\n x = x_coordinaat - distance\n x_ver = x_coordinaat + 12 + distance\n y = y_coordinaat - distance\n y_ver = y_coordinaat + 10 + distance\n\n # reset coordinates when out of boundary, because extra free space is able to 'go over' the boundaries\n if x < 0:\n x = 0 \n\n if x_ver > 160:\n x_ver = 160\n\n if y < 0:\n y = 0\n \n if y_ver > 180:\n y_ver = 180\n \n # remove current house from the gridmap\n self.neighbourhood[(y_coordinaat-6):(y_coordinaat+16), (x_coordinaat-6):(x_coordinaat+18)] = 0\n\n # check for other house in given range of free space\n try: \n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n\n # when no house is found, recalculate total price and add one extra meter of free space\n else:\n self.total_maison = self.total_maison + self.percentage_maison\n distance += 1\n\n # check for IndexError to be sure a coordinate does not go out of range\n except IndexError:\n if single in self.neighbourhood[y:y_ver, x:x_ver] or bungalow in self.neighbourhood[y:y_ver, x:x_ver] or maison in self.neighbourhood[y:y_ver, x:x_ver]:\n check = False\n else:\n self.total_maison = self.total_maison + self.percentage_maison\n distance += 1\n\n # redraw house on the gridmap\n self.neighbourhood[(y_coordinaat - 6):(y_coordinaat + 16),(x_coordinaat - 6):(x_coordinaat + 18)] = 5\n self.neighbourhood[y_coordinaat:(y_coordinaat + 10),x_coordinaat:(x_coordinaat + 12)] = 3\n \n return self.total_maison", "def test_for_game_of_spares_version_B(self):\n game = BowlingGame()\n for _ in range(9):\n game.throw(0)\n game.throw(10)\n game.throw(1)\n game.throw(9)\n game.throw(0)\n game.calculate_score()\n self.assertEqual(game.score, 101)", "def bid_algorithm(budget_left, auction_id, last_bid, won, price_paid, last_two_aves,high_bid_warning, high_bid_count ):\n \n if high_bid_warning:\n if high_bid_count 
< 10:\n high_bid_count+=1\n return 0\n else:\n high_bid_count = 0\n high_bid_warning = False\n random_seed= random.random()\n if random_seed < 0.06 and random_seed > 0:\n bid_amount = random.random() * 200 + 400\n if bid_amount < budget_left:\n return bid_amount\n bid_amount = 0\n if not won:\n diff_slot_1 = last_two_aves[0][0] - last_two_aves[1][0]\n diff_slot_2 = last_two_aves[0][1] - last_two_aves[1][1]\n diff_slot_3 = last_two_aves[0][2] - last_two_aves[1][2]\n if diff_slot_1 <= 0 and last_two_aves[0][0] + 2 < budget_left:\n bid_amount = last_two_aves[0][0] + 1 + random.random()\n elif diff_slot_2 <= 0 and last_two_aves[0][1] + 2 < budget_left:\n bid_amount = last_two_aves[0][1] + 1 + random.random()\n elif diff_slot_3 <= 0 and last_two_aves[0][2] + 2 < budget_left:\n bid_amount = last_two_aves[0][2] + 1 + random.random()\n else:\n if diff_slot_1 >= 2* last_two_aves[1][0]:\n high_bid_warning = True\n return 0\n if last_two_aves[0][2] < budget_left:\n bid_amount = last_two_aves[0][2]\n # print(bid_algorithm)\n else:\n bid_amount = 0\n elif won and price_paid + 2 < budget_left:\n bid_amount = price_paid + 1 + random.random()\n elif auction_id in range(auction_id- (int) (auction_id/4), auction_id +1):\n bid_amount = budget_left/5\n else:\n bid_amount = budget_left\n# print(bid_amount)\n return bid_amount", "def MH(\n thresh,\n accept,\n reject,\n accept_postburnin,\n reject_postburnin,\n accept_quantity,\n reject_quantity,\n iteration,\n burn,\n):\n\n quantity = None\n if np.log(np.random.uniform(0, 1, size=1)[0]) < min(0, thresh):\n accept += 1\n quantity = accept_quantity\n if iteration > burn:\n accept_postburnin += 1\n else:\n reject += 1\n quantity = reject_quantity\n if iteration > burn:\n reject_postburnin += 1\n return accept, reject, accept_postburnin, reject_postburnin, quantity", "def machine_vs_machine(heur_fn1, heur_fn2):\n # Initialize game state\n state = make_init_state()\n comp1_side = randrange(2) # Randomly assign side\n\n while not goal_fn(state):\n # Print status\n print_board(state.board)\n if comp1_side:\n print(\" Comp. AI 1: Black(x), Comp. AI 2: White(o)\")\n else:\n print(\" Comp. AI 1: White(o), Comp. AI 2: Black(x)\")\n\n # Check whose round it is\n if comp1_side == state.side:\n state = next_state_by_heur(state, heur_fn1)\n print(\" Comp. AI 1 played {}\".format(state.action))\n else:\n state = next_state_by_heur(state, heur_fn2)\n print(\" Comp. AI 2 played {}\".format(state.action))\n print()\n\n print(\" Final Result\")\n print_board(state.board)\n print_points(state.board)\n if is_winner(state.board, comp1_side):\n print(\" Comp. AI 1 won!!!\")\n elif is_winner(state.board, abs(comp1_side - 1)):\n print(\" Comp. AI 2 won!!!\")\n else:\n print(\" It's a Draw!\")\n print(\" Game Ends\")", "def make_bricks(small, big, goal):\n return (goal - big*5 - small <= 0) and (goal % 5 - small <= 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Main method of the 'Worker' layer of this project. This method starts the distributed working phase, which consumes URLs from the seed database, scrapes app data out of the HTML pages, and stores the results in the apps_data collection on MongoDB.
def scrape_apps(self):
    # Arguments Parsing
    args_parser = self.get_arguments_parser()
    self._args = vars(args_parser.parse_args())

    # Log Handler Configuring
    self._logger = Utils.configure_log(self._args)

    # MongoDB Configuring
    if not Utils.configure_mongodb(self, **self._params):
        self._logger.fatal('Error configuring MongoDB')
        sys.exit(errno.ECONNREFUSED)

    # Making sure indexes exist
    self._mongo_wrapper.ensure_index('IsBusy');
    self._mongo_wrapper.ensure_index('_id', self._params['apps_collection'])

    # Proxies Loading
    self._proxies = Utils.load_proxies(self._args)

    # if "Debug Http" is set to true, "verify" must be "false"
    self._verify_certificate = not self._args['debug_https']
    self._is_using_proxies = self._proxies != None

    # Control Variables - Used on the 'retrying logic'
    retries, max_retries = 0, 8

    parser = html_parser()

    # Loop only breaks when there are no more apps to be processed
    while True:
        # Finds an app to be processed and toggles it's state to 'Busy'
        seed_record = self._mongo_wrapper.find_and_modify()

        if not seed_record:
            break

        try:
            url = seed_record['_id']

            # Do we need to normalize the url ?
            if 'http://' not in url and 'https://' not in url:
                url = 'https://play.google.com' + url

            self._logger.info('Processing: %s' % url)

            # Is this app processed already ?
            if self._mongo_wrapper.app_processed(url, self._params['apps_collection']):
                self._logger.info('Duplicated App : %s. Skipped' % url)
                self._mongo_wrapper.remove_app_from_queue(seed_record)
                continue

            # Get Request for the App's Page
            response = requests.get(url, HTTPUtils.headers,
                                    verify=self._verify_certificate,
                                    proxies=Utils.get_proxy(self))

            # Sanity Checks on Response
            if not response.text or response.status_code != requests.codes.ok:
                self._logger.info('Error Opening App Page : %s' % url)
                retries += 1

                # Retries logic are different if proxies are being used
                if self._is_using_proxies:
                    Utils.sleep()

            try:
                # Scraping Data from HTML
                app = parser.parse_app_data(response.text)

                # Stamping URL into app model
                app['Url'] = url
                app['_id'] = url

                # Reaching related apps
                related_apps = parser.parse_related_apps(response.text)

                if not related_apps:
                    app['RelatedUrls'] = None
                else:
                    app['RelatedUrls'] = related_apps
                    self._logger.info('Related Apps: %s - %d' % (url, len(related_apps)))

                # Inserting data into MongoDB
                self._mongo_wrapper._insert(app, self._params['apps_collection'])

                # Re-Feeding seed collection with related-app urls
                if app['RelatedUrls']:
                    for url in app['RelatedUrls']:
                        if not self._mongo_wrapper.app_processed(url, self._params['apps_collection']) and \
                           not self._mongo_wrapper.app_processed(url, self._params['seed_collection']):
                            self._mongo_wrapper.insert_on_queue(url, self._params['seed_collection'])

            except Exception as exception:
                self._logger.error(exception)

            # Toggling app state back to false
            self._mongo_wrapper.toggle_app_busy(url, False, self._params['seed_collection'])

        except Exception as exception:
            self._logger.error(exception)
[ "def main():\r\n args = get_args()\r\n scrape_n_books(args.num_books, args.start_url, args.real_time)\r\n scrape_n_authors(args.num_authors, args.real_time)\r\n\r\n # Update after scraping\r\n # get path and store data in json\r\n if not args.real_time:\r\n curr_dir = os.path.dirname(os.path.abspath(__file__))\r\n books_path = os.path.join(curr_dir, \"data\", \"books.json\")\r\n authors_path = os.path.join(curr_dir, \"data\", \"authors.json\")\r\n data_to_json(books_path, settings.books)\r\n data_to_json(authors_path, settings.authors)\r\n\r\n update_db_from_json(\"books.json\", \"books\")\r\n update_db_from_json(\"authors.json\", \"authors\")", "def run(self):\n\n self.clean_files()\n self.read_jobs()\n self.write_scripts()\n self.submit_jobs()", "def runworker():\n app.run(debug=False)", "def main():\n #Get number of pages\n\n number_of_pages = get_number_of_pages()\n pages=list(np.arange(1,number_of_pages+1))\n timer_utils=TimerUtils()\n timer_utils.start(f'Start getting products information of {len(pages)} pages')\n\n # Split to different = number of process\n buckets = np.array_split(pages, NUMBER_OF_PROCESS)\n pool =Pool(NUMBER_OF_PROCESS)\n jobs=[]\n index\t=0\n\n now=dt.now()\n dt_string=now.strftime(\"%d/%m/%Y %H:%M:%S\")\n logging.info(f'Start\t{NUMBER_OF_PROCESS} workers\tat {dt_string}')\n\n #Create a global variable.\n while index\t<\tlen(buckets):\n process_id\t=index\n pages\t= buckets[index]\n process\t=pool.apply_async(get_products_df,\targs=(process_id,\tpages,))\n jobs.append(process)\n index\t+=1\n\n \t#C1ose the pool\n pool.close()\n\n \t#wait\tuntil\tfinishing\tall\tprocess\n results=[job.get() for job in\tjobs]\n timer_utils.stop(f'End getting products\tinformation\tof total {len(pages)} pages')", "def url_main(self):\n log = self.logger\n st = time.time()\n print(\"Running...\\nCheck logs/threat_hub.log for detailed events\")\n log.info(\"URL Threat intell hub started\")\n log.info(\"database cleanup started\")\n self.process.flush_db()\n log.info(\"database flushed successfully\")\n try:\n for url in self.crawl.url_targets:\n response = self.crawl.send_request(url=url)\n if not response:\n continue\n else:\n dataset = self.process.shape_up(raw_data=response)\n # self.first_run(dataset, url)\n self.normal_run(dataset, url)\n log.info(f\"Total {self.process.db.get_total_url()} url records present.\")\n log.info(f\"Total {self.process.db.get_phishing_count()} phishing urls present\")\n log.info(f\"Total {self.process.db.get_malware_count()} malware urls present\")\n log.info(f\"Total {self.process.db.get_ransom_count()} ransomware urls present\")\n\n except Exception as err:\n log.exception(err, exc_info=False)\n finally:\n end = time.time()\n log.info(\"Finished executing threat intell hub in {} seconds\".format(end - st))\n print(\"Finished...\")", "def run_worker():\n from asu.utils.garbagecollector import GarbageCollector\n from asu.utils.boss import Boss\n from asu.utils.updater import Updater\n\n logging.basicConfig(level=logging.DEBUG)\n log = logging.getLogger(__name__)\n\n log.info(\"start garbage collector\")\n gaco = GarbageCollector()\n gaco.start()\n\n log.info(\"start boss\")\n boss = Boss()\n boss.start()\n\n log.info(\"start updater\")\n uper = Updater()\n uper.start()", "def main():\n hostname,dbname = \"127.0.0.1\",\"sparkifydb\"\n create_tables.main(hostname, dbname)\n\n process_song_data(hostname, dbname, \"./data/song_data/\")\n process_log_data(hostname, dbname, \"./data/log_data/\")", "def run(self):\n print(\"************* Start! 
*****************\")\n print(\"************* Extracting data... *****************\")\n data = self.__extract_data()\n print(\"************* Data extracted *****************\")\n print(\"************* Transforming data... *****************\")\n clusters = self.__transform_data(data)\n print(\"************* Transformation is done *****************\")\n print(\"************* Saving data *****************\")\n self.__load(clusters)\n print(\"************* End! *****************\")", "def run():\n # Initialize db\n connection = init_db()\n sql_helper.nuke_tables(connection)\n\n logger.info(\"Populating probabilistic database...\")\n number_of_elements = 1000000\n generator.run(connection, size=number_of_elements)\n logger.info(\"Populating complete!\")\n\n # Let the benchmark test the database\n benchmark_results = benchmark.runBenchmark(connection, logger)\n\n # Clear the database\n logger.info(\"Clearing the database...\")\n sql_helper.nuke_tables(connection)\n logger.info(\"Clear complete\")\n\n # Close the db connection.\n connection.close()\n logger.info(\"Database connection ended.\")\n\n # Save the results to a file\n date_time = datetime.now().strftime(\"%Y%m%d-%H%M\")\n export_results(results=benchmark_results, filename=\"{}_{}-elements_maybms-benchmark-result.csv\".format(date_time, number_of_elements))\n\n logger.info(\"Bye!\")", "def harvest(self):\n self.stopped = False\n self.setupdirs()\n self.setUpCrosswalk()\n self.updateHarvestRequest()\n\n parsed_url = urlparse.urlparse(self.harvestInfo['uri'])\n self.data_cite_api_url = \"%s://%s\" %(parsed_url.scheme, parsed_url.netloc)\n self.logger.logMessage(\"DataCiteQueryHarvester Started\")\n page_count = 1\n self.retrieveDOIMetadata(page_count)\n while self.recordCount > 0:\n page_count += 1\n self.retrieveDOIMetadata(page_count)\n self.setStatus(\"Generated %s File(s)\" % str(page_count))\n self.logger.logMessage(\"Generated %s File(s)\" % str(page_count))\n self.runCrossWalk()\n self.postHarvestData()\n self.finishHarvest()", "def run_scrapping():\n logging.info(\"Starting the scrapping process...\")\n try:\n # Create an empty list variable.\n search_history = []\n # Run the for to scrap 2000 articles from wikipedia.\n for i in range(2000):\n\n # Send the request to wikipedia with the random url and get the response.\n response = requests.get(base_url)\n\n # Check if the current url is already exist in search_history list or not.\n if str(response.url) not in search_history:\n # if not exist then add it to the list.\n search_history.append(response.url)\n\n # Create the file with write mode and encoding format utf-8.\n f = open(module_directory + \"/DataSet/\" + str(i) + \".txt\", \"w\", encoding=\"utf-8\")\n # And write the response of get_body_content function.\n f.write(get_body_content(response.text))\n\n # Sleep for 2 second for not messing up with wikipedia server.\n sleep(2)\n\n # Save the search_history list which contains all the called urls into the file.\n f_ = open(module_directory + \"/DataSet/url_list.txt\", \"w\")\n f_.write(\"\\n\".join(search_history))\n\n return True\n\n except Exception as e:\n # log the error.\n traceback.print_exc()\n logging.error(\"Error: %s\", e)\n print(\"Error: %s\", e)\n return False", "def direct_results():\n\n countries = load_yaml_file(\"data/countries.yaml\")\n\n dic_uuids = {}\n\n #for country in [\"CH\",]:\n for country in countries:\n job_id = str(uuid.uuid1())\n\n dic_uuids[job_id] = country\n\n # Add task to db\n task = Task(\n id=job_id,\n progress=0,\n )\n 
db.session.add(task)\n db.session.commit()\n\n # fetch current year\n year = int(datetime.datetime.now().year)\n\n params_dict = {\n (\"Functional unit\",): {\n \"powertrain\": [\"ICEV-p\", \"ICEV-d\", \"ICEV-g\", \"BEV\"],\n \"year\": [year],\n \"size\": [\"Medium\"],\n \"fu\": {\"unit\": \"vkm\", \"quantity\": 1},\n },\n (\"Driving cycle\",): \"WLTC\",\n (\"Background\",): {\n \"country\": country,\n \"energy storage\": {\n \"electric\": {\"Medium\": {\"type\": \"NMC-622\", \"origin\": \"CN\"}}\n },\n },\n (\"Foreground\",): {\n (\"Glider\", \"all\", \"all\", \"average passengers\", \"none\"): {\n (year, \"loc\"): 1.5\n },\n (\"Glider\", \"all\", \"all\", \"cargo mass\", \"none\"): {(year, \"loc\"): 20.0},\n (\"Driving\", \"all\", \"all\", \"lifetime kilometers\", \"none\"): {\n (year, \"loc\"): 200000.0\n },\n (\"Driving\", \"all\", \"all\", \"kilometers per year\", \"none\"): {\n (year, \"loc\"): 12000.0\n },\n },\n }\n data, i = app.calc.process_results(params_dict, \"en\", job_id)\n data = json.loads(data)\n data.append(job_id)\n\n with open(\n f\"data/precalculated results/quick_results_{country}.pickle\", \"wb\"\n ) as f:\n pickle.dump(data, f)\n\n # generate inventories\n for software in [\"brightway2\", \"simapro\"]:\n for ecoinvent_version in [\n #\"3.6\",\n #\"3.7\",\n \"3.8\"\n ]:\n if software == \"brightway2\" or (\n software == \"simapro\" and ecoinvent_version == \"3.7\"\n ):\n data = i.export_lci(\n ecoinvent_version=ecoinvent_version,\n software=software,\n format=\"string\",\n )\n\n with open(\n f\"data/inventories/quick_inventory_{country}_{software}_{ecoinvent_version}.pickle\",\n \"wb\",\n ) as f:\n pickle.dump(data, f)\n\n with open(\"data/precalculated results/quick_results_job_ids.pickle\", \"wb\") as f:\n pickle.dump(dic_uuids, f)\n\n res = make_response(jsonify({\"job id\": 0}), 200)\n return res", "async def fetch(self, app_name):\n await self.crawl_to_app(app_name) \n print(\"Test complete.\") \n return print(\"Extracting data\") #place holder for data extraction call ", "def run_import():\n db_engine = get_db_engine()\n\n with open(\"DRUGIDS\") as fp:\n drug_ids = [line.strip() for line in fp]\n\n # Scrape the site, and pull the data we need\n # This would be the \"Source\" in a DAG pipline, I.e. the first node\n logger.info(\"Scraping the Drugbank Site\")\n drug_metadata = pull_drugs(drug_ids)\n\n # Check the Database against the \"action\" and \"alt_identifier\" types\n # we observe from the site, and the one's we already have in the database.\n # Insert / Update accordingly. 
After the update, we can then insert\n # data with references to these tables.\n # This is simply a transformation (T in ETL), again another node / step\n # in the pipeline.\n logger.info(\"Equalizing Type IDs\")\n equalize_type_ids(db_engine, drug_metadata)\n\n # Transform the Metadata dicts into lists of tuples, 1 list per relation\n # so we can bulk insert accordingly\n # The sink in the graph.\n logger.info(\"Transforming data to tuples for insertion\")\n db_rows_to_insert: Dict = transform_to_db_rows(db_engine, drug_metadata)\n\n # Insert the rows.\n logger.info(\"Inserting Data\")\n write_rows_to_db(db_engine, db_rows_to_insert)", "def algorithm(self):\n # init\n self.transfer = Transfer()\n # Read info from files written by PostJobs and bookkeeping from previous run.\n self.transfer.readInfo()\n self.rucioClient = self._initRucioClient(self.transfer.username, self.transfer.restProxyFile)\n # Get info what's already in Rucio containers\n self.transfer.readInfoFromRucio(self.rucioClient)\n self.crabRESTClient = self._initCrabRESTClient(\n self.transfer.restHost,\n self.transfer.restDBInstance,\n self.transfer.restProxyFile,\n )\n # build dataset\n BuildDBSDataset(self.transfer, self.rucioClient, self.crabRESTClient).execute()\n # do 1\n RegisterReplicas(self.transfer, self.rucioClient, self.crabRESTClient).execute()\n # do 2\n MonitorLockStatus(self.transfer, self.rucioClient, self.crabRESTClient).execute()", "def process():\n db = DataParser.get_connection()\n cursor = db.cursor()\n DataParser.set_up_database(cursor)\n config = DataParser.get_config()\n cursor.execute(\"use %s\" % config[\"database\"][\"database_name\"])\n DataParser.import_articles(cursor)\n DataParser.import_citations(cursor)\n DataParser.import_words(cursor)\n DataParser.import_users(cursor)\n DataParser.clean_up(db, cursor)", "def run(self):\n try:\n wakeup = None\n while True:\n now = Timestamp.now()\n wokeup = wakeup\n (pending, wakeup) = self.process_fetchers(now)\n if wakeup == -1:\n print \"Nothing to process.\"\n break\n remaining = wakeup - now\n if pending:\n print \"Waiting for \" + str(remaining)\n asyncore.loop(timeout = remaining, count = 1)\n else:\n ts = int(now)\n result = self.analyzer.analyze(ts)\n self.reporter.show(result, ts)\n if not self.continuous:\n break\n print \"sleeping for \" + str(remaining)\n time.sleep(remaining)\n print \"Wokeup ... \"\n except KeyboardInterrupt:\n sys.exit(0)\n finally:\n for f in self.fetchers:\n f.cleanup()", "def run():\n\n scraper.main(log_to_stdout=True)", "def crawl(self):\n\n #Iteration tracker for checking when to regenerate driver\n iter_ = 0 \n\n #Set DB scan start\n now = datetime.now()\n self.db.set_start(now)\n failures = []\n status = {}\n with open(os.getcwd() + '/scan-status.txt', 'r') as f:\n for line in f.readlines():\n category = line.split(' ')[0]\n pagenum = line.split(' ')[1]\n try:\n pagenum.replace('\\n', '')\n except:\n pass\n status[category] = pagenum\n \n #Iterate through targets\n for target in self.targets:\n if status[target.split('/t5/')[1].split('/')[0]] == 'DONE\\n':\n continue\n if iter_ > 0:\n #Regenerate driver if necessary\n if '-p' not in sys.argv:\n print('Regenerating driver...... 
\\n')\n self.regenerate_driver()\n # time.sleep(2)\n\n #time.sleep(2)\n\n #Generate a category object from target URL\n category = self.parse_page(target, iter_ + 1)\n\n #If something went wrong with creating the object, throw relevant exception to \n #trigger restart\n if len(category.threadlist) == 0:\n raise DBError\n print(f'\\nCreated CATEGORY: {category.__str__()}')\n\n #Get threads remaining from old cache\n threads = []\n if category.name in self.db.pred.keys():\n for url, thread in self.db.pred[category.name].threads.items():\n if url not in category.threads.keys():\n threads.append(url)\n \n #Go through remaining threads and add parsed objects to category object\n if len(threads) > 0:\n with Bar(f'Finishing remaining threads in category {category.name}', max=len(threads)) as bar:\n for url in threads:\n thread = None\n if '-p' not in sys.argv:\n self.driver.get(url)\n #Attempt to parse thread page\n try:\n thread = self.scraper.parse(self.driver.page_source, url, target.split('/t5/')[1].split('/')[0], iter_)\n #This indicates a thread has been made inaccessible, add it to deleted threads\n except AttributeError:\n if target.split('/t5/')[1].split('/')[0] in self.db.stats.deleted_threads.keys():\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]].append(url)\n else:\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]] = [url]\n else:\n r = requests.get(url)\n try:\n thread = self.scraper.parse(r.text, url, target.split('/t5/')[1].split('/')[0], iter_)\n #This indicates a thread has been made inaccessible, add it to deleted threads\n except AttributeError:\n if target.split('/t5/')[1].split('/')[0] in self.db.stats.deleted_threads.keys():\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]].append(url)\n else:\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]] = [url]\n #time.sleep(2)\n category.add(thread)\n bar.next()\n iter_ += 1\n if '-full' not in sys.argv:\n self.db.add(category)\n for elem in failures:\n if elem not in self.db.stats.failures:\n self.db.stats.failures.append(elem)\n return self.db\n else:\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read production capacities, either offline (locally) or online (from scenario explorer)
def get_production_capacities(conf, source='offline', verbose=True):
    # if offline source, read local data
    if source == 'offline':
        hourly_capacities = pd.read_csv('Input/ProductionCapacities.csv', index_col=0).iloc[conf['t_start']:conf['t_end'], :]
    # if online source, read data from openENTRANCE scenario explorer
    if source == 'online':
        openentrance_capacities = pyam.read_iiasa(
            'openentrance',
            model=conf['openEntrance']['capacities']['model'],
            variable=conf['openEntrance']['capacities']['variable'],
            region=conf['openEntrance']['capacities']['region'],
            scenario=conf['openEntrance']['capacities']['scenario'])
        openentrance_capacities = openentrance_capacities.filter(year=conf['openEntrance']['capacities']['year'])
        if verbose:
            print('Production capacities (openENTRANCE):')
            print(openentrance_capacities.timeseries())

        # try to match downloaded data to technologies specified in .yaml file. If that fails, use local data
        try:
            installed_capacities = {source: openentrance_capacities.filter(variable=conf['openEntrance']['capacities']['variable'] + source).timeseries()[int(conf['openEntrance']['capacities']['year'])][-1] for source in conf['openEntrance']['sources']}
        except (IndexError, ValueError, AttributeError):
            warnings.warn('Capacities data from scenario explorer does not fit sources supplied in Settings.yaml - using mock-up data.')
            installed_capacities = {source: 1 for source in conf['openEntrance']['sources']}
        # translate installed capacities to hourly capacities
        # for dispatchable sources, this is trivial; for non-dispatchable sources, use profiles supplied locally
        hourly_capacities = {source: np.repeat(installed_capacities[source], len(conf['T'])) if source in conf['openEntrance']['dispatchable_sources'] else pd.read_csv('input/' + source + '.csv', header=None).iloc[:, 0].values[conf['T']] * installed_capacities[source] for source in conf['openEntrance']['sources']}

    return hourly_capacities
[ "def get_production_costs(conf, source='offline', verbose=True):\n\n # if offline source, read local data\n if source == 'offline':\n hourly_costs = pd.read_csv('Input/ProductionCosts.csv', index_col=0).iloc[conf['t_start']:conf['t_end'], :]\n # if online source, read data from openENTRANCE scenario explorer\n if source == 'online':\n openentrance_costs = pyam.read_iiasa(\n 'openentrance',\n model=conf['openEntrance']['costs']['model'],\n variable=conf['openEntrance']['costs']['variable'],\n region=conf['openEntrance']['costs']['region'],\n scenario=conf['openEntrance']['costs']['scenario'])\n openentrance_costs = openentrance_costs.filter(year=conf['openEntrance']['costs']['year'])\n if verbose:\n print('Production costs (openENTRANCE):')\n print(openentrance_costs.timeseries())\n\n # try to match downloaded data to technologies specified in .yaml file. If that fails, use local data\n try:\n static_costs = {source: openentrance_costs.filter(variable=conf['openEntrance']['costs']['variable'] + source).timeseries()[int(conf['openEntrance']['capacities']['year'])][-1] for source in conf['openEntrance']['sources']}\n except (IndexError, ValueError, AttributeError):\n warnings.warn('Cost data from scenario explorer does not fit sources supplied in Settings.yaml - using mock-up data.')\n static_costs = {source: 1 for source in conf['openEntrance']['sources']}\n # translate to hourly costs\n hourly_costs = {source: np.repeat(static_costs[source], len(conf['T'])) for source in conf['openEntrance']['sources']}\n\n return hourly_costs", "def performance_capacity_monitoring(n1, ip, x_api_session):\n os.system(\"cls\")\n SelectManagedSystem_obj = SelectManagedSystem.SelectManagedSystem()\n managedsystem_object = SelectManagedSystem_obj.\\\n get_managedsystem_uuid(ip, x_api_session)\n managedsystem_uuid = SelectManagedSystem_obj.managedsystem_uuid\n virtualioserver_object = ListVirtualIOServer.ListVirtualIOServer()\n object_list = virtualioserver_object.list_VirtualIOServer(ip,\n managedsystem_uuid,\n x_api_session)\n if managedsystem_uuid != \"\":\n st = 'y'\n n = n1\n if n == 1:\n ManagedSystemPcmPreference_object = ManagedSystemPcm.ManagedSystemPcmPreference(ip,\n managedsystem_uuid,\n x_api_session)\n while True:\n print((\"\\n\\n\",\"ManagedSystemPcmPreference\".center(50)))\n print_list = ['Get ManagedSystemPcmPreference','Set/Update ManagedSystemPcmPreference','Return to PCM Menu']\n #select any performance_capacity_monitoring operation\n x = int(print_obj.print_on_screen(print_list) )\n if x == 1:\n get_managedsystempcmpreference_object = ManagedSystemPcmPreference_object.get_managedsystempcmpreference()\n ManagedSystemPcmPreference_object.print_managedsystempcmpreference(get_managedsystempcmpreference_object)\n \n elif x == 2:\n set_managedsystempcmpreference_object = ManagedSystemPcmPreference_object.\\\n set_managedsystempcmpreference()\n \n elif x == 3:\n os.system(\"cls\")\n break\n else:\n print(\"\\nTry again using valid option\")\n back_to_menu()\n elif n == 2:\n #object creation and Method call to Longterm monitor\n LongTermMonitor_object = LongTermMonitor.LongTermMonitor(ip,\n managedsystem_uuid,\n x_api_session)\n LongTermMonitor_object.get_longtermmonitor(object_list)\n \n back_to_menu()\n \n elif n == 3:\n #object creation and Method call to Shortterm monitor\n ShortTermMonitor_object = ShortTermMonitor.ShortTermMonitor(ip,\n managedsystem_uuid,\n x_api_session)\n ShortTermMonitor_object.get_shorttermmonitor(object_list)\n \n back_to_menu()\n \n elif n == 4:\n #object creation 
and Method call to Processed Metrics\n process_metrics_object = ProcessedMetrics.ProcessedMetrics(ip,managedsystem_uuid ,x_api_session)\n process_metrics_object.get_processedmetrics()\n back_to_menu()\n \n else:\n print(\"\\nTry again using valid option\")\n back_to_menu()\n else:\n back_to_menu()", "def __get_battery_capacities(self):\n\n capacities = subprocess.check_output(self.__IOREG_CAPACITY_COMMAND, shell=True).decode(\n 'utf-8').strip().split('\\n')\n\n max_capacity = 0\n current_capacity = 0\n\n for capacity in capacities:\n entry_set = capacity.split('=')\n key = re.sub('^(.*?\")', '', entry_set[0]).replace('\" ', '')\n\n if (key == 'MaxCapacity'):\n max_capacity = entry_set[1].strip()\n elif key == 'CurrentCapacity':\n current_capacity = entry_set[1].strip()\n\n return max_capacity, current_capacity", "def retrieveInventoryData(client):\n resource_classes = ['dpm-resources']\n api_features = client.consoles.console.list_api_features()\n if 'secure-boot-with-certificates' in api_features:\n resource_classes.append('certificate-resources')\n\n inventory_list = client.get_inventory(resource_classes)\n error_msgs = []\n for item in inventory_list:\n if item.get('class') == 'inventory-error':\n msg = (\"Inventory error {} for resource with URI {}: {}; \"\n \"Details: {}\".format(\n item.get('inventory-error-code'),\n item.get('uri'),\n item.get('inventory-error-text'),\n dict(item.get('inventory-error-details'))))\n error_msgs.append(msg)\n if error_msgs:\n raise ConsistencyError(\n \"Some resources could not be fully inventoried:\\n {}\".\n format('\\n '.join(error_msgs)))\n return inventory_list", "def retrieve_airconditioning_settings(self):\n ac_URL=\"http://\" + self.ip_catalog + \":\" + self.port_catalog + \"/ac\"\n self.ac_settings=requests.get(ac_URL).json()[\"ac\"] \n print(self.ac_settings)\n print(\"Type settings: \",type(self.ac_settings))\n return self.ac_settings", "def query_data():\n \n # Access JIRA backend and gets data\n db_accessor = JiraGT()\n db_accessor.connect()\n data = db_accessor.get_compliance_data()\n db_accessor.disconnect()\n return data", "def fetch_statistics(conf):\n return fetch_json(\"http://%s:%d/monitor/statistics.json\" % (conf[\"host\"], conf[\"port\"]), timeout=30)", "async def mcap():\n try:\n if ctx.message.server.id == \"388915017187328002\":\n return\n except:\n pass\n\n btc = requests.get(\"https://www.bitstamp.net/api/ticker/\")\n supply = get_supply()\n trtl = requests.get(\"https://tradeogre.com/api/v1/ticker/BTC-TRTL\")\n try:\n trtl_json = trtl.json()\n btc_json = btc.json()\n except ValueError:\n await client.say(\"Unable to get market cap!\")\n return\n mcap = float(trtl.json()['price'])*float(btc.json()['last'])*supply\n await client.say(\"{0}'s Marketcap is **${1:,.2f}** USD\".format(config['coin'], mcap))", "def _fetchCapabilities(self, options):\n grass.debug('Fetching capabilities file.')\n cap_url = options['url']\n\n if 'WMTS' in options['driver']:\n cap_url += \"?SERVICE=WMTS&REQUEST=GetCapabilities&VERSION=1.0.0\"\n elif 'OnEarth' in options['driver']:\n cap_url += \"?REQUEST=GetTileService\"\n else:\n cap_url += \"?SERVICE=WMS&REQUEST=GetCapabilities&VERSION=\" + options['wms_version'] \n \n try:\n cap = self._fetchDataFromServer(cap_url, options['username'], options['password'])\n except (IOError, HTTPException), e:\n if urllib2.HTTPError == type(e) and e.code == 401:\n grass.fatal(_(\"Authorization failed to '%s' when fetching capabilities.\") % options['url'])\n else:\n grass.fatal(_(\"Unable to fetch capabilities 
from: '%s'\") % options['url'])\n \n return cap", "def load_electricity_consumption():\n _load_consumption_from_api(settings.ELECTRICITY_CONSUMPTION_URL, models.ElectricityConsumption)", "def get_environment_impact(driver):\n attributes_grid=driver.find_element_by_id('attributes_grid')\n elements=attributes_grid.find_elements_by_tag_name('*')\n for element in elements:\n if 'Eco-Score' in element.text:\n return element.find_element_by_tag_name('span').text\n return UNKNOWN_VALUE", "def test_vmware_service_resources_management_get(self):\n pass", "def get_production_details(production, jtracked, ktracked):\n\n prod_dict = {}\n\n for server in server_dict:\n\n try:\n\n db = mdb.connect(read_default_file='../.my.cnf', read_default_group='guest',\n host=server, port=server_dict[server]['port'])\n cur = db.cursor()\n\n query_string = '''\n SELECT DISTINCT production, jtracked, ktracked\n FROM summary.production\n WHERE decoded > 0\n '''\n\n query_string += ' AND production = \\'' + production + '\\''\n\n if jtracked and ktracked:\n query_string += ' AND (jtracked = 1 OR ktracked = 1)'\n elif jtracked and not ktracked:\n query_string += ' AND jtracked = 1'\n elif not jtracked and ktracked:\n query_string += ' AND ktracked = 1'\n\n cur.execute(query_string)\n\n if cur.rowcount > 0:\n row = cur.fetchone()\n prod_dict[row[0]] = {'server': server,\n 'roadset': get_roadset(int(row[0][4:10])),\n 'jtracked': int(row[1]),\n 'ktracked': int(row[2])}\n\n db.close()\n\n except mdb.Error, e:\n\n print \"Error %d: %s\" % (e.args[0], e.args[1])\n return 1\n\n return prod_dict", "def showLimits():\r\n limits = css.serviceInfo.limits\r\n print limits", "def List(apig):\n\t\t\t\treturn apig.client.get_usage_plans()['items']", "def get_capabilities(self):\n params = {\n \"request\": OGCOperationEnum.GET_CAPABILITIES.value,\n \"version\": self.service_version.value if self.service_version is not None else \"\",\n \"service\": (self.service_type.value if self.service_type is not None else \"\").upper(),\n }\n concat = \"&\" if self.service_connect_url[-1] != \"&\" else \"\"\n self.service_connect_url = \"{}{}{}\".format(self.service_connect_url, concat, urlencode(params))\n ows_connector = CommonConnector(\n url=self.service_connect_url,\n external_auth=self.external_authentification,\n connection_type=ConnectionEnum.REQUESTS\n )\n ows_connector.http_method = 'GET'\n try:\n ows_connector.load()\n if ows_connector.status_code != 200:\n raise ConnectionError(ows_connector.status_code)\n except ReadTimeout:\n raise ConnectionError(CONNECTION_TIMEOUT.format(self.service_connect_url))\n\n tmp = ows_connector.content.decode(\"UTF-8\")\n # check if tmp really contains an xml file\n xml = xml_helper.parse_xml(tmp)\n\n if xml is None:\n raise Exception(tmp)\n\n self.service_capabilities_xml = tmp\n self.connect_duration = ows_connector.run_time\n self.descriptive_document_encoding = ows_connector.encoding", "def GetCompleteStats(ctx):\n \"\"\"The data returned from GetCompleteStats changes frequently, and is not guaranteed to accurately show performance from the system. 
It is not recommended to ever use GetCompleteStats for collecting performance data or any other management integration with a SolidFire cluster.\"\"\"\n if ctx.element is None:\n ctx.logger.error(\"You must establish at least one connection and specify which you intend to use.\")\n exit()\n\n\n\n ctx.logger.info(\"\")\n try:\n str = ctx.element.get_complete_stats()\n except common.ApiServerError as e:\n ctx.logger.error(e.message)\n exit()\n except BaseException as e:\n ctx.logger.error(e.__str__())\n exit()\n\n cli_utils.print_result(str, ctx.logger, as_json=ctx.json, depth=ctx.depth, filter_tree=ctx.filter_tree)", "def get_capabilities(capability=None):\n capabilities = {\n # We rely on consumption because DSMR readings might be flushed in the future.\n 'electricity': ElectricityConsumption.objects.exists(),\n 'electricity_returned': ElectricityConsumption.objects.filter(\n # We can not rely on meter positions, as the manufacturer sometimes initializes meters\n # with testing data. So we just have to wait for the first power returned.\n currently_returned__gt=0\n ).exists(),\n 'gas': GasConsumption.objects.exists(),\n 'weather': WeatherSettings.get_solo().track and TemperatureReading.objects.exists()\n }\n capabilities['any'] = any(capabilities.values())\n\n # Single selection.\n if capability is not None:\n return capabilities[capability]\n\n return capabilities", "def test_list_cloud_access(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read production costs, either offline (locally) or online (from scenario explorer)
def get_production_costs(conf, source='offline', verbose=True): # if offline source, read local data if source == 'offline': hourly_costs = pd.read_csv('Input/ProductionCosts.csv', index_col=0).iloc[conf['t_start']:conf['t_end'], :] # if online source, read data from openENTRANCE scenario explorer if source == 'online': openentrance_costs = pyam.read_iiasa( 'openentrance', model=conf['openEntrance']['costs']['model'], variable=conf['openEntrance']['costs']['variable'], region=conf['openEntrance']['costs']['region'], scenario=conf['openEntrance']['costs']['scenario']) openentrance_costs = openentrance_costs.filter(year=conf['openEntrance']['costs']['year']) if verbose: print('Production costs (openENTRANCE):') print(openentrance_costs.timeseries()) # try to match downloaded data to technologies specified in .yaml file. If that fails, use local data try: static_costs = {source: openentrance_costs.filter(variable=conf['openEntrance']['costs']['variable'] + source).timeseries()[int(conf['openEntrance']['capacities']['year'])][-1] for source in conf['openEntrance']['sources']} except (IndexError, ValueError, AttributeError): warnings.warn('Cost data from scenario explorer does not fit sources supplied in Settings.yaml - using mock-up data.') static_costs = {source: 1 for source in conf['openEntrance']['sources']} # translate to hourly costs hourly_costs = {source: np.repeat(static_costs[source], len(conf['T'])) for source in conf['openEntrance']['sources']} return hourly_costs
[ "def get_detailed_costs(self):\n # TODO: Cache for a day\n # TODO: Generate path to cost csv\n today = datetime.date.today()\n csv_file = \"906142011005-aws-billing-detailed-line-items-with-resources-and-tags-%s-%02d.csv\" % (today.year, today.month)\n stringio = cStringIO.StringIO(self.get_file_contents(config.BILLING_BUCKET, \"/%s.zip\" % csv_file))\n raw_csv = io.TextIOWrapper(zipfile.ZipFile(stringio).open(csv_file))\n return csv.DictReader(raw_csv)", "def get_production_capacities(conf, source='offline', verbose=True):\n\n # if offline source, read local data\n if source == 'offline':\n hourly_capacities = pd.read_csv('Input/ProductionCapacities.csv', index_col=0).iloc[conf['t_start']:conf['t_end'], :]\n # if online source, read data from openENTRANCE scenario explorer\n if source == 'online':\n openentrance_capacities = pyam.read_iiasa(\n 'openentrance',\n model=conf['openEntrance']['capacities']['model'],\n variable=conf['openEntrance']['capacities']['variable'],\n region=conf['openEntrance']['capacities']['region'],\n scenario=conf['openEntrance']['capacities']['scenario'])\n openentrance_capacities = openentrance_capacities.filter(year=conf['openEntrance']['capacities']['year'])\n if verbose:\n print('Production capacities (openENTRANCE):')\n print(openentrance_capacities.timeseries())\n\n # try to match downloaded data to technologies specified in .yaml file. If that fails, use local data\n try:\n installed_capacities = {source: openentrance_capacities.filter(variable=conf['openEntrance']['capacities']['variable'] + source).timeseries()[int(conf['openEntrance']['capacities']['year'])][-1] for source in conf['openEntrance']['sources']}\n except (IndexError, ValueError, AttributeError):\n warnings.warn('Capacities data from scenario explorer does not fit sources supplied in Settings.yaml - using mock-up data.')\n installed_capacities = {source: 1 for source in conf['openEntrance']['sources']}\n # translate installed capacities to hourly capacities\n # for dispatchable sources, this is trivial; for non-dispatchable sources, use profiles supplied locally\n hourly_capacities = {source: np.repeat(installed_capacities[source], len(conf['T'])) if source in conf['openEntrance']['dispatchable_sources'] else pd.read_csv('input/' + source + '.csv', header=None).iloc[:, 0].values[conf['T']] * installed_capacities[source] for source in conf['openEntrance']['sources']}\n\n return hourly_capacities", "def Get_Cost_Usage(cost_explorer,start: str,end: str):\n\n\t\t\ttimePeriod = {\n\t\t\t\t'Start': start,\n\t\t\t\t'End': end,\n\t\t\t\t}\n\n\t\t\tgranularity = 'MONTHLY'\n\n\t\t\tmetrics = ['AmortizedCost','BlendedCost','UsageQuantity']\n\t\t\t\t\n\t\t\tgroupby = [\n\t\t\t\t{\n\t\t\t\t\t'Type': 'DIMENSION',\n\t\t\t\t\t'Key': 'SERVICE'\n\t\t\t\t}]\n\n\t\t\traise ValueError('This service costs $0.01 per request')\n\t\t\tresponse = cost_explorer.client.get_cost_and_usage(\n\t\t\t\tTimePeriod = timePeriod,\n\t\t\t\tGranularity = granularity,\n\t\t\t\tMetrics = metrics,\n\t\t\t\tGroupBy = groupby\n\t\t\t\t)\n\n\t\t\treturn response", "def ex_get_pricing(self):\r\n action = '/pricing/'\r\n response = self.connection.request(action=action, method='GET')\r\n return response.object", "def load_gas_consumption():\n _load_consumption_from_api(settings.GAS_CONSUMPTION_URL, models.GasConsumption)", "def fetch():\n fetch_json() # fetch json files\n # format everything\n format_cot(COT)\n format_coinmetrics_data(ADDRESSES)\n format_coinmetrics_data(REALIZEDCAP)\n format_coinmetrics_data(PRICE)\n 
format_coinmetrics_data(MARKETCAP)\n format_coinmetrics_data(SUPPLY)\n format_fear_greed(FEAR_GREED)\n for info in DATA_INFO:\n if info['url'] == (BLOCKCHAIN_URL or COINMETRICS_URL):\n remove_zero_values(info['path'])", "def cost_usage_data():\n\n return {\n \"ResultsByTime\": [\n {\n \"Estimated\": True,\n \"TimePeriod\": {\n \"Start\": \"2019-11-06\",\n \"End\": \"2019-11-07\"\n },\n \"Total\": {\n \"BlendedCost\": {\n \"Amount\": \"2.523114075\",\n \"Unit\": \"USD\"\n },\n \"UnblendedCost\": {\n \"Amount\": \"2.5219729944\",\n \"Unit\": \"USD\"\n },\n \"UsageQuantity\": {\n \"Amount\": \"470666.3868981499\",\n \"Unit\": \"N/A\"\n }\n },\n \"Groups\": []\n }\n ]\n}", "def get_price(url):\n global ALLOWANCE\n source = \"\"\n try:\n source = requests.get(url).text\n source = json.loads(source)\n ALLOWANCE = source[\"allowance\"][\"remaining\"]\n except:\n print(\"\\nError loading {}:\\n{}\".format(url, source))\n return \"0\"\n return source[\"result\"][\"price\"]", "def financial_data(str):\n inquiry = 'https://finnhub.io/api/v1/stock/metric?symbol=' + str + '&metric=all&token='\n summary = requests.get(inquiry)\n print(summary.json())", "def monthly_cost_storage(self) -> 'outputs.MoneyResponse':\n return pulumi.get(self, \"monthly_cost_storage\")", "def test_get_options_prices_realtime(self):\n pass", "def getDetailedCosts(self):\n self.detailedCosts = [self.foundation, self.transportation, self.roadsCivil, self.portStaging, self.installation, \\\n self.electrical, self.engPermits, self.pai, self.scour, self.suretyBond] \n \n return self.detailedCosts", "def getWorkloadSummary(request):\n conn = httplib.HTTPSConnection(cmsweb_url, \\\n cert_file = os.getenv('X509_USER_PROXY'), \\\n key_file = os.getenv('X509_USER_PROXY'))\n r1 = conn.request('GET', '/couchdb/workloadsummary/' + request)\n r2 = conn.getresponse()\n data = r2.read()\n s = json.loads(data)\n conn.close()\n return s", "def fetchJSON():\n\n curr_token_url = \"https://wowtokenprices.com/current_prices.json\"\n month_token_url = \"https://wowtokenprices.com/history_prices_30_day.json\"\n checkPages(curr_token_url, month_token_url)", "async def parse_costs_for_furnishings_from_depot(\n client: httpx.AsyncClient, url: str\n) -> Dict[str, int]:\n soup = await fetch_soup(client, url)\n\n return {\n re.sub(r\"Blueprint: \", \"\", get_tag_text(row.find_all(\"a\")[1])): locale.atoi(\n get_tag_text(row.find_all(\"td\")[1])\n )\n for table in soup.find_all(\"table\", {\"class\": \"article-table\"})[1:3]\n for row in table.find(\"tbody\").find_all(\"tr\")[1:-1]\n }", "def load_electricity_consumption():\n _load_consumption_from_api(settings.ELECTRICITY_CONSUMPTION_URL, models.ElectricityConsumption)", "def query_data():\n \n # Access JIRA backend and gets data\n db_accessor = JiraGT()\n db_accessor.connect()\n data = db_accessor.get_compliance_data()\n db_accessor.disconnect()\n return data", "def fetch_standings():\n # check if the data needs to be fetched // or stored json\n try:\n with open('app/data/gw_standings/standings_current.json', 'r') as file:\n data = json.loads(file.read())\n except:\n return get_live_result()\n\n updated = data['updated']\n try:\n status = data['status']\n except KeyError:\n status = \"ongoing\"\n gameweek = data['gameweek']\n\n if status == 'completed' and gameweek == find_current_gw():\n return data\n\n current = calendar.timegm(time.gmtime())\n\n if current - updated < 500:\n return data\n return get_live_result()", "def price_oz():\n rs = grequests.get(Gold.URL, timeout=2)\n response = 
grequests.map([rs], exception_handler=lambda x, y: \"\")[0]\n if hasattr(response, \"status_code\") and response.status_code == 200:\n return float(response.json()[0].split(\",\")[1])\n return 0.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read initial demand, either offline (locally) or online (from scenario explorer)
def get_initial_demand(conf, source='offline', verbose=True): # if offline source, read local data if source == 'offline': initial_demand = pd.read_csv('Input/InitialDemand.csv', index_col=0).iloc[conf['t_start']:conf['t_end'], :] # if online source, read data from openENTRANCE scenario explorer if source == 'online': initial_demand = pyam.read_iiasa( 'openentrance', model=conf['openEntrance']['initial_demand']['model'], variable=conf['openEntrance']['initial_demand']['variable'], region=conf['openEntrance']['initial_demand']['region'], scenario=conf['openEntrance']['initial_demand']['scenario']) initial_demand = initial_demand.filter(year=conf['openEntrance']['initial_demand']['year']) if verbose: print('Initial demand:') print(initial_demand.timeseries()) # set yearly aggregate yearly_aggregate = initial_demand.as_pandas()['value'].sum() # create normalised hourly pattern from offline data hourly_basis = pd.read_csv('Input/InitialDemand.csv', index_col=0).iloc[conf['t_start']:conf['t_end'], :] hourly_basis_normalised = hourly_basis['2050'] / hourly_basis['2050'].max() # create non-normalised hourly demand initial_demand = yearly_aggregate * hourly_basis_normalised initial_demand = initial_demand.to_numpy() return initial_demand
[ "def get_current_demand(self, callback=None):\n xml_demand = self._query_eagle()\n current_demand = self._parse_demand(xml_demand)\n\n if callback:\n return callback(current_demand)\n else:\n return current_demand", "def get_demand(self, j=None):\n if j is None: return api.get_Demands()\n return api.get_demand(j)", "def test_feed_demands_url(self):\n Feed().display()", "def test_retrieve(self):\n response = self.client.get(\n \"/api_musculib/declination/\" + str(self.machine.id) + \"/\"\n )\n assert len(response.data) > 0 and response.status_code == 200", "def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Demandv1.2.csv\")\n \n \n\n dataframe = pd.read_csv(filepath, sep=\",\",names=['Month DD Raised','No. of FTE Request Raised','SkillList','Location'], header=1)\n\n \n\n\n return dataframe", "def fetch_standings():\n # check if the data needs to be fetched // or stored json\n try:\n with open('app/data/gw_standings/standings_current.json', 'r') as file:\n data = json.loads(file.read())\n except:\n return get_live_result()\n\n updated = data['updated']\n try:\n status = data['status']\n except KeyError:\n status = \"ongoing\"\n gameweek = data['gameweek']\n\n if status == 'completed' and gameweek == find_current_gw():\n return data\n\n current = calendar.timegm(time.gmtime())\n\n if current - updated < 500:\n return data\n return get_live_result()", "def get(name, time='now'):\n sensor_param = SensorParam(name, 'weather_data', ['temperature', 'rainfull_prob'], fetch, save, time)\n client = SensorUtil()\n return client.get_data(sensor_param)", "def load_isd_daily_temp_data(\n self, start, end, read_from_cache=True, write_to_cache=True, fetch_from_web=True\n ):\n return load_isd_daily_temp_data(\n self.usaf_id,\n start,\n end,\n read_from_cache=read_from_cache,\n write_to_cache=write_to_cache,\n fetch_from_web=fetch_from_web,\n )", "def fetch_current(self) -> Optional[WeatherData]:", "def DownstreamInitialStepLoadRate(self):\r\n\t\treturn self._get_attribute('downstreamInitialStepLoadRate')", "def _download_data(self):\n self.raw_data = requests.get(self.api_address).json()\n self.age = datetime.now()", "def load_electricity_consumption():\n _load_consumption_from_api(settings.ELECTRICITY_CONSUMPTION_URL, models.ElectricityConsumption)", "def fetch_data(self):\r\n print(\"Fetching Data from USGS Water Services API\")\r\n self.response = requests.get(self.complete_url)\r\n self.response.raise_for_status()", "def api_fetch():\n headers = {\n 'Authorization': 'apikey {}'.format(settings.DEMOCRACY_WORKS_API_KEY),\n 'Accept': 'application/json'\n }\n\n response = requests.get(\n settings.DEMOCRACY_WORKS_API_URL,\n headers=headers)\n\n logger.info(u'Sync: API Pull - URL: %s Status Code: %s Time: %s',\n settings.DEMOCRACY_WORKS_API_URL, response.status_code,\n response.elapsed.total_seconds())\n\n if response.status_code != 200:\n raise Exception(\n 'Bad Response from Democracy Works {}'.format(\n response.status_code))\n\n return response.json()", "def fetch():\n fetch_json() # fetch json files\n # format everything\n format_cot(COT)\n format_coinmetrics_data(ADDRESSES)\n format_coinmetrics_data(REALIZEDCAP)\n format_coinmetrics_data(PRICE)\n format_coinmetrics_data(MARKETCAP)\n format_coinmetrics_data(SUPPLY)\n format_fear_greed(FEAR_GREED)\n for 
info in DATA_INFO:\n if info['url'] == (BLOCKCHAIN_URL or COINMETRICS_URL):\n remove_zero_values(info['path'])", "def test_fetch_historic():\n store = ecbxrate.ExchangeRateStore('sqlite:///:memory:')\n count, date = store.initialise()\n assert count != 0\n assert store.last_updated() is not None", "def test_get(self):\n # Clear any existing sampling\n self.clear_sample_data()\n\n # Clear the asynchronous callback results\n self.clear_async_data()\n\n # Notify the driver to start sampling\n self.driver.start_sampling()\n\n # Test simple telemetered data handling\n self.create_sample_data_set_dir('telemetered_one.dat', TELEM_DIR, TELEM_FILE_ONE)\n self.assert_data(TELEM_PARTICLES, 'telemetered.one.yml', count=2, timeout=10)\n\n # # Test simple recovered data handling\n self.create_sample_data_set_dir('recovered_one.dat', RECOV_DIR, RECOV_FILE_ONE)\n self.assert_data(RECOV_PARTICLES, 'recovered.one.yml', count=2, timeout=10)", "def extract_doric(input_path):\n\n data = dr.ExtractDataAcquisition(input_path)\n return data", "def test_query_results_min_daily(self):\n with self.app.test_client() as client:\n response = client.get('/_submit_query', \n query_string={\n \"microsite_id\" : \"DUMMYID\",\n \"site\" : \"DUMMYSITE\",\n \"biomimic_type\" : \"Dummybiomimictype\",\n \"country\" : \"Dummycountry\",\n \"state_province\" : \"Dummystate\",\n \"location\" : \"Dummylocation\",\n \"field_lat\" : \"36.621933330000\",\n \"field_lon\" : \"-121.905316700000\",\n \"zone\" : \"DummyZone\",\n \"sub_zone\" : \"DummySubZone\",\n \"wave_exp\" : \"DummyWave\",\n \"start_date\": \"7/1/2000\",\n \"end_date\": \"7/2/2000\",\n \"output_type\" : \"Min\",\n \"analysis_type\" : \"Daily\"},\n follow_redirects=False) \n self.assertIn(b\"13.5\", response.data)\n self.assertNotIn(b\"14\", response.data)\n \n #Test the download functionality\n response = client.get('/download')\n self.assertIn(b\"13.5\", response.data)\n self.assertNotIn(b\"14\", response.data)\n self.assertIn(b\"biomimic_type:Dummybiomimictype\", response.data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is a parameterless function.
def parameterless(): return None
[ "def dummy_function(*args, **kwargs):\n return", "def example(param):\n\tassert param >0 \n\t# do stuf here...", "def __call__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __special__(self):\n pass", "def __do_nothing(*args):\n pass", "def param_ex(arg):\r\n return arg", "def mi_funcion():\n pass", "def noops(self, x, *args, **kwargs):\n return x", "def dummy_wrap(self, *args, **kwargs):\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)", "def addParameter(parameter): #@NoSelf", "def _NoOpFunctionForTesting(self):\n pass", "def foo4(_, *, _): # [duplicate-argument-name, duplicate-argument-name]", "def _dummy_callback(self, arg):\n pass", "def test_passed_noDefaultValues(self):\n\n def func(a, b, c=1, d=2, e=3):\n pass\n\n self.assertEqual(self.checkPassed(func, 1, 2, e=7), dict(a=1, b=2, e=7))", "def foo1(_, _): # [duplicate-argument-name, duplicate-argument-name]", "def no_arg(_args):\n return []", "def myFunc(arg1, arg2=None):\n print(arg1, arg2)", "def accepts_none(fn):\n fn.accepts_none = True \n return fn", "def _execute_function_without_arguments(fun):\n return fun()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle changeset actions. Main function to interact with changesets; use arguments to control the actions. This function uses update_stack to handle all the dirty work, as both functions process CloudFormation arguments and take the same arguments.
def changeset_stack( profile: Union[str, bool] = False, region: Union[str, bool] = False, replace: bool = False, local_path: Union[str, bool] = False, root: bool = False, wait: bool = False, info: bool = False, execute: bool = False, delete: bool = False, extra: bool = False, bucket: str = None, version: Union[str, bool] = False, ) -> None: cloudformation = Cloudformation(profile, region) cloudformation.set_stack() # if not creating new changeset if info or execute or delete: fzf = Pyfzf() response: Dict[str, Any] = cloudformation.client.list_change_sets( StackName=cloudformation.stack_name ) # get the changeset name fzf.process_list( response.get("Summaries", []), "ChangeSetName", "StackName", "ExecutionStatus", "Status", "Description", ) if info: selected_changeset = str(fzf.execute_fzf()) describe_changes(cloudformation, selected_changeset) # execute the change set elif execute: selected_changeset = fzf.execute_fzf() if get_confirmation("Execute changeset %s?" % selected_changeset): response = cloudformation.client.execute_change_set( ChangeSetName=selected_changeset, StackName=cloudformation.stack_name, ) cloudformation.wait( "stack_update_complete", "Wating for stack to be updated ..." ) print("Stack updated") elif delete: selected_changeset = fzf.execute_fzf(multi_select=True) for changeset in selected_changeset: print("(dryrun) Delete changeset %s" % changeset) if get_confirmation("Confirm?"): for changeset in selected_changeset: cloudformation.client.delete_change_set( ChangeSetName=changeset, StackName=cloudformation.stack_name ) else: changeset_name = input("Enter name of this changeset: ") if not changeset_name: raise NoNameEntered("No changeset name specified") changeset_description = input("Description: ") # since is almost same operation as update stack # let update_stack handle it, but return update details instead of execute cloudformation_args = update_stack( cloudformation.profile, cloudformation.region, replace, local_path, root, wait, extra, bucket, version, dryrun=True, cloudformation=cloudformation, ) cloudformation_args[ "cloudformation_action" ] = cloudformation.client.create_change_set cloudformation_args["ChangeSetName"] = changeset_name if changeset_description: cloudformation_args["Description"] = changeset_description response = cloudformation.execute_with_capabilities(**cloudformation_args) response.pop("ResponseMetadata", None) print(json.dumps(response, indent=4, default=str)) print(80 * "-") print("Changeset create initiated") if wait: cloudformation.wait( "change_set_create_complete", "Wating for changset to be created ...", ChangeSetName=changeset_name, ) print("Changeset created") describe_changes(cloudformation, changeset_name)
[ "def execute_change_set(self, change_set_name):\n self._protect_execution()\n change_set = self.describe_change_set(change_set_name)\n status = change_set.get(\"Status\")\n reason = change_set.get(\"StatusReason\")\n if status == \"FAILED\" and self.change_set_creation_failed_due_to_no_changes(\n reason\n ):\n self.logger.info(\n \"Skipping ChangeSet on Stack: {} - there are no changes\".format(\n change_set.get(\"StackName\")\n )\n )\n return 0\n\n self.logger.debug(\n \"%s - Executing Change Set '%s'\", self.stack.name, change_set_name\n )\n response = self.connection_manager.call(\n service=\"cloudformation\",\n command=\"execute_change_set\",\n kwargs={\n \"ChangeSetName\": change_set_name,\n \"StackName\": self.stack.external_name,\n },\n )\n status = self._wait_for_completion(boto_response=response)\n return status", "def changeset(web):\n ctx = webutil.changectx(web.repo, web.req)\n\n return web.sendtemplate(b'changeset', **webutil.changesetentry(web, ctx))", "def lambda_handler(event, context):\n \n try:\n repo_name = event['Records'][0]['eventSourceARN'].split(':')[-1]\n reference = event['Records'][0]['codecommit']['references'][0]\n commit_id = reference['commit']\n ref = os.path.split(reference[\"ref\"])\n root = os.path.basename(ref[0])\n created = reference.get(\"created\")\n deleted = reference.get(\"deleted\")\n if created and root == \"heads\" and ref[1] and ref[1] != \"master\":\n data = json.loads(event['Records'][0]['customData'])\n logger.info('Putting updates trigger for branch %s' % ref[1])\n put_trigger(repo_name, ref[1], data)\n pipeline_name = data[\"pipeline_name\"]\n bucket = data[\"bucket\"]\n logger.info('Getting and archiving codecommit repository content')\n codecommit = AWSCodeCommit(cc_client, repo_name, logger)\n commit_info = cc_client.get_commit(\n repositoryName=repo_name, \n commitId=commit_id\n )\n commit_info['commit']['branchName'] = ref[1]\n commit_info['commit']['RepositoryName'] = repo_name\n codecommit.archive(commit_id, {\"commit_info.json\": json.dumps(commit_info, indent=4)})\n s3_client.put_object(Bucket=bucket,\n Key=\"artifacts/%s\" % pipeline_name,\n Body=codecommit.content)\n logger.info('Starting pipeline execution')\n cp_client.start_pipeline_execution(name=pipeline_name)\n if deleted and root == \"heads\" and ref[1] and ref[1] != \"master\":\n logger.info('Poping updates trigger for branch %s' % ref[1])\n pop_trigger(repo_name, ref[1])\n except Exception as e:\n logger.exception(\"An error occured when processing codecommit trigger event : %s\" % str(e), exc_info=1)", "def create_change_set(self, change_set_name):\n create_change_set_kwargs = {\n \"StackName\": self.stack.external_name,\n \"Parameters\": self._format_parameters(self.stack.parameters),\n \"Capabilities\": [\n \"CAPABILITY_IAM\",\n \"CAPABILITY_NAMED_IAM\",\n \"CAPABILITY_AUTO_EXPAND\",\n ],\n \"ChangeSetName\": change_set_name,\n \"NotificationARNs\": self.stack.notifications,\n \"Tags\": [\n {\"Key\": str(k), \"Value\": str(v)} for k, v in self.stack.tags.items()\n ],\n }\n create_change_set_kwargs.update(self.stack.template.get_boto_call_parameter())\n create_change_set_kwargs.update(self._get_role_arn())\n self.logger.debug(\n \"%s - Creating Change Set '%s'\", self.stack.name, change_set_name\n )\n self.connection_manager.call(\n service=\"cloudformation\",\n command=\"create_change_set\",\n kwargs=create_change_set_kwargs,\n )\n # After the call successfully completes, AWS CloudFormation\n # starts creating the Change Set.\n self.logger.info(\n \"%s - 
Successfully initiated creation of Change Set '%s'\",\n self.stack.name,\n change_set_name,\n )", "def apply_modality_change(\n ctx,\n modality,\n ):\n\n # Ensure any required extensions are available\n install_extensions_and_parse_config()\n\n modality_cmds = ctx.obj.config.get('modality-source-preparation', {}).get(modality, ())\n\n def change_applicator(repo, author, committer):\n has_changed_files = False\n commit_message = modality\n for cmd in modality_cmds:\n try:\n cmd[\"changed-files\"]\n except (KeyError, TypeError):\n pass\n else:\n has_changed_files = True\n try:\n commit_message = cmd[\"commit-message\"]\n except (KeyError, TypeError):\n pass\n\n if not has_changed_files:\n # Force clean builds when we don't know how to discover changed files\n repo.git.clean('-xd', force=True)\n\n volume_vars = ctx.obj.volume_vars.copy()\n volume_vars.setdefault('HOME', os.path.expanduser('~'))\n\n for cmd in modality_cmds:\n if isinstance(cmd, str):\n cmd = {\"sh\": cmd}\n\n if 'description' in cmd:\n desc = cmd['description']\n log.info('Performing: %s', click.style(desc, fg='cyan'))\n\n if 'sh' in cmd:\n args = shlex.split(cmd['sh'])\n env = os.environ.copy()\n while args:\n m = _env_var_re.match(args[0])\n if not m:\n break\n env[m.group('var')] = expand_vars(volume_vars, m.group('val'))\n args.pop(0)\n\n args = [expand_vars(volume_vars, arg) for arg in args]\n try:\n echo_cmd(subprocess.check_call, args, cwd=repo.working_dir, env=env, stdout=sys.__stderr__)\n except subprocess.CalledProcessError as e:\n log.error(\"Command fatally terminated with exit code %d\", e.returncode)\n ctx.exit(e.returncode)\n\n if 'changed-files' in cmd:\n changed_files = cmd[\"changed-files\"]\n if isinstance(changed_files, str):\n changed_files = [changed_files]\n changed_files = [expand_vars(volume_vars, f) for f in changed_files]\n repo.index.add(changed_files)\n\n if not has_changed_files:\n # 'git add --all' equivalent (excluding the code_dir)\n add_files = set(repo.untracked_files)\n remove_files = set()\n with repo.config_reader() as cfg:\n try:\n code_dir = cfg.get_value('hopic.code', 'dir')\n except (NoOptionError, NoSectionError):\n pass\n else:\n if code_dir in add_files:\n add_files.remove(code_dir)\n if (code_dir + '/') in add_files:\n add_files.remove(code_dir + '/')\n\n for diff in repo.index.diff(None):\n if not diff.deleted_file:\n add_files.add(diff.b_path)\n remove_files.add(diff.a_path)\n remove_files -= add_files\n if remove_files:\n repo.index.remove(remove_files)\n if add_files:\n repo.index.add(add_files)\n\n if not repo.index.diff(repo.head.commit):\n log.info(\"No changes introduced by '%s'\", commit_message)\n return None\n commit_message = dedent(f\"\"\"\\\n {commit_message.rstrip()}\n\n Merged-by: Hopic {get_package_version(PACKAGE)}\n \"\"\")\n\n commit_params = {'message': commit_message}\n # If this change was a merge make sure to produce a merge commit for it\n try:\n commit_params['parent_commits'] = (\n repo.commit('ORIG_HEAD'),\n repo.commit('MERGE_HEAD'),\n )\n except git.BadName:\n pass\n return commit_params\n\n return change_applicator", "def response_action(self, request, queryset):\n\n # There can be multiple action forms on the page (at the top\n # and bottom of the change list, for example). 
Get the action\n # whose button was pushed.\n try:\n action_index = int(request.POST.get('index', 0))\n except ValueError:\n action_index = 0\n\n # Construct the action form.\n data = request.POST.copy()\n data.pop(helpers.ACTION_CHECKBOX_NAME, None)\n data.pop(\"index\", None)\n\n # Use the action whose button was pushed\n try:\n data.update({'action': data.getlist('action')[action_index]})\n except IndexError:\n # If we didn't get an action from the chosen form that's invalid\n # POST data, so by deleting action it'll fail the validation check\n # below. So no need to do anything here\n pass\n\n action_form = self.action_form(data, auto_id=None)\n action_form.fields['action'].choices = self.get_action_choices(request)\n\n # If the form's valid we can handle the action.\n if action_form.is_valid():\n action = action_form.cleaned_data['action']\n select_across = action_form.cleaned_data['select_across']\n func = self.get_actions(request)[action][0]\n\n # Get the list of selected PKs. If nothing's selected, we can't\n # perform an action on it, so bail. Except we want to perform\n # the action explicitly on all objects.\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n if not selected and not select_across:\n # Reminder that something needs to be selected or nothing will\n # happen\n msg = _(\"Items must be selected in order to perform \"\n \"actions on them. No items have been changed.\")\n messages.add_message(request, messages.WARNING, msg)\n return None\n\n if not select_across:\n # Perform the action only on the selected objects\n queryset = queryset.filter(pk__in=selected)\n\n response = func(self, request, queryset)\n\n # Actions may return an HttpResponse-like object, which will be\n # used as the response from the POST. If not, we'll be a good\n # little HTTP citizen and redirect back to the changelist page.\n if isinstance(response, HttpResponseBase):\n return response\n else:\n return HttpResponseRedirect(request.get_full_path())\n else:\n msg = _(\"No action selected.\")\n messages.add_message(request, messages.WARNING, msg)\n return None", "def apply(self, *args) -> \"void\":\n return _coin.SoReorganizeAction_apply(self, *args)", "def update(args, config, cf_conn, template):\n print(\"Updating CloudFormation Stack %s...\" % config['stack_name'])\n stack_id = cf_conn.update_stack(\n config['stack_name'],\n template_body=template.to_json(),\n parameters=cf_params(),\n tags=config['tags'],\n capabilities=['CAPABILITY_IAM']\n )\n print('Updated ' + stack_id)", "def update_changeset_yaml(yaml_obj, f, commit):\n\n log.debug(yaml_obj)\n repo_filename = f['filename']\n blob_url = f['blob_url']\n\n committer_user = None\n if 'committer' in commit and 'login' in commit['committer']:\n user_qs = users_models.User.objects.filter(\n github_login=commit['committer']['login'])\n if user_qs.exists():\n committer_user = user_qs[0]\n\n qs = models.Changeset.objects.filter(repo_filename=repo_filename)\n if not qs.exists():\n log.warn('Changeset does not exists.')\n return None\n changeset = qs[0]\n\n with transaction.commit_on_success():\n models.ChangesetDetail.objects.filter(changeset=changeset).delete()\n\n changeset_obj = yaml_obj['changeset']\n new_changeset_obj = {}\n for k, v in changeset_obj.iteritems():\n if (k in [\n 'database_schema', 'type', 'classification']):\n new_changeset_obj[k] = v\n else:\n log.warn(u'Ignored changeset field %s.' 
% (k,))\n changeset_obj = new_changeset_obj\n changeset_obj['database_schema'] = (\n schemaversions_models.DatabaseSchema.objects.get(\n name=changeset_obj['database_schema']))\n changeset_obj['version_control_url'] = blob_url\n log.debug(pprint.pformat(changeset_obj))\n for k, v in changeset_obj.iteritems():\n setattr(changeset, k, v)\n changeset.review_status = models.Changeset.REVIEW_STATUS_NEEDS\n changeset.save()\n\n for changeset_detail_obj in yaml_obj['changeset_details']:\n changeset_detail_obj['changeset'] = changeset\n new_changeset_detail_obj = {}\n for k, v in changeset_detail_obj.iteritems():\n if (k in\n [\n 'description', 'apply_sql', 'revert_sql',\n 'apply_verification_sql',\n 'revert_verification_sql',\n 'changeset']):\n new_changeset_detail_obj[k] = v\n else:\n log.warn(u'Ignored changeset detail field %s.' % (k,))\n changeset_detail_obj = new_changeset_detail_obj\n log.debug(pprint.pformat(changeset_detail_obj))\n models.ChangesetDetail.objects.create(**changeset_detail_obj)\n\n models.ChangesetAction.objects.create(\n changeset=changeset,\n type=models.ChangesetAction.TYPE_CHANGED_WITH_DATA_FROM_GITHUB_REPO,\n timestamp=timezone.now())\n\n event_handlers.on_changeset_updated(changeset)\n log.debug('changeset = %s' % (changeset,))\n return changeset", "def alter(self, action: int, environment) -> None:\n\n environment.step(action)", "def update_app_actions(self, action):", "def changelist_view(self, request, extra_context=None):\n from django.contrib.admin.views.main import ERROR_FLAG\n opts = self.model._meta\n app_label = opts.app_label\n if not self.has_view_or_change_permission(request):\n raise PermissionDenied\n\n try:\n cl = self.get_changelist_instance(request)\n except IncorrectLookupParameters:\n # Wacky lookup parameters were given, so redirect to the main\n # changelist page, without parameters, and pass an 'invalid=1'\n # parameter via the query string. If wacky parameters were given\n # and the 'invalid=1' parameter was already in the query string,\n # something is screwed up with the database, so display an error\n # page.\n if ERROR_FLAG in request.GET:\n return SimpleTemplateResponse('admin/invalid_setup.html', {\n 'title': _('Database error'),\n })\n return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')\n\n # If the request was POSTed, this might be a bulk action or a bulk\n # edit. Try to look up an action or confirmation first, but if this\n # isn't an action the POST will fall through to the bulk edit check,\n # below.\n action_failed = False\n selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)\n\n actions = self.get_actions(request)\n # Actions with no confirmation\n if (actions and request.method == 'POST' and\n 'index' in request.POST and '_save' not in request.POST):\n if selected:\n response = self.response_action(request, queryset=cl.get_queryset(request))\n if response:\n return response\n else:\n action_failed = True\n else:\n msg = _(\"Items must be selected in order to perform \"\n \"actions on them. 
No items have been changed.\")\n self.message_user(request, msg, messages.WARNING)\n action_failed = True\n\n # Actions with confirmation\n if (actions and request.method == 'POST' and\n helpers.ACTION_CHECKBOX_NAME in request.POST and\n 'index' not in request.POST and '_save' not in request.POST):\n if selected:\n response = self.response_action(request, queryset=cl.get_queryset(request))\n if response:\n return response\n else:\n action_failed = True\n\n if action_failed:\n # Redirect back to the changelist page to avoid resubmitting the\n # form if the user refreshes the browser or uses the \"No, take\n # me back\" button on the action confirmation page.\n return HttpResponseRedirect(request.get_full_path())\n\n # If we're allowing changelist editing, we need to construct a formset\n # for the changelist given all the fields to be edited. Then we'll\n # use the formset to validate/process POSTed data.\n formset = cl.formset = None\n\n # Handle POSTed bulk-edit data.\n if request.method == 'POST' and cl.list_editable and '_save' in request.POST:\n if not self.has_change_permission(request):\n raise PermissionDenied\n FormSet = self.get_changelist_formset(request)\n modified_objects = self._get_list_editable_queryset(request, FormSet.get_default_prefix())\n formset = cl.formset = FormSet(request.POST, request.FILES, queryset=modified_objects)\n if formset.is_valid():\n changecount = 0\n for form in formset.forms:\n if form.has_changed():\n obj = self.save_form(request, form, change=True)\n self.save_model(request, obj, form, change=True)\n self.save_related(request, form, formsets=[], change=True)\n change_msg = self.construct_change_message(request, form, None)\n self.log_change(request, obj, change_msg)\n changecount += 1\n\n if changecount:\n msg = ngettext(\n \"%(count)s %(name)s was changed successfully.\",\n \"%(count)s %(name)s were changed successfully.\",\n changecount\n ) % {\n 'count': changecount,\n 'name': model_ngettext(opts, changecount),\n }\n self.message_user(request, msg, messages.SUCCESS)\n\n return HttpResponseRedirect(request.get_full_path())\n\n # Handle GET -- construct a formset for display.\n elif cl.list_editable and self.has_change_permission(request):\n FormSet = self.get_changelist_formset(request)\n formset = cl.formset = FormSet(queryset=cl.result_list)\n\n # Build the list of media to be used by the formset.\n if formset:\n media = self.media + formset.media\n else:\n media = self.media\n\n # Build the action form and populate it with available actions.\n if actions:\n action_form = self.action_form(auto_id=None)\n action_form.fields['action'].choices = self.get_action_choices(request)\n media += action_form.media\n else:\n action_form = None\n\n selection_note_all = ngettext(\n '%(total_count)s selected',\n 'All %(total_count)s selected',\n cl.result_count\n )\n\n context = {\n **self.admin_site.each_context(request),\n 'module_name': str(opts.verbose_name_plural),\n 'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},\n 'selection_note_all': selection_note_all % {'total_count': cl.result_count},\n 'title': cl.title,\n 'is_popup': cl.is_popup,\n 'to_field': cl.to_field,\n 'cl': cl,\n 'media': media,\n 'has_add_permission': self.has_add_permission(request),\n 'opts': cl.opts,\n 'action_form': action_form,\n 'actions_on_top': self.actions_on_top,\n 'actions_on_bottom': self.actions_on_bottom,\n 'actions_selection_counter': self.actions_selection_counter,\n 'preserved_filters': self.get_preserved_filters(request),\n **(extra_context 
or {}),\n }\n\n request.current_app = self.admin_site.name\n\n\n # Call the changelisttab funtion\n context[\"admintab\"] =self.change_list_tab\n request, context = self.changelisttab(request, context)\n\n return TemplateResponse(request, self.change_list_template or [\n 'admin/%s/%s/change_list.html' % (app_label, opts.model_name),\n 'admin/%s/change_list.html' % app_label,\n 'admin/change_list.html'\n ], context)", "def run_actions(self):\n # dedupe the list of actions\n actions = deque([])\n for action in self.args.action:\n if action not in actions:\n actions.append(action)\n\n while actions:\n action = actions.popleft()\n self._log.debug(\"running action: %s\", action)\n\n if action == \"push\":\n self.connect()\n self._log.warning(\"not implemented\")\n elif action == \"pull\":\n self.connect()\n self.pull_template_files()\n self.pull_xochitl_files()\n self.convert_xochitl_files()\n elif action == \"pull-raw\":\n self.connect()\n self.pull_xochitl_files()\n elif action == \"pull-web\":\n self.pull_pdf_files()\n elif action == \"convert-raw\":\n self.convert_xochitl_files()\n elif action == \"clean-local\":\n self.clean_local()\n else:\n self._log.warning(\"unknown action: %s\", action)\n\n self.close()\n self._log.info(\"actions completed, see %s\", self.backup_dir)", "def do_set(self, s):\r\n \r\n # parse all the weird characters in the argument string\r\n s = self.parse(s)\r\n # list all the skills we want to change\r\n args = s.lower().split()\r\n # try to get the new level\r\n try:\r\n n = int(args[0])\r\n # if we can't, the user wrote the command in the wrong way\r\n # tell him to get some help\r\n # maybe some rehab\r\n # LOL\r\n except ValueError:\r\n # try to catch the case where <set> <skill> <amount> is used\r\n if len(args) == 2:\r\n # Do some sneaky stuff so the rest of the function works\r\n #properly: put the arguments in the default order\r\n try:\r\n n = int(args[1])\r\n args[0], args[1] = args[1], args[0]\r\n except ValueError:\r\n print(\"*** Syntax Error: <set> <amount> <skill1> \"\\\r\n \"<skill2> ...\")\r\n print(\"Please type <help> <set> for help with the set \"\\\r\n \"command\")\r\n return\r\n else:\r\n print(\"*** Syntax Error: <set> <amount> <skill1> <skill2> ...\")\r\n print(\"Please type <help> <set> for help with the set command\")\r\n return\r\n # pop the new level from the string\r\n args.pop(0)\r\n # if no args are remaining the user missed the skills in the command\r\n if len(args) == 0:\r\n print(\"*** Syntax Error: missing <skill> argument\")\r\n print(\"Please type <help> <set> for help with the set command\")\r\n # parse the other skills\r\n # if the only argument missing is \"all\", swap it with all the skills\r\n elif len(args) == 1 and args[0] == \"all\":\r\n args.pop()\r\n args = self.skills\r\n # loop through the arguments\r\n for skill in args:\r\n # if we don't recognize the skill, warn the user\r\n if skill not in self.skills:\r\n print(\"*** Unknown skill \" + skill)\r\n else:\r\n # don't let the user reduce a skill further than it was\r\n if n < self.init_stats[skill]:\r\n print(\"You can't set \" + skill + \" to a lower level \"\\\r\n \"than the one it had when we started (\" +\r\n str(self.init_stats[skill]) + \")...\")\r\n continue\r\n # else, find the difference (new - old)\r\n d = n - self.values[skill]\r\n # if we are reducing a skill, (new-old) gives a negative int\r\n #so the following \"if\" statement ALWAYS lets us reduce it\r\n # if (new-old) is >0, evaluate if we have enough tokens\r\n #to set that skill to that level\r\n if 
self.available_tokens >= d:\r\n self.values[skill] = n\r\n # remember that if we were reducing a skill, d is <0\r\n #so this subtraction actually increases available_tkns\r\n self.available_tokens -= d\r\n print(skill.capitalize() + \" set to level \" + str(n))\r\n # If we don't have enough tokens, warn the user and exit\r\n else:\r\n print(\"You do not have enough skill tokens (\"\r\n + str(self.available_tokens) + \") to set \"\\\r\n + skill + \" to level \" + str(n))\r\n return\r\n # Remind the user how many tokens he has left\r\n print(\"You have \" + str(self.available_tokens) +\" available tokens.\")", "def _handle_group_m2m_changed(self, instance, action, pk_set, reverse,\n **kwargs):\n backend = search_backend_registry.current_backend\n\n if not (backend and\n search_backend_registry.on_the_fly_indexing_enabled):\n return\n\n if not hasattr(self._pending_user_changes, 'data'):\n self._pending_user_changes.data = {}\n\n if action in ('post_add', 'post_remove'):\n if reverse:\n # When using the reverse relation, the instance is the User and\n # the pk_set is the PKs of the groups being added or removed.\n users = [instance]\n else:\n # Otherwise the instance is the Group and the pk_set is the set\n # of User primary keys.\n users = User.objects.filter(pk__in=pk_set)\n\n for user in users:\n self.handle_save(instance=user, instance_kwarg='instance',\n sender=User)\n elif action == 'pre_clear':\n # When ``reverse`` is ``True``, a User is having their groups\n # cleared so we don't need to worry about storing any state in the\n # pre_clear phase.\n #\n # Otherwise, a ReviewGroup is having their users cleared. In both\n # the pre_clear and post_clear phases, the ``pk_set`` argument will\n # be empty, so we cache the PKs of the current members of the\n # groups so we know to reindex them.\n if not reverse:\n self._pending_user_changes.data[instance.pk] = list(\n instance.users.values_list('pk', flat=True))\n elif action == 'post_clear':\n if reverse:\n # When ``reverse`` is ``True``, we just have to reindex a\n # single user.\n self.handle_save(instance=instance, instance_kwarg='instance',\n sender=User)\n else:\n # Here, we are reindexing every user that got removed from the\n # group via clearing.\n pks = self._pending_user_changes.data.pop(instance.pk)\n\n for user in User.objects.filter(pk__in=pks):\n self.handle_save(instance=user, instance_kwarg='instance',\n sender=User)", "def process_changes(self, change, scale, lines):\n if change and change.has_data():\n lines.append('#<{}>'.format(self.level_name))\n lines.append('')\n change.process(self.level_package, scale, lines)\n lines.append('#</{}>'.format(self.level_name))\n lines.append('')", "def pushcommand(orig, ui, repo, *args, **kwargs):\n\n ReviewID(kwargs['reviewid'])\n\n if kwargs['rev'] and kwargs['changeset']:\n raise util.Abort(_('cannot specify both -r and -c'))\n\n # There isn't a good way to send custom arguments to the push api. So, we\n # inject some temporary values on the repo. 
This may fail in many\n # scenarios, most of them related to server operation.\n repo.noreviewboardpush = kwargs['noreview']\n repo.reviewid = kwargs['reviewid']\n\n # -c implies -r <rev> with an identical base node.\n if kwargs['changeset']:\n kwargs['rev'] = [kwargs['changeset']]\n repo.pushsingle = True\n else:\n repo.pushsingle = False\n\n try:\n return orig(ui, repo, *args, **kwargs)\n finally:\n repo.noreviewboardpush = None\n repo.reviewid = None\n repo.pushsingle = None", "def parse_commands():\n\n # Action classes\n class SetupAction(argparse.Action):\n \"\"\"The setup action class that is called when setup is found in the command line.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n \"\"\"This function is executed when setup is called.\"\"\"\n\n choice = int(input('You can now edit config files using QUBEKit, choose an option to continue:\\n'\n '1) Edit a config file\\n'\n '2) Create a new master template\\n'\n '3) Make a normal config file\\n>'))\n\n if choice == 1:\n inis = Configure.show_ini()\n name = input(f'Enter the name or number of the config file to edit\\n'\n f'{\"\".join(f\"{inis.index(ini)}:{ini} \" for ini in inis)}\\n>')\n # make sure name is right\n if name in inis:\n Configure.ini_edit(name)\n else:\n Configure.ini_edit(inis[int(name)])\n\n elif choice == 2:\n Configure.ini_writer('master_config.ini')\n Configure.ini_edit('master_config.ini')\n\n elif choice == 3:\n name = input('Enter the name of the config file to create\\n>')\n Configure.ini_writer(name)\n Configure.ini_edit(name)\n\n else:\n raise KeyError('Invalid selection; please choose from 1, 2 or 3.')\n\n sys_exit()\n\n class CSVAction(argparse.Action):\n \"\"\"The csv creation class run when the csv option is used.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n \"\"\"This function is executed when csv is called.\"\"\"\n\n generate_bulk_csv(values)\n sys_exit()\n\n class ProgressAction(argparse.Action):\n \"\"\"Run the pretty progress function to get the progress of all running jobs.\"\"\"\n\n def __call__(self, pars, namespace, values, option_string=None):\n \"\"\"This function is executed when progress is called.\"\"\"\n\n pretty_progress()\n sys_exit()\n\n parser = argparse.ArgumentParser(prog='QUBEKit', formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"QUBEKit is a Python 3.6+ based force field derivation toolkit for Linux operating systems.\nOur aims are to allow users to quickly derive molecular mechanics parameters directly from quantum mechanical calculations.\nQUBEKit pulls together multiple pre-existing engines, as well as bespoke methods to produce accurate results with minimal user input.\nQUBEKit aims to use as few parameters as possible while also being highly customisable.\"\"\", epilog=\"\"\"QUBEKit should currently be considered a work in progress.\nWhile it is stable we are constantly working to improve the code and broaden its compatibility. 
\nWe use lots of software written by many different people;\nif reporting a bug please (to the best of your ability) make sure it is a bug with QUBEKit and not with a dependency.\nWe welcome any suggestions for additions or changes.\"\"\")\n\n # Add all of the command line options in the arg parser\n parser.add_argument('-c', '--charge', default=0, type=int, help='Enter the charge of the molecule, default 0.')\n parser.add_argument('-m', '--multiplicity', default=1, type=int, help='Enter the multiplicity of the '\n 'molecule, default 1.')\n parser.add_argument('-ddec', '--ddec_version', choices=[3, 6], type=int,\n help='Enter the ddec version for charge partitioning, does not effect ONETEP partitioning.')\n parser.add_argument('-geo', '--geometric', choices=[True, False], type=bool,\n help='Turn on geometric to use this during the qm optimisations, recommended.')\n parser.add_argument('-bonds', '--bonds_engine', choices=['psi4', 'g09'],\n help='Choose the QM code to calculate the bonded terms.')\n parser.add_argument('-charges', '--charges_engine', choices=['onetep', 'chargemol'],\n help='Choose the method to do the charge partioning.')\n parser.add_argument('-density', '--density_engine', choices=['onetep', 'g09', 'psi4'],\n help='Enter the name of the QM code to calculate the electron density of the molecule.')\n parser.add_argument('-solvent', '--solvent',\n help='Enter the dielectric constant or the name of the solvent you wish to use.')\n # maybe separate into known solvents and IPCM constants?\n parser.add_argument('-convergence', '--convergence', choices=['GAU', 'GAU_TIGHT', 'GAU_VERYTIGHT'],\n help='Enter the convergence criteria for the optimisation.')\n parser.add_argument('-param', '--parameter_engine', choices=['xml', 'gaff', 'gaff2', 'openff'],\n help='Enter the method of where we should get the initial molecule parameters from, '\n 'if xml make sure the xml has the same name as the pdb file.')\n parser.add_argument('-mm', '--mm_opt_method', default='openmm', choices=['openmm', 'rdkit_mff', 'rdkit_uff'],\n help='Enter the mm optimisation method for pre qm optimisation.')\n parser.add_argument('-config', '--config_file', default='default_config', choices=Configure.show_ini(),\n help='Enter the name of the configuration file you wish to use for this run from the list '\n 'available, defaults to master.')\n parser.add_argument('-theory', '--theory',\n help='Enter the name of the qm theory you would like to use.')\n parser.add_argument('-basis', '--basis',\n help='Enter the basis set you would like to use.')\n parser.add_argument('-restart', '--restart', choices=['parametrise', 'mm_optimise', 'qm_optimise', 'hessian',\n 'mod_sem', 'density', 'charges', 'lennard_jones',\n 'torsion_scan', 'torsion_optimise'],\n help='Enter the restart point of a QUBEKit job.')\n parser.add_argument('-end', '-end', choices=['mm_optimise', 'qm_optimise', 'hessian', 'mod_sem', 'density',\n 'charges', 'lennard_jones', 'torsion_scan', 'torsion_optimise',\n 'finalise'], help='Enter the end point of the QUBEKit job.')\n parser.add_argument('-progress', '--progress', nargs='?', const=True,\n help='Get the current progress of a QUBEKit single or bulk job.', action=ProgressAction)\n parser.add_argument('-combination', '--combination', default='opls', choices=['opls', 'amber'],\n help='Enter the combination rules that should be used.')\n parser.add_argument('-skip', '--skip', nargs='+', choices=['mm_optimise', 'qm_optimise', 'hessian', 'mod_sem',\n 'density', 'charges', 'lennard_jones',\n 'torsion_scan', 
'torsion_optimise', 'finalise'],\n help='Option to skip certain stages of the execution.')\n\n # Add mutually exclusive groups to stop wrong combinations of options,\n # e.g. setup should not be ran with another command\n groups = parser.add_mutually_exclusive_group()\n groups.add_argument('-setup', '--setup_config', nargs='?', const=True,\n help='Setup a new configuration or edit an existing one.', action=SetupAction)\n groups.add_argument('-sm', '--smiles', help='Enter the smiles string of a molecule as a starting point.')\n groups.add_argument('-bulk', '--bulk_run',\n help='Enter the name of the csv file to run as bulk, bulk will use smiles unless it finds '\n 'a molecule file with the same name.')\n groups.add_argument('-csv', '--csv_filename',\n help='Enter the name of the csv file you would like to create for bulk runs.',\n action=CSVAction)\n groups.add_argument('-i', '--input', help='Enter the molecule input pdb file (only pdb so far!)')\n\n return parser.parse_args()", "def _testCommitManifestChange(self, changes=None, **kwargs):\n self.PatchObject(validation_pool.ValidationPool, '_FilterNonCrosProjects',\n side_effect=lambda x, _: (x, []))\n return self.PerformSync(changes=changes, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Try to move to the room neighboring in {direction} of the player's current room
def move_to(self, direction): if self.room.neighbor[direction]: #check if room in dir exists self.__set_room__(self.room.neighbor[direction]) return True else: return False
[ "def move(rooms, exits, direction):\n\n # Next room to go to\n return rooms[exits[direction]]", "def move(self, direction):\n if not self.dead:\n cells = self.cells\n\n if direction == 0:\n self.direction = 0\n for i in range(len(cells) - 1):\n cells[i].top = cells[i+1].top\n cells[i].left = cells[i+1].left\n cells[-1].top -= cells[-1].grid_size\n\n if direction == 1:\n self.direction = 1\n for i in range(len(cells) - 1):\n cells[i].top = cells[i+1].top\n cells[i].left = cells[i+1].left\n cells[-1].left += cells[-1].grid_size\n\n if direction == 2:\n self.direction = 2\n for i in range(len(cells) - 1):\n cells[i].top = cells[i+1].top\n cells[i].left = cells[i+1].left\n cells[-1].top += cells[-1].grid_size\n\n if direction == 3:\n self.direction = 3\n for i in range(len(cells) - 1):\n cells[i].top = cells[i+1].top\n cells[i].left = cells[i+1].left\n cells[-1].left -= cells[-1].grid_size", "def move(self, direction):\n self.older_position = self.position\n new_position = getattr(self.position, direction)\n if new_position in self.map.empty:\n self.position = new_position\n self.take_item()", "def move(self):\n if self.direction == \"n\":\n self.position = (self.position[0]-1, self.position[1])\n\n elif self.direction == \"s\":\n self.position = (self.position[0]+1, self.position[1])\n\n elif self.direction == \"e\":\n self.position = (self.position[0], self.position[1]+1)\n\n elif self.direction == \"w\":\n self.position = (self.position[0], self.position[1]-1)", "def next_room(game):\n player = game.player\n player_directions = {'north': 'up', 'up': 'up',\n 'south': 'down', 'down': 'down',\n 'east': 'right', 'right': 'right',\n 'west': 'left', 'left': 'left', }\n direction_check = input(\"Which direction would you like to try \").lower()\n while direction_check not in player_directions:\n direction_check = input(\"Please input cardinal direction. \").lower()\n direction = player_directions[direction_check]\n return game.rooms[player.position['location']].paths[direction]", "def change_position(self, cell: Cell, direction):\n \n new_x = 0\n new_y = 0\n\n if direction == 'N':\n new_x = cell.x - 1\n new_y = cell.y\n elif direction == 'E':\n new_x = cell.x \n new_y = cell.y + 1\n elif direction == 'W':\n new_x = cell.x\n new_y = cell.y - 1\n elif direction == 'S':\n new_x = cell.x + 1\n new_y = cell.y\n\n if (new_x < 0 or new_x > 7 or new_y < 0 or new_y > 7):\n raise Exception(\"Knight Drowns\")\n \n return self.board[new_x][new_y]", "async def send_move(self, direction: Direction):", "def move_to_room(rooms, current_room, direction):\n# While loop runs the statements in its body while the current_room exists in the rooms dictionary.\n while current_room in rooms:\n print() # Prints newline\n show_status(current_room) # Func call to show current_room.\n next_move = user_prompt() # Func call and assigns value to next_move.\n print() # Prints newline\n# IF statement checks to see if next_move contains \"exit\" if so the loop breaks and the game ends.\n if next_move == 'exit':\n print('Thank you for playing! 
Goodbye.')\n break\n# IF statement checks to see if next_move is one of the rooms dictionary keys.\n# if so, it will assign current_room with the value of the key it found.\n# ELSE IF next_move is NOT in directions tuple, show \"Invalid Entry\" message to user.\n# ELSE show \"You can't go that way.\" message to user\n if next_move in rooms[current_room]:\n current_room = rooms[current_room][next_move]\n elif next_move not in direction:\n print(\"Invalid Entry\")\n else:\n print()\n print(\"You can\\'t go that way.\")\n print()", "def move(self, direction):\n change = False\n initial_tiles = self._initial_tiles_dict.get(direction)\n offset = OFFSETS[direction]\n if direction == UP or direction == DOWN:\n size = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n size = self._grid_width\n for tile_index in initial_tiles: \n tile_indices = []\n # iterate through adding offset\n for dummy_i in range(size):\n tile_indices.append(tile_index)\n tile_index = [x + y for x, y in zip(tile_index,offset)]\n tile_index = tuple(tile_index)\n to_merge = []\n for tile_index in tile_indices:\n tile = self.get_tile(tile_index[0], tile_index[1])\n to_merge.append(tile)\n merged = merge(to_merge)\n for tile_index, tile_value in zip(tile_indices, merged):\n if tile_value != self.get_tile(tile_index[0], tile_index[1]):\n self.set_tile(tile_index[0], tile_index[1], tile_value)\n change = True\n \n if change == True:\n self.new_tile()", "def move(self, knight, direction):\n print (knight)\n knight.cell.knight = None\n try: \n new_cell = self.change_position(knight.cell, direction)\n print (new_cell.knight)\n print (\"New Cell: \", new_cell, \"\\n\")\n\n except:\n item, position = self.kill_knight(knight, 2)\n\n # item = knight.equipped\n # last_pos = knight.cell\n # knight.status = 'DROWNED'\n # knight.cell = None\n # knight.equipped = None\n # knight.base_attack = 0\n # knight.base_defence = 0\n\n if item:\n item.cell = position\n position.items.append(item)\n position.items.sort(key=attrgetter('value'))\n\n else: \n print (\"Defender: \", new_cell.knight)\n if (new_cell.knight is not None):\n print (\"BATTLE\")\n winner, loser = self.attack(knight, new_cell.knight)\n print (\"Winner: \", winner)\n print (\"Loser: \", loser ,\"\\n\")\n\n winner.cell = new_cell\n new_cell.knight = winner\n if winner.equipped:\n winner.equipped.cell = new_cell\n item, position = self.kill_knight(loser, 1)\n # item_ = loser.equipped\n # last_pos = loser.cell\n # loser.status = \"DEAD\"\n # loser.cell = None\n # loser.equipped = None\n # loser.attack_score = 0\n # loser.defense_score = 0\n\n if item:\n item.cell = position\n position.items.append(item)\n position.items.sort(key=attrgetter('value'))\n\n return winner\n \n if (new_cell.knight is None and len(new_cell.items) == 0):\n knight.cell = new_cell\n new_cell.knight = knight\n if knight.equipped:\n knight.equipped.cell = new_cell\n\n elif (len(new_cell.items) > 0):\n knight.cell = new_cell\n new_cell.knight = knight\n if knight.equipped:\n knight.equipped.cell = new_cell\n\n new_cell.items.sort(key=attrgetter('value'))\n if not knight.equipped:\n knight.equipped = new_cell.items.pop()\n\n return knight", "def move_to(self):\n #self.find_wall()\n \n t = self.find_best_way()\n if t:\n click(t)\n else:\n click(random.choice(locations))", "def move(self, direction):\n\n if self.move_grid(direction=direction):\n self.add_random_number()", "def _move_door(self,):\n\n pass", "def move(self, direction):\n if direction in (UP, DOWN):\n n = self.grid_height\n else:\n n = 
self.grid_width\n\n has_moved = False\n\n for tup in self.INITIAL_TILES[direction]:\n tmp_list = []\n for i in range(n):\n row, col = tup[0] + OFFSETS[direction][0] * i, tup[1] + OFFSETS[direction][1] * i\n tmp_list.append(self.get_tile(row, col))\n new_list = merge(tmp_list)\n for i in range(n):\n row, col = tup[0] + OFFSETS[direction][0] * i, tup[1] + OFFSETS[direction][1] * i\n self.set_tile(row, col, new_list[i])\n if new_list[i] != tmp_list[i]:\n has_moved = True\n\n if has_moved:\n self.new_tile()", "def next_pos(i, j, move):\n return i + directions[move].row, j + directions[move].col", "def move_to_perimeter(self, environment: env.map.Map):\n\n position_before = np.copy(self.geometry.position)\n\n if self.current_path is None:\n # move to next block position in designated direction (which could be the shortest path or\n # just some direction chosen e.g. at the start, which is assumed here)\n directions = [np.array([1, 0, 0]), np.array([-1, 0, 0]), np.array([0, 1, 0]), np.array([0, -1, 0])]\n counts = self.count_in_direction(environment)\n\n if any([c != 0 for c in counts]):\n sorter = sorted(range(len(directions)), key=lambda i: counts[i])\n directions = [directions[i] for i in sorter]\n self.current_grid_direction = directions[0]\n else:\n self.current_grid_direction = random.sample(directions, 1)[0]\n\n self.current_path = Path()\n destination_x = (self.current_grid_position + self.current_grid_direction)[0] * Block.SIZE + \\\n environment.offset_origin[0]\n destination_y = (self.current_grid_position + self.current_grid_direction)[1] * Block.SIZE + \\\n environment.offset_origin[1]\n self.current_path.add_position([destination_x, destination_y, self.geometry.position[2]])\n\n next_position, current_direction = self.move(environment)\n if simple_distance(self.geometry.position, next_position) <= Agent.MOVEMENT_PER_STEP:\n self.geometry.position = next_position\n ret = self.current_path.advance()\n\n if not ret:\n self.current_grid_position += self.current_grid_direction\n\n self.current_blocks_per_attachment += 1\n\n self.update_local_occupancy_map(environment)\n\n try:\n # this is basically getting all values of the occupancy map at the locations where the hole map\n # has the value of the hole which we are currently over\n result = all(self.local_occupancy_map[self.hole_boundaries[self.hole_map[\n self.current_grid_position[2], self.current_grid_position[1],\n self.current_grid_position[0]]]] != 0)\n # if the result is True, then we know that there is a hole and it is closed already\n except (IndexError, KeyError):\n result = False\n\n if not self.current_block_type_seed:\n self.current_blocks_per_attachment += 1\n\n if environment.block_below(self.geometry.position, self.current_structure_level) is None and \\\n (check_map(self.hole_map, self.current_grid_position, lambda x: x < 2) or not result):\n # have reached perimeter, two possibilities:\n # - carrying normal block -> should find an attachment site\n # - carrying seed -> should do a survey of the current component\n\n if not self.current_block_type_seed:\n self.current_task = Task.FIND_ATTACHMENT_SITE\n else:\n self.current_task = Task.SURVEY_COMPONENT\n self.task_history.append(self.current_task)\n self.current_grid_direction = np.array(\n [-self.current_grid_direction[1], self.current_grid_direction[0], 0], dtype=\"int64\")\n else:\n destination_x = (self.current_grid_position + self.current_grid_direction)[0] * Block.SIZE + \\\n environment.offset_origin[0]\n destination_y = (self.current_grid_position + 
self.current_grid_direction)[1] * Block.SIZE + \\\n environment.offset_origin[1]\n self.current_path.add_position([destination_x, destination_y, self.geometry.position[2]])\n else:\n self.geometry.position = self.geometry.position + current_direction\n\n self.per_task_distance_travelled[Task.MOVE_TO_PERIMETER] += simple_distance(position_before,\n self.geometry.position)", "def move(self):\n if self._moving_between_tiles:\n self.__move_between_tiles()\n else:\n check_next_coord, jump = self._calculate_new_coord()\n # Checks if the next calculated coordinate is a wall or if there is an intersection/turn (and extreme mode deactivated)\n # If so it wil calculate a new direction. This is so for most of the following moving methods (scatter/frightened)\n if (self.__coord_dict.get(\n check_next_coord).is_wall() or self.__check_neighbours()) and not self.__extreme_mode:\n self.__update_target_tile()\n self._direction = self.astar.get_direction(self._coord,\n self.astar.get_closest_tile(self.__update_target_tile()))\n elif self.__extreme_mode:\n # Calculates immediately a new direction\n self.__update_target_tile()\n self._direction = self.astar.get_direction(self._coord,\n self.astar.get_closest_tile(self.__update_target_tile()))\n # Looks if the ghosts need to perform a set on opposite side movement\n if jump:\n self._set_on_opposite_side()\n self._moving_between_tiles = True\n self._draw_character(self._coord, self.__image)", "def move(self, direction: str):\n if direction == \"left\":\n if self.x > 0:\n self.x -= 1\n if direction == \"right\":\n if self.x < self.xlim:\n self.x += 1\n if direction == \"down\":\n if self.y < self.ylim:\n self.y += 1\n if direction == \"up\":\n if self.y > 0:\n self.y -= 1", "def test_move(self):\n\n map1 = \"\"\"\\\n OOOOOO\n OODOJO\n OOJJOO\n OOOOOO\"\"\"\n rd.seed(5)\n m = Ma.Map(map1)\n m.populate_map((1, 2), [Fa.Carnivore(\n age=10, weight=50) for _ in range(100)])\n m.populate_map((1, 2), [Fa.Herbivore(\n age=15, weight=30) for _ in range(10)])\n\n new_cell = m.migrate_to((1, 2))\n m.move()\n\n assert m.island[1, 2].total_pop + m.island[\n new_cell].total_pop == 110\n assert m.island[1, 2].total_pop == 62\n assert m.island[new_cell].total_pop == 48" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Operation for efficiently calculating the dot product when one or all operands are sparse. Supported formats are CSC and CSR. The output of the operation is dense.
def dot(x, y):
    if hasattr(x, 'getnnz'):
        x = theano.sparse.as_sparse_variable(x)
    if hasattr(y, 'getnnz'):
        y = theano.sparse.as_sparse_variable(y)
    x_is_sparse_variable = theano.sparse.basic._is_sparse_variable(x)
    y_is_sparse_variable = theano.sparse.basic._is_sparse_variable(y)
    if not x_is_sparse_variable and not y_is_sparse_variable:
        raise TypeError()
    return _dot(x, y)
[ "def safe_sparse_dot(a, b, dense_output=False):\n if type(a) == SparseLR and type(b) == np.ndarray:\n return a.dot(b)\n if type(b) == SparseLR and type(a) == np.ndarray:\n return (b.T).dot(a.T).T\n if type(a) == SparseLR and type(b) == SparseLR:\n raise NotImplementedError\n if sparse.issparse(a) or sparse.issparse(b):\n ret = a * b\n if dense_output and hasattr(ret, \"toarray\"):\n ret = ret.toarray()\n return ret\n else:\n return np.dot(a, b)", "def sparseVectorDotProduct(v1, v2):\n # BEGIN_YOUR_ANSWER (our solution is 3 lines of code, but don't worry if you deviate from this)\n return sum(v1[k] * v2[k] for k in v1)\n # END_YOUR_ANSWER", "def dot(x, y, sparse=False):\n if sparse:\n res = tf.sparse_tensor_dense_matmul(x, y)\n else:\n res = tf.matmul(x, y)\n return res", "def dot(x, y, sparse = False):\r\n if sparse:\r\n res = x.mm(y)\r\n else:\r\n res = torch.matmul(x, y)\r\n return res", "def multiple_safe_sparse_dot(*matrices):\n if len(matrices) < 2:\n raise ValueError(\"Argument 'matrices' must have at least 2 matrices\")\n\n r = matrices[0]\n for m in matrices[1:]:\n r = safe_sparse_dot(r, m)\n\n return r", "def test_sparse_tensordot():\n # GIVEN accepted numpy doc example for tensordot\n a = np.arange(60.).reshape(3, 4, 5) # noqa\n b = np.arange(24.).reshape(4, 3, 2) # noqa\n # WHEN the sparse version of tensordot is performed\n c_sparse = sparse.tensordot(a, b, axes=([1, 0], [0, 1]))\n # THEN the expected results are seen\n c = c_sparse.todense()\n assert c.shape == (5, 2)\n assert np.array_equal(c,\n np.array(\n [[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]]\n ))", "def batch_dot(a, b, transpose_a=False, transpose_b=False, forward_stype=\"default\"):\n return _api_internal.batch_dot(a, b, transpose_a, transpose_b, forward_stype)", "def dot(self, matrix) -> np.ndarray:\n prod = self.sparse_mat.dot(matrix)\n if len(matrix.shape) == 1:\n for (x, y) in self.low_rank_tuples:\n prod += x * matrix.dot(y)\n else:\n matrixT = matrix.T\n for (x, y) in self.low_rank_tuples:\n prod += x[:, np.newaxis].dot(matrixT.dot(y)[:, np.newaxis].T)\n return prod", "def dot_product(v1, v2):\n return sum(num1 * num2 for num1, num2 in zip(v1, v2))", "def test_vector_dot():\n amat = np.array([1, 2, 3], float)\n bmat = np.array([5, 6, 7], float)\n out1 = (amat * bmat).sum()\n out2 = my_ddot(amat, bmat)\n\n assert out1 == out2", "def test_dot(self):\n\n # If no arrays, return 0\n self.assertAllClose(linalg.dot(),\n 0)\n # If only one array, return itself\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]]),\n [[1,2,3],\n [4,5,6]])\n # Basic test of two arrays: (2,3) * (3,2)\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]],\n [[7,8],\n [9,1],\n [2,3]]),\n [[31,19],\n [85,55]])\n # Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)\n self.assertAllClose(linalg.dot([[1,2,3],\n [4,5,6]],\n [[7,8],\n [9,1],\n [2,3]],\n [[4],\n [5]],\n [[6,7]]),\n [[1314,1533],\n [3690,4305]])\n\n # Test broadcasting: (2,2,2) * (2,2,2,2)\n self.assertAllClose(linalg.dot([[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[[1,2],\n [3,4]],\n [[5,6],\n [7,8]]],\n [[[9,1],\n [2,3]],\n [[4,5],\n [6,7]]]]),\n [[[[ 7, 10],\n [ 15, 22]],\n\n [[ 67, 78],\n [ 91, 106]]],\n\n\n [[[ 13, 7],\n [ 35, 15]],\n\n [[ 56, 67],\n [ 76, 91]]]])\n\n # Inconsistent shapes: (2,3) * (2,3)\n self.assertRaises(ValueError,\n linalg.dot,\n [[1,2,3],\n [4,5,6]],\n [[1,2,3],\n [4,5,6]])\n # Other axes do not broadcast: (2,2,2) * (3,2,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [[[1,2],\n [3,4]],\n 
[[5,6],\n [7,8]]],\n [[[1,2],\n [3,4]],\n [[5,6],\n [7,8]],\n [[9,1],\n [2,3]]])\n # Do not broadcast matrix axes: (2,1) * (3,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [[1],\n [2]],\n [[1,2,3],\n [4,5,6]])\n # Do not accept less than 2-D arrays: (2) * (2,2)\n self.assertRaises(ValueError,\n linalg.dot,\n [1,2],\n [[1,2,3],\n [4,5,6]])", "def test_dot_product(self):\n vector1 = Vector(*self.test_vector)\n vector2 = Vector(*self.test_vector_alternate)\n\n dot_product = sum(\n x * y for x, y in zip(self.test_vector, self.test_vector_alternate)\n )\n\n self.assertEqual(dot_product, vector1.dot(vector2))\n self.assertEqual(dot_product, vector2.dot(vector1))", "def dot_product(vec1, vec2):\n\treturn (vec1[0]*vec2[0])+(vec1[1]*vec2[1])", "def dot_product(u, v):\n return sum([u * v for (u, v) in zip(u, v)])", "def DotProduct(x, y):\n xshape = tf.shape(x)\n yshape = tf.shape(y)\n prod = tf.reduce_sum(tf.expand_dims(x, 1) * tf.expand_dims(y, 0), 4)\n return tf.reshape(prod, [xshape[0] * yshape[0], xshape[1], xshape[2], 1])", "def dot_product(u, v):\n if u.rest is Link.empty and v.rest is Link.empty:\n return u.first * v.first\n if u.rest is Link.empty or v.rest is Link.empty:\n raise IndexError('Vectors and u and v are not the same length.')\n return u.first * v.first + dot_product(u.rest, v.rest)", "def test_dot_product() -> np.ndarray:\r\n input_matrix = 4.25 * np.array([[3, -np.sqrt(3)], [-np.sqrt(3), 5]])\r\n v1, v2 = input_matrix[:, 0], input_matrix[:, 1]\r\n dot = get_dot_product(v1, v2)\r\n assert isinstance(dot, float), f\"Dot product should be a float but is {type(dot)}\"\r\n np.testing.assert_allclose(dot, -250.28134169370276)", "def tensor_dot(A,B):\n aux = np.array([[A[i,j]*B[i,j] for i in range(dims)]\n for j in range(dims)])\n return np.sum(aux, axis=(0,1))", "def csr_mul_np(indptr, indices, sparse_data, dense, shape):\n import scipy.sparse\n x = sparse_data.reshape(sparse_data.shape[0], -1)\n y = np.broadcast_to(dense, shape).reshape(shape[0], shape[1], -1)\n expect = []\n for i in range(x.shape[-1]):\n sparse = scipy.sparse.csr_matrix((x[..., i], indices, indptr), shape=shape[:2])\n out = sparse.multiply(y[..., i])\n expect.append(out.data)\n expect = np.moveaxis(np.stack(expect, 0).reshape(shape[2:] + [sparse_data.shape[0]]), -1, 0)\n return expect" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the cycle variable, which displays the cycle entry.
def reset_cycle(self):
    self.cycle = None
[ "def reset(self):\n self.numb = self.starter", "def cycleDown(self):\n self.openVideo(self.cycle_vid.down(), self.image_holder.cur_idx)\n self.changeDescription()", "def reset(self) -> None:\n self.progress_text.set(\"\")\n self.progress_pct_value.set(0.0)\n self.progress_pct_text.set(\"\")\n self.frame.update()", "def cycle(self, cycle):\n if self.local_vars_configuration.client_side_validation and cycle is None: # noqa: E501\n raise ValueError(\"Invalid value for `cycle`, must not be `None`\") # noqa: E501\n\n self._cycle = cycle", "def setCycleStamp(self, long: int) -> None:\n ...", "def Reset():\n global stopper\n stopper = False", "def ResetLCD ( self ):\n\t\tself.ClearLCD()\n\t\tself.SetLCDCursorStyle ( 0 )\n\t\tself.SetLED (0,0,0)\n\t\tself.SetLED(1,0,0)\n\t\tself.SetLED(2,0,0)\n\t\tself.SetLED(3,0,0)", "def set_duty_cycle(self):\n if self.laser_status:\n self._fiber_shooting_logic.set_duty_cycle(self._mw.duty_cycle_doubleSpinBox.value())\n else:\n pass\n return", "def reset(self):\n\n self.timestep = 0\n self.historyLayer.reset()", "def reset(self):\n self.ui.lcd_reset()\n self.ui.clear()\n self.ui.lcd_home()\n self.ui.cursor()", "def reset_graph(self):\n if self.array_graph.algorithm.solving == 1:\n return\n self.array_graph.set_graph_density(self.density_slider.value())\n self.array_graph.set_algorithm(self.algorithm_list.currentText())\n self.update_signal_source()\n self.set_iterations_label()", "def reset(self):\n if (self.val != self.valinit):\n self.set_val(self.valinit)", "def dip_reset(force):\n if force:\n settings.reset()", "def resetCounterDisplay(self):\n counterDisplayText = \"Node Counters:\\n\\n\"\n for n in self.nodeTypes:\n self.nodeWithTypes[n] = []\n counterDisplayText += n + \": 0\\n\"\n counterDisplayText += \"\\nEdge Counters:\\n\\n\"\n for e in self.edgeTypes:\n self.edgeWithTypes[e] = dict()\n counterDisplayText += e + \": 0\\n\"\n self.counter_label.setText(counterDisplayText)", "def reset(value):", "def _reset_repeat(self):\r\n if self.initial_repeat == (0, 0):\r\n pygame.key.set_repeat()\r\n else:\r\n pygame.key.set_repeat(*self.initial_repeat)", "def reset(self):\n\n self.history = []\n self.output.value[:] = self.initialHistory", "def reset_terminal(self) -> None:\n self.display_back_ground()\n self.display_menu()\n self.print_moves()", "def cycle_relay(self, label):\n self.change_relay_state(self.relay_labels[label], TruckerBoardCommands.CYCLE_DELAY)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the stack list
def reset_stack(self):
    self.stack = []
[ "def clear(self):\n self.stack = list()", "def reset_stack(self):\n\n for i in range(0, self._num_players):\n self._obs_stacks[i].fill(0.0)", "def test_stack_reset(stack_with_content):\n stack_with_content.reset()\n\n assert stack_with_content.pop() is None", "def restore_state(self):\n if self:\n self.pop()\n else:\n log.warning(\"Can't reset empty state\")", "def flush_cache(self):\n self.__stacks = None", "def reset(self):\n self.reward_list = []\n self.action_list = []", "def reset(self):\n self.layers = {}\n self.stack = []\n self._set_mask()\n self.n_vox_in_vol = len(np.where(self.current_mask)[0])", "def reset(self):\n self.fuse = []", "def reset(self):\n self.rband = [0]\n self.lband = []", "def reset(self):\n\n self.history = []\n self.output.value[:] = self.initialHistory", "def reset(self):\n self.hand = []\n self.score = 0", "def Reset(self):\r\n self.grid = self.EmptyGrid()\r\n self.count = self.EmptyGrid()", "def reset(self):\n self._frames = collections.deque()\n self._flush()", "def reset(self):\n self.devices_home = []", "def reset(self):\r\n # replace with your code\r\n self._grid[:] = []\r\n for _ in range(self._grid_height):\r\n self._grid.append([0] * self._grid_width)\r\n \r\n # List comprehension\r\n #self._grid = [[row + col for col in range(self._grid_width)]\r\n # for row in range(self._grid_height)]\r\n \r\n self.new_tile()\r\n self.new_tile()\r\n \r\n # Need to add code to reset all vars on restart game\r", "def __init__(self):\n self._list = [] #Hold items in the stack.\n self._top = -1 #Denotes the top of the stack", "def reset(self):\n self.losses = []\n self.batch_sizes = []", "def reset_positions(self):\n self.positions = []", "def reset_traverse(self):\n self.traverse = []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resets the traverse list
def reset_traverse(self):
    self.traverse = []
[ "def reset(self):\n self.fuse = []", "def reset(self):\n # A copy is necessary here so that the modifications to the list don't affect the traversal.\n for qubit in copy(self.live_qubits):\n self.free_qubit(qubit)\n qubit.resource_manager = None\n self.live_qubits = []\n self.dead_qubits = []\n self.in_use = {}", "def reset(self):\n self.reward_list = []\n self.action_list = []", "def reset(self):\n for vertex in self.__graph_dict.values():\n vertex.visited = False", "def clear(self):\n self.listwalker.clear()", "def reset(self):\n self.lines = []\n self.total_todos = 0\n self.active_todos = []\n self.done_todos = []", "def reset(self):\n self._lnprob = []\n self._chain = []\n self._epsilon = 0.", "def reset_values(self):\n self.pointer = self.structure\n self.root = None", "def resetIterator(cls):", "def reset(self):\n self.rrt.reset()\n\tself.bestPath = None\n\tself.bestPathCost = None\n self.lastPruneCost = None\n\tself.updateBestCost()", "def reset_value(self):\n for node in self.nodes:\n # 每个节点不递归清楚自己子节点的值(否则会多次Clear同一个Node)\n node.reset_value(False)", "def reset(self):\n\n self.history = []\n self.output.value[:] = self.initialHistory", "def reset_vertices(self):\n\n for v in self.vertices.values():\n v.visited = False", "def reset(self):\n self.losses = []\n self.batch_sizes = []", "def reset_children(self):\n for child in self.children_iter():\n child.parent = None\n self.children = list()\n self.children_by_name = dict()", "def reset(self):\n for beam in self._beams:\n beam.reset()\n self._free_beams = [beam for beam in self._beams]\n self._allocated_beams = []\n self._tilings = []\n self._dynamic_tilings = []", "def clear_walk_data(self):\n self.walk_data = None", "def reset(self, do_resets=None):\n pass", "def reset(self):\n nodes = list(filter(lambda node: isinstance(node, gn.StatefulTransformation), self.graph.nodes))\n list(map(lambda node: node.reset(), nodes))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Single client connection function
def client_verbindung(client):
    name = client.recv(BUFFERSIZE).decode("utf8")
    willkommen = 'Willkomen %s! Um sich auszuloggen schreiben Sie bitte {quit}!' %name
    client.send(bytes(willkommen, "utf8"))
    msg = "%s hat sich Verbunden!" %name
    broadcast(bytes(msg, "utf8"))
    clients[client] = name
    while True:
        msg = client.recv(BUFFERSIZE)
        if msg != bytes("{quit}", "utf8"):
            broadcast(msg, name+": ")
        else:
            client.send(bytes("{quit}", "utf8"))
            client.close()
            del clients[client]
            broadcast(bytes("%s hat sich ausgeloggt." %name, "utf8"))
            break
[ "def connected(client):", "def __init__(self):\n self.socket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.endereco = socket.gethostname()\n self.porta = 9997\n self.socket_client.connect((self.endereco, self.porta))\n self.menu = info\n self.apresentacao = msgInicial\n print('Cliente iniciado')", "def cargarCliente(self):", "def version():\n print('Lizzy Client', VERSION)", "def client():\n return TestClient()", "def _init_client(self):\n pass", "def __init__(self):\n self.clients = {}", "def helloWorld(self, client):\n client.say('Hello World!')\n return", "def display_one_client(client):\r\n if client:\r\n print(client)\r\n else:\r\n print('No such client')", "def on_connect(self, client, userdata, rc):\n print(\"Connected with result code: \" + str(rc))\n self.subscribe(\"orchestra/glock\")", "def get_client_info(): # real signature unknown; restored from __doc__\n pass", "def serverGreeting(self, caps):", "def recv(self):", "def onAccept(self, sock):", "def __client(self):\n\t\tself.__set_event_callbacks()\n\t\tself.__connect_with_credentials()\n\t\tself.__subscribe()", "def acknowledgeNewClient(self, client, server):\n print(\"New client connected: {}\".format(client))", "def testclient(url,port):\r\n\r\n go=\"opc.tcp://\"+url+\":\"+port #Binding URL in a OPC UA Webservice format\r\n global client # Global defining a client for accessing it on other functions\r\n client=Client(go) #Calling Client function\r\n \r\n client.connect()\r\n\r\n\r\n print(\"connected\")\r\n\r\n\r\n #Queue\r\n root = client.get_root_node() # Assigning the Client Node ID to a variable\r\n return root", "def test_vfc_client_adapter(self):\n self.assertEqual('U8247.21L.212A64A-V25-C4', self.dwrap.loc_code)\n self.assertEqual(25, self.dwrap.lpar_id)\n self.assertEqual(2, self.dwrap.vios_id)\n self.assertEqual('Client', self.dwrap.side)\n self.assertEqual(4, self.dwrap.lpar_slot_num)\n self.assertEqual(10, self.dwrap.vios_slot_num)\n self.assertEqual(['C05076087CBA0169', 'C05076087CBA0168'],\n self.dwrap.wwpns)", "def display_all_clients(clients):\r\n for client in clients:\r\n print(client)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
remove quotes from a TensorFlow string.
def normalize_text(text):
    text = tf.strings.regex_replace(text,"'(.*)'", r"\1")
    return text
[ "def normalize_text(text):\n text = tf.strings.lower(text)\n text = tf.strings.strip(text)\n # text = tf.strings.regex_replace(text,\"'(.*)'\", r\"\\1\")\n return text", "def no_quote(s):\r\n return s", "def strip_quotes(value: Any) -> Any:\n if isinstance(value, str):\n value = value.strip(\"\\'\\\"\") # behavior like dict-parser, IMPORTANT FOR EVERY STRING-FAKER\n return value", "def parse_tf_example(tf_example_str):\n return tf.parse_single_example(\n serialized=tf_example_str,\n features={\n 'sequence': tf.FixedLenFeature([], dtype=tf.string),\n 'mutation_sequence': tf.FixedLenFeature([], dtype=tf.string),\n 'partition': tf.FixedLenFeature([], dtype=tf.string),\n 'is_viable': tf.FixedLenFeature([], dtype=tf.int64),\n 'num_mutations': tf.FixedLenFeature([], dtype=tf.int64),\n 'viral_selection': tf.FixedLenFeature([], dtype=tf.float32),\n },\n )", "def unquote(self, value):\n if value.startswith('\"') and value.endswith('\"'):\n return value[1:-1].replace('\\\\\"', '\"')\n return value", "def stripQuotes(s):\n if s[0]=='\"':\n return s[1:-1]\n return s", "def deserialize_tf_device(device_name: str) -> str:\n # /replica:0/task:0/device:GPU:0 = _replica-0_task-0_device-GPU-0\n device_name = device_name.replace(\"_\", \"/\")\n device_name = device_name.replace(\"-\", \":\")\n return device_name", "def strip_quotes(input_string):\n if ((input_string[0] == input_string[len(input_string)-1])\n and (input_string[0] in ('\"', \"'\"))):\n return input_string[1:-1]\n return input_string", "def make_string_sparql(prop, string, item_label=None, qualifier=False):\n value = '\\\"%s\\\"' % string.strip('\\'\"')\n return make_sparql_triple(prop, value, item_label, qualifier)", "def _cleanquery(querystring):\n return querystring.replace('\\'', '\\\"')", "def _remove_escapes(str):\n return str.replace(\"\\\\\", \"\")", "def clean_param(param):\n return str(eval_simple_expr(strip_quotes(param)))", "def cleanToken(self, token):\n\t\tif len(token) == 0: return token\n\t\tquotes = \"'\\\"\"\n\t\tif len(token) > 1 and token[0] in quotes and token[-1] in quotes:\n\t\t\ttoken = token[1:-1]\n\t\ttoken = token.replace(\"\\\\\", \"\")\n\t\treturn token", "def _strip_quotes(value):\n if value.startswith('\"') and value.endswith('\"'):\n # This is a quoted string. Remove the first and\n # last quote, then unescape interior quotes.\n value = value[1:-1]\n value = value.replace('\\\\\"', '\"')\n return value", "def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str:\n new_quote = \"'\" if old_quote == '\"' else '\"'\n parts = []\n previous_index = 0\n for start, end in iter_fexpr_spans(fstring):\n parts.append(fstring[previous_index:start])\n parts.append(fstring[start:end].replace(old_quote, new_quote))\n previous_index = end\n parts.append(fstring[previous_index:])\n return \"\".join(parts)", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def preprocess(program_str):\n return clean_spaces(program_str)", "def sanitize(tag):\r\n tag = tag.replace(\"'\",\"''\")\r\n return tag", "def sqlite_string(text: str) -> str:\n return \"'{}'\".format(text.replace(\"'\", \"''\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Authenticate against the Aroio with the specified username and password
def authenticate(
        aroio_name: str,
        aroio_password: str,
        username: str,
        password: str) -> bool:
    if username != aroio_name:
        return False
    if not Authentication.verify_password(plain=password,hashed=aroio_password):
        return False
    return True
[ "def instamojo_auth(self, username, password):\n res = self.instamojo_api_request(method='POST', path='auth/', username=username, password=password)\n if res['success']:\n self.mojo_token = res['token']\n return res", "def login(username, password):", "def __authenticate(self):\n self.__logger.info('Authenticating on controlroom \\'%s\\' with username \\'%s\\'', self.__url, self.__username)\n self.__logger.debug('Using password \\'%s\\'', self.__password)\n auth_data = {\n 'username': self.__username,\n 'password': self.__password\n }\n auth_json = json.dumps(auth_data)\n auth_headers = {'Content-type': 'application/json'}\n try:\n r = requests.post(self.__url + '/' + self.__api['authentication'], data=auth_json, headers=auth_headers)\n except Exception as error:\n self.__logger.error('Unable to send POST request to Control Room: %s', str(error))\n raise\n self.__auth_info['request_code'] = r.status_code\n self.__auth_info['request_text'] = r.text\n if r.status_code != 200:\n self.__logger.info('Invalid response: %d', r.status_code)\n self.__logger.info('Reason: %s', r.reason)\n self.__logger.info('Description: %s', r.text)\n raise errors.Error(r.reason)\n self.__auth_info['request_token'] = r.json()['token']\n return True", "def authenticate_connection(username, password, db=None):\n return CONNECTION.authenticate(username, password, db=db)", "def _radius_auth(self, server, username, password):\n client = self._get_client(server)\n packet = self._get_auth_packet(username, password, client)\n return self._perform_radius_auth(client, packet)", "def login(username, password, host, port=8728):\n transport = create_transport(host, port)\n protocol = API(transport=transport, encoding='ASCII')\n routeros = RouterOS(protocol=protocol)\n\n try:\n sentence = routeros('/login')\n token = sentence[0]['ret']\n encoded = encode_password(token, password)\n routeros('/login', **{'name': username, 'response': encoded})\n except (ConnectionError, TrapError, FatalError):\n transport.close()\n raise\n\n return routeros", "def basicAuth(self):\n # Use basic authentication\n\n authstring = bytes(\"{u}:{p}\".format(u=self.username, p=self.password), \"ascii\")\n\n # Use \"basic\" auth by default\n auth = b64encode(authstring).decode(\"ascii\")\n self.client.credentials(HTTP_AUTHORIZATION=\"Basic {auth}\".format(auth=auth))", "def authenticateAdmin(self):\r\n\r\n try:\r\n endpoint = \"/openam/json/authenticate\"\r\n\r\n headers = {\r\n \"Content-type\": \"application/json\",\r\n \"X-OpenAM-Username\": OpenAM.user,\r\n \"X-OpenAM-Password\": self.password\r\n }\r\n\r\n # Request\r\n conn = httplib.HTTPConnection(self.base)\r\n conn.request(\"POST\", endpoint, \"\", headers)\r\n\r\n # Response\r\n response = conn.getresponse()\r\n return response.status, response.read()\r\n except Exception, e:\r\n if settings.DEBUG:\r\n print_exc()\r\n return 500, str(e)", "def __init__(self, username: str, password: str):\n self.username = username\n self.password = password", "def login(username, password, authenticator, **options):\n\n return get_component(AuthenticationPackage.COMPONENT_NAME).login(username, password,\n authenticator, **options)", "def authenticate(username, password, service=\"login\"):\n\n pam_api = pam.pam()\n return pam_api.authenticate(username, password, service)", "def authenticate():\n\n if settings.user_login == 'read_only':\n log.error('Write access denied for read_only user.')\n sys.exit(1)\n else:\n log.info('Authenticating login: %s' % (settings.user_login))\n if settings.user_login == 
'kaboom':\n password = 'password'\n elif settings.user_login == 'hvm':\n password = settings.hvm_password\n else:\n password = getpass.getpass('password: ')\n\n try:\n payload = {'form.submitted': True,\n 'api.client': True,\n 'return_url': '/api',\n 'login': settings.user_login,\n 'password': password\n }\n r = session.post(settings.api_protocol\n + '://'\n + settings.api_host\n + '/login', data=payload)\n\n if r.status_code == requests.codes.ok:\n\n cookies = session.cookies.get_dict()\n log.debug('Cookies are: %s' %(cookies))\n try:\n write_cookie(cookies)\n return cookies\n except Exception, e:\n log.error('Exception: %s' % e)\n\n else:\n log.error('Authentication failed')\n sys.exit(1)\n\n except Exception, e:\n log.error('Exception: %s' % e)\n log.error('Authentication failed')\n sys.exit(1)", "def _direct_authenticate(self):\n _logger.debug('%s', where_am_i())\n try:\n self.oci_config = self._read_oci_config(fname=self.config_file, profile=self.config_profile)\n self._identity_client = oci_sdk.identity.IdentityClient(self.oci_config)\n except Exception as e:\n _logger.debug('Direct authentication failed: %s', str(e))\n raise Exception(\"Direct authentication failed\") from e", "def authenticate(self, handler, data):\n password = self.passwords.get(data['username'])\n if password == data['password']:\n return data['username']", "def __connect_with_credentials(self):\n\t\tself.client_.username_pw_set(\"xgvutxaa\", \"9cMIpVoL4Ujj\")\n\t\tself.client_.connect('spectacular-pharmacist.cloudmqtt.com',1883,3600)", "def interactive_login():\n solvebio.access_token = None\n solvebio.api_key = None\n client.set_token()\n\n domain, email, password = _ask_for_credentials()\n if not all([domain, email, password]):\n print(\"Domain, email, and password are all required.\")\n return\n\n try:\n response = client.post('/v1/auth/token', {\n 'domain': domain.replace('.solvebio.com', ''),\n 'email': email,\n 'password': password\n })\n except SolveError as e:\n print('Login failed: {0}'.format(e))\n else:\n solvebio.api_key = response['token']\n client.set_token()", "def auth(username, password):\n client = Client(authenticate=False)\n\n try:\n client.login(username, password)\n except LoginError:\n raise PixiError(\"Invalid credentials.\")\n\n config = Config(validate=False)\n config[\"pixi\"][\"refresh_token\"] = client.refresh_token\n config.save()\n\n click.echo(\"Successfully authenticated; token written to config.\")", "def basic_auth(self) -> 'outputs.BasicAuthResponse':\n return pulumi.get(self, \"basic_auth\")", "def __get_auth__(self, username, password, business_unit, vendor, app):\n\t\turl = 'https://api.incontact.com/InContactAuthorizationServer/Token'\n\t\ttoken = b64encode(f'{app}@{vendor}:{business_unit}'.encode()\n\t\t\t).decode()\n\t\theaders = {'Authorization': f'basic {token}'}\n\t\trequest_body = {\n\t\t\t'grant_type': 'password',\n\t\t\t'username': username,\n\t\t\t'password': password,\n\t\t\t'scope': ''\n\t\t}\n\t\tresponse = self.__make_request__(\n\t\t\t'POST',\n\t\t\turl,\n\t\t\theaders=headers,\n\t\t\tjson=request_body\n\t\t)\n\t\tif response.status_code != 200:\n\t\t\traise AuthenticationError(\n\t\t\t\tstatus_code=response.status_code,\n\t\t\t\tresponse_message=response.text\n\t\t\t)\n\n\t\treturn response.json()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns cost of navigating between two nodes
def cost(self, node_1, node_2):
    (x_coord_1, y_coord_1) = node_1
    (x_coord_2, y_coord_2) = node_2
    return abs(x_coord_1 - x_coord_2) + abs(y_coord_1 - y_coord_2)
[ "def cost(self, from_node, to_node):\n return 1", "def new_cost(self, from_node, to_node):\n return from_node.cost + self.dist(from_node, to_node)", "def get_estimated_cost(start_node, destination_node):\r\n delta_x = abs(start_node.x - destination_node.x)\r\n delta_y = abs(start_node.y - destination_node.y)\r\n if delta_x < delta_y:\r\n return math.sqrt(2 * delta_x^2) + delta_y - delta_x\r\n else:\r\n return math.sqrt(2 * delta_y^2) + delta_x - delta_y", "def evaluate_costs(self, id1):\n costs = {node: 9999 for node in self.nodes}\n parents = {node: None for node in self.nodes}\n\n costs[id1] = 0\n\n for node in self.nodes:\n for next_node, weight in self.nodes[node].connections:\n if costs[node] + weight < costs[next_node.node_id]:\n costs[next_node.node_id] = costs[node] + weight\n parents[next_node.node_id] = node\n\n return parents, costs", "def _cost(node_and_neighborhood):\n v, neighborhood = node_and_neighborhood\n return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set)", "def total_cost(node):\n if node.parent:\n return total_cost(node.parent) + 1 \n else:\n return 1", "def cost(node):\n return fixed_cost(node)", "def compute_cost(node: pipelines.Node, node_durations_and_run_times: Dict[tuple, object]) -> float: # object = [float, float]\n path = tuple(node.path())\n if node.cost is None:\n node.cost = (\n max([compute_cost(downstream, node_durations_and_run_times)\n for downstream in node.downstreams] or [0])\n + (node_durations_and_run_times.get(path, [0, 0])[1] or 0))\n\n return node.cost", "def getDistanceBt2Nodes(self, a, b):\n if a < b:\n return self.__d_dict[a][b]\n elif a > b:\n return self.__d_dict[b][a]\n else:\n print 'a =',a,', b =',b,\"; Error! Need two different customers!\"\n return 0", "def heuristic_cost_estimate(self, node):\n # TODO: Return the heuristic cost estimate of a node\n \n d=self.distance(node,self.goal)\n \n return d", "def tour_cost(g: Graph, tour: List[int]) -> float:\n return sum([g.edge_weight(tour[i], tour[i+1]) for i in range(len(tour)-1)])", "def cost_estimate(start, end):\n return euclidean_distance(start[0], start[1], end[0], end[1])", "def cost(self):\n return self._link.cost(self._direction)", "def cost_minus(G, node, nodes):\n\tnodes = set(nodes)\n\tcur = set(G.adj[node])\n\n\treturn len(cur - nodes)", "def path_cost(path):\n if len(path) < 3:\n return 0\n else:\n action, total_cost = path[-2]\n return total_cost", "def total_cost(self):\n return sum(self.edges[e].V for e in self.edges)", "def cost_network(self):\n self.rail.cost_network()\n self.road.cost_network()", "def find_spanning_tree_path(self, from_node, to_node):\r\n # Follow the tree's links back from to_node to from_node.\r\n path_nodes = []\r\n path_links = []\r\n current_node = to_node\r\n while current_node != from_node:\r\n # Add this node to the path.\r\n path_nodes.append(current_node)\r\n\r\n # Find the previous node.\r\n prev_node = current_node.from_node\r\n\r\n # Find the link that leads to current_node.\r\n prev_link = None\r\n for link in prev_node.links:\r\n if link.node1 == current_node:\r\n prev_link = link\r\n break\r\n\r\n # Make sure we found the link.\r\n assert prev_link != None\r\n\r\n # Add the link to the path.\r\n path_links.append(prev_link)\r\n\r\n # Move to the next node.\r\n current_node = prev_node\r\n\r\n # Add the start node.\r\n path_nodes.append(from_node)\r\n\r\n # Reverse the order of the nodes and links.\r\n path_nodes.reverse()\r\n path_links.reverse()\r\n\r\n # Unmark all nodes and links.\r\n self.deselect_nodes()\r\n 
self.deselect_links()\r\n\r\n # Marks the path's nodes and links.\r\n for node in path_nodes:\r\n node.visited = True\r\n for link in path_links:\r\n link.visited = True\r\n\r\n # Calculate the cost of the path.\r\n cost = 0\r\n for link in path_links:\r\n cost += link.cost\r\n\r\n # Return the cost.\r\n return cost, path_nodes, path_links", "def distance(self,n1,n2):\r\n \r\n for (i,j) in self.bfs(n1): #perform a breadth-first search with n1 as starting point\r\n if(i == n2): #look for corresponding tuple\r\n return j #result of a bfs is always shortest path\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the farthest point from a given node
def farthest_node(self, node_1):
    nodes = self.__flood_fill(node_1)
    highest_cost_node = (-1, -1)
    highest_cost = -1
    for node_2 in nodes:
        cost = self.cost(node_1, node_2)
        if cost > highest_cost:
            highest_cost_node = node_2
            highest_cost = cost
    return highest_cost_node
[ "def farthestPoint(pointList, p):\r\n return None", "def get_farthest(self):\n return self.__get_node(len(self.__neighbours) - 1)", "def get_nearest_node(self, point, return_dist=False):\n return ox.get_nearest_node(self.G_risk, point, return_dist=return_dist)", "def closest_point(self,graph, current_point):\n closest_point = None\n dist = 100000\n for p in graph.nodes:\n d = LA.norm(np.array(p) - np.array(current_point))\n if d < dist:\n closest_point = p\n dist = d\n return closest_point", "def _closest_node(node, nodes):\n\n if len(nodes) == 1:\n nodes = np.reshape(nodes, [-1, 1])\n\n kdtree = KDTree(nodes)\n distance, index = kdtree.query(node)\n\n return index, distance", "def rightmost(pts):\n return withmax(xcoord, pts)", "def furthest(reference,points):\n max_dis = -float('inf')\n for point in points:\n dis = distance(reference,point)\n if dis > max_dis:\n max_dis = dis\n closest_point = point\n return closest_point, max_dis", "def find_closest_node(self, state):\n # list of nodes from previous time step\n node_min = self.root_node\n dist_min = self.state_sampler.distance(node_min.state, state)\n # find the node in last_nodes_list with minimal distance to state\n for n in self.node_list:\n dist = self.state_sampler.distance(n.state, state)\n if dist < dist_min:\n dist_min = dist\n node_min = n\n return node_min", "def best_last_node(self):\n # rospy.loginfo(\"IN best_last_node func\")\n if self.goal_node in self.nodes:\n return self.goal_node\n\n distances_to_goal = [\n self.euclidian_norm(self.goal_node, node) for node in self.nodes\n ]\n # print distances_to_goal\n goal_indices = [\n distances_to_goal.index(distance)\n for distance in distances_to_goal\n if distance <= self.max_step_size\n ]\n if len(goal_indices) == 0:\n self.goal_node.parent = self.get_nearest_node(self.goal_node)\n return self.goal_node\n\n min_cost = min([self.nodes[i].cost for i in goal_indices])\n for i in goal_indices:\n if self.nodes[i].cost == min_cost:\n return self.nodes[i]\n # return None", "def nearest_node(point, nodes,sindex): \n return nearest(point, nodes,sindex)", "def get_closest(self, point):\n distance = (self.dpath[:, 1] - point[1]) ** 2 + (self.dpath[:, 0] - point[0]) ** 2\n i = np.where(distance == distance.min())\n return i[0][0]", "def get_nearest_node(self, n):\n distances = [self.euclidian_norm(n, node) for node in self.nodes]\n # print(distances)\n near_ind = distances.index(min(distances))\n return self.nodes[near_ind]", "def get_at(self, point):\n kdnode, distance = self.entities.search_nn(point)\n nearest_neighbor = kdnode.data\n\n if self.world.terrain.is_equivalent_point(nearest_neighbor.pos, point):\n return nearest_neighbor\n else:\n return None", "def best_child(self, node):\n if node.player_to_move == 1:\n cmp = max(node.children, key=attrgetter(\"q\"))\n else:\n cmp = min(node.children, key=attrgetter(\"q\"))\n return choice([n for n in node.children if n.q == cmp.q])", "def find_nearest_set_point(self, p):\n #print \"I'm in permutations_by_transpositions.py in find_nearest_set_point\"\n # converting point\n c = [-2 * x for x in p]\n return self.find_min_of_linear_function(c)\n #qres.put_nowait(self.find_min_of_linear_function(c))", "def GetMaxPoint(self):\n ...", "def get_nearest_edge_of_node(self, node):\n self._is_right_node_name(node)\n\n all_edges = self.get_edges_of_node(node)\n nearest_edge = min(all_edges)\n another_node = nearest_edge.get_another_node(node)\n\n return another_node, nearest_edge", "def near_nodes(self, node):\n nnode = self.tree.len + 1\n r = 
ceil(5.5*np.log(nnode))\n return self.tree.k_nearest(node,r)", "def return_right_point(points_list: List[tuple]) -> tuple:\n return max(points_list)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure node is in bounds
def is_node_in_bounds(self, node):
    (x_coord, y_coord) = node
    if x_coord < 0 or x_coord >= self.width:
        return False
    elif y_coord < 0 or y_coord >= self.height:
        return False
    else:
        return True
[ "def CheckBounds(self, ):\n ...", "def outside_arena():\r\n return not (0 < node.x < bounds[0] and 0 < node.y < bounds[1])", "def is_out_of_bounds(self, agent):\n x = agent.x\n y = agent.y\n\n if x < 0 or x >= self.width:\n return True\n if y < 0 or y >= self.height:\n return True\n return False", "def _enforce_boundaries(self, pos):\n xlim = self._grid.shape[0] - 1\n ylim = self._grid.shape[1] - 1\n x, y = pos\n x = max([0, x])\n x = min([xlim, x])\n y = max([0, y])\n y = min([ylim, y])\n return (x, y)", "def _inBoundsHorizontally(self): \r\n\t\tx, y = self._getPos()\r\n\t\treturn BALL_RADIUS <= x < (self.containerWidth - BALL_RADIUS)", "def is_bound(self):\n # TODO: make this a function\n\n return len(self.edges()) > 0", "def check_bounds(next_rect, snek):\r\n if next_rect[0] < 0 or next_rect[1] < 0 or next_rect[0] >= 35 or next_rect[1] >= 30 or next_rect in snek:\r\n return False\r\n return True", "def _bounds_violated(self,loc):\n if self.x_bounds[0]: \n if (self.x_bounds[0] >= loc[0]): return True\n if self.x_bounds[1]: \n if (loc[0] >= self.x_bounds[1]): return True\n if self.y_bounds[0]: \n if (self.y_bounds[0] >= loc[1]): return True\n if self.y_bounds[1]: \n if (loc[1] >= self.y_bounds[1]): return True\n if self.z_bounds[0]: \n if (self.z_bounds[0] >= loc[2]): return True\n if self.z_bounds[1]: \n if (loc[2] >= self.z_bounds[1]): return True\n else:\n return False", "def in_bounds(self, t):\n return And(self.x(t) >= 0, self.x(t) < self.grid.width,\n self.y(t) >= 0, self.y(t) < self.grid.height)", "def isBound(self):\n return self.__bound > 0", "def out_of_bounds(position, bounds):\n return (position[0] < 0 or position[0] >= bounds[0] \n or position[1] < 0 or position[1] >= bounds[1])", "def check_bounds(self):\n if np.isnan(self.value).all():\n return\n if np.isnan(self.bounds).all():\n return\n if np.bitwise_or(self.value < self.bounds[0], self.value > self.bounds[-1]).any(): #pylint: disable=unsubscriptable-object\n raise ValueError(\"Value outside bounds: %.s [%s,%s]\" % (self.value, self.bounds[0], self.bounds[-1])) #pylint: disable=unsubscriptable-object", "def in_maze(self,node):\r\n return (0 <= node[0] < self.size) and (0 <= node[1] < self.size)", "def in_bounds(p):\n x, y = p\n return x >= 0 and x < SCREEN_WIDTH and y >= 0 and y < SCREEN_HEIGHT", "def boundary_check(self):\r\n if self.position[0] < 0:\r\n self.velocity[0] *= -.8\r\n self.position[0] = 0\r\n elif self.position[0] > size[0] - self.offset:\r\n self.velocity[0] *= -.8\r\n self.position[0] = size[0] - self.offset\r\n if self.position[1] < 0:\r\n self.velocity[1] *= -.8\r\n self.position[1] = 0\r\n elif self.position[1] > size[1] - self.offset:\r\n self.velocity[1] *= -.8\r\n self.position[1] = size[1] - self.offset", "def test_out_of_bounds_nth_node(self):", "def test_empty_point_bounds():\n p = Point()\n assert p.bounds == ()", "def position_in_bounds(self, position):\n if position[0] < 0 or position[0] >= self.n:\n return False\n if position[1] < 0 or position[1] >= self.m:\n return False\n return True", "def in_bounds(self, coord):\n coord_x = coord[0]\n coord_y = coord[1]\n return (0 <= coord_x < self.dim\n and 0 <= coord_y < self.dim)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A description for the portal.
def portal_description(self) -> Optional[str]:
    return pulumi.get(self, "portal_description")
[ "def Description(self):\n portal_transforms = getToolByName(self, 'portal_transforms')\n data = portal_transforms.convertTo('text/plain', self.getBiography(), mimetype='text/html')\n if data:\n descr = data.getData()\n return '%s...' % (' '.join(descr.split(' ')[:140]))", "def description_detaillee(self):\n return self.__description_detaillee", "def description(self):\n if self._description is None:\n self._description = CCAPI.get_product(self.id).description\n return self._description", "def print_desc(self):\n print(self.description)\n return", "def spn_description(self) -> str:\n return self._spn_description", "def description_html(self):\n return self.description", "def get_descripcion(self):\n return self.descripcion", "def get_description(self):\n description = self.description\n if not description:\n description = getattr(self.page.specific, 'excerpt', None)\n\n return description", "def description_courte(self):\n return self.__description_courte", "def describe_item(self):\n if self.name_item is not None:\n print(\"\\nLook! It seems there is \" + self.desc_item + \"!\")\n\n else:\n print(\"\")", "def _description_string(self) -> str:", "def description(self):\r\n return self._agent_properties.get('AgentProperties', {}).get('userDescription')", "def help_description():\n return \"Help Description // To fill\"", "def description(self):\n return ServiceManager().get_service_display_name(self.name)", "def installable_description(self):", "def description(self):\n return self.data_hash['activity']['description']", "def description(self):\n return type_get_description(self)", "def description(self) -> str:\n return 'Your Goal is a Perimeter Goal, COLOUR: ' + \\\n colour_name(self.colour)", "def get_view_description(self, html=False):\n func = self.settings.VIEW_DESCRIPTION_FUNCTION\n return func(self.__class__, getattr(self, '_request', None), html)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The ID of the portal.
def portal_id(self) -> Optional[str]:
    return pulumi.get(self, "portal_id")
[ "def id(self) -> int:\n return self._context.id", "def id(self):\n return self.getAttribute('id')", "def get_id(self):\n return self._hostID", "def getPortletId(self):\n random_id = md5.new()\n random_id.update(str(time.time()))\n return 'portletbanners-%s' % random_id.hexdigest()", "def get_id(self):\n return self.data['id']", "def id(self):\n return self._dptId", "def id(self):\n if not self._id:\n self._id = self._layer.GetLayerId()\n return self._id", "def id(self):\r\n if not hasattr(self, '_id'):\r\n raise MissingID\r\n return self._id", "def get_id(self):\n return self.data[self.system_idx][\"id\"]", "def get_id(self):\n return self[\"ds_id\"]", "def id(self):\n return id(self._getobj_())", "def url_id(self):\n return self.id", "def get_id(self) -> str:\r\n return self.resource_id", "def get_ad_id(self):\n return self.get_item(conf.AD_ID_KEY)", "def id(self):\n return self.__person_id", "def GetPageId(self):\n return self.id", "def portal_name(self) -> Optional[str]:\n return pulumi.get(self, \"portal_name\")", "def get_id(self):\n\n url = self.base_url + \"/bioreactor/0\"\n response = rest_get_json(url)\n id = None\n try:\n id = response[\"id\"]\n except KeyError:\n pass # log message\n\n return id", "def unique_id(self):\n return self.id" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A friendly name for the portal.
def portal_name(self) -> Optional[str]:
    return pulumi.get(self, "portal_name")
[ "def get_name():\n return _(strings.bot_title)", "def title(self):\n msg = __(u\"Evenementiels portlet\")\n return self.portlet_title or msg", "def full_name(self):\n return '{} ({})'.format(\n self.name,\n self.location,\n )", "def title(self):\n msg = __(u\"Boutique portlet\")\n return self.portlet_title or msg", "def short_name(self):\n return self._name", "def display_name(self) -> str:\n raise NotImplementedError()", "def friendly_name(self) -> str:\n return self.device_info.friendly_name", "def get_friendly_name(self):\n try:\n return self.unit.friendly_name + ' '\n except (AttributeError):\n return ''", "def name(self):\n return self.get_name('en_US')", "def get_name(self):\n return self.__name_army", "def name(self):\n return self._name.lower()", "def portal_description(self) -> Optional[str]:\n return pulumi.get(self, \"portal_description\")", "def sitename(self):\n return SelfCallString(self.__str__())", "def name(self):\n return self._pr.title", "def breadcrumb_detail_name(self):\n return self.name", "def get_nice_fullname(self):\n if self.Fullname and len(self.Fullname) > 2:\n return self.Fullname\n return self.get_nice_name()", "def display_name_field(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"display_name_field\")", "def name(self):\n return \"Sauna Light\"", "def name_func(self):\n return('The major your are asking about is' + ': '+ self.name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sa phase error. DIN/ISO Sa.
def Sa(self):
    return Sa(self.phase)
[ "def try_phase():\n global init_simp, smp_trace,aigs\n n = n_phases()\n print 'Phases = %d'%n\n## if ((n == 1) or (n_ands() > 45000) or init_simp == 0):\n if ((n == 1) or (n_ands() > 60000)):\n return False\n## init_simp = 0\n res = a_trim()\n## print hist\n print 'Trying phase abstraction - Max phase = %d'%n\n abc('w %s_phase_temp.aig'%f_name)\n na = n_ands()\n nl = n_latches()\n ni = n_pis()\n no = n_pos()\n z = ok_phases(n) # factors n into prime factors\n print z,\n if len(z) == 1:\n return False\n #p = choose_phase()\n p = z[1]\n abc('phase -F %d'%p)\n if no == n_pos(): #nothing happened because p is not mod period\n print 'Phase %d is incompatible'%p\n abc('r %s_phase_temp.aig'%f_name)\n if len(z)< 3:\n return False\n else:\n p = z[2]\n #print 'Trying phase = %d: '%p,\n abc('phase -F %d'%p)\n if no == n_pos(): #nothing happened because p is not mod period\n print 'Phase %d is incompatible'%p\n abc('r %s_phase_temp.aig'%f_name)\n return False\n else:\n smp_trace = smp_trace + ['phase -F %d'%p]\n abc('r %s_phase_temp.aig'%f_name)\n abc('&get;&frames -o -F %d;&scl;&put'%p)\n else:\n abc('r %s_phase_temp.aig'%f_name)\n abc('&get;&frames -o -F %d;&scl;&put'%p)\n smp_trace = smp_trace + ['phase -F %d'%p]\n print 'Simplifying with %d phases: => '%p,\n smp_trace = smp_trace + ['simplify(1)']\n simplify(1)\n## res = a_trim() #maybe we don't need this because rel_cost uses n_real_inputs\n ps()\n cost = rel_cost([ni,nl,na])\n print 'New relative cost = %f'%(cost)\n if cost < -.01:\n abc('w %s_phase_temp.aig'%f_name)\n if ((n_latches() == 0) or (n_ands() == 0)):\n return True\n if n_phases() == 1: #this bombs out if no latches. Need to see if any more phases to be tried.\n aigs_pp('push','phase') #this code can be simplified - \n print 'n_phases = %d'%n_phases()\n return False\n else:\n aigs_pp('push','phase')\n result = try_phase()\n return result\n elif len(z)>2: #Try the next eligible phase.\n abc('r %s_phase_temp.aig'%f_name)\n if p == z[2]: #already tried this\n return False\n p = z[2]\n print 'Trying phase = %d: => '%p,\n abc('phase -F %d'%p)\n if no == n_pos(): #nothing happened because p is not mod period\n print 'Phase = %d is not compatible'%p\n return False\n abc('r %s_phase_temp.aig'%f_name)\n abc('&get;&frames -o -F %d;&scl;&put'%p)\n smp_trace = smp_trace + ['phase -F %d'%p]\n print 'Simplify with %d phases: '%p,\n simplify(1)\n## res =a_trim() #maybe we don't need this because rel_cost uses n_real_inputs\n cost = rel_cost([ni,nl,na])\n print 'New relative cost = %f'%(cost)\n if cost < -.01:\n print 'Phase abstraction with %d phases obtained:'%p,\n print_circuit_stats()\n abc('w %s_phase_temp.aig'%f_name)\n if ((n_latches() == 0) or (n_ands() == 0)):\n return True\n if n_phases() == 1: # this bombs out if no latches\n return True\n else:\n aigs_pp('push','phase')\n result = try_phase()\n return result\n else:\n smp_trace = smp_trace + ['de_phase']\n abc('r %s_phase_temp.aig'%f_name)\n return False", "def E_sos(a, H, dc, v_sound):\n M_matrix = M(a, H)\n\n try:\n M_inv = np.linalg.inv(M_matrix)\n except:\n M_inv = np.zeros((4, 4))\n\n T = R(a, H, model=\"3D\") / v_sound\n print(T)\n\n return np.matmul(M_inv, T) * dc # This is a 4x1 array with all the errors in position and that final Rm", "def test_enforce_phase():\n\n # Check that all remaining pairs in the mc DATA set are phase encoding.\n assert np.all(DATA.mc['encodingepisodes'] > 0)\n\n # Check that all remaining pairs in the stmc DATA set are in the mc DATA set.\n phasepair = DATA.mc[DATA.mc['encodingepisodes'] > 0]['pair']\n 
assert np.all([1 if pair in list(phasepair) else 0 for pair in DATA.stmc['pair']])", "def on_phase_error(ctx: ExecutionContext, err: str):\n # Set info/state.\n phase_state = factory.create_state(ExecutionAspect.PHASE, ctx, status=ExecutionStatus.ERROR)\n\n # Update cache.\n cache.orchestration.set_state(phase_state)\n cache.orchestration.update_phase_info(ctx, ExecutionStatus.ERROR)\n\n # Inform.\n logger.log_error(f\"WFLOW :: {ctx.run_type} :: {ctx.run_index_label} :: {ctx.phase_index_label} -> unhandled error\")\n logger.log_error(err)", "def exempi_error(self):\n filename = pkg_resources.resource_filename(__name__,\n \"samples/sig05-002a.xmp\")\n xmpfile = XMPFiles()\n xmpfile.open_file(filename, open_forupdate = True )\n xmp = xmpfile.get_xmp()\n xmpfile.can_put_xmp( xmp )", "def test_invalid_sid(self):\n invalid_sid = \"z\"\n self.assertFalse(stage_one(self.ccd, invalid_sid))", "def test_sanger_to_solexa(self):\n #The point of this test is the writing code doesn't actually use the\n #solexa_quality_from_phred function directly. For speed it uses a\n #cached dictionary of the mappings.\n seq = \"N\"*94\n qual = \"\".join(chr(33+q) for q in range(0,94))\n expected_sol = [min(62,int(round(QualityIO.solexa_quality_from_phred(q)))) \\\n for q in range(0,94)]\n in_handle = StringIO(\"@Test\\n%s\\n+\\n%s\" % (seq,qual))\n out_handle = StringIO(\"\")\n #Want to ignore the data loss warning\n #(on Python 2.6 we could check for it!)\n warnings.simplefilter('ignore', UserWarning)\n SeqIO.write(SeqIO.parse(in_handle, \"fastq-sanger\"),\n out_handle, \"fastq-solexa\")\n warnings.resetwarnings()\n out_handle.seek(0)\n record = SeqIO.read(out_handle, \"fastq-solexa\")\n self.assertEqual(str(record.seq), seq)\n self.assertEqual(record.letter_annotations[\"solexa_quality\"],\n expected_sol)", "def get_SARAS(Dl=500, Dh=1, Dm=1):\n\tN = 10\n\tfreq_low_low = np.linspace(50, 300, N)\n\tfreq_low = np.linspace(301, 2000, N)\n\tfreq_high = np.linspace(2001, 25000, N)\n\n\t''' CONTINUUM SPECIFICATION '''\n\tSARAS_cont_low_low = -17.2708 * np.log10(freq_low_low)-192.0714\n\tSARAS_cont_low = -17.2708 * np.log10(freq_low)-192.0714\n\tSARAS_cont_high = -0.065676 * np.log10(freq_high)-248.8661\n\n\t''' SPECTRAL LINE SPECIFICATION '''\n\tSARAS_spec_low_low = SARAS_cont_low_low + 15.\n\tSARAS_spec_low = SARAS_cont_low + 15.\n\tSARAS_spec_high = SARAS_cont_high + 15.\n\n\t''' RBW's '''\n\tRBW_cont_low_low = 10.*np.log10((1./100.) * freq_low_low * 1.E6)\n\tRBW_cont_low = 10.*np.log10((1./100.) * freq_low * 1.E6)\n\tRBW_cont_high = 10.*np.log10((1./100.) * freq_high * 1.E6)\n\n\tRBW_spec_low_low = 10.*np.log10((0.001/100.) * freq_low_low * 1.E6)\n\tRBW_spec_low = 10.*np.log10((0.001/100.) * freq_low * 1.E6)\n\tRBW_spec_high = 10.*np.log10((0.001/100.) 
* freq_high * 1.E6)\n\n\t''' PATH LOSS '''\n\tc0 = 3E8\n\tif Dl != 0:\n\t\tD = Dl\n\t\tpathloss_low_low = 10 * np.log10(((4*np.pi*D)/(c0/(freq_low_low * 1.E6)))**2)\n\telse:\n\t\tpathloss_low_low = 0\n\n\tif Dh != 0:\n\t\tD = Dh\n\t\tpathloss_low = 10 * np.log10(((4*np.pi*D)/(c0/(freq_low * 1.E6)))**2)\n\t\tpathloss_high = 10 * np.log10(((4*np.pi*D)/(c0/(freq_high * 1.E6)))**2)\n\n\t\t''' PSD THRESHOLD LEVELS '''\n\t\tPSD_cont_thresh_low_low = SARAS_cont_low_low + pathloss_low_low\n\t\tPSD_cont_thresh_low = SARAS_cont_low + pathloss_low\n\t\tPSD_cont_thresh_high = SARAS_cont_high + pathloss_high\n\n\t\tPSD_spec_thresh_low_low = SARAS_spec_low_low + pathloss_low_low\n\t\tPSD_spec_thresh_low = SARAS_spec_low + pathloss_low\n\t\tPSD_spec_thresh_high = SARAS_spec_high + pathloss_high\n\telse:\n\t\t''' PSD THRESHOLD LEVELS '''\n\t\tPSD_cont_thresh_low_low = SARAS_cont_low_low + pathloss_low_low\n\t\tPSD_cont_thresh_low = SARAS_cont_low \n\t\tPSD_cont_thresh_high = SARAS_cont_high\n\n\t\tPSD_spec_thresh_low_low = SARAS_spec_low_low + pathloss_low_low\n\t\tPSD_spec_thresh_low = SARAS_spec_low\n\t\tPSD_spec_thresh_high = SARAS_spec_high\n\n\n\t''' E-FIELD THRESHOLD LEVELS '''\n\t''' E-field at distance Dm '''\n\n\tDm = 10.\n\tE_cont_low_low = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_low_low)/10.)*0.001) * ((1./100.) * freq_low_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_cont_low = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_low)/10.)*0.001) * ((1./100.) * freq_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_cont_high = 20.*np.log10(np.sqrt(((10.**((PSD_cont_thresh_high)/10.)*0.001) * ((1./100.) * freq_high * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\n\tE_spec_low_low = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_low_low)/10.)*0.001) * ((0.001/100.) * freq_low_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_spec_low = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_low)/10.)*0.001) * ((0.001/100.) * freq_low * 1.E6) *377.) / (4*np.pi*Dm**2.)) / 1E-6)\n\tE_spec_high = 20.*np.log10(np.sqrt(((10.**((PSD_spec_thresh_high)/10.)*0.001) * ((0.001/100.) * freq_high * 1.E6) *377.) 
/ (4*np.pi*Dm**2.)) / 1E-6)\n\n\tfreq = []\n\tfreq.extend(freq_low_low)\n\tfreq.extend(freq_low)\n\tfreq.extend(freq_high)\n\n\tE_cont_threshold = []\n\tfor a in range(0, len(E_cont_low_low)):\n\t\tE_cont_threshold.append(E_cont_low_low[a])\n\tfor a in range(0, len(E_cont_low)):\n\t\tE_cont_threshold.append(E_cont_low[a])\n\tfor a in range(0, len(E_cont_high)):\n\t\tE_cont_threshold.append(E_cont_high[a])\n\n\tE_spec_threshold = []\n\tfor a in range(0, len(E_spec_low_low)):\n\t\tE_spec_threshold.append(E_spec_low_low[a])\n\tfor a in range(0, len(E_spec_low)):\n\t\tE_spec_threshold.append(E_spec_low[a])\n\tfor a in range(0, len(E_spec_high)):\n\t\tE_spec_threshold.append(E_spec_high[a])\n\n\tP_cont_threshold = []\n\tfor a in range(0, len(RBW_cont_low_low)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_low_low[a] + RBW_cont_low_low[a])\n\tfor a in range(0, len(RBW_cont_low)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_low[a] + RBW_cont_low[a])\n\tfor a in range(0, len(RBW_cont_high)):\n\t\tP_cont_threshold.append(PSD_cont_thresh_high[a] + RBW_cont_high[a])\n\n\tP_spec_threshold = []\n\tfor a in range(0, len(RBW_spec_low_low)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_low_low[a] + RBW_spec_low_low[a])\n\tfor a in range(0, len(RBW_spec_low)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_low[a] + RBW_spec_low[a])\n\tfor a in range(0, len(RBW_spec_high)):\n\t\tP_spec_threshold.append(PSD_spec_thresh_high[a] + RBW_spec_high[a])\n\n\treturn freq, E_cont_threshold, E_spec_threshold, P_cont_threshold, P_spec_threshold", "def test_phase_estimated(self, phase):\n estimates = []\n wire_range = range(2, 10)\n\n for wires in wire_range:\n dev = qml.device(\"default.qubit\", wires=wires)\n m = qml.RX(phase, wires=0).matrix\n target_wires = [0]\n estimation_wires = range(1, wires)\n\n with qml.tape.QuantumTape() as tape:\n # We want to prepare an eigenstate of RX, in this case |+>\n qml.Hadamard(wires=target_wires)\n\n qml.templates.QuantumPhaseEstimation(\n m, target_wires=target_wires, estimation_wires=estimation_wires\n )\n qml.probs(estimation_wires)\n\n tape = tape.expand()\n res = tape.execute(dev).flatten()\n initial_estimate = np.argmax(res) / 2 ** (wires - 1)\n\n # We need to rescale because RX is exp(- i theta X / 2) and we expect a unitary of the\n # form exp(2 pi i theta X)\n rescaled_estimate = (1 - initial_estimate) * np.pi * 4\n estimates.append(rescaled_estimate)\n\n # Check that the error is monotonically decreasing\n for i in range(len(estimates) - 1):\n err1 = np.abs(estimates[i] - phase)\n err2 = np.abs(estimates[i + 1] - phase)\n assert err1 >= err2\n\n # This is quite a large error, but we'd need to push the qubit number up more to get it\n # lower\n assert np.allclose(estimates[-1], phase, rtol=1e-2)", "def _advection_phase(self):\n self.cip2d.run(\n self.u,\n self.dudx,\n self.dudy,\n self.u,\n self.v,\n self.wet_pwet_horizontal_links,\n self.horizontal_up_links,\n self.vertical_up_links,\n self.grid.dx,\n self.dt_local,\n out_f=self.u_temp,\n out_dfdx=self.dudx_temp,\n out_dfdy=self.dudy_temp,\n )\n\n self.cip2d.run(\n self.v,\n self.dvdx,\n self.dvdy,\n self.u,\n self.v,\n self.wet_pwet_vertical_links,\n self.horizontal_up_links,\n self.vertical_up_links,\n self.grid.dx,\n self.dt_local,\n out_f=self.v_temp,\n out_dfdx=self.dvdx_temp,\n out_dfdy=self.dvdy_temp,\n )\n\n self.cip2d.run(\n self.h,\n self.dhdx,\n self.dhdy,\n self.u_node,\n self.v_node,\n self.wet_pwet_nodes,\n self.horizontal_up_nodes,\n self.vertical_up_nodes,\n self.grid.dx,\n self.dt_local,\n 
out_f=self.h_temp,\n out_dfdx=self.dhdx_temp,\n out_dfdy=self.dhdy_temp,\n )\n\n for i in range(self.number_gclass):\n self.cip2d.run(\n self.Ch_i[i, :],\n self.dChdx_i[i, :],\n self.dChdy_i[i, :],\n self.u_node,\n self.v_node,\n self.wet_pwet_nodes,\n self.horizontal_up_nodes,\n self.vertical_up_nodes,\n self.grid.dx,\n self.dt_local,\n out_f=self.Ch_i_temp[i, :],\n out_dfdx=self.dChdx_i_temp[i, :],\n out_dfdy=self.dChdy_i_temp[i, :],\n )\n\n if self.model == \"4eq\":\n self.cip2d.run(\n self.Kh,\n self.dKhdx,\n self.dKhdy,\n self.u,\n self.v,\n self.wet_pwet_links,\n self.horizontal_up_links,\n self.vertical_up_links,\n self.grid.dx,\n self.dt_local,\n out_f=self.Kh_temp,\n out_dfdx=self.dKhdx_temp,\n out_dfdy=self.dKhdy_temp,\n )\n\n # remove abnormal values\n self._remove_abnormal_values()\n\n # update gradient terms\n self.update_gradients2()\n\n # update values after calculating advection terms\n # map node values to links, and link values to nodes.\n self.update_values()\n map_values(\n self,\n h=self.h,\n dhdx=self.dhdx,\n dhdy=self.dhdy,\n u=self.u,\n dudx=self.dudx,\n dudy=self.dudy,\n v=self.v,\n dvdx=self.dvdx,\n dvdy=self.dvdy,\n Ch_i=self.Ch_i,\n dChdx_i=self.dChdx_i,\n dChdy_i=self.dChdy_i,\n Ch=self.Ch,\n eta=self.eta,\n h_link=self.h_link,\n u_node=self.u_node,\n v_node=self.v_node,\n Ch_link=self.Ch_link,\n U=self.U,\n U_node=self.U_node,\n )\n update_up_down_links_and_nodes(self)", "def phase_spherical_variance():\n pass", "def test_adcps_error(self):\n with open(os.path.join(RESOURCE_PATH, 'node59p1_error.adcps.dat')) as stream_handle:\n\n parser = AdcpsJlnSioParser(self.config, stream_handle, self.exception_callback)\n # 2 records with error messages in them\n particles = parser.get_records(2)\n # make sure no particles were returned for the failure messages\n self.assertEqual(len(particles), 0)\n\n self.assertIsInstance(self.exception_callback_value[0], RecoverableSampleException)\n self.assertIsInstance(self.exception_callback_value[1], RecoverableSampleException)", "def phase_data(self):\n self.instrument.write(\"PHAS\") # Set display to phase format\n # time.sleep(5)\n try:\n start_time = time.perf_counter_ns()\n data_degree = self.instrument.query(\"OUTPFORM\") # Output format is a list of form (degrees, 0)\n end_time = time.perf_counter_ns()\n total_time = (end_time - start_time)/(10 ** 9)\n # print(f\"Phase data time in s: {total_time}\")\n except Exception as e:\n print(e)\n return False\n return data_degree", "def pose_error(self):\n if self.phase_number == 0:\n return self.hold_point - self.chaser_position\n elif self.phase_number == 1:\n return self.docking_port - self.chaser_position", "def _sandhiPrepException(self):\n if self.Pada1 in sandhi_exceptions_set:\n self.Exception = True\n else:\n self.Exception = False\n return self.Exception", "def test_flat_invertible_phase_space(self):\n \n E_cm = 5000.0\n \n # Try to run the above for a 2->8.\n my_PS_generator = PS.FlatInvertiblePhasespace(\n [0.]*2, [100. + 10.*i for i in range(8)],\n beam_Es =(E_cm/2., E_cm/2.), beam_types=(0, 0) )\n # Try to run the above for a 2->1. 
\n # my_PS_generator = FlatInvertiblePhasespace([0.]*2, [5000.0])\n \n random_variables = [random.random() for _ in range(my_PS_generator.nDimPhaseSpace())]\n\n# import time\n# start = time.time()\n# n_loops = 1\n# for _ in range(n_loops):\n momenta, wgt = my_PS_generator.generateKinematics(E_cm, random_variables)\n# end = time.time()\n# misc.sprint('Time per call',(end-start)/float(n_loops))\n #print \"\\n =========================\"\n #print \" || PS generation ||\"\n #print \" =========================\" \n #print \"\\nRandom variables :\\n\",random_variables\n #print \"\\n%s\\n\"%momenta.__str__(n_initial=my_PS_generator.n_initial)\n #print \"Phase-space weight : %.16e\\n\"%wgt,\n \n variables_reconstructed, wgt_reconstructed = \\\n my_PS_generator.invertKinematics(E_cm, momenta)\n\n #print \"\\n =========================\"\n #print \" || Kinematic inversion ||\"\n #print \" =========================\"\n #print \"\\nReconstructed random variables :\\n\",variables_reconstructed\n differences = [abs(variables_reconstructed[i]-random_variables[i]) \n for i in range(len(variables_reconstructed))]\n\n self.assertLess(max(differences[i]/random_variables[i] for i in range(len(differences))), 1.0e-10)\n self.assertLess(abs(wgt-wgt_reconstructed)/abs(wgt), 1.0e-10)\n \n #print \"Reconstructed weight = %.16e\"%wgt_reconstructed\n #if differences:\n # print \"\\nMax. relative diff. in reconstructed variables = %.3e\"%\\\n # max(differences[i]/random_variables[i] for i in range(len(differences)))\n #print \"Rel. diff. in PS weight = %.3e\\n\"%((wgt_reconstructed-wgt)/wgt)", "def test_flat_invertible_phase_space(self):\n \n E_cm = 5000.0\n \n # Try to run the above for a 2->8.\n my_PS_generator = PS.FlatInvertiblePhasespace(\n [0.]*2, [100. + 10.*i for i in range(8)],beam_Es =(E_cm/2.,E_cm/2.), beam_types=(0,0))\n # Try to run the above for a 2->1. \n # my_PS_generator = FlatInvertiblePhasespace([0.]*2, [5000.0])\n \n random_variables = [random.random() for _ in range(my_PS_generator.nDimPhaseSpace())]\n \n# import time\n# start = time.time()\n n_loops = 1\n for _ in range(n_loops):\n momenta, wgt = my_PS_generator.generateKinematics(E_cm, random_variables)\n# end = time.time()\n# misc.sprint('Time per call',(end-start)/float(n_loops))\n if self.verbosity > 1:\n print \"\\n =========================\"\n print \" || PS generation ||\"\n print \" =========================\"\n print \"\\nRandom variables :\\n\",random_variables\n print \"\\n%s\\n\"%momenta.__str__(n_initial=my_PS_generator.n_initial)\n print \"Phase-space weight : %.16e\\n\" % wgt\n \n variables_reconstructed, wgt_reconstructed = \\\n my_PS_generator.invertKinematics(E_cm, momenta)\n \n if self.verbosity > 1:\n print \"\\n =========================\"\n print \" || Kinematic inversion ||\"\n print \" =========================\"\n print \"\\nReconstructed random variables :\\n\", variables_reconstructed\n print \"\\nReconstructed weight : %.16e\\n\" % wgt_reconstructed\n differences = [abs(variables_reconstructed[i]-random_variables[i])\n for i in range(len(variables_reconstructed))]\n\n self.assertLess(max(differences[i]/random_variables[i] for i in range(len(differences))), 1.0e-10)\n self.assertLess(abs(wgt-wgt_reconstructed)/abs(wgt), 1.0e-10)\n \n #if differences:\n # print \"\\nMax. relative diff. in reconstructed variables = %.3e\"%\\\n # max(differences[i]/random_variables[i] for i in range(len(differences)))\n #print \"Rel. diff. 
in PS weight = %.3e\\n\"%((wgt_reconstructed-wgt)/wgt)", "def calc_error(self):\n\t\tt0 = time.time() \n\t\ttest_in = self.ds.test_in.data.get_value()\n\t\ttest_obs = self.ds.test_obs.data.get_value()\n\t\tself.SGDlogger.info(\"Time loading in shared variables: {:.3f}\".format(time.time() - t0))\n\t\tt0 = time.time() \n\t\terr = self.error_fn(test_in, test_obs)\n\t\tself.SGDlogger.info(\"Current error: {}\".format(err))\n\t\tself.SGDlogger.info(\"Time calculating error: {:.3f}\".format(time.time() -t0))\n\t\treturn err", "def test_fail_qasm_file(self):\n self.assertRaises(QISKitError,\n QuantumCircuit.from_qasm_file, \"\")", "def error(self):\n\n self._set_status(AnalysisTask.STATUS_ERROR)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Standard deviation of phase error.
def std(self): return std(self.phase)
[ "def stddev(self):\n m = self.mean()\n n = np.sum(self.counts)\n dx = self.axis().center - m \n return np.sqrt(np.sum(self.counts*dx**2)/n)", "def calculate_stdev(self):\n\n return np.array(self.data).std()", "def std(signal):\n return np.std(signal)", "def std(self) -> float:\n return math.sqrt(self.var())", "def stddev(data):\n counts = len(data)\n ave = average(data)\n total = sum(data * data)\n return (total - ave**2) / counts", "def get_sd(self):\n variable = self.get_y_variable_name()\n y_values = self.line_data[variable]\n sd = np.std(y_values)\n\n return sd", "def standard_deviation(data, sample=True):\n return math.sqrt(variance(data, sample))", "def stdeviation(data):\n return statistics.stdev(data)", "def stdev(self, nums):\n mean = float(sum(nums)) / len(nums)\n return math.sqrt(sum((n - mean) ** 2 for n in nums) / float(len(nums)))", "def stdDev(X):\n\tmean = float(sum(X))/len(X)\n\ttot = 0.0\n\tfor x in X:\n\t\ttot += (x - mean)**2\n\treturn (tot/len(X))**0.5\t# square root of mean squared difference", "def phase_spherical_variance():\n pass", "def semideviation(r):\n return r[r<0].std(ddof=0)", "def semideviation(r):\r\n is_negative = r<0\r\n return r[is_negative].std(ddof=0)", "def std_from_mad(self, x):\n return 1.4826 * (np.median(np.abs(x - np.median(x))))", "def getStdDev(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n return ExponentialDistBase.getStdDev(self)", "def stddev(values):\n total = 0.0\n totalSquared = 0.0\n n = 0\n\n for value in values:\n total += value\n totalSquared += value * value\n n += 1\n\n # Need at least two values.\n if n < 2:\n raise InsufficientData\n\n n = float(n)\n return sqrt((totalSquared - total * total / n) / (n - 1))", "def stdDevFromStats():\n\n volList = read()\n\n stdDevValue = stdev(volList) # Calling the standard deviation function from module\n print(\"Standard Deviation value from stat module :\" + str(stdDevValue)+ \"\\n\")", "def getSTD(self):\r\n return np.std(self.members)", "def get_sd(df,**kwargs):\n logger.debug(\"Get Standard Deviation...\")\n return df.std()", "def errsigma68(x):\n\t \n\ty = []\n\tfor i in range(1000):\n\t\txr = resample(x)\n\t\ty.append(Sigma68(xr))\n\ty = np.array(y)\n\treturn np.std(y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Diameter of the data in x.
def diameter_x(self): return self.x[-1] - self.x[0]
[ "def diameter(self):\n\t\treturn self.r * 2", "def diameter(self):\n self._assertarrays_loaded()\n return self._check_nonempty_property('_diameter')", "def density(self) -> float:\n pass", "def get_width(self):\n\t\treturn self.x[1] - self.x[0]", "def density(self):\n return self.num_edges / (self.num_nodes * (self.num_nodes - 1))", "def getDensity(self, x):\n return (math.exp(-math.pow((x - self.mu), 2) / (2 * self.sigmaSquared)) / self.normConst);", "def get_diameter(self, graph):\n return diameter(graph)", "def diameter(H, A):\n\tD = 1329*(10**(-H/5))/np.sqrt(A)\n\treturn D", "def get_pixel_size_x(self):\n x_pixel_size = 0.000075\n return x_pixel_size * 1000", "def dim(self):\n return self._dim", "def d(self):\n return self.random_unit_vectors.components_.shape[1]", "def getDimensions(self):\n return (self.x_dim, self.y_dim, self.z_dim)", "def spatial_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n # if(i != j):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n Temp2 = 0\n for m in range(len(pathlist[k]) - 1):\n Temp2 += self.dmatrix[pathlist[k][m], pathlist[k][m+1]]\n distance.append(Temp2)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.diameter = Temp", "def get_diam(self):\n return int(max(self.image_width, self.image_length))", "def get_point_size(self):\n return self.info.instrument.get_point_size()", "def x_width(self):\n return self.bin_end - self.bin_start", "def diameter(root):\n if root is None:\n return 0\n\n left_height = height(root.left)\n right_height = height(root.right)\n\n left_diameter = diameter(root.left)\n right_diameter = diameter(root.right)\n\n return max(left_height + right_height + 1,\n max(left_diameter, right_diameter)\n )", "def bottom_diameter(self):\n\t\treturn self.r_1 * 2", "def dimension(self):\n return self.field(Field.POSITION).shape[1]", "def diameter(self, time, *particle):\n\n\t\tdiameters = self[time].particles.diameter\t\t\t# diameters at frame time\n\t\tif particle == ():\treturn diameters\t\t\t\t# returns all diameters\n\t\treturn np.array(itemgetter(*particle)(diameters))\t# diameters at frame time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Diameter of the data in y.
def diameter_y(self): return self.y[-1] - self.y[0]
[ "def diameter(self):\n\t\treturn self.r * 2", "def diameter(self):\n self._assertarrays_loaded()\n return self._check_nonempty_property('_diameter')", "def get_pixel_size_y(self):\n y_pixel_size = 0.000075\n return y_pixel_size * 1000", "def density(self) -> float:\n pass", "def y_step_size(self):\n return (self.y_upper - self.y_lower) / self.ny", "def get_diameter(self, graph):\n return diameter(graph)", "def bottom_diameter(self):\n\t\treturn self.r_1 * 2", "def graph_y(self, y):\n \n return self.ymax - (y * (self.ymax - self.ymin) / float(self.height))", "def get_height(self):\n\t\treturn self.y[1] - self.y[0]", "def density(self):\n return self.num_edges / (self.num_nodes * (self.num_nodes - 1))", "def GetY(self) -> \"double\":\n return _itkVersorPython.itkVersorD_GetY(self)", "def diameter(H, A):\n\tD = 1329*(10**(-H/5))/np.sqrt(A)\n\treturn D", "def d(self):\n return self.random_unit_vectors.components_.shape[1]", "def _ydist(self):\n\t\treturn self.geom.y - self.last.y", "def height(self):\n return capi.get_band_ysize(self.ptr)", "def get_diam(self):\n return int(max(self.image_width, self.image_length))", "def top_diameter(self):\n\t\treturn self.r_2 * 2", "def pixelsizey(self) -> ErrorValue:\n return ErrorValue(self._data['pixelsizey'], self._data['pixelsizey.err'])", "def deviationAngle(self):\n\n return (self.n - 1.0) * self.alpha", "def get_width(self):\n\t\treturn self.x[1] - self.x[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Phase is the Z ("height" or "opd") data.
def phase(self): return self.data
[ "def phase_data(self):\n self.instrument.write(\"PHAS\") # Set display to phase format\n # time.sleep(5)\n try:\n start_time = time.perf_counter_ns()\n data_degree = self.instrument.query(\"OUTPFORM\") # Output format is a list of form (degrees, 0)\n end_time = time.perf_counter_ns()\n total_time = (end_time - start_time)/(10 ** 9)\n # print(f\"Phase data time in s: {total_time}\")\n except Exception as e:\n print(e)\n return False\n return data_degree", "def phase(dp):\n from tayph.vartests import typetest\n import numpy as np\n from astropy.io import ascii\n from astropy.time import Time\n from astropy import units as u, coordinates as coord\n import tayph.util as ut\n dp=check_dp(dp)#Path object\n d=ascii.read(dp/'obs_times',comment=\"#\")#,names=['mjd','time','exptime','airmass'])\n #Not using the named columns because I may not know for sure how many columns\n #there are, and read-ascii breaks if only some columns are named.\n #The second column has to be a date array though.\n\n # t = Time(d['col2'],scale='utc', location=coord.EarthLocation.of_site('paranal'))# I determined that the difference between this and geodetic 0,0,0 is zero.\n t = Time(d['col2'],scale='utc', location=coord.EarthLocation.from_geodetic(0,0,0))\n\n jd = t.jd\n P=paramget('P',dp)\n RA=paramget('RA',dp)\n DEC=paramget('DEC',dp)\n Tc=paramget('Tc',dp)#Needs to be given in BJD!\n\n typetest(P,float,'P in sp.phase()')\n typetest(Tc,float,'Tc in sp.phase()')\n typetest(RA,str,'RA in sp.phase()')\n typetest(DEC,str,'DEC in sp.phase()')\n\n ip_peg = coord.SkyCoord(RA,DEC,unit=(u.hourangle, u.deg), frame='icrs')\n ltt_bary = t.light_travel_time(ip_peg)\n\n n=0.0\n Tc_n=Time(Tc,format='jd',scale='tdb')\n while Tc_n.jd >= min(jd):\n Tc_n=Time(Tc-100.0*n*P,format='jd',scale='tdb')#This is to make sure that the Transit central time PRECEDES the observations (by tens or hundreds or thousands of years). Otherwise, the phase could pick up a minus sign somewhere and be flipped. 
I wish to avoid that.\n n+=1\n BJD = t.tdb + ltt_bary\n diff = BJD-Tc_n\n phase=((diff.jd) % P)/P\n return phase", "def phase(self):\n return np.angle(self)", "def phase(self):\n return Phase(self._phase).label", "def getPhaseTensor(self,rotate=180,thetar=0):\r\n \r\n pt=PhaseTensor(self.z,self.zvar,rotate=rotate,rotz=thetar)\r\n return pt", "def _is_phase(phase):\n return phase in [\"+1\", \"-1\"]", "def phase(dp):\n from lib.utils import typetest\n import numpy as np\n import pdb\n from astropy.io import ascii\n from astropy.time import Time\n from astropy import units as u, coordinates as coord\n import lib.utils as ut\n typetest('dp',dp,str)\n d=ascii.read(dp+'obs_times',comment=\"#\")#,names=['mjd','time','exptime','airmass'])\n #Removed the named columns because I may not know for sure how many columns\n #there are, and read-ascii breaks if only some columns are named.\n #The second column has to be a date array though.\n # t1=ut.start()\n # t = Time(d['col2'],scale='utc', location=coord.EarthLocation.of_site('paranal'))# I determined that the difference between this and geodetic 0,0,0 is zero.\n # ut.end(t1)\n # t2=ut.start()\n t = Time(d['col2'],scale='utc', location=coord.EarthLocation.from_geodetic(0,0,0))\n # ut.end(t2)\n\n jd = t.jd\n P=paramget('P',dp)\n RA=paramget('RA',dp)\n DEC=paramget('DEC',dp)\n Tc=paramget('Tc',dp)#Needs to be given in BJD!\n\n typetest('P',P,float)\n typetest('Tc',Tc,float)\n\n ip_peg = coord.SkyCoord(RA,DEC,unit=(u.hourangle, u.deg), frame='icrs')\n ltt_bary = t.light_travel_time(ip_peg)\n\n n=0.0\n Tc_n=Time(Tc,format='jd',scale='tdb')\n while Tc_n.jd >= min(jd):\n Tc_n=Time(Tc-100.0*n*P,format='jd',scale='tdb')#This is to make sure that the Transit central time PRECEDES the observations (by tens or hundreds or thousands of years). Otherwise, the phase could pick up a minus sign somewhere and be flipped. 
I hate that.\n n+=1\n BJD = t.tdb + ltt_bary\n diff = BJD-Tc_n\n phase=((diff.jd) % P)/P\n return phase", "def spectral_phase(self):\r\n return lib.phase(self._spectrum)", "def phase(self, phase):\n if phase is None:\n raise ValueError(\"Invalid value for `phase`, must not be `None`\")\n\n self._phase = phase", "def test_phase_estimated(self, phase):\n estimates = []\n wire_range = range(2, 10)\n\n for wires in wire_range:\n dev = qml.device(\"default.qubit\", wires=wires)\n m = qml.RX(phase, wires=0).matrix\n target_wires = [0]\n estimation_wires = range(1, wires)\n\n with qml.tape.QuantumTape() as tape:\n # We want to prepare an eigenstate of RX, in this case |+>\n qml.Hadamard(wires=target_wires)\n\n qml.templates.QuantumPhaseEstimation(\n m, target_wires=target_wires, estimation_wires=estimation_wires\n )\n qml.probs(estimation_wires)\n\n tape = tape.expand()\n res = tape.execute(dev).flatten()\n initial_estimate = np.argmax(res) / 2 ** (wires - 1)\n\n # We need to rescale because RX is exp(- i theta X / 2) and we expect a unitary of the\n # form exp(2 pi i theta X)\n rescaled_estimate = (1 - initial_estimate) * np.pi * 4\n estimates.append(rescaled_estimate)\n\n # Check that the error is monotonically decreasing\n for i in range(len(estimates) - 1):\n err1 = np.abs(estimates[i] - phase)\n err2 = np.abs(estimates[i + 1] - phase)\n assert err1 >= err2\n\n # This is quite a large error, but we'd need to push the qubit number up more to get it\n # lower\n assert np.allclose(estimates[-1], phase, rtol=1e-2)", "def s11_phase_func(x, *p):\n return np.angle(((p[2] - p[1]) / p[2] + 2 * 1j * (x - p[0]) * p[1] / p[0]) / (\n (p[1] + p[2]) / p[2] + 2 * 1j * (x - p[0]) * p[1] / p[0]))", "def phase_spherical_variance():\n pass", "def convert_intf_phase(infilename, outfilename):\n slc = isce_read_write.read_complex_data(infilename);\n phase = np.angle(slc);\n ny, nx = np.shape(phase);\n isce_read_write.write_isce_data(phase, nx, ny, \"FLOAT\", outfilename);\n return;", "def phase_plane(self):\n plt.figure(figsize=(8, 5))\n plt.plot(self.V, self.W, color='cornflowerblue')\n plt.plot(self.V, self.V - (self.V**3)/3 + self.I, color=\"slateblue\")\n plt.plot(self.V, (self.V + self.a)/(self.b), color=\"red\")\n plt.xlabel('Voltage [V]', fontsize=12)\n plt.ylabel('Recovery [W]', fontsize=12)\n plt.grid(alpha=0.3)", "def phase(angle):\n return angle % (2*math.pi)", "def phase_shift(H, op):\n return -2*np.pi*np.matmul(H, op.tran) / op.DEN", "def getResPhase(self,ffactor=1,thetar=0):\r\n \r\n return ResPhase(self.z,self.period,zvar=self.zvar,rotz=thetar,\r\n ffactor=ffactor)", "def phase(self):\n PP = self._complex_amplitude.real\n QQ = self._complex_amplitude.imag\n return math.atan2(QQ, PP) # result between -pi and pi.", "def setPhase(self, arr):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the CreateDatabase action. This should create all the tables that should exist in the database.
def test_create_database(self): # Setup the tables CreateDatabase.run(app=self.app) engine = create_engine(TestManagePy.postgresql_url) connection = engine.connect() for model in [User, Library, Permissions]: exists = engine.dialect.has_table(connection, model.__tablename__) self.assertTrue(exists) # Clean up the tables Base.metadata.drop_all(bind=self.app.db.engine)
[ "def test_create_tables(self):\n self._db.create_tables()\n tables = json.loads(self._db.get_database_info())\n expected_tables = db_connection.Database.get_columns().keys()\n for table in expected_tables:\n assert table in tables.keys()", "def create_tables():\n with db:\n for table in MODELS:\n if not table.table_exists():\n log.info('Creating database table: %s', table.__name__)\n db.create_tables([table], safe=True)\n else:\n log.debug('Skipping database table %s, it already exists.',\n table.__name__)", "def create_database(self):\n self._create_tables()\n self._create_functions()\n self._create_triggers()", "def createDatabaseTables(self):\n\t\tself.createBlockTable()\n\t\tself.createTransactionTable()\n\t\tself.createInputTable()\n\t\tself.createOutputTable()\n\t\tself.createClusterTable()\n\t\tself.createAddressTable()\n\t\tself.createLedgerTable()", "def setUp(self):\n db.create_all()\n self.db = db", "def create_tables():\n database = SqliteDatabase('customer.db')\n database.connect()\n database.create_tables([Customer])\n logging.info('Create customer table')\n database.close()", "def test_destroy_database(self):\n\n # Setup the tables\n engine = create_engine(TestManagePy.postgresql_url)\n connection = engine.connect()\n Base.metadata.create_all(bind=self.app.db.engine)\n\n for model in [User, Library, Permissions]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertTrue(exists)\n\n DestroyDatabase.run(app=self.app)\n\n for model in [User, Library, Permissions]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertFalse(exists)", "def setUp(self):\n with database() as db:\n db.query('DROP TABLE IF EXISTS test_data')\n db.query('CREATE TABLE test_data (variable INTEGER)')", "def test_database_setup():\n\n TEST_DB_NAME = 'test_db.sqlite3'\n with contextlib.suppress(FileNotFoundError):\n TIMEOUT_SECONDS = 60\n cutoff_time = datetime.now() + timedelta(seconds=TIMEOUT_SECONDS)\n db_deleted = False\n while not db_deleted and datetime.now() < cutoff_time:\n try:\n os.remove(TEST_DB_NAME)\n db_deleted = True\n except PermissionError:\n # DB file lock is probably still held by last Django server instance.\n # Let's give it a moment to release it.\n pass\n\n if not db_deleted:\n raise TimeoutError(f\"Could not delete {TEST_DB_NAME}\")\n\n # Just doing:\n # `subprocess.call(f'sqlite3 db.sqlite3 .schema | sqlite3 {self.TEST_DB_NAME}', shell=True)`\n # would be nicer, but unfortunately sqlite creates a default table (sqlite_sequence) that we need to\n # remove from the schema before passing it back in again\n schema_byte_string = subprocess.check_output('sqlite3 db.sqlite3 .schema', shell=True)\n schema_string = str(schema_byte_string, 'utf-8')\n schema_one_line = schema_string.replace('\\r','').replace('\\n','')\n schema_without_sqlite_sequence = schema_one_line.replace('CREATE TABLE sqlite_sequence(name,seq);','')\n subprocess.call(f'echo {schema_without_sqlite_sequence} | sqlite3 {TEST_DB_NAME}', shell=True)\n\n # populate new database as is needed for testing\n with open('logs/test_setup_log.txt', 'a') as log:\n subprocess.call(\n ['py', 'manage.py', 'test_setup', '--settings=charity_configuration.test_settings'],\n stdout=log,\n )", "def test_create_product_table(self):\n _db = Database.instance(\":memory:\")\n try:\n create_product_table()\n except Exception as e:\n print(e)", "def test_createTableTask(self):\n createTablesTask.run()\n conn_string = \"postgresql://{}:{}@{}:{}/{}\".format(DB_USER, DB_PASSWORD, HOST, DB_PORT, 
DB_NAME)\n engine = create_engine(conn_string, echo=True)\n self.assertTrue(engine.dialect.has_table(engine, 'users'))\n self.assertTrue(engine.dialect.has_table(engine, 'artists'))\n self.assertTrue(engine.dialect.has_table(engine, 'songs'))\n self.assertTrue(engine.dialect.has_table(engine, 'songplays'))\n self.assertTrue(engine.dialect.has_table(engine, 'staging_events'))\n self.assertTrue(engine.dialect.has_table(engine, 'staging_songs'))\n self.assertTrue(engine.dialect.has_table(engine, 'time'))", "def test_db_create():\n _test_call(\n mysql.db_create,\n \"CREATE DATABASE IF NOT EXISTS `test``'\\\" db`;\",\n \"test`'\\\" db\",\n )", "def create_db_and_tables():\r\n engine = create_connection_db()\r\n delete_db(engine)\r\n create_db(engine)\r\n create_tables_db(engine)", "def create_test_db(self):\n self.engine = sqlalchemy.create_engine(\"sqlite:///:memory:\")\n self.slave = self.engine\n self.metadata = Metadata()\n self.create_db()\n self.reset_db()", "def test_database(self):\n tester = os.path.exists(\"lingualizer_alchemy.db\")\n self.assertEqual(tester, True)", "def test_basic_database_creation_results_in_201(self):\n params = self.params\n params['database-name'] = \"important-data-here\"\n self.teardown_databases.append(\"important-data-here\")\n response, body = self.booster.request(params)\n err = response.get(\"x-booster-error\", \"none\")\n self.assertEqual(response.status, 201)\n self.assertEqual(err, \"none\")", "def create_empty_db():\n drop_db()\n cm.database.create_tables([cm.Customer])\n cm.database.close()", "def create_db_tables():\n print(\"Start: Building Backend\")\n print(\">>> Running Make Migrations\")\n subprocess.run(\n args=[sys.executable, \"manage.py\", \"makemigrations\", \"api\"], cwd=BACKEND_DIR\n )\n print(\">>> Running Migrate\")\n subprocess.run(args=[sys.executable, \"manage.py\", \"migrate\"], cwd=BACKEND_DIR)\n print(\"Finish: Building Backend\")", "def _create_tables(self):\n log.debug('create_tables')\n\n Base.metadata.create_all(self.engine)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the DestroyDatabase action. This should clear all the tables that were created in the database.
def test_destroy_database(self): # Setup the tables engine = create_engine(TestManagePy.postgresql_url) connection = engine.connect() Base.metadata.create_all(bind=self.app.db.engine) for model in [User, Library, Permissions]: exists = engine.dialect.has_table(connection, model.__tablename__) self.assertTrue(exists) DestroyDatabase.run(app=self.app) for model in [User, Library, Permissions]: exists = engine.dialect.has_table(connection, model.__tablename__) self.assertFalse(exists)
[ "def tearDown(self):\n with database() as db:\n db.query('DROP TABLE test_data')", "def _reset_database(self):\r\n self._delete_tables()\r\n self._create_tables()", "def tearDown(self):\n\n\t\tdb.session.remove()\n\t\tdb.drop_all()", "def clearDatabase():\n Album.objects.all().delete()\n print(\"Cleared database\")", "def drop_test_database():\n CONNECTION.get_connection().drop_database(TEST_DATABASE_NAME)", "def clearTestDatabase():\n classes = [\n AerialPosition,\n AccessLog,\n FlyZone,\n GpsPosition,\n MissionConfig,\n MovingObstacle,\n ObstacleAccessLog,\n ServerInfo,\n ServerInfoAccessLog,\n StationaryObstacle,\n TakeoffOrLandingEvent,\n UasTelemetry,\n Waypoint,\n ]\n for cur_class in classes:\n cur_class.objects.all().delete()\n cache.clear()", "def reset_database():\n if os.path.exists(testinit.database_file):\n os.remove(testinit.database_file)\n shutil.copy(testinit.clean_db, testinit.database_file)", "def _purgeTables(cls):\n for table in cls.TABLES:\n table = getattr(models, table)\n try:\n print 'Deleting %s table ...' % table.__name__\n table.query.delete()\n except Exception:\n print 'Deleting %s table ...' % table.name\n table.delete()\n\n db.session.commit()", "def wipe_db():\n User.objects.all().delete()\n models.Issue.objects.all().delete()", "def tearDown(self):\n for query in self.queries:\n query.delete()", "def deleteAllTables():\n\n # Creates connections to our databases and cursors to work with it\n mainDBConn = connect(\"database/database.sqlite\")\n mainDBCursor = mainDBConn.cursor()\n historyDBConn = connect(\"database/history.sqlite\")\n historyDBCursor = historyDBConn.cursor()\n\n mainDBCursor.execute(\"DROP TABLE animals\")\n mainDBCursor.execute(\"DROP TABLE clients\")\n mainDBCursor.execute(\"DROP TABLE petsClientsLink\")\n mainDBCursor.execute(\"DROP TABLE appointments\")\n historyDBCursor.execute(\"DROP TABLE history\")\n\n mainDBConn.commit()\n historyDBConn.commit()\n mainDBConn.close()\n historyDBConn.close()", "def test_create_database(self):\n\n # Setup the tables\n CreateDatabase.run(app=self.app)\n engine = create_engine(TestManagePy.postgresql_url)\n connection = engine.connect()\n\n for model in [User, Library, Permissions]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertTrue(exists)\n\n # Clean up the tables\n Base.metadata.drop_all(bind=self.app.db.engine)", "def db_teardown():\n db_root_pw = get_db_root_pw()\n for config in settings.dejavu_configs:\n test_db = config['database']['db']\n drop_db_command = 'mysql -u root --password=' + db_root_pw + ' -e'\n drop_db_command = drop_db_command.split() + ['DROP DATABASE ' + test_db + ';']\n subprocess.call(drop_db_command)", "def wipe_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n os.system(\"rm -f {0}\".format(dbpath))", "def clean_db(database, exp_config):\n database.experiments.drop()\n database.experiments.insert_many(exp_config[0])\n database.lying_trials.drop()\n database.trials.drop()\n database.trials.insert_many(exp_config[1])\n database.workers.drop()\n database.workers.insert_many(exp_config[2])\n database.resources.drop()\n database.resources.insert_many(exp_config[3])", "def tearDown(self):\n self.testInit.clearDatabase()\n\n try:\n os.remove(os.path.join(self.tempDir, \"ProcReport.pkl\"))\n os.remove(os.path.join(self.tempDir, \"MergeReport.pkl\"))\n except Exception as ex:\n pass\n\n try:\n os.rmdir(self.tempDir)\n except Exception as ex:\n pass\n\n return", "def destroy(self) -> None:\n 
Base.metadata.drop_all(bind=self.engine)", "def fresh_database():\n from ip_inspector.database import DATABASE_PATH, create_tables\n\n if os.path.exists(DATABASE_PATH):\n os.remove(DATABASE_PATH)\n create_tables()", "def drop(self):\n self.client.delete_database(self.database_name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests that the DeleteStaleUsers action propagates the deletion of users from the API database to that of the microservice.
def test_delete_stale_users(self): with self.app.session_scope() as session: # We do not add user 1 to the API database session.execute('create table users (id integer, random integer);') session.execute('insert into users (id, random) values (2, 7);') session.commit() with self.app.session_scope() as session: try: # Add some content to the users, libraries, and permissions within # the microservices user_1 = User(absolute_uid=1) session.add(user_1) session.commit() user_2 = User(absolute_uid=2) library_1 = Library(name='Lib1') library_2 = Library(name='Lib2') session.add_all([ user_1, user_2, library_1, library_2 ]) session.commit() # Make some permissions # User 1 owns library 1 and can read library 2 # User 2 owns library 2 and can read library 1 permission_user_1_library_1 = Permissions( owner=True, library_id=library_1.id, user_id=user_1.id ) permission_user_1_library_2 = Permissions( read=True, library_id=library_2.id, user_id=user_1.id ) permission_user_2_library_1 = Permissions( read=True, library_id=library_1.id, user_id=user_2.id ) permission_user_2_library_2 = Permissions( owner=True, library_id=library_2.id, user_id=user_2.id ) session.add_all([ permission_user_1_library_1, permission_user_1_library_2, permission_user_2_library_1, permission_user_2_library_2 ]) session.commit() # Retain some IDs for when they are deleted user_1_id = user_1.id user_2_id = user_2.id user_1_absolute_uid = user_1.absolute_uid library_1_id = library_1.id library_2_id = library_2.id # Now run the stale deletion DeleteStaleUsers().run(app=self.app) # Check the state of users, libraries and permissions # User 2 # 1. the user 2 should still exist # 2. library 2 should exist # 3. the permissions for library 2 for user 2 should exist # 4. the permissions for library 1 for user 2 should not exist _user_2 = session.query(User).filter(User.absolute_uid == 2).one() self.assertIsInstance(_user_2, User) _library_2 = session.query(Library)\ .filter(Library.id == library_2_id)\ .one() self.assertIsInstance(_library_2, Library) _permission_user_2_library_2 = session.query(Permissions)\ .filter(Permissions.library_id == library_2_id)\ .filter(Permissions.user_id == user_2_id)\ .one() self.assertIsInstance(_permission_user_2_library_2, Permissions) with self.assertRaises(NoResultFound): session.query(Permissions)\ .filter(Permissions.library_id == library_1_id)\ .filter(Permissions.user_id == user_2_id)\ .one() # User 1 # 1. the user should not exist # 2. library 1 should not exist # 3. the permissions for library 1 for user 1 should not exist # 4. the permissions for library 2 for user 1 should not exist with self.assertRaises(NoResultFound): session.query(User)\ .filter(User.absolute_uid == user_1_absolute_uid).one() with self.assertRaises(NoResultFound): session.query(Library)\ .filter(Library.id == library_1_id)\ .one() with self.assertRaises(NoResultFound): session.query(Permissions)\ .filter(Permissions.library_id == library_1_id)\ .filter(Permissions.user_id == user_1_id)\ .one() with self.assertRaises(NoResultFound): session.query(Permissions)\ .filter(Permissions.library_id == library_2_id)\ .filter(Permissions.user_id == user_1_id)\ .one() except Exception: raise finally: # Destroy the tables session.execute('drop table users;') pass
[ "def test_delete_stale_users(self):\n\n with self.app.session_scope() as session:\n # We do not add user 1 to the API database\n session.execute('create table users (id integer, random integer);')\n session.execute('insert into users (id, random) values (2, 7);')\n session.commit()\n\n with self.app.session_scope() as session:\n try:\n\n # Add some content to the users, libraries, and permissions within\n # the microservices\n user_1 = User(absolute_uid=1)\n\n user_2 = User(absolute_uid=2)\n\n library_1 = Library(name='Lib1')\n library_2 = Library(name='Lib2')\n\n session.add_all([\n user_1, user_2,\n library_1, library_2\n ])\n session.commit()\n\n # Make some permissions\n # User 1 owns library 1 and can read library 2\n # User 2 owns library 2 and can read library 1\n permission_user_1_library_1 = Permissions(\n permissions={'read': False, 'write': False, 'admin': False, 'owner': True},\n library_id=library_1.id,\n user_id=user_1.id\n )\n permission_user_1_library_2 = Permissions(\n permissions={'read': True, 'write': False, 'admin': False, 'owner': False},\n library_id=library_2.id,\n user_id=user_1.id\n )\n permission_user_2_library_1 = Permissions(\n permissions={'read': True, 'write': False, 'admin': False, 'owner': False},\n library_id=library_1.id,\n user_id=user_2.id\n )\n permission_user_2_library_2 = Permissions(\n permissions={'read': False, 'write': False, 'admin': False, 'owner': True},\n library_id=library_2.id,\n user_id=user_2.id\n )\n\n session.add_all([\n permission_user_1_library_1, permission_user_1_library_2,\n permission_user_2_library_1, permission_user_2_library_2\n ])\n session.commit()\n\n # Retain some IDs for when they are deleted\n user_1_id = user_1.id\n user_2_id = user_2.id\n user_1_absolute_uid = user_1.absolute_uid\n library_1_id = library_1.id\n library_2_id = library_2.id\n\n # Now run the stale deletion\n DeleteStaleUsers().run(app=self.app)\n\n # Check the state of users, libraries and permissions\n # User 2\n # 1. the user 2 should still exist\n # 2. library 2 should exist\n # 3. the permissions for library 2 for user 2 should exist\n # 4. the permissions for library 1 for user 2 should not exist\n _user_2 = session.query(User).filter(User.absolute_uid == 2).one()\n self.assertIsInstance(_user_2, User)\n\n _library_2 = session.query(Library)\\\n .filter(Library.id == library_2_id)\\\n .one()\n self.assertIsInstance(_library_2, Library)\n\n _permission_user_2_library_2 = session.query(Permissions)\\\n .filter(Permissions.library_id == library_2_id)\\\n .filter(Permissions.user_id == user_2_id)\\\n .one()\n self.assertIsInstance(_permission_user_2_library_2, Permissions)\n\n with self.assertRaises(NoResultFound):\n session.query(Permissions)\\\n .filter(Permissions.library_id == library_1_id)\\\n .filter(Permissions.user_id == user_2_id)\\\n .one()\n\n # User 1\n # 1. the user should not exist\n # 2. library 1 should not exist\n # 3. the permissions for library 1 for user 1 should not exist\n # 4. 
the permissions for library 2 for user 1 should not exist\n with self.assertRaises(NoResultFound):\n session.query(User)\\\n .filter(User.absolute_uid == user_1_absolute_uid).one()\n\n with self.assertRaises(NoResultFound):\n session.query(Library)\\\n .filter(Library.id == library_1_id)\\\n .one()\n\n with self.assertRaises(NoResultFound):\n session.query(Permissions)\\\n .filter(Permissions.library_id == library_1_id)\\\n .filter(Permissions.user_id == user_1_id)\\\n .one()\n\n with self.assertRaises(NoResultFound):\n session.query(Permissions)\\\n .filter(Permissions.library_id == library_2_id)\\\n .filter(Permissions.user_id == user_1_id)\\\n .one()\n\n except Exception:\n raise\n finally:\n # Destroy the tables\n session.execute('drop table users;')\n pass", "def test_delete_user_fail(self):\n with app.test_client() as client:\n seed_database()\n #Tests to confirm that if no one is logged in, a user cannot be and is not deleted\n #and there is a redirect to the login page.\n request=client.post('/users/newuser1/delete', follow_redirects=True)\n self.assertEqual(request.status_code, 200)\n response=request.get_data(as_text=True)\n self.assertIn(\"You do not have permission to delete this user.\", response)\n self.assertIn(\"<title>Log In</title>\", response)\n self.assertEqual(User.query.filter_by(username=\"newuser1\").first().username, \"newuser1\")\n self.assertIsNotNone(Feedback.query.filter_by(username=\"newuser1\").first())\n\n #Tests to confirm that if the wrong user is logged in, a user cannot be and is not deleted\n #and there is a redirect to the attempted deleted user's details page.\n client.post('/login', data={\"username\": \"newuser2\", \"password\": \"password456\"}, \n follow_redirects=True)\n request=client.post('users/newuser1/delete', follow_redirects=True)\n self.assertEqual(request.status_code, 200)\n response=request.get_data(as_text=True)\n self.assertIn(\"You do not have permission to delete this user.\", response)\n self.assertIn(\"<title>Details for newuser1</title>\", response)\n self.assertEqual(User.query.filter_by(username=\"newuser1\").first().username, \"newuser1\")\n self.assertIsNotNone(Feedback.query.filter_by(username=\"newuser1\").first())", "def test_users_activation_delete(self):\n pass", "def test_bulk_delete(self):\n\n from error_report.models import Error\n\n # Create some notification messages by throwing errors\n for _ii in range(10):\n Error.objects.create()\n\n # Check that messages have been created\n messages = NotificationMessage.objects.all()\n\n # As there are three staff users (including the 'test' user) we expect 30 notifications\n # However, one user is marked as inactive\n self.assertEqual(messages.count(), 20)\n\n # Only 10 messages related to *this* user\n my_notifications = messages.filter(user=self.user)\n self.assertEqual(my_notifications.count(), 10)\n\n # Get notification via the API\n url = reverse('api-notifications-list')\n response = self.get(url, {}, expected_code=200)\n self.assertEqual(len(response.data), 10)\n\n # Mark some as read\n for ntf in my_notifications[0:3]:\n ntf.read = True\n ntf.save()\n\n # Read out via API again\n response = self.get(\n url,\n {\n 'read': True,\n },\n expected_code=200\n )\n\n # Check validity of returned data\n self.assertEqual(len(response.data), 3)\n for ntf in response.data:\n self.assertTrue(ntf['read'])\n\n # Now, let's bulk delete all 'unread' notifications via the API,\n # but only associated with the logged in user\n response = self.delete(\n url,\n {\n 'filters': {\n 'read': 
False,\n }\n },\n expected_code=204,\n )\n\n # Only 7 notifications should have been deleted,\n # as the notifications associated with other users must remain untouched\n self.assertEqual(NotificationMessage.objects.count(), 13)\n self.assertEqual(NotificationMessage.objects.filter(user=self.user).count(), 3)", "def test_api_v1_users_id_delete(self):\n pass", "def test_delete_user(app):\n prosumer: User = find_user_by_email(\"test_prosumer@seita.nl\")\n num_users_before = User.query.count()\n user_assets_with_measurements_before = Asset.query.filter(\n Asset.owner_id == prosumer.id, Asset.asset_type_name.in_([\"wind\", \"solar\"])\n ).all()\n asset_ids = [asset.id for asset in user_assets_with_measurements_before]\n for asset_id in asset_ids:\n num_power_measurements = Power.query.filter(Power.asset_id == asset_id).count()\n assert num_power_measurements == 96\n delete_user(prosumer)\n assert find_user_by_email(\"test_prosumer@seita.nl\") is None\n user_assets_after = Asset.query.filter(Asset.owner_id == prosumer.id).all()\n assert len(user_assets_after) == 0\n assert User.query.count() == num_users_before - 1\n for asset_id in asset_ids:\n num_power_measurements = Power.query.filter(Power.asset_id == asset_id).count()\n assert num_power_measurements == 0", "def test_delete_user_success(self):\n with app.test_client() as client:\n seed_database()\n client.post('/login', data={\"username\": \"newuser1\", \"password\": \"password123\"}, \n follow_redirects=True)\n request=client.post('/users/newuser1/delete', follow_redirects=True)\n self.assertEqual(request.status_code, 200)\n response=request.get_data(as_text=True)\n self.assertIn(\"Successfully deleted the user newuser1!\", response)\n self.assertIn(\"<title>Register</title>\", response)\n self.assertIsNone(User.query.filter_by(username=\"newuser1\").first())\n self.assertIsNone(Feedback.query.filter_by(username=\"newuser1\").first())", "def test_del_user(self):\n # First check we get a 404 if we use the wrong user_id\n res = self.__client.delete('/site/api/v1.0/user/1001')\n self.assertEqual(res.status_code, 404)\n # Now add some sites to delete\n test_data = copy.deepcopy(self.TEST_SITE)\n res = self.__client.post('/site/api/v1.0/site', data=test_data)\n self.assertEqual(res.status_code, 200)\n test_data['site_name'] = 'YetAnotherTestSite'\n res = self.__client.post('/site/api/v1.0/site', data=test_data)\n self.assertEqual(res.status_code, 200)\n # Check this user now has two sites\n res = self.__client.get('/site/api/v1.0/site')\n self.assertEqual(res.status_code, 200)\n my_sites = [x for x in json.loads(res.data) if x[\"is_owner\"]]\n self.assertEqual(len(my_sites), 2)\n # We also need to add a credentials to test\n # Add this to the DB directly\n db = self.__service.test_db()\n Cred = db.tables.Cred\n db.session.add(Cred(cred_owner=1000,\n site_id=1,\n cred_username='mytest',\n cred_expiry=datetime.datetime.utcnow(),\n cred_value='secret'))\n db.session.commit()\n # Call the delete function\n res = self.__client.delete('/site/api/v1.0/user/1000')\n self.assertEqual(res.status_code, 200)\n # Now check user has 0 sites\n res = self.__client.get('/site/api/v1.0/site')\n self.assertEqual(res.status_code, 200)\n my_sites = [x for x in json.loads(res.data) if x[\"is_owner\"]]\n self.assertEqual(len(my_sites), 0)\n # Check the cred has gone too\n cred = Cred.query.filter_by(cred_owner=1000).first()\n self.assertIsNone(cred)", "def test_otoroshi_controllers_adminapi_users_controller_delete_admin(self):\n pass", "def test_delete(self):\n\n 
with self.client as c:\n self.login(c)\n\n # UNAUTHORIZED - deleting trade owned by user 222, as user 111\n resp = c.post('/trades/222/delete', follow_redirects=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('You are unauthorized to perform this action.', str(resp.data))\n trade = Trade.query.get(222)\n self.assertIsNotNone(trade)\n\n # AUTHORIZED\n resp = c.post('/trades/111/delete', follow_redirects=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('Trade successfully deleted', str(resp.data))\n trade = Trade.query.get(111)\n self.assertIsNone(trade)", "def test_delete_valid(self):\n response = self.app.delete('/api/a/actors/Christopher Lloyd')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.delete('/api/a/movies/Blind Date')\n self.assertEqual(response.status_code, 200)", "def test_delete_expired_users(self):\n expired_user = SignupManager.create_user(**self.user_info)\n expired_user.date_joined -= datetime.timedelta(days=auth_settings.BAPH_ACTIVATION_DAYS + 1)\n expired_user.save()\n\n deleted_users = SignupManager.delete_expired_users()\n\n self.failUnlessEqual(deleted_users[0].username, 'alice')", "def test_post_delete_account(self):\n c = Client()\n c.login(username='foo', password='bar')\n request = c.post('/GradMaze/accounts/delete/', follow=True)\n self.assertFalse(User.objects.filter(username='foo').exists())", "def test_delete_run_as_user(self):\n pass", "def test_delete_external_user(self):\n pass", "def test_services_delete(self):\n pass", "def test_delete_database_user(self):\n with requests_mock.Mocker() as m:\n m.register_uri(\n requests_mock.DELETE,\n \"http://localhost:8086/db/db/users/paul\"\n )\n\n cli = InfluxDBClient(database='db')\n cli.delete_database_user(username='paul')\n\n self.assertIsNone(m.last_request.body)", "def test_user_delete_o_auth2_application(self):\n pass", "def test_delete_already_gone(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.delete.return_value = False\n resp = self.app.delete('/api/1/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Debugging utility. Writes processed cut IDs to a file. Expects ``return_cuts=True`` to be passed to the Dataset class.
def cut_id_dumper(dataloader, path: Path): if not dataloader.dataset.return_cuts: return dataloader # do nothing, "return_cuts=True" was not set with path.open('w') as f: for batch in dataloader: for cut in batch['supervisions']['cut']: print(cut.id, file=f) yield batch
[ "def coverage_wrapper(dset_id, filtered_reads, utrfile_path):\n\n ## XXX This is a sin: I'm introducing a once-in-a-lifetime piece of code\n #here. It outputs the coverage \n\n # Rename path to 'covered_...'\n (dirpath, basename) = os.path.split(filtered_reads)\n out_path = os.path.join(dirpath, 'covered_'+dset_id)\n outfile = open(out_path, 'wb')\n\n cmd = ['coverageBed', '-d', '-a', filtered_reads, '-b', utrfile_path]\n\n f = Popen(cmd, stdout=outfile)\n f.wait()\n\n outfile.close()\n\n return out_path", "def _print(self, testcases, process_id, dryrun, testcases_per_file):\n chunks = ichunked(testcases, testcases_per_file)\n for i, chunk in enumerate(chunks):\n basename = f'testcase-{self.machine_index}-{process_id}'\n filename = f'tmp-{basename}' if dryrun else f'{basename}-{i}'\n data = [Format.make(self, x) for x in chunk if self.filter(x)]\n with open(join(self.folder_path, filename), 'a') as f:\n f.write(''.join(data))", "def write_results(cross_res, drop_res, file_name=knn_results):\r\n with open(file_name, mode='w') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(['K_value',\r\n 'cross_val_score',\r\n 'dropout_score'])\r\n for k in range(len(cross_res)):\r\n writer.writerow([k, cross_res[k], drop_res[k]])", "def write_results(filename):", "def write_works_data(self,file):\n fh = gzip.open(file,'w')\n fh.write(\"#workid bibids\\n\")\n fh.write(\"#workid fmt string is %s to get URI\\n\" % (self.workid_fmt))\n fh.write(\"#prefix fmt string is %s to get URI\\n\" % (self.bibid_fmt))\n n = 0\n for workid in sorted(self.workids.keys(),key=int):\n n += 1\n fh.write(\"%d %s\\n\" % (workid,\" \".join([str(x) for x in self.workids[workid]])))\n fh.close()\n logging.warning(\"written %d workid lines to %s\" % (n,file))", "def dump_descendant_cids_to_file(self, filename):\n\n outfile = open(filename, \"wt\")\n for n in range(0, len(self.nodes)):\n descendants = self.all_descendants_of_node(n)\n outfile.write(\"{0}\".format(self.nodes[n].cid))\n if len(descendants) > 0:\n outfile.write(\",\")\n\n descendant_count = len(descendants)\n\n i = 0\n for d in descendants:\n outfile.write(\"{0}\".format(self.nodes[d].cid))\n i += 1\n if i < descendant_count:\n outfile.write(\",\")\n outfile.write(\"\\n\")\n outfile.close()", "def write_data_to_file( self, chrom, start, end, filename ):\n raise Exception( \"Unimplemented Function\" )", "def cut_nuts(layers: str, nuts: list, schema: str, year: str):\r\n csv_result = get_csv_from_nuts(layers=layers, nuts=nuts, schema=schema, year=year)\r\n return ExportCut.save_file_csv_random_name(content=csv_result)", "def _save_spill_data(self, saveloc, nc_filename):\n nc_out = NetCDFOutput(nc_filename, which_data='all', cache=self._cache)\n nc_out.prepare_for_model_run(model_start_time=self.start_time,\n uncertain=self.uncertain,\n spills=self.spills)\n nc_out.write_output(self.current_time_step)\n\n if isinstance(saveloc, zipfile.ZipFile):\n saveloc.write(nc_filename, nc_filename)\n if self.uncertain:\n u_file = nc_out.uncertain_filename\n saveloc.write(u_file, os.path.split(u_file)[1])\n elif zipfile.is_zipfile(saveloc):\n with zipfile.ZipFile(saveloc, 'a',\n compression=zipfile.ZIP_DEFLATED,\n allowZip64=allowzip64) as z:\n z.write(nc_filename, nc_filename)\n if self.uncertain:\n u_file = nc_out.uncertain_filename\n z.write(u_file, os.path.split(u_file)[1])\n if self.uncertain:\n os.remove(u_file)\n os.remove(nc_filename)", "def make_df(\n save_df, files, widths, heights, particle_widths, particle_heights, cutoffs\n):\n\n cutoffs_formatted = [\"%.2f\" 
% elem for elem in cutoffs]\n df_dict = {\n \"filename\": files,\n \"frame width [pixels]\": widths,\n \"frame height [pixels]\": heights,\n \"particle width [microns]\": particle_widths,\n \"particle height [microns]\": particle_heights,\n \"cutoff [%]\": cutoffs_formatted,\n }\n df = pd.DataFrame(df_dict)\n\n # len_before = len(df)\n # df.drop_duplicates(\n # subset=[\n # \"frame width\",\n # \"frame height\",\n # \"particle width\",\n # \"particle height\",\n # \"cutoff\",\n # ],\n # keep=\"first\",\n # inplace=True,\n # )\n # print(len(df))\n # print(\"removed %d duplicates\" % (len_before - len(df)))\n\n df.to_csv(save_df, index=False)", "def print_cuts(cuts):\n for cut in cuts:\n print cut", "def write_cutout(cutout:Cutout2D, filename:str = \"cutout.fits\", overwrite:bool=False, exptime:float=None):\n hdr = cutout.wcs.to_header()\n if exptime is not None:\n assert type(exptime)==float, \"exposure time should be a float\"\n hdr['EXPTIME'] = exptime\n # Note to user: Make sure the image is not normalized when\n # inserting an exposure time. i.e. if the reduced image is normalised,\n # multiply it by the EXPTIME and then pass on the EXPTIME to\n # write_cutout.\n imghdu = fits.PrimaryHDU(cutout.data, hdr)\n hdulist = fits.HDUList([imghdu])\n hdulist.writeto(filename, overwrite=overwrite)\n return", "def get_regions_and_max_cuts(self):\n self.files_to_copy.append(self.cut_file_path + self.cut_file)\n self.max_cuts, self.regions = self.parse_cut_file(\n self.cut_file_path + self.cut_file)\n\n if self.regions_to_run != 'all':\n self.regions = {k:v for (k,v) in self.regions.items()\n if v in self.regions_to_run}\n\n if not self.regions:\n error_string = \"Invalid RegionsToRun arguments. Regions list if\"\n error_string += \" empty.\"\n raise ValueError(error_string)\n\n with open(self.analysis_code_path+\"MaxCuts.cxx\", 'w') as f:\n f.write(\"MaxCuts = {};\".format(self.max_cuts))", "def save(self):\n if self.outfile is not None:\n keep = np.array([False]*len(self.df))\n for c in self.categories:\n keep = keep|pd.notna(self.df[c]).values\n \n self.df[keep].to_csv(self.outfile, index=False)", "def do_save(self, filename: str):\n output_string = b\"\".join([tile.tobytes() for tile in self.tile_data ])\n with open(filename, 'wb') as fout:\n if self.file_format == 'raw':\n fout.write(output_string)\n elif self.file_format == 'ines':\n fout.write(self.ines_data + output_string)\n self.modified = False\n self.filename = filename", "def write_gen_output(subset, generation, reaction_name):\n\twith open(f\"{reaction_name}_output.txt\", \"a\") as f:\n\t\tfor graph in subset:\n\t\t\tf.write(f\"G{generation}\\t{graph.smiles}\\n\")", "def perform_cut(dataset, cut_function):\n data = dataset[:]\n logger.info('Performing cut with function %s', cut_function.__name__)\n cut = cut_function(data)\n logger.info('%d events were cut', np.where(~cut)[0].shape[0])\n return cut", "def DEBUGwrite(variable, filename):\r\n debug_file = open(\"DEBUG\" + os.sep + filename,'w')\r\n debug_file.write(variable.__repr__())\r\n debug_file.close()", "def _print_result_to_file(slice_list):\n\n\twith open(OUTPUT_FILE, 'w') as output_file:\n\t\toutput_file.write(str(len(slice_list)) + '\\n')\n\t\tfor slice in slice_list:\n\t\t\toutput_file.write(str(slice) + '\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fast version of `ntp_bottle_to_cast`.
def _ntp_bottle_to_cast(sB, tB, pB, S, T, P, k, K, tol_p, eos, ppc_fn):
    if K - k > 1:
        # Trim data to valid range and build interpolant
        P = P[k:K]
        Sppc = ppc_fn(P, S[k:K])
        Tppc = ppc_fn(P, T[k:K])

        args = (sB, tB, pB, Sppc, Tppc, P, eos)

        # Search for a sign-change, expanding outward from an initial guess
        lb, ub = guess_to_bounds(_func, pB, P[0], P[-1], args)

        if np.isfinite(lb):
            # A sign change was discovered, so a root exists in the interval.
            # Solve the nonlinear root-finding problem using Brent's method
            p = brent(_func, lb, ub, tol_p, args)

            # Interpolate S and T onto the updated surface
            s, t = ppval_1_nonan_two(p, P, Sppc, Tppc)
        else:
            s, t, p = np.nan, np.nan, np.nan
    else:
        # K - k <= 1, so at most one valid data site. Can't interpolate that.
        s, t, p = np.nan, np.nan, np.nan

    return s, t, p
[ "def nodeCast(disableScriptJobCallbacks=bool, force=bool, disconnectUnmatchedAttrs=bool, copyDynamicAttrs=bool, swapValues=bool, swapNames=bool, disableAPICallbacks=bool):\n pass", "def _type_cast(self, item: str): # noqa: ANN\n for cast in self._casts:\n item = cast.type_cast(item)\n return item", "def bprop_casttag(x, t, out, dout):\n return (unsafe_static_cast(tagged(dout, t), typeof(x)),\n zeros_like(t))", "def test_recast(self):\n self.assertEqual(type(recast(\"1234\")), int)\n self.assertEqual(type(recast(\"123.45\")), float)\n self.assertEqual(type(recast(\"123,45\")), float)\n self.assertEqual(type(recast(\"23.45%\")), float)\n self.assertEqual(type(recast(\"23,45%\")), float)\n self.assertEqual(type(recast(\"Mar 23 00:24:12\")), datetime.datetime)", "def set_cast_func(func):\n\tglobal _castfunc\n\t_castfunc = _autocast if func is None else func", "def ion_ts_2_ntp(ion_ts):\n\n # convert to seconds:\n sys_time = float(ion_ts) / 1000\n\n # convert to NTP\n ntp_time = ntplib.system_to_ntp_time(sys_time)\n\n return ntp_time", "def _typecasters():\n\n typecasters = dict(_PULLCAST.items())\n typecasters.update({\n field.name: field.pullcast\n for field in _extensions(None, None)\n if field.getter is None\n })\n return defaultdict(lambda: str, typecasters)", "def cast(self, *args) -> \"tid_t *\":\n return _ida_pro.tid_array_cast(self, *args)", "def nanotime2datetime(nt: nanotime) -> datetime:\n return datetime.utcfromtimestamp(nt.timestamp())", "async def infer_type_scalar_cast(track, x, t):\n await track.will_check(Number, x)\n await track.check(TypeType, t)\n new_t = await t['value']\n if new_t is ANYTHING:\n raise MyiaTypeError(f'Type to cast to must be known at compile time.')\n elif not ismyiatype(new_t, Number):\n raise MyiaTypeError(f'Cannot cast to {new_t}')\n return new_t", "def propagateTLE(tle, epoch):\n\treturn tle.radec(epoch)", "def _cast_type(self, value, obj=None):\n return value", "def cast(x, dtype):\n\treturn tf.cast(x, dtype)", "def bprop_scalar_cast(x, t, out, dout):\n return (scalar_cast(dout, typeof(x)), t)", "def try_cast(\n expression: _ColumnExpressionOrLiteralArgument[Any],\n type_: _TypeEngineArgument[_T],\n) -> TryCast[_T]:\n return TryCast(expression, type_)", "def _cast_object(x):\n x = _cast_none(x)\n\n if isinstance(x, six.string_types):\n try:\n return json_decode(x)\n except:\n return ast.literal_eval(x)\n else:\n return x", "def _cast_type(self, value):\n try:\n # Try to cast to integer, or JSON\n value = json.loads(value)\n return value\n except ValueError:\n return value", "def _convert_attention_mask(attn_mask, dtype):\n if attn_mask is not None and attn_mask.dtype != dtype:\n attn_mask_dtype = convert_dtype(attn_mask.dtype)\n if attn_mask_dtype == 'bool' or 'int' in attn_mask_dtype:\n attn_mask = (paddle.cast(attn_mask, dtype) - 1.0) * 1e9\n else:\n attn_mask = paddle.cast(attn_mask, dtype)\n return attn_mask", "def recast(val, datatype):\n ret_val = val\n if datatype == 'string':\n ret_val = str(val)\n elif datatype == 'boolean':\n # AWS returns 1s and 0s for boolean for most of the cases\n if val.isdigit():\n ret_val = bool(int(val))\n # AWS returns 'TRUE,FALSE' for Oracle engine\n elif val == 'TRUE':\n ret_val = True\n elif val == 'FALSE':\n ret_val = False\n elif datatype == 'integer':\n if val.isdigit():\n ret_val = int(val)\n elif datatype == 'float':\n ret_val = float(val) if val else 0.0\n\n return ret_val" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate a neutral trajectory through a sequence of casts. Given a sequence of casts with hydrographic properties `(S, T, P)`, calculate a neutral trajectory starting from the first cast at pressure `p0`, or starting from a bottle prior to the first cast with hydrographic properties `(s0, t0, p0)`.
def neutral_trajectory(
    S, T, P, p0, vert_dim=-1, tol_p=1e-4, interp="linear", eos="gsw",
    grav=None, rho_c=None,
):
    eos = make_eos(eos, grav, rho_c)
    ppc_fn = make_pp(interp, kind="1", out="coeffs", nans=False)

    S, T, P = _process_casts(S, T, P, vert_dim)
    nc, nk = S.shape

    # assert(all(size(T) == size(S)), 'T must be same size as S')
    # assert(all(size(P) == size(S)) || all(size(P) == [nk, 1]), 'P must be [nk,nc] or [nk,1]')

    s = np.full(nc, np.nan)
    t = np.full(nc, np.nan)
    p = np.full(nc, np.nan)

    # Loop over casts
    for c in range(0, nc):
        Sc = S[c, :]
        Tc = T[c, :]
        Pc = P[c, :]
        k, K = valid_range_1_two(Sc, Pc)
        if c == 0:
            # Evaluate S and T on first cast at p0
            Sppc = ppc_fn(Pc[k:K], Sc[k:K])
            Tppc = ppc_fn(Pc[k:K], Tc[k:K])
            s[0], t[0] = ppval_1_nonan_two(p0, Pc[k:K], Sppc, Tppc)
            p[0] = p0
        else:
            # Make a neutral connection from previous bottle to the cast (S[c,:], T[c,:], P[c,:])
            s[c], t[c], p[c] = _ntp_bottle_to_cast(
                s[c - 1], t[c - 1], p[c - 1], Sc, Tc, Pc, k, K, tol_p, eos, ppc_fn,
            )

        if np.isnan(p[c]):
            # The neutral trajectory incropped or outcropped
            break

    return s, t, p
[ "def _generate_sample_path_no_absorption(self, \n times):\n if self.mu<=-1:\n print(\"Attn: mu must be greater than -1. It is currently %f.\"%self.mu)\n return\n else:\n if not self.conditional:\n x=self.startPosition\n t=self.startTime\n path=[]\n for time in times:\n delta=float(time-t)\n try:\n y=self.Poi.rvs(0.5*x/delta)\n x=self.Gamma.rvs(y+self.mu+1)*2*delta\n except:\n pass\n path.append((time,x))\n t=time\n else:\n path = bridge_creation(self, times, 0)\n return path\n return [(p[0],self.rescalePath(p[1])) for p in path]", "def find_missing_taf_times(lines: [str]) -> [str]:\n last_fm_line = 0\n for i, line in enumerate(lines):\n if line['End-Time'] == '' and is_not_tempo_or_prob(line['Type']):\n last_fm_line = i\n if i < len(lines) - 1:\n for report in lines[i + 1:]:\n if is_not_tempo_or_prob(report['Type']):\n line['End-Time'] = report['Start-Time']\n break\n #Special case for final forcast\n if last_fm_line > 0:\n lines[last_fm_line]['End-Time'] = lines[0]['End-Time']\n return lines", "def no_medics_first_and_second_clauses(states_encoding, actions_encoding, police_teams, t):\n\n if t != 1 and t != 2:\n print(\"timestamp does not match clauses generation method\")\n return\n\n formula = []\n formula += unique_state_clauses(states_encoding, t)\n H = states_encoding[\"H\"]\n S = states_encoding[\"S\"]\n U = states_encoding[\"U\"]\n Q = states_encoding[\"Q\"]\n P = actions_encoding[\"P\"]\n formula += action_state_clauses(P, S, police_teams, t)\n\n X = H.shape[0]\n Y = H.shape[1]\n\n for x in range(X):\n for y in range(Y):\n # Q[x, y, t] ==> Q[x, y, t-1] | P[x, y, t-1]\n # Q[x, y, t-1] ==> Q[x, y, t]\n # P[x, y, t-1] ==> Q[x, y, t]\n formula.append([-Q[x, y, t], P[x, y, t-1], Q[x, y, t-1]])\n # check whether this helps/interrupts:\n formula.append([-P[x, y, t-1], Q[x, y, t]])\n formula.append([-Q[x, y, t-1], Q[x, y, t]])\n\n # U[x, y, t] ==> U[x, y, t-1]\n # U[x, y, t-1] ==> U[x, y, t]\n formula.append([-U[x, y, t], U[x, y, t-1]])\n formula.append([-U[x, y, t-1], U[x, y, t]])\n\n # if healthy[t] then was healthy[t-1]\n formula.append([-H[x, y, t], H[x, y, t-1]])\n\n adjacent_neighbors = neighbors((x, y), (X, Y))\n for neighbor_x, neighbor_y in adjacent_neighbors:\n # healthy[t] ==> ~sick_neighbor[t-1] | police_at_neighbor[t-1]\n formula.append([-H[x, y, t], -S[neighbor_x, neighbor_y, t-1], P[neighbor_x, neighbor_y, t-1]])\n\n # healthy[t-1] & sick_neighbor[t-1] & ~police_sick_neighbor[t-1] ==> sick[t]\n # formula.append([~H[x, y, t-1], ~S[neighbor_x, neighbor_y, t-1], P[neighbor_x, neighbor_y, t-1],\n # S[x, y, t]])\n\n # sick[t-1] & ~police[t-1] ==> sick[t]\n formula.append([-S[x, y, t-1], P[x, y, t-1], S[x, y, t]])\n\n # sick[t] ==> (sick[t-1] & ~police[t-1]) |\n # (H[t-1] & [(sick_neighbor1[t-1] & ~police_sick_neighbor[t-1]) |\n # (sick_neighbor2[t-1] & ~police_sick_neighbor[t-1]) | ... 
])\n formula += [[-S[x, y, t], S[x, y, t - 1], H[x, y, t - 1]], [-S[x, y, t], -P[x, y, t - 1], H[x, y, t - 1]]]\n clause_prefix = [[-S[x, y, t], S[x, y, t - 1]], [-S[x, y, t], -P[x, y, t - 1]]]\n\n if len(adjacent_neighbors) == 2:\n neighbors_suffix = two_neighbor_suffix(adjacent_neighbors, t, S, P)\n elif len(adjacent_neighbors) == 3:\n neighbors_suffix = three_neighbor_suffix(adjacent_neighbors, t, S, P)\n else:\n neighbors_suffix = four_neighbor_suffix(adjacent_neighbors, t, S, P)\n formula += [base_cond + neighbor_cond for base_cond in clause_prefix\n for neighbor_cond in neighbors_suffix]\n\n return formula", "def _trajectory_centric_planning(self, trajectories):\n # Calculate non-parametric values over the trajectories.\n # Iterate backward through trajectories\n for t in range(len(trajectories) - 1, 0, -1):\n elem = trajectories[t][1]\n s_tp1 = tuple(elem.next_info_state)\n s_t = tuple(elem.info_state)\n a_t = elem.action\n r_t = elem.reward\n legal_actions = elem.legal_actions_mask\n if t < len(trajectories) - 1:\n for action in range(len(legal_actions)):\n if not legal_actions[action]:\n continue\n if action == elem.action:\n self._q_np[s_t][a_t] = (r_t + self._discount * self._v_np[s_tp1])\n else:\n self._agent.info_state = torch.Tensor(\n np.expand_dims(elem.info_state, axis=0))\n q_values_parametric = self._agent._q_network(\n self._agent.info_state).detach().numpy()\n self._q_np[s_t][a_t] = q_values_parametric[0][action]\n\n # Set V(s_t)\n if t == len(trajectories) - 1:\n # Sample from the parametric model.\n self._agent.info_state = torch.Tensor(\n np.expand_dims(elem.info_state, axis=0))\n q_values_parametric = self._agent._q_network(\n self._agent.info_state).detach().numpy()\n self._v_np[s_t] = np.max(q_values_parametric)\n else:\n self._v_np[s_t] = max(self._q_np[s_t])", "def pulse(S, tp=(0., 1.), dt=1., tfinal=10., nosum=False):\n tp = np.asarray(tp)\n if len(tp.shape) == 1:\n if not tp.shape[0] == 2:\n raise ValueError(\"tp is not (n, 2)-shaped\")\n tp = tp.reshape((1, tp.shape[0]))\n if len(tp.shape) == 2:\n if not tp.shape[1] == 2:\n raise ValueError(\"tp is not (n, 2)-shaped\")\n\n # Compute the time increment\n dd = 1\n for tpi in np.nditer(tp.T.copy(order='C')):\n _, di = rat(tpi, 1e-3)\n dd = lcm(di, dd)\n\n _, ddt = rat(dt, 1e-3)\n _, df = rat(tfinal, 1e-3)\n delta_t = 1./lcm(dd, lcm(ddt, df))\n delta_t = max(1e-3, delta_t) # Put a lower limit on delta_t\n if (isinstance(S, collections.Iterable) and len(S)) \\\n and (isinstance(S[0], collections.Iterable) and len(S[0])) \\\n and (isinstance(S[0][0], lti) or _is_zpk(S[0][0]) or _is_num_den(S[0][0]) \\\n or _is_A_B_C_D(S[0][0])):\n pass\n else:\n S = list(zip(S)) #S[input][output]\n y1 = None\n for Si in S:\n y2 = None\n for So in Si:\n _, y2i = step2(So, T=np.arange(0., tfinal + delta_t, delta_t))\n if y2 is None:\n y2 = y2i.reshape((y2i.shape[0], 1, 1))\n else:\n y2 = np.concatenate((y2,\n y2i.reshape((y2i.shape[0], 1, 1))),\n axis=1)\n if y1 is None:\n y1 = y2\n else:\n y1 = np.concatenate((y1, y2), axis=2)\n\n nd = int(np.round(dt/delta_t, 0))\n nf = int(np.round(tfinal/delta_t, 0))\n ndac = tp.shape[0]\n\n ni = len(S) # number of inputs\n\n if ni % ndac != 0:\n raise ValueError('The number of inputs must be divisible by the number of dac timings.')\n # Original comment from the MATLAB sources:\n # This requirement comes from the complex case, where the number of inputs\n # is 2 times the number of dac timings. 
I think this could be tidied up.\n\n # nis: Number of inputs grouped together with a common DAC timing\n # (2 for the complex case)\n nis = int(ni/ndac)\n\n # notice len(S[0]) is the number of outputs for us\n if not nosum: # Sum the responses due to each input set\n y = np.zeros((np.ceil(tfinal/float(dt)) + 1, len(S[0]), nis))\n else:\n y = np.zeros((np.ceil(tfinal/float(dt)) + 1, len(S[0]), ni))\n\n for i in range(ndac):\n n1 = int(np.round(tp[i, 0]/delta_t, 0))\n n2 = int(np.round(tp[i, 1]/delta_t, 0))\n z1 = (n1, y1.shape[1], nis)\n z2 = (n2, y1.shape[1], nis)\n yy = + np.concatenate((np.zeros(z1), y1[:nf-n1+1, :, i*nis:(i + 1)*nis]), axis=0) \\\n - np.concatenate((np.zeros(z2), y1[:nf-n2+1, :, i*nis:(i + 1)*nis]), axis=0)\n yy = yy[::nd, :, :]\n if not nosum: # Sum the responses due to each input set\n y = y + yy\n else:\n y[:, :, i] = yy.reshape(yy.shape[0:2])\n return y", "def turn_based_network(data, pIDs):\n data = data[data['pID'].isin(pIDs)]\n #data.sort_values(by = 'begin', inplace = True, ignore_index = True)\n \n interruptions = interruptive_simultaneous_speech(data, pIDs)\n interjections = non_interruptive_simultaneous_speech(data, pIDs)\n # Sauer & Kauffeld drop simultaneous speech from consideration\n # this is the old code:\n # data = data[~(data['begin'].isin(interruptions['begin'])) &\n # ~(data['begin'].isin(interjections['begin']))]\n # the below removes interjections, since they don't switch a turn, but keeps interruptions, since they do. How to do deal with ties? I think drop ties, or make the arrow go to the one who spoke longer. \n data = data.loc[~data[\"begin\"].isin(interjections[\"begin\"]), ]\n # remove speech events that have the same begin and end times but different speakers. This seems to remove the fewest speaking events while still ignoring Dabbs & Ruback's \"group turns\" as Sauer & Kauffeld seem to have done. 
By removing interjections and duplicates, data are increasing in both begin and end after the sort_values call, so arrows will always go to the person who finished speaking next.\n data = data.loc[~(data.duplicated(subset = [\"begin\", \"end\"], keep = False)), ]\n data.reset_index(inplace = True, drop = True)\n data.sort_values(by = 'begin', inplace = True, ignore_index = True)\n\n towhom = [np.nan] * len(data)\n for i in data.index[:-1]:\n towhom[i] = data.loc[i + 1, 'pID']\n data['towhom'] = towhom\n\n wtw = np.full(shape = (len(pIDs), len(pIDs)), fill_value = 0)\n wtw = pd.DataFrame(wtw, columns = pIDs, index = pIDs)\n\n for i in data.index[:-1]:\n who = data.loc[i, 'pID']\n whom = data.loc[i, 'towhom']\n wtw.loc[who, whom] += 1\n\n # Successive speaking events from the same speaker are considered part of the same turn\n for i, j in zip(wtw.index, list(wtw)):\n if i == j:\n wtw.loc[i, j] = 0\n\n g = nx.from_pandas_adjacency(wtw, create_using = nx.DiGraph)\n\n return g", "def transpose_to_zero(phrase, method=TRANSPOSE_BY_MEAN):\n if method is TRANSPOSE_BY_MEAN or method is True:\n pitches = [note[0] for note in phrase if note[0] != \"R\"]\n mean = np.mean(pitches)\n return [(note[0]-mean if note[0] != \"R\" else \"R\", note[1])\n for note in phrase]\n elif method is TRANSPOSE_BY_FIRST_PITCH:\n pitches = [note[0] for note in phrase if note[0] != \"R\"]\n fp = pitches[0]\n return [(note[0]-fp if note[0] != \"R\" else \"R\", note[1])\n for note in phrase]\n else:\n return phrase", "def photon_scat(L_init):\n\t# Check that user input for L_init is a negative number\n\tif L_init > 0:\n\t\tprint('You must input a negative number!')\n\t\tsys.exit()\n\n # Create array of 10,000 photons\n\tN = 10000\n\tphoton = np.ones(N)\n\n # Mean free path of low and high scattering cross-section modes respectively\n\tl_low = 1000\n\tl_high = 1\n\n\t# Probability that a photon in the low (high) mode will convert to the high (low) mode respectively\n\tP12 = 0.25\n\tP21 = 0.001\n\n # Arrays to store angles of low mode photons, high mode photons and the polarization of escaped photons and initialize the values of distance traveled between scattering events\n\tangles_low = []\n\tangles_high = []\n\tmode = []\n\td_low = 0\t\t\t\t\n\td_high = 0\n\n # Initialize the count of photons that leave the surface of the material, the number of photons exiting that are in the high mode, and the angle (direction) of the photon travel after a scattering event\n\tleave = N\n\tnum_high = 0\n\tangle = 0\n\ti = 0\n # Track each of the N photons as they travel through the material until it leaves the surface or until 20,000 scattering events take place; assume photon will stay in material and scatter indefinitely after 20,000 events\n\twhile (i < N):\n\t\tscat = 1\n\t\tL = L_init\n # Generate three random numbers: 1. for calculating the distance traveled by the photon before it scatter, 2. for determining if the photon switches polarization modes, 3. 
for determining the direction of travel of the photon after scattering\n\t\twhile (L < 0 and scat < 20000):\n\t\t\tx = random.random()\n\t\t\ty = random.random()\n\t\t\td_low = -l_low*np.log(y)\n\t\t\td_high = -l_high*np.log(y)\t\t\t\t\n\n\t\t\tangle = np.random.uniform(0,2*np.pi)\t\n\t\t\tif (photon[i] == 1 and x <= P21):\n\t\t\t\tphoton[i] = 0\n\t\t\telif (photon[i] == 0 and x <= P12):\n\t\t\t\tphoton[i] = 1\n\t\t\tscat = scat + 1\n\n\t\t\tif (photon[i] == 1):\n\t\t\t\tL = L + d_high*np.cos(angle)\n\t\t\tif (photon[i] == 0):\n\t\t\t\tL = L + d_low*np.cos(angle)\n\n\t\t\tif (scat == 20000):\n\t\t\t\tleave = leave - 1\n \n # Store the angle with which each photon exits the surface of the material and count the number of photons in the high mode\n\t\tif (scat < 20000):\n\t\t\tif (angle > np.pi):\n\t\t\t\tangle = 2*np.pi-angle\n\t\t\telse: \n\t\t\t\tangle = angle\n\t\t\tangles_high.append(angle) if photon[i] == 1 else angles_low.append(angle)\n\t\t\tmode.append(photon[i])\n\t\tif (photon[i] == 1 and scat < 20000):\n\t\t\tnum_high = num_high + 1\n\n\t\ti = i + 1\n\n\t# Print results to terminal and plot the angular distribution of photons exiting the surface for both polarization modes\n\tprint('Number of escaped photons that are in the high mode: '+str(num_high))\n\tprint('Total number of photons that escape: '+str(leave))\n\tprint('Fraction of escaped high-mode photons: '+str(num_high/float(leave)))\n\tplt.hist(angles_high,50, alpha=0.5, color='#88CCEE', label='High-Mode Photons')\n\tplt.hist(angles_low, 50, alpha=0.5, color='#DDCC77', label='Low-Mode Photons')\n\tplt.legend(loc='upper right')\n\tplt.xlabel('Degrees [radians]')\n\tplt.ylabel('Number')\n\tplt.grid(which='major',axis='both',color='grey',alpha=0.2,linewidth=1, linestyle='--')\n\tplt.minorticks_on()\n\tplt.savefig('./'+str(abs(L_init))+'_Below.png', dpi=300, bbox_inches='tight', pad_inches=0.1)\n\tplt.clf()", "def propagate_astrometry_and_covariance_matrix(self, a0, c0, t0, t1):\n\n zero, one, two, three = 0, 1, 2, 3\n tau = t1 - t0\n\n # Calculate the normal triad [p0 q0 r0] at t0\n p0, q0, r0 = normal_triad(a0[0], a0[1])\n\n # Convert to internal units (radians, Julian year)\n par0 = a0[2] * self.mastorad\n pma0 = a0[3] * self.mastorad\n pmd0 = a0[4] * self.mastorad\n pmr0 = a0[5] * a0[2] / au_km_year_per_sec * self.mastorad\n\n # Proper motion vector\n pmvec0 = pma0 * p0 + pmd0 * q0\n\n # Auxiliary quantities\n tau2 = tau * tau\n pm02 = pma0**2 + pmd0**2\n w = one + pmr0 * tau\n f2 = one / (one + two * pmr0 * tau + (pm02 + pmr0**2) * tau2)\n f = np.sqrt(f2)\n f3 = f2 * f\n f4 = f2 * f2\n\n # Position vector and parallax at t1\n u = (r0 * w + pmvec0 * tau) * f\n _, ra, dec = cartesian_to_spherical(u[0], u[1], u[2])\n par = par0 * f\n\n # Proper motion vector and radial proper motion at t1\n pmvec = (pmvec0 * (one + pmr0 * tau) - r0 * pmr0**2 * tau) * f3\n pmr = (pmr0 + (pm02 + pmr0**2) * tau) * f2\n\n # Normal triad at t1\n p, q, r = normal_triad(ra, dec)\n\n # Convert parameters at t1 to external units (mas, Julian year)\n pma = np.sum(p * pmvec, axis=0)\n pmd = np.sum(q * pmvec, axis=0)\n\n a = np.zeros_like(a0)\n a[0] = ra\n a[1] = dec\n a[2] = par / self.mastorad\n a[3] = pma / self.mastorad\n a[4] = pmd / self.mastorad\n a[5] = pmr / self.mastorad\n\n # Auxiliary quantities for the partial derivatives\n\n pmz = pmvec0 * f - three * pmvec * w\n pp0 = np.sum(p * p0, axis=0)\n pq0 = np.sum(p * q0, axis=0)\n pr0 = np.sum(p * r0, axis=0)\n qp0 = np.sum(q * p0, axis=0)\n qq0 = np.sum(q * q0, axis=0)\n qr0 = np.sum(q * r0, 
axis=0)\n ppmz = np.sum(p * pmz, axis=0)\n qpmz = np.sum(q * pmz, axis=0)\n\n jacobian = np.zeros_like(c0)\n if c0.ndim == 2:\n jacobian = jacobian[np.newaxis, :, :]\n\n # Partial derivatives\n jacobian[:, 0, 0] = pp0 * w * f - pr0 * pma0 * tau * f\n jacobian[:, 0, 1] = pq0 * w * f - pr0 * pmd0 * tau * f\n jacobian[:, 0, 2] = zero\n jacobian[:, 0, 3] = pp0 * tau * f\n jacobian[:, 0, 4] = pq0 * tau * f\n jacobian[:, 0, 5] = -pma * tau2\n\n jacobian[:, 1, 0] = qp0 * w * f - qr0 * pma0 * tau * f\n jacobian[:, 1, 1] = qq0 * w * f - qr0 * pmd0 * tau * f\n jacobian[:, 1, 2] = zero\n jacobian[:, 1, 3] = qp0 * tau * f\n jacobian[:, 1, 4] = qq0 * tau * f\n jacobian[:, 1, 5] = -pmd * tau2\n\n jacobian[:, 2, 0] = zero\n jacobian[:, 2, 1] = zero\n jacobian[:, 2, 2] = f\n jacobian[:, 2, 3] = -par * pma0 * tau2 * f2\n jacobian[:, 2, 4] = -par * pmd0 * tau2 * f2\n jacobian[:, 2, 5] = -par * w * tau * f2\n\n jacobian[:, 3, 0] = -pp0 * pm02 * tau * f3 - pr0 * pma0 * w * f3\n jacobian[:, 3, 1] = -pq0 * pm02 * tau * f3 - pr0 * pmd0 * w * f3\n jacobian[:, 3, 2] = zero\n jacobian[:, 3, 3] = (\n pp0 * w * f3 - two * pr0 * pma0 * tau * f3 - three * pma * pma0 * tau2 * f2\n )\n jacobian[:, 3, 4] = (\n pq0 * w * f3 - two * pr0 * pmd0 * tau * f3 - three * pma * pmd0 * tau2 * f2\n )\n jacobian[:, 3, 5] = ppmz * tau * f2\n\n jacobian[:, 4, 0] = -qp0 * pm02 * tau * f3 - qr0 * pma0 * w * f3\n jacobian[:, 4, 1] = -qq0 * pm02 * tau * f3 - qr0 * pmd0 * w * f3\n jacobian[:, 4, 2] = zero\n jacobian[:, 4, 3] = (\n qp0 * w * f3 - two * qr0 * pma0 * tau * f3 - three * pmd * pma0 * tau2 * f2\n )\n jacobian[:, 4, 4] = (\n qq0 * w * f3 - two * qr0 * pmd0 * tau * f3 - three * pmd * pmd0 * tau2 * f2\n )\n jacobian[:, 4, 5] = qpmz * tau * f2\n\n jacobian[:, 5, 0] = zero\n jacobian[:, 5, 1] = zero\n jacobian[:, 5, 2] = zero\n jacobian[:, 5, 3] = two * pma0 * w * tau * f4\n jacobian[:, 5, 4] = two * pmd0 * w * tau * f4\n jacobian[:, 5, 5] = (w**2 - pm02 * tau2) * f4\n\n jacobian_transposed = np.zeros_like(jacobian)\n for i in range(jacobian.shape[0]):\n jacobian_transposed[i] = jacobian[i].T\n\n if c0.ndim == 2:\n c = np.matmul(\n jacobian, np.matmul(c0[np.newaxis, :, :], jacobian_transposed)\n )\n else:\n c = np.matmul(jacobian, np.matmul(c0, jacobian_transposed))\n\n return a, np.squeeze(c)", "def make_lead(p):\n\n\tsys_ = kwant.Builder(kwant.TranslationalSymmetry([0,1]),conservation_law=tinyarray.array(np.kron(s_z,I_x)),particle_hole=sigma_ytau_y)\t## ???: symmetries - implementing complex conjugation?\n\tsys_[(lat(x,0) for x in par.middle)] = (2*(p.tx+p.ty) - p.mu)*tau_z\n\tsys_[kwant.builder.HoppingKind((1,0),lat)] = -p.tx*tau_z\n\tsys_[kwant.builder.HoppingKind((0,1),lat)] = -p.ty*tau_z\n\n\treturn sys_", "def calculate_throwoff(shooter, gun, recoil_full, recoil_empty):\n # Ek = 0.5 * m * v^2 = recoil energy\n # Ek = (p^2)/2m\n # Ek*2m = p^2\n # p = math.sqrt(Ek*2m)\n shooterdata = SHOOTER[shooter]\n h_shooter = shooterdata.get(\"height\") / 1000 # m\n m_shooter = shooterdata.get(\"weight\") / 1000 # kg\n s_shooter = shooterdata.get(\"strength\")\n h_shoulder = h_shooter * 4 / 5 # shoulder height\n print(f\"\\n#### {shooter} ({h_shooter:.2f} m, {m_shooter:.2f} kg, \"\n f\"STR: {s_shooter})\")\n # angular momentum is proportional to the moment of inertia and angular\n # spead in radians/s\n # L = Iω\n # L = rmv\n # the shooter is given a backwards rotation around the center of mass,\n # located at shooter's height / 2\n # where r = shooter's height / (4/5) (4 5ths being around shoulder height)\n print(f\"\\n* Shoulder height: 
{h_shoulder:.2f} m\")\n # rifles have three point anchoring (two hands + shoulder)\n if (gun.get(\"type\") == \"rifle\" and not shooterdata.get(\"injured_hand\")):\n print(\"* Using rifle shooting stance\")\n stance_factor = 3\n else:\n print(\"* Using one-handed pistol shooting stance\")\n stance_factor = 1\n\n for recoil in [recoil_full, recoil_empty]:\n if recoil == recoil_full:\n print(\"\\n##### Full Magazine\")\n else:\n print(\"\\n##### Empty Magazine\")\n # (backwards) velocity given to the shooter:\n v_shooter = recoil / m_shooter\n # theoretically, it would be possible to place the pivot point\n # at the centre of mass in the shooter, i.e\n # shoulder height - shooter_height / 2, which would lessen the\n # effect of angular momentum\n orbital_angular_momentum = h_shoulder * m_shooter * v_shooter\n print(f\"\\n* Shooter backwards velocity: {v_shooter:.2f} m/s\")\n print(f\"* Shooter raw orbital angular momentum: \"\n f\"{orbital_angular_momentum:.2f} rad/s\")\n\n # print(\"\\nSkill vs. Throw-off (radians | quarter degrees):\\n\")\n print(\"\\n##### Throw-off, radians (quarter degrees)\\n\")\n print(\"S = standing, C = crouching, P = prone, r = radians, ¼d = quarter degrees\\n\")\n print(\"| Skill | S (r) | S (¼d) | C (r) | C (¼d) | P (r) | P (¼d) |\")\n print(\"|------:|------:|-------:|------:|-------:|------:|-------:|\")\n\n for skill in range(11):\n skill_factor = skill * 0.1 # 10 represents full handling ability\n handling_factor = 1 + (s_shooter * skill_factor * stance_factor)\n # reaction time = 0.15 s for touch, we use this as a base\n # scaling factor to compensate for recoil management\n # this could theoretically be made worse being\n # dazed/injured/drugged/confused/less intelligent/dexterious\n throwoff = (orbital_angular_momentum / handling_factor) * 0.15\n throwoff_qd = ((throwoff*180)/math.pi)/4\n throwoff_c = throwoff / 2\n throwoff_c_qd = ((throwoff_c*180)/math.pi)/4\n throwoff_p = throwoff / 5\n throwoff_p_qd = ((throwoff_p*180)/math.pi)/4\n print(f\"| {skill} | {throwoff:.4f} | {throwoff_qd:.0f} \"\n f\"| {throwoff_c:.4f} | {throwoff_c_qd:.0f} \"\n f\"| {throwoff_p:.4f} | {throwoff_p_qd:.0f} |\")", "def tt(sp): #sop_tt\n t_t = m.const(0)\n for j in range(len(sp)):\n t_t = t_t | ttcube(sp[j])\n return t_t", "def send_trajectory_one_by_one(self,time_duration):\n\n rospy.loginfo(\"Start going to other trajectory points one by one\")\n\n time_last = rospy.Time.now()\n\n # since the first pt has been realized, iteration will start from the second pt \n for traj_idx in range(len(self._time_list)-1):\n # a goal to be sent to action server\n # this goal will contain only one trajectory point\n goal = FollowJointTrajectoryGoal()\n\n # add joint name\n goal.trajectory.joint_names.append('base_x')\n goal.trajectory.joint_names.append('x_y')\n goal.trajectory.joint_names.append('y_car')\n for idx in range(self._joint_num): \n goal.trajectory.joint_names.append(\"joint_a\"+str(idx+1))\n\n # a joint point in the trajectory\n trajPt = JointTrajectoryPoint()\n trajPt.positions.append(self._joints_pos_dict['base_x'][idx+1])\n trajPt.positions.append(self._joints_pos_dict['x_y'][idx+1])\n trajPt.positions.append(self._joints_pos_dict['y_car'][idx+1])\n for idx in range(self._joint_num):\n joint_name = \"joint_a\"+str(idx+1)\n trajPt.positions.append(self._joints_pos_dict[joint_name][traj_idx+1])\n #trajPt.velocities.append(0.0)\n # time to reach the joint trajectory point specified to 1.0 since this will be controlled by my enter\n trajPt.time_from_start = 
rospy.Duration(secs=time_duration)\n # add the joint trajectory point to the goal\n goal.trajectory.points.append(trajPt)\n\n # rospy.loginfo(\"At iteration {} goal has {} points to reach\".format(traj_idx ,len(goal.trajectory.points)))\n\n # rospy.loginfo(\"each of them is \")\n\n for idx in range(len(goal.trajectory.points)):\n print goal.trajectory.points[idx]\n\n goal.trajectory.header.stamp = rospy.Time.now()\n\n # send the goal to the action server\n self._action_client.send_goal(goal)\n\n # wait for the result\n rospy.loginfo(\"Start waiting for go to the next pose\")\n self._action_client.wait_for_result()\n rospy.loginfo(\"Waiting ends\")\n\n # show the error code\n rospy.loginfo(self._action_client.get_result())\n\n # show the time used to move to current position\n time_next = rospy.Time.now()\n rospy.loginfo(\"At iteration {}, time Duration is {}\".format(traj_idx ,(time_next-time_last).to_sec()))\n time_last = time_next\n\n\n # uncomment raw_input() if you want to control the pace of sending goals to the action server\n raw_input()", "def __call__(self):\n if ('_photon_timings' not in self.__dict__) or \\\n ('_photon_channels' not in self.__dict__):\n raise NotImplementedError\n \n # The pulse cache should be immediately transfered after call this function\n self.clear_pulse_cache()\n\n # Correct for PMT Transition Time Spread (skip for pmt afterpulses)\n if '_photon_gains' not in self.__dict__:\n self._photon_timings += np.random.normal(self.config['pmt_transit_time_mean'],\n self.config['pmt_transit_time_spread'],\n len(self._photon_timings))\n\n dt = self.config.get('sample_duration', 10) # Getting dt from the lib just once\n self._n_double_pe = self._n_double_pe_bot = 0 # For truth aft output\n\n counts_start = 0 # Secondary loop index for assigning channel\n for channel, counts in zip(*np.unique(self._photon_channels, return_counts=True)):\n\n #TODO: This is temporary continue to avoid out-of-range error.\n # It should be added a proper method for nVeto PMTs also.\n if channel >= 2000:\n continue\n # Use 'counts' amount of photon for this channel \n _channel_photon_timings = self._photon_timings[counts_start:counts_start+counts]\n counts_start += counts\n if channel in self.config['turned_off_pmts']: continue\n\n # If gain of each photon is not specifically assigned\n # Sample from spe scaling factor distribution and to individual gain\n # In contrast to pmt afterpulse that should have gain determined before this step\n if '_photon_gains' not in self.__dict__:\n if self.config['detector'] == 'XENON1T':\n _channel_photon_gains = self.config['gains'][channel] \\\n * self.uniform_to_pe_arr(np.random.random(len(_channel_photon_timings)), channel)\n\n else:\n _channel_photon_gains = self.config['gains'][channel] \\\n * self.uniform_to_pe_arr(np.random.random(len(_channel_photon_timings)))\n\n # Add some double photoelectron emission by adding another sampled gain\n n_double_pe = np.random.binomial(len(_channel_photon_timings),\n p=self.config['p_double_pe_emision'])\n self._n_double_pe += n_double_pe\n if channel in self.config['channels_bottom']:\n self._n_double_pe_bot += n_double_pe\n\n #_dpe_index = np.random.randint(len(_channel_photon_timings),\n # size=n_double_pe)\n if self.config['detector'] == 'XENON1T':\n _channel_photon_gains[:n_double_pe] += self.config['gains'][channel] \\\n * self.uniform_to_pe_arr(np.random.random(n_double_pe), channel)\n else:\n _channel_photon_gains[:n_double_pe] += self.config['gains'][channel] \\\n * 
self.uniform_to_pe_arr(np.random.random(n_double_pe))\n else:\n _channel_photon_gains = np.array(self._photon_gains[self._photon_channels == channel])\n\n # Build a simulated waveform, length depends on min and max of photon timings\n min_timing, max_timing = np.min(\n _channel_photon_timings), np.max(_channel_photon_timings)\n pulse_left = int(min_timing // dt) - int(self.config['samples_to_store_before'])\n pulse_right = int(max_timing // dt) + int(self.config['samples_to_store_after'])\n pulse_current = np.zeros(pulse_right - pulse_left + 1)\n\n Pulse.add_current(_channel_photon_timings.astype(int),\n _channel_photon_gains,\n pulse_left,\n dt,\n self._pmt_current_templates,\n pulse_current)\n\n # For single event, data of pulse level is small enough to store in dataframe\n self._pulses.append(dict(\n photons = len(_channel_photon_timings),\n channel = channel,\n left = pulse_left,\n right = pulse_right,\n duration = pulse_right - pulse_left + 1,\n current = pulse_current,))", "def move_to_neutral(self,sp):\n self._limb.move_to_neutral(speed=sp)", "def trio_phase(self, parents=False, callset=None):\n\n for c in self.io[0].snp_chromosomes():\n if c in self.io[1].snp_chromosomes() and c in self.io[2].snp_chromosomes():\n _logger.info(\"Phasing SNP data for chromosome '%s'.\" % c)\n ch, fa, mo = {}, {}, {}\n pos, ref, alt, nref, nalt, gt, flag, qual = self.io[0].read_snp(c, callset=callset)\n for i in range(len(pos)):\n k = str(pos[i])+\":\"+ref[i]+\">\"+alt[i]\n ch[k] = (gt[i] % 4,i)\n pos1, ref1, alt1, nref1, nalt1, gt1, flag1, qual1 = self.io[1].read_snp(c, callset=callset)\n for i in range(len(pos1)):\n k = str(pos1[i])+\":\"+ref1[i]+\">\"+alt1[i]\n fa[k] = (gt1[i] % 4,i)\n pos2, ref2, alt2, nref2, nalt2, gt2, flag2, qual2 = self.io[2].read_snp(c, callset=callset)\n for i in range(len(pos2)):\n k = str(pos2[i])+\":\"+ref2[i]+\">\"+alt2[i]\n mo[k] = (gt2[i] % 4,i)\n\n _logger.info(\"Phasing SNP data for child '%s'.\" % c)\n rpos, rref, ralt, rnref, rnalt, rgt, rflag, rqual = [], [], [], [], [], [], [], []\n for i in range(len(pos)):\n k = str(pos[i])+\":\"+ref[i]+\">\"+alt[i]\n resgt = ch[k][0]\n if ch[k][0]==0 or ch[k][0]==3:\n resgt = ch[k][0] + 4\n else:\n if k in fa and k in mo and fa[k][0]!=0 and mo[k][0]!=0:\n if fa[k][0]==3 and mo[k][0] in [1,2]:\n resgt = 5\n #_logger.debug(\"Phasing 5 (1/1 0/1). SNP: %s:%s\" % (c, k))\n elif mo[k][0]==3 and fa[k][0] in [1,2]:\n resgt = 6\n #_logger.debug(\"Phasing 6 (0/1 1/1). SNP: %s:%s\" % (c, k))\n else:\n _logger.debug(\"Unable to phase. Not unique - skipping. SNP: %s:%s\" % (c,k))\n elif k in fa and fa[k][0]!=0:\n resgt = 5\n #_logger.debug(\"Phasing 5 (?/1 0/0). SNP: %s:%s\" % (c, k))\n elif k in mo and mo[k][0]!=0:\n resgt = 6\n #_logger.debug(\"Phasing 6 (0/0 ?/1). SNP: %s:%s\" % (c, k))\n else:\n _logger.debug(\"Unable to phase. Not present in parents - skipping. 
SNP: %s:%s\" % (c,k))\n if resgt != -1:\n rpos.append(pos[i])\n rref.append(ref[i])\n ralt.append(alt[i])\n rnref.append(nref[i])\n rnalt.append(nalt[i])\n rgt.append(gt[i])\n rflag.append(flag[i])\n rqual.append(qual[i])\n rgt[-1] = resgt\n _logger.info(\"Writing phased SNP data for child '%s'.\" % c)\n self.io[0].save_snp(c, rpos, rref, ralt, rnref, rnalt, rgt, rflag, rqual, update=True, callset=callset)\n if parents:\n _logger.info(\"Phasing SNP data for parent 1 '%s'.\" % c)\n rpos, rref, ralt, rnref, rnalt, rgt, rflag, rqual = [], [], [], [], [], [], [], []\n for i in range(len(pos1)):\n k = str(pos1[i])+\":\"+ref1[i]+\">\"+alt1[i]\n resgt = fa[k][0]\n if fa[k][0]==0 or fa[k][0]==3:\n resgt = fa[k][0] + 4\n else:\n if k in ch and ch[k][0]!=0 and (k not in mo or mo[k][0]==0):\n resgt = 5\n elif k in ch and ch[k][0]==3:\n resgt = 5\n elif (k not in ch or ch[k][0]==0):\n resgt = 6\n else:\n _logger.debug(\"Unable to phase. Not unique. SNP: %s:%s\" % (c,k))\n if resgt != -1:\n rpos.append(pos1[i])\n rref.append(ref1[i])\n ralt.append(alt1[i])\n rnref.append(nref1[i])\n rnalt.append(nalt1[i])\n rgt.append(gt1[i])\n rflag.append(flag1[i])\n rqual.append(qual1[i])\n rgt[-1] = resgt\n _logger.info(\"Writing phased SNP data for parent 1 '%s'.\" % c)\n self.io[1].save_snp(c, rpos, rref, ralt, rnref, rnalt, rgt, rflag, rqual, update=True,\n callset=callset)\n\n _logger.info(\"Phasing SNP data for parent 2 '%s'.\" % c)\n rpos, rref, ralt, rnref, rnalt, rgt, rflag, rqual = [], [], [], [], [], [], [], []\n for i in range(len(pos2)):\n k = str(pos2[i])+\":\"+ref2[i]+\">\"+alt2[i]\n resgt = mo[k][0]\n if mo[k][0]==0 or mo[k][0]==3:\n resgt = mo[k][0] + 4\n else:\n if k in ch and ch[k][0]!=0 and (k not in fa or fa[k][0]==0):\n resgt = 6\n elif k in ch and ch[k][0]==3:\n resgt = 6\n elif (k not in ch or ch[k][0]==0):\n resgt = 5\n else:\n _logger.debug(\"Unable to phase. Not unique. 
SNP: %s:%s\" % (c,k))\n if resgt != -1:\n rpos.append(pos2[i])\n rref.append(ref2[i])\n ralt.append(alt2[i])\n rnref.append(nref2[i])\n rnalt.append(nalt2[i])\n rgt.append(gt2[i])\n rflag.append(flag2[i])\n rqual.append(qual2[i])\n rgt[-1] = resgt\n\n _logger.info(\"Writing phased SNP data for parent 2 '%s'.\" % c)\n self.io[2].save_snp(c, rpos, rref, ralt, rnref, rnalt, rgt, rflag, rqual, update=True,\n callset=callset)\n else:\n _logger.info(\"Chromosome '%s' not present in parents data.\" % c)", "def hi1a_thomson_sphere_and_parker_spiral():\n craft_cols = swp.get_craft_colors()\n\n rs = solarconst.radius.to('km').value\n\n # Plot each event individually\n time = [\"{}-06-15\".format(i) for i in range(2008,2013)]\n time = Time(time)\n\n fig, ax = plt.subplots(2, len(time), figsize=(15, 6))\n ax_t = ax[0, :]\n ax_b = ax[1, :]\n\n for t, at, ab in zip(time, ax_t, ax_b):\n\n system = 'HEEQ'\n wnd = swp.get_wind_coords(t, system)\n sta = spice.get_coord(t, 'sta', system, no_velocity=True)\n stb = spice.get_coord(t, 'stb', system, no_velocity=True)\n\n wnd = wnd / rs\n sta = sta / rs\n stb = stb / rs\n\n sta_r = np.sqrt(np.sum(sta**2))\n wnd_r = np.sqrt(np.sum(wnd**2))\n stb_r = np.sqrt(np.sum(stb**2))\n\n for a in [at, ab]:\n # Plot the sun, and source elongations considered\n rad = rs / rs\n xcenter, ycenter = 0, 0\n x, y = get_circ_coords(xcenter, ycenter, rad)\n a.fill(x, y, facecolor='orange', edgecolor='orange', linewidth=2, zorder=1)\n\n rad = (20.0 * rs / rs)\n x, y = get_circ_coords(xcenter, ycenter, rad)\n a.fill(x, y, facecolor='None', edgecolor='k', linewidth=2, zorder=1)\n\n rad = (22.5 * rs / rs)\n x, y = get_circ_coords(xcenter, ycenter, rad)\n\n a.fill(x, y, facecolor='None', edgecolor='k', linewidth=2, zorder=1)\n\n # Plot the TS for craft at x=au, y=0\n rad = sta_r / 2.0\n xcenter, ycenter = sta[1] / 2.0, sta[0] / 2.0\n x, y = get_circ_coords(xcenter, ycenter, rad)\n a.fill(x, y, facecolor='None', edgecolor=craft_cols[1], linestyle='--', linewidth=2, zorder=1)\n\n # Add on the craft\n a.plot(sta[1], sta[0], 's', color=craft_cols[1], label='STA')\n\n a.plot(wnd[1], wnd[0], 'o', color=craft_cols[0], label='WIND')\n\n a.plot(stb[1], stb[0], '^', color=craft_cols[2], label='STB')\n\n # Add on idealised Parker Spiral for STEREO-A, STEREO-B and WIND\n ro = 22.5\n V = 400.0\n phi_fin = np.arctan2(wnd[0], wnd[1])\n x, y = get_parker_spiral(ro, wnd_r, phi_fin, V)\n a.plot(y, x, '-', color=craft_cols[0])\n\n phi_fin = np.arctan2(sta[0], sta[1])\n x, y = get_parker_spiral(ro, sta_r, phi_fin, V)\n a.plot(y, x, '-', color=craft_cols[1])\n\n phi_fin = np.arctan2(stb[0], stb[1])\n x, y = get_parker_spiral(ro, stb_r, phi_fin, V)\n a.plot(y, x, '-', color=craft_cols[2])\n\n label = t.datetime.strftime(\"%Y-%m-%d\")\n at.text(0.6, 0.925, label, color='k', transform=at.transAxes, fontsize=14)\n at.legend(loc=2)\n\n for a in ax_t:\n a.set_xlim(-225, 225)\n a.set_ylim(-225, 225)\n\n for a in ax_b:\n a.set_xlim(-50, 50)\n a.set_ylim(-50, 50)\n\n for a in ax.ravel():\n a.set_xticklabels([])\n a.set_yticklabels([])\n a.set_xticks([])\n a.set_yticks([])\n a.set_aspect('equal')\n a.invert_yaxis()\n\n fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.99, wspace=0, hspace=0)\n proj_dirs = swp.project_info()\n out_name = \"hi1a_ts_relative_locations.png\"\n out_path = os.path.join(proj_dirs['figs'], out_name)\n fig.savefig(out_path)\n return", "def process_walk_to_transit_skim():\n # Create OD table of TAZ centroids to TAP nodes\n serpm_raw = make_path(RAW, \"SERPM\")\n serpm_clean = 
make_path(CLEANED, \"SERPM\")\n taz_centroids = make_path(serpm_raw, \"SERPM_TAZ_Centroids.shp\")\n tap_nodes = make_path(serpm_raw, \"SERPM_TAP_Nodes.shp\")\n tap_id = \"TAP\"\n tap_cutoff = \"15\" # minutes\n solved = []\n for year in YEARS:\n net_suffix, model_year = prep_conf.NET_BY_YEAR[year]\n if model_year not in solved:\n print(f\"Preparing TAP to TAZ skims for model year {model_year}\")\n # Get TAZ to TAP OD table\n # - Skim input\n nd = make_path(NETS_DIR, f\"Walk{net_suffix}.gdb\", \"osm\", \"osm_ND\")\n skim = make_path(serpm_clean, f\"TAZ_to_TAP{net_suffix}.csv\")\n restrictions = None\n # - Create and load problem\n print(\" - Network-based\")\n p_help.generate_od_table(\n origin_pts=taz_centroids,\n origin_name_field=prep_conf.TAZ_COMMON_KEY,\n dest_pts=tap_nodes,\n dest_name_field=tap_id,\n in_nd=nd,\n imped_attr=prep_conf.OSM_IMPED,\n cutoff=tap_cutoff,\n net_loader=prep_conf.NET_LOADER,\n out_table=skim,\n restrictions=restrictions,\n use_hierarchy=False,\n uturns=\"ALLOW_UTURNS\",\n o_location_fields=None,\n d_location_fields=None,\n o_chunk_size=None,\n )\n\n # Estimate simple spatial distance TAZ to TAP for TAZs outside extents of osm network\n print(\" - Spatial-based\")\n taz_layer = arcpy.MakeFeatureLayer_management(taz_centroids, \"TAZ\")\n tap_layer = arcpy.MakeFeatureLayer_management(tap_nodes, \"TAP\")\n edges = make_path(NETS_DIR, f\"Walk{net_suffix}.gdb\", \"osm\", \"edges\")\n net_layer = arcpy.MakeFeatureLayer_management(edges, \"edges\")\n # Set spatial reference\n sr = arcpy.Describe(edges).spatialReference\n mpu = float(sr.metersPerUnit)\n # Get distances and estimate times\n out_rows = []\n try:\n # Select TAZ's that wouldn't load on network\n arcpy.SelectLayerByLocation_management(\n in_layer=taz_layer,\n overlap_type=\"INTERSECT\",\n select_features=edges,\n search_distance=prep_conf.NET_LOADER.search_tolerance,\n selection_type=\"NEW_SELECTION\",\n invert_spatial_relationship=True,\n )\n # Iterate over TAZs\n with arcpy.da.SearchCursor(\n taz_layer,\n [\"SHAPE@\", prep_conf.TAZ_COMMON_KEY],\n spatial_reference=sr,\n ) as taz_c:\n for taz_r in taz_c:\n taz_point, taz_id = taz_r\n # Select TAP's that are within potential walking distance of selected TAZ's\n arcpy.SelectLayerByLocation_management(\n in_layer=tap_layer,\n overlap_type=\"INTERSECT\",\n select_features=taz_point,\n search_distance=prep_conf.IDEAL_WALK_RADIUS,\n selection_type=\"NEW_SELECTION\",\n invert_spatial_relationship=False,\n )\n # Iterate over taps and estimate walk time\n with arcpy.da.SearchCursor(\n tap_layer, [\"SHAPE@\", tap_id], spatial_reference=sr\n ) as tap_c:\n for tap_r in tap_c:\n tap_point, tap_n = tap_r\n grid_dist = abs(\n tap_point.centroid.X - taz_point.centroid.X\n )\n grid_dist += abs(\n tap_point.centroid.Y - taz_point.centroid.Y\n )\n grid_meters = grid_dist * mpu\n grid_minutes = (grid_meters * 60) / (\n prep_conf.IDEAL_WALK_MPH * 1609.344\n )\n if grid_minutes <= float(tap_cutoff):\n out_rows.append(\n [\n f\"{taz_id} - {tap_n}\",\n grid_minutes,\n taz_id,\n tap_n,\n ]\n )\n # Update output csv\n out_df = pd.DataFrame(\n out_rows, columns=[\"Name\", prep_conf.OSM_IMPED, \"OName\", \"DName\"]\n )\n out_df.to_csv(skim, mode=\"a\", header=False, index=False)\n except:\n raise\n finally:\n arcpy.Delete_management(taz_layer)\n arcpy.Delete_management(tap_layer)\n arcpy.Delete_management(net_layer)\n\n # Mark as solved\n solved.append(model_year)", "def tilt_pan_head(self, pan = 0, tilt = 0, duration = 2): #input is in degrees\n pan = pan * np.pi / 180\n tilt = tilt 
* np.pi / 180\n \n point = JointTrajectoryPoint()\n point.positions = [pan, tilt]\n point.time_from_start = rospy.Duration(duration)\n goal = FollowJointTrajectoryGoal()\n\n goal.trajectory.joint_names = [self.PAN_JOINT, self.TILT_JOINT]\n goal.trajectory.points.append(point)\n self.client.send_goal(goal)\n self.client.wait_for_result()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get details with partner sales
def get_partner_details(self, start_date, end_date, partner, is_lpg):
    data_list = []
    domain = [
        ('order_id.date_order', '>=', start_date),
        ('order_id.date_order', '<=', end_date),
        ('order_id.partner_id', '=', partner.id),
    ]
    if is_lpg:
        domain.append(('product_id.is_refile_lpg_product', '=', True))
    pos_order_line_ids = self.env['pos.order.line'].search(domain)
    for rec in pos_order_line_ids:
        data_dict = {'name': rec.order_id.name,
                     'date_order': rec.order_id.date_order,
                     'partner_id': rec.order_id.partner_id and rec.order_id.partner_id.name or '',
                     'product_id': rec.product_id and rec.product_id.name or '',
                     'qty': rec.qty,
                     'price_unit': rec.price_unit,
                     'price_subtotal_incl': rec.price_subtotal_incl,
                     }
        data_list.append(data_dict)
    return data_list
[ "def display_sale_detail(self):\n sale = self.find_brokered_sale_by_id(self.lhs)\n self.msg(sale.display(self.caller))", "def get_sales():\n all_sales = Sales.get_all_sales()\n if all_sales:\n return all_sales, 200\n else:\n raise InvalidUsage('No sales added yet', status_code=404)", "def get_sale(sales_id):\n #use Sales instance to call get_sale function\n a_single_sale = Sales.get_sale(sales_id)\n if a_single_sale:\n return a_single_sale, 200\n else:\n raise InvalidUsage('There is no sale record matching that ID', status_code=404)", "def get_sales_by_seller(seller):\n \n return Sale.query.filter(Sale.seller_name==seller.seller_name).all().order_by(\"date\")", "def get_shops_by_partner(self, **kwargs):\n return self.client.execute(\"shop/get_partner_shop\", \"POST\", kwargs)", "def all_sales():\n return [\n {\n \"sale_id\": 1,\n \"product\": \"Samsung Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 4500000\n },\n {\n \"sale_id\": 2,\n \"product\": \"Toshiba Flatscreen Tv\",\n \"quantity\": 6,\n \"price\": 9000000\n },\n {\n \"sale_id\": 3,\n \"product\": \"LG Flatscreen Tv\",\n \"quantity\": 12,\n \"price\": 1500000\n },\n {\n \"sale_id\": 4,\n \"product\": \"Sony Flatscreen Tv\",\n \"quantity\": 1,\n \"price\": 500000\n },\n {\n \"sale_id\": 5,\n \"product\": \"Hisense Flatscreen Tv\",\n \"quantity\": 2,\n \"price\": 800000\n },\n ]", "def rest_sales(request):\n # Check if the user is a developer\n if not request.user.has_perm('gamestore.developer'):\n return permission_denied(request, PermissionDenied)\n\n # Allow only requests for the developers own sales statistics\n orders = request.user.developer.sales.all()\n\n if request.method == 'GET':\n # Filter by order id\n if 'order' in request.GET:\n orders = orders.filter(pk=request.GET['order'])\n # Filter by game name\n if 'game' in request.GET:\n if Game.objects.filter(name=request.GET['game']).exists():\n game = Game.objects.get(name=request.GET['game'])\n orders = orders.filter(game=game)\n else:\n orders = []\n # Filter by buyer (username in this case!)\n if 'buyer' in request.GET:\n if User.objects.filter(username=request.GET['buyer']).exists():\n user = User.objects.get(username=request.GET['buyer'])\n if hasattr(user, 'player'):\n orders = orders.filter(buyer=user.player)\n else:\n orders = []\n else:\n orders = []\n # Search by status (paid or not paid orders)\n if 'status' in request.GET:\n if request.GET['status'] == 'paid':\n orders = orders.filter(status=True)\n elif request.get['status'] == 'not_paid':\n orders = orders.filter(status=False)\n else:\n orders = []\n\n # Convert buyer and seller ids to usernames and game ids to game names and status to paid/not_paid\n data = serializers.serialize('json', orders)\n data_json = json.loads(data)\n for d in data_json:\n d['fields']['buyer'] = Player.objects.get(pk=d['fields']['buyer']).user.username\n d['fields']['seller'] = Developer.objects.get(pk=d['fields']['seller']).user.username\n d['fields']['game'] = Game.objects.get(pk=d['fields']['game']).name\n d['fields']['status'] = 'paid' if Order.objects.get(pk=d['pk']).status else 'not_paid'\n # Remove model information from the returned data\n d.pop('model')\n data = json.dumps(data_json)\n\n return render(request, 'rest_sales.html', {'data': data})", "def get_sales_by_customer(entity):\n \n return Sale.query.filter(Sale.entity_id==entity.id).all().order_by(\"date\")", "def test_get_sales_quick_entries(self):\n pass", "def get_sales(start_date: datetime.datetime, end_date: datetime.datetime, seller_skus: set) -> List:\n\n 
print(\"getting sales data...\")\n interval = create_date_interval(start_date, end_date)\n\n return _get_sales(interval, Granularity.HOUR, seller_skus)", "def sale_get_by_id(current_user, sale_id):\n sale = Sale.query.filter_by(id=sale_id).first()\n sale_json = {\n \"id\": sale.id,\n \"product\": Product.query.filter_by(id=sale.product_id),\n \"quantity\": sale.quantity,\n \"sellingPrice\": sale.sellingPrice,\n \"created_on\": sale.created_on,\n }\n return jsonify(sale_json), 200", "def test_get_specific_sale_order(self):\n self.client.post(\n '/v1/sales',\n data=json.dumps({\n 'id': 1,\n 'name': \"Watch\",\n 'quantity': 3,\n 'price': 45000\n }),\n content_type=\"application/json\"\n )\n response = self.client.get(\n '/v1/sales/1',\n content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 200)", "def test_get_all_sales(self):\n response = self.client.get(\n '/v1/sales',\n content_type=\"application/json\"\n )\n self.assertEqual(response.status_code, 200)", "def get_sales_forecast(self, cr, uid, ids, context=None):\n\n\n if context is None:\n context = {}\n\n amount = 0.0\n\n new_id = False\n\n products = {}\n value = {}\n\n invoice_ids = []\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',\n 'sep', 'oct', 'nov', 'dec']\n\n inv_obj = self.pool.get('account.invoice')\n forecast_obj = self.pool.get('sales.forecast')\n forecast_line_obj = self.pool.get('sales.forecast.line')\n user_obj = self.pool.get('res.users')\n product_obj = self.pool.get('product.product')\n\n company_id = user_obj.browse(cr, uid, uid).company_id.id\n\n for form in self.browse(cr, uid, ids):\n #create forecast sales without lines\n new_id = forecast_obj.create(cr, uid, {'name': form.name,\n 'analytic_id': form.account_id.id,\n 'commercial_id': uid,\n 'date': time.strftime('%d-%m-%Y'),\n 'company_id': company_id,\n 'state': 'draft'\n })\n for month in range(0,11):\n #I find all the invoices in for each month last year.\n domain = \\\n [('date_invoice','>',str('01-' + str(month + 1) +\n '-' + str(int(time.strftime('%d-%m-%Y')[6:]) - 1))),\n ('date_invoice','<',\n str((calendar.monthrange((int(time.strftime('%d-%m-%Y')[6:]) - 1),\n (month + 1))[1])) + '-' + str(month + 1) + '-' +\n str(int(time.strftime('%d-%m-%Y')[6:]) - 1)),\n ('company_id','=', company_id)]\n\n invoice_ids = inv_obj.search(cr, uid, domain)\n if invoice_ids:\n\n #If invoices, step through lines that share the selected\n #analytic account and save them in a dictionary, with the\n #id of product of the line like key:\n #{Product_Id: [(amount, benefits)]}\n for inv in inv_obj.browse(cr, uid, invoice_ids):\n for line in inv.invoice_line:\n if line.account_analytic_id and \\\n line.account_analytic_id.id == form.account_id.id and \\\n line.product_id:\n\n quantity = self.pool.get('product.uom')._compute_qty(cr, uid, line.uos_id.id,line.quantity, line.product_id.uom_id.id)\n if products.get(line.product_id.id):\n new_val = (products[line.product_id.id][0][0] + quantity,\n products[line.product_id.id][0][1] + line.price_subtotal)\n products[line.product_id.id][0] = new_val\n else:\n products[line.product_id.id] = []\n products[line.product_id.id].append((quantity,\n line.price_subtotal))\n if products:\n for product in products:\n if form.percent_increase:\n #Calculation percentage increase\n qty = products[product][0][0] + \\\n ((form.percent_increase / 100) * \\\n products[product][0][0])\n else:\n qty = products[product][0][0]\n\n cur_forecast = forecast_obj.browse(cr, uid, new_id)\n l_products = 
forecast_line_obj.search(cr, uid,\n [('product_id','=', product),\n ('sales_forecast_id', '=', cur_forecast.id)])\n #If there are already lines created for the same product,\n #update the quantities. Else, I create a new line\n if l_products:\n l = forecast_line_obj.browse(cr, uid, l_products[0])\n if l.product_id.id == product:\n forecast_line_obj.write(cr, uid, l.id,\n {months[month] + '_qty': (qty + \\\n (eval('o.' + (months[month] + '_qty'),{'o': l})))})\n else:\n forecast_line_obj.create(cr, uid, {\n 'sales_forecast_id': new_id,\n 'product_id': product,\n months[month] + '_qty': qty})\n\n products = {}\n\n value = {\n 'domain': str([('id', 'in', [new_id])]),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'sales.forecast',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'res_id': new_id\n }\n\n return value", "def get_sale_by_prod_id(self, prod_id):\n self.cursor.execute(\"Select * from sales where product_id = %s\",\n (prod_id,))\n sales = self.cursor.fetchall()\n if sales:\n return sales", "def get_sale_by_id(self, sale_id):\n self.prod_id = sale_id\n self.cursor.execute(\n \"Select * from sales where sales_id = %s\",\n (self.prod_id,)\n )\n sale = self.cursor.fetchone()\n return sale", "def get_datas(self, start_date, end_date):\n datas = []\n invoice_obj = self.env['commission.invoice']\n invoice_ids = invoice_obj.search(\n [('date', '<=', end_date),\n ('date', '>=', start_date),\n ('inv', '=', True)])\n for value in invoice_ids:\n datas.append({'property': value.property_id.name,\n 'tenancy': value.tenancy.name,\n 'commission': value.amount_total,\n 'agent': value.agent.name,\n })\n return datas", "def get_invoices(self):", "def sale_get_all(current_user):\n sales = Sale.query.all()\n\n sales_json = [\n {\n \"id\": sale.id,\n \"product\": Product.query.filter_by(id=sale.product_id),\n \"selling_price\": sale.selling_price,\n \"quantity\": sale.quantity,\n \"created_on\": sale.created_on,\n }\n for sale in sales\n ]\n\n return jsonify(sales_json)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load WAV file and return its properties. Load WAV file specified by the path (string), and return a list of 1/more numpy array(s) containing the data samples for each channel, the sampling frequency, the number of channels, and the number of samples per channel
def read_wav_file(path):
    # Parse the input file's extension
    extension = os.path.splitext(path)[1]

    # Load the WAV file and set the output parameters
    try:
        if extension.lower() == '.wav':
            [fs, x] = wavfile.read(path)
            num_samples = len(x)
            try:
                num_channels = x.shape[1]
            except:
                num_channels = 1
            data = []
            for channel in range(num_channels):
                if num_channels == 1:
                    data.append(x.astype(np.float32)/float(2**15))
                else:
                    data.append(x[0:,channel].astype(np.float32)/float(2**15))
        else:
            raise IOError("unknown file type")
            return (-1,-1,-1)
    except:
        IOError("file not found")
        return (-1,-1,-1)

    # Return the output data (tuple)
    return (data, fs, num_channels, num_samples)
[ "def read_wave(path):\n with contextlib.closing(wave.open(path, \"rb\")) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000, 48000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def load_wav_to_array(full_path):\n sampling_rate, data = read(full_path)\n return data.astype(np.float32), sampling_rate", "def readWav():\n\n # Read the sound wave from the input.\n sound_wave = wave.open(sys.argv[1], \"r\")\n\n # Get parameters of the sound wave.\n nframes = sound_wave.getnframes()\n framerate = sound_wave.getframerate()\n params = sound_wave.getparams()\n duration = nframes / float(framerate)\n\n print(\"frame rate: %d \" % (framerate,))\n print(\"nframes: %d\" % (nframes,))\n print(\"duration: %f seconds\" % (duration,))\n print(scipy.array(sound_wave))\n\n return (sound_wave, nframes, framerate, duration, params)", "def read_audio(path):\n wave_reader = wave.open(path, \"rb\")\n wave_params = wave_reader.getparams()\n return wave_reader, wave_params", "def read_wav(file):\n f=wave.open(file,\"r\")\n raw_data=f.readframes(f.getnframes())\n array=np.fromstring(raw_data,np.short)\n array.shape=-1,2\n array=array.T.astype(float)[0]\n samplerate=f.getframerate()\n f.close()\n return feature_normalize(array),samplerate", "def read_wav(wavfile):\n assert os.path.isfile(wavfile), \\\n 'ERROR: wivefile file %s does not exist' % wavfile\n\n x, fs, enc = audiolab.wavread(wavfile)\n if len(x.shape) >= 2:\n x = x[:, 0] # Make mono\n\n assert fs == 44100, \\\n \"ERROR: File %s is not sampled at 44100 Hz\" % wavfile\n\n return x, fs", "def load_wav_file(file_path: str) -> Sample:\n # Read the .wav file\n rate, data = wavfile.read(file_path)\n\n # cut the number of data points to the chosen power of 2\n data = np.array(data[:N])\n\n if rate != CD_QUALITY_RATE:\n raise ValueError(\n f'Invalid file rate, found {rate} Hz but '\n f'expected {CD_QUALITY_RATE} Hz')\n\n # Extract file meta data\n file_name = Path(file_path).name\n raw_phoneme = file_name.split('_')[0]\n try:\n phoneme = Phoneme(raw_phoneme.lower())\n except ValueError:\n raise ValueError(f'Invalid phoneme \"{raw_phoneme.lower()}\"')\n\n # Instantiate the associated data object\n return Sample(phoneme, file_name, data)", "def get_audio_features(audio_path):\n\n # create empty dictionaries\n wav2idx = {}\n melspec_dict = {}\n mfcc_dict = {}\n\n # get wav names\n wav_names = [wav for wav in os.listdir(audio_path) if wav.endswith(\"wav\")]\n\n # set sample length\n duration = 14.56\n sampling_rate = 44100\n n_sample_fit = int(duration * sampling_rate)\n \n # enumerate over audio files\n for i, w in enumerate(wav_names):\n\n # get wav idx\n wav2idx[w] = i\n\n # get the full path of an audio file\n wav_path = audio_path + w\n \n # get sampling rate and audio length\n y, sr = librosa.load(wav_path) \n \n n_sample = y.shape[0]\n\n if n_sample > n_sample_fit: # long enough\n y = y[:n_sample_fit]\n\n elif n_sample < n_sample_fit:\n y = np.hstack((y, np.zeros((n_sample_fit - n_sample,))))\n \n logam = librosa.amplitude_to_db\n mel_data = logam(librosa.feature.melspectrogram(y = y, sr= sr,\n hop_length=256, n_fft=512, n_mels=96), ref = 1.0)\n # mel_data = librosa.feature.melspectrogram(y=y, sr=sr, hop_length=256, n_fft=512, n_mels=96)\n # print(np.shape(mel_data))\n mfcc_data = librosa.feature.mfcc(y = y, sr = sr, n_mfcc = 13)\n mfcc_delta = 
librosa.feature.delta(mfcc_data)\n mfcc_delta2 = librosa.feature.delta(mfcc_data, order = 2)\n\n mfcc = np.vstack((mfcc_data, mfcc_delta, mfcc_delta2))\n \n melspec_dict[i] = np.swapaxes(mel_data, 0, 1)\n mfcc_dict[i] = mfcc\n\n return wav2idx, wav_names, melspec_dict, mfcc_dict", "def read_wav(filename):\n s,fs = load(filename) # scipy reads int\n s = np.array(s)/float(max(abs(s)))\n s = add_wgn(s) # Add jitter for numerical stability\n return fs,s", "def wav_data(mono_wav):\n the_data = fft.data_from_file(mono_wav)\n return the_data", "def _read_audio(self, path:str):\n try:\n extension = path.split('.')[-1]\n sound = AudioSegment.from_file(path)\n self.audio = np.array(sound.get_array_of_samples())\n self.original_rate = sound.frame_rate\n if len(self.audio.shape) != 1:\n self.audio = self.audio[:,0]\n \n self.audio_duration = len(self.audio) / self.original_rate\n\n except Exception as e:\n print('please insert a valid audio file')\n print(e)\n raise ValueError('please insert a valid audio file')", "def load_samples(self):\n self.samples = {}\n self.samplens = {}\n for note in self.sampdict.keys():\n rate_in, wavobj = wavfile.read(self.sampdict[note])\n # If it doesn't match the required rate, resample and re-write\n if rate_in != self.samprate:\n wavobj = utils.resample(rate_in, self.samprate, wavobj)\n # force to mono\n wavdat = np.mean(wavobj.data, axis=1)\n # remove DC term \n dc = wavdat.mean()\n wavdat -= dc\n wavdat /= abs(wavdat).max()\n samps = range(wavdat.size)\n self.samples[note] = interp1d(samps, wavdat,\n bounds_error=False,\n fill_value = (0.,0.),\n assume_sorted=True)\n self.samplens[note] = wavdat.size", "def ReadWaveFile(filename):\n f = wave.open(filename, 'rb')\n waveInfo = dict()\n waveInfo[\"nchannels\"] = f.getnchannels()\n waveInfo[\"framerate\"] = f.getframerate()\n waveInfo[\"nframes\"] = f.getnframes()\n waveInfo[\"samplewidth\"] = f.getsampwidth()\n str_data = f.readframes(waveInfo[\"nframes\"])\n\n # np.short is 16-bit length\n wave_data = np.fromstring(str_data, dtype=np.short) \n wave_data = wave_data.astype(np.float16)\n wave_data /= 32768.0\n wave_data.shape = -1, waveInfo[\"nchannels\"]\n return waveInfo, wave_data", "def load_wavelen(wavelength_file: str):\n\n q = np.loadtxt(wavelength_file)\n if q.shape[1] > 2:\n q = q[:, 1:3]\n if q[0, 0] < 100:\n q = q * 1000.0\n wl, fwhm = q.T\n return wl, fwhm", "def load_wav_to_torch(scp_path):\r\n data = read_matrix(scp_path).numpy().reshape(-1)\r\n data = data / MAX_WAV_VALUE\r\n return torch.from_numpy(data).float()", "def read(filename, limit=None):\n # pydub does not support 24-bit wav files, use wavio when this occurs\n try:\n audiofile = AudioSegment.from_file(filename)\n\n if limit:\n audiofile = audiofile[:limit * 1000]\n\n data = np.fromstring(audiofile._data, np.int16)\n\n channels = []\n for chn in range(audiofile.channels):\n channels.append(data[chn::audiofile.channels])\n\n fs = audiofile.frame_rate\n except audioop.error:\n fs, _, audiofile = wavio.readwav(filename)\n\n if limit:\n audiofile = audiofile[:limit * 1000]\n\n audiofile = audiofile.T\n audiofile = audiofile.astype(np.int16)\n\n channels = []\n for chn in audiofile:\n channels.append(chn)\n\n return channels, audiofile.frame_rate", "def wiimote_Load16bitMonoSampleWAV(*args):\n return _wiimote.wiimote_Load16bitMonoSampleWAV(*args)", "def pcm_channels(wave_file):\n global integer_data\n stream = wave.open(wave_file,\"rb\")\n\n num_channels = stream.getnchannels()\n sample_rate = stream.getframerate()\n sample_width = 
stream.getsampwidth()\n num_frames = stream.getnframes()\n\n raw_data = stream.readframes( num_frames ) # Returns byte data\n stream.close()\n\n total_samples = num_frames * num_channels\n\n if sample_width == 1: \n fmt = \"%iB\" % total_samples # read unsigned chars\n elif sample_width == 2:\n fmt = \"%ih\" % total_samples # read signed 2 byte shorts\n else:\n raise ValueError(\"Only supports 8 and 16 bit audio formats.\")\n\n integer_data = struct.unpack(fmt, raw_data)\n del raw_data # Keep memory tidy (who knows how big it might be)", "def read_probe_wavelengths(name):\n\n p = copy.copy(params)\n\n isrf_wavelengths = pd.read_csv(p.d_skirt+name+p.skirt_ext+'_rfpc_wavelengths.dat',comment='#',sep=' ',engine='python',\\\n names=['%i' % i for i in range(4)]) # microns\n # isrf_freq = p.clight/(isrf_wavelengths*1e-6) # Hz\n # isrf_wavelengths['bin_width'] = isrf_wavelengths['3'] - isrf_wavelengths['2']\n bin_width = isrf_wavelengths['1'].values\n wavelengths = isrf_wavelengths['0'].values\n return(wavelengths,bin_width)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
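A minimal usage sketch for the reader above, assuming it lives in a module that imports os, numpy as np, and scipy.io.wavfile; the file name is a placeholder.

data, fs, num_channels, num_samples = read_wav_file("example.wav")
if num_channels != -1:
    left = data[0]  # per-channel float32 samples scaled into [-1.0, 1.0)
    print(fs, num_channels, num_samples, left[:5])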
Perform linear interpolation. Perform the linear interpolation between two equally spaced values (y1, y2).
def linear_interpolation(y1, y2, weight):
    # Return linearly interpolated data value
    return y1*(1.0-weight) + y2*weight
[ "def interpolate(x, y, x1):\r\n\tfor item in x:\r\n\t\titem = float(item)\r\n\tfor item in y:\r\n\t\titem = float(item)\r\n\tx1 = float(x1)\r\n\t \r\n\ty1 = y[0] + (x1 - x[0]) / (x[1] - x[0]) * (y[1] - y[0])\r\n\t\r\n\treturn y1", "def interpolate2(x, y, z, x1, y1):\r\n\ty11 = interpolate(x, z[0], x1)\r\n\ty22 = interpolate(x, z[1], x1)\r\n\t\r\n\tz1 = interpolate(y, [y11, y22], y1)\r\n\t\r\n\treturn z1", "def interpolate(year1, year2, val1, val2):\r\n n_elements = year2 - year1\r\n dif_vals = val2 - val1\r\n dif_by_yr = dif_vals / n_elements\r\n out_list = []\r\n out_list_slope = []\r\n new_val = val1\r\n for i in range(n_elements):\r\n new_val = new_val + dif_by_yr\r\n out_list.append(new_val)\r\n out_list_slope.append(dif_by_yr)\r\n return out_list, out_list_slope", "def _linear_interpolation(self, ua, ub, d1, s=None):\n s = self._s(d1, s=s)\n u = ua + s * (ub - ua)\n return u", "def lerp(a: np.ndarray, b: np.ndarray, x: np.ndarray) -> np.ndarray:\n return a.astype(float) * (1 - x.astype(float)) + b.astype(float) * x", "def interpolate_linear(t, t0, x0, t1, x1):\n if t0 > t1:\n t0, x0, t1, x1 = t1, x1, t0, x0\n if t0 == t1 and not x0 == x1:\n raise ValueError(\"Interpolated quantity can not have two \"\n \"different values at the same time\")\n if t < t0 or t > t1:\n raise ValueError(\"Time argument outside of the given interval\")\n if t == t0:\n return x0\n if t == t1:\n return x1\n timeDelta = 1.0*(t1 - t0)\n t = (t - t0)/timeDelta\n onemt = 1.0 - t\n return onemt*x0 + t*x1", "def linear_interpolation(left_value: float, right_value: float, alpha: float):\n return left_value + alpha * (right_value - left_value)", "def _linear_interp(xi, yi, x):\n # Find lowest xi value >= x (end of segment containing x)\n end = np.atleast_1d(xi.searchsorted(x))\n # Associate any x found outside xi range with closest segment (first or\n # last one). This linearly extrapolates the first and last segment\n # to -inf and +inf, respectively.\n end[end == 0] += 1\n end[end == len(xi)] -= 1\n start = end - 1\n # Ensure that output y has same shape as input x\n # (especially, let scalar input result in scalar output)\n start, end = np.reshape(start, np.shape(x)), np.reshape(end, np.shape(x))\n # Set up weight such that xi[start] => 0 and xi[end] => 1\n end_weight = (x - xi[start]) / (xi[end] - xi[start])\n return (1.0 - end_weight) * yi[start] + end_weight * yi[end]", "def interp_lin(x, y, xp, log_spacing=False):\n with tf.name_scope('interp_lin'):\n if log_spacing:\n x = tf.log(x)\n xp = tf.log(xp)\n\n spacing = x[1] - x[0]\n grid = (xp - x[0]) / spacing\n ind1 = tf.cast(grid, tf.int32)\n ind2 = ind1 + 1\n max_ind = x.shape[0].value\n # set top and bottom indices identical if extending past end of range\n ind2 = tf.minimum(max_ind - 1, ind2)\n\n weight1 = tf.abs(xp - tf.gather(x, ind1)) / spacing\n weight2 = tf.abs(xp - tf.gather(x, ind2)) / spacing\n if log_spacing:\n weight1 = tf.exp(weight1)\n weight2 = tf.exp(weight2)\n\n weight1 = 1. - tf.reshape(weight1, [-1] + [1] * (len(y.shape) - 1))\n weight2 = 1. 
- tf.reshape(weight2, [-1] + [1] * (len(y.shape) - 1))\n\n weight_sum = weight1 + weight2\n weight1 /= weight_sum\n weight2 /= weight_sum\n\n y1 = tf.gather(y, ind1)\n y2 = tf.gather(y, ind2)\n yp = y1 * weight1 + y2 * weight2\n return yp", "def _linear_interpolation(\n left: np.ndarray,\n right: np.ndarray,\n gamma: np.ndarray,\n) -> np.ndarray:\n diff_b_a = np.subtract(right, left)\n lerp_interpolation = np.asanyarray(np.add(left, diff_b_a * gamma))\n np.subtract(\n right, diff_b_a * (1 - gamma), out=lerp_interpolation, where=gamma >= 0.5\n )\n if lerp_interpolation.ndim == 0:\n lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays\n return lerp_interpolation", "def interp_2d_vec(grid1,grid2,value,xi1,xi2,yi):\n\n for i in range(yi.size):\n yi[i] = interp_2d(grid1,grid2,value,xi1[i],xi2[i])", "def interpolation(cls, time, y, time_interp):\r\n f = interp.interp1d(time, y, fill_value=\"extrapolate\")\r\n y_interp = f(time_interp)\r\n return y_interp", "def interp1d(x, y):\n tck = interpolate.splrep(x, y, s=0)\n sclass = spline_return(tck)\n return sclass", "def linear_interpolation(image, pt1, pt2, unknown):\n f1 = (unknown[1] - pt1[1])/(pt2[1] - pt1[1])\n f1 = f1 *image[pt2[0]][pt2[1]]\n f2 = (pt2[1] - unknown[1])/(pt2[1] - pt1[1])*image[pt1[0]][pt1[1]]\n ft = f1 + f2\n\n return ft", "def _regrid_linear(x, y, newx, allow_extrapolation=False):\n\t\n\tif allow_extrapolation:\n\t\t_MATHUTIL_LOG.warning(\"allow_extrapolation=True not honored for regrid_linear\")\n\t\n\tif newx.min() < x.min():\n\t\traise ValueError('x.min(%f) must be smaller than newx.min(%f)' % (x.min(), newx.min()))\n\tif newx.max() > x.max():\n\t\traise ValueError('x.max(%f) must be larger than newx.max(%f)' % (x.max(), newx.max()))\n\n\treturn numpy.interp(newx, x, y)", "def linear_interp(val, lo, hi):\n return (1 - val) * lo + val * hi", "def parametric_interpolation(x ,y, t, type = 'linear'):\n fx_t = interp1d(t,x, fill_value='extrapolate')\n fy_t = interp1d(t,y, fill_value='extrapolate') \n return fx_t, fy_t", "def linear_interpolation(self, source_adata, dest_adata, n_steps):\n if sparse.issparse(source_adata.X):\n source_average = source_adata.X.A.mean(axis=0).reshape((1, source_adata.shape[1]))\n else:\n source_average = source_adata.X.A.mean(axis=0).reshape((1, source_adata.shape[1]))\n\n if sparse.issparse(dest_adata.X):\n dest_average = dest_adata.X.A.mean(axis=0).reshape((1, dest_adata.shape[1]))\n else:\n dest_average = dest_adata.X.A.mean(axis=0).reshape((1, dest_adata.shape[1]))\n start = self.to_latent(source_average)\n end = self.to_latent(dest_average)\n vectors = numpy.zeros((n_steps, start.shape[1]))\n alpha_values = numpy.linspace(0, 1, n_steps)\n for i, alpha in enumerate(alpha_values):\n vector = start * (1 - alpha) + end * alpha\n vectors[i, :] = vector\n vectors = numpy.array(vectors)\n interpolation = self.reconstruct(vectors)\n return interpolation", "def _segment_approx_value_linear(self, r, i1, i2):\n\n Dr = self._r[i2] - self._r[i1]\n X = (r - self._r[i1])/Dr\n \n return self._y[i1] + (self._y[i2] - self._y[i1])*X" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
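A quick sanity check of the weighted form above (standalone usage; the literal values are illustrative only): weight 0 returns y1, weight 1 returns y2, and intermediate weights land proportionally in between.

assert linear_interpolation(2.0, 10.0, 0.0) == 2.0
assert linear_interpolation(2.0, 10.0, 1.0) == 10.0
assert linear_interpolation(2.0, 10.0, 0.25) == 4.0  # a quarter of the way from y1 to y2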
Perform parabolic interpolation. Perform the parabolic interpolation between three equally spaced values and return the interpolated data value.
def parabolic_interpolation(alpha, beta, gamma, x):
    # Perform initial check
    if x == 0:
        return beta
    else:
        offset = alpha
        if beta < offset:
            offset = beta
        if gamma < offset:
            offset = gamma

        # Apply the offset so all three values are positive
        offset = math.fabs(offset) + 1
        alpha += offset
        beta += offset
        gamma += offset

        # Return parabolically interpolated data value
        a = (alpha - 2.0*beta + gamma)/2.0
        if a == 0:
            # The three points are collinear: fall back to linear interpolation
            if x > 1:
                return linear_interpolation(beta, gamma, x) - offset
            else:
                return linear_interpolation(alpha, beta, x + 1) - offset
        else:
            c = (alpha - gamma)/(4.0*a)
            b = beta - a*(c**2)
            return (a*(x - c)*(x - c) + b) - offset
[ "def parab_interpolation(data, xi, yi):\n \n # get the maximum and the 4 neighbouring points \n z1, z2, z3 = data[yi-1:yi+2,xi] #@UnusedVariable\n z4, z2, z5 = data[yi,xi-1:xi+2]\n \n # parabolic interpolation at point (xi, yi)\n x0 = (z5-z4)/(4*z2 - 2*z4 - 2*z5)\n y0 = (z3-z1)/(4*z2 - 2*z1 - 2*z3)\n z0 = z2 + (z1-z3)**2 / (16*z2-8*z1-8*z3) + (z4-z5)**2 / (16*z2-8*z4-8*z5)\n \n return xi+x0, yi+y0, z0", "def parabola(list1, list2, list3, plo=False, pri=False, **kwargs):\n import matplotlib.pyplot as mp\n import numpy as np\n [x1, y1] = list1\n [x2, y2] = list2\n [x3, y3] = list3\n D = x1**2 * (x2 - x3) + x2**2 * (x3 - x1) + x3**2 * (x1 - x2)\n C = np.array([x2 - x3, x3**2 - x2**2, x2 * x3 * (x2 - x3),\n x3 - x1, x1**2 - x3**2, x3 * x1 * (x3 - x1),\n x1 - x2, x2**2 - x1**2, x1 * x2 * (x1 - x2)]\n ).reshape(3, 3)\n yarr = np.array([y1, y2, y3])\n I = C.T / D\n [a, b, c] = np.dot(I, yarr)\n label = str(a) + 'x^2 + ' + str(b) + 'x + ' + str(c)\n if plo:\n x = np.linspace(x1, x3, 101)\n y = a * x**2 + b * x + c\n mp.plot(x, y, label=label, **kwargs)\n if pri:\n print label\n return a, b, c", "def _parabola_3points(x1, y1, x2, y2, x3, y3):\n delta = (x1 - x2)*(x1 - x3)*(x2 - x3)\n a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / delta\n b = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / delta\n c = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / delta\n return a, b, c", "def interpolate3(x, y, z, v, x1, y1, z1):\r\n\tz11 = interpolate2(x, y, v[0], x1, y1)\r\n\tz22 = interpolate2(x, y, v[1], x1, y1)\r\n\t\r\n\tv1 = interpolate(z, [z11, z22], z1)\r\n\t\r\n\treturn v1", "def interpolate_all_on_grid_3D(self):\n\n r3D = self.grid.r3D\n z3D = self.grid.z3D\n phi3D = self.grid.phi3D\n\n if(self.equilibrium_mesh == '3D'):\n #interpolation on 3D mesh: (currently not used in FWR3D)\n #psi on grid\n self.psi_on_grid = self.psi_interp(z3D,r3D)\n\n #B Field on grid, right now, BPhi,BZ, and BR are directly used.\n self.BX_on_grid = -self.BPhi_interp(z3D,r3D)*np.sin(phi3D)+self.BR_interp(z3D,r3D)*np.cos(phi3D)\n self.BY_on_grid = self.BZ_interp(z3D,r3D)\n self.BZ_on_grid = -self.BR_interp(z3D,r3D)*np.sin(phi3D)-self.BPhi_interp(z3D,r3D)*np.cos(phi3D)\n self.B_on_grid = np.sqrt(self.BX_on_grid**2 + self.BY_on_grid**2 + self.BZ_on_grid**2)\n\n\n #Te and Ti on grid\n self.te_on_grid = self.te0_sp(self.psi_on_grid)\n self.ti_on_grid = self.ti0_sp(self.psi_on_grid)\n\n #ne0 on grid\n self.ne0_on_grid = self.ne0_sp(self.psi_on_grid)\n self.ni0_on_grid = self.ni0_sp(self.psi_on_grid)\n elif(self.equilibrium_mesh == '2D'):\n #interpolation on 2D mesh: (used in FWR3D, the FWR3D code will then rotate the whole equilibrium to get the values on 3D mesh.)\n R1D = self.grid.X1D\n Z1D = self.grid.Y1D\n R2D = np.zeros((self.grid.NY,self.grid.NX)) + R1D[np.newaxis,:]\n Z2D = np.zeros_like(R2D) + Z1D[:,np.newaxis]\n\n #psi on 2D grid\n self.psi_on_grid = self.psi_interp(Z2D,R2D)\n out_mask = np.copy(self.psi_on_grid.mask)\n\n Zout = Z2D[out_mask]\n Rout = R2D[out_mask]\n\n #boundary points are obtained by applying ConvexHull on equilibrium grid points\n hull = ConvexHull(self.points)\n p_boundary = self.points[hull.vertices]\n Z_boundary = p_boundary[:,0]\n R_boundary = p_boundary[:,1]\n\n #Now let's calculate *psi* on outside points, first, get the nearest boundary point for each outside point\n nearest_indices = []\n for i in range(len(Zout)):\n Z = Zout[i]\n R = Rout[i]\n nearest_indices.append (np.argmin((Z-Z_boundary)**2 + (R-R_boundary)**2) )\n\n # Then, 
calculate *psi* based on the gradient at these nearest points\n Zn = Z_boundary[nearest_indices]\n Rn = R_boundary[nearest_indices]\n #The value *psi* and its gradiant at this nearest point can by easily obtained\n psi_n = self.psi_interp(Zn,Rn)\n gradpsi_Z,gradpsi_R = self.psi_interp.gradient(Zn,Rn)\n\n psi_out = psi_n + (Zout-Zn)*gradpsi_Z + (Rout-Rn)*gradpsi_R\n\n # Finally, assign these outside values to the original array\n self.psi_on_grid[out_mask] = psi_out\n\n #B on grid\n self.BR_on_grid = self.BR_interp(Z2D,R2D)\n BR_n = self.BR_interp(Zn,Rn)\n gradBR_Z, gradBR_R = self.BR_interp.gradient(Zn,Rn)\n BR_out = BR_n + (Zout-Zn)*gradBR_Z + (Rout-Rn)*gradBR_R\n self.BR_on_grid[out_mask] = BR_out\n\n self.BZ_on_grid = self.BZ_interp(Z2D,R2D)\n BZ_n = self.BZ_interp(Zn,Rn)\n gradBZ_Z, gradBZ_R = self.BZ_interp.gradient(Zn,Rn)\n BZ_out = BZ_n + (Zout-Zn)*gradBZ_Z + (Rout-Rn)*gradBZ_R\n self.BZ_on_grid[out_mask] = BZ_out\n\n self.BPhi_on_grid = self.BPhi_interp(Z2D,R2D)\n BPhi_n = self.BPhi_interp(Zn,Rn)\n gradBPhi_Z, gradBPhi_R = self.BPhi_interp.gradient(Zn,Rn)\n BPhi_out = BPhi_n + (Zout-Zn)*gradBPhi_Z + (Rout-Rn)*gradBPhi_R\n self.BPhi_on_grid[out_mask] = BPhi_out\n\n self.B_on_grid = np.sqrt(self.BR_on_grid**2 + self.BZ_on_grid**2 + self.BPhi_on_grid**2)\n\n\n\n #Te0, Ti0, ne0 and ni0 on grid\n self.te0_on_grid = self.te0_sp(self.psi_on_grid)\n self.ti0_on_grid = self.ti0_sp(self.psi_on_grid)\n self.ne0_on_grid = self.ne0_sp(self.psi_on_grid)\n self.ni0_on_grid = self.ni0_sp(self.psi_on_grid)\n\n\n #ne fluctuations on 3D grid\n\n if(not self.Equilibrium_Only):\n self.dne_ad_on_grid = np.zeros((self.n_cross_section,len(self.time_steps),r3D.shape[0],r3D.shape[1],r3D.shape[2]))\n if self.HaveElectron:\n self.nane_on_grid = np.zeros(self.dne_ad_on_grid.shape)\n if self.load_ions:\n self.dni_on_grid = np.zeros(self.dni_ad_on_grid.shape)\n\n interp_positions = find_interp_positions_v2_upgrade(self)\n\n for k in range(self.n_cross_section):\n print 'center plane {0}.'.format(self.center_planes[k])\n for i in range(len(self.time_steps)):\n print 'time step {0}'.format(self.time_steps[i])\n #for each time step, first create the 2 arrays of quantities for interpolation\n prev = np.zeros( (self.grid.NZ,self.grid.NY,self.grid.NX) )\n next = np.zeros(prev.shape)\n\n #create index dictionary, for each key as plane number and value the corresponding indices where the plane is used as previous or next plane.\n prev_idx = {}\n next_idx = {}\n for j in range(len(self.planes)):\n prev_idx[j] = np.where(self.prevplane == self.planes[j] )\n next_idx[j] = np.where(self.nextplane == self.planes[j] )\n\n #now interpolate adiabatic ne on each toroidal plane for the points using it as previous or next plane.\n for j in range(len(self.planes)):\n if(prev[prev_idx[j]].size != 0):\n prev[prev_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dne_ad[k,i,j,:], fill_value = 0)(np.array([interp_positions[0,0][prev_idx[j]], interp_positions[0,1][prev_idx[j]] ]).T )\n if(next[next_idx[j]].size != 0):\n next[next_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dne_ad[k,i,j,:], fill_value = 0)(np.array([interp_positions[1,0][next_idx[j]], interp_positions[1,1][next_idx[j]] ]).T )\n # on_grid adiabatic ne is then calculated by linearly interpolating values between these two planes\n\n self.dne_ad_on_grid[k,i,...] = prev * interp_positions[1,2,...] 
+ next * interp_positions[0,2,...]\n\n\n if self.HaveElectron:\n #non-adiabatic ne data as well:\n for j in range(len(self.planes)):\n if(prev[prev_idx[j]].size != 0):\n prev[prev_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.nane[k,i,j,:], fill_value = 0)(np.array([interp_positions[0,0][prev_idx[j]], interp_positions[0,1][prev_idx[j]] ]).T )\n if(next[next_idx[j]].size != 0):\n next[next_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.nane[k,i,j,:], fill_value = 0)(np.array([interp_positions[1,0][next_idx[j]], interp_positions[1,1][next_idx[j]] ]).T )\n self.nane_on_grid[k,i,...] = prev * interp_positions[1,2,...] + next * interp_positions[0,2,...]\n\n \"\"\" NOW WE WORK WITH IONS \"\"\"\n\n if self.load_ions:\n #for each time step, first create the 2 arrays of quantities for interpolation\n prev = np.zeros( (self.grid.NZ,self.grid.NY,self.grid.NX) )\n next = np.zeros(prev.shape)\n\n for j in range(len(self.planes)):\n if(prev[prev_idx[j]].size != 0):\n prev[prev_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dni[k,i,j,:], fill_value = 0)(np.array([interp_positions[0,0][prev_idx[j]], interp_positions[0,1][prev_idx[j]] ]).T )\n if(next[next_idx[j]].size != 0):\n next[next_idx[j]] = CloughTocher2DInterpolator(self.Delaunay,self.dni[k,i,j,:], fill_value = 0)(np.array([interp_positions[1,0][next_idx[j]], interp_positions[1,1][next_idx[j]] ]).T )\n self.dni_on_grid[k,i,...] = prev * interp_positions[1,2,...] + next * interp_positions[0,2,...]", "def interpolate_to_points(mesh, points, params_to_interp, make_spherical=False):\n\n if make_spherical:\n map_to_sphere(mesh)\n if isinstance(mesh, str):\n from salvus.mesh.unstructured_mesh import UnstructuredMesh\n\n mesh = UnstructuredMesh.from_h5(mesh)\n elem_centroid = mesh.get_element_centroid()\n print(\"Initializing KDtree...\")\n centroid_tree = KDTree(elem_centroid)\n\n # Get GLL points from old mesh\n gll_points = mesh.points[mesh.connectivity]\n gll_order = mesh.shape_order\n\n # Get elements and interpolation coefficients for new_points\n print(\"Retrieving interpolation weights\")\n elem_indices, coeffs = get_element_weights(\n gll_points, gll_order, centroid_tree, points\n )\n\n num_failed = len(np.where(elem_indices == -1)[0])\n if num_failed > 0:\n print(\n num_failed,\n \"points could not find an enclosing element. \"\n \"These points will be set to zero. 
\"\n \"Please check your domain or the interpolation tuning parameters\",\n )\n\n print(\"Interpolating fields...\")\n vals = np.zeros((len(points), len(params_to_interp)))\n for i, param in enumerate(params_to_interp):\n old_element_nodal_vals = mesh.element_nodal_fields[param]\n vals[:, i] = np.sum(coeffs * old_element_nodal_vals[elem_indices], axis=1)\n return vals", "def parametric_interpolation(x ,y, t, type = 'linear'):\n fx_t = interp1d(t,x, fill_value='extrapolate')\n fy_t = interp1d(t,y, fill_value='extrapolate') \n return fx_t, fy_t", "def interpolate(self, f: callable, a: float, b: float, n: int) -> callable:\r\n\r\n # replace this line with your solution to pass the second test\r\n\r\n def bezier3(P1, P2, P3, P4):\r\n M = np.array(\r\n [[-1, +3, -3, +1],\r\n [+3, -6, +3, 0],\r\n [-3, +3, 0, 0],\r\n [+1, 0, 0, 0]],\r\n dtype=np.float32\r\n )\r\n P = np.array([P1, P2, P3, P4], dtype=np.float64)\r\n\r\n def f(t):\r\n T = np.array([t ** 3, t ** 2, t, 1], dtype=np.float64)\r\n return T.dot(M).dot(P)\r\n\r\n return f\r\n\r\n if n == 1:\r\n return lambda x: f(x)\r\n\r\n nsplines = n // 2 - 2\r\n dots = {}\r\n x_vals = np.linspace(a, b, n)\r\n\r\n func_intervals_range = []\r\n functions = []\r\n\r\n for i in range(0, nsplines): #organizing points of every spline\r\n x1 = x_vals[i * 2 + 1]\r\n if x1 in dots.keys():\r\n y1 = dots[x1]\r\n else:\r\n y1 = f(x1)\r\n dots[x1] = y1\r\n p0 = np.array([x1, y1])\r\n x2 = 2 * x_vals[i * 2 + 1] - x_vals[i * 2]\r\n if x2 in dots.keys():\r\n y2 = dots[x2]\r\n else:\r\n y2 = f(x2)\r\n dots[x2] = y2\r\n p1 = np.array([x2, y2])\r\n x3 = x_vals[i * 2 + 2]\r\n if x3 in dots.keys():\r\n y3 = dots[x3]\r\n else:\r\n y3 = f(x3)\r\n dots[x3] = y3\r\n p2 = np.array([x3, y3])\r\n x4 = x_vals[i * 2 + 3]\r\n if x4 in dots.keys():\r\n y4 = dots[x4]\r\n else:\r\n y4 = f(x4)\r\n dots[x4] = y4\r\n p3 = np.array([x4, y4])\r\n curve = bezier3(p0, p1, p2, p3)\r\n functions.append(curve)\r\n func_intervals_range.append((p0, p3))\r\n\r\n def g(x):\r\n for i in range(len(func_intervals_range)):#looping threw functions range list to find specific location\r\n interpolate_func = functions[i]\r\n if func_intervals_range[i][0][0] <= x < func_intervals_range[i][1][0]:\r\n t = (x - func_intervals_range[i][0][0]) / (\r\n func_intervals_range[i][1][0] - func_intervals_range[i][0][0]) #converting x to t\r\n return interpolate_func(t)[1]\r\n\r\n result = g\r\n return result", "def _parabola(data):\n y = np.asarray(data)\n x = np.linspace(-1, 1, len(y))\n # use only the endpoints; when trying to use the mean of the last few values, the\n # fit is usually not as good since beads expects the endpoints to be 0; may allow\n # setting mean_width as a parameter later\n A = y.min()\n y1 = y[0] - A\n y2 = y[-1] - A\n # mean_width = 5\n # y1 = y[:mean_width].mean() - A\n # y2 = y[-mean_width:].mean() - A\n\n # if parabola == p(x) = A + B * x + C * x**2, find coefficients such that\n # p(x[0]==x1) = y[0] - min(y)==y1, p(x[-1]==x2) = y[-1] - min(y)==y2, and p(x_middle==0) = 0:\n # A = min(y)\n # C = (x1 * y2 - x2 * y1) / (x1 * x2**2 - x2 * x1**2)\n # B = (y1 - C) / x1\n # then replace x1 with -1, x2 with 1, and simplify\n C = (y2 + y1) / 2\n B = C - y1\n\n return A + B * x + C * x**2", "def z(tau, zs):\n\n def l(j,t):\n \"\"\"\n Intermediate values for polynomial interpolation\n \"\"\"\n tau = self.NLPdata['collocation_points']\\\n [self.NLPdata['cp']][self.NLPdata['DEG']]\n return np.prod(np.array([ \n (t - tau[k])/(tau[j] - tau[k]) \n for k in xrange(0,self.NLPdata['DEG']+1) if k is not 
j]))\n\n \n interp_vector = []\n for i in xrange(self.NEQ):\n interp_vector += [np.sum(np.array([l(j, tau)*zs[j,i]\n for j in xrange(0, self.NLPdata['DEG']+1)]))]\n return interp_vector", "def cubic_interp(xi,yi):\t\n\terror_message = \"xi, yi need to be type numpy.ndarray\"\n\tassert (type(xi) is np.ndarray) and (type(yi) is np.ndarray), error_message\n\terror_message = \"You need 4 points!\"\n\tassert (len(xi)==4) and (len(yi)==4), error_message\n\terror_message = \"The xi points have to be unique!\"\n\tassert (len(xi) == len(np.unique(xi))), error_message\n\n\tA = np.vstack([np.ones(4), xi, xi**2,xi**3]).T\n\tc= np.linalg.solve(A,yi)\n\treturn c", "def _interp_ww3(variable_data, lon, lat,\n target_lon, target_lat, method='linear'):\n\n # Most data will be in a numpy MaskedArray but some,\n # such as wind component, will not\n if type(variable_data) == np.ma.core.MaskedArray:\n in_values = variable_data[~variable_data.mask].data\n in_lon = lon[~variable_data.mask].flatten()\n in_lat = lat[~variable_data.mask].flatten()\n else:\n in_values = variable_data.flatten()\n in_lon = lon.flatten()\n in_lat = lat.flatten()\n\n in_points = np.zeros(shape=(len(in_lon), 2))\n in_points[:, 0] = in_lon\n in_points[:, 1] = in_lat\n\n interp_data = interpolate.griddata(\n in_points, in_values, (target_lon, target_lat), method=method)\n\n return interp_data", "def __call__(self,pressure, temperature = linspace(0,20000, 201)):\r\n self.pressure = pressure\r\n self.temperature = temperature\r\n \r\n n = self.alpha.shape[1]\r\n \r\n self.p = sum(self.alpha*(log(self.pressure)**(arange(n)))[newaxis,...], axis =1)\r\n \r\n self.const = self.p[:2]\r\n self.a = self.p[slice(2+0, None, 3)]\r\n self.c = self.p[slice(2+1, None, 3)]\r\n self.delta = self.p[slice(2+2, None, 3)]\r\n \r\n self.a = exp(self.a)\r\n self.c = exp(self.c)\r\n self.delta = exp(self.delta)\r\n \r\n self.specificHeat = sum(self.const[:,newaxis]*temperature[newaxis,...]**arange(self.const.shape[0])[:,newaxis],axis = 0)\r\n for i in range(self.sigmaIdx):\r\n self.specificHeat+= self.a[i]*self.sigma(temperature, self.c[i], self.delta[i])\r\n for i in range(self.sigmaIdx, self.a.shape[0]):\r\n self.specificHeat+= self.a[i]*self.gamma(temperature, self.c[i], self.delta[i])\r\n \r\n self.specificHeat *= 4.184*1000.\r\n with open(\"airSpecificHeat_p=%1.f_nT=%d.pkl\"%(pressure, temperature.shape[0]), 'wb') as f:\r\n pkl.dump(dict(zip(['pressure', 'temperature', 'specificHeat'], \r\n [pressure, temperature, self.specificHeat])), f, -1)\r\n \r\n self.specificHeat[0] = self.specificHeat[1]\r\n return self.specificHeat", "def __call__(self,pressure, temperature = linspace(0,20000, 201)):\r\n self.pressure = pressure\r\n self.temperature = temperature\r\n \r\n n = self.alpha.shape[1]\r\n \r\n self.p = sum(self.alpha*(log(self.pressure)**(arange(n)))[newaxis,...], axis =1)\r\n \r\n self.const = self.p[:2]\r\n self.a = self.p[slice(2+0, None, 3)]\r\n self.c = self.p[slice(2+1, None, 3)]\r\n self.delta = self.p[slice(2+2, None, 3)]\r\n \r\n self.a = exp(self.a)\r\n self.c = exp(self.c)\r\n self.delta = exp(self.delta)\r\n \r\n self.specificEnthalpy = sum(self.const[:,newaxis]*temperature[newaxis,...]**arange(1,self.const.shape[0]+1)[:,newaxis],axis = 0)\r\n for i in range(self.sigmaIdx):\r\n self.specificEnthalpy+= self.a[i]*self.sigma(temperature, self.c[i], self.delta[i])\r\n \r\n self.specificEnthalpy *= 4.184*1000.\r\n with open(\"airSpecificEnthalpy_p=%1.f_nT=%d.pkl\"%(pressure, temperature.shape[0]), 'wb') as f:\r\n pkl.dump(dict(zip(['pressure', 
'temperature', 'specificEnthalpy'], \r\n [pressure, temperature, self.specificEnthalpy])), f, -1)\r\n \r\n return self.specificEnthalpy", "def parabolic_fit_3pt(time, waveform, min_index, frame_idx0, sample_distance = 1):\n time_fit = [0]*3\n data_to_fit = [0]*3\n\n dt = time[1]-time[0]\n data_to_fit[1] = np.float32(waveform[min_index])\n time_fit[1] = time[min_index]\n data_to_fit[0] = np.float32(waveform[min_index-sample_distance])\n time_fit[0] = time[min_index-sample_distance]\n data_to_fit[2] = np.float32(waveform[min_index+sample_distance])\n time_fit[2] = time[min_index+sample_distance]\n coeff_par = np.polyfit(time_fit, data_to_fit, 2)\n\n fit_time = np.linspace(-10*dt, 10*dt, 100) + time[min_index]\n fit_par = np.polyval(coeff_par, fit_time)\n # hold on plot([0 1 2], data_to_fit) plot(newtime, ypar, 'r') hold off\n # calculate the horizontal position of max for a general parabola\n # place into point [0, 1, 2] -> tmax = -b/2a on x = [0, 1, 2]\n tmax_rel = -coeff_par[1]/2/coeff_par[0]\n# # use the displacement calculate before to correct the position\n# # obtained by the max sample\n tmax_abs = frame_idx0+tmax_rel\n\n return tmax_abs, fit_time, fit_par", "def revolute_params_analytic(points):\n # | x_i - c |^2 = r^2, for all i\n # for all i, minimize ||c' c - 2 x_i' c - r^2 ||\n # \n ## for 2D\n # take any pair of points\n # xm_ij = (x_i + x_j)/2\n # (x_i - xm_ij)' (c - xm_ij) = 0\n # (x_j - xm_ij)' (c - xm_ij) = 0\n # [x_i' - xm_ij'] c = (x_i -xm_ij)' xm_ij\n # [x_j' - xm_ij'] = (x_j -xm_ij)' xm_ij\n\n #\n ## for 3D\n # take any triplets of points\n # xm_ijk = (xi + xj + xk)/3\n\n ## for nD\n # take any n points\n d, n = points.shape\n assert n >= d + 1, 'need n > d + 1, n = %d, d = %d' % (n, d)\n nchoosed = comb(n,d)\n A = np.zeros((nchoosed * d, d))\n b = np.zeros(nchoosed * d)\n for i, pt_idx_d_tuple in enumerate(itertools.combinations(range(n), d)):\n mean_point = np.mean(points[:, pt_idx_d_tuple], axis=1)\n for j, pt_idx in enumerate(pt_idx_d_tuple):\n A[d*i + j, :] = points[:, pt_idx] - mean_point\n b[d*i + j] = (points[:, pt_idx] - mean_point).dot(mean_point)\n\n\n c = np.linalg.lstsq(A,b)[0]\n cpts = points - c.reshape(-1,1)\n r = np.mean(np.sqrt(np.sum(cpts**2, axis=0)))\n thetas = np.arctan2(cpts[1,:], cpts[0,:]) # only valid for 2D\n omega = np.mean(np.diff(thetas))\n retvals = list(c)\n retvals.extend([r, thetas[0], omega])\n return retvals", "def interpolate_pvalues(self,label,sarray):\n return self.CLfunc[label](sarray)", "def polynomiale(a: float, b: float, c: float, d: float, x: float) -> float:\n return a*x*x*x + b*x*x + c*x + d", "def extrapolate(gas,fillval,atm,gpar):\n Pmin = min(atm.gas[atm.config.C['P']])\n Pmax = max(atm.gas[atm.config.C['P']])\n #extrapolate constituents in as fixed mixing ratios\n for yvar in atm.config.C:\n if yvar=='Z' or yvar=='P' or yvar=='T' or yvar=='DZ':\n continue\n val = atm.gas[atm.config.C[yvar]][-1]\n for i,P in enumerate(gas[atm.config.C['P']]):\n if P>Pmax:\n gas[atm.config.C[yvar]][i]=val\n #extrapolate T and z as dry adiabat in hydrostatic equilibrium\n for i,P in enumerate(gas[atm.config.C['P']]):\n if P>Pmax:\n dP = P-gas[atm.config.C['P']][i-1]\n P = gas[atm.config.C['P']][i-1]\n T = gas[atm.config.C['T']][i-1]\n cp = 0.0\n for key in properties.specific_heat:\n if key in atm.config.C:\n cp+=properties.specific_heat[key]*gas[atm.config.C[key]][i]\n cp*=properties.R #since the catalogued values are Cp/R\n dT = (properties.R*T)/(cp*P)*dP\n gas[atm.config.C['T']][i] = T+dT\n amu = 0.0\n for key in properties.amu:\n if key 
in atm.config.C:\n amu+=properties.amu[key]*gas[atm.config.C[key]][i]\n g = gpar[0] + 2.0*properties.R*T*np.log(P/gpar[2])/(gpar[1]*amu)/1000.0\n H = properties.R*T/(amu*g)/1000.0\n dz = H*dP/P\n gas[atm.config.C['Z']][i] = gas[atm.config.C['Z']][i-1]-dz\n return gas \n\n #extrapolate out along last slope (move return gas above if you think you want this)\n for yvar in atm.config.C:\n if yvar == 'P':\n continue\n gas[atm.config.C[yvar]] = extrapolateOut(gas[atm.config.C['P']],gas[atm.config.C[yvar]],fillval)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
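A small check of the parabolic form above, assuming the usual convention that alpha, beta, and gamma are samples taken at x = -1, 0, and 1; the test values are illustrative only.

# Samples of f(t) = (t - 0.5)**2 + 2 at t = -1, 0, 1
alpha, beta, gamma = 4.25, 2.25, 2.25
y = parabolic_interpolation(alpha, beta, gamma, 0.5)
assert abs(y - 2.0) < 1e-9  # recovers the parabola's minimum at t = 0.5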
send msg size (with default size length)
 |
\/
ack
 |
\/
send msg
def send(self, msg):
    # send length
    send_only_msg(len(pickle.dumps(msg)))

    # wait for confirmation
    if recv_only_msg() == 'ack':
        # send msg
        send_only_msg(msg)
    else:
        # didn't recv ack for some reason
        raise ConnectionRefusedError
[ "def send_message(conn, msg):\n\n print(\"sending:\", msg)\n\n data = msg.SerializeToString()\n size = encode_varint(len(data))\n conn.sendall(size + data)", "def send_by_size(self, s):\n self.sendall(str(len(s)).zfill(HEADER_SIZE) + s)", "def mqtt_msg(self, msg_size):\r\n if msg_size < MQTT_MSG_MAX_SZ:\r\n self._msg_size_lim = msg_size", "def send(socket, msg_type, payload=None):\n if not payload:\n enc_msg = b''\n elif isinstance(payload, str):\n enc_msg = payload.encode()\n elif isinstance(payload, bytes) and 4 <= msg_type <= 7:\n enc_msg = payload\n else:\n raise InvalidPayload\n len_encmsg = len(enc_msg)\n l_value = len_encmsg - 1\n if len_encmsg == 0: # Empty Header Format\n # for message type = 0\n if msg_type >= 0 and msg_type <= 63:\n msg_tosend = (int(bin(msg_type),2) | 0b11000000 ).to_bytes(1, \"big\") # 1st byte\n try:\n socket.sendall(msg_tosend)\n return True\n except:\n return False\n else:\n raise InvalidType \n elif len_encmsg > 0 and len_encmsg <= 256: # Short Header Format\n # types for which max payload is 0\n if (msg_type >= 0 and msg_type <=9) or (msg_type >=13 and msg_type <= 63):\n byte_x = (int(bin(msg_type),2) | 0b10000000 ).to_bytes(1, \"big\") # 1st byte\n byte_y = l_value.to_bytes(1, \"big\")\n msg_tosend = byte_x+byte_y+enc_msg\n try:\n socket.sendall(msg_tosend)\n return True\n except:\n return False\n else:\n raise InvalidType\n elif len_encmsg > 256 and len_encmsg<=4096: # Long Header Format\n if msg_type >=0 and msg_type <= 7:\n if msg_type == 0: \n byte_y = l_value.to_bytes(2, \"big\") \n msg_tosend = byte_y+enc_msg\n try:\n socket.sendall(msg_tosend)\n return True\n except:\n return False\n elif(msg_type >= 1 and msg_type <= 7):\n byte_x_int = ((int(bin(msg_type),2) << 4) & 0b01110000 ) | ((int(bin(l_value),2) & 0b111100000000) >> 8)\n byte_y_int = int(bin(l_value),2) & 0b000011111111 \n byte_x = byte_x_int.to_bytes(1, \"big\") \n byte_y = byte_y_int.to_bytes(1, \"big\") \n msg_tosend = byte_x+byte_y+enc_msg\n try:\n socket.sendall(msg_tosend)\n return True\n except:\n return False \n else:\n raise InvalidType\n elif len_encmsg > 4096: # Invalid Payload Size\n raise PayloadTooBig", "def pack_n_send(self, sock, typ_pfx: str, msg: str):\n\n packed_msg = self.pack_message(typ_pfx, msg)\n sock.send(packed_msg.encode())", "def _send_xreq(self,msg,flags=0):\r\n self.disp_sock.send(\"\",flags | zmq.SNDMORE)\r\n self.disp_sock.send(msg,flags)", "def send(self, buf):", "def _sendcmd(self, msg):\n msg += self._terminator\n self.write(msg)\n try:\n self.flush()\n except IOError as e:\n logger.warning(\"Exception %s occured during flush().\", repr(e))", "def send(self, data : bytes):\r\n header=numheader.encode32(len(data))\r\n self.socket.send(header+data)", "def send_msg(self, msg):\n if not self._connected:\n self._try_connecting()\n if self._compress_func:\n msg = self._compress_func(msg)\n msg_len = len(msg)\n try:\n self._write_all(struct.pack(\"!i\", msg_len))\n self._write_all(msg)\n except Exception as e:\n self._connected = False\n raise ConnectionTerimated(\n \"Error writing to simpletcp server {0}\".format(e))", "def protocol_send(self, data, sock):", "def send_packet():", "def sendInfo(conn, data):\n\ttry:\n\t\tlengthPrefix = str(len(data)).encode('utf-8') + ':#:'\n\t\tpayload = lengthPrefix + data\n\t\tconn.sendall(payload)\n\texcept socket.error:\n\t\traise", "def check_size(msg):\n\n if len(msg) > TWEET_SIZE:\n return False\n return True", "def send_with_delay(self, sock, delay=3, initial_amount=20):\n data = self.pack()\n if initial_amount < 0 or 
len(data) < initial_amount:\n initial_amount = initial_amount / 2\n dbg('Sending msg_type {0}: first {1} bytes of {2} then sleeping {3} seconds'.format(\n self._header.msg_type,\n initial_amount,\n len(data),\n delay))\n sock.sendall(data[0:initial_amount])\n time.sleep(delay)\n dbg('Sending remaining {0} bytes of msg_type {1}'.format(\n len(data) - initial_amount,\n self._header.msg_type))\n sock.sendall(data[initial_amount:])", "def test_message_exactly_buffsize(self):\n buf_message = \"It's 16 bytes eh\"\n self.send_message(buf_message)\n actual_sent, actual_reply = self.process_log()\n expected_sent = self.sending_msg.format(buf_message)\n self.assertEqual(expected_sent, actual_sent)\n expected_reply = self.received_msg.format(buf_message)\n self.assertEqual(expected_reply, actual_reply)", "def send_file_size(self, filename):\n\t\tlength = os.path.getsize(filename)\n\t\tlength_bytes = pack('>Q', length)\n\t\twith BytesIO(length_bytes) as f:\n\t\t\tself.send_data(f, 8)", "def _txCmd(self, cmd):\n # Serialize the protobuf into a string\n msgStr = cmd.SerializeToString()\n msgLen = len(msgStr)\n # Convert the message length into int32 byte array\n msgHdr = struct.pack('!I', msgLen)\n # Send the message length first\n self.sock.sendall(msgHdr)\n #\tprint(\"hello\")\n # Then send the protobuf\n self.sock.sendall(msgStr)", "def send(self, packet, flags: int = ...) -> int:\n to_send = packet\n sent_len = 0\n if not isinstance(to_send, bytes):\n if not isinstance(packet, GeneralPacket):\n to_send = generate_packet(packet)\n to_send = raw(to_send)\n sent_len = super().send(int_to_bytes(len(to_send)))\n sent_data = super().send(to_send)\n return sent_len + sent_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
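The send_only_msg and recv_only_msg helpers are not shown in this record; the following self-contained sketch of the same size -> ack -> payload handshake over a plain socket is an assumption for illustration, not the original class (the 10-byte size header and the helper names are invented).

import pickle

SIZE_FIELD = 10  # assumed fixed-width header carrying the payload length

def send_pickled(sock, obj):
    payload = pickle.dumps(obj)
    sock.sendall(str(len(payload)).zfill(SIZE_FIELD).encode())  # 1) send msg size
    if sock.recv(3) != b'ack':                                  # 2) wait for ack
        raise ConnectionRefusedError
    sock.sendall(payload)                                       # 3) send msg

def recv_pickled(sock):
    size = int(sock.recv(SIZE_FIELD).decode())                  # 1) read msg size
    sock.sendall(b'ack')                                        # 2) acknowledge
    data = b''
    while len(data) < size:
        chunk = sock.recv(min(4096, size - len(data)))
        if not chunk:
            raise ConnectionError("socket closed mid-message")
        data += chunk
    return pickle.loads(data)                                   # 3) read msg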
Determine whether a dtype is real.
def is_real_dtype(dtype: DType) -> bool:
    return snp.dtype(dtype).kind != "c"
[ "def is_real(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.floating)", "def isreal(self):\n return np.all(np.isreal(self.data))\n # return np.isrealobj(self._data)", "def isdouble(dtype):\n return dtype in ('float64', 'complex128')", "def is_double(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.float_) or numpy.issubdtype(dtype, numpy.complex_)", "def _is_double(v):\n return not isinstance(v, Integral) and isinstance(v, Real)", "def is_real_num(X):\n def each_elem_true(x):\n try:\n float(x)\n return not (np.isnan(x) or np.isinf(x))\n except:\n return False\n f = np.vectorize(each_elem_true)\n return f(X)", "def is_complex(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.complexfloating)", "def is_torch_dtype(x):\n return False if not is_torch_available() else _is_torch_dtype(x)", "def is_numeric_dtype(obj):\n if isclass(obj):\n if issubclass(obj, cudf.core.dtypes.DecimalDtype):\n return True\n if issubclass(obj, _BaseDtype):\n return False\n else:\n if isinstance(\n obj,\n (cudf.Decimal128Dtype, cudf.Decimal64Dtype, cudf.Decimal32Dtype),\n ) or isinstance(\n getattr(obj, \"dtype\", None),\n (cudf.Decimal128Dtype, cudf.Decimal64Dtype, cudf.Decimal32Dtype),\n ):\n return True\n if isinstance(obj, _BaseDtype) or isinstance(\n getattr(obj, \"dtype\", None), _BaseDtype\n ):\n return False\n if isinstance(obj, cudf.BaseIndex):\n return obj._is_numeric()\n return pd_types.is_numeric_dtype(obj)", "def _is_dtype(obj: object) -> TypeGuard[_Dtype]:\n return isinstance(obj, (tf.dtypes.DType, np.dtype)) or (\n isinstance(obj, type) and issubclass(obj, np.number)\n )", "def _is_complex(data):\n return (NUMPY and numpy.iscomplex(data).any()) or (isinstance(data, complex))", "def is_np_float(X) -> bool:\n return \\\n np.issubdtype(type(X), np.floating) or \\\n isinstance(X, np.ndarray) and np.issubdtype(X.dtype, np.floating)", "def __isNumeric(self, arr):\n try:\n return arr.dtype.kind in 'biufc'\n except AttributeError:\n return False", "def is_float_dtype(arr_or_dtype) -> bool:\n if isinstance(arr_or_dtype, cudf.BaseIndex):\n return arr_or_dtype._is_floating()\n return _wrap_pandas_is_dtype_api(pd_types.is_float_dtype)(arr_or_dtype)", "def anyFloat(self):\n for win in self._data:\n if issubclass(win.dtype.type,np.floating):\n return True\n return False", "def is_dtype_numpy(dtype):\n is_torch = is_dtype_tensor(dtype)\n is_num = dtype in (int, float, complex)\n if hasattr(dtype, \"__module__\"):\n is_numpy = dtype.__module__ == \"numpy\"\n else:\n is_numpy = False\n return (is_num or is_numpy) and not is_torch", "def is_dtype_tensor(dtype):\n return isinstance(dtype, torch.dtype) or (dtype == torch.Tensor)", "def test_private_isreal(obj, ref):\n assert pmisc.number._isreal(obj) == ref", "def is_double(self) -> \"bool\":\n return self._value.getType() == Value.FVAL", "def _is_numeric(df, column):\n\n if str(df[column].dtypes) == 'int64' or \\\n str(df[column].dtypes) == 'float64':\n return True\n else:\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
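The kind-based check above can be exercised with plain numpy; snp is presumably a numpy-compatible module (e.g. jax.numpy), so the dtype kinds behave the same way.

import numpy as np

assert np.dtype(np.float32).kind != "c"    # real dtype
assert np.dtype(np.int32).kind != "c"      # integers also count as real here
assert np.dtype(np.complex64).kind == "c"  # complex dtype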
Determine whether a dtype is complex.
def is_complex_dtype(dtype: DType) -> bool:
    return snp.dtype(dtype).kind == "c"
[ "def is_complex(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.complexfloating)", "def _is_complex(data):\n return (NUMPY and numpy.iscomplex(data).any()) or (isinstance(data, complex))", "def complex_for(dtype: DTypeLike) -> \"numpy.dtype[Any]\":\n dtype = _normalize_type(dtype)\n if dtype == numpy.float32:\n return numpy.dtype(\"complex64\")\n if dtype == numpy.float64:\n return numpy.dtype(\"complex128\")\n raise ValueError(f\"{dtype} does not have a corresponding complex type\")", "def _is_complex(input):\n return input.shape[-1] == 2", "def iscomplex(self):\n return np.any(np.iscomplex(self.data))\n # return np.iscomplexobj(self._data)", "def is_complex(self):\n return self.is_number() and isinstance(ast.literal_eval(self.string), complex)", "def appropriate_complex_type_for(X):\n X = asfarray(X)\n\n if np.issubsctype(X.dtype, np.complex64) or np.issubsctype(X.dtype, np.complex128):\n return X.dtype\n elif np.issubsctype(X.dtype, np.float32):\n return np.complex64\n elif np.issubsctype(X.dtype, np.float64):\n return np.complex128\n\n # God knows, err on the side of caution\n return np.complex128", "def is_real(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.floating)", "def isdouble(dtype):\n return dtype in ('float64', 'complex128')", "def complex_type(self):\n return self._complex_type", "def is_flag_complex(self):\n return self == self.graph().clique_complex()", "def test_isComplex(self):\n r = Rates([0,0,0.1j,0,0,0,0,0,0], self.abc_pairs)\n assert r.isComplex()\n \n r = Rates([0,0,0.1,0,0,0,0,0,0], self.abc_pairs)\n assert not r.isComplex()", "def is_double(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.float_) or numpy.issubdtype(dtype, numpy.complex_)", "def test_isSignificantlyComplex(self):\n r = Rates([0,0,0.2j,0,0,0,0,0,0], self.abc_pairs)\n assert r.isSignificantlyComplex()\n assert r.isSignificantlyComplex(0.01)\n assert not r.isSignificantlyComplex(0.2)\n assert not r.isSignificantlyComplex(0.3)\n \n r = Rates([0,0,0.1,0,0,0,0,0,0], self.abc_pairs)\n assert not r.isSignificantlyComplex()\n assert not r.isSignificantlyComplex(1e-30)\n assert not r.isSignificantlyComplex(1e3)", "def has_complex_result(self):\n tfq_sup = self._model.metadata.time_freq_support\n if not tfq_sup:\n return False\n if tfq_sup.complex_frequencies == None:\n return False\n return True", "def isreal(self):\n return np.all(np.isreal(self.data))\n # return np.isrealobj(self._data)", "def complex_desc_supported(self):\n return self.__complex_desc_available", "def is_dtype_numpy(dtype):\n is_torch = is_dtype_tensor(dtype)\n is_num = dtype in (int, float, complex)\n if hasattr(dtype, \"__module__\"):\n is_numpy = dtype.__module__ == \"numpy\"\n else:\n is_numpy = False\n return (is_num or is_numpy) and not is_torch", "def is_CM_extension(self):\n\n try:\n return self.__is_CM_extension\n except(AttributeError):\n pass\n\n if self.relative_degree() == 2:\n if self.base_field().is_totally_real():\n if self.is_totally_imaginary():\n self.__is_CM_extension = True\n self.__is_CM = True\n self.__max_tot_real_sub = [self.base_field(), self._internal_coerce_map_from(self.base_field())]\n return True\n self.__is_CM_extension = False\n return False", "def test_with_complex(self):\r\n df_x1, df_x2, df_z1 = 5,6,7\r\n r = 0.1\r\n z1 = ucomplex(1,[1,r,r,1],df_z1)\r\n \r\n x1 = ureal(1,1,df_x1,independent=False)\r\n x2 = ureal(1,1,df_x2,independent=False)\r\n\r\n m 
= magnitude(z1)\r\n \r\n y = m + x1 + x2\r\n \r\n v_z1 = component(y,z1.real)**2\\\r\n + component(y,z1.imag)**2\\\r\n + 2*component(y,z1.real)*r*component(y,z1.imag)\r\n \r\n den = sum([\r\n component(y,x1)**4/df_x1,\r\n component(y,x2)**4/df_x2,\r\n v_z1**2/df_z1,\r\n ])\r\n nu_eff = variance(y)**2 / den\r\n \r\n df = dof(y)\r\n \r\n self.assertTrue( equivalent(nu_eff,df) )\r\n\r\n # but if others are correlated it fails\r\n self.assertRaises(RuntimeError,set_correlation,r,x1,x2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct the corresponding complex dtype for a given real dtype. Construct the corresponding complex dtype for a given real dtype, e.g. the complex dtype corresponding to `np.float32` is `np.complex64`.
def complex_dtype(dtype: DType) -> DType:
    return (snp.zeros(1, dtype) + 1j).dtype
[ "def complex_for(dtype: DTypeLike) -> \"numpy.dtype[Any]\":\n dtype = _normalize_type(dtype)\n if dtype == numpy.float32:\n return numpy.dtype(\"complex64\")\n if dtype == numpy.float64:\n return numpy.dtype(\"complex128\")\n raise ValueError(f\"{dtype} does not have a corresponding complex type\")", "def appropriate_complex_type_for(X):\n X = asfarray(X)\n\n if np.issubsctype(X.dtype, np.complex64) or np.issubsctype(X.dtype, np.complex128):\n return X.dtype\n elif np.issubsctype(X.dtype, np.float32):\n return np.complex64\n elif np.issubsctype(X.dtype, np.float64):\n return np.complex128\n\n # God knows, err on the side of caution\n return np.complex128", "def to_complex(real, imag=None):\n if imag is None:\n return torch.stack(\n [real, torch.zeros(real.size(), dtype=real.dtype, device=real.device)], -1\n )\n else:\n return torch.stack([real, imag], -1)", "def is_complex(dtype: DTypeLike) -> bool:\n dtype = _normalize_type(dtype)\n return numpy.issubdtype(dtype, numpy.complexfloating)", "def dtype_to_c99(dtype):\n dtype = _dtype_util.obj_to_dtype(dtype)\n if isinstance(dtype, _dtype_util.integers):\n return \"%s_t\" % str(dtype)\n elif dtype == np.float32:\n return \"float\"\n elif dtype == np.float64:\n return \"double\"\n elif dtype == np.complex64:\n return \"float complex\"\n elif dtype == np.complex128:\n return \"double complex\"\n raise TypeError(\"dtype '%s' unsupported\" % str(dtype))", "def dtype_to_ctype(dtype):\n try:\n return ctypes_vector_mapper[dtype]\n except KeyError:\n pass\n if issubclass(dtype, ctypes._SimpleCData):\n # Bypass np.ctypeslib's normalization rules such as\n # `np.ctypeslib.as_ctypes_type(ctypes.c_void_p) -> ctypes.c_ulong`\n return dtype\n else:\n return np.ctypeslib.as_ctypes_type(dtype)", "def c_constant(\n val: Union[int, float, complex, numpy.generic, \"numpy.ndarray[Any, numpy.dtype[Any]]\"],\n dtype: Optional[DTypeLike] = None,\n) -> str:\n if dtype is not None:\n dtype = _promote_type(_normalize_type(dtype))\n elif isinstance(val, (int, float, complex)):\n dtype = min_scalar_type(val)\n else:\n dtype = _promote_type(val.dtype)\n\n numpy_val: Union[numpy.generic, \"numpy.ndarray[Any, numpy.dtype[Any]]\"]\n if isinstance(val, numpy.ndarray):\n numpy_val = numpy.cast[dtype](val)\n else:\n numpy_val = numpy.cast[dtype](val).flat[0]\n\n if len(numpy_val.shape) > 0:\n return _c_constant_arr(numpy_val, numpy_val.shape)\n\n scalar_val: numpy.generic\n if isinstance(numpy_val, numpy.ndarray):\n scalar_val = numpy_val.flat[0]\n else:\n scalar_val = numpy_val\n\n if isinstance(scalar_val, numpy.void) and scalar_val.dtype.names is not None:\n return (\n \"{\" + \", \".join([c_constant(scalar_val[name]) for name in scalar_val.dtype.names]) + \"}\"\n )\n\n if isinstance(scalar_val, numpy.complexfloating):\n return (\n f\"COMPLEX_CTR({_ctype_builtin(dtype)})\"\n + f\"({c_constant(scalar_val.real)}, {c_constant(scalar_val.imag)})\"\n )\n\n if isinstance(scalar_val, numpy.integer):\n if dtype.itemsize > 4:\n postfix = \"L\" if numpy.issubdtype(scalar_val.dtype, numpy.signedinteger) else \"UL\"\n else:\n postfix = \"\"\n return str(scalar_val) + postfix\n\n if isinstance(scalar_val, numpy.floating):\n return repr(float(scalar_val)) + (\"f\" if scalar_val.dtype.itemsize <= 4 else \"\")\n\n raise TypeError(f\"Cannot render a value of type {type(val)} as a C constant\")", "def to_ctypes(dshape):\n if len(dshape) == 1:\n if dshape == coretypes.int8:\n return ctypes.c_int8\n elif dshape == coretypes.int16:\n return ctypes.c_int16\n elif dshape == coretypes.int32:\n 
return ctypes.c_int32\n elif dshape == coretypes.int64:\n return ctypes.c_int64\n elif dshape == coretypes.uint8:\n return ctypes.c_uint8\n elif dshape == coretypes.uint16:\n return ctypes.c_uint16\n elif dshape == coretypes.uint32:\n return ctypes.c_uint32\n elif dshape == coretypes.uint64:\n return ctypes.c_uint64\n elif dshape == coretypes.float32:\n return ctypes.c_float\n elif dshape == coretypes.float64:\n return ctypes.c_double\n elif dshape == coretypes.complex_float32:\n class Complex64(ctypes.Structure):\n _fields_ = [('real', ctypes.c_float),\n ('imag', ctypes.c_float)]\n _blaze_type_ = coretypes.complex_float32\n return Complex64\n elif dshape == coretypes.complex_float64:\n class Complex128(ctypes.Structure):\n _fields_ = [('real', ctypes.c_double),\n ('imag', ctypes.c_double)]\n _blaze_type_ = coretypes.complex_float64\n return Complex128\n elif isinstance(dshape, coretypes.Record):\n fields = [(name, to_ctypes(dshape.fields[name]))\n for name in dshape.names]\n class temp(ctypes.Structure):\n _fields_ = fields\n return temp\n else:\n raise TypeError(\"Cannot convert datashape %r into ctype\" % dshape)\n # Create arrays\n else:\n if isinstance(dshape[0], (coretypes.TypeVar, coretypes.Ellipsis)):\n num = 0\n else:\n num = int(dshape[0])\n return num*to_ctypes(dshape.subarray(1))", "def complex_pad(xfft, fft_size):\n # xfft has at least two dimensions (with the last one being a dimension for\n # a pair of real numbers representing a complex number). Moreover, pytorch\n # supports half-sized fft (one-sided fft) by default.\n half_fft = fft_size // 2 + 1\n pad_shape = tensor(xfft.shape)\n # Omit the last dimension (-1) for complex numbers.\n current_length = xfft.shape[-2]\n if current_length < half_fft:\n pad_shape[-2] = half_fft - current_length\n complex_pad = torch.zeros(*pad_shape, dtype=xfft.dtype,\n device=xfft.device)\n xfft = torch.cat((xfft, complex_pad), dim=-2)\n return xfft", "def get_real(real_or_complex_number):\n result = real_or_complex_number\n if isinstance(real_or_complex_number, complex):\n result = real_or_complex_number.real\n return result", "def work_dtype(dtype: np.dtype) -> np.dtype:\n if dtype.kind == \"u\":\n if dtype.itemsize < 4:\n return np.dtype(\"uint32\")\n else:\n return np.dtype(\"uint64\")\n elif dtype.kind == \"i\":\n if dtype.itemsize < 4:\n return np.dtype(\"int32\")\n else:\n return np.dtype(\"int64\")\n return dtype", "def _to_complex(obj):\n if isinstance(obj, Complex):\n return obj\n elif isinstance(obj, complex):\n return Complex(obj.real, obj.imag)\n elif isinstance(obj, float):\n return Complex(obj, 0)\n elif isinstance(obj, int):\n return Complex(obj, 0)\n else:\n raise TypeError(\"Must be a complex number object\")", "def deg_to_complex(\n angle_deg: Union[ndarray, float], radius: Union[ndarray, float] = 1\n ) -> Union[ndarray, float]:\n # Convert from degrees to radians.\n angle_rad = np.deg2rad(angle_deg)\n # Derive real and imaginary components (also known as a and b)\n real = radius * np.cos(angle_rad)\n imag = radius * np.sin(angle_rad)\n\n # Combine components into a complex number and return.\n return real + 1j * imag", "def _cplxreal(z, tol=None):\n\n z = atleast_1d(z)\n if z.size == 0:\n return z, z\n elif z.ndim != 1:\n raise ValueError('_cplxreal only accepts 1-D input')\n\n if tol is None:\n # Get tolerance from dtype of input\n tol = 100 * np.finfo((1.0 * z).dtype).eps\n\n # Sort by real part, magnitude of imaginary part (speed up further sorting)\n z = z[np.lexsort((abs(z.imag), z.real))]\n\n # Split reals from 
conjugate pairs\n real_indices = abs(z.imag) <= tol * abs(z)\n zr = z[real_indices].real\n\n if len(zr) == len(z):\n # Input is entirely real\n return array([]), zr\n\n # Split positive and negative halves of conjugates\n z = z[~real_indices]\n zp = z[z.imag > 0]\n zn = z[z.imag < 0]\n\n if len(zp) != len(zn):\n raise ValueError('Array contains complex value with no matching '\n 'conjugate.')\n\n # Find runs of (approximately) the same real part\n same_real = np.diff(zp.real) <= tol * abs(zp[:-1])\n diffs = numpy.diff(concatenate(([0], same_real, [0])))\n run_starts = numpy.nonzero(diffs > 0)[0]\n run_stops = numpy.nonzero(diffs < 0)[0]\n\n # Sort each run by their imaginary parts\n for i in range(len(run_starts)):\n start = run_starts[i]\n stop = run_stops[i] + 1\n for chunk in (zp[start:stop], zn[start:stop]):\n chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]\n\n # Check that negatives match positives\n if any(abs(zp - zn.conj()) > tol * abs(zn)):\n raise ValueError('Array contains complex value with no matching '\n 'conjugate.')\n\n # Average out numerical inaccuracy in real vs imag parts of pairs\n zc = (zp + zn.conj()) / 2\n\n return zc, zr", "def dtype_from_ctypes_type(t):\n import _ctypes\n if issubclass(t, _ctypes.Array):\n return _from_ctypes_array(t)\n elif issubclass(t, _ctypes._Pointer):\n raise TypeError(\"ctypes pointers have no dtype equivalent\")\n elif issubclass(t, _ctypes.Structure):\n return _from_ctypes_structure(t)\n elif issubclass(t, _ctypes.Union):\n return _from_ctypes_union(t)\n elif isinstance(getattr(t, '_type_', None), str):\n return _from_ctypes_scalar(t)\n else:\n raise NotImplementedError(\n \"Unknown ctypes type {}\".format(t.__name__))", "def _to_complex(value: Union[List[float], complex]) -> complex:\n if isinstance(value, list) and len(value) == 2:\n return complex(value[0], value[1])\n elif isinstance(value, complex):\n return value\n\n raise TypeError(\"{} is not in a valid complex number format.\".format(value))", "def _to_complex(value: Union[List[float], complex]) -> complex:\n if isinstance(value, list) and len(value) == 2:\n return complex(value[0], value[1])\n elif isinstance(value, complex):\n return value\n\n raise TypeError(f\"{value} is not in a valid complex number format.\")", "def _is_complex(data):\n return (NUMPY and numpy.iscomplex(data).any()) or (isinstance(data, complex))", "def complex_pad_simple(xfft, fft_size):\n half_fft = fft_size // 2 + 1\n current_length = xfft.shape[-2]\n if half_fft > current_length:\n pad_right = half_fft - current_length\n # We have to skip the last dimension that represents the complex number\n # so effectively we use the 2D padding for the 1D complex values.\n return F.pad(input=xfft, pad=(0, 0, 0, pad_right), mode=\"constant\",\n value=0)\n else:\n return xfft", "def _coerce_to_dtype(dtype):\n\n if is_categorical_dtype(dtype):\n dtype = CategoricalDtype()\n elif is_datetime64tz_dtype(dtype):\n dtype = DatetimeTZDtype(dtype)\n elif is_period_dtype(dtype):\n dtype = PeriodDtype(dtype)\n elif is_interval_dtype(dtype):\n dtype = IntervalDtype(dtype)\n else:\n dtype = np.dtype(dtype)\n return dtype" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
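The add-1j trick above can be reproduced with plain numpy, again assuming snp mirrors numpy's type-promotion rules.

import numpy as np

assert (np.zeros(1, np.float32) + 1j).dtype == np.complex64   # float32 -> complex64
assert (np.zeros(1, np.float64) + 1j).dtype == np.complex128  # float64 -> complex128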
List comprehensions in Python 3 when handled as a closure. See if we can combine code.
def listcomp_closure3(node):
    p = self.prec
    self.prec = 27
    code_obj = node[1].attr
    assert iscode(code_obj)
    code = Code(code_obj, self.scanner, self.currentclass)

    ast = self.build_ast(code._tokens, code._customize)
    self.customize(code._customize)

    # skip over: sstmt, stmt, return, ret_expr
    # and other singleton derivations
    while len(ast) == 1 or (
        ast in ("sstmt", "return") and ast[-1] in ("RETURN_LAST", "RETURN_VALUE")
    ):
        self.prec = 100
        ast = ast[0]

    n = ast[1]

    # collections is the name of the expression(s) we are iterating over
    collections = [node[-3]]
    list_ifs = []

    if self.version == 3.0 and n != "list_iter":
        # FIXME 3.0 is a snowflake here. We need
        # special code for this. Not sure if this is totally
        # correct.
        stores = [ast[3]]
        assert ast[4] == "comp_iter"
        n = ast[4]
        # Find the list comprehension body. It is the inner-most
        # node that is not comp_.. .
        while n == "comp_iter":
            if n[0] == "comp_for":
                n = n[0]
                stores.append(n[2])
                n = n[3]
            elif n[0] in ("comp_if", "comp_if_not"):
                n = n[0]
                # FIXME: just a guess
                if n[0].kind == "expr":
                    list_ifs.append(n)
                else:
                    list_ifs.append([1])
                n = n[2]
                pass
            else:
                break
            pass

        # Skip over n[0] which is something like: _[1]
        self.preorder(n[1])
    else:
        assert n == "list_iter"
        stores = []
        # Find the list comprehension body. It is the inner-most
        # node that is not list_.. .
        while n == "list_iter":
            # recurse one step
            n = n[0]
            if n == "list_for":
                stores.append(n[2])
                n = n[3]
                if n[0] == "list_for":
                    # Dog-paddle down largely singleton reductions
                    # to find the collection (expr)
                    c = n[0][0]
                    if c == "expr":
                        c = c[0]
                    # FIXME: grammar is wonky here? Is this really an attribute?
                    if c == "attribute":
                        c = c[0]
                    collections.append(c)
                    pass
            elif n in ("list_if", "list_if_not"):
                # FIXME: just a guess
                if n[0].kind == "expr":
                    list_ifs.append(n)
                else:
                    list_ifs.append([1])
                n = n[2]
                pass
            elif n == "list_if37":
                list_ifs.append(n)
                n = n[-1]
                pass
            elif n == "list_afor":
                collections.append(n[0][0])
                n = n[1]
                stores.append(n[1][0])
                n = n[3]
                pass

        assert n == "lc_body", ast
        self.preorder(n[0])

    # FIXME: add indentation around "for"'s and "in"'s
    n_colls = len(collections)
    for i, store in enumerate(stores):
        if i >= n_colls:
            break
        if collections[i] == "LOAD_DEREF" and co_flags_is_async(code_obj.co_flags):
            self.write(" async")
            pass
        self.write(" for ")
        self.preorder(store)
        self.write(" in ")
        self.preorder(collections[i])
        if i < len(list_ifs):
            self.preorder(list_ifs[i])
            pass
        pass
    self.prec = p
[ "def test_list_comprehension_func():\n source = FUNCTION_TEMPLATE.format('[self for i in range(10)]')\n win = compile_source(source, 'Main', namespace={'RUN_CHECK': IS_PY3})()\n assert win.call()", "def test_list_comprehension_operator():\n source = OPERATOR_TEMPLATE.format('[self for i in range(10)]')\n win = compile_source(source, 'Main', namespace={'RUN_CHECK': IS_PY3})()\n win.ev = True\n assert win.called", "def test_handling_nested_comprehension():\n source = FUNCTION_TEMPLATE.format('{self for i in {j for j in range(10)}}')\n win = compile_source(source, 'Main', namespace={'RUN_CHECK': True})()\n assert win.call()", "def test_listcomp() -> None:\n # 'Normal' for loop\n squares: List[int] = []\n for num in range(5):\n squares.append(num ** 2)\n assert squares == [0, 1, 4, 9, 16]\n assert num == 4 # Side-effect: num exists after the loop completes\n\n # List comprehension - no side-effects\n squares_listcomp = [num ** 2 for num in range(5)]\n assert squares_listcomp == squares", "def nested_list_comp():\n outer = [x * y for x in range(1, 10) for y in [i ** 2 for i in range(10)]]\n print outer", "def test_set_comprehension_func():\n source = FUNCTION_TEMPLATE.format('{self for i in range(10)}')\n win = compile_source(source, 'Main', namespace={'RUN_CHECK': True})()\n assert win.call()", "def collect(sequence, function):\n for seq in __builtin__.map(function, sequence):\n for x in seq:\n yield x", "def listify(gen):\n\n def patched(*args, **kwargs):\n \"\"\"Wrapper function\"\"\"\n return list(gen(*args, **kwargs))\n\n return patched", "def loopIt(func: Callable, *inps: Iterable) -> List:\n\n return [func(*inp) for inp in zip(*inps)]", "def lazy_reduce(f, xs):\r\n return reduce(f, xs)", "def test_dict_comprehension_func():\n source = FUNCTION_TEMPLATE.format('{i: self for i in range(10)}')\n win = compile_source(source, 'Main', namespace={'RUN_CHECK': True})()\n assert win.call()", "def filter_then_map(lst, func_filter, func_map):\n # YOUR CODE GOES HERE #\n return list(map(func_map, filter(func_filter, lst)))", "def __apply__(self) -> list:\n try:\n listOfLambdas = [addtwo(i) for i in self.input_list]\n self.input_list = listOfLambdas\n return listOfLambdas\n except TypeError:\n str = \"Oops! it was unvaild. Plz Try again...\"\n return str", "def get_squares(numbers):\n local_list = numbers\n local_result = []\n for i in local_list:\n r = lambda a: a * a\n local_result.append(r(i))\n # ez: local_result = [x*x for x in local_list]\n return local_result", "def triple_map(func, iterable):\n # YOUR CODE GOES HERE #\n for i in iterable:\n yield func(func(func(i)))", "def map_fn(item):\n ctrls = item[1]\n return (item[0], list(filter(filter_fn, ctrls)))", "def closure_func(x):\n def c():\n return x\n return c", "def pipeline_each(data, fns):\n\tfrom functools import reduce\n\treturn reduce(lambda a, x: list(map(x, a)), fns, data)", "def mapmany(self, function):\r\n return Iterable(itertools.chain.from_iterable(map(function, self.__iterable)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Handle "classdef" nonterminal for 3.0 >= version 3.0 <= 3.5
def n_classdef3(node):
    assert 3.0 <= self.version <= 3.5
    # class definition ('class X(A,B,C):')
    cclass = self.currentclass

    # Pick out various needed bits of information
    # * class_name - the name of the class
    # * subclass_info - the parameters to the class e.g.
    #      class Foo(bar, baz)
    #               ----------
    # * subclass_code - the code for the subclass body
    subclass_info = None
    if node == "classdefdeco2":
        if self.version <= 3.3:
            class_name = node[2][0].attr
        else:
            class_name = node[1][2].attr
        build_class = node
    else:
        build_class = node[0]
        class_name = node[1][0].attr
        build_class = node[0]

    assert "mkfunc" == build_class[1]
    mkfunc = build_class[1]
    if mkfunc[0] in ("kwargs", "no_kwargs"):
        if 3.0 <= self.version <= 3.2:
            for n in mkfunc:
                if hasattr(n, "attr") and iscode(n.attr):
                    subclass_code = n.attr
                    break
                elif n == "expr":
                    subclass_code = n[0].attr
                pass
            pass
        else:
            for n in mkfunc:
                if hasattr(n, "attr") and iscode(n.attr):
                    subclass_code = n.attr
                    break
                pass
            pass
        if node == "classdefdeco2":
            subclass_info = node
        else:
            subclass_info = node[0]
    elif build_class[1][0] == "load_closure":
        # Python 3 with closures not functions
        load_closure = build_class[1]
        if hasattr(load_closure[-3], "attr"):
            # Python 3.3 classes with closures work like this.
            # Note have to test before 3.2 case because
            # index -2 also has an attr.
            subclass_code = find_code_node(load_closure, -3).attr
        elif hasattr(load_closure[-2], "attr"):
            # Python 3.2 works like this
            subclass_code = find_code_node(load_closure, -2).attr
        else:
            raise "Internal Error n_classdef: cannot find class body"
        if hasattr(build_class[3], "__len__"):
            if not subclass_info:
                subclass_info = build_class[3]
        elif hasattr(build_class[2], "__len__"):
            subclass_info = build_class[2]
        else:
            raise "Internal Error n_classdef: cannot superclass name"
    elif not subclass_info:
        if mkfunc[0] in ("no_kwargs", "kwargs"):
            subclass_code = mkfunc[1].attr
        else:
            subclass_code = mkfunc[0].attr
        if node == "classdefdeco2":
            subclass_info = node
        else:
            subclass_info = node[0]

    if node == "classdefdeco2":
        self.write("\n")
    else:
        self.write("\n\n")

    self.currentclass = str(class_name)
    self.write(self.indent, "class ", self.currentclass)
    self.print_super_classes3(subclass_info)
    self.println(":")

    # class body
    self.indent_more()
    self.build_class(subclass_code)
    self.indent_less()

    self.currentclass = cclass
    if len(self.param_stack) > 1:
        self.write("\n\n")
    else:
        self.write("\n\n\n")

    self.prune()
[ "def CLASSDEF(self, node):\r\n for deco in node.decorator_list:\r\n self.handleNode(deco, node)\r\n for baseNode in node.bases:\r\n self.handleNode(baseNode, node)\r\n if not PY2:\r\n for keywordNode in node.keywords:\r\n self.handleNode(keywordNode, node)\r\n self.pushScope(ClassScope)\r\n if self.withDoctest:\r\n self.deferFunction(lambda: self.handleDoctests(node))\r\n for stmt in node.body:\r\n self.handleNode(stmt, node)\r\n self.popScope()\r\n self.addBinding(node, ClassDefinition(node.name, node))", "def compile_class(self):\n self.xml_lines.append(\"<class>\")\n # keyword: class\n # identifier: name of class\n # symbol: {\n self.append_xml_lines(3)\n # compile the variable declarations part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods or functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine()\n # symbol: }\n self.append_next_xml_line()\n self.xml_lines.append(\"</class>\")", "def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")", "def compile_class(self):\n self.tokenizer.advance() # class\n self.class_name = self.tokenizer.advance()[TOKEN_NAME]\n self.tokenizer.advance() # {\n # compile the variables declaration part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods o functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine_dec()\n self.tokenizer.advance() # }", "def class_abbrev(type):\n ...", "def class_declaration(self):\n\n name = self.consume(TokenType.ID, \"Expecting class name\")\n superclass = None\n if self.match(TokenType.LT):\n self.consume(TokenType.ID, \"Expecting superclass name\")\n superclass = Variable(self.previous())\n self.consume(TokenType.LB, \"Expecting '{' before class body\")\n methods = []\n while not self.check(TokenType.RB) and not self.done():\n methods.append(self.function(\"method\"))\n self.consume(TokenType.RB, \"Expecting '}' after class body\")\n return Class(name, methods, superclass)", "def read_class(is_private, tokens):\n name = tokens.pop(0)\n validate_name(name)\n superclass = None\n if tokens[0] == 'extends':\n tokens.pop(0)\n superclass = tokens.pop(0)\n validate_name(superclass)\n if tokens[0] != '{':\n raise SyntaxError('expected {')\n tokens.pop(0)\n exp = []\n while tokens and tokens[0] != '}':\n exp.append(read_statement(tokens))\n if not tokens:\n raise SyntaxError('expected }')\n else:\n tokens.pop(0)\n return {'op': 'class', \n 'name': name, \n 'body': exp,\n 'super': superclass,\n 'private': is_private}", "def _compile_class(self) -> None:\n self.file_obj.write(\" \" * self.indent + \"<class>\\n\")\n self._increase_indent()\n self._eat(\"class\")\n self._compile_class_name()\n self._eat(\"{\")\n self._compile_class_var_dec()\n self._compile_subroutine_dec()\n 
self._eat(\"}\")\n self._decrease_indent()\n self.file_obj.write(\" \" * self.indent + \"</class>\\n\")", "def get_classdef(self, class_name):\n clazz = ghidra_utils.get_class_symbol(class_name, self.program)\n classdef = None\n\n if clazz is not None:\n symlist = list(self.program.getSymbolTable().getChildren(clazz))\n\n for sym in symlist:\n if sym.getName() == \"__classdef__\":\n classdef = dex_classdef(\n dex_hdr=self, address=sym.getAddress())\n break\n\n return classdef", "def test_classes(self):\n self.filename = \"parser_tests/ruby_classes.txt\"\n expected_classes = [\"Customer\"]\n self.run_parser()\n self.assertListEqual(expected_classes, self.p.scanner.classes)", "def process_class_def(self, node, state, *_):\n grfn = {\"name\": \"\", \"type\": \"type\", \"attributes\": []}\n namespace = self._get_namespace(self.fortran_file)\n type_name = f\"@type::{namespace}::@global::{node.name}\"\n grfn[\"name\"] = type_name\n\n # Keep a track of declared user-defined types\n self.derived_types.append(node.name.lower())\n self.derived_types_attributes[node.name] = []\n\n attributes = node.body[0].body\n # Populate class member variables into attributes array.\n for attrib in attributes:\n attrib_is_array = False\n attrib_ast = attrib.__repr__().split()[0][2:]\n if attrib_ast == \"ast.AnnAssign\":\n attrib_name = attrib.target.attr\n if attrib.annotation.id in self.annotate_map:\n attrib_type = self.annotate_map[attrib.annotation.id]\n elif attrib.annotation.id in self.derived_types:\n attrib_type = attrib.annotation.id\n elif attrib_ast == \"ast.Assign\":\n attrib_name = attrib.targets[0].attr\n attrib_type = attrib.value.func.id\n assert (\n attrib_type in self.derived_types\n or attrib_type in self.library_types\n ), f\"User-defined type [{attrib_type}] does not exist.\"\n\n if attrib_type == \"Array\":\n attrib_is_array = True\n\n if attrib_is_array:\n elem_type = attrib.value.args[0].id\n # TODO: Currently, derived type array attributes are assumed\n # to be a single dimensional array with integer type. 
It maybe\n # appropriate to handle a multi-dimensional with variable used\n # as a dimension size.\n dimension_info = attrib.value.args[1]\n is_literal = False\n is_name = False\n\n single_dimension = False\n dimension_list = []\n if isinstance(dimension_info.elts[0], ast.Tuple):\n lower_bound = int(dimension_info.elts[0].elts[0].n)\n single_dimension = True\n\n # Retrieve upper bound of an array.\n if isinstance(dimension_info.elts[0].elts[1], ast.Num):\n upper_bound = int(dimension_info.elts[0].elts[1].n)\n is_literal = True\n elif isinstance(dimension_info.elts[0].elts[1], ast.Name):\n upper_bound = dimension_info.elts[0].elts[1].id\n is_name = True\n else:\n assert False, (\n f\"Currently, ast type \"\n f\"[{type(dimension_info.elts[0].elts[1])}] is not \"\n f\"supported.\"\n )\n\n if is_literal:\n dimension = (upper_bound - lower_bound) + 1\n elif is_name:\n dimension = upper_bound\n else:\n pass\n\n dimension_list.append(dimension)\n\n elif isinstance(dimension_info.elts[0], ast.Call):\n lower_bound = int(dimension_info.elts[0].func.elts[0].n)\n if isinstance(\n dimension_info.elts[0].func.elts[1], ast.Num\n ):\n upper_bound = int(\n dimension_info.elts[0].func.elts[1].n\n )\n is_literal = True\n elif isinstance(\n dimension_info.elts[0].func.elts[1], ast.Name\n ):\n upper_bound = dimension_info.elts[0].func.elts[1].id\n is_name = True\n\n if is_literal:\n first_dimension = (upper_bound - lower_bound) + 1\n elif is_name:\n first_dimension = upper_bound\n\n dimension_list.append(first_dimension)\n\n lower_bound = int(dimension_info.elts[0].args[0].n)\n\n if isinstance(dimension_info.elts[0].args[1], ast.Num):\n upper_bound = int(dimension_info.elts[0].args[1].n)\n is_literal = True\n elif isinstance(dimension_info.elts[0].args[1], ast.Name):\n upper_bound = dimension_info.elts[0].args[1].id\n is_name = True\n\n if is_literal:\n second_dimension = (upper_bound - lower_bound) + 1\n elif is_name:\n second_dimension = upper_bound\n\n dimension_list.append(second_dimension)\n\n dimensions = dimension_list\n\n grfn[\"attributes\"].append(\n {\n \"name\": attrib_name,\n \"type\": attrib_type,\n \"elem_type\": elem_type,\n \"dimensions\": dimensions,\n }\n )\n # Here index is not needed for derived type attributes,\n # but simply adding it as a placeholder to make a constant\n # structure with other arrays.\n self.arrays[attrib_name] = {\n \"index\": 0,\n \"dimensions\": dimensions,\n \"elem_type\": elem_type,\n \"mutable\": True,\n }\n else:\n grfn[\"attributes\"].append(\n {\"name\": attrib_name, \"type\": attrib_type}\n )\n pass\n self.derived_types_attributes[node.name].append(attrib_name)\n\n state.variable_types[attrib_name] = attrib_type\n\n return [grfn]", "def typeclass(\n definition: Type[_AssociatedType],\n) -> '_TypeClassDef[_AssociatedType]':", "def cmd2ClassDef(nm, helpText=\"\", indent=1):\n s = indentStr*(indent)+\"def %s(self, **kwargs):\\n\"%(nm)\n s += '%s\"\"\"\\n%s\\n%s\"\"\"\\n'%( indentStr*(indent+1), helpText, indentStr*(indent+1))\n s += \"%sreturn self.msgSend('%s', kwargs)\\n\\n\"%(indentStr*(indent+1), nm);\n\n #s += \"def msg%s(self, **kwargs):\\n\"%(nm.capitalize())\n #s += '\"\"\"\\n%s\\n\"\"\"\\n'%command.helpText\n #s+=\" return self.cmdMsgSend('%s', kwargs)\\n\\n\";\n return s", "def get_all_classdef(root):\n if root['name']=='ClassDef':\n # block(ClassDef) begin id\n begin_id=root['id']\n end_id=get_end_id(root)\n return 'begin:%s,end:%s'%(begin_id,begin_id+end_id-1)\n else:\n if 'children' in root:\n t=[]\n for i in root['children']:\n 
temp=get_all_classdef(i)\n if temp:\n t.append(temp)\n return '$$$$$$'.join(t)", "def validate_class_name(self, arg):\n args = arg.split(' ')\n class_name = args[0]\n if class_name not in HBNBCommand.valid_classes:\n print(HBNBCommand.ERROR_CLASS)\n return False\n return class_name", "def class_desc():\n\n return \"Parent class of all Combat Turtle classes.\"", "def ParseXMLClass(classDefStr):\n c = CharClass()\n definitions = classDefStr.split('|')\n for d in definitions:\n hexStr = []\n for di in d:\n if di in '[]#x':\n continue\n else:\n hexStr.append(di)\n rangeDef = map(\n lambda x: int(x, 16), string.split(string.join(hexStr, ''), '-'))\n if len(rangeDef) == 1:\n a = rangeDef[0]\n if a > maxunicode:\n print \"Warning: character outside narrow python build (%X)\" % a\n else:\n c.add_char(unichr(a))\n elif len(rangeDef) == 2:\n a, b = rangeDef\n if a > maxunicode:\n print \"Warning: character range outside narrow python build (%X-%X)\" % (a, b)\n elif b > maxunicode:\n print \"Warning: character range truncated due to narrow python build (%X-%X)\" % (a, b)\n b = maxunicode\n c.add_range(unichr(a), unichr(b))\n else:\n c.add_range(unichr(a), unichr(b))\n print repr(c)", "def make_class_ast(src):\n return python.AstTree(ast.parse(src)).classes()[0]", "def is_class(self, line):\n # We only want the first token in the line, to avoid false positives.\n # That is, the word 'class' appearing in some other context.\n tokens = line.split()\n if tokens:\n first_token = tokens[0]\n return first_token == 'class' or first_token == 'module'\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes blanks and newline characters in input_str and returns a string with those removed.
def remove_blanks(input_str):
    temp_str = input_str.replace(' ', '')
    temp_str = temp_str.replace('\n', '')
    return temp_str
[ "def remove_whitespace(input):\n return input.strip(\"\\0\\r\\n \")", "def strip_input(input):\n if input is None:\n return \"\"\n elif isinstance(input, basestring):\n return input.strip()\n else:\n return input", "def _remove_line_breaks(str):\n str2 = str\n str2 = str2.replace(\"\\n\", \" \") # Replace line breaks\n str2 = str2.strip() # Remove leading and trailing spaces\n str2 = re.sub('\\s{2,}', ' ', str2) # At most one space\n return str2", "def remove_empty_lines(strIn):\n return os.linesep.join([s for s in strIn.splitlines() if s.strip()])", "def stripped_input(prompt):\n return input(prompt).strip()", "def clean_and_split_input(input):\n\n input = input.strip()\n input = input.split(' ')\n\n return input", "def clean(s):\n # return dedent(s)\n return \"\\n\".join([line.strip() for line in s.split(\"\\n\")])", "def purge_punctuation_etc(input_string):\n result = input_string.lower()\n result = result.replace(\"\\n\", \"\")\n list_of_non_letters = []\n for character in result:\n if (ord(character) < ord('a')) or (ord(character) > ord('z')):\n if character != ' ':\n list_of_non_letters.append(character)\n for non_letter in list_of_non_letters:\n result = result.replace(non_letter, \"\")\n while \" \" in result:\n result = result.replace(\" \", \" \")\n return result", "def clean(seq):\n return seq.strip().replace(' ', '').replace('\\n', '').replace('\\r', '')", "def _strip_whitespace(self, s):\n\t s = s.replace('\\n', ' ').replace('\\r', ' ')\n\t s = re.sub(r'\\s+', ' ', s)\n\t return s", "def clean_text(text):\r\n clean_text = text.replace('\\n', ' ').replace('\\r', '').strip()\r\n return clean_text", "def remove_whitespace(string):\n return string.replace(\" \", \"\")", "def clean_line(line):\n return line.replace(\"\\0\", \"\").strip()", "def filter_input(string):\n return ((unicode(string)).lower()).strip()", "def safe_strip(string):\n\n if string != None:\n string = string.strip()\n return string", "def clean_str(string):\n string = re.sub(r\"\\s\", \"_\", string.strip())\n return re.sub(r\"[^\\w]\", \"\", string)", "def remove_special_characters(string):\r\n s = re.sub('[^A-Za-z0-9\\s]+', '', string)\r\n s = re.sub('\\s+', ' ', s)\r\n return s", "def _clean(string):\n\n grammar_tokens = [\".\", \",\", \"<\", \">\", \"?\", \"!\", \":\", \";\", \"\\\"\", \"(\", \")\", \"{\", \"}\", \"~\", \"|\", \"/\" ] \n\n for g in grammar_tokens: \n string = string.replace(g, \"\")\n\n string = string.replace(\"\\s+\",\" \")\n string = string.lower()\n return string", "def strip_chars(chars_to_strip, raw_string):\n return ''.join(ch for ch in raw_string if ch not in set(chars_to_strip))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if input string is a DNA sequence.
def is_DNA(input_DNA):
    # Uses remove_blanks() method to remove any blanks and newline characters
    # in the input_DNA string
    DNA = remove_blanks(input_DNA)

    condition = True
    DNA_bases = 'AGTCagtc'

    # If one character in the input string DNA is not found in DNA_bases,
    # will set condition to False and raise an Exception telling the user
    # that the input sequence is not a DNA sequence.
    for base in DNA:
        if base not in DNA_bases:
            condition = False
            raise Exception("Not a DNA sequence! Please enter again!")
            break
        else:
            continue

    return condition
[ "def is_valid_sequence(dna):\n\n valid = True\n for char in dna:\n if char not in 'ATCG':\n valid = False\n break\n\n return valid", "def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(\"[^ATKMBVCNSWD-GUYRHatkbbvcnswdguyrh]\")\n\n # if any illegal characters found return False\n if seq_val.search(sequence):\n return False\n\n return True", "def validate_dna(s):\n\n s = s.lower()\n\n DNA = True\n\n for i in s:\n if i != 'a' and i != 't' and i != 'g' and i != 'c':\n DNA = False\n return DNA", "def is_RNA(input_RNA):\n \n # Uses remove_blanks() method to remove any blanks and newline characters\n # in the input_RNA string\n RNA = remove_blanks(input_RNA)\n \n condition = True\n RNA_bases = 'AGUCaguc'\n \n # If one character in the input string RNA is not found in RNA_bases,\n # will set condition to False and raise an Exception telling the user\n # that the input sequence is not a RNA sequence.\n for base in RNA:\n if base not in RNA_bases:\n condition = False\n raise Exception(\"Not an RNA sequence! Please enter again!\")\n break\n else:\n continue\n \n return condition", "def check_string(seq):\n if not isinstance(seq, str):\n assert False, \"Input is not a string.\"\n else:\n pass\n return None", "def validate(seq, alphabet='dna'):\n## Source : https://www.biostars.org/p/102/ Giovanni M Dall'Olio\n alphabets = {'dna': re.compile('^[acgtn]*$', re.I), \n 'protein': re.compile('^[acdefghiklmnpqrstvwy]*$', re.I)}\n if alphabets[alphabet].search(seq) is not None:\n return True\n else:\n return False", "def is_valid(self):\n for s in set(self._dna_string):\n if s not in self.dna_nucleotides:\n return False\n return True", "def validate_seq(seq):\n\n valid_bases = ['G', 'A', 'T', 'C']\n valid = True\n\n for i in seq:\n if i not in valid_bases:\n valid = False\n\n return valid", "def is_non_string_sequence(obj):\n return not isinstance(obj, str) and isinstance(obj, Sequence)", "def is_sequence(input):\n return (isinstance(input, six.collections_abc.Sequence) and\n not isinstance(input, six.string_types))", "def is_sequence(x):\n return isinstance(x, Sequence) and not isinstance(x, str)", "def is_seq(obj):\n try:\n len(obj)\n except (TypeError, ValueError):\n return False\n else:\n return not isinstance(obj, str)", "def _is_sequence(self, ddl):\n m_seqs = self._find_seq.search(ddl)\n return m_seqs is not None", "def is_sequence(seq):\n if isinstance(seq, (dict, collections.Mapping)):\n return True\n if isinstance(seq, set):\n _warn_once(\"Sets are not currently considered sequences, but this may \"\n \"change in the future, so consider avoiding using them.\")\n return (isinstance(seq, collections.Sequence) and\n not isinstance(seq, _six.string_types))", "def validateAminoSequence(self, sequence_list):\r\n\r\n # Create a list of invalid amino acid characters for later validation\r\n # all except the specified characters are invalid.\r\n invalid = re.compile(r'[^ARNDBCEQZGHILKMFPSTWYV]')\r\n for string in sequence_list:\r\n if re.search(invalid, string) is not None:\r\n return 0\r\n return 1", "def is_seq_and_not_str(obj):\n return (isinstance(obj, Sequence) and\n not isinstance(obj, (string_type, text_type)))", "def is_uuid(string):\n if re.match(r'[0-9a-fA-F]{8}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{4}\\-[0-9a-fA-F]{12}', string):\n return True\n return False", "def _checkstring(string:str, invalid_characters):\n #invalid_characters = \";:'\\\".,\\\\/ \"\n for i in string:\n if i in invalid_characters:\n 
return True\n return False", "def is_valid_domain_id(id_str: str) -> bool:\n return re.match('([0-9][a-zA-Z0-9]{3})([a-zA-Z0-9])([0-9]{2})$', id_str)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turns the input DNA sequence into all caps.
def DNA_to_caps(DNA):
    # First uses is_DNA() method to check if input sequence is DNA;
    # this prevents proceeding on to use other methods (and wasting time
    # & resources) when the input sequence is not a DNA sequence.
    if RNA_pol.is_DNA(DNA):
        return DNA.upper()
[ "def RNA_to_caps(RNA):\n \n # First uses is_RNA() method to check if input sequence is RNA;\n # this prevents proceeding on to use other methods (and wasting time\n # & resources) when the input sequence is not an RNA sequence.\n if Ribosome.is_RNA(RNA):\n return RNA.upper()\n \n return RNA.upper()", "def changeCase(seq):\n return seq.upper()", "def convert_fasta_case(seq_dict):\n for k, v in seq_dict.items():\n seq_dict[k] = v.lower()\n\n return seq_dict", "def abbreviated_capwords(self, snake_case: str):\n abbreviated = self.abbreviations.sub(self.abbreviate, snake_case)\n return SNAKE_CASE.sub(self.capitalize, abbreviated)", "def toUpper(self):\n self.name.toUpper()\n self.ext.toUpper()", "def capitalize(s):\n\n pass", "def match_capitalization_and_diacritic(abbrv: str, original: str) -> str:\n\n abbrv = list(normalize(abbrv, Level.SOFT))\n for i, c in enumerate(abbrv):\n unided = unidecode(original[i])\n if unidecode(c) in [unided.lower(), unided.upper()]:\n abbrv[i] = original[i]\n\n return ''.join(abbrv)", "def rna_to_dna(self):\n self.seq = self.seq.replace(\"U\", \"T\")", "def transcribe_dna_to_rna(s):\n\n s = s.upper()\n new_string = \"\"\n\n for i in s:\n if(i == 'T'):\n i = 'U'\n new_string = new_string + i\n return new_string", "def acronym(input):\n words = input.split()\n res = ''\n for word in words:\n res = res + word[0].upper()\n return res", "def fix_title_capitalization(title):\n if re.search(\"[A-Z]\", title) and re.search(\"[a-z]\", title):\n return title\n word_list = re.split(' +', title)\n final = [word_list[0].capitalize()]\n for word in word_list[1:]:\n if word.upper() in COMMON_ACRONYMS:\n final.append(word.upper())\n elif len(word) > 3:\n final.append(word.capitalize())\n else:\n final.append(word.lower())\n return \" \".join(final)", "def standardize(self):\n import string\n self.full_name = string.capwords(self.full_name)", "def _clean_sequence(sequence, str_table):\r\n comp_dict = _complements_dictionary(str_table)\r\n return \"\".join(\r\n [char.upper() for char in sequence if char.upper() in comp_dict.keys()]\r\n )", "def makeUpperCase(self, obj):\n obj.touch = None\n o = ''\n for n, x in enumerate(self.operationText()):\n if x == VC.BlankLetter:\n o += self.rawText[n]\n else:\n o += x.upper()\n self.resetRawText(o)", "def recase(self, variable):\n pass", "def convert2aa(sequence):\r\n\r\n # sequence = \"\".join([x.upper() for x in sequence]) # converts lowercase to uppercase\r\n\r\n number_of_codons = len(sequence)/3\r\n aa_seq = []\r\n\r\n for nmbr in list(range(1, int(number_of_codons)+1)): # goes through each codon converting it to an aa\r\n\r\n if \"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3] in codon2aa:\r\n aa_seq.append(codon2aa[\"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3]])\r\n else:\r\n aa_seq.append(\"XXX\")\r\n\r\n return \"\".join(aa_seq)", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def uncapitalize(s, preserveAcronymns='False'):\n\n pass", "def str_to_ascii_upper_case(s):\n return ''.join([c.upper() if 'a' <= c <= 'z' else c for c in s])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if input string is an RNA sequence.
def is_RNA(input_RNA):
    # Uses remove_blanks() method to remove any blanks and newline characters
    # in the input_RNA string
    RNA = remove_blanks(input_RNA)

    condition = True
    RNA_bases = 'AGUCaguc'

    # If one character in the input string RNA is not found in RNA_bases,
    # will set condition to False and raise an Exception telling the user
    # that the input sequence is not an RNA sequence.
    for base in RNA:
        if base not in RNA_bases:
            condition = False
            raise Exception("Not an RNA sequence! Please enter again!")
            break
        else:
            continue

    return condition
[ "def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(\"[^ATKMBVCNSWD-GUYRHatkbbvcnswdguyrh]\")\n\n # if any illegal characters found return False\n if seq_val.search(sequence):\n return False\n\n return True", "def is_valid_sequence(dna):\n\n valid = True\n for char in dna:\n if char not in 'ATCG':\n valid = False\n break\n\n return valid", "def check_string(seq):\n if not isinstance(seq, str):\n assert False, \"Input is not a string.\"\n else:\n pass\n return None", "def is_DNA(input_DNA):\n \n # Uses remove_blanks() method to remove any blanks and newline characters\n # in the input_DNA string\n DNA = remove_blanks(input_DNA)\n \n condition = True\n DNA_bases = 'AGTCagtc'\n \n # If one character in the input string DNA is not found in DNA_bases,\n # will set condition to False and return an Exception telling the user\n # that the input sequence is not a DNA sequence.\n for base in DNA:\n if base not in DNA_bases:\n condition = False\n raise Exception(\"Not a DNA sequence! Please enter again!\")\n break\n else:\n continue\n \n return condition", "def is_seq(obj):\n try:\n len(obj)\n except (TypeError, ValueError):\n return False\n else:\n return not isinstance(obj, str)", "def validate(seq, alphabet='dna'):\n## Source : https://www.biostars.org/p/102/ Giovanni M Dall'Olio\n alphabets = {'dna': re.compile('^[acgtn]*$', re.I), \n 'protein': re.compile('^[acdefghiklmnpqrstvwy]*$', re.I)}\n if alphabets[alphabet].search(seq) is not None:\n return True\n else:\n return False", "def is_sequence(x):\n return isinstance(x, Sequence) and not isinstance(x, str)", "def check_is_palindrome(self, sequence):\n return self.sequence.find(sequence[::-1]) == 0", "def validate_seq(seq):\n\n valid_bases = ['G', 'A', 'T', 'C']\n valid = True\n\n for i in seq:\n if i not in valid_bases:\n valid = False\n\n return valid", "def is_sequence(input):\n return (isinstance(input, six.collections_abc.Sequence) and\n not isinstance(input, six.string_types))", "def is_non_string_sequence(obj):\n return not isinstance(obj, str) and isinstance(obj, Sequence)", "def validateAminoSequence(self, sequence_list):\r\n\r\n # Create a list of invalid amino acid characters for later validation\r\n # all except the specified characters are invalid.\r\n invalid = re.compile(r'[^ARNDBCEQZGHILKMFPSTWYV]')\r\n for string in sequence_list:\r\n if re.search(invalid, string) is not None:\r\n return 0\r\n return 1", "def validate_dna(s):\n\n s = s.lower()\n\n DNA = True\n\n for i in s:\n if i != 'a' and i != 't' and i != 'g' and i != 'c':\n DNA = False\n return DNA", "def _valid_seq(self, seq):\n if self.filter_AA and self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if len(seq) >= int(self.minlength) and not forbidden_AAs:\n return True\n elif self.filter_AA and not self.filter_minlength:\n forbidden_AAs = re.search(r'[BXZOUJ]', seq)\n if not forbidden_AAs:\n return True\n elif not self.filter_AA and self.filter_minlength:\n if seq >= int(self.minlength):\n return True\n else:\n return False", "def is_sequence(seq):\n if isinstance(seq, (dict, collections.Mapping)):\n return True\n if isinstance(seq, set):\n _warn_once(\"Sets are not currently considered sequences, but this may \"\n \"change in the future, so consider avoiding using them.\")\n return (isinstance(seq, collections.Sequence) and\n not isinstance(seq, _six.string_types))", "def is_seq_and_not_str(obj):\n return (isinstance(obj, Sequence) and\n not isinstance(obj, (string_type, 
text_type)))", "def _isResQuest(r):\n\n if r and isinstance(r, basestring) and r[0] == _questChar:\n return True\n\n return False", "def has_perfect_matching(rna_string):\n\n if os.path.isfile(rna_string):\n r = fasta_read(rna_string)[0][1]\n else:\n r = rna_string.upper()\n\n if len(r)%2 != 0:\n return False\n else:\n return r.count('A') == r.count('U') and r.count('G') == r.count('C')", "def is_valid_rut(rut: str) -> bool:\n\n if not rut or not __is_well_formatted(rut):\n return False\n rut = __clean_rut(rut)\n return get_verification_digit(rut[:-1]) == rut[-1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turns the input RNA sequence into all caps.
def RNA_to_caps(RNA):
    # First uses is_RNA() method to check if input sequence is RNA;
    # this prevents proceeding on to use other methods (and wasting time
    # & resources) when the input sequence is not an RNA sequence.
    if Ribosome.is_RNA(RNA):
        return RNA.upper()

    return RNA.upper()
[ "def DNA_to_caps(DNA):\n \n # First uses is_DNA() method to check if input sequence is DNA;\n # this prevents proceeding on to use other methods (and wasting time\n # & resources) when the input sequence is not a DNA sequence.\n if RNA_pol.is_DNA(DNA):\n return DNA.upper()", "def changeCase(seq):\n return seq.upper()", "def rna_to_dna(self):\n self.seq = self.seq.replace(\"U\", \"T\")", "def convert_fasta_case(seq_dict):\n for k, v in seq_dict.items():\n seq_dict[k] = v.lower()\n\n return seq_dict", "def transcribe_dna_to_rna(s):\n\n s = s.upper()\n new_string = \"\"\n\n for i in s:\n if(i == 'T'):\n i = 'U'\n new_string = new_string + i\n return new_string", "def toUpper(self):\n self.name.toUpper()\n self.ext.toUpper()", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def makeUpperCase(self, obj):\n obj.touch = None\n o = ''\n for n, x in enumerate(self.operationText()):\n if x == VC.BlankLetter:\n o += self.rawText[n]\n else:\n o += x.upper()\n self.resetRawText(o)", "def _clean_sequence(sequence, str_table):\r\n comp_dict = _complements_dictionary(str_table)\r\n return \"\".join(\r\n [char.upper() for char in sequence if char.upper() in comp_dict.keys()]\r\n )", "def testUpperCaseSequencesOnly(self):\n reads = list(combineReads(None, [\"id acgt\"], upperCase=True))\n self.assertEqual([Read(\"id\", \"ACGT\")], reads)", "def abbreviated_capwords(self, snake_case: str):\n abbreviated = self.abbreviations.sub(self.abbreviate, snake_case)\n return SNAKE_CASE.sub(self.capitalize, abbreviated)", "def all_caps(self, all_caps):\n self._all_caps = all_caps", "def convert2aa(sequence):\r\n\r\n # sequence = \"\".join([x.upper() for x in sequence]) # converts lowercase to uppercase\r\n\r\n number_of_codons = len(sequence)/3\r\n aa_seq = []\r\n\r\n for nmbr in list(range(1, int(number_of_codons)+1)): # goes through each codon converting it to an aa\r\n\r\n if \"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3] in codon2aa:\r\n aa_seq.append(codon2aa[\"\".join([x.upper() for x in sequence])[nmbr*3-3:nmbr*3]])\r\n else:\r\n aa_seq.append(\"XXX\")\r\n\r\n return \"\".join(aa_seq)", "def to_rna(strand):\n return strand.upper().translate(maketrans('GCTA', 'CGAU'))", "def standardize(self):\n import string\n self.full_name = string.capwords(self.full_name)", "def read_sequence(SequenceFile):\n f = open(SequenceFile, 'r')\n sequence=f.read()\n sequence=sequence.rstrip()\n sequence=sequence.upper()\n f.close()\n return sequence", "def fix_title_capitalization(title):\n if re.search(\"[A-Z]\", title) and re.search(\"[a-z]\", title):\n return title\n word_list = re.split(' +', title)\n final = [word_list[0].capitalize()]\n for word in word_list[1:]:\n if word.upper() in COMMON_ACRONYMS:\n final.append(word.upper())\n elif len(word) > 3:\n final.append(word.capitalize())\n else:\n final.append(word.lower())\n return \" \".join(final)", "def capitalize(s):\n\n pass", "def dna_to_rna(self):\n self.seq = self.seq.replace(\"T\",\"U\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Translates an input RNA sequence to the corresponding protein.
def translate(RNA_seq):
    RNA = remove_blanks(RNA_seq)

    # Uses find_start_codon() method to find codon from which
    # translation will start
    counter = Ribosome.find_start_codon(RNA)

    codon = ''
    protein = ''

    # Assigns triplets of RNA sequence chars to 'codon' and concatenates the
    # corresponding amino acid symbol to the growing chain of amino acids,
    # then moves on to the next triplet, until reaching stop codon.
    while counter <= (len(RNA) - 3):
        codon = RNA[counter] + RNA[counter+1] + RNA[counter+2]

        #Start codon & Methionine(M)
        if codon == 'AUG':
            protein = protein + 'M'
        #Phenylalanine(F)
        elif codon == 'UUU' or codon == 'UUC':
            protein = protein + 'F'
        #Leucine(L)
        elif codon == 'UUA' or codon == 'UUG' or codon == 'CUU' \
             or codon == 'CUC' or codon == 'CUA' or codon == 'CUG':
            protein = protein + 'L'
        #Isoleucine(I)
        elif codon == 'AUU' or codon == 'AUC' or codon == 'AUA':
            protein = protein + 'I'
        #Valine(V)
        elif codon == 'GUU' or codon == 'GUC' or codon == 'GUA' \
             or codon == 'GUG':
            protein = protein + 'V'
        #Serine(S)
        elif codon == 'UCU' or codon == 'UCC' or codon == 'UCA' \
             or codon == 'UCG' or codon == 'AGU' or codon == 'AGC':
            protein = protein + 'S'
        #Proline(P)
        elif codon == 'CCU' or codon == 'CCC' or codon == 'CCA' \
             or codon == 'CCG':
            protein = protein + 'P'
        #Threonine(T)
        elif codon == 'ACU' or codon == 'ACC' or codon == 'ACA' \
             or codon == 'ACG':
            protein = protein + 'T'
        #Alanine(A)
        elif codon == 'GCU' or codon == 'GCC' or codon == 'GCA' \
             or codon == 'GCG':
            protein = protein + 'A'
        #Tyrosine(Y)
        elif codon == 'UAU' or codon == 'UAC':
            protein = protein + 'Y'
        #Histidine(H)
        elif codon == 'CAU' or codon == 'CAC':
            protein = protein + 'H'
        #Glutamine(Q)
        elif codon == 'CAA' or codon == 'CAG':
            protein = protein + 'Q'
        #Asparagine(N)
        elif codon == 'AAU' or codon == 'AAC':
            protein = protein + 'N'
        #Lysine(K)
        elif codon == 'AAA' or codon == 'AAG':
            protein = protein + 'K'
        #Aspartate(D)
        elif codon == 'GAU' or codon == 'GAC':
            protein = protein + 'D'
        #Glutamate(E)
        elif codon == 'GAA' or codon == 'GAG':
            protein = protein + 'E'
        #Cysteine(C)
        elif codon == 'UGU' or codon == 'UGC':
            protein = protein + 'C'
        #Tryptophan(W)
        elif codon == 'UGG':
            protein = protein + 'W'
        #Arginine(R)
        elif codon == 'CGU' or codon == 'CGC' or codon == 'CGA' \
             or codon == 'CGG' or codon == 'AGA' or codon == 'AGG':
            protein = protein + 'R'
        #Glycine(G)
        elif codon == 'GGU' or codon == 'GGC' or codon == 'GGA' \
             or codon == 'GGG':
            protein = protein + 'G'
        #Stop codons
        elif codon == 'UAA' or codon == 'UAG' or codon == 'UGA':
            break
        #Exception for if codon is not found
        else:
            raise Exception("No such codon found!")

        #Increments counter to move to next codon
        counter = counter + 3

    return protein
[ "def translate_rna_to_protein(rna_seq):\n\n\t# dictionary containing each codon (3 base sequences) translation\n\tcodon_dict = {\"UUU\":\"F\",\"UUC\":\"F\",\"UUA\":\"L\",\"UUG\":\"L\",\n\t\t\t\t\t\"UCU\":\"S\",\"UCC\":\"S\",\"UCA\":\"S\",\"UCG\":\"S\",\n\t\t\t\t\t\"UAU\":\"Y\",\"UAC\":\"Y\",\"UAA\":\"Stop\",\"UAG\":\"Stop\",\n\t\t\t\t\t\"UGU\":\"C\",\"UGC\":\"C\",\"UGA\":\"Stop\",\"UGG\":\"W\",\n\t\t\t\t\t\"CUU\":\"L\",\"CUC\":\"L\",\"CUA\":\"L\",\"CUG\":\"L\",\n\t\t\t\t\t\"CCU\":\"P\",\"CCC\":\"P\",\"CCA\":\"P\",\"CCG\":\"P\",\n\t\t\t\t\t\"CAU\":\"H\",\"CAC\":\"H\",\"CAA\":\"Q\",\"CAG\":\"Q\",\n\t\t\t\t\t\"CGU\":\"R\",\"CGC\":\"R\",\"CGA\":\"R\",\"CGG\":\"R\",\n\t\t\t\t\t\"AUU\":\"I\",\"AUC\":\"I\",\"AUA\":\"I\",\"AUG\":\"M\",\n\t\t\t\t\t\"ACU\":\"T\",\"ACC\":\"T\",\"ACA\":\"T\",\"ACG\":\"T\",\n\t\t\t\t\t\"AAU\":\"N\",\"AAC\":\"N\",\"AAA\":\"K\",\"AAG\":\"K\",\n\t\t\t\t\t\"AGU\":\"S\",\"AGC\":\"S\",\"AGA\":\"R\",\"AGG\":\"R\",\n\t\t\t\t\t\"GUU\":\"V\",\"GUC\":\"V\",\"GUA\":\"V\",\"GUG\":\"V\",\n\t\t\t\t\t\"GCU\":\"A\",\"GCC\":\"A\",\"GCA\":\"A\",\"GCG\":\"A\",\t\t\t\t\t\n\t\t\t\t\t\"GAU\":\"D\",\"GAC\":\"D\",\"GAA\":\"E\",\"GAG\":\"E\",\n\t\t\t\t\t\"GGU\":\"G\",\"GGC\":\"G\",\"GGA\":\"G\",\"GGG\":\"G\",\n\t\t\t\t\t}\n\tcodon_length = 3\n\tstop_code = \"Stop\"\n\tunknown_code = \"?\"\n\tprotein_seq = [] #Store the sequence in a list before converting it to a string to save memory.\n\n\t# Go through the RNA sequence from beginning to the end, \n\t# but with index increment of the codon length\n\tfor i in xrange(0,len(rna_seq),codon_length):\n\t\t# Check if the index + codon length will still within the length of RNA sequence.\n\t\tif (i+codon_length) <= len(rna_seq):\n\t\t\tcodon = rna_seq[i:(i+codon_length)]\n\t\t\t# Check if the codon exists in the dictionary. \n\t\t\t# If so, get the translation. \n\t\t\tif codon in codon_dict:\n\t\t\t\ttranslation = codon_dict[codon]\n\t\t\t\t# If the translation is stop code, return the protein sequence. \n\t\t\t\tif translation == stop_code:\n\t\t\t\t\treturn \"\".join(protein_seq)\n\t\t\t\t# Otherwise, append the translation to the protein sequence. \n\t\t\t\telse:\n\t\t\t\t\tprotein_seq.append(translation)\n\t\t\telse:\n\t\t\t\tprint(\"The sequence {0} is not valid. The translation will be coded as '?'\").format(\n\t\t\t\t\tcodon)\n\n\tprint(\"Warning: no stop codon found. 
\")\n\treturn \"\".join(protein_seq)", "def translate(seq):\n\n table = {\n 'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n 'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n 'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n 'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n 'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n 'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n 'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n 'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n 'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n 'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n 'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n 'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n 'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',\n 'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',\n }\n\n seq_len = len(seq)\n if seq_len % 3 == 0:\n protein = \"\"\n for i in range(0, seq_len, 3):\n codon = seq[i: i+3]\n protein += table[codon]\n return protein\n else:\n return f\"Invalid Input Sequence, len = {seq_len}\"", "def translate(seq):\n \n #translation table of codons to amino acids\n # _ underscores are nature's stop codons.\n table = {\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\n 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\n 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',\n }\n \n #The protein is a sequence of amino acids\n protein = \"\"\n \n # Check that the length of the string is divisible by 3\n if len(seq) % 3 == 0:\n # Valid sequence - proceed\n # Loop over the sequence\n for i in range(0, len(seq), 3):\n \n # Extract a single codon (3-letter string)\n codon = seq[i:i+3]\n \n # Look up each codon (3-letter string) and store the result\n # Concatenating to generate an amino acid sequence\n protein += table[codon]\n else:\n pass\n \n\n return protein;", "def trans_trans(self, chromosome):\n\n # validate chromosome\n bad_base_positions = []\n good_bases = set(self.dna_rna)\n for idx, base in enumerate(chromosome):\n if not base in good_bases:\n bad_base_positions.append(idx)\n if bad_base_positions:\n raise ValueError(\"bad bases in {} at positions {}\".format(chromosome, bad_base_positions))\n\n # initialize chromosome properties\n proteins = []\n num_noncoding_bases = 0\n DNA_failure = False\n\n # translate DNA to RNA\n rna_chromosome = ''.join([self.dna_rna[base] for base in chromosome])\n\n # transcribe and translate\n # use a state machine that transitions between translating and not translating\n translating = False\n for codon_start_pos in range(0, len(rna_chromosome), self.CODON_LEN):\n codon = rna_chromosome[codon_start_pos:codon_start_pos + self.CODON_LEN]\n if len(codon) != self.CODON_LEN:\n # at end of chromosome, and number of bases in chromosome isn't a multiple of CODON_LEN\n num_noncoding_bases += len(codon)\n break\n if not translating:\n if codon in self.start_codons:\n # the codon is AUG\n translating = 
True\n # record a protein as a list of AAs\n protein = []\n continue\n else:\n num_noncoding_bases += self.CODON_LEN\n else:\n # raise exception if the codon can't be translated\n if codon not in self.rna_protein:\n raise ValueError(\"unknown codon '{}' at pos {} in {}\".format(codon, codon_start_pos,\n rna_chromosome))\n translated_codon = self.rna_protein[codon]\n if translated_codon == \"STOP\":\n translating = False\n if protein:\n proteins.append(''.join(protein))\n continue\n amino_acid = translated_codon\n protein.append(amino_acid)\n\n # DNA failure if still translating at end of chromosome or no proteins found\n if translating or not proteins:\n DNA_failure = True\n\n return (proteins, num_noncoding_bases, DNA_failure)", "def translateSequence(seq):\n aa = ''\n for i in xrange(0, len(seq), 3):\n aa += codonToAminoAcid(seq[i:i+3])\n return aa", "def translate(cds):\n RNA2DNA = lambda seq: seq.upper().replace('U', 'T')\n cds = RNA2DNA(cds)\n valid_sequence(cds)\n codon_table = {'TTT': 'F', 'CTT': 'L', 'ATT': 'I', 'GTT': 'V',\n 'TTC': 'F', 'CTC': 'L', 'ATC': 'I', 'GTC': 'V',\n 'TTA': 'L', 'CTA': 'L', 'ATA': 'I', 'GTA': 'V',\n 'TTG': 'L', 'CTG': 'L', 'ATG': 'M', 'GTG': 'V',\n 'TCT': 'S', 'CCT': 'P', 'ACT': 'T', 'GCT': 'A',\n 'TCC': 'S', 'CCC': 'P', 'ACC': 'T', 'GCC': 'A',\n 'TCA': 'S', 'CCA': 'P', 'ACA': 'T', 'GCA': 'A',\n 'TCG': 'S', 'CCG': 'P', 'ACG': 'T', 'GCG': 'A',\n 'TAT': 'Y', 'CAT': 'H', 'AAT': 'N', 'GAT': 'D',\n 'TAC': 'Y', 'CAC': 'H', 'AAC': 'N', 'GAC': 'D',\n 'TAA': '', 'CAA': 'Q', 'AAA': 'K', 'GAA': 'E',\n 'TAG': '', 'CAG': 'Q', 'AAG': 'K', 'GAG': 'E',\n 'TGT': 'C', 'CGT': 'R', 'AGT': 'S', 'GGT': 'G',\n 'TGC': 'C', 'CGC': 'R', 'AGC': 'S', 'GGC': 'G',\n 'TGA': '', 'CGA': 'R', 'AGA': 'R', 'GGA': 'G',\n 'TGG': 'W', 'CGG': 'R', 'AGG': 'R', 'GGG': 'G'}\n prot = ''\n if len(cds) % 3:\n raise ValueError('The CDS sequence seems to have wrong length which cannot be divided by 3!')\n prot = ''.join((codon_table[codon] for codon in split_seq(cds, 3)))\n return prot", "def rna_to_dna(self):\n self.seq = self.seq.replace(\"U\", \"T\")", "def dna_to_rna(self):\n self.seq = self.seq.replace(\"T\",\"U\")", "def rna_translate(bases):\n translation = ''\n while (len(bases[:3])==3):\n if(bases[0]=='U'):\n if(bases[1]=='U'):\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'F'\n else:\n translation = translation + 'L'\n elif(bases[1]=='C'):\n translation = translation + 'S'\n elif(bases[1]=='A'):\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'Y'\n #else:\n #translation.append('stop')\n else:\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'C'\n elif(bases[2]=='A'):\n translation = translation\n else:\n translation = translation + 'W'\n elif(bases[0]=='C'):\n if(bases[1]=='U'):\n translation = translation + 'L'\n elif(bases[1]=='C'):\n translation = translation + 'P'\n elif(bases[1]=='A'):\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'H'\n else:\n translation = translation + 'Q'\n else:\n translation = translation + 'R'\n elif(bases[0]=='A'):\n if(bases[1]=='U'):\n if(bases[2]=='G'):\n translation = translation + 'M'\n else:\n translation = translation + 'I'\n elif(bases[1]=='C'):\n translation = translation + 'T'\n elif(bases[1]=='A'):\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'N'\n else:\n translation = translation + 'K'\n else:\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'S'\n else:\n translation = translation + 'R'\n else:\n if(bases[1]=='U'):\n translation 
= translation + 'V'\n elif(bases[1]=='C'):\n translation = translation + 'A'\n elif(bases[1]=='A'):\n if(bases[2]=='U' or bases[2]=='C'):\n translation = translation + 'D'\n else:\n translation = translation + 'E'\n else:\n translation = translation + 'G'\n bases = bases[3:]\n \n print(translation)", "def __map_sequence_to_pid(self):\n if not self.x509_data['uut_cert_sequence'] and not self.x509_data['uut_prog_version']:\n log.debug(\"Mapping PID...\")\n for ps_map in X509Sudi.PID_SEQ_MAP:\n if re.search(ps_map.pid_pattern, self.x509_data['uut_pid']):\n self.x509_data['uut_cert_sequence'] = ps_map.sequence\n self.x509_data['uut_prog_version'] = ps_map.prog_version\n break\n else:\n log.debug(\"Unmapped PID; using default SUDI sequence arrangement.\")\n self.x509_data['uut_cert_sequence'] = 'STANDARD'\n self.x509_data['uut_prog_version'] = 'MULTICERT'\n else:\n log.debug(\"No PID mapping necessary; Cert Sequence and Programming Version were explicitly specified.\")\n\n log.info(\"X509 SUDI sequence arrangement = {0}\".format(self.x509_data['uut_cert_sequence']))\n log.info(\"X509 SUDI programming version = {0}\".format(self.x509_data['uut_prog_version']))\n return", "def dna_and_introns_to_protein(fasta_file):\n ## To Do:\n #### Write this in a more cohesive and less retarded way.\n #### Check that the intron positions and lengths don't overlap and\n #### therefore produce valid exons.\n #### Check that the exon string is evenly divisble by 3 and therefore\n #### nicely transcribable.\n #### Somehow allow for multiple dna strings to be processed.\n #### Somehow account for UTR's.\n\n dna_and_introns = fasta_read(fasta_file)\n\n dna = dna_and_introns[0][1]\n introns = [i[1] for i in dna_and_introns[1:]]\n\n coding_region, protein = str(), str()\n intron_intervals = []\n\n for i in introns:\n positions = subs(dna, i)\n for pos in positions:\n intron_intervals += [range(pos, pos + len(i))]\n\n for i in range(len(dna)):\n is_exon_part = True\n for rng in intron_intervals:\n if i in rng:\n is_exon_part = False\n if is_exon_part:\n coding_region += dna[i]\n\n for i in range(0, len(coding_region) - 2, 3):\n s = coding_region[i:i+3]\n try:\n if dna_codons[s] == 'Stop':\n protein += ' '\n else:\n protein += dna_codons[s]\n except Exception as ex:\n print(ex)\n\n with open('output_{}'.format(fasta_file), 'w') as fout:\n fout.write(protein)\n return protein", "def _transform_genomic_position_to_protein(self,genome_position):\n\t\tprotein_name = self._get_protein_name_for_genomic_position(genome_position)\n\t\tif (protein_name is None) or (protein_name not in self.reference_protein_locations):\n\t\t\treturn None\n\t\treturn (genome_position - self.reference_protein_locations[protein_name][0]) + 1", "def to_rna(dna_strand):\n translation = dna_strand.maketrans(code)\n return dna_strand.translate(translation)", "def create_skbio_protein_sequence(match_row):\n metadata = {'id': match_row['Protein_Accession'], 'description': ('(From ' + match_row['Sample_Name'] + ')')}\n return Protein(sequence=match_row['Sequence'], metadata=metadata)", "def translate(self, **kwargs) -> \"SeqLike\":\n sc = deepcopy(self)\n if sc._nt_record is None:\n raise ValueError(\n \"Oops! It looks like you're trying to translate a SeqLike object \"\n \"that doesn't have a nucleotide record set. \"\n \"Unfortunately this would be semantically incorrect. \"\n \"Please ensure that your SeqLike has a `._nt_record` SeqRecord \"\n \"before calling on `.translate()`.\"\n )\n\n if len(sc) % 3 != 0:\n raise TypeError(\n \"Oh no! 
It looks like you're trying to translate a nucleotide sequence \"\n \"whose length is not a multiple of 3. \"\n \"As a safeguard, SeqLike objects do not allow this to happen. \"\n )\n sc._aa_record = record_from(sc._nt_record.translate(gap=gap_letter, **kwargs))\n # neutralize \"protein\" `molecule_type` annotation added by BioPython's `SeqRecord.translate()`\n sc._aa_record.annotations.pop(\"molecule_type\")\n return sc.aa()", "def translate(rec):\n # Truncate to nearest codon\n end = len(rec.seq) // 3 * 3\n rec.seq = rec.seq[:end].translate()\n rec.description = f\"translated {rec.description}\"\n return rec", "def transform_sequence(self, sequence):\n return ''.join(self.transformations.get(c, c) for c in sequence)", "def translate(seq, table):\n result = []\n for i in range(len(seq)):\n result.append(table[seq[i]])\n return result", "def peptide_encoding(dna, peptide, codons):\n rc_dna = reverse_complement(dna)\n sub_dna = find_peptide(dna, peptide, codons)\n rev_subseqs = find_peptide(rc_dna, peptide, codons)\n for i in rev_subseqs:\n sub_dna.append(reverse_complement(i))\n return(sub_dna)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display quantities of fasteners.
def quantites(total, count_imperial, count_metric, state="(Current state)"):
    print("{}".format(state))
    print("Total number of fasteners: {}".format(total))
    print(" - imperial: ............. {}".format(count_imperial))
    print(" - metric : ............. {}\n".format(count_metric))
[ "def display_fps(self):\n caption = \"Score - {}, clicks - {}\".format(self.rezult_points, self.click_count)\n pygame.display.set_caption(caption)", "def visualize_spike_train_fast(trains):\r\n# -----------------------------------------------------------------------------\r\n length = len(trains)\r\n y_width = 1\r\n for i, train in enumerate(trains):\r\n train_to_plot = np.array(binary_to_sparse_train(train)[0])\r\n y_position = np.zeros(len(train_to_plot)) + i * y_width\r\n plot(train_to_plot, y_position,\".\", color = \"blue\")\r\n xlabel(\"dt\")\r\n ylabel(\"N (neuron spike trains)\")\r\n ylim((-y_width,(i + 1) * y_width))", "def show_multistart(self,N=-1):\n\n sorted_ests = sorted(self.est['ests'],key=lambda k:k['obj'])\n cutoff_obj = [est['obj'] for est in sorted_ests][N]\n print(f'{\"obj\":>15s}',end='')\n print(' ',end='')\n for name in self.theta.keys():\n print(f'{name:>10s} ',end='')\n print(f'{\"nits\":>7s}')\n for i,est in enumerate(sorted_ests):\n if est['obj'] < cutoff_obj:\n print(f'{i:3d}: {est[\"obj\"]:12.8f} ',end='')\n for name in self.theta.keys():\n print(f'{est[name]:10.3f} ',end='')\n print(f' [{est[\"nits\"]:5d}]')\n \n self.sorted_ests = sorted_ests", "def featuresList(self):\n self.featuresListScreen.setWindowTitle(\"Unique Facial Feature Detection\")\n self.featuresListScreen.resize(575, 400)\n\n featuresLayout = QVBoxLayout()\n featuresBtnLayout = QHBoxLayout()\n\n obtainedFeaturesList = QLabel(self.photoProcessedScreen)\n obtainedFeaturesList.setStyleSheet(\"font: 14pt Century Gothic\")\n obtainedFeaturesList.setText(\"Your unique features!\")\n obtainedFeaturesList.setAlignment(Qt.AlignCenter)\n\n featuresLayout.addWidget(obtainedFeaturesList)\n\n # Features are displayed in outputtingList\n # Buttons are added in outputtingList to maintain order of widgets\n\n self.featuresListScreen.setLayout(featuresLayout)", "def showSamples(self, nrows, ncols):\n fig, axes = plt.subplots(nrows, ncols)\n for i, ax in enumerate(axes.flat): \n ax.imshow(self.x[i,:])\n ax.set_xticks([]); ax.set_yticks([])\n ax.set_title(np.argmax(self.y[i]))\n \n plt.show()", "def show_data(self):\n for i, task in enumerate(self.todo_tasks):\n print(f\"{i + 1}° - \", end=\"\")\n task.show_task()", "def show_pickup(self):\r\n print(\"\"\"\r\nVous avez {}/{} objets.\r\n \"\"\".format(len(self.macg.bag), len(self.syrg.items)))\r\n for item in self.macg.bag:\r\n if item == c.NEEDLE:\r\n print(\"- Aiguille\", end=\" \")\r\n elif item == c.TUBE:\r\n print(\"- Tube\", end=\" \")\r\n elif item == c.ETHER:\r\n print(\"- Fiole d'Ether\", end=\" \")\r\n print()", "def show_all(self):\n self.show_top(len(self.evaluations))", "def show_measurement(self):\n return f\"{int(self.amount)} {self.unit} {self.ingredient.name}\"", "def showInfo(self):\n print(\"\\n\\n\"+\"#\"*3,os.path.basename(self.abfFileName),\"#\"*3)\n print(\"INFO:\")\n for key in sorted(self.info): print(\" %s = %s\"%(key,self.info[key]))\n print(\"EPOCHS:\")\n for key in sorted(self.epochs): print(\" %s = %s\"%(key,self.epochs[key]))", "def showChips(self):\n print(\"You currently have %d chips\" % self.chips)", "def show_artist(self):\n print('====', self.artist['name'], '====')\n print('Popularity: ', self.artist['popularity'])\n if len(self.artist['genres']) > 0:\n print('Genres: ', ','.join(self.artist['genres']))", "def display_number_of_items(self):\n return False", "def update_hint(self):\r\n try:\r\n count = self.logic.preview(self.filepath, self.sub_view.subsample)\r\n update_feedback_label(\r\n self.sub_view.point_count_lb, '%s 
points' % str(count), \r\n True\r\n )\r\n except ValueError:\r\n update_feedback_label(self.sub_view.point_count_lb)", "def display_stuff(self):\n # print(\"i1 | i2 | b | Net| O/p | Thresh\")\n #print(\"neuron display\")\n print(*self.inp_list, self.y, self.out, self.threshold, *self.weight_list, sep=\" | \")\n # print(\"Weights used are: \")\n # print(*self.weight_list)", "def show():\n items = \"\"\n for num, item in Item.collection.items():\n if item.quantity == 0:\n continue\n else:\n items += (\n f\"\\t{str(num)}: {item.name.capitalize()} \"\n f\"- {colored(item.description, color=Color.item_description)} \"\n f'- {colored(f\"{item.quantity} available\", color=Color.item_quantity)}\\n'\n )\n items += \"\\t*: Exit\"\n print(\"Available items\")\n print(items)", "def show_songs(self):\n try:\n songs = self.songs\n [Print(\"%s: %s\" % (a + 1, b)) for a, b in enumerate(songs)]\n except TypeError:\n Print(\"Please set Media first\\nNo Artist name\")", "def show_summary(self):\n length = self.sum_length.first()\n coef = self.sum_coef.first()\n cons = length*coef/100\n print(\"Total length: \" + str(length) +\n \"\\nTotal consumption: \" + str(coef) +\n \"\\nTotal fuel used: \" + str(cons))", "def get_n_spectra():\n return 25" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a model is registered with auditlog.
def contains(self, model: ModelBase) -> bool:
    return model in self._registry
[ "def model_exists(self, tag):\n return tag in self._models", "def has_access(self, model):\n return True", "def can_log(self):\n return # boolean", "def is_model(self, model):\n if model is None:\n self.dump()\n raise Exception(\"Model is None\")\n return model_path(model) == model_path(self.model)", "def has_been_enabled(self):\n return self.engine.get('audit.has_been_armed', False)", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == APIRouter.ANALYTICS_DB_ALIAS:\n return self.is_analytics_model(model_name)\n elif self.is_analytics_model(model_name):\n # If we are working on a analytics model, and the db is not\n # analytics_db, return false\n return False\n return None", "def check_model_exists(config):\n return os.path.exists(get_model_name_config(config))", "def can_manage_smart_log(self):\n return # boolean", "def has_model(self, id):\n return id in self.list_ids()", "def can_lookup_logs(self):\n return # boolean", "def should_add_record(record):\n return not apps.get_model(record[\"model\"]).objects.filter(pk=record[\"pk\"]).exists()", "def has_log(self, log_name: str) -> bool:\n for log in self.logs:\n if log.property_name == log_name:\n return True\n return False", "def _already_injected(model):\r\n try: # try/except is faster than hasattr, if inject method is called repeatedly\r\n model.gcam_dict # Check if attribute exists\r\n return True\r\n except AttributeError:\r\n return False", "def can_register_for_log_notifications(self):\n return # boolean", "def allow_migrate(self, db, app_label, model_name=None, **hints):\n if db == self.ANALYTICS_DB_ALIAS:\n return self.is_analytics_model(model_name)\n elif self.is_analytics_model(model_name):\n # If we are working on a analytics model, and the db is not\n # analytics_quiz_db, return false\n return False\n return None", "def has_permission(self, request, view):\n try:\n visitor = request.user.visitormodel\n return True\n except UserModel.visitormodel.RelatedObjectDoesNotExist:\n return False", "def __bool__(self) :\n\t\treturn bool(self.log)", "def can_register_for_log_entry_notifications(self):\n return # boolean", "def test_add(self):\n\n p = Person(first_name='John',\n last_name='Doe',\n email='john@doe.com',\n birth_date=datetime.now())\n p.save()\n logs = ModelChange.objects.filter(instance_pk=p.pk)\n self.assertTrue(logs.exists())\n self.assertEqual(logs.first().type, 'add')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unregister a model with auditlog. This will not affect the database.
def unregister(self, model: ModelBase) -> None:
    try:
        del self._registry[model]
    except KeyError:
        pass
    else:
        self._disconnect_signals(model)
[ "def unregister_model(self, model):\n if model not in self._model_registry:\n raise NotRegistered('The model %s is not registered' % model)\n\n del self._model_registry[model]", "def unload_model(self, model_name):\n raise_error(\"Not implemented yet\")", "def unload_model(model_name):\n del backend_globals.loaded_model[model_name]", "def remove_model(self, model):\n models = listify(model)\n\n for mod in models:\n del self.model_context_map[id(mod)]\n\n return super(LockedMachine, self).remove_model(models)", "def invalidate_model(model):\n model = non_proxy(model)\n conjs_keys = redis_client.keys('conj:%s:*' % get_model_name(model))\n if conjs_keys:\n cache_keys = redis_client.sunion(conjs_keys)\n redis_client.delete(*(list(cache_keys) + conjs_keys))", "def delete_model(self, request, obj):\n # handle something here\n obj.delete()", "def delete(self, model_name):\n\n LOGGER.info('Deleting model by name, model_name = %s', model_name)\n _, data_access = self._get(model_name)\n if model_name in self.sessionmakers:\n del self.sessionmakers[model_name]\n with self.modelmaker() as session:\n session.query(Model).filter(Model.handle == model_name).delete()\n data_access.delete_all(self.engine)", "def unregister_object() -> None:\n self.controller.known_objects.discard((description.key, self._obj_id))", "def unregister(self, slug):\n\n if slug not in self._registry:\n raise NotRegistered('The slug %s is not registered' % slug)\n bundle = self._registry[slug]\n if bundle._meta.model and bundle._meta.primary_model_bundle:\n self.unregister_model(bundle._meta.model)\n\n del self._registry[slug]\n del self._order[slug]", "def delete(self):\n HistoryModel.objects.filter(\n source_app_label=self.get_model_inst()._meta.app_label,\n source_model_name=self.get_model_inst()._meta.object_name.lower(),\n source_identifier=self.get_model_inst().pk,\n group_name=self.get_group_name(),\n ).delete()", "def unregister(metric):\n del state.metrics[metric.name]", "def delete_registered_model(self, registered_model):\n try:\n registered_model = self._get_registered_model(registered_model.model_name)\n registered_model.delete()\n return Status.OK\n except mongoengine.OperationError as e:\n raise AIFlowException(str(e))", "def model_clear():\n Iris.objects.all().delete()\n\n # Raw SQL is needed to update the system table that tracks the row number/pk id\n # without resetting to 0 on a clear, the numbering will continue after objects are deleted\n from django.db import connection \n with connection.cursor() as cursor: \n cursor.execute(\"UPDATE sqlite_sequence SET SEQ = 0 WHERE NAME = 'iris_iris'\")", "def delete_model(self):\n if self.name is None:\n raise ValueError(\n \"The SageMaker model must be created first before attempting to delete.\"\n )\n self.sagemaker_session.delete_model(self.name)", "def _uninstall(self):\n self.log.info('Uninstalling \"{schema}\"'.format(**self.env))\n with higher_log_indent():\n self._unlink()\n self._delete()", "def delete_model(self, user, name, force=False):\n\n super().delete_repo(\"model\", user, name, force=force)", "def unhook(self):\n raise NotImplementedError", "def unload_model(self, model_name, headers=None):\n if headers is not None:\n metadata = headers.items()\n else:\n metadata = ()\n try:\n request = grpc_service_pb2.RepositoryModelUnloadRequest(\n model_name=model_name)\n if self._verbose:\n print(\"unload_model, metadata {}\\n{}\".format(metadata, request))\n self._client_stub.RepositoryModelUnload(request=request,\n metadata=metadata)\n if self._verbose:\n 
print(\"Unloaded model '{}'\".format(model_name))\n except grpc.RpcError as rpc_error:\n raise_error_grpc(rpc_error)", "def del_model( modelName ): # FIXME: Freezes Python, DO NOT USE!\n # delete_model : gazebo_msgs/DeleteModel\n del_model_prox = rospy.ServiceProxy('gazebo/delete_model', DeleteModel) # model spawner\n # rospy.wait_for_service('gazebo/delete_model') # Wait for the model loader to be ready \n # FREEZES EITHER WAY\n ref=del_model_prox(modelName) # Remove from Gazebo" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect signals for the model.
def _connect_signals(self, model):
    for signal in self._signals:
        receiver = self._signals[signal]
        signal.connect(
            receiver, sender=model, dispatch_uid=self._dispatch_uid(signal, model)
        )
[ "def _connect_signals(self):\n # ui signals\n self._view.signal_browse_noice_app.connect(self.browse_noice_app)\n self._view.signal_add_aov.connect(self.add_aov)\n self._view.signal_window_close.connect(self.window_close)\n self._view.signal_remove_aov[list].connect(self.remove_aovs)\n self._view.signal_add_input.connect(self.add_input)\n self._view.signal_remove_input[list].connect(self.remove_inputs)\n self._view.signal_browse_output.connect(self.browse_output)\n self._view.signal_run.connect(self.run)\n\n # thread signals\n self._worker.signal_output[str].connect(self._worker_output)\n self._worker.signal_start.connect(self._start)\n self._worker.signal_abort.connect(self._abort)\n self._worker.singal_complete.connect(self._complete)\n self._worker.signal_error.connect(self._error)", "def setup_signals(self):\n sig = { 'on_mainwindow_destroy': self.close\n , 'on_entry_activate' : self.connect\n , 'on_spinbutton_activate': self.connect\n , 'on_connect_clicked' : self.connect\n , 'on_cancel_clicked' : self.close }\n\n return sig", "def connect_signals():\n reply_published.connect(send_reply_published_mail, sender=Review)\n review_published.connect(send_review_published_mail, sender=Review)\n review_request_closed.connect(send_review_request_closed_mail,\n sender=ReviewRequest)\n review_request_published.connect(send_review_request_published_mail,\n sender=ReviewRequest)\n user_registered.connect(send_user_registered_mail)\n post_delete.connect(send_webapi_token_deleted_mail, sender=WebAPIToken)\n post_save.connect(send_webapi_token_saved_mail, sender=WebAPIToken)", "def connect(self):\n name = \"%s.%s.%s\" % (\n self.parent_model._meta.model_name,\n self.child_model._meta.model_name,\n self.foreign_field.name\n )\n counted_name = \"%s-%s\" % (name, self.counter_name)\n\n def pre_save_receiver_counter(sender, instance, **kwargs):\n self.pre_save_receiver(instance)\n\n pre_save.connect(pre_save_receiver_counter, sender=self.child_model, weak=False,\n dispatch_uid=f'{counted_name}_pre_save')\n\n def post_save_receiver_counter(sender, instance, **kwargs):\n self.post_save_receiver(instance)\n\n post_save.connect(post_save_receiver_counter, sender=self.child_model, weak=False,\n dispatch_uid=f'{counted_name}_post_save')\n\n def post_delete_receiver_counter(sender, instance, **kwargs):\n self.post_delete_receiver(instance)\n\n post_delete.connect(post_delete_receiver_counter, sender=self.child_model, weak=False,\n dispatch_uid=f'{counted_name}_post_delete')\n\n counters[counted_name] = self", "def init_signals(self):\n # Update the table views when a tree node is selected\n self.tree_view.selectionModel().selectionChanged.connect(self.handle_selection_changed)\n\n # Dynamically populate more of the tree items when\n # selected to keep memory usage at a minimum.\n self.tree_view.expanded.connect(self.tree_model.handle_expanded)\n self.tree_view.collapsed.connect(self.tree_model.handle_collapsed)\n\n self.dims_model.dataChanged.connect(self.handle_dims_data_changed)", "def connect(self):\n self.signal.connect(\n self.on_signal_received,\n sender=self.sender,\n dispatch_uid=self._dispatch_uid,\n )\n self.is_connected = True", "def _connectSignals(self):\n logging.debug('Application: _connectSignals()')\n self.connect(self._window.tabWidget(), SIGNAL(\"currentChanged(int)\"), self.tabChanged)\n self.connect(self._window, SIGNAL(\"windowActivated()\"), self.tabChanged)\n self.connect(self._window.tabWidget(), SIGNAL(\"tabCloseRequested(int)\"), self.tabCloseRequest)", "def 
_connect_db_signals(self):\n self.callman.register_handles({'ensemble': [self.obj.get_handle()]})\n self.callman.register_callbacks(\n {'ensemble-update': self.check_for_ensemble_change,\n 'ensemble-delete': self.check_for_close,\n 'ensemble-rebuild': self._do_close,\n 'textile-rebuild': self._do_close,\n })\n self.callman.connect_all(keys=['ensemble', 'textile'])", "def _connect_signals(self):\r\n for service_name in IDE.__IDECONNECTIONS:\r\n connections = IDE.__IDECONNECTIONS[service_name]\r\n for connection in connections:\r\n if connection.get('connected', False):\r\n continue\r\n target = IDE.__IDESERVICES.get(\r\n connection['target'], None)\r\n slot = connection['slot']\r\n signal_name = connection['signal_name']\r\n if target and isinstance(slot, collections.Callable):\r\n self.connect(target, SIGNAL(signal_name), slot)\r\n connection['connected'] = True", "def set_signals_slots(self):\n self.btn_add.clicked.connect(self.add_fs_band)\n self.btn_exit.clicked.connect(self.check)", "def _manage_signal_handler(self, signal_method):\n\n indexed_model = self.get_model()\n\n # Connect the basic signal handler for this index.\n signal_method(search_index_signal_handler, sender=indexed_model)\n\n # For any related models, create and connect signal handlers that will\n # update an indexable object when a related object is changed.\n for related_model, field_name in self.get_related_models():\n\n # Creating the function isn't necessary when disconnecting\n # signals, but for simplicity's sake (and since we never\n # disconnect) just do it anyway.\n related_handler = make_related_signal_handler(field_name)\n\n unique_id = related_signal_handler_uid(related_model, indexed_model)\n signal_method(\n receiver=related_handler,\n sender=related_model,\n weak=False,\n dispatch_uid=unique_id,\n )", "def create_connections(self):\n \n self.connect(self.cancel_button, SIGNAL('clicked()'), self.close_dialog) \n self.connect(self.import_button, SIGNAL('clicked()'), self.import_alembic_dialog)\n self.combo_box.currentIndexChanged.connect(self.on_comboBox_changed)\n self.sequence_list_wdg.currentItemChanged.connect(self.on_sequenceList_changed)\n self.shots_list_wdg.currentItemChanged.connect(self.on_shotList_changed)", "def connect_signals_to_slots(window):\n # connect amplitude spinbox changed to set amplitude\n window.sineAmplitudeSpinBox.valueChanged.connect(\n set_sinewave_amplitude\n )\n # connect frequency spinbox changed to set frequency\n window.sineFrequencySpinBox.valueChanged.connect(\n set_sinewave_frequency\n )\n # connect the plot sinewave button to plot sinwave method\n window.plotSineButton.clicked.connect(\n plot_sinewave\n )\n # connect the clear sinewave plot button to remove lines\n window.clearSineButton.clicked.connect(\n window.mpl.removeLines\n )\n # connect the sinewave settings updated signal to plot sinewave method\n sinewave_settings_signal.updated.connect(\n plot_sinewave\n )", "def reconnect_signals(self):\n try:\n self.cache_input_layer_btn.clicked.disconnect()\n self.preroll_sb.valueChanged.disconnect()\n self.start_frame_sb.valueChanged.disconnect()\n except RuntimeError:\n pass\n self.cache_input_layer_btn.clicked.connect(self._cache_input_layer)\n self.delete_input_layer_btn.clicked.connect(self._delete_cache_input_layer)\n self.setup_select_cb.currentIndexChanged.connect(self.setup_selection_changed)\n self.create_ncache_btn.clicked.connect(self.create_ncache)\n self.delete_ncache_btn.clicked.connect(self.delete_ncache)\n 
self.open_ncache_dir_btn.clicked.connect(self.open_cache_dir)\n self.refresh_btn.clicked.connect(self.total_refresh)\n self.start_frame_sb.valueChanged.connect(self._set_start_frame)\n self.preroll_sb.valueChanged.connect(self._set_start_frame)", "def connect(self, signal_name, receiver):\n self.signals[signal_name].append(receiver)", "def register_signals(cls):\n from kb.events.models import SubmittedEvent\n from django.db.models.signals import post_save\n from .signals import handle_codeorg_submission\n post_save.connect(handle_codeorg_submission, sender=SubmittedEvent)\n cls.debug(201)", "def connectModelToSignal(model: Any, combinedSignal: pyqtBoundSignal) -> Callable:\n\n connectedFunction = lambda *_args, **_kwargs: combinedSignal.emit()\n\n for fieldValue in recursive_field_iter(model):\n if isinstance(fieldValue, Signal):\n fieldValue.connect(connectedFunction)\n\n return connectedFunction", "def update_signal_source(self):\n self.signals = self.array_graph.signals\n self.signals.signal_iterations.connect(self.set_iterations_label)", "def create_connections(self):\n self.up_button.clicked.connect(self.move_items_up)\n self.down_button.clicked.connect(self.move_items_down)\n self.remove_button.clicked.connect(self.delete_obj_items)\n self.add_button.clicked.connect(self.add_clicked)\n self.seq_button.clicked.connect(self.sequence_camera)\n self.browse_button.clicked.connect(self.browse_dirs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disconnect signals for the model.
def _disconnect_signals(self, model):
    for signal, receiver in self._signals.items():
        signal.disconnect(
            sender=model, dispatch_uid=self._dispatch_uid(signal, model)
        )
[ "def disconnectModelFromSignal(model: Any, connectedFunction: Callable) -> None:\n\n for fieldValue in recursive_field_iter(model):\n if isinstance(fieldValue, Signal):\n fieldValue.disconnect(connectedFunction)", "def disconnect_signal(self):\r\n if self.properties_signal is not None:\r\n self.properties_signal.remove()\r\n self.properties_signal = None\r\n\r\n for char in self.characteristics:\r\n char.disconnect_signal()", "def disconnect(self):\n self.signal.disconnect(\n self.on_signal_received,\n sender=self.sender,\n dispatch_uid=self._dispatch_uid,\n )\n self.is_connected = False", "def disconnect_signal(self):\r\n if self.properties_signal is not None:\r\n self.properties_signal.remove()\r\n self.properties_signal = None", "def disconnect(self, signal_name, receiver):\n try:\n self.signals[signal_name].remove(receiver)\n except ValueError:\n pass", "def disconnect_trigger_signal(self):\n\n prev_self = Trigger.objects.get(pk=self.id) # get previous version of trigger\n verb_signal = prev_self.get_verb_signal()\n if verb_signal is None:\n logger.error(f\"Trigger disconnecting failed! Trigger: {self} , verb: {self.verb}\")\n return\n\n for receiver in verb_signal.receivers:\n try:\n if receiver[1].__self__ != self:\n # For example a standard django signal like post_save that is connected to another handler out of DSN\n logger.debug(\n \"Receiver's bounded method is not handler function of a trigger, DSN doesn't disconnect it \"\n \"from the signal\")\n continue\n except AttributeError:\n logger.debug(\n \"Receiver's bounded method is not handler function of a trigger, DSN doesn't disconnect it \"\n \"from the signal\")\n continue\n\n if self.action_object_content_type is not None:\n # Make sure that is disconnected completely from new and old object to avoid unplanned problems\n prev_action_object_content_type_class = None\n if prev_self.action_object_content_type is not None:\n prev_action_object_content_type_class = prev_self.action_object_content_type.model_class()\n disconnectedSuccess = verb_signal.disconnect(receiver=prev_self,\n sender=prev_action_object_content_type_class,\n dispatch_uid=str(prev_self)) or \\\n verb_signal.disconnect(receiver=self,\n sender=self.action_object_content_type.model_class(),\n dispatch_uid=str(self))\n else:\n # Make sure that is disconnected completely from new and old object to avoid unplanned problems\n disconnectedSuccess = verb_signal.disconnect(receiver=prev_self,\n sender=None,\n dispatch_uid=str(prev_self)) or \\\n verb_signal.disconnect(receiver=self,\n sender=None,\n dispatch_uid=str(self))\n if not disconnectedSuccess:\n logger.error(f\"Trigger disconnecting failed! 
Trigger: {self} , verb_signal: {verb_signal}\")", "def _disable_signals(self):\n for dispatch_uid in self.SAMPLE_APP_DISPATCH_UIDS:\n signals.checkout_preferences_created.disconnect(\n sender=services.MercadoPagoService,\n dispatch_uid=dispatch_uid)", "def slider_disconnect(self):\n self.sender().valueChanged.disconnect()", "def disconnected(self):\n\t\tself.connectType = None\n\t\tfor instance in self.disconnectHandlers:\n\t\t\tinstance()", "def disconnect(self):\n\n self.model.status.code *= -1\n\n for i in range(15):\n for service in self.model.sap.connections:\n if not service.gui.is_closed:\n service.gui.close()\n\n try:\n self.model.vpn.disconnect()\n except:\n pass\n\n self.view.controls.btn_disconnect['state'] = 'disabled'\n # self.model.status.post('Disconnected')\n self.model.status.code = -1", "def _disconnect_receivers():\n yield\n main_app_config = apps.get_app_config('main_app')\n pre_migrate.disconnect(sender=main_app_config, dispatch_uid=DISPATCH_UID)\n post_migrate.disconnect(sender=main_app_config, dispatch_uid=DISPATCH_UID)", "def unregister(self, model: ModelBase) -> None:\n try:\n del self._registry[model]\n except KeyError:\n pass\n else:\n self._disconnect_signals(model)", "def disconnect(self):\n vrep.simxStopSimulation(self.clientID,vrep.simx_opmode_oneshot)\n vrep.simxFinish(self.clientID)\n print ('Simulation ended!')", "def disconnect(self, *args) -> \"void\":\n return _coin.SoField_disconnect(self, *args)", "def _disconnect_from(self, obj):\n if obj is None:\n return\n elif isinstance(obj, self._type):\n roles = self._role_to_prop\n elif isinstance(obj, self._ref_type):\n roles = self._ref_role_to_prop\n self._disconnect_from(getattr(obj, 'ref', None))\n\n for num, name in roles.items():\n signal_name = name + 'Changed'\n\n try:\n signal = getattr(obj, signal_name)\n signal.disconnect(self.onChildModified if roles is self._role_to_prop else self.onChildRefModified)\n except (AttributeError, KeyError):\n continue", "def disconnect(self):\n for fig in self.figs:\n fig.canvas.mpl_disconnect(self.enterfig_id)\n fig.canvas.mpl_disconnect(self.leavefig_id)\n fig.canvas.mpl_disconnect(self.enterax_id)\n fig.canvas.mpl_disconnect(self.leaveax_id)\n fig.canvas.mpl_disconnect(self.pressb_id)\n fig.canvas.mpl_disconnect(self.close_id)", "def disconnect(self, device):", "def __disconnect_field_signal(self, node):\n field = node.elem\n if field != None:\n if field.id != None:\n field.view.disconnect(field.id)", "def _disconnectDetector(self):\n if self.isExposing():\n logger.warning('Emitting exposureFinished only because disconnecting the detector while exposing.')\n self.state = ExposerState.Idle\n self.exposureFinished.emit(False)\n logger.debug('Disconnecting detector signals from slots in Exposer')\n self.detector.connectionEnded.disconnect(self.onDetectorDisconnected)\n self.detector.variableChanged.disconnect(self.onDetectorVariableChanged)\n self.detector.commandResult.disconnect(self.onCommandResult)\n self.detector = None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the messages output by the Paver task.
def task_messages(self):
    return tasks.environment.messages
[ "def get_outputs(self):\n output = ''\n if self.out:\n output = output + \"\\nOutput:\\n{}\".format(self.out)\n if self.err:\n output = output + \"\\nSTDERR:\\n{}\".format(self.err)\n return output", "def get_messages(self):\n\n running_job_messages = None\n job_exe_end_models = None\n finished_job_exes = None\n\n with self._lock:\n finished_job_exes = self._finished_job_exes\n job_exe_end_models = self._job_exe_end_models\n running_job_messages = self._running_job_messages\n self._finished_job_exes = []\n self._job_exe_end_models = []\n self._running_job_messages = []\n\n # Start with running job messages\n messages = running_job_messages\n\n # Add messages for creating job_exe_end models\n messages.extend(create_job_exe_end_messages(job_exe_end_models))\n\n # Add messages for finished job executions\n messages.extend(self._create_finished_job_exe_messages(finished_job_exes))\n\n return messages", "def get_messages(self):\n return self.messages_received", "def get_outputs(self):\r\n return []", "def return_ambient_msgs(self):\n if self.db.ambient_switch:\n ambient_msgs = self.db.ambient_msgs\n for obj in self.contents_get():\n try:\n ambient_msgs.extend(obj.return_ambient_msgs())\n except:\n continue\n return ambient_msgs", "def read_messages(self):\n\n msg = \"\"\n try:\n while True:\n line = self.message_queue.get_nowait()\n # msg += line.decode(\"utf-8\")\n msg += line\n except Empty:\n pass # finished\n return msg", "def get_messages(path, model):\n cmd = f'{gs.WGRIB2} {path} -s -n'\n res = open_subprocess_pipe(cmd)\n res = (res.split('\\n'))[:-1]\n names = []\n messages = []\n for key, meta in vs.metvars.items():\n for i in res:\n if meta['mod'][model][1] in i:\n if 'ens mean' in i:\n messages.append(int(i.split(':')[0]))\n names.append(f'{key}_mean')\n if f\"{meta['ensemble_percentiles'][0]}%\" in i:\n messages.append(int(i.split(':')[0]))\n names.append(f'{key}_lower_percentile')\n if f\"{meta['ensemble_percentiles'][1]}\" in i:\n messages.append(int(i.split(':')[0]))\n names.append(f'{key}_upper_percentile')\n names.extend(['lon', 'lat'])\n messages.extend([3, 12])\n return names, messages", "def consume_messages(self):\n warnings = self.rotkehlchen.msg_aggregator.consume_warnings()\n errors = self.rotkehlchen.msg_aggregator.consume_errors()\n result = {'result': {'warnings': warnings, 'errors': errors}, 'message': ''}\n return process_result(result)", "def get_results(self):\n for t in self.task:\n print t.get()", "def get_complete_messages(self):\n\t\treturn self.completed_stream_messages", "async def get_msgs(self) -> t.List[t.Dict[str, t.Any]]: # type:ignore[override]\n msgs = []\n while True:\n try:\n msgs.append(await self.get_msg())\n except Empty:\n break\n return msgs", "def get_messages():\n mes_tweets = MyTweet.query.all()\n return mes_tweets", "def messages(self):\n resp = []\n while self.__notices:\n resp.append(self.__notices.pop(0))\n return resp", "def getMsg(self) -> str:\n return self.msg", "def get_text_message(self):\n message = self.pytddmon.get_log()\n return message", "def message_ports_out(self):\n return _raw_util.raw_peak_detector_fb_sptr_message_ports_out(self)", "def get_all():\n global buffer\n messages = buffer\n buffer = []\n logger.info('Returning contents and cleared buffer, current count: %d'%count())\n return messages", "def _get_output_vars(self):", "def process_output(self):\n self.lock.acquire()\n messages = self.message_queue\n self.message_queue = []\n self.lock.release()\n for msg in messages:\n self.show_output(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current platform's root directory.
def platform_root(self):
    return os.getcwd()
[ "def sysroot_dir(self):\n\n return self._sysroot.sysroot_dir", "def get_mo_root_dir():\n return os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), os.pardir))", "def get_root_path(self):\n mock_cmd = self._mock_cmd('--print-root-path')\n output = check_output(mock_cmd)\n return output.rstrip()", "def get_current_dir() -> str:\n return os.getcwd()", "def host_dir(self):\n\n return self._sysroot.host_dir", "def get_current_dir():\n return os.getcwd()", "def default_base_dir():\n cwd = Path('.').resolve()\n\n pwd = os.environ.get('PWD')\n if pwd is None:\n return cwd\n\n pwd = Path(pwd)\n if not pwd.is_absolute():\n return cwd\n\n if cwd != pwd.resolve():\n return cwd\n\n return pwd", "def get_base_directory() -> str:\n return SO5CGConfig.base_directory \\\n if SO5CGConfig.base_directory is not None \\\n else expanduser(SO5CGConfig.default_base_directory)", "def get_android_root_dir():\n android_root_path = os.environ.get(constants.ANDROID_BUILD_TOP)\n if not android_root_path:\n _show_env_setup_msg_and_exit()\n return android_root_path", "def base_directory():\n return os.path.dirname(os.path.realpath(__file__)) + os.path.sep", "def cwd() -> str:\n return os.path.abspath(os.getcwd())", "def get_base_dir():\r\n\r\n # Configured setting\r\n backup_dir = settings.get('backup_dir', '')\r\n if backup_dir != '':\r\n return os.path.expanduser(backup_dir)\r\n\r\n # Windows: <user folder>/My Documents/Sublime Text Backups\r\n if sublime.platform() == 'windows':\r\n return os.path.join(\r\n win32helpers.get_shell_folder('Personal'),\r\n 'Sublime Text Backups')\r\n\r\n # Linux/OSX/other: ~/sublime_backups\r\n return os.path.expanduser('~/.sublime/backups')", "def __root_directory__(config) :\n path_config = config.get('ContentPaths', {})\n return os.path.realpath(path_config.get('PService', os.path.join(os.environ['HOME'], '.toxaway')))", "def get_directory():\n home_dir = dirname(dirname(abspath(__file__))) # call dirname twice to get parent dir\n return home_dir", "def __get_file_root_location(self):\n\n return self.main_location", "def get_kard_root_path():\n return get_pkr_path() / KARD_FOLDER", "def workingDir() -> str:\n d = os.getcwd()\n _log.verify( len(d) == 0 or not (d[-1] == '/' or d[-1] == '\\\\'), \"*** Internal error 13123212-2: %s\", d)\n return d + \"/\"", "def get_current_dir():\n return os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))", "def get_git_root():\n\n rpath = git.Repo('.', search_parent_directories=True).working_tree_dir\n rpath = rpath + '/'\n return rpath" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Used to simulate an error when executing "npm install"
def fail_on_npm_install():
    return 1
[ "def _install_npm_command(cmd):\n with settings(warn_only=True):\n version = npm_commands[cmd]['version']\n out = local('npm install -g {0}@{1}'.format(cmd, version), capture=True)\n if out.return_code != 0:\n print 'Using sudo'\n local('sudo npm install -g {0}@{1}'.format(cmd, version))", "def npm_install(package, flags=None):\n\n command = u'install %s %s' % (package, flags or u'') \n npm_command(command.strip())", "def test_install_error_other():\n mock_retcode = MagicMock(return_value=1234)\n path = \"C:\\\\KB123456.msu\"\n with patch.dict(win_wusa.__salt__, {\"cmd.retcode\": mock_retcode}):\n with pytest.raises(CommandExecutionError) as excinfo:\n win_wusa.install(path)\n mock_retcode.assert_called_once_with(\n [\"wusa.exe\", path, \"/quiet\", \"/norestart\"], ignore_retcode=True\n )\n assert \"Unknown error: 1234\" == excinfo.value.message", "def npm_command(command):\n\n sudo(u'npm %s' % command)", "def install_npm_dependencies(checkout):\n run_command([\"npm\", \"--silent\", \"--no-progress\", \"install\", \"--no-save\"], cwd=checkout)", "def test_create_package_invalid_input():\n\n wheel_dir = os.path.join(tempfile.mkdtemp(), \"wheel_dir\")\n try:\n create_package(\"/abc/def/ghijkl\", wheel_dir)\n pytest.fail(\"Expecting AgentPackageError got none\")\n except AgentPackageError as e:\n assert e.message == \"Invalid agent package directory specified\"\n\n try:\n create_package(tempfile.mkdtemp(), wheel_dir)\n pytest.fail(\"Expecting NotImplementedError got none\")\n except NotImplementedError:\n pass", "def setup():\n dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'react')\n handle = subprocess.Popen([\"npm\", \"install\"], cwd=dir_path)\n handle.wait()", "def test_syscall_with_error(self):\n with self.assertRaises(Exception):\n utils.syscall(\"notacommandunlessyoumadeitone\")", "def test_error(self):\n results = yield self.runCommand(\n command_bogusCommand,\n script=\"calendarserver_config\")\n self.assertEquals(results[\"error\"], \"Unknown command 'bogus'\")", "def fail(msg):\n error(msg)\n sys.exit(1)", "def test_install_source_packages_notarget_success(self):\n \n self.assertTrue(install_source_packages(\"/tmp/install_source_packages_no_target\", [\"wvdial\"], None, False, self.log))", "def install_npm_modules():\n print('----- Installing npm modules -----')\n\n confirm = input('Your are about to install npm modules. Do you want to continue ? [Y/n] : ')\n if confirm.upper() not in ('N', 'No'):\n os.system(\"sudo apt remove -y nodejs npm\")\n os.system(\"curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -\")\n os.system(\"sudo apt-get update\")\n os.system(\"sudo apt-get install -y nodejs\")\n print('------- Installing npm modules --------')\n\n for mod in NPM_MODULES:\n os.system(\"sudo npm install -g %s\" % mod)\n print(\"run: 'sudo dpkg -r --force-depends nodejs' if the installation fails\")", "def test_install_error_87():\n retcode = 87\n mock_retcode = MagicMock(return_value=retcode)\n path = \"C:\\\\KB123456.msu\"\n with patch.dict(win_wusa.__salt__, {\"cmd.retcode\": mock_retcode}):\n with pytest.raises(CommandExecutionError) as excinfo:\n win_wusa.install(path)\n mock_retcode.assert_called_once_with(\n [\"wusa.exe\", path, \"/quiet\", \"/norestart\"], ignore_retcode=True\n )\n assert (\n f\"Unknown error. 
Additional info follows:\\n\\n{retcode}\" == excinfo.value.message\n )", "def test_nonexistant_package(server):\n response = requests.get(f\"{server}/index/ultraspampackage/\")\n assert response.status_code == 404", "def minver_error(pkg_name):\n print(\n 'ERROR: specify minimal version of \"{0}\" using '\n '\">=\" or \"==\"'.format(pkg_name),\n file=sys.stderr\n )\n sys.exit(1)", "def test_install_no_version(self, tmp_path):\n tfenv = TFEnvManager(tmp_path)\n\n with pytest.raises(ValueError) as excinfo:\n assert tfenv.install()\n assert str(excinfo.value) == (\n 'version not provided and unable to find a .terraform-version file'\n )", "def test_install_module(self):\n pass", "def test_global_dep_error(monkeypatch):\n buildspec = deepcopy(BS_BASE)\n buildspec[\"build_toml\"][\"deps\"] = {\n \"req\": []\n }\n try:\n with base_test_invocator(monkeypatch, buildspec=buildspec):\n # `with` to activate the cm\n pass\n except zcbe.exceptions.BuildTOMLError:\n return\n assert 0, \"This test should raise\"", "def test_exec_fail_no_distro(self): # suppress(no-self-use)\n with SafeTempDir() as container_dir:\n with ExpectedException(RuntimeError):\n cmd = PLATFORM_PROGRAM_MAPPINGS[platform.system()][\"0\"]\n run_use_container_on_dir(container_dir, cmd=cmd)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move the Tank, but only if the Tank is already in form; if not in form, the Tank uses 'movePirate' to move.
def moveTank(self, dest):
    old_dest = dest
    dest = nearestPassableLoc(dest, caller_loc=self.findBaseLoc(), exclude_locations=[p.location for p in self.pirates])
    path = createPath(self.baseLoc, dest, alternating=False, pirate=self.findBaseLoc(), exclude_locs=[p.location for p in self.pirates])
    if path != '-':
        self.last_path = path
    form_path = path
    if path == '-':
        form_path = game.get_directions(self.baseLoc, old_dest)[0]
        if form_path == '-':
            form_path = self.last_path
    # logger.debug("Tank going to target {} with path '{}'".format(dest, path))
    if self.formTank(form_path):
        logger.debug("We've got a tank! prepare to die..")
        if path is not None and path != '-':
            for pirate in self.pirates:
                dest = game.destination(pirate, path)
                movePirate(pirate, dest)
            # Apply new base
            if self.baseLoc != old_dest:
                self.baseLoc = game.destination(self.baseLoc, path)
    else:
        logger.debug("Stopped tank from moving.")
[ "def move(self):\n # spread pheromone\n self.spread()\n\n return super(DepositPheromones, self).move()", "def move(self):\r\n if not self.waitTurn(): # if no traffic from the opposite direction\r\n\t gui.Label.repaint(self.getCell(0))\r\n self.setWaiting(False) \r\n self.getCell(0).removeVehicle(self)\r\n self.passedpath += 1\r\n if self.getCell(0).addVehicle(self): # if the car crashed\r\n if not self.isOut():\r\n self.setCrashed()\r\n else:\r\n\t #print \"wait turn\"\r\n self.setWaiting(True)", "def move(self):\n if self.direction == \"n\":\n self.position = (self.position[0]-1, self.position[1])\n\n elif self.direction == \"s\":\n self.position = (self.position[0]+1, self.position[1])\n\n elif self.direction == \"e\":\n self.position = (self.position[0], self.position[1]+1)\n\n elif self.direction == \"w\":\n self.position = (self.position[0], self.position[1]-1)", "def at_before_move(self, destination):\r\n #return has_perm(self, destination, \"can_move\")\r\n return True", "def move(self):\n if random.randint(0, 1) == 0:\n if self.position > self.left_limit:\n self.position -= 1\n\n else:\n if self.position < self.right_limit:\n self.position += 1\n self.steps += 1", "def move(self):\n try:\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n self.chooseNewDirection(self, 10)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n # done!\n except:\n print(\"Error in boundingBox - probably because turtle t has no dx or dy.\")\n\n # now move our monster\n super().move()", "def _move_door(self,):\n\n pass", "def move(brick, position, orientation):\n controller.do(MoveBrickInDesign(brick, position, orientation))", "def move(self, player, position):\n if self.state.pitch.board[position.y][position.x] == player:\n return\n assert player.position is not None\n assert self.state.pitch.board[position.y][position.x] is None\n for ball in self.state.pitch.balls:\n if ball.position == player.position and ball.is_carried:\n ball.move_to(position)\n self.remove(player)\n self.put(player, position)", "def moveup(self, move):\n self.goup = True\n if not move:\n self.goup = False", "def movePiece(self,pos, dest):\r\n\r\n #set passant every turn to check if en passant is possible\r\n if(isinstance(self.board[pos[0]][pos[1]],piece.Pawn)):\r\n if(self.board[pos[0]][pos[1]].enPassant == True):\r\n self.passant = True\r\n else:\r\n self.passant = False\r\n\r\n #check if castle move\r\n if(not self.checkPossible(pos,dest)):\r\n if(pos == self.kingPos[0]):\r\n if(dest == (1,0)):\r\n self.board[2][0] = self.board[0][0]\r\n self.board[2][0].position = (2,0)\r\n self.board[0][0] = 0\r\n elif(dest == (6,0)):\r\n self.board[5][0] = self.board[7][0]\r\n self.board[5][0].position = (5,0)\r\n self.board[7][0] = 0\r\n elif(pos == self.kingPos[1]):\r\n if(dest == (1,7)):\r\n self.board[2][7] = self.board[0][7]\r\n self.board[2][7].position = (2,7)\r\n self.board[0][7] = 0\r\n elif(dest ==(6,7)):\r\n print(\"Final Step\")\r\n self.board[5][7] = self.board[7][7]\r\n self.board[5][7].position = (5,7)\r\n self.board[7][7] = 0\r\n\r\n # move piece(normally)\r\n self.board[dest[0]][dest[1]] = self.board[pos[0]][pos[1]]\r\n self.board[pos[0]][pos[1]] = 0\r\n self.board[dest[0]][dest[1]].position = (dest[0], dest[1])", "def switchmode(self):\n if self.mode == _CHOOSEPILE:\n self.stackpicker = self.stackpointer\n self.mode = _PICKMOVE\n elif self.mode == _PICKMOVE:\n # check for valid move and make 
if so\n if len(self.board[self.stackpointer]) > 0:\n cnum = self.board[self.stackpointer][len(self.board[self.stackpointer]) - self.cardpointer - 1][0]\n if self.validMove(self.stackpicker, self.stackpointer, cnum):\n self.move(self.stackpicker, self.stackpointer, cnum)\n self.stackpointer = self.stackpicker\n self.mode = _CHOOSEPILE\n self.cardpointer = 0", "def __move(N,frompole,topole,withpole):\n if N == 1:\n print \"%d: %s ==> %s\"%(N,frompole,topole)# move directly\n else:\n __move(N-1,frompole,withpole,topole)\n print \"%d: %s ==> %s\"%(N,frompole,topole)\n __move(N-1,withpole,topole,frompole)", "def stay_put(self):\n self.go_to(self.pos.x,self.pos.y, self.pos.theta)", "def move_to_position(self, position_in_steps):\n\n if (position_in_steps > self.max_position) or (position_in_steps < self.min_position):\n print \"Stepper position out of bounds, ignoring\"\n return\n\n self.goal_position = position_in_steps\n if self.debug:\n print \"%.4f-%s\\tNEW goal position: %.2f\" % (time.clock(), self.name, self.goal_position)", "def move(self):\n\n if not self.dead:\n self.__set_next_direction()\n Character.move(self)\n else:\n self.retreat()", "def move(self):\r\n if self.get_direction == c.UP:\r\n return self.macg.step_up()\r\n elif self.get_direction == c.DOWN:\r\n return self.macg.step_down()\r\n elif self.get_direction == c.LEFT:\r\n return self.macg.step_left()\r\n elif self.get_direction == c.RIGHT:\r\n return self.macg.step_right()\r\n elif self.get_direction == c.LEAVE:\r\n exit()", "def move(self):\n turtle.penup() # Ensure nothing is drawn while moving\n turtle.setpos(self.location)\n\n distance = distance_2d(self.location, self.destination)\n\n # Clamp distance below radius / 2 (inclusive)\n half_radius = self.radius / 2\n if distance > half_radius:\n distance = half_radius\n\n # Move the person towards their destination\n turtle.setheading(turtle.towards(self.destination))\n turtle.forward(distance)\n self.location = turtle.pos()", "def move(self):\n if self._moving_between_tiles:\n self.__move_between_tiles()\n else:\n check_next_coord, jump = self._calculate_new_coord()\n # Checks if the next calculated coordinate is a wall or if there is an intersection/turn (and extreme mode deactivated)\n # If so it wil calculate a new direction. This is so for most of the following moving methods (scatter/frightened)\n if (self.__coord_dict.get(\n check_next_coord).is_wall() or self.__check_neighbours()) and not self.__extreme_mode:\n self.__update_target_tile()\n self._direction = self.astar.get_direction(self._coord,\n self.astar.get_closest_tile(self.__update_target_tile()))\n elif self.__extreme_mode:\n # Calculates immediately a new direction\n self.__update_target_tile()\n self._direction = self.astar.get_direction(self._coord,\n self.astar.get_closest_tile(self.__update_target_tile()))\n # Looks if the ghosts need to perform a set on opposite side movement\n if jump:\n self._set_on_opposite_side()\n self._moving_between_tiles = True\n self._draw_character(self._coord, self.__image)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export Tank class instance data for use in the next turn.
def exportData(self):
    global tankData
    resumeData = [self.baseLoc, self.tankFormations, self.last_path]
    # resumeData.append([p.id for p in self.pirates])
    tankData[self.identifier] = resumeData
[ "def __init__(self):\n\n self.deck = Deck()\n self.keep_playing = True\n self.total_points = 300", "def __init__(self):\n self.history = []\n self.stochastic = \"random\" in inspect.getsource(self.__class__)\n self.tournament_length = -1\n if self.name == \"Player\":\n self.stochastic = False\n self.cooperations = 0\n self.defections = 0", "def __init__(self):\n\n self.speed = 0.3\n self.time = 0\n self.mileage = 0\n self.package_list = []\n self.current_node = 1", "def __init__(self):\n self.game_deck = Deck()\n self.game_deck.shuffle_deck()\n\n self.player1 = Hand(self.game_deck.deck[0:26])\n self.player2 = Hand(self.game_deck.deck[26:52])\n\n self.drawdeck = []", "def __init__(self, water_heater):\n super(MilkHeater, self).__init__()\n self.water_heater = water_heater\n self.milk_tank = MilkTank()", "def __init__(self, deck):\n # self._name = name\n self._deck = deck", "def __init__(self):\n self.dataNames = ['distance']\n self.initializeData()", "def __init__(self):\n self.variables = {}\n self.values = {}", "def player_data(self):\n data = PlayerData(self.inventory) \n data.load_meter_data(self)\n return data", "def __init__(self):\n super(WaterHeater, self).__init__()\n self.water_tank = WaterTank()\n self.water_temp = 20 # C\n self.current_capacity = self.MIN_CAPACITY", "def __init__(self,data,level,fval,pre,move):\r\n self.data = data\r\n self.level = level\r\n self.fval = fval\r\n self.pre=pre\r\n self.move=move", "def __init__(self: \"Keeper\") -> \"Keeper\":\n self.player = 0\n self.points = 0\n self.score = [0, 0]", "def __init__(self):\n self.T = dict()", "def __init__(self):\n self.count = 0\n self.weather = 1", "def __init__(self):\n self.timestep_to_name = {}", "def __init__(self):\n ABSVisitor.__init__(self)\n self.smart_dep_json = []\n self.dc_json = {}\n self.deploy_annotations = []\n self.module_name = \"\"\n self.classes = {}\n self.interfaces = {}", "def __init__(self, player, start_credit=100):\n self.dealer = Dealer()\n self.deck = Deck()\n self.player = Player(player, start_credit)", "def __init__(self):\n self.stat = Statistics()", "def __init__(self,name):\n self.name = name\n self.level = 0\n self.xp = 0\n self.requiredxp = 1000\n self.health = 100\n self.maxhealth = 100\n self.mana = 100\n self.maxmana = 100\n self.inventory = {1:\"x\",2:\"x\",3:\"x\",4:\"x\"}\n self.backpack = {}\n for n in range(1,10):\n self.backpack[n] = \"x\"", "def __init__(self):\n Planet.planetcount += 1 #becomes ID\n\n self.ID = Planet.planetcount\n self.name = self.generateRandomName()\n self.type = self.chooseRandomPlanetType()\n self.adjlist = [] #represents which planets can be reached from this planet" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate positions to form Tank in current game radius (base is (0,0))
def calculateFormation(self):
    # Path-faced tank formation calculation:
    # -----------------------------------
    starting_loc = (0, 0)
    direction_forms = []
    for direction in xrange(4):  # n, w, s, e
        goodloc = []
        count = 1
        row = 0
        while count > 0:
            count = 0
            for col in xrange(radius + 1):
                col_or_row = (direction % 2 == 1)
                possible_loc = (starting_loc[0] + col_or_row * col + (not col_or_row) * row, starting_loc[1] + col_or_row * row + (not col_or_row) * col)
                for test_loc in goodloc:
                    if not my_in_range(possible_loc, test_loc):
                        break
                else:
                    count += 1
                    goodloc.append(possible_loc)
                    continue
            row += 1 + -2 * int(direction >= 2)
        direction_forms.append(goodloc)
    self.tankFormations = direction_forms  # n, w, s, e

    # Centered tank formation calculation:
    # -----------------------------------
    # starting_loc = (0, 0)
    # goodloc = [starting_loc]
    #
    # possible_loc = starting_loc
    # startedDown = False
    #
    # while True:
    #     count = 0
    #     while abs(possible_loc[1] - starting_loc[1]) <= radius:
    #         for test_loc in goodloc:
    #             if not my_in_range(possible_loc, test_loc):
    #                 break
    #         else:
    #             count += 1
    #             goodloc.append(possible_loc)
    #
    #         possible_loc = (possible_loc[0], possible_loc[1] + 1)
    #
    #     if count > 0 and not startedDown:
    #         possible_loc = (possible_loc[0] + 1, starting_loc[1])
    #     else:
    #         if not startedDown:
    #             possible_loc = (starting_loc[0] - 1, starting_loc[1])
    #         else:
    #             possible_loc = (possible_loc[0] - 1, starting_loc[1])
    #
    #     if startedDown and count == 0:
    #         break
    #
    #     startedDown = True
    #
    # goodloc = list(set(goodloc))
    # self.tankFormations = [sorted(goodloc, key=lambda loc: (abs(loc[0]), abs(loc[1])))] * 4
[ "def domain_tank(self):\n \n assert (self.ntheta > 0), \"ntheta must be >0 to use the tank shape\" \n\n #Create the 'rectangle part' of the tank\n\n for j in range(0,self.Nptsy):\n for i in range(0,self.Nptsx):\n #nodes\n self.nodes[i+j*self.Nptsx,0]=i+j*self.Nptsx\n self.nodes[i+j*self.Nptsx,1]=self.y[j]\n self.nodes[i+j*self.Nptsx,2]=self.x[i]\n #neighbor\n self.neig[i+j*self.Nptsx,0]=i+j*self.Nptsx\n #middle\n if (j>0 and j<self.Nptsy-1 and i>0 and i<self.Nptsx-1):\n self.neig[i+j*self.Nptsx,1]=i-1+j*self.Nptsx \n self.neig[i+j*self.Nptsx,2]=i+1+j*self.Nptsx\n self.neig[i+j*self.Nptsx,3]=i+(j-1)*self.Nptsx\n self.neig[i+j*self.Nptsx,4]=i+(j+1)*self.Nptsx\n self.neig[i+j*self.Nptsx,5]=-1 \n #lower boudary\n elif (j==0 and i>0 and i<self.Nptsx-1):\n self.neig[i+j*self.Nptsx,1]=i-1+j*self.Nptsx \n self.neig[i+j*self.Nptsx,2]=i+1+j*self.Nptsx\n self.neig[i+j*self.Nptsx,3]=-1 #temporary -1\n self.neig[i+j*self.Nptsx,4]=i+(j+1)*self.Nptsx\n self.neig[i+j*self.Nptsx,5]=-1\n #upper boundary\n elif (j==self.Nptsy-1 and i>0 and i<self.Nptsx-1):\n self.neig[i+j*self.Nptsx,1]=i-1+j*self.Nptsx \n self.neig[i+j*self.Nptsx,2]=i+1+j*self.Nptsx\n self.neig[i+j*self.Nptsx,3]=i+(j-1)*self.Nptsx\n self.neig[i+j*self.Nptsx,4]=-1 #temporary -1\n self.neig[i+j*self.Nptsx,5]=-1\n #left boundary\n elif (i==0 and j>0 and j<self.Nptsy-1):\n self.neig[i+j*self.Nptsx,1]=-2\n self.neig[i+j*self.Nptsx,2]=i+1+j*self.Nptsx \n self.neig[i+j*self.Nptsx,3]=i+(j-1)*self.Nptsx\n self.neig[i+j*self.Nptsx,4]=i+(j+1)*self.Nptsx\n self.neig[i+j*self.Nptsx,5]=-1\n #right boundary\n elif (i==self.Nptsx-1 and j>0 and j<self.Nptsy-1):\n self.neig[i+j*self.Nptsx,1]=i-1+j*self.Nptsx \n self.neig[i+j*self.Nptsx,2]=-3\n self.neig[i+j*self.Nptsx,3]=i+(j-1)*self.Nptsx\n self.neig[i+j*self.Nptsx,4]=i+(j+1)*self.Nptsx\n self.neig[i+j*self.Nptsx,5]=-1\n #corners\n self.neig[0,1]=-2\n self.neig[0,2]=1\n self.neig[0,3]=-1 #temporary -1\n self.neig[0,4]=self.Nptsx\n self.neig[0,5]=-1\n \n self.neig[self.Nptsx-1,1]=self.Nptsx-2\n self.neig[self.Nptsx-1,2]=-3\n self.neig[self.Nptsx-1,3+self.ntheta]=2*self.Nptsx-1\n self.neig[self.Nptsx-1,4+self.ntheta]=-1\n\n self.neig[self.Nptsx*(self.Nptsy-1),1]=-2 \n self.neig[self.Nptsx*(self.Nptsy-1),2]=self.Nptsx*(self.Nptsy-1)+1\n self.neig[self.Nptsx*(self.Nptsy-1),3]=-1 #temporary -1\n self.neig[self.Nptsx*(self.Nptsy-1),4]=self.Nptsx*(self.Nptsy-2)\n self.neig[self.Nptsx*(self.Nptsy-1),5]=-1\n \n self.neig[self.Nptsx*self.Nptsy-1,1]=self.Nptsx*self.Nptsy-2\n self.neig[self.Nptsx*self.Nptsy-1,2]=-3\n self.neig[self.Nptsx*self.Nptsy-1,3]=self.Nptsx*(self.Nptsy-1)-1\n self.neig[self.Nptsx*self.Nptsy-1,4]=-1\n \n id_node=self.Nptsy*self.Nptsx-1 \n self.angle=math.pi/(self.ntheta*2)\n \n #Create lower 'cricle part'\n for theta in range(1,self.ntheta+1):\n for r in range(1,self.Nptsx):\n #update the id of the node\n id_node+=1\n #nodes\n self.nodes[id_node,0]=id_node\n self.nodes[id_node,1]=self.Lx/2-self.x[r]*math.sin(theta*self.angle)\n self.nodes[id_node,2]=self.Lx/2-self.x[r]*math.cos(theta*self.angle)\n #neighbor\n self.neig[id_node,0]=id_node\n if (r>1 and r<self.Nptsx-1 and theta>1 and theta<self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node+self.Nptsx-1\n self.neig[id_node,4]=id_node-self.Nptsx+1\n self.neig[id_node,5]=-1\n #border of the circle\n elif (r==self.Nptsx-1 and theta>1 and theta<self.ntheta):\n self.neig[id_node,1]=-2\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node+self.Nptsx-1\n 
self.neig[id_node,4]=id_node-self.Nptsx+1\n self.neig[id_node,5]=-1\n #right boundary\n elif (r>1 and r<self.Nptsx-1 and theta==self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=-3\n self.neig[id_node,4]=id_node-self.Nptsx+1\n self.neig[id_node,5]=-1\n #upper boundary\n elif (r>1 and r<self.Nptsx-1 and theta==1 ):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node+self.Nptsx-1\n self.neig[id_node,4]=self.Nptsx-1-r\n self.neig[id_node,5]=-1\n self.neig[self.Nptsx-1-r,3]=id_node\n #center of the circle\n elif (r==1 and theta>1 and theta<self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=self.Nptsx-1\n self.neig[id_node,3]=id_node+self.Nptsx-1\n self.neig[id_node,4]=id_node-self.Nptsx+1\n self.neig[id_node,5]=-1\n self.neig[self.Nptsx-1,2+theta]=id_node\n #corners\n elif (r==self.Nptsx-1 and theta==1):\n self.neig[id_node,1]=-2\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node+self.Nptsx-1\n self.neig[id_node,4]=0\n self.neig[id_node,5]=-1\n self.neig[0,3]=id_node\n elif (r==self.Nptsx-1 and theta==self.ntheta):\n self.neig[id_node,1]=-2\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=-3\n self.neig[id_node,4]=id_node-self.Nptsx+1\n self.neig[id_node,5]=-1\n elif (r==1 and theta==1):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=self.Nptsx-1\n self.neig[id_node,3]=id_node+self.Nptsx-1\n self.neig[id_node,4]=self.Nptsx-2\n self.neig[id_node,5]=-1\n self.neig[self.Nptsx-2,3]=id_node\n self.neig[self.Nptsx-1,3]=id_node\n elif (r==1 and theta==self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=self.Nptsx-1\n self.neig[id_node,3]=-3\n self.neig[id_node,4]=id_node-self.Nptsx+1\n self.neig[id_node,5]=-1\n self.neig[self.Nptsx-1,2+theta]=id_node\n \n # Create upper circle part\n for theta in range(1,self.ntheta+1):\n for r in range(1,self.Nptsx):\n #update the id of the node\n id_node+=1\n #nodes\n self.nodes[id_node,0]=id_node\n self.nodes[id_node,1]=self.Ly-self.Lx/2+self.x[r]*math.sin(theta*self.angle)\n self.nodes[id_node,2]=self.Lx/2-self.x[r]*math.cos(theta*self.angle)\n #neighbor\n self.neig[id_node,0]=id_node\n if (r>1 and r<self.Nptsx-1 and theta>1 and theta<self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node-self.Nptsx+1\n self.neig[id_node,4]=id_node+self.Nptsx-1\n self.neig[id_node,5]=-1\n #border of the circle\n elif (r==self.Nptsx-1 and theta>1 and theta<self.ntheta):\n self.neig[id_node,1]=-2\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node-self.Nptsx+1\n self.neig[id_node,4]=id_node+self.Nptsx-1\n self.neig[id_node,5]=-1\n #right boundary\n elif (r>1 and r<self.Nptsx-1 and theta==self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node-self.Nptsx+1\n self.neig[id_node,4]=-3\n self.neig[id_node,5]=-1\n #lower boundary\n elif (r>1 and r<self.Nptsx-1 and theta==1 ):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=self.Nptsy*self.Nptsx-1-r\n self.neig[id_node,4]=id_node+self.Nptsx-1\n self.neig[id_node,5]=-1\n self.neig[self.Nptsy*self.Nptsx-1-r,3]=id_node\n #center of the circle\n elif (r==1 and theta>1 and theta<self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=self.Nptsx*self.Nptsy-1\n self.neig[id_node,3]=id_node-self.Nptsx+1\n self.neig[id_node,4]=id_node+self.Nptsx-1\n self.neig[id_node,5]=-1\n 
self.neig[self.Nptsy*self.Nptsx-1,3+theta]=id_node\n #corners\n elif (r==self.Nptsx-1 and theta==1):\n self.neig[id_node,1]=-2\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=self.Nptsx*(self.Nptsy-1)\n self.neig[id_node,4]=id_node+self.Nptsx-1\n self.neig[id_node,5]=-1\n self.neig[self.Nptsx*(self.Nptsy-1),3]=id_node\n elif (r==self.Nptsx-1 and theta==self.ntheta):\n self.neig[id_node,1]=-2\n self.neig[id_node,2]=id_node-1\n self.neig[id_node,3]=id_node-self.Nptsx+1\n self.neig[id_node,4]=-3\n self.neig[id_node,5]=-1\n elif (r==1 and theta==1):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=id_node+self.Nptsx-1\n self.neig[id_node,3]=self.Nptsy*self.Nptsx-2\n self.neig[id_node,4]=self.Nptsy*self.Nptsx-1\n self.neig[id_node,5]=-1\n self.neig[self.Nptsy*self.Nptsx-2,4]=id_node\n self.neig[self.Nptsy*self.Nptsx-1,4]=id_node\n elif (r==1 and theta==self.ntheta):\n self.neig[id_node,1]=id_node+1\n self.neig[id_node,2]=self.Nptsy*self.Nptsx-1\n self.neig[id_node,3]=id_node-self.Nptsx+1\n self.neig[id_node,4]=-3\n self.neig[id_node,5]=-1\n self.neig[self.Nptsy*self.Nptsx-1,3+theta]=id_node\n self.neig[self.Nptsy*self.Nptsx-1,4+theta]=-1\n \n\t\tself.dy = max(abs(np.diff(self.nodes[:,1])))", "def _random_towards(current, target, radius):", "def getMovementPoints(self):\n movementPoints = 0\n if 45 <=self.tread > 30:\n movementPoints = 3\n elif 30 <= self.tread > 15:\n movementPoints = 2\n elif 15 <= self.tread > 0:\n movementPoints = 1\n else:\n movementPoints = 0\n\n return movementPoints", "def calculePosition(self):\n #Si latitude + vitesse se trouve entre -90 degre et 90 degre\n if self.latitude + self.vitesse <= 324000 and self.latitude + self.vitesse >= -324000:\n self.latitude = self.latitude + self.vitesse\n self.longitude = self.longitude - 15\n #Si latitude + vitesse se trouve superieur a 90 degre (il vient de depasser le Pole Nord)\n elif self.latitude + self.vitesse > 324000:\n self.latitude = (648000) - (self.latitude + self.vitesse)\n self.longitude = -(648000) + (self.longitude - 15)\n self.vitesse = -self.vitesse\n #Si latitude + vitesse se trouve infenieur a 90 degre (il vient de depasser le Pole Sud)\n else:\n self.latitude = -(648000) - (self.latitude + self.vitesse)\n self.longitude = -(648000) + (self.longitude - 15)\n self.vitesse = -self.vitesse\n #Si longitude depasse -648000\" alors longitude repasse à 647999\"\n if self.longitude < -648000:\n self.longitude = 648000 - (-self.longitude - 648000)\n self.calculPointageCamera()", "def get_kicker_position(self):\n # use the direction and position of the robot to find the position of the kicker\n direction_unit_vector = self.robot.direction.unit_vector()\n kicker_vector = direction_unit_vector.scale(self.dist_kicker_robot)\n return self.robot.position + kicker_vector", "def _get_target_position(self):\n\n if self.chasing:\n player_position = self.game.player.grid_position\n player_direction_vector = self.game.player.current_direction.value\n # Could have used Pink's target position, but calculating here reduces confusion\n two_cells_ahead_of_player = Vector(player_position) + (2 * player_direction_vector)\n red_beetle_position = self.game.red_enemy.grid_position\n # Double the vector between 2 cells ahead of the player and the red beetle's position\n target_position = 2 * Vector(two_cells_ahead_of_player) - Vector(red_beetle_position)\n return target_position\n\n else:\n # Bottom right in scatter mode\n target_position = (self.game.level.columns + 1, -1)\n return target_position", "def 
_get_random_location(self):\n\n width, height = self.world_size\n\n # # Generate a random (x, y) coordinate within the world's borders\n x = random.uniform(self.radius, width - self.radius)\n y = random.uniform(self.radius, height - self.radius)\n\n x -= width // 2\n y -= height // 2\n\n return x, y", "def getPos(self):\n\t\treturn self.__robot.x(), self.__robot.y(), self.__robot.a()", "def circle_trajectory(commander, x=500, y0=250, z0=500, r=50, step=10):\n pos = [x, y0 + r, z0, 0, 0, 0]\n sol = [commander.find_closest_ikt(pos)]\n rng = int(360 / step)\n for i in range(rng + 1):\n y = y0 + r * np.cos((i * step) / 180.0 * np.pi)\n z = z0 + r * np.sin((i * step) / 180.0 * np.pi)\n pos = [x, y, z, 0, 0, 0]\n prev_a = commander.find_closest_ikt(pos, sol[-1])\n sol.append(prev_a)\n\n return np.array(sol)", "def find_points(self):\n \n radius = self.radius\n thickness = self.thickness\n bottom_outer_x, bottom_outer_y = self.outer_start_point\n top_outer_y = bottom_outer_y + (4 * radius)\n top_outer_x = bottom_outer_x\n inner_r = radius - thickness\n bottom_outer_x, bottom_outer_y, thickness, radius, top_outer_x, top_outer_y, inner_r = float(bottom_outer_x), float(bottom_outer_y), float(thickness), float(radius), float(top_outer_x), float(top_outer_y), float(inner_r)\n \n p1 = (bottom_outer_x,bottom_outer_y, 'circle')\n p3 = (p1[0] + radius, p1[1] + radius, 'straight')\n p4 = (p3[0], p3[1] + radius*2, 'circle')\n p6 = (top_outer_x,top_outer_y, 'straight')\n p7 = (p6[0],p6[1] - thickness, 'circle')\n p9 = (p4[0]-thickness, p4[1],'straight')\n p10 = (p3[0] - thickness, p3[1],'circle')\n p12 = (p1[0], p1[1] + thickness, 'straight')\n p2 = ((p1[0]) + (radius*math.cos((3*math.pi)/8)),(p1[1]+radius) -(radius*math.sin((3*math.pi)/8)),'circle')\n p5 = ((p6[0] + (radius*math.cos((2*math.pi)/8))),(p6[1] - radius) + (radius*math.sin((2*math.pi)/8)),'circle')\n p8 = ((p7[0] + (inner_r*math.cos((2*math.pi)/8))),(p7[1] - inner_r) + (inner_r*math.sin((2*math.pi)/8)),'circle')\n p11 = ((p12[0]) + (inner_r*math.cos((3*math.pi)/8)),(p12[1]+inner_r) -(inner_r*math.sin((3*math.pi)/8)),'circle')\n \n self.points = [\n p1,\n p2,\n p3,\n p4,\n p5,\n p6,\n p7,\n p8,\n p9,\n p10,\n p11,\n p12\n ]", "def radial_coord(self, r):\n return r / np.sqrt(1 - (2 * self.M.value / r))", "def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT", "def cr(radius):\n\tcenter=position()\n\tres=[]\n\tres.append(epq.StageCoordinate(center, epu.Translate2D([0, -radius, 0.0], 0.0)))\n\tres.append(epq.StageCoordinate(center, epu.Translate2D([-0.866*radius, 0.5*radius, 0.0], 0.0)))\n\tres.append(epq.StageCoordinate(center, epu.Translate2D([-0.866*radius, -0.5*radius, 0.0], 0.0)))\n\treturn tuple(res)", "def test_calc_new_pos_direct_top_right(self):\n expected = [123, 115]\n self.ball.ball_velocity[:] = 1, -1\n result = self.ball.calc_new_pos(self.ball.rect, self.ball.angle)\n self.assertEqual([result.x, result.y], expected)", "def comp_tank_relative_r_phi(\n sample: np.lib.npyio.NpzFile,\n ender_x_y_center: float = 180.0,\n) -> tuple:\n x_abs = sample[\"enderstat\"].tolist()[\"abs_x_pos\"] - ender_x_y_center\n y_abs = sample[\"enderstat\"].tolist()[\"abs_y_pos\"] - ender_x_y_center\n\n r = np.round(np.sqrt(x_abs**2 + y_abs**2), 2)\n # Pay attention to the phi position due to the phantom tank alignment.\n phi = -np.round(math.degrees(np.arctan2(x_abs, y_abs)) - 90, 2) # + 90\n\n return (r, phi)", "def get_position_clock(self, surface):\n return 
0, 0", "def get_bounds(self):\n # retrieve the current center position\n position = self.get_position()\n # retrieve the tile's center (half size of the card)\n card_center = self.get_center()\n # calculate the top-left\n topleft = (position[0] - card_center[0], position[1] - card_center[1])\n # calculate the bottom-right\n bottomright = (position[0] + card_center[0], position[1] + card_center[1])\n return (topleft, bottomright)", "def max_radius():\r\n return 20", "def r(self):\n return self.coords.r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Look up videos for youtubegeo.
def geo(): # ensure parameters are present if not request.args.get("location"): raise RuntimeError("missing youtube geodata") query = { 'q' : request.args.get('q'), 'location' : request.args.get('location'), 'locationRadius' : request.args.get('locationRadius'), 'maxResults' : request.args.get('maxResults') } key=os.environ.get("API_KEY") videos = youtubesearch(query, key) return jsonify(videos)
[ "def search_youtube(text_to_search):\n # query = urllib.parse.quote(text_to_search)\n # url = \"https://www.youtube.com/results?search_query=\" + query\n videosSearch = VideosSearch(text_to_search, limit=2)\n results = videosSearch.result()\n results = results['result']\n\n #\n # try:\n # response = urllib.request.urlopen(url)\n # html = response.read()\n # html = str(html, 'utf-8')\n # except Exception as e:\n # p('😥 Youtube gave up, this is so sad, can we get 1 like ' + repr(e))\n # return []\n #\n # # find and get video id from html string.\n # start_string = 'var ytInitialData = '\n # end_string = ']};</script><script nonce='\n #\n # start_position = html.find(start_string)\n # start_position += len(start_string)\n #\n # end_position = html.find(end_string)\n #\n # # get the youtube object\n # object_string = html[start_position: end_position + 3]\n #\n # # trim the end and remove the last ; semi colon\n # my_fav_object = object_string.strip()[0:-1]\n #\n # fav_object = json.loads(my_fav_object)\n #\n # list = \\\n # fav_object['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'][\n # 0][\n # 'itemSectionRenderer']['contents']\n #\n # selected_video = False\n\n video_list = []\n\n for item in results:\n if item['descriptionSnippet']:\n desc = item['descriptionSnippet'][0]['text']\n else:\n desc = ''\n video_list.append({\n 'title': item['title'],\n 'channel': item['channel']['name'],\n 'description': desc,\n 'href': item['link'],\n 'video_id': item['id'],\n 'duration': item['duration'],\n 'duration_seconds': give_me_seconds(item['duration'])\n })\n # for item in list:\n # if 'videoRenderer' in item:\n # videoId = item['videoRenderer']['videoId']\n # title = item['videoRenderer']['title']['runs'][0]['text']\n # time = item['videoRenderer']['lengthText']['simpleText']\n # description = ''\n # if 'descriptionSnippet' in item['videoRenderer']:\n # description = item['videoRenderer']['descriptionSnippet']['runs'][0]['text']\n # channel_name = item['videoRenderer']['ownerText']['runs'][0]['text']\n # seconds = give_me_seconds(time)\n # # selected_video = {\n # # 'video_id': videoId,\n # # 'title': title,\n # # 'time': this_video_seconds,\n # # 'description': description,\n # # 'channel_name': channel_name\n # # }\n # video_list.append({\n # 'title': title,\n # 'channel': channel_name,\n # 'description': description,\n # 'href': '',\n # 'video_id': videoId,\n # 'duration': time,\n # 'duration_seconds': seconds\n # })\n\n # page = BeautifulSoup(html, features='lxml')\n # vid_list = page.find_all('div', attrs={'class': 'yt-lockup-content'})\n #\n # for vid in vid_list:\n #\n # title_link = vid.findChild('a', attrs={'class': 'yt-uix-tile-link'}, recursive=True)\n # if title_link is None:\n # continue\n #\n # title = title_link.attrs['title']\n # href = title_link.attrs['href']\n #\n # duration_el = vid.findChild('span', attrs={'class': 'accessible-description'}, recursive=True)\n # if duration_el is None:\n # continue\n #\n # duration = duration_el.text\n #\n # channel_name = ''\n # channel_name_el = vid.findChild('a', attrs={'class': 'yt-uix-sessionlink'}, recursive=True)\n # if channel_name_el is None:\n # channel_name = channel_name_el.text\n #\n # video_description_el = vid.findChild('div', attrs={'class': 'yt-lockup-description'}, recursive=True)\n # video_description = ''\n # if video_description_el is not None:\n # video_description = video_description_el.text\n #\n # if duration.find('Duration') == -1:\n # continue\n #\n # duration_parsed 
= duration[duration.find(':') + 2:-1]\n # # not parsing hour long stuff right now: example: 1:01:49\n # # if the target video is more than 1 hr, consider it has 1 hr.\n # if len(duration_parsed) > 5:\n # duration_parsed = '59:59'\n #\n # duration_in_seconds = int(duration_parsed[int(duration_parsed.find(':')) + 1:])\n # duration_in_minutes = int(duration_parsed[:duration_parsed.find(':')])\n # total_duration_in_seconds = duration_in_seconds + (duration_in_minutes * 60)\n # video_id = href[href.find('?v=') + 3:]\n # video_list.append({\n # 'title': title,\n # 'channel': channel_name,\n # 'description': video_description,\n # 'href': href,\n # 'video_id': video_id,\n # 'duration': duration_parsed,\n # 'duration_seconds': total_duration_in_seconds\n # })\n\n return video_list", "async def youtube(self, ctx, *, video: str):\n videos = await self.search_youtube(video)\n if videos:\n await ctx.send(videos[0][\"info\"][\"uri\"])\n else:\n await ctx.send(\"Nothing found.\")", "async def youtubesearch(self, ctx, *, video: str):\n results = await self.search_youtube(video)\n if results:\n videos = []\n for obj in results:\n videos.append(obj[\"info\"][\"uri\"])\n await menu(ctx, videos, DEFAULT_CONTROLS, timeout=60)\n else:\n await ctx.send(\"Nothing found.\")", "def get_youtube_video():\n### FROM random_recipe_and_movie_results.html\n\n q = request.args.get(\"q\")\n\n payload = {'part': 'snippet',\n 'maxResults': 5,\n 'q': q,\n 'type': 'video',\n 'videoDuration':'long',\n 'videoType': 'movie',\n 'key': YOUTUBE_KEY}\n\n response = requests.get(\"https://www.googleapis.com/youtube/v3/search\", params=payload)\n data = response.json()\n video_id = data['items'][0]['id']['videoId']\n\n return jsonify(video_id)", "def search_youtube(self, search):\n url = YOUTUBE_URL + self.get_search_query(search)\n self.open_page(url)", "def search_videos_on_youtube(youtube, queries):\n video_ids = []\n for query in queries:\n if query in youtube_search_cache:\n video_ids.append(youtube_search_cache[query])\n else:\n response = (\n youtube.search()\n .list(q=query, part=\"id,snippet\", type=\"video\", maxResults=1)\n .execute()\n )\n video_id = response[\"items\"][0][\"id\"][\"videoId\"]\n youtube_search_cache[query] = video_id\n video_ids.append(video_id)\n return video_ids", "def get_youtube_video(type):\n videos = []\n with open(abspath(os.path.join('data', type + 's_imdb.json'))) as v:\n videos_raw = json.load(v)\n for video in videos_raw:\n # video info provided to youtube_search\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\"--q\", help=\"Search term\", default=\"Google\")\n argparser.add_argument(\"--max-results\",\n help=\"Max results\", default=25)\n if type == 'movie':\n args = argparser.parse_args(\n [\"--q\", video['title'] + ' trailer'])\n video = get_youtube_url(video, args)\n videos.append(video)\n else:\n for i in range(int(video['season'])):\n new_video = copy.deepcopy(video)\n new_title = video['title'] + ' season ' + str(i + 1)\n new_video['title'] = new_title\n args = argparser.parse_args(\n [\"--q\", new_title + ' trailer'])\n new_video = get_youtube_url(new_video, args)\n videos.append(new_video)\n\n # write to the new json file\n with open(abspath(os.path.join('data',\n type + 's_youtube.json')), 'w') as f:\n f.write(json.dumps(videos))", "def search_youtube(self, title):\n\n # If title is already a link\n if link_utils.get_url(title) is not None:\n return title\n\n with youtube_dl.YoutubeDL(YTDL_Config) as ydl:\n song_request = ydl.extract_info(title, download=False)\n \n 
videocode = song_request['entries'][0]['id']\n return \"https://www.youtube.com/watch?v={}\".format(videocode)", "def youtube_search():\n recognizer = sr.Recognizer()\n url = \"https://www.youtube.com/results?search_query=\"\n with sr.Microphone() as source:\n recognizer.adjust_for_ambient_noise(source, duration=5)\n print(\"What would you wish to search for on youtube?\")\n audio = recognizer.listen(source)\n try:\n get = recognizer.recognize_google(audio)\n print(get)\n wb.get().open_new(url+get)\n except sr.UnknownValueError:\n print(\"error\")\n except sr.RequestError as e:\n print(\"failed\".format(e))", "def youtube_search(search_term, max_results, api_key, search_type=\"video\"):\r\n client = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=api_key, cache_discovery=False)\r\n\r\n search_response = client.search().list(q=search_term, part=\"id,snippet\", maxResults=max_results, type=search_type).execute()\r\n\r\n results = []\r\n for result in search_response.get(\"items\", []):\r\n title = result[\"snippet\"][\"title\"]\r\n result_type = result[\"id\"][\"kind\"].replace(\"youtube#\", \"\")\r\n description = result[\"snippet\"][\"description\"]\r\n if result_type == \"video\":\r\n result_id = result[\"id\"][\"videoId\"]\r\n elif result_type == \"channel\":\r\n result_id = result[\"id\"][\"channelId\"]\r\n elif result_type == \"playlist\":\r\n result_id = result[\"id\"][\"playlistId\"]\r\n results.append(YoutubeResult(title, result_type, description, result_id))\r\n\r\n return results", "def get_movie_url(self, youtube_id):\n\n \n return 'http://www.youtube.com/watch?v=' + youtube_id", "def get_video(self):\n yt = YouTube(self.url)\n videos = yt.streams.filter(\n file_extension=\"mp4\", resolution=self.res)\n assert len(videos) > 0, \"Video unavailable.\"\n videos[0].download(filename=self.outfile)\n print(\"Download complete.\")", "def get_yt_urls(video_id):\n yield f'https://youtu.be/{video_id}'\n for prefix in ('https://', 'https://www.'):\n yield f'{prefix}youtube.com/watch?v={video_id}'\n yield f'{prefix}youtube.com/watch/{video_id}'\n yield f'{prefix}youtube.com/embed/{video_id}'\n yield f'{prefix}youtube.com/v/{video_id}'", "def get_videos(self):\n matchups = models.Matchup.objects.select_related('home', 'away').all()\n matchup_prefetch = Prefetch('matchups', queryset=matchups)\n return models.Video.objects.prefetch_related(matchup_prefetch)\\\n .filter(is_visible=True)", "def search_videos(self, search_term):\n results = []\n for video in self._video_library.get_all_videos():\n if search_term.lower() in video.title.lower():\n if not video.flag:\n results.append(video)\n self.display_search(results, search_term)", "def _extract_video_urls(self, sFilter=None):\n nNumPages = self._NumPagesGet()\n print(f\"Found {nNumPages} pages in the playlist...\")\n\n lUrlVideos = []\n for nPage in range(0, nNumPages):\n lPageVideos = self._extract_page_urls(nPage)\n if lPageVideos:\n lUrlVideos += lPageVideos\n print(f\"Found {len(lPageVideos)} on page {nPage + 1}\")\n else:\n print(f\"Failed to load page {nPage + 1}!\")\n break\n\n # Remove non-video URLs that may have been picked up\n lTemp = []\n for sUrl in lUrlVideos:\n if 'com/videos/recommended' in sUrl:\n continue\n if 'com/video' in sUrl:\n lTemp += [sUrl]\n else:\n raise ValueError(f\"Not sure about this one: {sUrl}\")\n lUrlVideos = lTemp\n\n nNumVideos = len(lUrlVideos)\n print(f\"\\r\\nFound {nNumVideos} video URLs in the playlist\")\n self.videos += lUrlVideos", "def video_links(doc):\n result = []\n if not 
isinstance(doc, str):\n try:\n for key, value in doc.items():\n if key == 'youtube':\n result.append(VideoLink(\n os.path.join('youtube', value + '.jpg'),\n 'http://img.youtube.com/vi/{0}/0.jpg'.format(value)\n ))\n else:\n result.extend(video_links(value))\n except AttributeError:\n try:\n for val in doc:\n result.extend(video_links(val))\n except TypeError:\n pass\n return result", "def _mock_query_youtube(self, ys):\n ys.driver.get(self.SEARCH_PAGE_URL)\n self._ys_driver = ys.driver\n ys.query_youtube = self._mocked_query_youtube", "def _yt(self, url):\n\n vid_id = get_yt_id(url)\n if not vid_id:\n return None\n try:\n json = get_yt_json(vid_id)\n if json is None:\n return None\n except (KeyError, ValueError):\n return None\n\n vid_info = {}\n try:\n # Last part of the ID format is the actual ID\n vid_id = json[\"id\"][\"$t\"].split(':')[-1]\n vid_info[\"link\"] = \"http://youtu.be/\" + vid_id\n except KeyError:\n # No point getting any more info if we don't have a valid link\n return None\n\n try:\n vid_info[\"title\"] = json[\"title\"][\"$t\"]\n except KeyError:\n vid_info[\"title\"] = \"N/A\"\n\n try:\n vid_info[\"uploader\"] = json[\"author\"][0][\"name\"][\"$t\"]\n except KeyError:\n vid_info[\"uploader\"] = \"N/A\"\n\n try:\n dt = datetime.strptime(json[\"published\"][\"$t\"], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n vid_info[\"uploaded\"] = dt.strftime(\"%Y-%m-%d\")\n except KeyError:\n vid_info[\"uploaded\"] = \"N/A\"\n\n try:\n vid_secs = int(json[\"media$group\"][\"yt$duration\"][\"seconds\"])\n vid_info[\"duration\"] = \"\"\n if vid_secs < 1:\n vid_info[\"duration\"] = \"LIVE\"\n else:\n hours, rem = divmod(vid_secs, 3600)\n mins, secs = divmod(rem, 60)\n\n if hours != 0:\n vid_info[\"duration\"] += format(hours, \"02d\") + \":\"\n\n vid_info[\"duration\"] += \"{:02d}:{:02d}\".format(mins, secs)\n except KeyError as ex:\n vid_info[\"duration\"] = \"N/A\"\n\n try:\n views = int(json[\"yt$statistics\"][\"viewCount\"])\n vid_info[\"views\"] = \"{:,}\".format(views)\n except KeyError:\n vid_info[\"views\"] = \"N/A\"\n\n try:\n likes = int(json[\"yt$rating\"][\"numLikes\"])\n dislikes = int(json[\"yt$rating\"][\"numDislikes\"])\n vid_info[\"likes\"] = \"+{:,}/-{:,}\".format(likes, dislikes)\n except KeyError:\n vid_info[\"likes\"] = \"N/A\"\n\n return vid_info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the password generator window
def on_generate_button(self): self.password_generator.show()
[ "def gen_pass(self):\n\n length = int(self.mainwindow_gui.length_slider.value())\n password = \"\"\n\n if (self.mainwindow_gui.include_numbers.isChecked()):\n password = functions.generate_password(length=length, include_numbers=True)\n else:\n password = functions.generate_password(length=length, include_numbers=False)\n\n self.update_status(\"status\", \"Password Generated\")\n self.mainwindow_gui.output_edit.setText(password)", "def show_new_password_window():\n window = NewPasswordWindow()\n window.show()\n gtk.main()\n return window.get_new_password()", "def generate_password():\n # Define password length\n nr_letters = random.randint(8, 10)\n nr_symbols = random.randint(2, 4)\n nr_numbers = random.randint(2, 4)\n\n # Choose random letters, symbols, numbers and add to lists\n password_letters = [random.choice(letters) for _ in range(nr_letters)]\n password_symbols = [random.choice(symbols) for _ in range(nr_symbols)]\n password_numbers = [random.choice(numbers) for _ in range(nr_numbers)]\n\n # Create full password and randomize the input order, join the list to a string\n password_list = password_letters + password_symbols + password_numbers\n random.shuffle(password_list)\n new_password = ''.join(password_list)\n\n # Delete any current password entries and add new one to window\n password_entry.delete(0, 'end')\n password_entry.insert(0, new_password)\n pyperclip.copy(new_password)\n # messagebox.showinfo(title='Password Copied', message='Your password has been copied to the clipboard!')", "def gen_pass():\n length = random.randint(7, 9)*5\n alphabet = string.ascii_letters + string.digits\n password = ''.join(secrets.choice(alphabet) for i in range(length))\n return password", "def password_form(self, err=''):\n\t\treturn header('Please choose a password for your account.') + \\\n\t\t\tplain_text(strings['ACC_PASSWORD']) + \\\n\t\t\tform_start(self.ses, self.form, {'go': 'store_pw'}) + \\\n\t\t\terror(err) + \\\n\t\t\ttable([],[('Password','<input type=\"password\" name=\"pw1\" value=\"\" />'),\n\t\t\t\t('Confirm password','<input type=\"password\" name=\"pw2\" value=\"\" />')]) + \\\n\t\t\tform_end('Continue')", "def pw_screen(self, screen):\n max_y, max_x = screen.getmaxyx()\n\n pass_screen_y_off = 3\n pass_screen_x_off = 10\n \n pass_text_y_off = 0\n pass_text_x_off = 3\n \n pass_win = screen.subwin(5, 25,(max_y/2) - pass_screen_y_off, (max_x/2) - pass_screen_x_off)\n pass_win.box()\n pass_win.addstr(1, 2, \"Please enter password\")\n \n pass_box = screen.subwin(1, 9,(max_y/2) - pass_text_y_off, (max_x/2) - pass_text_x_off)\n \n # pass_box.box()\n screen.refresh()\n pass_win.refresh()\n tb = curses.textpad.Textbox(pass_box)\n \n pass_text = tb.edit()\n\n self.password = pass_text[:len(pass_text) - 1]\n print \"PASSWORD LENGTH ==== \" + str(len(self.password))", "def show_password():\n if not flask.session:\n return flask.redirect(flask.url_for(\"show_login\"))\n\n if flask.request.method == \"POST\":\n connection = todolist.model.get_db()\n cur = connection.execute(\n \"SELECT password \"\n \"FROM users \"\n \"WHERE username = '%s'\" % flask.session[\"username\"]\n )\n users = cur.fetchall()\n\n if check_password(flask.request.form['password'],\n users[0]['password']):\n if flask.request.form['new_password1'] != \\\n flask.request.form['new_password2']:\n return flask.abort(401)\n connection.execute(\n \"UPDATE users \"\n \"SET password = ? 
\"\n \"WHERE username = ?\", (hash_salt(\n flask.request.form['new_password1']),\n flask.session['username'])\n )\n return flask.redirect(flask.url_for(\"show_edit\"))\n return flask.abort(403)\n return flask.render_template(\"password.html\",\n logname=flask.session['username'])", "def _page_password(self):\n return self._open(self.app.page_password)", "def prntPWlist():\n print(\"\\nHere is your list of 10 passwords:\\n\")\n pws = genPWlist()\n ii = 0\n while ii < len(pws):\n print(str(ii+1)+\": \"+pws[ii])\n ii += 1\n answer = input(\"would you like another list of 10 passwords? (y/n)\\n\")\n if answer == \"y\":\n prntPWlist()\n else:\n print(\"Thank you for using this password generator. Goodbye.\")", "def password(title,height,width,text):\n command=\"dialog --clear --title \\\"\" + title + \"\\\" --password \\\"\" + \\\n\t text + \"\\\" \" + `height` + \" \" + `width` + \\\n\t \" 2>&1 > /dev/tty\"\n diag=os.popen(command)\n ans=diag.read()\n r=diag.close()\n if r:\n\treturn 0\n else:\n\treturn ans", "def _create_popup(self, instance):\n super()._create_popup(instance)\n self.textinput.password = True", "def genPW():\n newPW = \"\"\n i = 1\n while i <= 12:\n newChar = genChar()\n newPW = newPW + newChar\n i += 1\n return newPW", "def generate_password():\n return \"\".join(\n [\n secrets.choice(\n string.ascii_letters +\n string.digits +\n string.punctuation\n )\n for _ in range(32)\n ]\n )", "def DwfPassword(self) -> str:", "def test_view_pw(self):\n rsc = resources.get_by_name(\"host1.example.com\")\n self.open_url('/resource/view/{0}'.format(rsc.id))\n \n user0 = rsc.passwords.filter_by(username='user0').one()\n \n el = self.wd.find_element(By.ID, \"pw{0}\".format(user0.id))\n self.assertFalse(el.is_displayed())\n \n link = self.wd.find_element(By.ID, \"lnk{0}\".format(user0.id))\n \n \n link.click()\n \n def is_displayed(el):\n if el.is_displayed():\n return el\n \n found_el = WebDriverWait(self.wd, 10).until(lambda d: is_displayed(d.find_element(By.ID, \"pw{0}\".format(user0.id))))\n \n self.assertEqual(user0.password_decrypted, el.get_attribute(\"value\"))", "def incorrect(self):\r\n popup = tk.messagebox.showwarning(\"Incorrect Password\", \"Password entered is incorroect, try again.\")", "def test_add_gen(self):\n rsc = resources.get_by_name(\"host1.example.com\")\n self.open_url('/resource/view/{0}'.format(rsc.id))\n self.submit_form(\"add_password_form\")\n \n self.assertEqual(\"Add a Password\", self.wd.title)\n \n el = self.wd.find_element(By.ID, \"username\")\n el.send_keys('user5')\n \n # Generate a password\n self.wd.find_element(By.ID, \"generate-pw-button\").click()\n \n def has_value(element):\n if element.get_attribute(\"value\") != \"\":\n return element\n \n genpw_el = WebDriverWait(self.wd, 10).until(lambda d: has_value(d.find_element(By.ID, \"mypassword\")))\n generated_password = genpw_el.get_attribute('value')\n \n # Copy it in\n self.wd.find_element(By.ID, \"copy-pw-button\").click()\n \n self.assertEquals(generated_password, self.wd.find_element(By.ID, \"password_decrypted\").get_attribute('value'))\n \n self.submit_form(\"password_form\")\n \n self.assertEqual(\"View Resource\", self.wd.title)\n \n user5 = rsc.passwords.filter_by(username='user5').one()\n \n self.assert_notification(\"Password created: user5 (id={0})\".format(user5.id))\n self.assert_in_list_table(\"user5\", table=2, is_link=False)\n \n self.assertEqual(generated_password, user5.password_decrypted)", "def main():\n\n # Walk the user through some choices\n passlen = next(inputintgen(\n 
\"Note: provided passwords will always be at least 4 characters \\n\"\n \" Choose a length for your passwords: \", None))\n if passlen < 4:\n passlen = 4\n\n typechoice = input(\n \"OPTIONS:\\n\"\n \"L Password must contain lowercase Letters\\n\"\n \"U Password must contain uppercase Letters\\n\"\n \"D Password must contain numeric digits\\n\"\n \"S Password must contain Symbols\\n\"\n \"Type some letters describing your choice: \\n\"\n \" Examples: you could type LD, UDS, or LUDS \"\n )\n\n # Notes:\n # Silently ignore any garbage in the input\n # Permit options in upper or lower case\n # Defaults to L (lowercase) if no valid options found\n\n typechoice = typechoice.upper()\n\n # Comprehension using a local dict to decode the letters\n passtypes = [\n {\n 'L': LOWER,\n 'U': UPPER,\n 'D': DIGITS,\n 'S': SYMBOLS}[letter]\n for letter in typechoice if letter in \"LUDS\"\n ]\n\n if not passtypes:\n passtypes.append(LOWER)\n\n # Now generate and print passwords based on the user specifications\n print(\"Each time you press ENTER, a new password will be generated,\\n\",\n \"Type anything else to terminate.\")\n while input() == \"\":\n print(pwgen(passlen, passtypes))", "def password(nchars: int) -> str:\n choices = string.printable\n return nfromchoices(nchars, choices)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function which calculates the execution time of an arbitrary number of functions and ranks them
def fun_exec_time(*func): times = dict() for f in func: # execute function and calculation of execution time with contextlib.redirect_stdout(io.StringIO()) as f_: start_time = time.time() f() times[f.__name__] = time.time() - start_time # write time in dict times = {k: v for k, v in sorted(times.items(), key=lambda item: item[1])} # sort dict # print table print('Function | RANK | TIME ELAPSED') for i, t in enumerate(times): print(f'{t}\t {i + 1}\t {times[t]}s')
[ "def timing_analysis(func, start, stop, inc, runs):\n\n for n in range(start, stop, inc): # for every input size n\n acc = 0.0 # initialize accumulator\n\n for i in range(runs): # repeat runs times:\n acc += timing(func, n) # run func on input size n\n # and accumulates run times\n # print average run times for input size n\n format_str = 'Run time of {}({}) is {:.7f} seconds.'\n print(format_str.format(func.__name__, n, acc / runs))", "def compare_speed_of_functions(input_to_func, function_list, exp_output):\n\n speed_dict = dict()\n longest_name = 0\n for func in function_list:\n name = func.__name__\n size = len(name)\n if size > longest_name:\n longest_name = size\n print(\"Applying {}\".format(name))\n t0 = time.time()\n output = func(input_to_func)\n t1 = time.time()\n if output == exp_output:\n total = round(t1-t0, 8)\n speed_dict[name] = total\n else:\n print(\"{}: Incorrect Output Found: {}\".format(name, output))\n print()\n display_results(speed_dict, longest_name)", "def bestof(reps,func,*args,**kwargs):\n best=2**32\n for i in range(reps):\n startTime=timer()\n ret=func(*args,**kwargs)\n elaspedTime=timer()-startTime\n if elaspedTime<best:best=elaspedTime\n return (best,ret)", "def evaluate(fn):\n runs = 1000\n def speed_evaluation(*args) -> str:\n t_start = time.time()\n for _ in range(runs):\n fn(*args)\n t_end = time.time()\n return f'The average runtime for {fn.__name__} was {(t_end-t_start)/runs}'\n return speed_evaluation", "def run_element(jobs, boolean_result, aggregation):\n\n faults_tolerate = 1\n\n mstart = time.time()\n run_executionxxx(faults_tolerate, jobs, boolean_result, aggregation)\n mend = time.time()\n span = str(mend - mstart)\n print \"Ranking time: %s\" % span", "def bestof(reps, func, *pargs, **kargs): \r\n best = 2 ** 32 # 136 years seems large enough\r\n for _ in range(reps): # range usage not timed here\r\n start = timer()\r\n ret = func(*pargs, **kargs)\r\n elapsed = timer() - start # Or call total() with reps=1\r\n if elapsed < best: best = elapsed # Or add to list and take min()\r\n return (best, ret)", "def get_n_coresidency(mem_size, runtime, min_no):\n fout = \"tmp_coresidency.log\"\n open(fout, \"w\")\n N = 10\n exp = Worker(fout, 0, N, basic_probe_func)\n exp.init()\n\n para_list = []\n src_code_path = os.path.join(os.getcwd(), CODE_PATH[runtime])\n zip_code(zipped_code_path, src_code_path)\n\n fops = []\n for i in xrange(N):\n func_name = func_prex + str(int(time.time() * 1000))[-8:]\n fop = FuncOp(\n aws_id,\n aws_key,\n region,\n role,\n runtime,\n mem_size,\n func_name)\n # fop.del_function()\n fop.create_function(zipped_code_path, func_handler)\n para = (fop, )\n para_list.append(para)\n fops.append(fop)\n\n exp.add_tasks(para_list)\n exp.clear_queue()\n\n buf = [v.strip(\"\\n\") for v in open(fout).readlines()]\n\n count = {}\n for v in buf:\n vm_id = v.split(\"#\")[10]\n func_name = v.split(\"#\")[7]\n if vm_id not in count:\n count[vm_id] = []\n count[vm_id].append(func_name)\n key_max = max(count, key=lambda k: len(count[k]))\n #print count\n\n #print count[key_max], len(count[key_max])\n\n if len(count[key_max]) >= min_no:\n target_ops = [\n v for v in fops if v.get_func_name() in count[key_max]]\n return target_ops\n else:\n for fop in fops:\n fop.del_function()", "def timings(reps, func, *args, **kw):\r\n return timings_out(reps, func, *args, **kw)[0:2]", "def run_time(func, *args):\n\n start = clock()\n\n func(*args)\n\n end = clock()\n\n return end-start", "def timelist(n, fn, *args, **kwargs):\n start = 
time.perf_counter_ns()\n for i in range(n):\n result = list(fn(*args, **kwargs))\n return time.perf_counter_ns() - start, result", "def analyse_time(size_to_test, no_of_trials):\n \n if sys.version_info < (3, 3):\n get_time = time.clock\n else:\n get_time = time.perf_counter\n REZ = time.get_clock_info('perf_counter').resolution \n\n total_time = 0 \n for trial in range(no_of_trials):\n list_to_test = generate_random_array(size_to_test)\n start = get_time()\n sol = giftwrap_e(list_to_test)\n end = get_time()\n total_time += (end - start)\n time_taken_per_locate = (1.0*total_time) / no_of_trials\n print('finish timing for array with {} random points'.format(size_to_test))\n \n #Uncomment if want graph\n #draw_graph(list_to_test, sol)\n \n print(size_to_test)\n #print(time_taken_per_locate)\n return time_taken_per_locate", "def time_getter(func, n, rand_val=True):\n\n t1 = Timer(\"{}({},{})\".format(func, get_me_random_list(n, rand_val), 99999999), setup=\"from __main__ import {}\".format(func))\n func_avg = t1.timeit(number=100)\n \n return func_avg", "def timings_out(reps,func,*args,**kw):\n\n reps = int(reps)\n assert reps >=1, 'reps must be >= 1'\n if reps==1:\n start = clock()\n out = func(*args,**kw)\n tot_time = clock()-start\n else:\n rng = xrange(reps-1) # the last time is executed separately to store output\n start = clock()\n for dummy in rng: func(*args,**kw)\n out = func(*args,**kw) # one last time\n tot_time = clock()-start\n av_time = tot_time / reps\n return tot_time,av_time,out", "def main():\n list_size = [500, 1000, 10000]\n sort_result = {'insertion':0, 'shell':0, 'python':0}\n for i in list_size:\n list_count = 0\n while list_count < 100:\n random_number_list = get_me_random_list(i)\n sort_result['insertion']+=insertion_sort(random_number_list)\n sort_result['shell'] += shell_sort(random_number_list)\n sort_result['python'] += python_sort(random_number_list)\n list_count+=1\n\n for key, val in sort_result.items():\n print('%s sort took %10.7f seconds to run, on average'%(key, val/100))", "def runtime_example1():\n\n # this need to be runned outside of this function\n %timeit rand_nums = np.random.rand(1000)\n\n # here we save the runtime to a variable using \"-o\" after\n # the %timeit clause\n times = %timeit -o rand_nums = np.random.rand(1000)\n return times", "def get_time_scores(scoring_function, test_kb, method='greedy-coalescing', load_to_gpu=True):\n facts = test_kb.facts\n\n if method in ['greedy-coalescing']: # for time-point models\n\n scores_t_list = []\n\n for i in range(0, int(facts.shape[0]), 1):\n fact = facts[i]\n\n s, r, o = fact[:3]\n\n start_bin = fact[3 + time_index[\"t_s_orig\"]]\n\n # start_bin, end_bin=fact[3:5]\n\n # num_times=end_bin-start_bin+1\n num_times = 2\n\n if num_times > 1:\n t = numpy.arange(start_bin, start_bin + 2)\n\n # t=numpy.arange(start_bin, end_bin+1)\n else:\n num_times += 1\n # to avoid batch size of 1\n t = numpy.array([start_bin, start_bin])\n\n s = numpy.repeat(s, num_times)\n r = numpy.repeat(r, num_times)\n o = numpy.repeat(o, num_times)\n\n # '''\n\n if load_to_gpu:\n s = torch.autograd.Variable(torch.from_numpy(\n s).cuda().unsqueeze(1), requires_grad=False)\n r = torch.autograd.Variable(torch.from_numpy(\n r).cuda().unsqueeze(1), requires_grad=False)\n o = torch.autograd.Variable(torch.from_numpy(\n o).cuda().unsqueeze(1), requires_grad=False)\n t = torch.autograd.Variable(torch.from_numpy(\n t).cuda().unsqueeze(1), requires_grad=False)\n else:\n # CPU\n s = torch.autograd.Variable(torch.from_numpy(\n s).unsqueeze(1), 
requires_grad=False)\n r = torch.autograd.Variable(torch.from_numpy(\n r).unsqueeze(1), requires_grad=False)\n o = torch.autograd.Variable(torch.from_numpy(\n o).unsqueeze(1), requires_grad=False)\n t = torch.autograd.Variable(torch.from_numpy(\n t).unsqueeze(1), requires_grad=False)\n\n # print(facts[i],facts_track_range, i,s.shape, facts_time_chunk, len(numpy.nonzero(facts_track_range==i)))\n\n scores_t = scoring_function(s, r, o, None).data\n\n # save for later (all scores_t are same pick any one)\n print('scores_t shape', scores_t.shape)\n scores_t_list.append(scores_t[-1])\n\n print('the shape of scores_t', scores_t_list[0].shape)\n # scores_t_pickle=torch.tensor(scores_t_pickle)\n t = torch.from_numpy(facts[:, 3:]).unsqueeze(1)\n\n data_pickle = prepare_data_iou_scores(\n t, test_kb, scores_t=scores_t_list, load_to_gpu=load_to_gpu)\n data_pickle[\"facts\"] = facts\n data_pickle[\"data_folder_full_path\"] = test_kb.datamap.dataset_root\n\n elif method in [\"start-end-exhaustive-sweep\"]:\n num_relations = len(test_kb.datamap.relation_map)\n start_scores_t_list = []\n end_scores_t_list = []\n\n for i in range(0, int(facts.shape[0]), 1):\n fact = facts[i]\n s, r, o = fact[:3]\n\n s = numpy.repeat(s, 2) # to avoid batch size of 1\n r = numpy.repeat(r, 2)\n o = numpy.repeat(o, 2)\n\n if load_to_gpu:\n s = torch.autograd.Variable(torch.from_numpy(\n s).cuda().unsqueeze(1), requires_grad=False)\n r = torch.autograd.Variable(torch.from_numpy(\n r).cuda().unsqueeze(1), requires_grad=False)\n o = torch.autograd.Variable(torch.from_numpy(\n o).cuda().unsqueeze(1), requires_grad=False)\n else: # CPU\n s = torch.autograd.Variable(torch.from_numpy(\n s).unsqueeze(1), requires_grad=False)\n r = torch.autograd.Variable(torch.from_numpy(\n r).unsqueeze(1), requires_grad=False)\n o = torch.autograd.Variable(torch.from_numpy(\n o).unsqueeze(1), requires_grad=False)\n\n start_scores_t = scoring_function(s, r, o, None).data\n end_scores_t = scoring_function(s, r + num_relations, o, None).data\n\n # save for later (all scores_t are same pick any one)\n start_scores_t_list.append(start_scores_t[-1])\n end_scores_t_list.append(end_scores_t[-1])\n\n t = torch.from_numpy(facts[:, 3:]).unsqueeze(1)\n\n data_pickle = prepare_data_iou_scores(\n t, test_kb, scores_t=(start_scores_t_list, end_scores_t_list), load_to_gpu=load_to_gpu)\n data_pickle[\"facts\"] = facts\n data_pickle[\"data_folder_full_path\"] = test_kb.datamap.dataset_root\n\n else:\n raise Exception(\"Not implemented\")\n\n return data_pickle", "def _CalculateFrameTimes(events_per_frame, event_data_func):\n times_per_frame = []\n for event_list in events_per_frame:\n event_times = [event_data_func(event) for event in event_list]\n times_per_frame.append(sum(event_times))\n return times_per_frame", "def execution_time(func):\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n start_time = time.time()\n result = func(*args, **kwargs)\n run_time = time.time() - start_time\n\n print(t.format_exec_time(run_time,\n func,\n args,\n kwargs,\n result))\n\n return result\n\n return inner", "def time_methods() -> Tuple[List[float], List[float]]:\r\n # These two lists will hold our timing results.\r\n queue_times = []\r\n stack_times = []\r\n\r\n # This loop runs the timing experiment for enqueueing one item to\r\n # LinkedListQueue.\r\n print(\"Running LinkedListQueue.enqueue experiments...\")\r\n for size in SIZES:\r\n # 1. Initialize the sample queues\r\n queues = _setup_queues(size, NUM_TRIALS)\r\n\r\n # 2. 
For each queue created, call the function timeit.\r\n # timeit takes three arguments:\r\n # - a *string* representation of a piece of code to run\r\n # - the number of times to run it (just 1 for us)\r\n # - globals is a technical argument that you DON'T need to\r\n # care about\r\n time = 0\r\n for queue in queues:\r\n time += timeit('queue.enqueue(1)', number=1, globals=locals())\r\n\r\n # 3. Get the average time in microseconds (μs)\r\n average_time = time / NUM_TRIALS * 1e6\r\n\r\n # 4. Report the average time taken and add that to our list of\r\n # results.\r\n queue_times.append(average_time)\r\n print(f'enqueue: Queue size {size:>7}, time: {average_time}')\r\n\r\n print(\"Running LinkedListStack.push experiments...\")\r\n # TODO: Using the above code as an example, run the same experiment\r\n # but on LinkedListStack.push\r\n # (You can just copy the above code and make minor modifications!)\r\n # Add the results to stack_times instead of queue_times\r\n\r\n\r\n\r\n # Do not change the return statement below.\r\n return queue_times, stack_times" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the clock in/out entries for the 91-Area Manager
def test_case_2(self): ca = clock_adjustment.ClockInOutAdjustment() params = { # If time is not provided, default values are used instead. # One day instead of two is also accepted. "date" : [["06/6/2019", "02:25 AM"], ["06/26/2019", "11:59 PM"]], "employee" : "91-Area Manager", "time" : { "clock in" : ["06/19/2019", "11:45 AM"], "clock out" : ["06/19/2019", "01:02 PM"] } } self.log.info("Starting the test trying to add new time entry") if not ca.add(params): tc_fail("Failed to add the clock in/out entry for the Area Manager") self.log.info("Added new time entry") # Check self.log.info("Checking if the new time entry is valid") ca._select(params) if not mws.select("Clock list", "%s %s"%( params['time']['clock in'][0], params['time']['clock in'][1] ) ): tc_fail("The entry was added but was not found in the list") self.log.info("Checking passed successfully") mws.recover()
[ "def _draw_digital_clock(self):\n self._draw_time_scale()\n self._draw_time()", "def clock_corrections(self,t):\n # TODO this and derived methods should be changed to accept a TOA\n # table in addition to Time objects. This will allow access to extra\n # TOA metadata which may be necessary in some cases.\n raise NotImplementedError", "def _draw_simple_clock(self):\n self._draw_simple_background()\n self._draw_numbers()\n self._draw_hands()", "def add_nighttime_flags(prizm_data, antennas=['70MHz', '100MHz']):\r\n\r\n # Adds flags for each antenna.\r\n for antenna in antennas:\r\n\r\n # Makes sure the input dictionary contains entries for antenna(s) of\r\n # interest. An error message is printed if that information is missing.\r\n if antenna not in prizm_data.keys():\r\n print(\r\n '`add_nighttime_flags`: the data for the '\r\n + antenna\r\n + ' antenna could not be found.'\r\n )\r\n continue\r\n\r\n # Makes sure the input dictionary contains the timestamp data. An error\r\n # message is printed if that information is missing.\r\n if len(prizm_data[antenna]['time_sys_start.raw']) == 0:\r\n print(\r\n '`add_nighttime_flags`: no timestamp data was found for the '\r\n + antenna\r\n + ' antenna.'\r\n )\r\n continue\r\n\r\n # Extracts the NumPy array of data ctimes stored in `prizm_data`.\r\n ctimes = prizm_data[antenna]['time_sys_start.raw']\r\n\r\n # Obtains the timestamps associated with these `ctimes`.\r\n dates = timestamp_from_ctime(ctimes)\r\n\r\n # Transforms the entries of `dates` into datetime objects.\r\n dates = [\r\n datetime.strptime(entry, '%Y%m%d_%H%M%S')\r\n for entry in dates\r\n ]\r\n\r\n # Artificially includes a one-day buffer to `dates` in order to\r\n # facilitate the flagging of nighttime data below.\r\n previous_to_first_day = dates[0] - timedelta(days=1)\r\n next_to_last_day = dates[-1] + timedelta(days=1)\r\n dates = [previous_to_first_day] + dates + [next_to_last_day]\r\n\r\n # Initializes the geographial location of Marion island for the purpose\r\n # of obtaining accurate sunset and sunrise times for different periods\r\n # of the year.\r\n marion = suntime.Sun(lat=-46.88694, lon=37.819638)\r\n\r\n # Obtains the sunrise and sunset times for the time period spanned by\r\n # `dates`.\r\n sunset_times = [\r\n marion.get_sunset_time(entry)\r\n for entry in dates\r\n ]\r\n\r\n sunrise_times = [\r\n marion.get_sunrise_time(entry)\r\n for entry in dates\r\n ]\r\n\r\n # Keeps only the unique entries featuring in `sunset_times` and\r\n # `sunrise_times`.\r\n sunset_times = np.unique(sunset_times)\r\n sunrise_times = np.unique(sunrise_times)\r\n\r\n # Sets the reference time `ref_time` used in the definition of ctime\r\n # (i.e., the number of seconds since 1970/1/1).\r\n ref_time = datetime(1970, 1, 1, tzinfo=timezone.utc)\r\n\r\n # Transforms the datetime objects in `sunset_times` to ctimes. As a\r\n # buffer,\r\n # we also add an hour (3600 seconds) to the sunset ctimes and subtract\r\n # the\r\n # same amount of seconds from the sunrise ctimes.\r\n sunset_ctimes = [\r\n (entry - ref_time).total_seconds() + 3600\r\n for entry in sunset_times\r\n ]\r\n\r\n sunrise_ctimes = [\r\n (entry - ref_time).total_seconds() - 3600\r\n for entry in sunrise_times\r\n ]\r\n\r\n # Deletes the first entry of `sunrise_ctimes` and the last entry of\r\n # `sunset_ctimes`. 
This creates an offset between the two NumPy arrays\r\n # which makes the process of finding the nighttime portions of the data a\r\n # lot simpler.\r\n sunrise_ctimes = sunrise_ctimes[1:]\r\n sunset_ctimes = sunset_ctimes[:-1]\r\n\r\n # Initializes the NumPy array `flag` which will be used in the flags\r\n # generation below.\r\n flag = np.zeros_like(prizm_data[antenna]['time_sys_start.raw'],\r\n dtype='int')\r\n\r\n # Slices the data into chunks delimited in time by the entries in\r\n # `sunset_ctimes` and `sunrise_ctimes`. These are used to create a\r\n # `nighttime_filter` which picks only nighttime data.\r\n for nighttime_start, nighttime_end in zip(sunset_ctimes, sunrise_ctimes):\r\n condition = np.logical_and(ctimes >= nighttime_start,\r\n ctimes <= nighttime_end)\r\n nighttime_filter = np.where(condition)[0]\r\n\r\n # Assigns the value `1` to the portions of `flag` corresponding to\r\n # the chunk under consideration.\r\n flag[nighttime_filter] = np.ones(len(nighttime_filter), dtype='int')\r\n\r\n # Adds flags to `prizm_data`.\r\n prizm_data[antenna]['nighttime_flags'] = flag\r\n\r\n return", "def update_clock(self, _):\n self.clock = utils.get_time_human_readable()", "def clock_pin(self):\n return None", "def _draw_nice_clock(self):\n self._draw_nice_background()\n self._draw_hands()", "def tick(self):\r\n self.eventClock += 1", "def drawTimers(self):\n if self.char.gravityBoosted or self.char.superjump:\n self.drawText(\"Anti-gravity: \"+str((self.char.activatedGravity+self.char.powerTime+1000 - \\\n pygame.time.get_ticks())/1000), \\\n COOL_FONT, (WIDTH/2, HEIGHT/20*3), RED, 30)\n if self.char.invincible:\n self.drawText(\"Invincibility: \"+str((self.char.activatedInvincibility+self.char.powerTime+1000 - \\\n pygame.time.get_ticks())/1000), \\\n COOL_FONT, (WIDTH/2, HEIGHT/20*4), RED, 30)\n if self.char.underAttack:\n self.drawText(\"Escape: \"+str((self.char.hitTime+self.char.escapeTime+1000 - \\\n pygame.time.get_ticks())/1000), \\\n COOL_FONT, (WIDTH/2, HEIGHT/20*5), RED, 30)", "def time_selection(self):\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_ATIME | TSL27721_COMMAND_BIT, TSL27721_REG_ATIME_2_73)\n\t\t\n\t\t\"\"\"Select the PTIME register configuration from the given provided values\"\"\"\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_PTIME | TSL27721_COMMAND_BIT, TSL27721_REG_PTIME_2_73)\n\t\t\n\t\t\"\"\"Select the WTIME register configuration from the given provided values\"\"\"\n\t\tbus.write_byte_data(TSL27721_DEFAULT_ADDRESS, TSL27721_REG_WTIME | TSL27721_COMMAND_BIT, TSL27721_REG_WTIME_2_73)", "def drawTime(self,hrs,min,sec=None):\n hrot = - hrs*30. 
- min*0.5\n mrot = - min*6.\n GD.canvas.removeActors(self.hands)\n MH = draw(self.mainhand.rot(mrot),bbox=None,color='red',linewidth=self.linewidth)\n HH = draw(self.hourhand.rot(hrot),bbox=None,color='red',linewidth=self.linewidth)\n self.hands = [MH,HH]\n if self.secshand and sec:\n srot = - sec*6.\n SH = draw(self.secshand.rot(srot),bbox=None,color='orange',linewidth=0.5*self.linewidth)\n self.hands.append(SH)", "def start_clock(self):\n st = self.get_state()\n self.set_trigger(st | 0x2)", "def _add_timing_pattern(self):\n timing_pat_len = self.qr_size - 16\n timing_line = [0 if c % 2 == 0 else 255 for c in range(timing_pat_len)]\n self.qr[6, 8:-8] = timing_line # horizontal\n self.qr[8:-8, 6] = timing_line # vertical", "def draw_clock(self):\n\n now = self.current_time()\n hour = now.hour\n minute = now.minute\n second = now.second\n\n x0 = 32*3+19\n y0 = 12\n\n self.graphics.DrawCircle(self.canvas, x0, y0, 12, self.time_color)\n self.graphics.DrawCircle(self.canvas, x0, y0+1, 12, self.time_color)\n\n hour_angle = (hour+minute/60)*2.0*pi/12.0 - 0.5*pi\n minute_angle = minute*2.0*pi/60.0 - 0.5*pi\n second_angle = second*2.0*pi/60.0 - 0.5*pi\n\n self.draw_line_angular(x0, y0, 10, second_angle, self.second_color)\n self.draw_line_angular(x0, y0, 10, minute_angle, self.minute_color)\n self.draw_line_angular(x0, y0+1, 10, minute_angle, self.minute_color)\n self.draw_line_angular(x0+1, y0, 10, minute_angle, self.minute_color)\n self.draw_line_angular(x0, y0, 8, hour_angle, self.hour_color)\n self.draw_line_angular(x0, y0+1, 8, hour_angle, self.hour_color)\n self.draw_line_angular(x0+1, y0, 8, hour_angle, self.hour_color)", "def build_clock():\n return html.Div([\n daq.LEDDisplay(id='clock', value='00:00:00',\n size=24, color='#333333')\n ], className='navbar-2')", "def update_clock():\n\n # update vector clock\n vector_clock[process_id] += 1\n\n # update bloom clock\n for seed in range(k):\n hash_val = hash((process_id, event_count, seed))\n index = hash_val % m\n bloom_clock[index] += 1\n time.sleep(random.random())", "async def test_clock(hass: ha.HomeAssistant, skip_dependencies, skip_history):\n # pylint: disable=unused-argument\n\n async with IUExam(hass, \"test_clock.yaml\") as exam:\n\n # Wait to go into testing mode\n while not exam.coordinator.tester.is_testing:\n await asyncio.sleep(1)\n\n # Now wait for the test to finish\n while exam.coordinator.tester.is_testing:\n await asyncio.sleep(1)\n\n # Convert tick_log to virtual time\n tick_log: list[datetime] = []\n test = exam.coordinator.tester.last_test\n for atime in exam.coordinator.clock.tick_log:\n tick_log.append(test.virtual_time(atime))\n\n assert tick_log == [\n mk_local(\"2021-01-04 06:30:00\"),\n mk_local(\"2021-01-04 06:27:00\"),\n mk_local(\"2021-01-04 06:26:40\"),\n mk_local(\"2021-01-04 06:26:20\"),\n mk_local(\"2021-01-04 06:26:00\"),\n mk_local(\"2021-01-04 06:25:40\"),\n mk_local(\"2021-01-04 06:25:20\"),\n mk_local(\"2021-01-04 06:25:00\"),\n mk_local(\"2021-01-04 06:24:40\"),\n mk_local(\"2021-01-04 06:24:20\"),\n mk_local(\"2021-01-04 06:24:00\"),\n mk_local(\"2021-01-04 06:23:40\"),\n mk_local(\"2021-01-04 06:23:20\"),\n mk_local(\"2021-01-04 06:23:00\"),\n mk_local(\"2021-01-04 06:22:40\"),\n mk_local(\"2021-01-04 06:22:20\"),\n mk_local(\"2021-01-04 06:22:00\"),\n mk_local(\"2021-01-04 06:21:40\"),\n mk_local(\"2021-01-04 06:21:20\"),\n mk_local(\"2021-01-04 06:21:00\"),\n mk_local(\"2021-01-04 06:20:40\"),\n mk_local(\"2021-01-04 06:20:20\"),\n mk_local(\"2021-01-04 06:20:00\"),\n mk_local(\"2021-01-04 
06:19:40\"),\n mk_local(\"2021-01-04 06:19:20\"),\n mk_local(\"2021-01-04 06:19:00\"),\n mk_local(\"2021-01-04 06:18:40\"),\n mk_local(\"2021-01-04 06:18:20\"),\n mk_local(\"2021-01-04 06:18:00\"),\n mk_local(\"2021-01-04 06:17:40\"),\n mk_local(\"2021-01-04 06:17:20\"),\n mk_local(\"2021-01-04 06:17:00\"),\n mk_local(\"2021-01-04 06:16:40\"),\n mk_local(\"2021-01-04 06:16:20\"),\n mk_local(\"2021-01-04 06:16:00\"),\n mk_local(\"2021-01-04 06:15:40\"),\n mk_local(\"2021-01-04 06:15:20\"),\n mk_local(\"2021-01-04 06:15:00\"),\n mk_local(\"2021-01-04 06:14:40\"),\n mk_local(\"2021-01-04 06:14:20\"),\n mk_local(\"2021-01-04 06:14:00\"),\n mk_local(\"2021-01-04 06:13:40\"),\n mk_local(\"2021-01-04 06:13:20\"),\n mk_local(\"2021-01-04 06:13:00\"),\n mk_local(\"2021-01-04 06:12:00\"),\n mk_local(\"2021-01-04 06:11:40\"),\n mk_local(\"2021-01-04 06:11:20\"),\n mk_local(\"2021-01-04 06:11:00\"),\n mk_local(\"2021-01-04 06:10:40\"),\n mk_local(\"2021-01-04 06:10:20\"),\n mk_local(\"2021-01-04 06:10:00\"),\n mk_local(\"2021-01-04 06:09:40\"),\n mk_local(\"2021-01-04 06:09:20\"),\n mk_local(\"2021-01-04 06:09:00\"),\n mk_local(\"2021-01-04 06:08:40\"),\n mk_local(\"2021-01-04 06:08:20\"),\n mk_local(\"2021-01-04 06:08:00\"),\n mk_local(\"2021-01-04 06:07:40\"),\n mk_local(\"2021-01-04 06:07:20\"),\n mk_local(\"2021-01-04 06:07:00\"),\n mk_local(\"2021-01-04 06:06:40\"),\n mk_local(\"2021-01-04 06:06:20\"),\n mk_local(\"2021-01-04 06:06:00\"),\n mk_local(\"2021-01-04 06:05:40\"),\n mk_local(\"2021-01-04 06:05:20\"),\n mk_local(\"2021-01-04 06:05:00\"),\n mk_local(\"2021-01-04 06:04:40\"),\n mk_local(\"2021-01-04 06:04:20\"),\n mk_local(\"2021-01-04 06:04:00\"),\n mk_local(\"2021-01-04 06:00:00\"),\n ]\n\n exam.check_summary()", "def add_switch_flags(prizm_data, antennas=['70MHz', '100MHz']):\r\n\r\n # Recovers the keys in `prizm_data['switch']`.\r\n switch_files = prizm_data['switch'].keys()\r\n\r\n # Adds flags for each antenna.\r\n for antenna in antennas:\r\n\r\n # Makes sure the input dictionary contains entries for antenna(s) of\r\n # interest. An error message is printed if that information is missing.\r\n if antenna not in prizm_data.keys():\r\n print(\r\n '`add_switch_flags`: the data for the '\r\n + antenna\r\n + ' antenna could not be found.'\r\n )\r\n continue\r\n\r\n # Makes sure the input dictionary contains the timestamp data. 
An error\r\n # message is printed if that information is missing.\r\n if len(prizm_data[antenna]['time_sys_start.raw']) == 0:\r\n print(\r\n '`add_switch_flags`: no timestamp data was found for the '\r\n + antenna\r\n + ' antenna.'\r\n )\r\n continue\r\n\r\n # Initializes the dictionary entry which will store the flags.\r\n prizm_data[antenna]['switch_flags'] = {}\r\n\r\n # Collects the start and stop times stored in\r\n # `prizm_data[antenna]['time_sys_start.raw']`.\r\n start_time = prizm_data[antenna]['time_sys_start.raw'][0]\r\n stop_time = prizm_data[antenna]['time_sys_stop.raw'][-1]\r\n\r\n # Generates the flags and adds them to `prizm_data`.\r\n for file_name in switch_files:\r\n # Ensures flags are generated only for the appropriate '.scio' switch files.\r\n if '.scio' not in file_name: # or 'open' in file_name:\r\n continue\r\n\r\n # Here `times` contains the ctimes at which the data-taking\r\n # associated with a given component (antenna, the 100 Ohm resistor,\r\n # the short, or the 50 Ohm resistor) started and stopped.\r\n try:\r\n times = prizm_data['switch'][file_name]\r\n except:\r\n continue\r\n\r\n # Initializes the NumPy array `flag` which will be used in the flags\r\n # generation below.\r\n flag = np.zeros_like(prizm_data[antenna]['time_sys_start.raw'],\r\n dtype='int')\r\n\r\n # Artificially adds endpoints in case those are missing. The\r\n # starting endpoint is characterized by\r\n # `np.array([[1.0, start_time]])`, while the final endpoint is\r\n # characterized by `np.array([[0.0, end_time]])`.\r\n if len(times) > 0 and times[0,0] == 0.0:\r\n starting_endpoint = np.array([[1.0, start_time]])\r\n times = np.append(starting_endpoint, times, axis=0)\r\n if len(times) > 0 and times[-1,0] == 1.0:\r\n final_endpoint = np.array([[0.0, stop_time]])\r\n times = np.append(times, final_endpoint, axis=0)\r\n\r\n # Takes the ctime data stored in `prizm_data` in preparation for the\r\n # data chunk selection performed below. Here the NumPy arrays\r\n # `data_time_start` and `data_time_stop` contain the times at which\r\n # data (associated with any) given component started and stopped\r\n # being recorded, respectively.\r\n data_time_start = prizm_data[antenna]['time_sys_start.raw']\r\n data_time_stop = prizm_data[antenna]['time_sys_stop.raw']\r\n\r\n # Slices the data into chunks delimited in time by the entries in\r\n # `times`. These are used to create a filter `chunk_filter` which\r\n # picks only data matching the chunk under consideration.\r\n for chunk_start, chunk_end in zip(times[:-1], times[1:]):\r\n condition = np.logical_and(data_time_start >= chunk_start[1],\r\n data_time_stop <= chunk_end[1])\r\n chunk_filter = np.where(condition)[0]\r\n\r\n # If the current element (antenna, resistance, or short) is\r\n # active for the chunk under consideration (i.e.,\r\n # `chunk_start[0] == 1.0`), the `flag` is assigned the value `1`\r\n # in that chunk.\r\n if chunk_start[0] == 1.0:\r\n flag[chunk_filter] = np.ones(len(chunk_filter), dtype='int')\r\n\r\n # Adds flags to `prizm_data`.\r\n prizm_data[antenna]['switch_flags'][file_name] = flag\r\n\r\n return", "def configure_timing_burst_handshaking_import_clock(self, *args, **kws):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a random date in the range '01/01/1981' to '01/01/2100'
def random_date(self): stime = time.mktime(time.strptime('01/01/1981', '%m/%d/%Y')) etime = time.mktime(time.strptime('01/01/2100', '%m/%d/%Y')) ptime = stime + random.random() * (etime - stime) return time.strftime('%m/%d/%Y', time.localtime(ptime))
[ "def gen_date():\r\n return random.randint(DAY1, TODAY)", "def random_date_generator(start_date):\n\n\t\trange_in_days = current_date + np.timedelta64(-T, \"D\") - np.datetime64(start_date)\n\t\tdays_to_add = np.arange(1, range_in_days-1)\n\t\trandom_date = np.datetime64(start_date) + np.random.choice(days_to_add, n, replace=False)\n\t\treturn random_date", "def random_date(year):\n try:\n return datetime.strptime(\"{} {}\".format(random.randint(1, 366), year), \"%j %Y\")\n # accounts for leap year values\n except ValueError:\n random_date(year)", "def gen_Date_Time():\n month = randint(1, 12)\n day = randint(1, 30)\n year = randint(2009, 2019)\n return (str(month) + '-' + str(day) + '-' + str(year))", "def random_date(start, end):\n random_time = start + timedelta(\n seconds=randint(0, int((end - start).total_seconds())),\n )\n hour = numpy.random.choice(hours, p=probabilities)\n return random_time.replace(hour=hour)", "def randomize_datetime(year, month):\n day = random.choice(range(1, 20))\n date = datetime.datetime(year, month, day)\n return date.strftime(\"%B %d, %Y\")", "def pick_birthdate():\n year = randrange(1916, 2017)\n month = randrange(12)\n if isleap(year) and month == 1:\n day = randrange(1, 30)\n elif month == 1:\n day = randrange(1, 29)\n elif month == 3 or month == 5 or month == 8 or month == 10:\n day = randrange(1, 31)\n else:\n day = randrange(1, 32)\n return [str(day), str(month), str(year)]", "def random_datetime():\n era = random.choice(range(len(telisaran.Era.years)))\n max_year = 20000 if era == 2 else telisaran.Era.years[era]\n return telisaran.datetime(\n era=era + 1,\n year=random.choice(range(1, max_year + 1)),\n season=random.choice(range(1, telisaran.Year.length_in_seasons + 1)),\n day=random.choice(range(1, telisaran.Season.length_in_days + 1)),\n hour=random.choice(range(24)),\n minute=random.choice(range(60)),\n second=random.choice(range(60))\n )", "def gen_dates(birth_year=None):\n birthdate = None\n \n\n if birth_year:\n byear = random.randrange(birth_year - 5, birth_year + 5)\n else:\n byear = random.randrange(1944, 1992)\n birthdate = datetime.date(byear, random.randrange(1, 12), random.randrange(1, 28))\n\n wyear = random.randrange(byear + 18, byear + 35)\n\n if wyear > 2012:\n wyear = 2012\n\n wedding = datetime.date(wyear, random.randrange(1, 12), random.randrange(1, 28))\n\n results = {'birth' : birthdate, 'wedding' : wedding}\n\n return results", "def date_of_order(year=2020, month=1, day=1):\n start_date = datetime(year=year, month=month, day=day, hour=00, minute=00)\n end_date = datetime.now()\n time_between_dates = end_date - start_date\n int_delta = (time_between_dates.days * 24 * 60 * 60) + time_between_dates.seconds\n random_seconds = randrange(int_delta)\n random_date = start_date + timedelta(seconds=random_seconds)\n return random_date.strftime(\"%Y-%m-%d %H:%M:%S\")", "def random_birthday(n):\n birthdays = []\n for i in range(n):\n bd = random.randint(1, 365)\n birthdays.append(bd)\n\n return birthdays", "def gen_modelled_date(start_date, end_date):\n # 2012, 2013, 2014\n year_model = [1, 2, 4]\n year_model = reduce(lambda x, y: x+y, [[year]*freq for year, freq in\n zip(range(2012, 2015), year_model)])\n rand_year = random.choice(year_model)\n\n\n # J F M A M J J A S O N D\n month_model = [1, 4, 8, 9, 7, 5, 4, 6, 8, 12, 10, 6]\n month_model = reduce(lambda x, y: x+y, [[month]*freq for month, freq in\n zip(range(1, 13), month_model)])\n rand_month = random.choice(month_model)\n\n week_dict = {0: [], 1: [], 2: [], 3: [], 4: [], 5: [], 
6: []} \t\n num_days_in_month = monthrange(rand_year, rand_month)[1]\n\n for day in range(1, num_days_in_month+1):\n week_dict[datetime.date(rand_year, rand_month, day).weekday()] += [day] \n \n\n # M T W R F S S\n week_model = [2, 1, 1, 2, 4, 8, 3]\n week_model = reduce(lambda x, y: x+y, [[week]*freq for week, freq in\n zip(range(7), week_model)])\n rand_day = random.choice(week_dict[random.choice(week_model)])\n\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20\n # 21 22 23\n hour_model = [1, 1, 1, 1, 1, 1, 2, 9, 7, 5, 2, 1, 1, 2, 2, 3, 4, 14,\n 10, 8, 6, 3, 1, 1]\n hour_model = reduce(lambda x, y: x+y, [[hour]*freq for hour, freq in\n zip(range(24), hour_model)])\n rand_hour = random.choice(hour_model)\n \n rand_minute = random.choice(range(60))\n\n rand_second = random.choice(range(60))\n \n random_timestamp_arr = [rand_year, rand_month, rand_day, rand_hour,\n rand_minute, rand_second]\n return random_timestamp_arr", "def generate_dates():\n available_dates = []\n # For loop generates days 21-25\n for i in range(21, 26):\n # All generated dates will be in January 2019\n available_dates.append(date.Date(1, i, 2019))\n return available_dates", "def random_datetime(start=START_DATE, end=END_DATE):\n delta = end - start\n int_delta = (delta.days * 24 * 60 * 60) + delta.seconds\n random_second = random.randrange(int_delta)\n return start + timedelta(seconds=random_second)", "def random_festival_datetime():\n era = random.choice(range(len(telisaran.Era.years)))\n max_year = 20000 if era == 2 else telisaran.Era.years[era]\n return telisaran.datetime(\n era=era + 1,\n year=random.choice(range(1, max_year + 1)),\n season=9,\n day=random.choice(range(1, telisaran.FestivalOfTheHunt.length_in_days + 1)),\n hour=random.choice(range(24)),\n minute=random.choice(range(60)),\n second=random.choice(range(60))\n )", "def genAge(self):\n date = self.dataHandler.getRandomDate()\n self.identity.birthYear = date.year\n self.identity.age = datetime.datetime.now().year - self.identity.birthYear\n self.identity.birthday = f\"{date.day}.{date.month}\"", "def gen_rand_23():\n a = []\n for i in range(23):\n a.append(randint(1, 365))\n return a", "def gen_date(date, year=None):\n\n year = parse_year(year) or parse_year(date)\n if year:\n return ' (%s).' % year\n return ''", "def next_draw_date(date=datetime.now()):\n wed_weekday = 2\n sat_weekday = 5\n draw_hour = 20\n days_diff = 0\n\n if date.weekday() in [wed_weekday, sat_weekday] and date.hour < draw_hour:\n # provided date is next valid lottery date\n days_diff = 0\n elif date.weekday() < wed_weekday or date.weekday() >= sat_weekday:\n days_diff = (wed_weekday-date.weekday()) % 7\n elif date.weekday() > wed_weekday or date.weekday() < sat_weekday:\n days_diff = (sat_weekday-date.weekday()) % 7\n\n new_date = date+timedelta(days=days_diff)\n return new_date.replace(hour=20, minute=00, second=00, microsecond=00)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the value of mu_c using Theorem 3.1.
def mu_c(self, d_p, d_c): return self.mu_p * (1 - self.phi(d_p)) / self.phi(d_c)
[ "def compute_mu_covar(feature_iterator):\n features = []\n for hi in feature_iterator: # hi is numpy with shape (512, )\n features.append(hi.reshape(1, -1))\n\n h = np.concatenate(features, axis = 0) # (set_size, 512)\n print(\"h.shape:\", h.shape)\n mu = np.mean(h, axis = 0) # (512, )\n print(\"mu.shape:\", mu.shape)\n print(\"mu:\", mu[0:10])\n sigma = np.cov(h, rowvar = False) # (512, 512)\n print(\"sigma shape: \", sigma.shape)\n print(\"sigma:\", sigma[0:10,0:10])\n return mu, sigma", "def get_mu(self):\n return self.mu", "def k_mu_krie(data):\n tdata = dc(data)\n\n try:\n k_s = tdata['k_s']\n mu_s = tdata['mu_s']\n por = tdata['por']\n except NameError:\n raise\n a_k = tdata.get('a_k', np.array(3.))\n\n tpor = np.array(por, dtype=float, copy=True, ndmin=1)\n\n k_m = np.zeros(tpor.shape)\n mu_m = np.zeros(tpor.shape)\n a_exp = np.zeros(tpor.shape)\n b_i = (tpor != 1.)\n\n if a_k >= 0:\n a_exp[b_i] = np.array(a_k/(1. - tpor[b_i]))\n else:\n a_exp[b_i] = np.array(1. + -1*a_k/(1. - tpor[b_i]))\n\n k_m[tpor == 1.] = 0.\n mu_m[tpor == 1.] = 0.\n k_m[tpor == 0.] = k_s\n mu_m[tpor == 0.] = mu_s\n b_i = np.logical_and(tpor != 1., tpor != 0.)\n k_m[b_i] = k_s * (1. - tpor[b_i])**a_exp[b_i]\n mu_m[b_i] = (mu_s / k_s) * k_m[b_i]\n\n return k_m, mu_m", "def __const_c(self):\n return gamma((self.eta+1)/2) \\\n / ((np.pi*(self.eta-2))**.5*gamma(self.eta/2))", "def enc_mul_const(pub, m, c): # to do\n return powmod(m, c, pub.n_sq) # m^c mod n^2", "def eccentricity(mu):\n mu02 = mu[0,2]\n mu20 = mu[2,0]\n\n A = (mu20 - mu02)**2\n B = 4*(mu[1,1]**2)\n C = (mu20 + mu02)**2\n\n return abs((A - B) / C)", "def generate_mie_costheta(mu_cdf):\n\n num = len(mu_cdf) - 1\n index = int(np.random.random() * num)\n if index >= num:\n index = num - 1\n\n x = mu_cdf[index]\n x += (mu_cdf[index + 1] - mu_cdf[index]) * np.random.random()\n\n return x", "def nl(mu, coeffs):\n c0, c1, c2, c3 = coeffs\n return 1 - c0*(1-mu**0.5) - c1*(1-mu) - c2*(1-mu**1.5) - c3*(1-mu**2)", "def vcorrcoef_Y(U, c):\n U[np.isnan(U)] = 0;\n temp = (c - c.mean(axis=0,keepdims=True));\n return np.matmul(U - U.mean(axis=1,keepdims=True), temp/np.std(temp, axis=0, keepdims=True));", "def UlamCoefficients(C):\n return U.coeff_up_to(C * n).comparable_print()", "def var_cov_var(P, c, mu, sigma):\r\n alpha = norm.ppf(1 - c, mu, sigma)\r\n return P - P * (alpha + 1)", "def _initial_anti_causal_coefficient(self,c,z,dim=1):\n\n if self.Ns is None:\n raise ValueError('Unknown data length')\n\n return (z/(z*z-1.))*(z*self._slice_dim(c,-2,dim=dim) + self._slice_dim(c,-1,dim=dim))", "def cvar(self, p: Union[int, float, list, np.ndarray]) -> Union[float, list, np.ndarray]:\n\n # Check\n check_type_p(p)\n p = initialize_input(p)\n\n # Compute\n cvar = self.mu + self.sigma * standard_normal_pdf(standard_normal_quantile(p)) / p\n\n # Return\n if len(p)==1:\n return cvar[0]\n else:\n return cvar", "def calculate_single_mu_update(mu: float, opp_sigma_sq: float, s: np.ndarray,\n opp_mu: float) -> float:\n\n g = calculate_g(opp_sigma_sq)\n\n return np.sum(g * (s - calculate_e(mu, opp_mu, opp_sigma_sq)))", "def mie_mu_with_uniform_cdf(m, x, num):\n\n big_num = 2000 # large to work with x up to 10\n big_mu, big_cdf = mie_cdf(m, x, big_num)\n mu = np.empty(num)\n cdf = np.empty(num)\n\n mu[0] = -1 # cos[180 degrees] is -1\n cdf[0] = 0 # initial cdf is zero\n\n big_k = 0 # index into big_cdf\n for k in range(1, num - 1):\n\n target = k / (num - 1)\n while big_cdf[big_k] < target:\n big_k += 1\n\n delta = big_cdf[big_k] - target\n delta_cdf = big_cdf[big_k] - 
big_cdf[big_k - 1]\n delta_mu = big_mu[big_k] - big_mu[big_k - 1]\n\n mu[k] = big_mu[big_k] - delta / delta_cdf * delta_mu # interpolate\n cdf[k] = target\n\n# print(' mu[',k,']=% .5f'%mu[k],' cdf[',k,']=% .5f'%cdf[k],\n# 'cdf=',big_cdf[big_k], fraction)\n\n mu[num - 1] = 1 # cos[0 degrees] is 1\n cdf[num - 1] = 1 # last cdf is one\n\n return [mu, cdf]", "def mu_ideal(tau,v):\n N = 10;\n mu = np.zeros(len(v)) + 0.1\n # calculate p\n p_id = p_ideal(tau,v)\n\n for i in range(len(p_id)-1):\n mu[i] += N*v[i]*(p_id[i+1]-p_id[i])\n return p_id, mu", "def get_c(a, b):\r\n return np.sqrt(a * a + b * b)", "def phi_c(self):\n return self._phi_c", "def _initial_causal_coefficient(self,c,z,tol,dim=1):\n\n if self.Ns is None:\n raise ValueError('Unknown data length')\n\n if dim not in [1,2,3]:\n raise ValueError('Dimension needs to be 1, 2, or 3')\n\n horizon = self.Ns[dim-1]\n if tol > 0:\n horizon = int(np.ceil(np.log(tol)/np.log(np.abs(z))))\n\n if horizon<self.Ns[dim-1]:\n # accelerated loop\n zn = z.clone()\n Sum = self._slice_dim(c,0,dim=dim)\n for n in range(1,horizon):\n Sum += zn*self._slice_dim(c,n,dim=dim)\n zn *= z\n\n return Sum\n else:\n # full loop\n zn = z.clone()\n iz = 1./z\n z2n = z**(self.Ns[dim-1]-1.)\n Sum = self._slice_dim(c,0,dim=dim) + z2n*self._slice_dim(c,-1,dim=dim)\n z2n *= z2n * iz\n for n in range(1,self.Ns[dim-1]-1):\n Sum += (zn + z2n )*self._slice_dim(c,n,dim=dim)\n zn *= z\n z2n *= iz\n\n return Sum/(1.-zn*zn)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot the average gain for different values of a. It should be strictly convex, with the global minimum at a = r, and the average gain should be non-positive.
def graph(r, position): option = EuropeanOption(S_0, T, r, sigma, K_p, K_c) axes = fig.add_subplot(position) A = np.linspace(r - 0.1, r + 0.1, 15) E = [] V_minus = [] V_positive = [] for a in A: val = option.average_gain(a) E.append(val[0]) V_minus.append(val[0] - 1.96 / np.sqrt(1000) * val[1]) V_positive.append(val[0] + 1.96 / np.sqrt(1000) * val[1]) print("The min is reached for a =", A[np.argmin(E)], "and r =", r) axes = plt.gca() axes.plot(A, E) axes.plot(A, V_minus, 'k--') axes.plot(A, V_positive, 'k--') axes.set_xlim(r - 0.15, r + 0.15) axes.set_title("r = " + str(r)) axes.xaxis.set_major_locator(MultipleLocator(1.0)) axes.xaxis.set_minor_locator(MultipleLocator(0.01)) axes.yaxis.set_major_locator(MultipleLocator(1.0)) axes.yaxis.set_minor_locator(MultipleLocator(0.1)) axes.grid(which="major", axis="x", linewidth=0.75, linestyle="-", color="0.75") axes.grid(which="minor", axis="x", linewidth=0.25, linestyle="-", color="0.75") axes.grid(which="major", axis="y", linewidth=0.75, linestyle="-", color="0.75") axes.grid(which="minor", axis="y", linewidth=0.25, linestyle="-", color="0.75") axes.margins(0, 0.5)
[ "def plot_alpha(ax, mu, se, sig_level=0.05, color='red'):\n\n right = alpha(mean=mu, se=se, sig_level=sig_level)\n ax.axvline(right, c=color, linestyle='--', alpha=0.5)", "def avg_Ao(self):\n ...", "def gain(self, g):\n return self.normalize(0, 1, scale=g)", "def get_average_gain(self):\n min_date = self.trading_days.min()\n max_date = self.trading_days.max()\n\n diff = (max_date - min_date).days\n\n gain = (self.data.loc[max_date].Close / self.data.loc[min_date].Close).mean() - 1\n gain_p_year = (gain + 1)**(1 / (diff/365)) - 1\n return gain, gain_p_year", "def plot_avgerage_threshold_accuracy():\n\n data = [\n # [threshold, accuracy]\n [-0.1, 0.6316],\n [-0.2, 0.6161],\n [-0.3, 0.6059],\n [-0.4, 0.5988],\n [-0.5, 0.5888],\n [-0.6, 0.5711],\n [-0.7, 0.5423],\n [-0.8, 0.5083],\n [-0.9, 0.4881],\n [0.0, 0.6479],\n [0.1, 0.6516],\n [0.2, 0.6511],\n [0.3, 0.6430],\n [0.4, 0.6305],\n [0.5, 0.6122],\n [0.6, 0.5934],\n [0.7, 0.5712],\n [0.8, 0.5457],\n [0.9, 0.5307]\n ]\n\n x = [e[0] for e in data]\n y = [e[1] for e in data]\n plt.xlabel('Threshold')\n plt.ylabel('Accuracy')\n plt.plot(x, y, 'o')\n plt.show()", "def apply_gain(a, db):\n gain_float = 10 ** (db / 20)\n return np.clip(a * gain_float, -1, 1)", "def plot_di_mean(dec,inc,a95,color='k',marker='o',markersize=20,label='',legend='no'):#Modified from PmagPy (Tauxe et al., 2016)\r\n DI_dimap=dimap(dec,inc)\r\n if inc < 0:\r\n plt.scatter(DI_dimap[0],DI_dimap[1],\r\n edgecolors=color ,facecolors='white',\r\n marker=marker,s=markersize,label=label,zorder=4)\r\n if inc >= 0:\r\n plt.scatter(DI_dimap[0],DI_dimap[1],\r\n edgecolors=color,facecolors=color,\r\n marker=marker,s=markersize,label=label,zorder=4)\r\n Xcirc,Ycirc=[],[]\r\n Da95,Ia95=circ(dec,inc,a95)\r\n if legend=='yes':\r\n plt.legend(loc=2)\r\n for k in range(len(Da95)):\r\n XY=dimap(Da95[k],Ia95[k])\r\n Xcirc.append(XY[0])\r\n Ycirc.append(XY[1])\r\n plt.plot(Xcirc,Ycirc,c=color,zorder=3)\r\n plt.tight_layout()", "def plot_avg_strain_stress(self, ax=None, **kargs):\r\n if ax is None:\r\n fig, ax = plt.subplots()\r\n\r\n self.plot2d('avg_strain', 'avg_stress', ax=ax, color=\"black\", **kargs)\r\n ax.invert_xaxis()\r\n ax.invert_yaxis()\r\n handles, labels = ax.get_legend_handles_labels()\r\n ax.legend(handles, labels)\r\n # ax.set_xlabel('Strain, $\\\\varepsilon$')\r\n # ax.set_ylabel('Stress, $\\\\sigma$ [Mpa]')\r\n ax.grid()\r\n\r\n return ax", "def plot_gain_calibration(msname, calname, plabels=None, outname=None, show=True):\n if plabels is None:\n plabels = [\"A\", \"B\"]\n\n gain_phase, time_phase, _flags, ant1, ant2 = read_caltable(\n f\"{msname}_{calname}_gpcal\", cparam=True\n )\n gain_phase = gain_phase.squeeze(axis=3)\n gain_phase = gain_phase.mean(axis=2)\n npol = gain_phase.shape[-1]\n if np.all(ant2 == ant2[0]):\n labels = ant1\n else:\n labels = np.array([ant1, ant2]).T\n nlab = labels.shape[0]\n\n gain_amp, time, _flags, _ant1, _ant2 = read_caltable(\n f\"{msname}_{calname}_gacal\", cparam=True\n )\n gain_amp = gain_amp.squeeze(axis=3)\n gain_amp = gain_amp.mean(axis=2)\n time_days = time.copy()\n t0 = time[0]\n time = ((time - t0) * u.d).to_value(u.min)\n time_phase = ((time_phase - t0) * u.d).to_value(u.min)\n\n idx_to_plot = np.where(np.abs(gain_amp.reshape(nlab, -1).mean(1) - 1) > 1e-10)[0]\n\n ccyc = plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"]\n lcyc = [\"-\", \":\"]\n _, ax = plt.subplots(1, 2, figsize=(16, 6), sharex=True)\n\n if gain_amp.shape[1] > 1:\n tplot = time\n gplot = gain_amp\n else:\n tplot = [0, 1]\n gplot = np.tile(gain_amp, [1, 2, 
1])\n\n for i, bidx in enumerate(idx_to_plot):\n for pidx in range(npol):\n ax[0].plot(\n tplot,\n np.abs(gplot[bidx, :, pidx]),\n label=f\"{labels[bidx] + 1} {plabels[pidx]}\",\n color=ccyc[i % len(ccyc)],\n ls=lcyc[pidx],\n )\n\n if gain_phase.shape[1] > 1:\n tplot = time_phase\n gplot = gain_phase\n else:\n tplot = [tplot[0], tplot[-1]] # use the time from the gains\n gplot = np.tile(gain_phase, [1, 2, 1])\n\n for i, bidx in enumerate(idx_to_plot):\n for pidx in range(npol):\n ax[1].plot(\n tplot,\n np.angle(gplot[bidx, :, pidx]),\n label=f\"{labels[bidx] + 1} {plabels[pidx]}\",\n color=ccyc[i % len(ccyc)],\n ls=lcyc[pidx],\n )\n\n ax[0].set_xlim(tplot[0], tplot[-1])\n ax[1].set_ylim(-np.pi, np.pi)\n ax[0].legend(\n ncol=10, fontsize=\"x-small\", bbox_to_anchor=(0.05, -0.1), loc=\"upper left\"\n )\n ax[0].set_xlabel(\"time (min)\")\n ax[1].set_xlabel(\"time (min)\")\n ax[0].set_ylabel(\"Abs of gain\")\n ax[1].set_ylabel(\"Phase of gain\")\n if outname is not None:\n plt.savefig(f\"{outname}_gaincal.png\", bbox_inches=\"tight\")\n if not show:\n plt.close()\n return time_days, gain_amp, gain_phase, labels", "def mean_motion(k, a):\n return np.sqrt(k / abs(a ** 3)).to(1 / u.s) * u.rad", "def alpha(self, alpha):\n if alpha < 1e-10:\n alpha = 1e-10\n edge = sqrt(-2.0 * (self.stddev ** 2) * log(alpha))\n return RealRange((self.mean - edge, self.mean + edge))", "def CA_Average(avg_pts,quiet='q',rate=\"Slow\",scanDIM=1):\n print(\"\\nAverage set to: \"+str(max(avg_pts,1)))\n CA_list=Detector_List(BL_ioc())\n n=len(CA_list)-1\n for i in RangeUp(0,n,1):\n ca_ioc=CA_list[i][0]\n ca_num=CA_list[i][1]\n CA_Filter (ca_ioc,ca_num,avg_pts,rate,quiet,scanDIM)", "def plot(self, ax=None):\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Sensitivity\")\n ax = ax or plt.gca()\n\n ax.plot(self.energy.value, self.diff_sens.value, color='red',\n label=r\" $\\sigma$=\" + str(self.sigma) + \" T=\" + \\\n str(self.livetime.to('h').value) + \"h \\n\" + r\"$\\alpha$=\" + str(self.alpha) + \\\n r\" Syst$_{BKG}$=\" + str(self.bkg_sys * 100) + \"%\" + r\" $\\gamma_{min}$=\" + str(self.gamma_min))\n\n ax.set_xscale('log')\n ax.set_yscale('log')\n ax.grid(True)\n ax.set_xlabel('Reco Energy [{}]'.format(self.energy.unit))\n ax.set_ylabel('Sensitivity [{}]'.format(self.diff_sens.unit))\n if log.getEffectiveLevel() == 10:\n self.irf.sens.plot(color='black', label=\"ROOT\")\n\n plt.legend()\n return ax", "def sensitivity_minimization_plot(n_bins_energy, n_bins_gammaness, n_bins_theta2, energy, sensitivity_3Darray):\n\n #TODO : To be changed!!!\n # if (n_bins_energy == 12):\n # figarr, axarr = plt.subplots(4,3, sharex=True, sharey=True, figsize=(13.2,18))\n\n figarr, axarr = plt.subplots(5,4, sharex = True, sharey = True, figsize = (13.2,18))\n\n # The minimum sensitivity per energy bin\n sensitivity = np.ndarray(shape = n_bins_energy)\n\n for i in range(0, n_bins_energy):\n for j in range(0, n_bins_gammaness):\n for k in range(0, n_bins_theta2):\n conditions = (not np.isfinite(sensitivity_3Darray[i,j,k])) or (sensitivity_3Darray[i,j,k] <= 0)\n if conditions:\n sensitivity_3Darray[i,j,k] = 1\n\n for ebin in range(0,n_bins_energy):\n if (figarr):\n arr_i = int(ebin / 4)\n arr_j = ebin-int(ebin / 4) * 4\n plot = axarr[arr_i,arr_j].imshow(sensitivity_3Darray[ebin], cmap = 'viridis_r', \\\n extent=[0.005, 0.05, 1., 0.], norm = LogNorm(vmin=sensitivity_3Darray.min(), \\\n vmax = sensitivity_3Darray.max()), aspect = 'auto')\n\n fig, ax = plt.subplots(figsize = (8, 8))\n ax.set_title(\"Ebin: 
%.2f - %.2f %s\" % (energy[ebin].to_value(),\n energy[ebin+1].to_value(), energy.unit.name))\n img = ax.imshow(sensitivity_3Darray[ebin], cmap='viridis', extent = [0.005, 0.05, 1., 0.], aspect = 'auto')\n\n fill_bin_content(ax, sensitivity_3Darray, ebin, n_bins_gammaness, n_bins_theta2)\n format_axes_ebin(ax, img)\n fig.savefig(\"Ebin%d.png\" % ebin)\n if (figarr):\n figarr.subplots_adjust(hspace = 0, wspace = 0)\n format_axes_array(axarr[arr_i, arr_j], arr_i, arr_j, plot)\n\n return figarr", "def sobol_g_function_exact(a):\n t = 1. / (3 * (1. + a) ** 2)\n return t / np.sum(t)", "def plot_particle_energy_gain():\n # beta_e = 0.005\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta00025-guide0-200-100-nppc200')\n tenergy1 = pic_info.tenergy\n kene_e1 = pic_info.kene_e\n kene_i1 = pic_info.kene_i\n\n # beta_e = 0.02\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta001-guide0-200-100-nppc400')\n tenergy2 = pic_info.tenergy\n kene_e2 = pic_info.kene_e\n kene_i2 = pic_info.kene_i\n\n # beta_e = 0.06\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta003-guide0-200-100-nppc200')\n tenergy3 = pic_info.tenergy\n kene_e3 = pic_info.kene_e\n kene_i3 = pic_info.kene_i\n\n # beta_e = 0.2\n pic_info = pic_information.get_pic_info(\n '../../mime25-beta01-guide0-200-100-nppc200')\n tenergy4 = pic_info.tenergy\n kene_e4 = pic_info.kene_e\n kene_i4 = pic_info.kene_i\n\n # Estimate the energy gain for beta_e = 0.0072 using beta_e = 0.005\n kene_e12 = kene_e1[0] + (kene_e1 - kene_e1[0]) * 0.005 / 0.0072\n kene_i12 = kene_i1[0] + (kene_i1 - kene_i1[0]) * 0.005 / 0.0072\n\n print('The ratio of electron energy gain to its initial energy: ')\n print(' beta_e = 0.0072, 0.02, 0.06, 0.2: %f %f %f %f',\n ((kene_e12[-1]-kene_e12[0])/kene_e12[0],\n (kene_e2[-1]-kene_e2[0])/kene_e2[0],\n (kene_e3[-1]-kene_e3[0])/kene_e3[0],\n (kene_e4[-1]-kene_e4[0])/kene_e4[0]))\n # Electrons\n fig = plt.figure(figsize=[3.5, 2.5])\n ax = fig.add_axes([0.22, 0.22, 0.75, 0.73])\n ax.plot(tenergy1, (kene_e12 - kene_e12[0]) / kene_e12[0], 'b', linewidth=2)\n ax.plot(tenergy2, (kene_e2 - kene_e2[0]) / kene_e2[0], 'r', linewidth=2)\n ax.plot(\n tenergy3, (kene_e3 - kene_e3[0]) / kene_e3[0], 'orange', linewidth=2)\n ax.plot(tenergy4, (kene_e4 - kene_e4[0]) / kene_e4[0], 'g', linewidth=2)\n ax.set_xlim([0, 1190])\n #ax.set_ylim([0, 1.05])\n\n #plt.title('Energy spectrum', fontdict=font)\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=20)\n ax.set_ylabel(r'$\\Delta K_e/K_e(0)$', fontdict=font, fontsize=20)\n plt.tick_params(labelsize=16)\n\n ax.text(\n 680, 8.8, r'$\\beta_e=0.007$', color='blue', rotation=5, fontsize=16)\n ax.text(680, 5, r'$\\beta_e=0.02$', color='red', rotation=4, fontsize=16)\n ax.text(\n 680, 2.1, r'$\\beta_e=0.06$', color='orange', rotation=0, fontsize=16)\n ax.text(\n 680, -1.5, r'$\\beta_e=0.2$', color='green', rotation=0, fontsize=16)\n # Ions\n fig = plt.figure(figsize=[3.5, 2.5])\n ax = fig.add_axes([0.22, 0.22, 0.75, 0.73])\n ax.plot(tenergy1, (kene_i12 - kene_i12[0]) / kene_i12[0], 'b', linewidth=2)\n ax.plot(tenergy2, (kene_i2 - kene_i2[0]) / kene_i2[0], 'r', linewidth=2)\n ax.plot(\n tenergy3, (kene_i3 - kene_i3[0]) / kene_i3[0], 'orange', linewidth=2)\n ax.plot(tenergy4, (kene_i4 - kene_i4[0]) / kene_i4[0], 'g', linewidth=2)\n ax.set_xlim([0, 1190])\n ax.set_ylim([-5, 30])\n\n #plt.title('Energy spectrum', fontdict=font)\n ax.set_xlabel(r'$t\\Omega_{ci}$', fontdict=font, fontsize=20)\n ax.set_ylabel(r'$\\Delta K_i/K_i(0)$', fontdict=font, fontsize=20)\n 
plt.tick_params(labelsize=16)\n\n ax.text(680, 22, r'$\\beta_e=0.007$', color='blue', rotation=0, fontsize=16)\n ax.text(680, 9, r'$\\beta_e=0.02$', color='red', rotation=0, fontsize=16)\n ax.text(680, 3, r'$\\beta_e=0.06$', color='orange', rotation=0, fontsize=16)\n ax.text(680, -4, r'$\\beta_e=0.2$', color='green', rotation=0, fontsize=16)\n plt.show()", "def get_gain(applied_volts):\n a, b, c = 2.432, 12.86, -237.5\n #a, b, c = 545.1, 13.65, 0\n gain = a*np.exp(b*applied_volts) + c\n return gain", "def _Gain(self, value):\n v = (((self.max-self.min))-float(value))\n v = int(v*10)/10.0\n return v", "def meanSolar():" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a connection to OpenStack.
def create_connection(self): try: if self.USE_APPLICATION_CREDENTIALS: self.LOG.info("Using Application Credentials for OpenStack Connection") conn = connection.Connection( auth_url=self.AUTH_URL, application_credential_id=self.APPLICATION_CREDENTIAL_ID, application_credential_secret=self.APPLICATION_CREDENTIAL_SECRET, auth_type="v3applicationcredential", ) else: self.LOG.info("Using User Credentials for OpenStack Connection") conn = connection.Connection( username=self.USERNAME, password=self.PASSWORD, auth_url=self.AUTH_URL, project_name=self.PROJECT_NAME, user_domain_name=self.USER_DOMAIN_NAME, project_domain_id=self.PROJECT_DOMAIN_ID, ) conn.authorize() except Exception as e: self.LOG.exception("Client failed authentication at Openstack : {0}", e) raise authenticationException( Reason="Client failed authentication at Openstack" ) self.LOG.info("Connected to Openstack") return conn
[ "def getOpenstackConnection():\n\n connection = openstack.connect(\n region = parser.get('openstack', 'region'), \n auth = {\n 'auth_url': parser.get('openstack', 'auth_url'),\n 'domain_name': parser.get('openstack', 'domain_name'), \n 'password': parser.get('openstack', 'password'),\n 'project_name': parser.get('openstack', 'project_name'),\n 'user_domain_name': parser.get('openstack', 'user_domain_name'),\n 'username': parser.get('openstack', 'username'),\n },\n )\n\n return connection", "def create_connection(self):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n return ssh", "def createConnection():\n conn = libvirt.open(None)\n if not conn:\n print 'Failed to open connection to libvirt'\n sys.exit(1)\n else:\n return conn", "def __init__(self, config):\n\n self.USERNAME = os.environ[\"OS_USERNAME\"]\n self.PASSWORD = os.environ[\"OS_PASSWORD\"]\n self.PROJECT_NAME = os.environ[\"OS_PROJECT_NAME\"]\n self.PROJECT_ID = os.environ[\"OS_PROJECT_ID\"]\n self.USER_DOMAIN_NAME = os.environ[\"OS_USER_DOMAIN_NAME\"]\n self.AUTH_URL = os.environ[\"OS_AUTH_URL\"]\n self.PROJECT_DOMAIN_ID = os.environ[\"OS_PROJECT_DOMAIN_ID\"]\n self.USE_APPLICATION_CREDENTIALS = os.environ.get(\n \"USE_APPLICATION_CREDENTIALS\", False\n )\n if self.USE_APPLICATION_CREDENTIALS:\n self.LOG.info(\"APPLICATION CREDENTIALS will be used!\")\n try:\n self.APPLICATION_CREDENTIAL_ID = os.environ[\"APPLICATION_CREDENTIAL_ID\"]\n self.APPLICATION_CREDENTIAL_SECRET = os.environ[\n \"APPLICATION_CREDENTIAL_SECRET\"\n ]\n except KeyError:\n self.LOG.error(\n \"Usage of Application Credentials enabled - but no credential id or/and secret provided in env!\"\n )\n sys.exit(1)\n\n self.SSH_PORT = 22\n\n with open(config, \"r\") as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)\n self.LOG = setup_logger(config=cfg)\n self.DEFAULT_SECURITY_GROUP_NAME = \"defaultSimpleVM\"\n self.DEFAULT_SECURITY_GROUPS = [self.DEFAULT_SECURITY_GROUP_NAME]\n self.GATEWAY_SECURITY_GROUP_ID = cfg[\"openstack_connection\"][\n \"gateway_security_group_id\"\n ]\n\n self.USE_GATEWAY = cfg[\"openstack_connection\"][\"use_gateway\"]\n self.NETWORK = cfg[\"openstack_connection\"][\"network\"]\n self.FLOATING_IP_NETWORK = cfg[\"openstack_connection\"][\n \"floating_ip_network\"\n ]\n self.PRODUCTION = cfg[\"openstack_connection\"][\"production\"]\n self.CLOUD_SITE = cfg[\"cloud_site\"]\n # connection to redis. 
Uses a pool with 10 connections.\n self.REDIS_HOST = cfg[\"redis\"][\"host\"]\n self.REDIS_PORT = cfg[\"redis\"][\"port\"]\n self.REDIS_PASSWORD = cfg[\"redis\"].get(\"password\", None)\n self.LOG.info(\n f\"Connecting to Redis at {self.REDIS_HOST}:{self.REDIS_PORT}..\"\n )\n self.pool = redis.ConnectionPool(\n host=self.REDIS_HOST, port=self.REDIS_PORT, password=self.REDIS_PASSWORD\n )\n\n self.redis = redis.Redis(connection_pool=self.pool, charset=\"utf-8\")\n try:\n self.redis.ping()\n self.LOG.info(\"Connected to Redis!\")\n except redis.ConnectionError:\n self.LOG.exception(\"Could not connect to Redis!\")\n sys.exit(1)\n\n # try to initialize forc connection\n try:\n self.SUB_NETWORK = cfg[\"bibigrid\"][\"sub_network\"]\n self.BIBIGRID_MODES = cfg[\"bibigrid\"][\"bibigrid_modes\"]\n self.BIBIGRID_HOST = cfg[\"bibigrid\"][\"host\"]\n self.BIBIGRID_PORT = cfg[\"bibigrid\"][\"port\"]\n if cfg[\"bibigrid\"].get(\"https\", False):\n self.BIBIGRID_URL = (\n f\"https://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}/bibigrid/\"\n )\n self.BIBIGIRD_EP = (\n f\"https://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}\"\n )\n else:\n self.BIBIGRID_URL = (\n f\"http://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}/bibigrid/\"\n )\n self.BIBIGIRD_EP = (\n f\"http://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}\"\n )\n self.BIBIGRID_DEACTIVATE_UPRADES_SCRIPT = (\n self.create_deactivate_update_script()\n )\n self.BIBIGRID_ANSIBLE_ROLES = cfg[\"bibigrid\"].get(\n \"ansibleGalaxyRoles\", []\n )\n self.BIBIGRID_LOCAL_DNS_LOOKUP = cfg[\"bibigrid\"].get(\n \"localDnsLookup\", False\n )\n self.LOG.info(\n f\"Loaded Ansible Galaxy Roles for Bibigrid:\\n {self.BIBIGRID_ANSIBLE_ROLES}\"\n )\n\n self.LOG.info(msg=f\"Bibigrd url loaded: {self.BIBIGRID_URL}\")\n except Exception as e:\n self.LOG.exception(e)\n self.LOG.info(\"Bibigrid not loaded.\")\n self.BIBIGRID_URL = None\n self.SUB_NETWORK = None\n\n try:\n self.RE_BACKEND_URL = cfg[\"forc\"][\"forc_url\"]\n backend_url_host = self.RE_BACKEND_URL.split(\":\")\n self.FORC_URL = (\n cfg[\"forc\"].get(\"openresty_url\", None)\n or f\"https:{backend_url_host[1]}/\"\n )\n\n self.FORC_API_KEY = os.environ.get(\"FORC_API_KEY\", None)\n self.FORC_ALLOWED = {}\n self.FORC_HTTPS = cfg[\"forc\"].get(\"forc_https\", True)\n self.FORC_REMOTE_ID = cfg[\"forc\"][\"forc_remote_id\"]\n self.GITHUB_PLAYBOOKS_REPO = cfg[\"forc\"][\"github_playbooks_repo\"]\n if (\n not self.RE_BACKEND_URL\n or not self.FORC_API_KEY\n or not self.GITHUB_PLAYBOOKS_REPO\n ):\n raise ValueError\n self.LOG.info(msg=f\"Forc-Backend url loaded: {self.RE_BACKEND_URL}\")\n self.LOG.info(msg=f\"Forc-Frontend Url loaded: {self.FORC_URL}\")\n except ValueError as ve:\n self.LOG.exception(ve)\n self.LOG.info(\n \"Forc-Backend not loaded as one of the configurations was empty.\"\n )\n self.RE_BACKEND_URL = None\n self.FORC_API_KEY = None\n self.FORC_ALLOWED = None\n self.GITHUB_PLAYBOOKS_REPO = None\n except Exception as e:\n self.LOG.exception(e)\n self.LOG.info(\"Forc-Backend not loaded.\")\n self.RE_BACKEND_URL = None\n self.FORC_API_KEY = None\n self.FORC_ALLOWED = None\n self.GITHUB_PLAYBOOKS_REPO = None\n if self.USE_GATEWAY:\n self.GATEWAY_IP = cfg[\"openstack_connection\"][\"gateway_ip\"]\n self.SSH_FORMULAR = cfg[\"openstack_connection\"][\n \"ssh_port_calc_formular\"\n ]\n self.UDP_FORMULAR = cfg[\"openstack_connection\"][\n \"udp_port_calc_formular\"\n ]\n\n self.LOG.info(f\"Gateway IP is {self.GATEWAY_IP}\")\n self.conn = self.create_connection()\n self._validate_forc_security_group()\n self.update_playbooks()\n 
self.validate_gateway_security_group()\n self.create_or_get_default_ssh_security_group()", "def create_connection(username):\n \n cred_location = \"/mnt/data/{}/utils/data_creds_redshift.json.nogit\".format(username)\n db = db_connection.DBConnection(cred_location)\n return db", "def makeOpenstack(self):\n self.osid = self.handler.createOpenstack()", "def setup_os_client():\n host = os.environ['OS_HOST'] # cluster endpoint, for ex: my-domain.us-east-1.es.amazonaws.com\n region = os.environ['OS_REGION']\n credentials = boto3.Session().get_credentials()\n auth = AWSV4SignerAuth(credentials, region)\n\n os_client = OpenSearch(\n hosts=[{'host': host, 'port': 443}],\n http_auth=auth,\n use_ssl=True,\n verify_certs=True,\n connection_class=RequestsHttpConnection\n )\n return os_client", "def prepare_openstack(self, setup):\n # init variables\n exist_networks = self.os_conn.list_networks()['networks']\n ext_network = [x for x in exist_networks\n if x.get('router:external')][0]\n self.zone = self.os_conn.nova.availability_zones.find(zoneName=\"nova\")\n self.hosts = self.zone.hosts.keys()[:2]\n self.instance_keypair = self.os_conn.create_key(key_name='instancekey')\n self.security_group = self.os_conn.create_sec_group_for_ssh()\n self.networks = []\n\n # create router\n self.router = self.os_conn.create_router(name=\"router01\")['router']\n self.os_conn.router_gateway_add(router_id=self.router['id'],\n network_id=ext_network['id'])\n logger.info('router {} was created'.format(self.router['id']))\n\n self.dhcp_agent_ids = [agt['id'] for agt in\n self.os_conn.neutron.list_agents(\n binary='neutron-dhcp-agent')['agents']]", "def init(ip=\"localhost\", port=54321, start_h2o=True, enable_assertions=True,\n license=None, nthreads=-1, max_mem_size=None, min_mem_size=None, ice_root=None, \n strict_version_check=True, proxy=None, https=False, insecure=False, username=None, \n password=None, cluster_name=None, max_mem_size_GB=None, min_mem_size_GB=None, proxies=None, size=None):\n H2OConnection(ip=ip, port=port,start_h2o=start_h2o,enable_assertions=enable_assertions,license=license,\n nthreads=nthreads,max_mem_size=max_mem_size,min_mem_size=min_mem_size,ice_root=ice_root,\n strict_version_check=strict_version_check,proxy=proxy,https=https,insecure=insecure,username=username,\n password=password,cluster_name=cluster_name,max_mem_size_GB=max_mem_size_GB,min_mem_size_GB=min_mem_size_GB,proxies=proxies,size=size)\n return None", "def get_conn(self):\n if not self._conn:\n http_authorized = self._authorize()\n self._conn = build(\"compute\", self.api_version, http=http_authorized, cache_discovery=False)\n return self._conn", "def connect(uri=LIBVIRT_URI):\n conn = libvirt.open(uri)\n if conn is None:\n raise Exception(\"Could not open connection to the HYPERVISOR\")\n return conn", "def _create_ndex_connection(self):\n if self._ndex is None:\n self._ndex = Ndex2(host=self._server, \n username=self._user, \n password=self._pass)\n return self._ndex", "def _setup_client(self, create=False, container=None):\n\n if container is None:\n container = self.args.container\n\n try:\n values = self.conf.get_container(container)\n except ValueError as ex:\n self.log.error(ex)\n return (None, None)\n\n auth = dict(authurl = self.args.authurl,\n user = values['username'],\n key = values['password'],\n )\n\n if self.args.keystone:\n try:\n from keystoneclient.v2_0 import client as _check_for_ksclient\n except ImportError:\n sys.exit(\"auth 2.0 (keystone) requires python-keystoneclient\")\n else:\n self.log.debug(\"using auth 
2.0 (keystone)\")\n\n if self.args.keystone_separator not in values['username']:\n self.log.error(\"%s: separator not found in %r\" % (container, values['username']))\n return (None, None)\n\n keystone_auth = values['username'].split(self.args.keystone_separator, 1)\n auth['tenant_name'], auth['user'] = keystone_auth\n auth['auth_version'] = '2.0'\n auth['os_options'] = dict(service_type = self.args.keystone_service,\n endpoint_type = self.args.keystone_endpoint,\n region_name = self.args.keystone_region,\n )\n self.log.debug(\"os_options: %r\" % auth['os_options'])\n\n self.auth = auth\n cli = client.Connection(**auth)\n\n try:\n headers, _ = cli.get_container(container)\n except (socket.error, client.ClientException) as ex:\n if getattr(ex, 'http_status', None) == 404:\n if create:\n self.log.warning(\"%s doesn't exist, will be created\" % container)\n return (cli, dict())\n else:\n self.log.error(\"%s doesn't exist\" % container)\n else:\n self.log.error(ex)\n return (None, None)\n\n self.log.debug(headers)\n\n meta = getMeta(headers)\n self.log.debug(\"Meta: %s\" % meta)\n\n if not meta:\n self.log.error(\"%s hasn't been setup to be used with swiftnbd\" % container)\n return (None, None)\n\n return (cli, meta)", "def ssh_connection(ctx, fabric_env):\n\n for name, value in FABRIC_ENV_DEFAULTS.items():\n fabric_env.setdefault(name, value)\n\n try:\n host_ip = ctx.instance.host_ip\n agent_user = ctx.bootstrap_context.cloudify_agent.user\n agent_key_path = ctx.bootstrap_context.cloudify_agent.agent_key_path\n except NonRecoverableError as e:\n ctx.logger.error(\n 'Failed to find potentially required data '\n 'from context: {}'.format(str(e)))\n host_ip = None\n agent_user = None\n agent_key_path = None\n\n put_host(fabric_env, host_ip)\n put_user(fabric_env, agent_user)\n connect_kwargs = {}\n put_key_or_password(\n fabric_env,\n connect_kwargs,\n agent_key_path)\n\n host = fabric_env.pop('host')\n # Prepare the fabric2 env inputs if they passed\n fabric2_env = {}\n prepare_fabric2_env(fabric2_env, fabric_env, connect_kwargs)\n overrides = {'overrides': fabric2_env}\n\n # Convert fabric 1.x inputs to fabric 2.x\n fabric_env = _AttributeDict(**fabric_env)\n config = Config.from_v1(fabric_env, **overrides)\n\n if not config[\"timeouts\"].get(\"command\"):\n config[\"timeouts\"][\"command\"] = fabric_env.command_timeout\n if fabric_env.connect_timeout != 10:\n config[\"timeouts\"]['connect'] = fabric_env.connect_timeout\n\n fabric_env_config = {\n 'host': host,\n 'user': fabric2_env['user'],\n 'port': fabric2_env['port'],\n 'config': config\n }\n conn = Connection(**fabric_env_config)\n try:\n conn.open()\n yield conn\n finally:\n conn.close()", "def __init__(self):\n self.host = CONF.AGENT.zvm_xcat_server\n self.port = 443\n self.xcat_timeout = CONF.AGENT.zvm_xcat_timeout\n try:\n self.conn = HTTPSClientAuthConnection(self.host, self.port,\n CONF.AGENT.zvm_xcat_ca_file,\n timeout=self.xcat_timeout)\n except Exception:\n LOG.error(\"Connect to xCat server %s failed\" % self.host)\n raise exception.zVMxCatConnectionFailed(xcatserver=self.host)", "def open_connection_with_the_cloud():\n client_socket = socket.socket()\n client_socket.connect((CLOUD_IP, CLOUD_PORT))\n return client_socket", "def createConnection(self, URL):\n if URL.startswith(\"https://\"):\n hostPort = URL.replace(\"https://\", \"\")\n if \"X509_USER_KEY\" in os.environ.keys() and \\\n \"X509_USER_CERT\" in os.environ.keys():\n return httplib.HTTPSConnection(hostPort,\n key_file = os.environ[\"X509_USER_KEY\"],\n cert_file = 
os.environ[\"X509_USER_CERT\"])\n elif \"X509_HOST_KEY\" in os.environ.keys() and \\\n \"X509_HOST_CERT\" in os.environ.keys():\n return httplib.HTTPSConnection(hostPort,\n key_file = os.environ[\"X509_HOST_KEY\"],\n cert_file = os.environ[\"X509_HOST_CERT\"])\n else:\n print \"Path to the key and cert files must be set in either\"\n print \"X509_HOST_[CERT|KEY] or X509_USER_[CERT|KEY].\"\n sys.exit(-1)\n elif URL.startswith(\"http://\"):\n hostPort = URL.replace(\"http://\", \"\")\n return httplib.HTTPConnection(hostPort)\n else:\n print \"URL must start with http:// or https://.\"\n sys.exit(-1)\n\n return None", "def __init__(self, **nxos_kwargs):\n self.nxargs = self._prepare_conn_args(clean_kwargs(**nxos_kwargs))\n # Default: Connect to unix domain socket on localhost.\n if self.nxargs[\"connect_over_uds\"]:\n if not os.path.exists(self.NXAPI_UDS):\n raise NxosClientError(\n \"No host specified and no UDS found at {}\\n\".format(self.NXAPI_UDS)\n )\n\n # Create UHTTPConnection object for NX-API communication over UDS.\n log.info(\"Nxapi connection arguments: %s\", self.nxargs)\n log.info(\"Connecting over unix domain socket\")\n self.connection = UHTTPConnection(self.NXAPI_UDS)\n else:\n # Remote connection - Proxy Minion, connect over http(s)\n log.info(\"Nxapi connection arguments: %s\", self.nxargs)\n log.info(\"Connecting over %s\", self.nxargs[\"transport\"])\n self.connection = salt.utils.http.query", "def _connect(self):\n\n if self.connected:\n self.queue_message('v', 'gRPC connection to host %s already exist' % self._target)\n return\n\n grpcEnv = self.get_option('grpc_environment') or {}\n if not isinstance(grpcEnv, dict):\n raise AnsibleConnectionFailure(\"grpc_environment must be a dict\")\n\n for key in grpcEnv:\n if grpcEnv[key]:\n os.environ[key] = str(grpcEnv[key])\n else:\n try:\n del os.environ[key]\n except KeyError:\n # no such setting in current environment, but thats ok\n pass\n\n self._login_credentials = [\n ('username', self.get_option('remote_user')),\n ('password', self.get_option('password'))\n ]\n\n host = self.get_option('host')\n port = self.get_option('port')\n self._target = host if port is None else '%s:%d' % (host, port)\n self._timeout = self.get_option('persistent_command_timeout')\n\n certs = {}\n certs['root_certificates'] = self.readFile('root_certificates_file')\n certs['certificate_chain'] = self.readFile('certificate_chain_file')\n certs['private_key'] = self.readFile('private_key_file')\n\n options = self.get_option('grpc_channel_options')\n if options:\n if not isinstance(options, dict):\n raise AnsibleConnectionFailure(\"grpc_channel_options must be a dict\")\n options = options.items()\n\n if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']:\n self.queue_message('v', 'Starting secure gRPC connection')\n creds = grpc.ssl_channel_credentials(**certs)\n self._channel = grpc.secure_channel(self._target, creds, options=options)\n else:\n self.queue_message('v', 'Starting insecure gRPC connection')\n self._channel = grpc.insecure_channel(self._target, options=options)\n\n self.queue_message('v', \"gRPC connection established for user %s to %s\" %\n (self.get_option('remote_user'), self._target))\n\n self.queue_message('v', 'Creating gNMI stub')\n self._stub = gnmi_pb2.gNMIStub(self._channel)\n\n self._encoding = self.get_option('gnmi_encoding')\n if not self._encoding:\n self.queue_message('v', 'Run CapabilityRequest()')\n request = gnmi_pb2.CapabilityRequest()\n response = self._stub.Capabilities(request, 
metadata=self._login_credentials)\n self.queue_message('v', 'CapabilityRequest() succeeded')\n\n self._gnmiVersion = response.gNMI_version\n self._yangModels = response.supported_models\n\n if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings:\n self._encoding = 'JSON_IETF'\n elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings:\n self._encoding = 'JSON'\n else:\n raise AnsibleConnectionFailure(\"No compatible supported encoding found (JSON or JSON_IETF)\")\n else:\n if self._encoding not in ['JSON_IETF', 'JSON']:\n raise AnsibleConnectionFailure(\"Incompatible encoding '%s' requested (JSON or JSON_IETF)\" % self._encoding)\n\n self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding)\n\n self._connected = True\n self.queue_message('v', 'gRPC/gNMI connection has established successfully')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }