query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
used to choose the manga from the list.
def choose_manga(index):
    while index == -1:
        # any message.
        print("\nSelect the manga to download\n")
        # This is to display the list of manga available to download.
        for i in range(1, len(anime_list)):
            print(str(i) + '. ' + anime_list[i])
        # This block is to check whether the input given is an integer or not.
        try:
            index = int(input("\nSelect the choice [1-%d]: " % (len(anime_list) - 1)))
        except ValueError:
            index = -1
        # for clearing the screen acc. to your system platform (unix or windows).
        os.system('cls' if os.name == 'nt' else 'clear')
        # check if the input given is in the appropriate range or not.
        if index <= 0 or index > (len(anime_list) - 1):
            index = -1
            print("\n***INVALID option. Try Again.***")
    return index
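A minimal driver sketch for the function above, kept deliberately hedged: the import, the `anime_list` contents, and the example titles are assumptions for illustration and are not part of this record.

import os

# Illustrative globals the function expects to find at module level.
anime_list = ["", "One Piece", "Naruto", "Bleach"]  # index 0 is unused; the menu starts at 1

choice = choose_manga(-1)            # -1 keeps the prompt loop running until a valid number is entered
print("Downloading:", anime_list[choice])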
[ "async def manga(self, ctx, *, title):\n cmd = \"manga\"\n await self.fetch_info(ctx, cmd, title)", "def phones_by_manufacturer(mnf,phoneList):\n selectedPhones = []\n for phone in phoneList:\n if phone['mnf'] == mnf:\n selectedPhones.append(phone)\n return selectedPhones", "def get_all_available_manga_online_list(self)-> Optional[List[Manga]]:\n\n cookies = {\"mangadex_title_mode\": \"2\"}\n soup = self.get_soup(self.url_search, cookies=cookies) # cookie to create\n\n if soup is None:\n return None\n\n saving_mangas_list: List[Manga] = []\n try:\n nb_pages = int(soup.find_all(\"a\", {\"class\": \"page-link\"})[-1][\"href\"].split(\"/\")[-2])\n\n for nbPage in range(1, nb_pages+1):\n page_url = self.url_search + str(nbPage)\n soup = self.get_soup(page_url, cookies=cookies)\n manga_field_in_page = soup.find_all(\"div\", {\"class\": \"manga-entry\"})\n for manga_field in manga_field_in_page:\n title_field = manga_field.find(\"a\", {\"class\":\"ml-1\"})\n name = title_field[\"title\"]\n url = self.url_manga + title_field[\"href\"]\n\n saving_manga = Manga()\n saving_manga.name = name\n saving_manga.link = url\n\n saving_mangas_list.append(saving_manga)\n except Exception as e:\n self.print_v(\"Impossible to get the info from the \", self.url_search, \" page. Maybe the site tags have change: \", str(e))\n return None\n\n return saving_mangas_list", "def pick_needed(animelist: List[Tuple[int,str]]) -> Tuple[List[Tuple[int,str]],List[AnimeThemeAnime]]:\n logger.debug(f'Loading animethemes data from {TEMPFILE}')\n animethemes = []\n animelist = {i[0]:i[1] for i in animelist}\n \n with open(TEMPFILE,'r') as file:\n for anime in json.load(file):\n malid = get_malid(anime)\n if malid in animelist:\n animethemes.append(anime)\n del animelist[malid]\n \n return list(animelist.items()),animethemes", "async def manga(message):\n query = message.content.strip()\n if not len(query):\n raise CommandError(\"Supply the name of a manga to search.\")\n auth = aiohttp.BasicAuth(username(), password())\n try:\n r = await http.get(\"https://myanimelist.net/api/manga/search.xml\", params=[\n ('q', query)\n ], auth=auth)\n except BadStatusCodeError as e:\n if e.http_code in (204, 404):\n raise CommandError(\"No manga results for '{query}'.\".format(query=query))\n raise\n doc = BeautifulSoup(r.text(), features=\"lxml\")\n entries = doc.manga.find_all(\"entry\", recursive=False)\n if not len(entries):\n raise CommandError(\"No results found.\")\n entry = entries[0]\n return \"{image}\\n\\n\" \\\n \"**{name}** ({type})\\n\\n\" \\\n \"**Status:** {status}\\n\" \\\n \"**Score:** {score}\\n\" \\\n \"**Chapters:** {chapters}\\n\" \\\n \"**Run Dates:** {start}-{end}\\n\\n\" \\\n \"{synopsis}\\n\".format(\n image=entry.image.text,\n type=entry.type.text,\n name=entry.title.text,\n status=entry.status.text,\n score=entry.score.text,\n chapters=entry.chapters.text,\n start=entry.start_date.text,\n end=entry.end_date.text,\n synopsis=strip_html(entry.synopsis.text),\n )", "async def get_anime_manga(bot: UtilityBot, *, query: str, _type: ContentType) -> dict:\n query_string = _QUERY(_type=_type.value.upper())\n async with bot.http_session.post(\n API_URL, json={\"query\": query_string, \"variables\": {\"search\": query}}\n ) as resp:\n logger.info(f\"Searching Anilist for {query} {_type.value}\")\n try:\n d = await resp.json()\n return {\n \"siteUrl\": d[\"data\"][\"Media\"][\"siteUrl\"],\n \"title\": d[\"data\"][\"Media\"][\"title\"][\"romaji\"],\n }\n except KeyError as e:\n logger.warning(\n f\"Could not find content 
{_type.value}: {query}\\nAPI status: {resp.status}\"\n )\n logger.debug(str(d)) # type: ignore ; not unbound\n raise ContentNotFoundError(\n f\"Could not find {_type.value} with name {query}\"\n ) from e", "def complete_pick(self, text, line, begidk, endidx):\n avail_names = self.ap.index.get_level_values(\"player\")\n # TODO: make it look a bit prettier by allowing spaces instead of underscores.\n # see:\n # https://stackoverflow.com/questions/4001708/change-how-python-cmd-module-handles-autocompletion\n # clean up the list a bit, removing ' characters and replacing spaces with underscores\n mod_avail_names = [simplify_name(name) for name in avail_names]\n # TODO: allow another argument for manager names and complete based on available\n if text:\n return [name for name in mod_avail_names if name.startswith(text.lower())]\n else:\n return [name for name in mod_avail_names]", "def selMuonPParts(name, trackingSeq):\n veloprotos = ChargedProtoParticleMaker(name+\"ProtoPMaker\")\n veloprotos.Inputs = [\"Rec/VeloMuon/Tracks\"]\n veloprotos.Output = \"Rec/ProtoP/\"+name+\"ProtoPMaker/ProtoParticles\"\n veloprotos.addTool( DelegatingTrackSelector, name=\"TrackSelector\" )\n tracktypes = [ \"Long\" ]\n veloprotos.TrackSelector.TrackTypes = tracktypes\n selector = veloprotos.TrackSelector\n for tsname in tracktypes:\n \tselector.addTool(TrackSelector,name=tsname)\n \tts = getattr(selector,tsname)\n \t# Set Cuts\n \tts.TrackTypes = [tsname]\n\n veloprotoseq = GaudiSequencer(name+\"ProtoPSeq\")\n veloprotoseq.Members += [ veloprotos ]\n\n return GSWrapper(name=\"WrappedVeloMuonProtoPSeqFor\" + name,\n sequencer=veloprotoseq,\n output='Rec/ProtoP/' + name +'ProtoPMaker/ProtoParticles',\n requiredSelections = [ trackingSeq])", "def selectObjByMbeList(mbe_list) :\t\n\tmats=bpy.data.materials\n\tmatter=[]\n\t\n\ti=0 #counter\n\t\n\tfor m in mats :\n\t\tif m.mbe == mbe_list :\n\t\t\tmatter.append(m)\n\tfor obj in bpy.context.scene.objects :\n\t\tif obj.type == 'MESH' :\n\t\t\tif obj.active_material in matter :\n\t\t\t\tobj.select=True\n\t\t\t\ti+=1\n\treturn i", "def getGenesFromMaf(maffile):\n\n maf_head = pd.read_csv(gzip.open(maffile),sep='\\t',comment='#')\n ##get hugo_symbol, and case_id\n return maf_head[['Hugo_Symbol', 'case_id', 'HGVSc', 'One_Consequence', 'SIFT', 'PolyPhen']]", "def random_by_genre():\n os.system('clear')\n music_list = music()\n genre = input(\"Enter the genre of the music: \")\n print(\"%s album:\" % genre)\n genre_list = []\n for item in music_list:\n if item[1][1].lower() == genre.lower():\n genre_list.append(item)\n if len(genre_list) > 0:\n album = random.choice(genre_list)\n print(\"%s - %s\" % (album[0][0], album[0][1]))\n else:\n print(\"there is no %s album on this music list.\" % genre)\n print(\"\\nPress enter to continue\")\n input()\n os.system('clear')", "def amf_list(self) -> str:\n return self.run_device_command(\"amf-list\")[0]", "def getEntry(path):\n l = makeList(path)\n #return random.choice(l)", "def anmanosca(self):\n if (self.Linary[self.Index + 1] == sktch) and ((self.Pada1 == \"A\") or (self.Pada1 == \"mA\")):\n self.insertary(sktt, self.Index)\n self.Index = self.Index + 1", "def random_item(list:any):\n return random.choice(list)", "def complete_mv(self, text, line, begidk, endidx):\n # begidx and endidx are supposed to be useful when the completion\n # depends on the position, but they don't appear to be defined.\n line_words = line.split(\" \")[1:]\n completions = []\n if len(line_words) == 1:\n picked_names = 
self.pp.index.get_level_values(\"player\").map(simplify_name)\n completions = [name for name in picked_names if name.startswith(text)]\n elif len(line_words) == 2:\n completions = [\n name\n for name in self.manager_names.values()\n if simplify_name(name).startswith(text.lower())\n ]\n return completions", "def parseMangaTitle(self, mangaSoup: BeautifulSoup) -> str:\n return mangaSoup.find('ul', 'manga-info-text').find('h1').text", "def mel_to_maya():\n\tsend_to_maya('mel')", "def rnd_male_name(iNo_of_names):\n\n# DATABASE FOR THE MALE NAMES\n import modules.x_database as db\n\n # Connect to the database\n ccTremb = db.connect()\n cChosen_db = db.rnd_man(ccTremb)\n aaNames = pick_name_w_alt(iNo_of_names, cChosen_db)\n return aaNames" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_trained_representations(method) > Array, int Gets fully trained representations for the given method, cell type and chromosome. Obtains SNIPER and SCA representations from the respective methods. Should contain SNIPER and SCA positions and internal representations.
def get_trained_representations(self, method="hiclstm"):
    pred_data = pd.read_csv(
        self.cfg.output_directory + "%s_%s_predictions_chr%s.csv" % (method, self.cell, str(self.chr)),
        sep="\t")
    pred_data = pred_data.drop(['Unnamed: 0'], axis=1)
    representations, start, stop = self.convert_df_to_np(pred_data, method=method)
    pred_data = pred_data.filter(['i', 'j', 'v', 'pred'], axis=1)
    return representations, start, stop, pred_data
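A self-contained sketch of the dataframe clean-up this method performs after loading the predictions file; the toy rows and values are made up, and only the drop/filter steps are reproduced.

import pandas as pd

# Toy stand-in for the "<method>_<cell>_predictions_chr<chr>.csv" table read above.
pred_data = pd.DataFrame({"Unnamed: 0": [0, 1], "i": [10, 11], "j": [12, 13],
                          "v": [0.80, 0.60], "pred": [0.75, 0.62]})
pred_data = pred_data.drop(['Unnamed: 0'], axis=1)             # drop the saved index column
pred_data = pred_data.filter(['i', 'j', 'v', 'pred'], axis=1)  # keep positions, observed and predicted contacts
print(pred_data)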
[ "def get_meth_codes(self):\n try:\n raw_codes = pd.io.json.read_json('https://api.earthref.org/MagIC/method_codes.json')\n except urllib2.URLError:\n return [], []\n except httplib.BadStatusLine:\n return [], []\n code_types = raw_codes.ix['label']\n tot_codes = raw_codes.ix['count'].sum()\n\n all_codes = []\n for code_name in code_types.index:\n code_url = 'https://api.earthref.org/MagIC/method_codes/{}.json'.format(code_name)\n # if internet fails in the middle, cut out\n try:\n raw_df = pd.io.json.read_json(code_url)\n except urllib2.URLError:\n return [], []\n except httplib.BadStatusLine:\n return [], []\n # unpack the data into a dataframe, drop unnecessary columns\n df = DataFrame(raw_df[code_name][0])[['definition', 'code']]\n # remake the dataframe with the code (i.e., 'SM_VAR') as the index\n df = DataFrame(raw_df[code_name][0], index=df['code'])[['definition']]\n # add a column with the code type (i.e., 'anisotropy_estimation')\n df['dtype'] = code_name\n little_series = df['definition']\n big_series = Series()\n if any(all_codes):\n all_codes = pd.concat([all_codes, df])\n big_series = pd.concat([big_series, little_series])\n else:\n all_codes = df\n big_series = little_series\n\n # format code_types and add pmag, er, and age columns\n code_types = raw_codes.T\n code_types['pmag'] = False\n code_types['er'] = False\n code_types['age'] = False\n age = ['geochronology_method']\n pmag = ['anisotropy_estimation', 'data_adjustment',\n 'direction_estimation', 'geochronology_method',\n 'intensity_estimation', 'lab_protocol', 'lab_treatment',\n 'stability_tests', 'statistical_method']\n er = ['field_sampling', 'sample_characterization',\n 'sample_orientation', 'sample_preparation',\n 'geochronology_method']\n code_types.ix[pmag, 'pmag'] = True\n code_types.ix[er, 'er'] = True\n code_types.ix[age, 'age'] = True\n return all_codes, code_types", "def microstates_segment(eeg, n_microstates=4, train=\"gfp\", method='kmod', gfp_method='l1', sampling_rate=None,\n standardize_eeg=False, n_runs=10, max_iterations=1000, criterion='gev', random_state=None, **kwargs):\n # Sanitize input\n data, indices, gfp, info_mne = microstates_clean(eeg,\n train=train,\n sampling_rate=sampling_rate,\n standardize_eeg=standardize_eeg,\n gfp_method=gfp_method,\n **kwargs)\n\n # Run clustering algorithm\n if method in [\"kmods\", \"kmod\", \"kmeans modified\", \"modified kmeans\"]:\n\n # If no random state specified, generate a random state\n if not isinstance(random_state, np.random.RandomState):\n random_state = np.random.RandomState(random_state)\n\n # Generate one random integer for each run\n random_state = random_state.choice(range(n_runs * 1000), n_runs, replace=False)\n\n # Initialize values\n gev = 0\n microstates = None\n segmentation = None\n polarity = None\n info = None\n\n # Do several runs of the k-means algorithm, keep track of the best segmentation.\n for run in range(n_runs):\n\n # Run clustering on subset of data\n _, _, current_info = cluster(data[:, indices].T,\n method=\"kmod\",\n n_clusters=n_microstates,\n random_state=random_state[run],\n max_iterations=max_iterations,\n threshold=1e-6)\n current_microstates = current_info[\"clusters_normalized\"]\n\n # Run segmentation on the whole dataset\n s, p, g = _microstates_segment_runsegmentation(data, current_microstates, gfp)\n\n # If better (i.e., higher GEV), keep this segmentation\n if g > gev:\n microstates, segmentation, polarity, gev = current_microstates, s, p, g\n info = current_info\n\n\n else:\n # Run clustering algorithm on 
subset\n _, microstates, info = cluster(data[:, indices].T,\n method=method,\n n_clusters=n_microstates,\n random_state=random_state,\n **kwargs)\n\n # Run segmentation on the whole dataset\n segmentation, polarity, gev = _microstates_segment_runsegmentation(data, microstates, gfp)\n\n # Reorder\n segmentation, microstates = microstates_classify(segmentation, microstates)\n\n # CLustering quality\n# quality = cluster_quality(data, segmentation, clusters=microstates, info=info, n_random=10, sd=gfp)\n\n # Output\n info = {\"Microstates\": microstates,\n \"Sequence\": segmentation,\n \"GEV\": gev,\n \"GFP\": gfp,\n \"Polarity\": polarity,\n \"Info_algorithm\": info,\n \"Info\": info_mne}\n\n return info", "def identify_topics_arxiv_no_refs(method: str, candidates: str):\n print(\"Loading title noun_phrases...\")\n with open(NO_REFS_ARXIV_CS_TITLE_NPS_PATH) as _json_file:\n title_noun_phrases = json.load(_json_file)\n\n print(\"Loading abstract noun_phrases...\")\n with open(NO_REFS_ARXIV_CS_ABSTRACT_NPS_PATH) as _json_file:\n abstract_noun_phrases = json.load(_json_file)\n\n print(\"Loading body noun_phrases...\")\n with open(NO_REFS_ARXIV_CS_BODY_NPS_PATH) as _json_file:\n body_noun_phrases = json.load(_json_file)\n\n print(\"Loading normalization...\")\n with open(NO_REFS_ARXIV_CS_NORMALIZATION_PATH) as _json_file:\n phrase_normalization = json.load(_json_file)\n\n print(\"Loading citng ids...\")\n with open(NO_REFS_ARXIV_CS_CITING_IDS_PATH) as _json_file:\n s2_id_to_citing_ids = json.load(_json_file)\n\n print(\"Loading references...\")\n with open(NO_REFS_ARXIV_CS_REFERENCES_PATH) as _json_file:\n s2_id_to_references = json.load(_json_file)\n\n print(\"Loading canonicalization...\")\n with open(NO_REFS_ARXIV_CS_CANONICALIZATION_PATH) as _json_file:\n s2_id_to_canonical = json.load(_json_file)\n\n print(\"Loading arxiv to s2 mapping...\")\n with open(NO_REFS_ARXIV_CS_TO_S2_MAPPING_PATH) as _json_file:\n arxiv_to_s2_mapping = json.load(_json_file)\n\n s2_id_to_date_key = {\n value: get_date_key_from_arxiv_id(key)\n for key, value in arxiv_to_s2_mapping.items()\n }\n\n combined_noun_phrases = merge_noun_phrase_dicts(\n title_noun_phrases, abstract_noun_phrases, body_noun_phrases\n )\n\n if candidates == \"title\":\n all_title_noun_phrases = {\n noun_phrase\n for noun_phrase in tqdm(\n title_noun_phrases.keys(), desc=\"Computing all candidate phrases\"\n )\n }\n candidate_expanded_noun_phrases = [\n get_all_candidate_strings_for_n_gram(noun_phrase, phrase_normalization)\n for noun_phrase in tqdm(\n all_title_noun_phrases, desc=\"Expanding candidate phrases\"\n )\n ]\n elif candidates == \"abstract\":\n all_abstract_noun_phrases = {\n noun_phrase\n for noun_phrase in tqdm(\n abstract_noun_phrases.keys(), desc=\"Computing all candidate phrases\"\n )\n }\n candidate_expanded_noun_phrases = [\n get_all_candidate_strings_for_n_gram(noun_phrase, phrase_normalization)\n for noun_phrase in tqdm(\n all_abstract_noun_phrases, desc=\"Expanding candidate phrases\"\n )\n ]\n else:\n raise Exception(\"Invalid candidate set\")\n\n all_ids_in_corpus_canonical = {\n s2_id_to_canonical.get(id, \"\") for id in set(arxiv_to_s2_mapping.values())\n }\n score_results = []\n for noun_phrase_cluster in tqdm(\n candidate_expanded_noun_phrases, desc=\"Computing citation scores\"\n ):\n if method == \"forecite\":\n citation_scores = compute_citation_scores(\n noun_phrase_cluster,\n combined_noun_phrases,\n arxiv_to_s2_mapping,\n s2_id_to_citing_ids,\n s2_id_to_references,\n s2_id_to_canonical,\n s2_id_to_date_key,\n 
all_ids_in_corpus_canonical,\n )\n if citation_scores[1] != []:\n score_results.append(citation_scores)\n elif method == \"cnlc\":\n score = compute_cnlc_score(\n noun_phrase_cluster,\n combined_noun_phrases,\n s2_id_to_references,\n s2_id_to_canonical,\n arxiv_to_s2_mapping,\n all_ids_in_corpus_canonical,\n )\n score_results.append(score)\n elif method == \"loor\":\n score = compute_loor_score(\n noun_phrase_cluster,\n combined_noun_phrases,\n s2_id_to_references,\n arxiv_to_s2_mapping,\n s2_id_to_canonical,\n s2_id_to_citing_ids,\n all_ids_in_corpus_canonical,\n )\n score_results.append(score)\n\n if method == \"forecite\":\n score_results = [\n (\n result[0],\n topic_score(\n result[1][0][TERM_OCCURRENCES_INDEX],\n result[1][0][TERM_CITATIONS_INDEX],\n ),\n result[1],\n )\n for result in score_results\n ]\n score_results = sorted(score_results, key=lambda x: x[1], reverse=True,)\n elif method == \"cnlc\":\n score_results = sorted(score_results, key=lambda x: x[1], reverse=True)\n elif method == \"loor\":\n score_results = sorted(score_results, key=lambda x: x[1], reverse=True)\n\n if candidates == \"title\":\n if method == \"forecite\":\n output_file_path = NO_REFS_ARXIV_CS_TITLE_CANDIDATES_SCORES_PATH\n elif method == \"cnlc\":\n output_file_path = NO_REFS_ARXIV_CS_TITLE_CANDIDATES_CNLC_PATH\n elif method == \"loor\":\n output_file_path = NO_REFS_ARXIV_CS_TITLE_CANDIDATES_LOOR_PATH\n elif candidates == \"abstract\":\n if method == \"forecite\":\n output_file_path = NO_REFS_ARXIV_CS_ABSTRACT_CANDIDATES_SCORES_PATH\n elif method == \"cnlc\":\n output_file_path = NO_REFS_ARXIV_CS_ABSTRACT_CANDIDATES_CNLC_PATH\n elif method == \"loor\":\n output_file_path = NO_REFS_ARXIV_CS_ABSTRACT_CANDIDATES_LOOR_PATH\n print(\"Dumping citation score output to {}\".format(output_file_path))\n with open(output_file_path, \"w\") as _json_file:\n json.dump(score_results, _json_file)\n\n print(\"Done.\")", "def recognize(self, image, boxes):\r\n raise NotImplementedError", "def get_enabled_providers(method):\n results = []\n type = \"2\"\n if method == \"general\":\n type = \"0\"\n elif method == \"movie\":\n type = \"1\"\n for provider in definitions:\n if 'enabled' in definitions[provider] and not definitions[provider]['enabled']:\n continue\n\n if get_setting('use_%s' % provider, bool):\n contains = get_setting('%s_contains' % provider, choices=('All', 'Movies', 'Shows'))\n if not contains or contains == \"0\":\n results.append(provider)\n elif contains == type:\n results.append(provider)\n if 'custom' in definitions[provider] and definitions[provider]['custom']:\n results.append(provider)\n return results", "def write_numipedia(rewrite_method_pages=True):\n methods = rk.loadRKM('All')\n\n for k in range(2,6):\n method = lm.Adams_Bashforth(k)\n methods[method.name] = method\n method = lm.Adams_Moulton(k)\n methods[method.name] = method\n method = lm.backward_difference_formula(k)\n methods[method.name] = method\n\n for key in ['DDAS47','LDDC46','RK45[2R]C','RK58[3R]C','RK59[2R]C']:\n method = lsrk.load_2R(key)\n methods[method.name] = method\n\n for key in ['NSSP32','NSSP33']:\n del methods[key]\n\n method = rk.SSPRK2(4)\n methods[method.name] = method\n method = rk.SSPRK3(4)\n methods[method.name] = method\n method = rk.SSPRK3(9)\n methods[method.name] = method\n method = rk.SSPIRK3(3)\n methods[method.name] = method\n method = rk.SSPIRK3(4)\n methods[method.name] = method\n\n method = rk.RKC1(5)\n methods[method.name] = method\n method = rk.RKC1(20)\n methods[method.name] = method\n method = 
rk.RKC2(5)\n methods[method.name] = method\n method = rk.RKC2(20)\n methods[method.name] = method\n\n method = rk.DC_pair(3)\n methods[method.name] = method\n method = rk.DC_pair(4)\n methods[method.name] = method\n\n method = rk.extrap_pair(4,base='euler')\n methods[method.name] = method\n method = rk.extrap_pair(6,base='euler')\n methods[method.name] = method\n\n method = rk.extrap_pair(2,base='midpoint')\n methods[method.name] = method\n method = rk.extrap_pair(3,base='midpoint')\n methods[method.name] = method\n method = rk.extrap_pair(4,base='midpoint')\n methods[method.name] = method\n\n write_index_page(methods,index_file_path=os.path.join(parent_path,top_dir))\n\n if rewrite_method_pages:\n methods_path = os.path.join(parent_path,top_dir,methods_dir)\n print methods_path\n if not os.path.exists(methods_path):\n os.makedirs(methods_path)\n img_path = os.path.join(parent_path,top_dir,img_dir)\n print img_path\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n\n template_file = os.path.join(parent_path,top_dir,'method_template.html')\n for key,method in methods.iteritems():\n fname = os.path.join(methods_path,method.shortname+'.html')\n print fname\n s = method_page(method,template_file)\n\n outfile = open(fname,'w')\n outfile.write(s)\n outfile.close()", "def train(self, method: str):\n if method == 'knn':\n # 'Best' result came from had an accuracy of ~0.59, which is just a\n # bit better than guessing.\n self.method = 'k nearest neighbours'\n model = KNeighborsClassifier(n_neighbors=5)\n return self._train_model(model)\n elif method == 'dt':\n # Best result came from doing nothing with the data, and resulted\n # in an accuracy of ~0.89.\n self.method = 'decision tree'\n model = DecisionTreeClassifier(\n min_samples_leaf=4,\n min_samples_split=10\n )\n return self._train_model(model)\n elif method == 'gnb':\n # This method is based on the Bayesian probability that a point in\n # the data set is a certain class, e.g. p(x = 1), given all the\n # parameters for this point, y_i, so e.g. p(x = 1 | y_i). The naive\n # part of the method is that it considers that all these parameters\n # y_i are independent of each other.\n # This method was just implemented to see the documentation from\n # scikit-learn, no real experimenting has been done. 
This delivered\n # an accuracy of ~0.78.\n self.method = 'naive bayes (gaussian)'\n model = GaussianNB()\n return self._train_model(model)\n elif method == 'adaboost':\n self.method = method\n model = AdaBoostClassifier(n_estimators=2)\n return self._train_ensemble(model)\n else:\n raise Exception(\"No proper training method provided.\")\n return 0", "def convert_fast_rcnn(self,classes,boxes,rois,ratios):\n res=[]\n for i in range(len(rois)):\n # for the i=th image\n # find the i-th im\n mask=(rois[:,0]==i)\n \n i_rois=rois[mask] # [M,1+4]\n i_param_boxes=boxes[mask] # [M,4*cls_num]\n i_param_cls=classes[mask] # [M,1+obj_cls_num]\n if len(i_rois)==0:\n # no other images\n break \n ratio=ratios[i]\n # the image_id is useless, so remove it\n i_rois=i_rois[:,1:] # [M,4]\n\n i_param_boxes=i_param_boxes.view(len(i_param_boxes),-1,4) # [M,cls_num,4] \n\n mean=self.mean # [4]\n std=self.std # [4]\n\n mean=mean[None].expand_as(i_param_boxes).type_as(i_param_boxes)\n std=std[None].expand_as(i_param_boxes).type_as(i_param_boxes)\n\n i_param_boxes=(i_param_boxes*std+mean)\n\n i_rois=i_rois[:,None].expand_as(i_param_boxes) # [M,cls_num,4]\n r_boxes= decode_box(i_param_boxes,i_rois) # [M,cls_num,4]\n _,cls_num,_=r_boxes.shape\n \n # remove the neg_cls_score and apply nms\n res_box,res_label,res_prob=self._suppres(r_boxes,i_param_cls[:,1:],cls_num)\n res_box*=ratio\n res.append((res_box,res_label,res_prob))\n\n return res\n\n raise NotImplementedError()", "def generate (self, method='crest', nprocs_per_job=1, nprocs=1) :\n Conformers.generate(self, method, nprocs_per_job, nprocs)", "def community_detection(G, method = 'un_louvain'):\n Gnodes = list(G.nodes())\n Gun = G.to_undirected()\n G_to_return = Gun\n if method == 'un_louvain':\n partition = community.best_partition(Gun, randomize=None, resolution=1.00)\n all_nodes = []\n all_nodes = []\n all_list_nodes = []\n\n for com in set(partition.values()) :\n # print('Cluster {} Members:'.format(com))\n list_nodes = [nodes for nodes in partition.keys()\n if partition[nodes] == com]\n all_list_nodes += [Gnodes.index(nodes) for nodes in partition.keys()\n if partition[nodes] == com]\n all_nodes.append(list_nodes)\n else:\n if method == 'label_propagation':\n func = algorithms.label_propagation\n elif method == 'leiden':\n func = algorithms.leiden\n elif method == 'walktrap':\n func = algorithms.walktrap\n elif method == 'louvain':\n func = algorithms.louvain\n elif method == 'infomap':\n func = algorithms.infomap\n elif method == 'rb_pots':\n func = algorithms.rb_pots\n try:\n if method == 'infomap':\n coms = func(G, flags = '--directed')\n G_to_return = G\n elif method == 'rb_pots':\n coms = func(G, weights='weight')\n G_to_return = G\n else:\n coms = func(G)\n G_to_return = G\n except:\n coms = func(Gun)\n all_nodes = []\n all_list_nodes = []\n for i in coms.communities:\n list_nodes = i\n all_list_nodes += [Gnodes.index(j) for j in i]\n all_nodes.append(list_nodes)\n return G_to_return, all_list_nodes, all_nodes", "def get_annotations(self):\n\n variants = self.ids()\n variants = np.array([var.replace(':', '-').replace('/', '-') for var in variants], dtype='object')\n\n url = 'https://api.missionbio.io/annotations/v1/variants?ids=' + ','.join(variants.astype(str))\n r = requests.get(url=url)\n vars = r.text.split('chromosome')[1:]\n genes = deepcopy(variants)\n\n for ii in range(len(vars)):\n\n vals = vars[ii].split('\"')\n p = np.array(np.where(np.isin(vals, ['Protein'])))[0]\n g = np.array(np.where(np.isin(vals, ['Gene'])))[0]\n if len(g) == 0:\n 
continue\n\n prot = vals[p[0] + 4]\n gene = vals[g[0] + 4]\n\n patho = vars[ii].find('Pathogenic') != -1\n lpatho = vars[ii].find('Likely') != -1\n missense = vars[ii].find('missense') != -1\n nonsense = vars[ii].find('nonsense') != -1\n\n variants[ii] = ('(PATH) ' if patho else '') + \\\n ('(L.PATH) ' if (lpatho & (not patho)) else '') + \\\n ('(MISS) ' if (missense & (not patho) & (not lpatho)) else '') + \\\n ('(NONS) ' if (nonsense & (not patho) & (not lpatho)) else '') + \\\n (gene if (len(prot) == 0) & (len(gene) > 0) else '') + \\\n (prot) + \\\n (' - ' if len(gene) > 0 else '') + variants[ii]\n\n genes[ii] = gene if len(gene) else variants[ii]\n\n return variants", "def eval_tracking(self, method, detection_method=None, iou_threshold=0.5):\n\n def extract(boxes):\n boxes = list(boxes)\n objs = list(map(lambda box: box.obj_id, boxes))\n box_arr = np.stack([box.ltwh for box in boxes]) if boxes else np.array([])\n return objs, box_arr\n\n self.print(f\"Evaluating tracking...\")\n accumulator = mm.MOTAccumulator(auto_id=True)\n\n with self.gt as gt:\n gt = chain(gt, repeat(iter(())))\n gt = self.tqdm(gt, total=len(self))\n for tracks, gt_boxes in zip(\n self.tracking(method, detection_method, False), gt\n ):\n gt_objs, gt_box_arr = extract(gt_boxes)\n track_objs, track_box_arr = extract(tracks)\n dists = mm.distances.iou_matrix(\n gt_box_arr, track_box_arr, max_iou=iou_threshold\n )\n\n accumulator.update(\n gt_objs, track_objs, dists,\n )\n\n mh = mm.metrics.create()\n summary = mh.compute(\n accumulator, metrics=[\"num_frames\", \"idf1\", \"mota\"], name=\"Full\"\n )\n\n self.print(summary)\n return summary[\"idf1\"][0]", "def concept_tags_similarity(method1, method2, nl_dict, nl_model):\n # nl_sim = gensim_lang_cossim(method1, method2, nl_dict, nl_model)\n jaccard_sim, info_dict = counter_cossim(method1.concepts, method2.concepts)\n # avg_sim = (jaccard_sim + nl_sim) / 2\n # if len(info_dict) > 1:\n # print(method2)\n return jaccard_sim, info_dict", "def get_traits(self) -> list:", "def ImageProcessor(self):\n # get the tokens (and the bounding boxes for vizualization)\n\t\tleft_token, left_box, right_token, right_box, success_ = self.gest_rec.Get_gest(self.original, self.use_single_hand)\n\t\tprint (\"Hand gestures detection success: {2}. 
token: ({0}, {1})\".format(right_token, left_token, success_))\n\n\t\tif success_:\n\t\t\t# ROBO_GEST mode\n\t\t\tif self.robo_gest_mode:\n # reverse left and right since camera(left, right) == person(right, left)\n # then pass it to generate instruction\n\t\t\t\tget_token, done_ = self.ins.decode(right_token, left_token)\n\t\t\t\tprint (get_token, done_)\n\t\t\t\tif done_:\n print \n print (\"*** Decoded Instruction: {0}\".format(get_token))\n print\n\n\n\n\t\t\t# For Menue Selection only\n\t\t\tif self.menue_mode:\n\t\t\t\tmen_ins_, men_done_ = self.men_sel.decode(right_token, left_token)\n #print(men_ins_, men_done_)\n\t\t\t\tif men_done_:\n print \n print (\"Decoded Instruction: {0}\".format(men_ins_))\n print\n\t\t\t\t\tmen_tok = men_ins_.split(' ')\n\t\t\t\t\tif (len(men_tok)>0 and men_tok[1] in self.menue_map.keys()):\n\t\t\t\t\t\tmenue_selected = self.menue_map[men_tok[1]]\n\t\t\t\t\t\tmsg = Tags()\n\t\t\t\t\t\ttag = Tag()\n\t\t\t\t\t\ttag.id = menue_selected\n\t\t\t\t\t\tmsg.tags = [tag]\n\t\t\t\t\t\tself.tags_pub.publish(msg)\n\t\t\t\t\t\tprint ('***** Menue selected :: {0}'.format(menue_selected))\n print\n\t\t\n\n\n\t\tif self.bench_test:\n\t\t\tself.showFrame(self.original, 'test_viz')\n\n\t\tif self.publish_image:\n if left_box != None:\n output_img = cv2.rectangle(self.original,(left_box[0],left_box[2]), (left_box[1], left_box[3]), (255,0,0), 2)\n else:\n output_img=self.original\n\t\t\tmsg_frame = CvBridge().cv2_to_imgmsg(output_img, encoding=\"bgr8\")\n\t\t\tself.ProcessedRaw.publish(msg_frame)", "def eval_detection_at(self, method=None, iou_threshold=0.1):\n self.print(f\"Evaluating detections @{iou_threshold}\")\n with self.gt as gt:\n # TODO: check if self.total_frames is working\n # gt = chain(gt, repeat(iter(())))\n gt = self.tqdm(gt, total=len(self))\n matches = (\n match_detections(detections, gt_boxes, iou_threshold)\n for detections, gt_boxes in zip(self.detect(method), gt)\n )\n matches = chain.from_iterable(matches)\n matches = sorted(matches, key=lambda m: m[0].confidence)\n TP = np.fromiter(map(lambda x: x[1] is not None, matches), bool)\n precision = TP.cumsum() / (np.arange(len(TP)) + 1)\n precision = np.flip(np.maximum.accumulate(precision[::-1]))\n\n recall = TP.cumsum() / len(self.gt)\n recall_diff = np.diff(np.insert(recall, 0, 0))\n score = (precision * recall_diff).sum()\n self.print(f\"AP@{iou_threshold}: {score}\")\n return score", "def community_detection(G, method='louvain'):\n if method == 'louvain':\n communities = community.best_partition(G)\n nx.set_node_attributes(G, 'modularity', communities)\n return communities", "def get_samples(self, model, unlabeled_data, method, feature_method, number=5, limit=10000):\n \n samples = []\n \n if limit == -1 and len(unlabeled_data) > 10000 and self.verbose: # we're drawing from *a lot* of data this will take a while \n print(\"Get predictions for a large amount of unlabeled data: this might take a while\")\n else:\n # only apply the model to a limited number of items \n shuffle(unlabeled_data)\n unlabeled_data = unlabeled_data[:limit]\n \n with torch.no_grad():\n v=0\n for item in unlabeled_data:\n text = item[1]\n \n feature_vector = feature_method(text)\n hidden, logits, log_probs = model(feature_vector, return_all_layers=True) \n \n prob_dist = torch.exp(log_probs) # the probability distribution of our prediction\n \n score = method(prob_dist.data[0]) # get the specific type of uncertainty sampling\n \n item[3] = method.__name__ # the type of uncertainty sampling used \n item[4] = score\n \n 
samples.append(item)\n \n \n samples.sort(reverse=True, key=lambda x: x[4]) \n return samples[:number:]", "def getKinetics(self):\n from rmgpy.chemkin import load_chemkin_file\n from rmgpy.kinetics import ArrheniusEP, ArrheniusBM\n from rmgpy.reaction import Reaction\n from rmgpy.data.base import Entry\n\n kinetics_data_list = []\n chem_file = os.path.join(self.path, 'chemkin', 'chem.inp')\n dict_file = os.path.join(self.path, 'RMG_Dictionary.txt')\n if self.foreign:\n read_comments = False\n else:\n read_comments = True\n if os.path.exists(dict_file):\n spc_list, rxn_list = load_chemkin_file(chem_file, dict_file, read_comments=read_comments)\n else:\n spc_list, rxn_list = load_chemkin_file(chem_file, read_comments=read_comments)\n\n for reaction in rxn_list:\n # If the kinetics are ArrheniusEP and ArrheniusBM, replace them with Arrhenius\n if isinstance(reaction.kinetics, (ArrheniusEP, ArrheniusBM)):\n reaction.kinetics = reaction.kinetics.to_arrhenius(reaction.get_enthalpy_of_reaction(298))\n\n if os.path.exists(dict_file):\n reactants = ' + '.join([moleculeToInfo(reactant) for reactant in reaction.reactants])\n arrow = '&hArr;' if reaction.reversible else '&rarr;'\n products = ' + '.join([moleculeToInfo(product) for product in reaction.products])\n href = reaction.get_url()\n else:\n reactants = ' + '.join([reactant.label for reactant in reaction.reactants])\n arrow = '&hArr;' if reaction.reversible else '&rarr;'\n products = ' + '.join([product.label for product in reaction.products])\n href = ''\n\n source = str(reaction).replace('<=>', '=')\n entry = Entry()\n entry.result = rxn_list.index(reaction) + 1\n forward_kinetics = reaction.kinetics\n forward = True\n chemkin = reaction.to_chemkin(spc_list)\n\n rev_kinetics = reaction.generate_reverse_rate_coefficient()\n rev_kinetics.comment = 'Fitted reverse reaction. ' + reaction.kinetics.comment\n\n rev_reaction = Reaction(reactants=reaction.products, products=reaction.reactants, kinetics=rev_kinetics)\n chemkin_rev = rev_reaction.to_chemkin(spc_list)\n\n kinetics_data_list.append([reactants, arrow, products, entry, forward_kinetics, source, href, forward, chemkin, rev_kinetics, chemkin_rev])\n\n return kinetics_data_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_tadbs() > Array Gets TAD boundaries to knock out.
def get_tadbs(self):
    dom_ob = Domains(cfg, chr, mode="ko")
    tf_ob = TFChip(cfg, chr)
    tadbs = dom_ob.get_tad_boundaries(tf_ob, ctcf="negative")
    cum_pos = get_cumpos(self.cfg, self.chr)
    tadbs = tadbs + cum_pos
    tadbs = np.array(tadbs)
    return tadbs
[ "def get_turbines(self):\r\n\r\n return self.turbines", "def list_tiddlers(self, bag):\n self._prepare_twp(bag)\n return self.build_non_js_version(bag)", "def bidders(self):\n return self._bidders", "def tribus(self):\n return self._tribus", "def bake_dynamics():\n obj = doc.GetFirstObject()\n if obj is None:\n return\n\n # Make a list of all Dynamics tags\n dynamics_tags = []\n while obj:\n tags = obj.GetTags()\n for tag in tags:\n # Check if it's a dynamics body tag\n if tag.CheckType(180000102):\n dynamics_tags.append(tag)\n obj = get_next_object(obj)\n\n for dynamics_tag in dynamics_tags:\n print(tag.GetName())\n c4d.CallButton(dynamics_tag, c4d.RIGID_BODY_CACHE_BAKE)", "def get_t_weights(analysis_time, t_boundary):\n tstart2t0 = (analysis_time - t_boundary[0]).total_seconds()\n t02tend = (t_boundary[1] - analysis_time).total_seconds()\n t_total = tstart2t0 + t02tend\n tstart2t0_weight = tstart2t0/t_total\n t02tend_weight = t02tend/t_total\n \n return [t02tend_weight, tstart2t0_weight]", "def _get_controlPoints(self) -> \"std::vector< adsk::core::Ptr< adsk::core::Point2D >,std::allocator< adsk::core::Ptr< adsk::core::Point2D > > >\" :\n return _core.NurbsCurve2D__get_controlPoints(self)", "def get_rtboundaries(self):\r\n return (self.rawdata_frame[0], self.rawdata_frame[1])", "def ifttt_budget_options():\n if \"IFTTT-Service-Key\" not in request.headers or \\\n request.headers[\"IFTTT-Service-Key\"] != get_ifttt_key():\n return json.dumps({\"errors\": [{\"message\": \"Invalid key\"}]}), 401\n try:\n data = get_ynab_budgets()\n return json.dumps({\"data\": data})\n except:\n traceback.print_exc()\n return json.dumps({\"data\": [{\"label\": \"ERROR retrieving YNAB data\",\n \"value\": \"\"}]})", "def gcFromBT(bt):\n if bt['StartGameClock']:\n return([bt['StartGameClock'],\n bt['EndGameClock']])\n else:\n return([bt['GameClock'],\n bt['GameClock']])", "def getTweakControls(self):\n return []", "def getTentativeBB(self):\n\t\treturn self.minx, self.miny, self.maxx, self.maxy", "def items(self):\n return zip(self.times, self.droplets)", "def ttms(self):\n return self._slices.keys()", "def betti_curves(bars, length):\n bettis = np.zeros((len(bars), length))\n for i in range(bettis.shape[0]):\n bn = bars[i][1]\n for bar in bn:\n birth = int(bar[0])\n death = length+1 if np.isinf(bar[1]) else int(bar[1]+1)\n bettis[i][birth:death] += 1\n return bettis", "def get_all_bodegas():\n bodegas = Bodega.objects.all()\n return bodegas", "def boys(self):\n return self._boys", "def get_upper_bound(self) -> _ARRAY:\n return self._upper_bound", "def all_waypoint_data(self):\n return self.waypoints" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get_ctcf_indices() > Array Gets CTCF positions to knock out.
def get_ctcf_indices(self):
    "gets CTCF positions"
    ctcf_ob = TFChip(cfg, chr)
    data = ctcf_ob.get_ctcf_data()
    data = data.filter(['start'], axis=1)

    "converts to cumulative indices"
    cum_pos = get_cumpos(self.cfg, self.chr)
    data["start"] = data["start"] + cum_pos
    indices = np.array(data["start"])
    return indices
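A standalone sketch of the cumulative-index conversion used here (and in get_tadbs above): chromosome-local start positions are shifted by an assumed genome-wide offset before being returned as a numpy array. The toy positions and the offset value are made up for illustration.

import numpy as np
import pandas as pd

# Toy CTCF peaks; only the 'start' column is kept, mirroring the method.
data = pd.DataFrame({"chr": ["chr21"] * 3, "start": [12, 340, 981], "end": [20, 360, 999]})
data = data.filter(['start'], axis=1)
cum_pos = 10_000                      # assumed cumulative offset of this chromosome
data["start"] = data["start"] + cum_pos
indices = np.array(data["start"])
print(indices)                        # [10012 10340 10981]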
[ "def get_chemical_indices(self):\n return self.indices", "def computeCindices(self):\n\n self.surf_index_C = PUBSlib.computesurfindices(self.nsurf, self.nedge, self.ngroup, self.surf_edge, self.edge_group, self.group_m)\n self.edge_index_C = PUBSlib.computeedgeindices(self.nedge, self.ngroup, self.edge_group, self.group_m)\n self.nC = self.nvert\n self.nC += self.edge_index_C[-1,1]\n self.nC += self.surf_index_C[-1,1]\n\n if self.printInfo:\n print '# Control points =',self.nC", "def getCoordIndices(self) -> \"int32_t const *\":\n return _coin.SoConvexDataCache_getCoordIndices(self)", "def getTexIndices(self) -> \"int32_t const *\":\n return _coin.SoConvexDataCache_getTexIndices(self)", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def indices(self) -> List[int]:\n return self.__indices", "def getFaceIndices(self) -> \"int const *\":\n return _coin.SoGlyph_getFaceIndices(self)", "def getIndices(self) -> \"int32_t const *\":\n return _coin.SoNormalCache_getIndices(self)", "def SoLazyElement_getColorIndices(arg2: 'SoState') -> \"int32_t const *\":\n return _coin.SoLazyElement_getColorIndices(arg2)", "def _getPtychographyPositions(self) -> np.ndarray:\n\n p1 = self._scan_params.scan_area_buffer_npix\n p2 = self._probe_params.npix - p1 - self._obj_params.obj_w_border_npix\n positions_x = np.arange(p1, p2, self._scan_params.scan_step_npix)\n positions = []\n\n for r in positions_x:\n for c in positions_x:\n positions.append([r,c])\n return np.array(positions)", "def indices(self):\n nx, ny, nz = self.shape()\n return [(ix,iy,iz) for ix in range(nx) for iy in range(ny) for iz in range(nz)]", "def getColorIndices(arg1: 'SoState') -> \"int32_t const *\":\n return _coin.SoLazyElement_getColorIndices(arg1)", "def get_cell_indices():\r\n\r\n cell_indices = []\r\n\r\n for idx in np.ndindex(9, 9):\r\n cell_indices.append(tuple(t * 60 for t in idx))\r\n\r\n return cell_indices", "def getNumCoordIndices(self) -> \"int\":\n return _coin.SoConvexDataCache_getNumCoordIndices(self)", "def encode_c_positions(seq):\n indexes = \"\"\n prev_index = 0\n index = seq.find(\"C\",prev_index)\n offset = index + 34\n while True:\n if index < 0:\n break\n while offset > 255:\n indexes += chr(255)\n offset -= 255\n indexes += chr(offset)\n\n prev_index = index + 1\n index = seq.find(\"C\",prev_index)\n offset = index - prev_index + 34\n return indexes", "def indices_fd(self):\n if self._indices_fd is None:\n start = self.startindex_fd\n stop = self.network.number_of_edges()\n self._indices_fd = np.array(range(start, start + stop))\n return self._indices_fd", "def get_selected_compartment_indices() -> Set[int]:\n return _canvas.sel_compartments_idx.item_copy()", "def get_indices_section(self):\n return np.unique(self.sv_map.volume_surf_coordinates['triangles'])", "def neighbor_indices(self):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
normalize_embed(representations, zero_embed) > Array, Array Normalize each row separately.
def normalize_embed(self, representations, zero_embed):
    "normalize representations"
    for n in range(len(representations)):
        norm = np.linalg.norm(representations[n, :])
        if norm == 0:
            continue
        else:
            representations[n, :] = representations[n, :] / norm

    "normalize padding"
    norm = np.linalg.norm(zero_embed)
    zero_embed = zero_embed / norm
    return representations, zero_embed
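A self-contained numpy sketch of the same row-wise normalization on toy data, making the zero-row guard concrete; the array values are arbitrary.

import numpy as np

reps = np.array([[3.0, 4.0], [0.0, 0.0], [1.0, 1.0]])
for n in range(len(reps)):
    norm = np.linalg.norm(reps[n, :])
    if norm != 0:                     # all-zero rows (e.g. padding) are left untouched
        reps[n, :] = reps[n, :] / norm
print(reps)                           # [[0.6, 0.8], [0., 0.], [0.7071..., 0.7071...]]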
[ "def normalize(self, embeddings):\n\n # Calculation is different for matrices vs vectors\n if len(embeddings.shape) > 1:\n return embeddings / np.linalg.norm(embeddings, axis=1).reshape(-1, 1)\n\n return embeddings / np.linalg.norm(embeddings)", "def normalize_embeddings(embeddings, dim=0, order=2) -> torch.nn.Embedding:\n # norm = get_norm(embeddings.weight, dim=dim, order=order)\n norm = torch.norm(embeddings.weight, p=order, dim=dim, keepdim=True)\n normalized = torch.div(embeddings.weight, norm)\n embeddings.weight = torch.nn.Parameter(normalized)\n\n return embeddings", "def normalize(array):\n if sum(array) == 0:\n return array\n return map(lambda n: float(n) / sum(array), array)", "def normalizeRows(x):\n\n ### YOUR CODE HERE\n # we first compute each row norm\n per_row_norm = np.sqrt(np.sum(np.square(x), axis=1)).reshape(-1,1)\n\n # now we divide each value of each row by the row's norm\n x = x / per_row_norm\n ### END YOUR CODE\n\n return x", "def normalize(arr):\n\t\tarr[np.isnan(arr)]=0\n\t\t\n\t\tif arr.shape != (74,74):\n\t\t\traise ValueError(\"Image's shape must be (74,74)!\")\n\t\telse:\n\t\t\treturn ((arr - arr.min()) * (1 / (arr.max() - arr.min())))", "def normalize(x):\n \n output = np.array([image/255 for image in x])\n return output", "def normalize(self):\n list(map(lambda normalization: normalization[0](self.entry,normalization[1]), self.normalizations))", "def normalize(array, normalization):\n\n for i,x in enumerate(array):\n array[i]=x/normalization", "def _unscale_preds(self, arr):\n arr = np.multiply(arr, self.dataset.scaling_params['scale'][:self.dataset.n_outputs]) + \\\n self.dataset.scaling_params['center'][:self.dataset.n_outputs]\n\n if self.config.log_squasher:\n arr = self.dataset.reverse_log_squasher(arr)\n return arr", "def normalize(word_vectors):\n\n # get norm for each row in word vector matrix\n norms = np.apply_along_axis(np.linalg.norm, 1, word_vectors)\n norms = norms.reshape((norms.size, 1))\n\n # create new matrix of normalized word vectors\n normalized_word_vectors = word_vectors / norms\n\n return normalized_word_vectors", "def normalize(self):\n l = self.len()\n if l == 0:\n raise ZeroDivisionError, \"can't normalize a zero-length vector\"\n s = self.data\n s[0] /= l; s[1] /= l; s[2] /= l", "def post_normalize(hist):\n hn = hist.Clone()\n for i in range(1, hn.nbins()+1):\n hn.SetBinContent(i, hn.GetBinContent(i) / hn.GetBinWidth(i))\n hn.SetBinError(i, hn.GetBinError(i) / hn.GetBinWidth(i))\n if hn.GetBinContent(i)<0:\n hn.SetBinContent(i, 0)\n return hn", "def normalizeRows(x):\r\n ### START CODE HERE ### (≈ 2 lines of code)\r\n # Compute x_norm as the norm 2 of x. 
Use np.linalg.norm(..., ord = 2, axis = ..., keepdims = True)\r\n x_norm = np.linalg.norm(x, ord = 2, axis = 1, keepdims = True)\r\n # Divide x by its norm.\r\n x = x/x_norm\r\n ### END CODE HERE ###\r\n return x", "def normalize_frame_array(frame_array, length):\n norm_frame_list = []\n for frames in frame_array:\n norm_frame_list.append(normalize_frames(frames, length))\n\n return np.array(norm_frame_list)", "def _normalize_rows(self, Y):\n return Y / la.norm(Y, axis=1)[:, np.newaxis]", "def _normalize_inputs(inputs):\n if Layer._is_number(inputs):\n return np.asarray([inputs])\n if isinstance(inputs, list):\n return np.asarray(inputs)\n return inputs", "def normalize_weights(\n weights: np.ndarray,\n) -> np.ndarray:\n values = np.asarray(weights, dtype=np.float64)\n # to overcome edge cases - ensure that there are no zeros before normalization\n values = _ensure_non_zero(values)\n norm = np.linalg.norm(values, ord=1)\n if norm > 0:\n values = values / norm\n # but also after, as some of the resulting values (because of close to zero\n # numerical values) could have turned into zeros after normalization\n values = _ensure_non_zero(values)\n return values", "def _normalize(v, *args, **kwargs):\n return v / _length(v, keepdims=True, *args, **kwargs)", "def _normalize(vectors):\n norms = np.sqrt(np.sum(vectors ** 2, axis=1))\n vectors /= norms.reshape((len(norms), 1))\n return vectors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
ko_representations(representations, start, indices, zero_embed, mode) > Array, Array Alter representations to feed to knockout.
def ko_representations(self, representations, start, indices, zero_embed, mode="average"):
    window = self.cfg.ko_window
    size = len(representations)
    if isinstance(indices, (int, np.integer)):
        indices = [indices]

    "alter according to mode in config"
    for ind in indices:
        if mode == "average":
            if ind - start - window < 0 or ind - start + window > size:
                window = int(window // 2)
            window_left_arr = representations[ind - start - window: ind - start, :].copy()
            window_right_arr = representations[ind - start + 1: ind - start + window + 1, :].copy()
            window_arr_avg = np.stack((window_left_arr, window_right_arr)).mean(axis=0).mean(axis=0)
            representations[ind - start, :] = window_arr_avg
        elif mode == "zero":
            representations[ind - start, :] = np.zeros((1, cfg.pos_embed_size))
        elif mode == "shift":
            representations[ind - start:size - 1, :] = representations[ind - start + 1:size, :]
            representations[size - 1, :] = np.zeros((1, cfg.pos_embed_size))
        elif mode == "padding":
            representations[ind - start, :] = zero_embed[:cfg.pos_embed_size]

    if mode == "reverse":
        representations = np.fliplr(representations)
        zero_embed = np.flip(zero_embed)
    elif mode == "normalize":
        representations, zero_embed = self.normalize_embed(representations, zero_embed)

    return representations, zero_embed
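A minimal standalone sketch of the "average" knockout mode on toy data: the row at the knockout position is replaced by the mean of its left and right flanking windows. The array shapes, the window size, and the start offset are made up, and the other modes are omitted.

import numpy as np

window = 2
representations = np.arange(20, dtype=float).reshape(10, 2)   # 10 bins x 2-dim embeddings
ind, start = 5, 0                                             # cumulative knockout index and frame start

left = representations[ind - start - window: ind - start, :]
right = representations[ind - start + 1: ind - start + window + 1, :]
representations[ind - start, :] = np.stack((left, right)).mean(axis=0).mean(axis=0)
print(representations[ind - start])                           # averaged replacement row: [10. 11.]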
[ "def _build_representations(self):\n \n N = self.order()\n\n # Build all the Irreducible Representations\n for k in range(0, int(N // 2) + 1):\n self.irrep(k)\n \n # Build all Representations\n\n # add all the irreps to the set of representations already built for this group\n self.representations.update(**self.irreps)\n\n # build the regular representation\n self.representations['regular'] = self.regular_representation\n self.representations['regular'].supported_nonlinearities.add('vectorfield')", "def _repr_parts(self):\n return []", "def normalize_embed(self, representations, zero_embed):\n\n \"normalize representations\"\n for n in range(len(representations)):\n norm = np.linalg.norm(representations[n, :])\n if norm == 0:\n continue\n else:\n representations[n, :] = representations[n, :] / norm\n\n \"normalize padding\"\n norm = np.linalg.norm(zero_embed)\n zero_embed = zero_embed / norm\n return representations, zero_embed", "def _all_representations(self):\n return _remove_identical(itertools.chain(\n self.orphan_representations,\n (model.representation for group, model in self._all_models()\n if model.representation)))", "def _render(self, indices, mode=\"human\"):\n raise NotImplementedError", "def albumentations2densetorch(augmentation):\n from albumentations import Compose\n\n def wrapper_func(sample):\n if \"names\" in sample:\n del sample[\"names\"]\n targets = {\n name: \"image\" if name == \"image\" else \"mask\" for name in sample.keys()\n }\n output = Compose(make_list(augmentation), additional_targets=targets)(**sample)\n return output\n\n return wrapper_func", "def _convert_shapes1(self, design):\n for _pp in design.component_instances:\n _libid = -1\n _devn = -1\n _libname = 'default'\n _pname = _pp.library_id\n if -1 != _pp.library_id.find(':'):\n _libname, _pname = _pp.library_id.split(':')\n \n for _li, _ll in enumerate(self.libraries):\n if _libname == _ll.name:\n _libid = _li\n for _di, _dd in enumerate(_ll.devsets[0].shapesets):\n if _pname == _dd.name:\n _devn = _di\n break\n break\n\n self.shapeheader.parts.append(Eagle.Part(\n name=_pp.instance_id, libid=_libid, devsetndx=_devn,\n symvar=1, techno=1)) # after OpenJSON all parts are split\n return", "def frompresentationlist(samples, targets, presentations):\n if not isinstance(presentations, list):\n raise ValueError(\"Presentation argument must be a list.\")\n\n if isinstance(presentations[0], int):\n presentations = [str(x) for x in presentations]\n\n samples = samples.swaplevel(0,3,axis=0).sort_index(axis=0).loc[(presentations,),:].swaplevel(0,3,axis=0).sort_index(axis=0)\n targets = targets.swaplevel(0,3,axis=0).sort_index(axis=0).loc[(presentations,),:].swaplevel(0,3,axis=0).sort_index(axis=0)\n\n return samples, targets", "def no_overlap_representation_space(\n max_type_index: int,\n max_state_index: int,\n max_color_value: int,\n width: int,\n height: int,\n) -> Dict[str, np.ndarray]:\n\n rep = default_representation_space(\n max_type_index, max_state_index, max_color_value, width, height\n )\n\n # increment channels to ensure there is no overlap\n rep['grid'][:, :, [1, 4]] += max_type_index\n rep['grid'][:, :, [2, 5]] += max_type_index + max_state_index\n\n # default also returns position and orientation, which must be removed\n rep['agent'] = rep['agent'][3:]\n\n rep['agent'][1] += max_type_index\n rep['agent'][2] += max_type_index + max_state_index\n\n return rep", "def interpretations(self, interpretations):\n\n self._interpretations = interpretations", "def 
_create_intrinsically_motivated_actor_model(self):", "def appearance_reconstructions(self):\n if self.appearance_parameters:\n return [self.fitter.appearance_model.instance(w)\n for w in self.appearance_parameters]\n else:\n return [self.fitter.template for _ in self.shapes]", "def __str__(self):\r\n if len(self.__shapes) > 0:\r\n return \"['\" + \"', '\".join(list(self.__shapes.keys())) + \"']\"\r\n else:\r\n return \"[]\"", "def _set_irreps(self):\n # the totally symmetric irrep must always be the first element of elements\n if self.pg == 'cn':\n if self.n == 1:\n # C1\n a = IrreducibleRepresentation(self, (1,))\n self.elements = FrozenOrderedBidict({ 'a': a })\n # besser: dimensions?\n self.symop_multiplicity = (1,)\n self.order = 1\n\n if self.pg == 'cnv':\n if self.n == 2:\n # C2v\n a1 = IrreducibleRepresentation(self, (1, 1, 1, 1))\n a2 = IrreducibleRepresentation(self, (1, 1,-1,-1))\n b1 = IrreducibleRepresentation(self, (1,-1, 1,-1))\n b2 = IrreducibleRepresentation(self, (1,-1,-1, 1))\n self.elements = FrozenOrderedBidict({ 'a1': a1, 'a2': a2, 'b1': b1, 'b2': b2 })\n self.symop_multiplicity = (1, 1, 1, 1)\n self.order = 4\n\n if self.pg == 'dn':\n if self.n == 3:\n # D3\n a1 = IrreducibleRepresentation(self, (1, 1, 1))\n a2 = IrreducibleRepresentation(self, (1, 1,-1))\n e = IrreducibleRepresentation(self, (2,-1, 0), degenerate=True)\n self.elements = FrozenOrderedBidict({ 'a1': a1, 'a2': a2, 'e': e })\n self.symop_multiplicity = (1, 2, 3)\n self.order = 6\n\n if self.pg == 'dnh':\n if self.n == 2:\n # D2h\n ag = IrreducibleRepresentation(self, (1, 1, 1, 1, 1, 1, 1, 1))\n b1g = IrreducibleRepresentation(self, (1, 1,-1,-1, 1, 1,-1,-1))\n b2g = IrreducibleRepresentation(self, (1,-1,-1, 1, 1,-1, 1,-1))\n b3g = IrreducibleRepresentation(self, (1,-1, 1,-1, 1,-1,-1, 1))\n au = IrreducibleRepresentation(self, (1, 1, 1, 1,-1,-1,-1,-1))\n b1u = IrreducibleRepresentation(self, (1, 1,-1,-1,-1,-1, 1, 1))\n b2u = IrreducibleRepresentation(self, (1,-1,-1, 1,-1, 1,-1, 1))\n b3u = IrreducibleRepresentation(self, (1,-1, 1,-1,-1, 1, 1,-1))\n self.elements = FrozenOrderedBidict({ 'ag': ag, 'b1g': b1g, 'b2g': b2g, 'b3g': b3g,\n 'au': au, 'b1u': b1u, 'b2u': b2u, 'b3u': b3u })\n self.symop_multiplicity = (1, 1, 1, 1, 1, 1, 1, 1)\n self.order = 8\n\n if self.n == 6:\n # D6h\n a1g = IrreducibleRepresentation(self, (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))\n a2g = IrreducibleRepresentation(self, (1, 1, 1, 1,-1,-1, 1, 1, 1, 1,-1,-1))\n b1g = IrreducibleRepresentation(self, (1,-1, 1,-1, 1,-1, 1, 1,-1,-1,-1, 1))\n b2g = IrreducibleRepresentation(self, (1,-1, 1,-1,-1, 1, 1, 1,-1,-1, 1,-1))\n e1g = IrreducibleRepresentation(self, (2, 1,-1,-2, 0, 0, 2,-1, 1,-2, 0, 0), degenerate=True)\n e2g = IrreducibleRepresentation(self, (2,-1,-1, 2, 0, 0, 2,-1,-1, 2, 0, 0), degenerate=True)\n a1u = IrreducibleRepresentation(self, (1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1,-1))\n a2u = IrreducibleRepresentation(self, (1, 1, 1, 1,-1,-1,-1,-1,-1,-1, 1, 1))\n b1u = IrreducibleRepresentation(self, (1,-1, 1,-1, 1,-1,-1,-1, 1, 1, 1,-1))\n b2u = IrreducibleRepresentation(self, (1,-1, 1,-1,-1, 1,-1,-1, 1, 1,-1, 1))\n e1u = IrreducibleRepresentation(self, (2, 1,-1,-2, 0, 0,-2, 1,-1, 2, 0, 0), degenerate=True)\n e2u = IrreducibleRepresentation(self, (2,-1,-1, 2, 0, 0,-2, 1, 1,-2, 0, 0), degenerate=True)\n self.elements = FrozenOrderedBidict({ 'a1g': a1g, 'a2g': a2g, 'b1g': b1g, 'b2g': b2g, 'e1g': e1g, 'e2g': e2g,\n 'a1u': a1u, 'a2u': a2u, 'b1u': b1u, 'b2u': b2u, 'e1u': e1u, 'e2u': e2u })\n self.symop_multiplicity = (1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 
3)\n self.order = 24", "def visualize_RN(self):\n\n react_dict = self.__net_dict\n N = len(react_dict)\n keys = list(react_dict.keys())\n\n # construct top of matrix\n matrix_RN = []\n matrix_RN.append(['---'])\n\n for i in range(N):\n current_complex = self.bin_to_string(keys[i])\n matrix_RN[0].append(current_complex)\n\n # construct remaining parts\n count = 0\n for i in range(N):\n row_leader = self.bin_to_string(keys[i])\n matrix_RN.append([row_leader])\n count += 1\n\n # if complex i reacts from/to complex j, entry is 1. Else entry is 0\n for j in keys:\n\n if j in react_dict[keys[i]]:\n matrix_RN[count].append(['1'])\n else:\n matrix_RN[count].append(['0'])\n\n print('\\n'.join([''.join(['{:10}'.format(str(item)) for item in row]) for row in matrix_RN]))\n\n return", "def CreateRepresentation(aProxy, view, **extraArgs):\n\n global rendering\n if not aProxy:\n raise RuntimeError (\"proxy argument cannot be None.\")\n if not view:\n raise RuntimeError (\"view argument cannot be None.\")\n if \"proxyName\" in extraArgs:\n display = CreateProxy(\"representations\", extraArgs['proxyName'], None)\n del extraArgs['proxyName']\n else:\n display = view.SMProxy.CreateDefaultRepresentation(aProxy.SMProxy, 0)\n if display:\n display.UnRegister(None)\n if not display:\n return None\n extraArgs['proxy'] = display\n proxy = rendering.__dict__[display.GetXMLName()](**extraArgs)\n proxy.Input = aProxy\n proxy.UpdateVTKObjects()\n view.Representations.append(proxy)\n return proxy", "def _convert_shapes2(self, design):\n for _nn in design.nets:\n _web = None\n if 'type' in _nn.attributes:\n if 'bus' == _nn.attributes['type']:\n _width = 0.762\n _web = Eagle.Bus(name=_nn.net_id)\n self.shapeheader.buses.append(_web)\n else:\n _clrs = []\n _attrre = re.compile(r'^netclearance(\\d+)$')\n for _aa in _nn.attributes:\n _attr = _attrre.match(_aa)\n if None != _attr:\n _clrs.append((_attr.group(1), _nn.attributes[_aa]))\n\n self.netclasses.append(Eagle.NetClass( # duplicates are cleared below\n num=_nn.attributes['netclass'], \n name=_nn.attributes['netname'], \n width=_nn.attributes['netwidth'],\n drill=_nn.attributes['netdrill'],\n clearances=_clrs,\n ))\n _width = 0.1524 # _nn.attributes['netwidth']\n _web = Eagle.Net(name=_nn.net_id, \n nclass=_nn.attributes['netclass'])\n self.shapeheader.nets.append(_web)\n else:\n _width = 0.1524\n _web = Eagle.Net(name=_nn.net_id, nclass=0)\n self.shapeheader.nets.append(_web)\n\n _prpts = set() # processed points\n for _pp in _nn.points:\n _pt = _nn.points[_pp]\n for _opp in _pt.connected_points:\n if not _opp in _prpts: # not yet processed\n _opt = None\n try:\n _opt = _nn.points[_opp]\n except KeyError: # not from current net\n for _xxn in design.nets:\n if _opp in _xxn.points:\n _opt = _xxn.points[_opp]\n break\n else:\n raise ValueError(\"undefined point ID: %s\" % str(_opp))\n\n _web.shapes.append(Eagle.Wire(\n x1=_pt.x, y1=_pt.y,\n x2=_opt.x,\n y2=_opt.y,\n style=\"Continuous\", layer=91, width=_width))\n\n _prpts.add(_pp)\n letter_pin_numbers = []\n for _rr in _pt.connected_components:\n _pno = -1\n for _in, _ii in enumerate(self.shapeheader.parts):\n if _rr.instance_id == _ii.name:\n _pno = 1 + _in\n break\n try:\n pin_number = int(_rr.pin_number)\n except ValueError:\n if letter_pin_numbers:\n pin_number = letter_pin_numbers.pop() + 1\n else: \n pin_number = 1\n letter_pin_numbers.append(pin_number)\n\n _web.shapes.append(Eagle.PinRef(\n partno= _pno, gateno=1, \n pinno=pin_number,\n ))\n return", "def generate_notes(model, network_input, pitchnames, 
n_vocab,prediction_output,notes_Quantity):\n # pick a random sequence from the input as a starting point for the prediction\n start = numpy.random.randint(0, len(network_input)-1)\n\n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\n\n pattern = network_input[start]\n \n\n # generate 500 notes\n for note_index in range(notes_Quantity):\n prediction_input = numpy.reshape(pattern, (1, len(pattern), 1))\n prediction_input = prediction_input / float(n_vocab)\n\n prediction = model.predict(prediction_input, verbose=0)\n predictionX=prediction[0]\n # print(predictionX) #預測下一個音符所有可能的機率\n k=0\n maxP = predictionX[0] # 第一名的機率\n prediction_Choose=0 # 第一名是第幾位\n for i in predictionX:\n if float(i)>float(maxP):\n maxP=predictionX[k]\n prediction_Choose=k\n k+=1\n k=0\n a=random.randint(0,len(predictionX)-1)\n max2P = predictionX[a] # 第二名的機率\n prediction_Choose2=a # 第二名是第幾位\n for i in predictionX:\n if float(i)>float(max2P):\n if k != prediction_Choose:\n a=random.randint(1,4) #隨機定義第二名\n if a<4:\n max2P=predictionX[k]\n prediction_Choose2=k\n k+=1\n \n # print(str(prediction_Choose)) #他預測下個音符是多少\n # print(str(prediction_Choose2)) #他預測下個音符是多少(隨機的第二名)\n prediction2 = model.predict_classes(prediction_input, verbose=0)\n # print(prediction2) #他真實預測下個音符是多少\n index = numpy.argmax(prediction)\n # print(index)\n a=random.randint(1,3)\n if a<2:\n index=prediction_Choose #僅用第一名預測的音符寫譜\n else:\n index=prediction_Choose2 #用隨機的第二名寫譜\n result = int_to_note[index]\n \n \n prediction_output.append(result)\n \n\n pattern.append(index)\n pattern = pattern[1:len(pattern)]\n\n return prediction_output", "def _nativeObjects( self ):\r\n\t\tlayerNodes = mxs.pyhelper.getLayerNodes(self._nativePointer)\r\n\t\tlayerNodes.reverse()\r\n\t\treturn layerNodes" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compute_kodiff(pred_data, ko_pred_df, indices) > Array Compute difference between predicted contacts after and before knockout
def compute_kodiff(self, pred_data, ko_pred_df, ind):
        "initialize"
        ko_diffs = np.zeros((11,))
        win = self.cfg.ko_increment
        diff = np.arange(0, 101, 10)

        "compute diff"
        for j, d in enumerate(diff):
            "take subset of original data in window"
            if j == 0:
                subset_og = pred_data.loc[pred_data["i"] == ind]
            else:
                subset_og = pred_data.loc[
                    ((pred_data["i"] <= ind + j * win) & (pred_data["i"] > ind + (j - 1) * win))
                    | ((pred_data["i"] >= ind - j * win) & (pred_data["i"] < ind - (j - 1) * win))]

            if subset_og.empty:
                continue

            "take subset of knockout data in window"
            if j == 0:
                subset_ko = ko_pred_df.loc[ko_pred_df["i"] == ind]
            else:
                subset_ko = ko_pred_df.loc[
                    ((ko_pred_df["i"] <= ind + j * win) & (ko_pred_df["i"] > ind + (j - 1) * win))
                    | ((ko_pred_df["i"] >= ind - j * win) & (ko_pred_df["i"] < ind - (j - 1) * win))]

            "compute mean diff in window"
            merged_df = pd.merge(subset_og, subset_ko, on=["i", "j"])
            merged_df = merged_df.filter(['i', 'j', 'pred', 'ko_pred'], axis=1)
            mean_diff = np.mean(np.array(merged_df["ko_pred"]) - np.array(merged_df["pred"]))
            ko_diffs[j] = mean_diff

        return ko_diffs
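# Worked example (hypothetical values, assumed purely for illustration; not taken from
# the original source). With self.cfg.ko_increment = 10 and a knocked-out bin ind = 500:
#   window j = 0 keeps only rows with bin i == 500;
#   window j = 3 keeps rows whose bin i lies in 521..530 (right of the locus) or 470..479 (left);
#   ko_diffs[j] is then mean(ko_pred - pred) over the bin pairs present in both frames.
# The returned 11-element array therefore traces how the knockout effect varies with
# genomic distance from the knocked-out locus, e.g.
#   ko_diffs = ko_ob.compute_kodiff(pred_data, ko_pred_df, ind=500)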
[ "def j_index(true_labels, predicts):\n if not hasattr(true_labels, 'shape'):\n true_labels = np.asarray(true_labels)\n if not hasattr(predicts, 'shape'):\n predicts = np.asarray(predicts)\n N, L = true_labels.shape\n s = 0.0\n for i in range(N):\n inter = sum((true_labels[i, :] * predicts[i, :]) > 0) * 1.\n union = sum((true_labels[i, :] + predicts[i, :]) > 0) * 1.\n if union > 0:\n s += inter / union\n elif np.sum(true_labels[i, :]) == 0:\n s += 1.\n return s * 1. / N", "def _diffs(self, test_Xs, train_Xs):\n test_Xs = test_Xs.view(test_Xs.shape[0], 1, *test_Xs.shape[1:])\n train_Xs = train_Xs.view(1, train_Xs.shape[0], *train_Xs.shape[1:])\n return test_Xs - train_Xs", "def predict(self,kernel_test):\n \n y_predict = np.zeros(kernel_test.shape[1])\n \n for i in range(kernel_test.shape[1]):\n #print(self.alpha_.shape,self.sv.shape,self.sv,self.sv_label[:,0])\n y_predict[i] = sum(alpha * sv_label * kernel_test[sv,i] for alpha, sv, sv_label in zip(self.alpha_, self.sv, self.sv_label[:,0]))\n return y_predict + self.b\n\n prediction= np.sign(y_predict + self.b)\n \n return prediction", "def predict(self, user_index: int) -> np.ndarray:", "def backcalculate_predictions(df, predictions, num_hist_bars, num_predict_bars):\n rs_preds = predictions.reshape(-1, num_predict_bars, 5)\n\n all_preds = []\n print('backcalculating values...')\n for i in tqdm(range(rs_preds.shape[0])):\n # get the point just before the predictions for\n # calculating actual value from pct_chg\n point = df.iloc[i + num_hist_bars - 1]\n pr = back_calc_one_point_pct(point, rs_preds[i])\n all_preds.append(pr)\n\n return all_preds", "def DP_Pitch_Estimation(f0_candidates,score,nonDPindices,DPindices):\r\n \r\n rows=len(f0_candidates)\r\n cols=len(f0_candidates[0])\r\n pitch = np.zeros((1,rows))\r\n indsmax=np.argmax(score,axis=1)\r\n f0_candidates_dp = np.zeros((rows,cols))\r\n for m in np.arange(0,len(nonDPindices)):\r\n f0_candidates_dp[int(nonDPindices[m])] = f0_candidates[int(nonDPindices[m]),indsmax[int(nonDPindices[m])]]\r\n #print(f0_candidates_dp[int(nonDPindices[m]),:])\r\n for m in np.arange(0,len(DPindices)):\r\n f0_candidates_dp[int(DPindices[m]),:]=f0_candidates[int(DPindices[m]),:]\r\n #print(f0_candidates_dp[int(DPindices[m]),:])\r\n \r\n VuV = np.sign(abs(np.sum(f0_candidates_dp,axis=1)))\r\n boundary = abs(VuV-np.append(VuV[1:,],np.zeros(1)))\r\n boundary_inds = np.where(boundary==1)\r\n \r\n #for m in np.arange(0,len(f0_candidates_dp)):\r\n #print(f0_candidates_dp[m,:])\r\n for i2 in np.arange(0,len(boundary_inds[0]),2):\r\n inds_temp = np.arange(boundary_inds[0][i2]+1,boundary_inds[0][i2+1]+1)\r\n \r\n if len(inds_temp)>1:\r\n x = f0_candidates_dp[inds_temp,:]\r\n rows1=len(x)\r\n cols1=len(x[0])\r\n c=np.zeros((rows1,cols1))\r\n b=np.zeros((rows1,cols1))\r\n out=np.zeros((1,rows1))\r\n temp2=np.zeros((1,cols1))\r\n \r\n for j1 in np.arange(1,rows1):\r\n for j2 in np.arange(0,cols1):\r\n for j3 in np.arange(0,cols1):\r\n temp2[0][j3]=c[j1-1,j3]+np.square(x[j1,j2]-x[j1-1,j3])\r\n c[j1,j2]=np.amin(temp2[0])\r\n b[j1,j2]=np.argmin(temp2[0])\r\n\r\n indd = np.argmin(c[-1,:])\r\n for j in np.arange(len(out[0])-1,-1,-1):\r\n out[0][j]=x[j][int(indd)]\r\n indd=b[j][int(indd)]\r\n pitch[0][inds_temp]=np.matrix.transpose(out[0])\r\n else:\r\n pitch[0][inds_temp]=f0_candidates_dp[inds_temp,indsmax[inds_temp]]\r\n \r\n uvInds = np.where(VuV==0)\r\n for m in np.arange(0,len(uvInds[0])):\r\n pitch[0][uvInds[0][m]]=f0_candidates[uvInds[0][m],indsmax[uvInds[0][m]]]\r\n pitch = np.matrix.transpose(pitch)\r\n \r\n 
return(pitch)", "def abs_sales_diff(pred, target):\n assert len(pred) == len(target)\n return sum([abs(pred[i] - target[i]) for i in range(len(pred))])", "def get_difference_map(self, other, tolerance=0, plot=False):\n arr1 = self.ensemble_array\n arr2 = other.ensemble_array\n\n # Stack the two arrays along the fourth dimension. Values from the second ensemble are set to negative.\n difference_array = np.append(arr1, -arr2, axis=3)\n # Find all points that are sampled in the ensemble:\n max_diff = np.max(difference_array, axis=3) # max_diff nonzero values correspond to all points in arr1 that have been sampled.\n min_diff = np.min(difference_array, axis=3) # min_diff nonzero values correspond to all points in arr2 that have been sampled.\n diff_diff = max_diff - min_diff # nonzero indices of diff_diff indicate all points in 3D space that have hotspot values in at least 1 ensemble.\n\n indices = np.transpose(diff_diff.nonzero()) # get the indices per value, rather than per dimension.\n\n on = arr1[diff_diff.nonzero()]\n off = arr2[diff_diff.nonzero()]\n\n # idx_dict has the shape {(3D indices): Kolmogorov-Smirnov 2sample D-value}.\n idx_dict = {}\n for (a, b, c), n, f in zip(indices, on, off):\n # Get all values within the radius specified by tolerance. Will be of shape (2*tol+1, 2*tol+1, 2*tol+1) , so flatten.\n sel = difference_array[a - tolerance: a + tolerance+1, b - tolerance: b + tolerance+1, c - tolerance: c + tolerance+1].flatten()\n # Get the Kolmogorov-Smirnov D statistic for the distributions at the sample\n d = self.get_2_KS_scores((n, f), plot=plot)\n idx_dict[(a, b, c)] = d\n\n # Create an array of the modified D scores (can be used as clustering input).\n iarr = np.zeros(diff_diff.shape)\n for i, d_val in idx_dict.items():\n iarr[i] = d_val\n\n return iarr", "def equalized_odds_pred(y_true_train,y_pred_train,group_train,y_pred_test,group_test):\n y_true_train = np.array([1 if y==1 else -1 for y in y_true_train])\n y_pred_train = np.array([1 if y==1 else -1 for y in y_pred_train])\n y_pred_test = np.array([1 if y==1 else -1 for y in y_pred_test])\n\n assert np.array_equal(np.unique(y_true_train),np.array([-1,1])), 'y_true_train has to contain -1 and 1 and only these'\n assert np.array_equal(np.unique(y_pred_train),np.array([-1,1])), 'y_pred_train has to contain -1 and 1 and only these'\n assert np.array_equal(np.unique(group_train),np.array([0,1])), 'group_train has to contain 0 and 1 and only these'\n assert np.all(np.isin(y_pred_test,np.array([-1,1]))), 'y_pred_test has to only contain -1 or 1'\n assert np.all(np.isin(group_test, np.array([0,1]))), 'group_test has to only contain 0 or 1'\n\n eq_odd_pred_test=np.copy(y_pred_test)\n\n alpha1=np.sum(np.logical_and(y_pred_train==1,np.logical_and(y_true_train == 1, group_train == 0))) / float(\n np.sum(np.logical_and(y_true_train == 1, group_train == 0)))\n beta1 = np.sum(np.logical_and(y_pred_train == 1, np.logical_and(y_true_train == 1, group_train == 1))) / float(\n np.sum(np.logical_and(y_true_train == 1, group_train == 1)))\n alpha2 = np.sum(np.logical_and(y_pred_train == 1, np.logical_and(y_true_train == -1, group_train == 0))) / float(\n np.sum(np.logical_and(y_true_train == -1, group_train == 0)))\n beta2 = np.sum(np.logical_and(y_pred_train == 1, np.logical_and(y_true_train == -1, group_train == 1))) / float(\n np.sum(np.logical_and(y_true_train == -1, group_train == 1)))\n\n\n prob_Ye1_Ae1 = float(np.sum(np.logical_and(y_true_train == 1, group_train == 1)))/y_true_train.size\n prob_Ye1_Ae0 = 
float(np.sum(np.logical_and(y_true_train == 1, group_train == 0)))/y_true_train.size\n prob_Yem1_Ae1 = float(np.sum(np.logical_and(y_true_train == -1, group_train == 1)))/y_true_train.size\n prob_Yem1_Ae0 = float(np.sum(np.logical_and(y_true_train == -1, group_train == 0)))/y_true_train.size\n\n p11 = cvx.Variable()\n p10 = cvx.Variable()\n pm11 = cvx.Variable()\n pm10 = cvx.Variable()\n\n constraints = [p10 * alpha1 + pm10 * (1 - alpha1) == p11 * beta1 + pm11 * (1 - beta1),\n p10 * alpha2 + pm10 * (1 - alpha2) == p11 * beta2 + pm11 * (1 - beta2),\n p11 >= 0, p10 >= 0, pm11 >= 0, pm10 >= 0, p11 <= 1, p10 <= 1, pm11 <= 1, pm10 <= 1]\n\n\n obj = cvx.Minimize((-prob_Ye1_Ae0 * alpha1 + prob_Yem1_Ae0 * alpha2) * p10 + (-prob_Ye1_Ae1 * beta1 + prob_Yem1_Ae1 * beta2) * p11 + (\n (1 - alpha2) * prob_Yem1_Ae0 + (-1 + alpha1) * prob_Ye1_Ae0) * pm10 + (\n (1 - beta2) * prob_Yem1_Ae1 + (-1 + beta1) * prob_Ye1_Ae1) * pm11 + prob_Ye1_Ae0 + prob_Ye1_Ae1)\n\n prob = cvx.Problem(obj, constraints)\n prob.solve()\n #print(\"status:\", prob.status)\n\n p10V=np.amin([1,np.amax([0,p10.value])])\n p11V=np.amin([1,np.amax([0,p11.value])])\n pm10V=np.amin([1,np.amax([0,pm10.value])])\n pm11V=np.amin([1,np.amax([0,pm11.value])])\n\n test_ind_y1_A0=np.logical_and(y_pred_test == 1, group_test == 0)\n to_flip=np.random.choice(np.array([0,1]),size=np.sum(test_ind_y1_A0),p=np.array([p10V,1-p10V]))\n eq_odd_pred_test[np.where(test_ind_y1_A0)[0][to_flip==1]]=-1\n\n test_ind_y1_A1 = np.logical_and(y_pred_test == 1, group_test == 1)\n to_flip = np.random.choice(np.array([0, 1]), size=np.sum(test_ind_y1_A1), p=np.array([p11V, 1 - p11V]))\n eq_odd_pred_test[np.where(test_ind_y1_A1)[0][to_flip == 1]] = -1\n\n test_ind_ym1_A1 = np.logical_and(y_pred_test == -1, group_test == 1)\n to_flip = np.random.choice(np.array([0, 1]), size=np.sum(test_ind_ym1_A1), p=np.array([1-pm11V, pm11V]))\n eq_odd_pred_test[np.where(test_ind_ym1_A1)[0][to_flip == 1]] = 1\n\n test_ind_ym1_A0 = np.logical_and(y_pred_test == -1, group_test == 0)\n to_flip = np.random.choice(np.array([0, 1]), size=np.sum(test_ind_ym1_A0), p=np.array([1 - pm10V, pm10V]))\n eq_odd_pred_test[np.where(test_ind_ym1_A0)[0][to_flip == 1]] = 1\n\n return eq_odd_pred_test", "def evaluate(idxs):\n for key, result_analysis in analysis_dct.items():\n if \"avg-\" in key:\n new_idxs = list(set([i[0:-2] for i in idxs]))\n else:\n new_idxs = idxs\n # \n df_X = result_analysis.trinary.df_X\n ser_y = result_analysis.trinary.ser_y\n states = list(set(ser_y.values))\n #\n ensemble = classifier_ensemble.ClassifierEnsemble(\n filter_high_rank=100, size=100)\n ensemble.fit(df_X, ser_y)\n df_X_test = df_X.loc[new_idxs, :]\n ser_y_test = ser_y.loc[new_idxs]\n df_predict = ensemble.predict(df_X_test)\n df_predict[\"true state\"] = ser_y_test\n # Construct the predictions\n predictions = []\n for clf in ensemble.clfs:\n df_X_test_sub = df_X[ensemble.columns]\n dct = {i: [] for i in states}\n for idx in new_idxs:\n {i: dct[i].append(clf.coef_[i].dot(df_X_test_sub.loc[idx, :]))\n for i in states}\n df_result = pd.DataFrame(dct, index=new_idxs)\n predictions.append(df_result)\n result_analysis.df_predict = df_predict\n result_analysis.predictions = predictions\n result_analysis.ensemble = ensemble", "def performance(weights, y, xT):\n from proj1_helpers import predict_labels\n compare_pred = predict_labels(weights, xT)\n compare_pred -= y.reshape((len(y), 1))\n\n non_zero = 0\n for i in range(len(compare_pred)):\n if compare_pred[i] != 0:\n non_zero += 1\n\n return 1 - non_zero / compare_pred.size", 
"def sum_of_difference(predicted,actual):\n sum=0\n for i in range(len(predicted)):\n sum+=math.pow((predicted[i]-actual[i]),2)\n return sum;", "def find_outliers(y_true_f, y_pred_f):\n\n y_true = np.load(y_true_f)\n y_pred_s = np.load(y_pred_f)\n samples, x, y, z = y_true.shape\n print (\"Number of Samples : %d, image size : %d x %d \"%(samples, x, y))\n y_pred = np.round(y_pred_s)\n y_true_sum = y_true.sum(axis=(1, 2), keepdims=True).reshape(samples)\n y_pred_sum = y_pred.sum(axis=(1, 2), keepdims=True).reshape(samples) \n lb0 = (np.where(y_true_sum == 0))\n pd0 = (np.where(y_pred_sum == 0))\n lb0 = list(lb0[0])\n pd0 = list(pd0[0])\n print('-'*30)\n print (\"Outliers\")\n print('-'*30)\n print (\"Sample Index of labels with zero contours\", lb0)\n print (\"Sample Index of predictions with zero contours\", pd0)\n ypr = []\n for idx in pd0:\n ypr.append(y_pred_s[idx,:,:,:].max())\n print (\"max-sigmoid values with zero contours\", ypr)\n\n img_d = []\n img_j = []\n for i in range(samples) :\n smooth = 0.001\n y_truex = y_true[i].flatten()\n y_predx = y_pred[i].flatten()\n intersection = np.sum(y_truex * y_predx)\n dice_coefx = (2. * intersection + smooth) / (np.sum(y_truex) + np.sum(y_predx) + smooth)\n jaccard_coefx = float(intersection + smooth) / float(np.sum(y_truex) + np.sum(y_predx)-intersection + smooth)\n dice_coefx = np.around(dice_coefx, decimals=3)\n jaccard_coefx = np.around(jaccard_coefx, decimals=3)\n img_d.append(dice_coefx)\n img_j.append(jaccard_coefx)\n \n\n \n plt.hist(img_d, bins=[i/20 for i in range(20)])\n plt.grid()\n plt.title('Distribution dice coef')\n plt.xlabel('dice_coef')\n plt.ylabel('Sample count')\n plt.show()\n \n plt.hist(img_j, bins=[i/20 for i in range(20)])\n plt.grid()\n plt.title('Distribution of jaccard coef (IoU)')\n plt.xlabel('jaccard_coef (IoU)')\n plt.ylabel('Sample count')\n plt.show()\n \n \n px0 = [i for i,v in enumerate(img_d) if v ==1.0]\n px1 = [i for i,v in enumerate(img_d) if v > .98]\n px25 = [i for i,v in enumerate(img_d) if v <= .7 and v >.5]\n px50 = [i for i,v in enumerate(img_d) if v < .1]\n px100 = [i for i,v in enumerate(img_d) if v == 0]\n print('-'*30)\n print (\"Statistics on missed predictions of contour pixels (white pixels)\")\n print('-'*30)\n print (\"max, min\", min(img_d), max(img_d))\n print (\"Sample Index where dice coef = 100%\",len(px0), px0)\n print (\"Sample Index where dice coef >98%\",len(px1), px1)\n print (\"Sample Index where dice coef 50%-70%\",len(px25), px25)\n print (\"Sample Index where dice coef <10%\", len(px50),px50)\n print (\"Sample Index where dice coef = 0%\", len(px100),px100)\n print('-'*30)\n print('-'*30)", "def perform_ko(self, model):\n\n cfg = self.cfg\n\n \"load data\"\n if cfg.run_tal and cfg.hnisz_region == \"tal1\":\n self.cfg.get_tal1_only = True\n data_loader = self.prepare_tal1_lmo2()\n elif cfg.run_tal and cfg.hnisz_region == \"lmo2\":\n self.cfg.get_lmo2_only = True\n data_loader = self.prepare_tal1_lmo2()\n else:\n data_loader = get_data_loader_chr(cfg, self.chr, shuffle=False)\n\n \"get zero embed\"\n cfg.full_test = False\n cfg.compute_pca = False\n cfg.get_zero_pred = True\n zero_embed = test_model(model, cfg, self.chr)\n\n \"get knockout indices depending on experiment\"\n if cfg.run_tal:\n if cfg.hnisz_region == \"tal1\":\n cfg.ko_experiment = \"ctcf\"\n indices = cfg.tal1ko_indices\n elif cfg.hnisz_region == \"lmo2\":\n cfg.ko_experiment = \"ctcf\"\n indices = np.array(cfg.lmo2ko_indices) + get_cumpos(cfg, 11)\n else:\n if cfg.ko_experiment == \"ctcf\":\n if 
cfg.ctcf_indices == \"all\":\n indices = ko_ob.get_ctcf_indices()\n indices = sample(list(indices), 10)\n else:\n indices = ko_ob.cfg.ctcf_indices_22\n elif cfg.ko_experiment == \"foxg1\":\n indices = cfg.foxg1_indices\n elif cfg.ko_experiment == \"tadbs\":\n indices = ko_ob.get_tadbs()\n\n \"plotting and metrics\"\n n_indices = len(indices)\n diff_list = np.zeros((n_indices, 11))\n diff_mat = np.zeros((n_indices, 200, 200))\n \"run for all indices\"\n for i, indice in enumerate(indices):\n \"get representations\"\n representations, start, stop, pred_data = self.get_trained_representations(method=\"hiclstm\")\n\n \"alter representations\"\n representations, zero_embed = self.ko_representations(representations, start, indice, zero_embed,\n mode=cfg.ko_mode)\n\n if self.cfg.load_ko:\n ko_pred_df = pd.read_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n else:\n \"run through model using altered representations, save ko predictions\"\n _, ko_pred_df = model.perform_ko(data_loader, representations, start, zero_embed, mode=\"ko\")\n if self.cfg.save_kopred:\n ko_pred_df.to_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n\n \"compute difference between WT and KO predictions\"\n if self.cfg.compute_avg_diff:\n ko_diffs = self.compute_kodiff(pred_data, ko_pred_df, indice)\n diff_list[i] = ko_diffs\n\n \"get merged heatmap\"\n pred_data = pd.merge(pred_data, ko_pred_df, on=[\"i\", \"j\"])\n pred_data = pred_data.rename(columns={\"ko_pred\": \"v\"})\n hic_mat, st = get_heatmaps(pred_data, no_pred=False)\n # simple_plot(hic_mat, mode=\"reds\")\n\n \"get diff mat\"\n hic_win = indices_diff_mat(indice, st, hic_mat, mode=cfg.ko_experiment)\n n_win = len(hic_win)\n diff_mat[i, :n_win, :n_win] = hic_win\n\n diff_mat = diff_mat.mean(axis=0)\n ko = np.triu(diff_mat)\n pred = np.tril(diff_mat).T\n diff_mat = ko - pred\n simple_plot(diff_mat, mode=\"diff\")\n np.save(cfg.output_directory + \"tad_diff_zero_ctctn.npy\", diff_mat)\n mean_diff = np.mean(diff_list, axis=1)\n return mean_diff, ko_pred_df, pred_data", "def KME_score(c1, c2):\n c1.index = c1.index.astype(int)\n c2.index = c2.index.astype(int)\n n = int(max(c1.index.max(), c2.index.max())) + 1\n\n # build the full dataset\n data = pd.DataFrame(index=np.arange(n), columns=['c1_down', 'c1_up', 'c2_down', 'c2_up'], dtype='float')\n\n # fill the dataset\n for df, df_lab in zip([c1, c2], [['c1_down', 'c1_up'], ['c2_down', 'c2_up']]):\n data.loc[df.index, df_lab] = np.array(df)\n\n # complete the dataset by interpolation\n data = data.interpolate(method='linear', limit_direction='forward')\n\n assert np.all(~data.isna())\n\n dif = np.linalg.norm(data['c1_up'] - data['c2_up'], ord=1) + np.linalg.norm(data['c1_down'] - data['c2_down'],\n ord=1)\n # we normalize to ease comparison\n dif = dif / n\n return dif", "def rolling_backtest_hmm(self, X, df, n_preds=15, window_len=1500, progress_bar=True, verbose=False):\n\n # Create 3- and 4-D array to store predictions and covariances\n self.preds = np.empty(shape=(len(df) - window_len, n_preds, self.n_assets)) # 3-D array\n self.cov = np.empty(shape=(len(df) - window_len, n_preds, self.n_assets, self.n_assets)) # 4-D array\n\n for t in tqdm.trange(window_len, len(df)):\n # Slice data into rolling sequences\n df_rolling = df.iloc[t - window_len:t]\n X_rolling = X.iloc[t - window_len:t]\n\n # fit rolling data with model, return predicted means and covariances, posteriors and state sequence\n pred_mu, pred_cov, posteriors, 
state_sequence = self.fit_model_get_uncond_dist(X_rolling, df_rolling, n_preds=n_preds, verbose=verbose)\n\n if np.any(np.isnan(pred_mu)) == True:\n print('t: ', t)\n print(pred_mu)\n print(f'NaNs in cov: {np.any(np.isnan(pred_cov))} -- NaNs in posteriors: {np.any(np.isnan(posteriors))} ')\n\n # TODO this is a quick fix and must be solved better in cases where no solutins are found!\n self.preds[t - window_len] = self.preds[t - window_len - 1]\n self.cov[t - window_len] = self.cov[t - window_len - 1]\n else:\n self.preds[t - window_len] = pred_mu\n self.cov[t - window_len] = pred_cov\n\n return self.preds, self.cov", "def double_event_indices(prediction, d_idx, c_idx):\n c_doubles = np.where(prediction[d_idx] == 1)[0]\n w_doubles = np.where(prediction[d_idx] == 0)[0]\n c_close_doubles = np.where(prediction[c_idx] == 1)[0]\n w_close_doubles = np.where(prediction[c_idx] == 0)[0]\n\n return c_doubles, w_doubles, c_close_doubles, w_close_doubles", "def takeDifferences(self):\n\n rawData = self.runData[\n self.runData.keys()[0]] # takes the dictionary down one level to all the obserrvations in this specific run\n \"\"\"\n we want to check how many values you have to figure our the configuraitons (either ABBA or ABA)\n \"\"\"\n numObs = len(rawData[rawData.keys()[0]].keys()) # number of observations per weigh\n differ = [] # the difference matrix also known as [A-B]\n sensi = []\n drift = []\n # print numObs\n if numObs == 4: # if ABBA\n # print(\"you got 4 keys\")#code for testing\n\n for y in sorted(rawData.keys()): # runs the loop through all the comparisons\n Observations = rawData[y]\n differ.append(float((Observations['A1'][0] + Observations['A2'][0]) - (\n Observations['B1'][0] + Observations['B2'][0])) / 2)\n sensi.append(float(\n Observations['A2'][0] - Observations['A1'][0] + Observations['B2'][0] - Observations['B1'][0]) / 2)\n drift.append(float((Observations['B1'][0] - Observations['A1'][0]) + (\n Observations['A2'][0] - Observations['B2'][0])) / 2)\n\n self.difference = np.atleast_2d(differ).T # takes the list, converts to array, makes it 2d and transposes it\n self.drift = np.atleast_2d(drift).T\n self.sensitivity = np.atleast_2d(sensi).T\n if self.debug:\n print 'difference matrix:'\n print self.difference\n # print 'sensitivity:',self.sensitivity\n # print 'drift:',self.drift", "def model_diff(model_a, model_b, n_words=100):\n n_topic_a = model_a.num_topics\n n_topic_b = model_b.num_topics\n D = np.zeros((n_topic_a, n_topic_b))\n for t_a in range(n_topic_a):\n for t_b in range(n_topic_b):\n set_a = set(\n [x[0] for x in model_a.get_topic_terms(t_a, topn=n_words)]\n )\n set_b = set(\n [x[0] for x in model_b.get_topic_terms(t_b, topn=n_words)]\n )\n D[t_a, t_b] = jaccard_distance(set_a, set_b)\n return D" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
perform_ko(model) > Array Loads data for chromosome. Loads representations. Alters representations. Gets padding representation. Runs through decoder. Computes mean diff between WT and KO. Saves predictions.
def perform_ko(self, model):
        cfg = self.cfg

        "load data"
        if cfg.run_tal and cfg.hnisz_region == "tal1":
            self.cfg.get_tal1_only = True
            data_loader = self.prepare_tal1_lmo2()
        elif cfg.run_tal and cfg.hnisz_region == "lmo2":
            self.cfg.get_lmo2_only = True
            data_loader = self.prepare_tal1_lmo2()
        else:
            data_loader = get_data_loader_chr(cfg, self.chr, shuffle=False)

        "get zero embed"
        cfg.full_test = False
        cfg.compute_pca = False
        cfg.get_zero_pred = True
        zero_embed = test_model(model, cfg, self.chr)

        "get knockout indices depending on experiment"
        if cfg.run_tal:
            if cfg.hnisz_region == "tal1":
                cfg.ko_experiment = "ctcf"
                indices = cfg.tal1ko_indices
            elif cfg.hnisz_region == "lmo2":
                cfg.ko_experiment = "ctcf"
                indices = np.array(cfg.lmo2ko_indices) + get_cumpos(cfg, 11)
        else:
            if cfg.ko_experiment == "ctcf":
                if cfg.ctcf_indices == "all":
                    indices = self.get_ctcf_indices()
                    indices = sample(list(indices), 10)
                else:
                    indices = self.cfg.ctcf_indices_22
            elif cfg.ko_experiment == "foxg1":
                indices = cfg.foxg1_indices
            elif cfg.ko_experiment == "tadbs":
                indices = self.get_tadbs()

        "plotting and metrics"
        n_indices = len(indices)
        diff_list = np.zeros((n_indices, 11))
        diff_mat = np.zeros((n_indices, 200, 200))
        "run for all indices"
        for i, indice in enumerate(indices):
            "get representations"
            representations, start, stop, pred_data = self.get_trained_representations(method="hiclstm")

            "alter representations"
            representations, zero_embed = self.ko_representations(representations, start, indice, zero_embed,
                                                                  mode=cfg.ko_mode)

            if self.cfg.load_ko:
                ko_pred_df = pd.read_csv(cfg.output_directory + "hiclstm_%s_afko_chr%s.csv" % (cfg.cell, str(self.chr)),
                                         sep="\t")
            else:
                "run through model using altered representations, save ko predictions"
                _, ko_pred_df = model.perform_ko(data_loader, representations, start, zero_embed, mode="ko")
                if self.cfg.save_kopred:
                    ko_pred_df.to_csv(cfg.output_directory + "hiclstm_%s_afko_chr%s.csv" % (cfg.cell, str(self.chr)),
                                      sep="\t")

            "compute difference between WT and KO predictions"
            if self.cfg.compute_avg_diff:
                ko_diffs = self.compute_kodiff(pred_data, ko_pred_df, indice)
                diff_list[i] = ko_diffs

            "get merged heatmap"
            pred_data = pd.merge(pred_data, ko_pred_df, on=["i", "j"])
            pred_data = pred_data.rename(columns={"ko_pred": "v"})
            hic_mat, st = get_heatmaps(pred_data, no_pred=False)
            # simple_plot(hic_mat, mode="reds")

            "get diff mat"
            hic_win = indices_diff_mat(indice, st, hic_mat, mode=cfg.ko_experiment)
            n_win = len(hic_win)
            diff_mat[i, :n_win, :n_win] = hic_win

        diff_mat = diff_mat.mean(axis=0)
        ko = np.triu(diff_mat)
        pred = np.tril(diff_mat).T
        diff_mat = ko - pred
        simple_plot(diff_mat, mode="diff")
        np.save(cfg.output_directory + "tad_diff_zero_ctctn.npy", diff_mat)
        mean_diff = np.mean(diff_list, axis=1)
        return mean_diff, ko_pred_df, pred_data
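# Usage sketch (hypothetical; the driver names below are assumptions, not a confirmed API
# of the original code base):
#   ko_ob = Knockout(cfg, chr=21)               # instance of the class owning perform_ko
#   model = load_trained_hiclstm(cfg)           # previously trained Hi-C LSTM model
#   mean_diff, ko_pred_df, pred_data = ko_ob.perform_ko(model)
# mean_diff holds one value per knocked-out index: the WT-vs-KO contact difference
# averaged over the 11 distance windows produced by compute_kodiff.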
[ "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_df", "def perform_tal1_ko(self, model):\n\n \"save representations\"\n self.chr = 1\n self.cfg.get_tal1_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"tal1\"\n _, ko_pred_df, _ = self.perform_ko(model)\n return ko_pred_df", "def evaluate(predict_path, data_path, div, y_vocab_path, log_path):\n #h = h5py.File(data_path, 'r')[div]\n y_vocab = cPickle.loads(open(y_vocab_path, 'rb').read())\n inv_y_vocab = {v: k for k, v in six.iteritems(y_vocab)}\n\n b_vocab = cPickle.loads(open(\"./data/b_vocab.cPickle\", 'rb').read())\n m_vocab = cPickle.loads(open(\"./data/m_vocab.cPickle\", 'rb').read())\n s_vocab = cPickle.loads(open(\"./data/s_vocab.cPickle\", 'rb').read())\n d_vocab = cPickle.loads(open(\"./data/d_vocab.cPickle\", 'rb').read())\n \n inv_b_vocab = {i: s for s, i in six.iteritems(b_vocab)}\n inv_m_vocab = {i: s for s, i in six.iteritems(m_vocab)}\n inv_s_vocab = {i: s for s, i in six.iteritems(s_vocab)}\n inv_d_vocab = {i: s for s, i in six.iteritems(d_vocab)}\n\n fin = open(predict_path, 'r')\n hit, n = defaultdict(lambda: 0), defaultdict(lambda: 0)\n print('loading ground-truth...')\n #CATE = np.argmax(h['cate'], axis=1)\n \n size = get_size(data_path, div)\n #CATE = toss_answer(data_path, div)\n \n bomb = toss_chunk_answer(data_path, div)\n for bx in bomb:\n for p, y in tqdm(zip(fin, bx), desc='bomb', total=len(list(bx))):\n # format y = (b, m, s, d) this is answer\n pid, b, m, s, d = p.split('\\t')\n b, m, s, d = list(map(int, [b, m, s, d])) # 나의 prediction\n #gt = list(map(int, inv_y_vocab[y].split('>'))) # 정답\n \n gt_b = inv_b_vocab[y[0]]\n gt_m = inv_m_vocab[y[1]]\n gt_s = inv_s_vocab[y[2]]\n gt_d = inv_d_vocab[y[3]]\n\n gt = [gt_b, gt_m, gt_s, gt_d]\n\n for depth, _p, _g in zip(['b', 'm', 's', 'd'],\n [b, m, s, d],\n gt):\n if _g == -1:\n continue\n n[depth] = n.get(depth, 0) + 1 # 총 개수 파악\n if _p == _g:\n hit[depth] = hit.get(depth, 0) + 1 # 맞은 개수 기록\n \n with open(log_path, 'w') as f:\n for d in ['b', 'm', 's', 'd']:\n if n[d] > 0:\n print('%s-Accuracy: %.3f(%s/%s)' % (d, hit[d] / float(n[d]), hit[d], n[d]))\n f.write('%s-Accuracy: %.3f(%s/%s) \\n' % (d, hit[d] / float(n[d]), hit[d], n[d]))\n score = sum([hit[d] / float(n[d]) * w\n for d, w in zip(['b', 'm', 's', 'd'],\n [1.0, 1.2, 1.3, 1.4])]) / 4.0\n print('score: %.3f' % score)\n f.write('score: %.3f\\n' % score)", "def predict(self, chips, windows):\n pass", "def buildModel(experiment):\n\n # Determine number of injections.\n N = experiment.number_of_injections\n\n # Physical constants\n Na = 6.02214179e23 # Avogadro's number (number/mol)\n kB = Na * 1.3806504e-23 / 4184.0 * Units.kcal / Units.mol / Units.K # Boltzmann constant (kcal/mol/K)\n C0 = 1.0 * Units.M # standard concentration (M)\n\n temperature = 298.15 * Units.K# temperature (K)\n beta = 1.0 / (kB * temperature) # inverse temperature 1/(kcal/mol)\n\n # Store sample cell volume and injection volume.\n Ls_stated = experiment.syringe_concentration # syringe concentration (M)\n P0_stated = experiment.cell_concentration # cell concentration (M)\n V0 = experiment.cell_volume # cell volume (L)\n\n V0 = V0 - 0.044*Units.ml # Tellinghuisen volume correction for VP-ITC (L) # not sure for iTC-200\n \n # Uncertainties in concentrations.\n dP0 = 0.10 * P0_stated # uncertainty 
in protein stated concentration (M) - 10% error \n dLs = 0.10 * Ls_stated # uncertainty in ligand stated concentration (M) - 10% error \n\n # For CAII:CBS\n dP0 = 0.10 * P0_stated # uncertainty in protein stated concentration (M) \n dLs = 0.01 * Ls_stated # uncertainty in ligand stated concentration (M) \n \n # Extract evolved injection heats.\n injection_heats = numpy.zeros([N], numpy.float64)\n for (n, injection) in enumerate(experiment.injections):\n injection_heats[n] = injection['evolved_heat']\n \n print \"injection heats\"\n print injection_heats / Units.ucal\n\n # Determine guesses for initial values\n nlast = 4 # number of injections to use\n duration_n = numpy.zeros([N], numpy.float64) # duration_n[n] is the duration of injection n\n for n in range(N):\n duration_n[n] = experiment.injections[n]['duration'] \n sigma2 = injection_heats[N-nlast:N].var() / duration_n[N-nlast:N].sum()\n log_sigma_guess = numpy.log(numpy.sqrt(sigma2 / Units.cal**2 * Units.second)) # cal/s # TODO: Use std of individual filtered measurements instead\n\n try:\n DeltaG_guess = -5.0 * Units.kcal/Units.mol\n print \"Ls_stated = %f uM\" % (Ls_stated / Units.uM)\n print \"final injection volume = %f uL\" % (experiment.injections[N-1]['volume'] / Units.ul)\n DeltaH_0_guess = - injection_heats[N-1] / (Ls_stated * experiment.injections[N-1]['volume'])\n DeltaH_guess = - (injection_heats[0] / (Ls_stated * experiment.injections[0]['volume']) - DeltaH_0_guess)\n except:\n DeltaG_guess = 0.0\n DeltaH_0_guess = 0.0\n DeltaH_guess = 0.0\n\n # DEBUG\n print \"\"\n print \"\"\n print \"INITIAL GUESS\"\n print \"DeltaH_0_guess = %.3f ucal/uL\" % (DeltaH_0_guess / (Units.ucal/Units.ul))\n first_index = 0\n last_index = experiment.injections[0]['first_index']\n filter_time = experiment.injections[0]['filter_period']\n print \"Computing sigma2\"\n sigma2 = (experiment.differential_power[first_index:last_index] - experiment.baseline_power[first_index:last_index]).var() / filter_time\n print sigma2\n log_sigma_guess = numpy.log(numpy.sqrt(sigma2 / Units.cal**2 * Units.second)) # cal/s # TODO: Use std of individual filtered measurements instead\n print log_sigma_guess\n print \"\"\n print \"\"\n print \"\"\n print \"\"\n\n # Determine min and max range for log_sigma\n log_sigma_min = log_sigma_guess - 10.0\n log_sigma_max = log_sigma_guess + 10.0\n\n # Determine range for priors for thermodynamic parameters.\n DeltaG_min = -40. * Units.kcal/Units.mol # \n DeltaG_max = +40. * Units.kcal/Units.mol # \n DeltaH_min = -100. * Units.kcal/Units.mol # \n DeltaH_max = +100. 
* Units.kcal/Units.mol # \n heat_interval = injection_heats.max() - injection_heats.min()\n DeltaH_0_min = injection_heats.min() - heat_interval # \n DeltaH_0_max = injection_heats.max() + heat_interval # \n\n # Create model.\n model = dict()\n\n # Define constants.\n model['beta'] = beta\n\n # Define priors.\n @pymc.deterministic\n def zero():\n return 0.0\n\n if (P0_stated > 0.0):\n model['P0'] = pymc.Lognormal('P0', mu=numpy.log(P0_stated), tau=1.0/numpy.log(1.0+(dP0/P0_stated)**2), value=P0_stated) # true cell concentration (M)\n else:\n model['P0'] = zero\n\n if (Ls_stated > 0.0):\n model['Ls'] = pymc.Lognormal('Ls', mu=numpy.log(Ls_stated), tau=1.0/numpy.log(1.0+(dLs/Ls_stated)**2), value=Ls_stated) # true syringe concentration (M)\n else:\n model['Ls'] = zero\n\n model['log_sigma'] = pymc.Uniform('log_sigma', lower=log_sigma_min, upper=log_sigma_max, value=log_sigma_guess) # natural logarithm of std dev of integrated injection heat divided by 1 cal\n model['DeltaG'] = pymc.Uniform('DeltaG', lower=DeltaG_min, upper=DeltaG_max, value=DeltaG_guess) # DeltaG (kcal/mol)\n model['DeltaH'] = pymc.Uniform('DeltaH', lower=DeltaH_min, upper=DeltaH_max, value=DeltaH_guess) # DeltaH (kcal/mol)\n model['DeltaH_0'] = pymc.Uniform('DeltaH_0', lower=DeltaH_0_min, upper=DeltaH_0_max, value=DeltaH_0_guess) # heat of mixing and mechanical injection (cal/volume)\n\n @pymc.deterministic\n def zero():\n return 0.0\n\n @pymc.deterministic\n def expected_injection_heats(DeltaG=model['DeltaG'], DeltaH=model['DeltaH'], DeltaH_0=model['DeltaH_0'], P0=model['P0'], Ls=model['Ls']):\n \"\"\"\n Expected heats of injection for two-component binding model.\n\n ARGUMENTS\n\n DeltaG - free energy of binding (kcal/mol)\n DeltaH - enthalpy of binding (kcal/mol)\n DeltaH_0 - heat of injection (cal/mol)\n\n \"\"\"\n\n debug = False\n\n Kd = numpy.exp(beta * DeltaG) * C0 # dissociation constant (M)\n\n # Compute dilution factor for instantaneous injection model (perfusion).\n d_n = numpy.zeros([N], numpy.float64) # d_n[n] is the dilution factor for injection n\n dcum_n = numpy.ones([N], numpy.float64) # dcum_n[n] is the cumulative dilution factor for injection n\n if debug: print \"%5s %24s %24s\" % ('n', 'd_n', 'dcum_n')\n for n in range(N):\n d_n[n] = 1.0 - (experiment.injections[n]['volume'] / V0) # dimensionless dilution factor for injection n\n dcum_n[n:] *= d_n[n]\n if debug: print \"%5d %24f %24f\" % (n, d_n[n], dcum_n[n])\n if debug: print \"\"\n \n # Compute complex concentrations.\n Pn = numpy.zeros([N], numpy.float64) # Pn[n] is the protein concentration in sample cell after n injections (M)\n Ln = numpy.zeros([N], numpy.float64) # Ln[n] is the ligand concentration in sample cell after n injections (M)\n PLn = numpy.zeros([N], numpy.float64) # PLn[n] is the complex concentration in sample cell after n injections (M)\n if debug: print \"%5s %24s %24s %24s %24s %24s\" % ('n', 'P (umol)', 'L (umol)', 'Pn (uM)', 'Ln (uM)', 'PLn (uM)')\n for n in range(N):\n # Instantaneous injection model (perfusion)\n P = V0 * P0 * dcum_n[n] # total quantity of protein in sample cell after n injections (mol)\n L = V0 * Ls * (1. 
- dcum_n[n]) # total quantity of ligand in sample cell after n injections (mol)\n PLn[n] = 0.5/V0 * ((P + L + Kd*V0) - numpy.sqrt((P + L + Kd*V0)**2 - 4*P*L)); # complex concentration (M)\n Pn[n] = P/V0 - PLn[n]; # free protein concentration in sample cell after n injections (M)\n Ln[n] = L/V0 - PLn[n]; # free ligand concentration in sample cell after n injections (M)\n if debug: print \"%5d %24f %24f %24f %24f %24f\" % (n, P / Units.umol, L / Units.umol, Pn[n] / Units.uM, Ln[n] / Units.uM, PLn[n] / Units.uM)\n \n # Compute expected injection heats.\n q_n = numpy.zeros([N], numpy.float64) # q_n_model[n] is the expected heat from injection n\n q_n[0] = (-DeltaH) * V0 * (PLn[0] - d_n[0]*0.0) + (-DeltaH_0) * experiment.injections[0]['volume'] # first injection\n for n in range(1,N):\n q_n[n] = (-DeltaH) * V0 * (PLn[n] - d_n[n]*PLn[n-1]) + (-DeltaH_0) * experiment.injections[n]['volume'] # subsequent injections\n\n # Debug output\n if debug:\n print \"DeltaG = %6.1f kcal/mol ; DeltaH = %6.1f kcal/mol ; DeltaH_0 = %6.1f ucal/injection\" % (DeltaG / (Units.kcal/Units.mol), DeltaH / (Units.kcal/Units.mol), DeltaH_0 / Units.ucal)\n for n in range(N):\n print \"%6.1f\" % (PLn[n] / Units.uM),\n print \"\"\n for n in range(N):\n print \"%6.1f\" % (q_n[n] / Units.ucal),\n print \"\"\n for n in range(N):\n print \"%6.1f\" % (injection_heats[n] / Units.ucal),\n print \"\"\n print \"\"\n\n return q_n\n\n @pymc.deterministic\n def tau(log_sigma=model['log_sigma']):\n \"\"\"\n Injection heat measurement precision.\n \n \"\"\"\n return numpy.exp(-2.0*log_sigma)/(Units.cal**2 / Units.second) * numpy.sqrt(duration_n)\n\n # Define observed data.\n print N\n print expected_injection_heats\n print tau\n print injection_heats\n #model['q_n'] = pymc.Normal('q_n', size=[N], mu=expected_injection_heats, tau=tau, observed=True, value=injection_heats)\n model['q_n'] = pymc.Normal('q_n', mu=expected_injection_heats, tau=tau, observed=True, value=injection_heats)\n\n return model", "def models(self,query,query_num):\n okapi_tf_sigma = {}\n tf_idf_sigma = {}\n bm25_sigma = {}\n lm_laplace = {}\n lm_jm = {}\n # query_len = sum(query.values()) # get length with weight\n query_len = len(query)\n lam = 0.99\n for word in query.keys():\n print word\n df, tfs, sumlen, sumtf= self.getTF(word)\n wqtf = query[word]\n #print tfs\n for doc in tfs.keys():\n doc_len = self.doc_len_dict[doc]\n tf = tfs[doc]\n laplace_base = math.log(1.0/(doc_len + self.vsize))\n jm_base = math.log((1-lam) * (sumtf-tf) / (sumlen-doc_len))\n okapi_tf = self.okapiTF(tf, doc_len)\n # okapi_tf = self.okapiTF(tf, doc_len, wqtf) # calculate with word weight\n tf_idf = self.tfiDF(okapi_tf, df)\n bm25 = self.okapiBM25(tf, doc_len, df, wqtf)\n log_p_laplace = self.lm_laplace(tf, doc_len)\n log_p_jm = self.lm_jm(tf, doc_len, sumtf, sumlen, lam)\n # if doc in lm_jm:\n if doc in okapi_tf_sigma:\n okapi_tf_sigma[doc] += okapi_tf \n tf_idf_sigma[doc] += tf_idf\n bm25_sigma[doc] += bm25\n lm_laplace[doc] += log_p_laplace - laplace_base\n # calculate the lm_laplace with word weight\n # lm_laplace[doc] += (log_p_laplace - laplace_base) * wqtf\n lm_jm[doc] += log_p_jm - jm_base\n # lm_jm[doc] += (log_p_jm - jm_base) * wqtf\n else :\n okapi_tf_sigma[doc] = okapi_tf\n tf_idf_sigma[doc] = tf_idf\n bm25_sigma[doc] = bm25\n lm_laplace[doc] = (query_len - 1) * laplace_base + log_p_laplace\n # calculate laplace with word weight\n # lm_laplace[doc] = (query_len - wqtf) * laplace_base + log_p_laplace * wqtf\n lm_jm[doc] = (query_len - 1) * jm_base + log_p_jm\n # calculate jm 
with word weight\n # lm_jm[doc] = (query_len - wqtf) * jm_base + log_p_jm * wqtf\n sorted_okapi_tf_sigma = sorted(okapi_tf_sigma.items(), key=operator.itemgetter(1), reverse = True)\n sorted_tf_idf_sigma = sorted(tf_idf_sigma.items(), key=operator.itemgetter(1), reverse = True)\n sorted_bm25_sigma = sorted(bm25_sigma.items(), key=operator.itemgetter(1), reverse = True)\n sorted_lm_laplace = sorted(lm_laplace.items(), key=operator.itemgetter(1), reverse = True)\n sorted_lm_jm = sorted(lm_jm.items(), key=operator.itemgetter(1), reverse = True)\n\n self.writeFile(\"okapitf\", query_num, sorted_okapi_tf_sigma)\n self.writeFile(\"tfidf\", query_num, sorted_tf_idf_sigma)\n self.writeFile(\"bm25\", query_num, sorted_bm25_sigma)\n self.writeFile(\"lmlaplace\", query_num, sorted_lm_laplace)\n self.writeFile(\"lmjm\", query_num,sorted_lm_jm)\n # print sorted_okapi_tf_sigma\n self.bordaCount(query_num, sorted_okapi_tf_sigma, sorted_tf_idf_sigma,sorted_bm25_sigma,\n sorted_lm_laplace, sorted_lm_jm)\n\n return sorted_okapi_tf_sigma, sorted_tf_idf_sigma, sorted_bm25_sigma, sorted_lm_laplace, sorted_lm_jm", "def test_predict(self, get_model):\n data, data_val = make_data()\n model = get_model()\n model.compile(optimizer='sgd', loss='categorical_crossentropy')\n\n expe = Experiment(model)\n expe.fit([data], [data_val])\n KTB.predict(expe.model_dict, [data['X']])\n print(self)", "def run_model(Y,X,EM_DICT=None,verbose=0,modalpha=0.0005,removecells=1):\n\n enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=modalpha,max_iter=10000)\n enet.fit(X,Y)\n if verbose==1:\n print(enet.score(X,Y))\n\n Be=pd.DataFrame(enet.coef_)\n Be.columns=X.columns\n Be.index=Y.columns\n\n #EM iterateit\n Yhat=pd.DataFrame(enet.predict(X))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n SSE_all=np.square(Y.subtract(Yhat))\n\n X_adjust=X.copy()\n X_adjust['unperturbed']=[0]*len(X)\n\n df_SSE = []\n df_logit = []\n df_pf = []\n\n if EM_DICT is not None:\n\n for curcov in EM_DICT.keys():\n\n curcells=EM_DICT[curcov]\n\n X_notcur=X.copy()\n X_notcur[curcov]=[0]*len(X_notcur)\n\n X_sub=X_notcur.loc[curcells]\n\n Y_sub=Y.loc[curcells]\n\n GENE_var=2.0*Y_sub.var(axis=0)\n vargenes=GENE_var[GENE_var>0].index\n\n\n Yhat_notcur=pd.DataFrame(enet.predict(X_sub))\n Yhat_notcur.index=Y_sub.index\n Yhat_notcur.columns=Y_sub.columns\n\n SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))\n SSE=SSE_all.loc[curcells].subtract(SSE_notcur)\n SSE_sum=SSE.sum(axis=1)\n\n SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)\n logitify=np.divide(1.0,1.0+np.exp(SSE_sum))#SSE_transform))#sum))\n\n df_SSE.append(SSE_sum)\n df_logit.append(logitify)\n pf=np.mean(logitify>0.99)\n\n if verbose==1:\n \n print(curcov,pf)\n df_pf.append([curcov,pf])\n weak_perturb=1.0*(logitify<0.1)\n X_adjust[curcov].loc[curcells]=logitify\n X_adjust['unperturbed'].loc[curcells]=weak_perturb\n\n print('done with EM')\n\n #refit model\n\n enet=sklearn.linear_model.ElasticNet(precompute=True,l1_ratio=0.5,alpha=0.0005,max_iter=10000)\n\n if removecells==1:\n goodcells=X_adjust['unperturbed']!=1\n print(np.mean(goodcells))\n Y=Y[goodcells]\n X_adjust=X[goodcells]\n \n enet.fit(X_adjust,Y)\n Yhat=pd.DataFrame(enet.predict(X_adjust))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n\n if verbose==1:\n print(enet.score(X_adjust,Y))\n\n Be=pd.DataFrame(enet.coef_)\n Be.columns=X_adjust.columns\n Be.index=Y.columns\n RES_out=Y.subtract(Yhat) \n\n if EM_DICT is not None:\n return(Be,X_adjust,RES_out,df_pf)#,df_SSE,df_logit)\n\n 
return(Be,X_adjust,RES_out)#,df_SSE,df_logit)", "def HMM(X,K,loo_idx,song_idx,song_bounds):\n \n w = 6\n srm_k = 45\n nPerm = 1000\n within_across = np.zeros(nPerm+1)\n run1 = [X[i] for i in np.arange(0, int(len(X)/2))]\n run2 = [X[i] for i in np.arange(int(len(X)/2), len(X))]\n print('Building Model')\n srm = SRM(n_iter=10, features=srm_k) \n print('Training Model')\n srm.fit(run1)\n print('Testing Model')\n shared_data = srm.transform(run2)\n shared_data = stats.zscore(np.dstack(shared_data),axis=1,ddof=1)\n others = np.mean(shared_data[:,:,np.arange(shared_data.shape[-1]) != loo_idx],axis=2)\n loo = shared_data[:,song_bounds[song_idx]:song_bounds[song_idx + 1],loo_idx] \n nTR = loo.shape[1]\n\n # Fit to all but one subject\n ev = brainiak.eventseg.event.EventSegment(K)\n ev.fit(others[:,song_bounds[song_idx]:song_bounds[song_idx + 1]].T)\n events = np.argmax(ev.segments_[0],axis=1)\n\n # Compute correlations separated by w in time\n corrs = np.zeros(nTR-w)\n for t in range(nTR-w):\n corrs[t] = pearsonr(loo[:,t],loo[:,t+w])[0]\n \n # Compute within vs across boundary correlations, for real and permuted bounds\n for p in range(nPerm+1):\n within = corrs[events[:-w] == events[w:]].mean()\n across = corrs[events[:-w] != events[w:]].mean()\n within_across[p] = within - across\n \n np.random.seed(p)\n events = np.zeros(nTR, dtype=np.int)\n events[np.random.choice(nTR,K-1,replace=False)] = 1\n events = np.cumsum(events)\n\n return within_across", "def gen_predictions(model):\r\n Y=np.load(\"data/Y-val-2k-new.npy\") #validation param data\r\n\r\n fmap=load_fmap(model)\r\n TY = fmap.ssy.transform(Y)\r\n TX = fmap.model.predict(TY)\r\n Xp = fmap.ssx.inverse_transform(TX) #predicted xsecs\r\n\r\n np.save(\"data/X-pre-%s\"%model,Xp,allow_pickle=True)", "def startModelProcess(self):\n #\n _LOGGER.info(\"starting model : \" + self.algo)\n st = time.time()\n global inpData\n inpData, trainDataDF, testDataDF = self.readData()\n #\n\n def getPredictions(\n model, trainDF, testDF, train_x, test_x, op_seq_len, batchSize\n ):\n \"\"\"Gets predictions for Exponentialsmoothingholtwinters class.\"\"\"\n #\n trainDFPredicted = trainDF.copy()\n testDFPredicted = testDF.copy()\n #\n model.save(CONFIG.code_output_path + self.algo + \"_fitted_model.h5\")\n # model = load_model(CONFIG.code_output_path + self.algo + \"_fitted_model.h5\")\n #\n predVars = [\n CONFIG.dv_variable_name + \"_forecast_predicted_\" + str(val)\n for val in range(1, op_seq_len + 1)\n ]\n #\n predTrain = model.predict(train_x, batch_size=batchSize)\n trainDFPredicted[predVars] = pd.DataFrame(predTrain, columns=predVars)\n #\n predTest = model.predict(test_x, batch_size=batchSize)\n testDFPredicted[predVars] = pd.DataFrame(predTest, columns=predVars)\n #\n return trainDFPredicted, testDFPredicted, predVars\n\n def get_transformed_data(given_df, method=\"MinMax\"):\n \"\"\"Gets transformed data for Exponentialsmoothingholtwinters class.\"\"\"\n if method == \"MinMax\":\n scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\n scaled_df = pd.DataFrame(scaler.fit_transform(given_df))\n scaled_df.columns = given_df.columns\n return scaled_df, scaler\n else:\n _LOGGER.info(\"returning without transforming the data!\")\n return given_df\n\n def runModel(paramDict):\n \"\"\"Runs model for Exponentialsmoothingholtwinters class.\"\"\"\n inpDataDF = inpData.copy()\n # prepare input sequence\n ipSeqVars = []\n for col in CONFIG.idv_variable_names:\n for val in range(1, paramDict[\"ip_seq_len\"] + 1):\n newVar = str(val) + \"_lag_\" + col\n 
ipSeqVars.append(newVar)\n inpDataDF[newVar] = inpDataDF[col].shift(val)\n # prepare output sequence\n opSeqVars = []\n for val in range(1, paramDict[\"op_seq_len\"] + 1):\n newVar = str(val) + \"_forecast_actual_\" + CONFIG.dv_variable_name\n opSeqVars.append(newVar)\n inpDataDF[newVar] = inpDataDF[CONFIG.dv_variable_name].shift(\n -1 * (paramDict[\"ip_to_op_offset\"] + val)\n )\n # split data into train and test\n inpDataDF = inpDataDF.dropna()\n inpDataDF = inpDataDF.sort_values(\"Date\", ascending=True)\n # scale complete data (train + test)\n # inpDataDF, scaler_X = get_transformed_data(inpDataDF[ipSeqVars+opSeqVars])\n inpXDF = inpDataDF.loc[:, ipSeqVars].reindex_axis(\n sorted(\n inpDataDF[ipSeqVars].columns, key=lambda x: int(x[0 : x.find(\"_\")])\n ),\n axis=1,\n )\n\n temp_cols = inpXDF.columns\n inpXDF = inpXDF[inpXDF.columns[::-1]]\n inpXDF.columns = temp_cols\n\n inpYDF = inpDataDF.loc[:, opSeqVars].reindex_axis(\n sorted(\n inpDataDF[opSeqVars].columns, key=lambda x: int(x[0 : x.find(\"_\")])\n ),\n axis=1,\n )\n splitInd = int(len(inpXDF) * 0.80)\n trainDF = pd.concat(\n [\n inpXDF[0:splitInd].reset_index(drop=True),\n inpYDF[0:splitInd].reset_index(drop=True),\n ],\n axis=1,\n )\n testDF = pd.concat(\n [\n inpXDF[splitInd:].reset_index(drop=True),\n inpYDF[splitInd:].reset_index(drop=True),\n ],\n axis=1,\n )\n trainDF, scaler_X = get_transformed_data(trainDF[ipSeqVars + opSeqVars])\n testDF = pd.DataFrame(\n scaler_X.transform(testDF[ipSeqVars + opSeqVars]),\n columns=ipSeqVars + opSeqVars,\n )\n\n ipSeqVarsSorted = inpXDF.columns.tolist()\n opSeqVarsSorted = inpYDF.columns.tolist()\n\n # adjust sample size - keras requires number of_samples to be divisible by batch size\n def adjustSampleSize(df):\n \"\"\"Adjusts sample size for Exponentialsmoothingholtwinters class.\"\"\"\n while 1 == 1:\n if len(df) % paramDict[\"batch_size\"] != 0:\n df = df[0 : len(df) - 1]\n else:\n break\n return df\n\n trainDF = adjustSampleSize(trainDF)\n testDF = adjustSampleSize(testDF)\n\n trainDFScaled = trainDF\n testDFScaled = testDF\n\n train_x = trainDFScaled[ipSeqVarsSorted].values.reshape(\n len(trainDFScaled),\n paramDict[\"ip_seq_len\"],\n len(CONFIG.idv_variable_names),\n )\n train_y = trainDF[opSeqVarsSorted].values.reshape(\n len(trainDF), paramDict[\"op_seq_len\"]\n )\n test_x = testDFScaled[ipSeqVarsSorted].values.reshape(\n len(testDFScaled),\n paramDict[\"ip_seq_len\"],\n len(CONFIG.idv_variable_names),\n )\n test_y = testDF[opSeqVarsSorted].values.reshape(len(testDF), paramDict['op_seq_len']) # noqa\n #\n # create LSTM network architecture based on configurations\n model = Sequential()\n n_hidden_layers = paramDict[\"n_hidden_layers\"]\n if n_hidden_layers == 1:\n model.add(\n LSTM(\n paramDict[\"n_lstm_units_in_hidden_layers\"],\n batch_input_shape=(\n paramDict[\"batch_size\"],\n train_x.shape[1],\n train_x.shape[2],\n ),\n stateful=True,\n kernel_initializer=initializers.RandomNormal(\n mean=0, stddev=0.05\n ),\n recurrent_initializer=initializers.RandomNormal(\n mean=0, stddev=0.05\n ),\n )\n )\n else:\n n_lstm_units = paramDict[\"n_lstm_units_in_hidden_layers\"]\n for hlayer in range(1, n_hidden_layers):\n model.add(\n LSTM(\n n_lstm_units,\n batch_input_shape=(\n paramDict[\"batch_size\"],\n train_x.shape[1],\n train_x.shape[2],\n ),\n stateful=True,\n kernel_initializer=initializers.RandomNormal(\n mean=0, stddev=0.05\n ),\n recurrent_initializer=initializers.RandomNormal(\n mean=0, stddev=0.05\n ),\n return_sequences=True,\n )\n )\n n_lstm_units = n_lstm_units - 
round(\n (paramDict[\"n_lstm_units_decay_percent\"] / 100) * n_lstm_units\n )\n n_lstm_units = n_lstm_units if n_lstm_units > 1 else 2\n model.add(\n LSTM(\n n_lstm_units,\n batch_input_shape=(\n paramDict[\"batch_size\"],\n train_x.shape[1],\n train_x.shape[2],\n ),\n stateful=True,\n kernel_initializer=initializers.RandomNormal(\n mean=0, stddev=0.05\n ),\n recurrent_initializer=initializers.RandomNormal(\n mean=0, stddev=0.05\n ),\n )\n )\n model.add(Dense(train_y.shape[1]))\n model.compile(loss=paramDict[\"loss\"], optimizer=paramDict[\"optimizer\"])\n # run epochs\n for i in range(paramDict[\"n_epochs\"]):\n model.fit(\n train_x,\n train_y,\n epochs=1,\n batch_size=paramDict[\"batch_size\"],\n verbose=0,\n shuffle=False,\n )\n model.reset_states()\n # _LOGGER.info(\"----------------- completed epochs : \" + str(i))\n trainDFPredicted, testDFPredicted, predVars = getPredictions(\n model,\n trainDF,\n testDF,\n train_x,\n test_x,\n paramDict[\"op_seq_len\"],\n paramDict[\"batch_size\"],\n )\n\n actual_output = pd.DataFrame(\n scaler_X.inverse_transform(\n trainDFPredicted[ipSeqVarsSorted + opSeqVarsSorted]\n ),\n columns=ipSeqVarsSorted + opSeqVarsSorted,\n )\n predicted_output = pd.DataFrame(\n scaler_X.inverse_transform(\n trainDFPredicted[ipSeqVarsSorted + predVars]\n ),\n columns=ipSeqVarsSorted + predVars,\n )\n actual_output[predVars] = predicted_output[predVars]\n actual_output = actual_output.applymap(float)\n trainDFPredicted = actual_output.copy()\n del actual_output\n actual_output = pd.DataFrame(\n scaler_X.inverse_transform(\n testDFPredicted[ipSeqVarsSorted + opSeqVarsSorted]\n ),\n columns=ipSeqVarsSorted + opSeqVarsSorted,\n )\n predicted_output = pd.DataFrame(\n scaler_X.inverse_transform(testDFPredicted[ipSeqVarsSorted + predVars]),\n columns=ipSeqVarsSorted + predVars,\n )\n actual_output[predVars] = predicted_output[predVars]\n actual_output = actual_output.applymap(float)\n testDFPredicted = actual_output\n temp_list = []\n for i in range(1, paramDict[\"op_seq_len\"] + 1):\n (\n rmse_train,\n mape_train,\n mae_train,\n rsqTrain,\n ) = self.getAccuracyMetricsBase(\n trainDFPredicted[opSeqVarsSorted[i - 1]],\n trainDFPredicted[predVars[i - 1]],\n )\n (\n rmse_test,\n mape_test,\n mae_test,\n rsqTestDummy,\n ) = self.getAccuracyMetricsBase(\n testDFPredicted[opSeqVarsSorted[i - 1]],\n testDFPredicted[predVars[i - 1]],\n )\n temp_var = self.algo + \"@forecast_sequence_\" + str(i)\n temp_list.append(\n pd.DataFrame(\n [\n [\n temp_var,\n paramDict,\n rmse_train,\n mape_train,\n mae_train,\n rsqTrain,\n rmse_test,\n mape_test,\n mae_test,\n ]\n ]\n )\n )\n\n temp_df = pd.concat(temp_list, axis=0)\n temp_df.columns = [\n \"algo\",\n \"hyperParams\",\n \"rmse_train\",\n \"mape_train\",\n \"mae_train\",\n \"rsqTrain\",\n \"rmse_test\",\n \"mape_test\",\n \"mae_test\",\n ]\n return temp_df\n\n resultLst = []\n for val in self.hyperParams:\n resultLst.append(runModel(val))\n _LOGGER.info(\"time taken : %f minutes\" % (((time.time() - st) / (60.0))))\n\n return pd.concat(resultLst)", "def compare_observed_models(self):\n num_iters = 0\n tot_ovr_trips_mult_paths = 0.0\n fl2num_trips = {}\n #first element is hausdorff distance, second is sum hausdorff, third is dsn\n fl2similarity_measures = {}\n for fl in self.fl2models:\n models = self.fl2models[fl]\n num_models = len(models)\n probs = [0.0 for i in range(len(models))]\n model_array = []\n total_trips = 0.0\n model_i = 0\n for model in models:\n count = len(models[model])\n probs[model_i] += count\n total_trips += count\n 
model_array.append(model)\n #print \"Trips with model %d: %d\" % (model_i,count)\n model_i += 1\n if len(model_array) == 1:\n continue\n tot_ovr_trips_mult_paths += total_trips\n fl2num_trips[fl] = total_trips\n probs = map(lambda x: x/total_trips,probs)\n diag_sum = sum(map(lambda x: x*x,probs))\n denom = 1.0-diag_sum\n weights = [[0.0 for i in range(num_models)] for i in range(num_models)]\n for i in range(num_models):\n for j in range(i+1,num_models):\n weights[i][j] = (2*probs[i]*probs[j])/denom\n # \"\"\"\n fl2similarity_measures[fl] = [0.0,0.0,0.0]\n for i in range(len(model_array)):\n for j in range(i+1,len(model_array)):\n weight = weights[i][j]\n haus,sum_haus,dsn = self.path_diff_measures(model_array[i],model_array[j])\n #print \"%s: haus %.2f, sum_haus %.2f, dsn %.2f\" % (str((i,j)),haus,sum_haus,dsn) \n fl2similarity_measures[fl][0] += weight*haus\n fl2similarity_measures[fl][1] += weight*sum_haus\n fl2similarity_measures[fl][2] += weight*dsn\n measures = fl2similarity_measures[fl]\n #\"\"\"\n \"\"\"\n for i in range(len(model_array)):\n print \"path %d\" % i\n self.draw_grid(model_array[i])\n weights_with_diag = [[0.0 for i in range(num_models)] for i in range(num_models)]\n for i in range(num_models):\n for j in range(i,num_models):\n if i == j:\n weights_with_diag[i][j] = probs[i]*probs[i]\n else:\n weights_with_diag[i][j] = weights[i][j]*denom\n fl2similarity_measures[fl] = [0.0,0.0,0.0]\n weight_sum = 0.0\n for i in range(num_models):\n #for j in range(num_models):\n # sys.stdout.write(\"%.3f \" % weights_with_diag[i][j])\n #print \"\"\n weight_sum += sum(weights_with_diag[i])\n #print \"weight sum: %f\" % weight_sum\n for i in range(len(model_array)):\n for j in range(i,len(model_array)):\n weight = weights_with_diag[i][j]\n haus,sum_haus,dsn = self.path_diff_measures(model_array[i],model_array[j])\n #print \"%s: haus %.2f, sum_haus %.2f, dsn %.2f\" % (str((i,j)),haus,sum_haus,dsn) \n fl2similarity_measures[fl][0] += weight*haus\n fl2similarity_measures[fl][1] += weight*sum_haus\n fl2similarity_measures[fl][2] += weight*dsn\n measures = fl2similarity_measures[fl]\n \"\"\"\n #print \"overall: haus %.2f, sum_haus %.2f, dsn %.2f\" % (measures[0],measures[1],measures[2])\n #print \"\"\n #if num_iters > 6:\n # break\n num_iters += 1\n overall_haus = 0.0\n overall_sum_haus = 0.0\n overall_dsn = 0.0\n for fl in fl2num_trips:\n if len(self.fl2models[fl]) == 1:\n continue\n num_trips = fl2num_trips[fl]\n meas = fl2similarity_measures[fl]\n overall_haus += num_trips*meas[0]\n overall_sum_haus += num_trips*meas[1]\n overall_dsn += num_trips*meas[2]\n overall_haus = overall_haus/tot_ovr_trips_mult_paths\n overall_sum_haus = overall_sum_haus/tot_ovr_trips_mult_paths\n overall_dsn = overall_dsn/tot_ovr_trips_mult_paths\n print \"\\naverage hausdorff %.2f, average sum hausdorff %.2f, average dsn %.2f\" % (overall_haus,overall_sum_haus,overall_dsn)\n return", "def ELRscript(model,mon,fday,fyr,day1,day2,nday,hdate_last,lit,liti,wk,nla1,sla1,wlo1,elo1,nla2,sla2,wlo2,elo2,fprefix,mpref,training_season,ntrain,rainfall_frequency,MOS):\n\n#%% model Hindcasts \n\tfh_xh = Dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc', mode='r')\n\tfh_yh = Dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc', mode='r')\n\n\tlons = fh_xh.variables['X'][:]\n\tlats = fh_xh.variables['Y'][:]\n\n\tx = fh_xh.variables['tp'][:]; x = np.squeeze(x)\n\ty = fh_yh.variables['tp'][:]\n\tndat1, nlat, nlon = np.shape(x)\n\tx1=x[:,1,1]\n\tI = 
np.where(x1>10000)\n\tbad_value_num=len(x1[I])\n\tndat=ndat1-bad_value_num\n\n#%% ELR: Train the models\n# Make a dictionary to contain the 'LogisticRegression' objects and terciles\n\telr_dict = {} # create an empty dictionary\n\telr_climo_dict = {} # create an empty dictionary for the climo forecast\n\n\tym = np.mean(y,axis=0)\n\tmsk = ma.getmask(ym)\n\tindex_land = np.empty((nlat,nlon),dtype=int)\n\txm0 = x\n\t#xm = xm0[0:int(ndat/2),:,:]\n\txm = xm0[0:lit,:,:]\n\n\tx0 = np.zeros(np.shape(xm)) # array of zeros to construct the climo forecast\n\tijland = -1\n\tfor j in range(nlat):\n\t# print(\"in j loop, j=\", j)\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\tindex_land[j,i] = ijland # index of land points\n\t\t\t\t#elr_dict[ijland] = elr_fit(xm[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\t#elr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\telr_dict[ijland] = elr_fit(xm[:,j,i], y[0:lit,j,i])\n\t\t\t\telr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:lit,j,i])\n\t\t\t# ijland is the dictionary key that can be used to assess the entries, like this\n\t\t\t# mymodel, mytercs = mydict[0]\n\t\t\t# mymodel.coef_\n\tnland = ijland+1\n\t#print('ELR training done with total landpoints = ',nland)\n\n\t#%% Make set of ELR in-sample hindcasts (no XV)\n\t#elr_hc = np.empty((ndat,nlat,nlon,3)); elr_hc.fill(np.nan)\n\t#elr_hc = np.empty((int(ndat/2),nlat,nlon)); elr_hc.fill(np.nan)\n\telr_hc = np.empty((lit,nlat,nlon)); elr_hc.fill(np.nan)\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\t#elr_hc[:,j,i,:] = elr_tercilesPredict(xm[:,j,i], terciles, elrmodel)\n\t\t\t\telr_hc[:,j,i] = elr_quantilePredict(xm[:,j,i], elrmodel)\n\n# ijland = index_land[lat1, lon1]\n# elrmodel, terciles = elr_dict[ijland]\n# elrmodel_climo, terciles = elr_climo_dict[ijland]\n# poe, q_fcst, q_clim, = elr_poe( xm[idat,lat1,lon1], elrmodel, elrmodel_climo )\n# plt.figure()\n\n\t#print('Set of ELR hindcasts made on a map of xy gridpoints')\n#---------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\t#T=int(ndat/2)\n\tT=lit\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(elr_hc, axis=1)\n\tvar[np.isnan(var)]=-999. 
#use CPT missing value\n\tdss=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tTarr=np.empty(ndat,dtype=int)\n\tfor it in range(ndat):\n\t\tTarr[it]=1901+it\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(y[0:lit,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tT1=lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\tndat_fc = ndat-lit\n\txf = x[lit:ndat,:,:]\n\tyf = y[lit:ndat,:,:]\n\n#%% Verification period\n########################################\n\n\telr_fc = np.empty((ndat_fc,nlat,nlon,3)); elr_fc.fill(np.nan)\n\trpss_ELR_fc = np.ma.array(np.empty((nlat,nlon)), mask=msk, fill_value=np.nan)\n\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\telr_fc[:,j,i,:] = elr_tercilesPredict(xf[:,j,i], terciles, elrmodel)\n\t#print('Set of ELR forcasts made on a map of xy gridpoints')\n\n#----------------------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_verification.txt'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tf.write(\"cpt:ncats=3\\n\")\n\tW=nlon\n\tH=nlat\n\tds=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\tT=ndat-lit\n\tTarr1=Tarr[lit:]\n\tXarr=lons\n\tYarr1=lats\n\tYarr=Yarr1[::-1] #Y should from N to S\n\tvari='tp'\n\tvar=np.flip(elr_fc, axis=1)*100\n\tvar[np.isnan(var)]=-1.0 #use CPT missing value\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:C=1, cpt:clim_prob=0.33333333333300003, cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=probability (%), cpt:missing=-1.0000000000000000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,0]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=2, 
cpt:clim_prob=0.33333333333400000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,1]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=3, cpt:clim_prob=0.33333333333299997\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,2]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_verification.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\t#var=np.flip(y[int(ndat/2):,:,:], axis=1)\n\tvar=np.flip(y[lit:,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\t#T1=int(ndat/2)\n\tT1=ndat-lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()", "def model_training(train_data, tags):\n start=[]\n obs = []\n for i in range(len(train_data)):\n start.append(train_data[i].tags[0])\n obs+= train_data[i].words\n obs=list(set(obs))\n A=np.zeros([len(tags),len(tags)])\n B=np.zeros([len(tags),len(obs)])\n for i in range(len(train_data)):\n words_ = train_data[i].words\n tags_ = train_data[i].tags\n for j in range(len(tags_)):\n if j < len(tags_)-1:\n A[tags.index(tags_[j])][tags.index(tags_[j+1])] += 1\n B[tags.index(tags_[j])][obs.index(words_[j])] +=1\n A_d = np.sum(A,axis=1,keepdims=True)\n A_d[A_d==0] = 1\n B_d = np.sum(B,axis=1,keepdims=True)\n B_d[B_d==0] = 1\n A = A/A_d \n B = B/B_d\n \n count=Counter(start)\n pi=np.zeros(len(tags))\n state_dict={}\n for i in range(len(tags)):\n if tags[i] in count:\n pi[i]=count[tags[i]]/len(start)\n \n obs_dict={k: v for v, k in enumerate(obs)}\n state_dict={k: v for v, k in enumerate(tags)}\n model = HMM(pi, A, B, obs_dict, state_dict)\n ###################################################\n # Edit here\n ###################################################\n return model", "def run_model(width_prior, vision_prior, separation_prior):\n model = ShoalModel(n_fish=n,\n width=width_prior,\n height=50,\n speed=1,\n vision=vision_prior,\n separation=separation_prior,\n cohere=0.25,\n separate=0.25,\n match=0.3)\n for i in range(20): # number of steps to run the model for\n model.step()\n data = model.datacollector.get_model_vars_dataframe() # retrieve data from model\n data_trim = data.iloc[18:19, ] # remove early runs\n np_data = np.asarray(data_trim)\n # Flatten Positions so it is more accessible & add column names\n p = np_data.flatten() # remove one set of brackets & make a dataframe\n pos_df = pd.DataFrame(p.flatten()) # remove one set of brackets & make a dataframe\n pos_df = pos_df[0].apply(pd.Series) # remove 
another set of brackets\n pos_df[0].apply(pd.Series) # remove last set of brackets\n pos = np.asarray(pos_df) # back to numpy array\n return pos_df", "def rateModels(lOutputFileName, dataFileName):\r\n global Configuration\r\n results = dict()\r\n # \"D:\\\\Lagramge\\\\downloads\\\\results\\\\OG-gstep.7.gramm-hmse-sexhaustive-d5-hmse.log\"\r\n models = parseLagramgeOutput(lOutputFileName)\r\n \r\n # \"D:\\\\Lagramge\\\\downloads\\\\temp\\\\trainDataOGnRI1.csv\"\r\n preppedData, dataLength = readData(dataFileName)\r\n results['isValidation'] = True\r\n results['dataLength'] = dataLength\r\n results['isDifferential'] = bool(Configuration['lagramge']['-i'] or Configuration['lagramge']['-t'])\r\n timeStep = 1\r\n \r\n if(results['isDifferential'] and Configuration['lagramge']['-i']):\r\n timeStep = Configuration['lagramge']['-i']\r\n \r\n results['models'] = dict()\r\n \r\n for i, model in enumerate(models):\r\n results['models'][i] = dict()\r\n results['models'][i]['equation'] = model.Eq\r\n results['models'][i]['lagramgeMSE'] = model.Mse\r\n results['models'][i]['lagramgeMDL'] = model.Mdl\r\n results['models'][i]['runMSE'] = 0.0\r\n results['models'][i]['runRMSE'] = 0.0\r\n results['models'][i]['runMPE'] = 0.0\r\n results['models'][i]['runMAPE'] = 0.0\r\n \r\n pVarName = Configuration['lagramge']['-v']\r\n\r\n if results['isDifferential']:\r\n for i in results['models']:\r\n evaluationDataPoints = 0\r\n calculated = numpy.zeros((dataLength - 1, ))\r\n \r\n for data in preparedDataRow(preppedData):\r\n calculated[evaluationDataPoints] = evaluateModel(results['models'][i]['equation'], data)\r\n evaluationDataPoints += 1\r\n \r\n actual = numpy.array(map(itemgetter(preppedData[0].index(pVarName)), preppedData[1:dataLength]))\r\n predicted = AdamBashforth2Integration(calculated, actual, timeStep)\r\n \r\n error = numpy.subtract(actual, predicted)\r\n squaredError = numpy.multiply(error, error)\r\n mpe = numpy.average(numpy.divide(error, actual)) * 100.0\r\n mape = numpy.average(numpy.abs(numpy.divide(error, actual))) * 100.0\r\n mse = numpy.average(squaredError)\r\n rmse = numpy.sqrt(mse)\r\n \r\n results['models'][i]['runMSE'] = mse\r\n results['models'][i]['runRMSE'] = rmse\r\n results['models'][i]['runMPE'] = mpe\r\n results['models'][i]['runMAPE'] = mape\r\n else:\r\n evaluationDataPoints = 0.0\r\n for data in preparedDataRow(preppedData):\r\n evaluationDataPoints += 1\r\n for i in results['models']:\r\n res = evaluateModel(results['models'][i]['equation'], data)\r\n results['models'][i]['runMSE'] += calcSquaredError(data[pVarName], res)\r\n results['models'][i]['runMPE'] += calcPercentageError(data[pVarName], res)\r\n results['models'][i]['runMAPE'] += calcAbsolutePercentageError(data[pVarName], res)\r\n \r\n for i in results['models']:\r\n results['models'][i]['runMSE'] = results['models'][i]['runMSE']/evaluationDataPoints\r\n results['models'][i]['runMPE'] = results['models'][i]['runMPE']/evaluationDataPoints\r\n results['models'][i]['runMAPE'] = results['models'][i]['runMAPE']/evaluationDataPoints\r\n results['models'][i]['runRMSE'] = numpy.sqrt(results['models'][i]['runMSE'])\r\n \r\n results['bestMseMId'] = getBestModel(results['models'], \"runMSE\")\r\n results['bestRmseMId'] = getBestModel(results['models'], \"runRMSE\")\r\n results['bestMpeMId'] = getBestAbsModel(results['models'], \"runMPE\")\r\n results['bestMapeMId'] = getBestAbsModel(results['models'], \"runMAPE\")\r\n \r\n results['bestMse'] = results['models'][results['bestMseMId']]['runMSE']\r\n results['bestRmse'] = 
results['models'][results['bestRmseMId']]['runRMSE']\r\n results['bestMape'] = results['models'][results['bestMapeMId']]['runMAPE']\r\n results['bestMpe'] = results['models'][results['bestMpeMId']]['runMPE']\r\n\r\n return results", "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n \"save predictions\"\n pred_df.to_csv(self.cfg.output_directory + \"hiclstm_%s_predictions_chr%s.csv\" % (self.cell, str(self.chr)),\n sep=\"\\t\")\n return pred_df", "def recognize(models: dict, test_set: SinglesData):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n sequences = test_set.get_all_sequences()\n XLenghts = test_set.get_all_Xlengths()\n\n for s in sequences:\n X, length = XLenghts[s]\n p = {}\n guess = \"\"\n for word, model in models.items():\n try:\n p[word] = model.score(X, length)\n except:\n p[word] = float('-inf')\n probabilities.append(p)\n values = list(p.values())\n keys = list(p.keys())\n guesses.append(keys[values.index(max(values))])\n\n return probabilities, guesses\n\n \"\"\"\n valid_models = {word: model for word,model in models.items() if model is not None}\n probabilities = [word_probabilities(valid_models, *test_set.get_item_Xlengths(i))\n for i,_ in enumerate(test_set.wordlist)]\n guesses = [best_guess(word_probs) for word_probs in probabilities]\n return probabilities, guesses\n\ndef word_probabilities(models, X, lengths):\n word_probs = {}\n\n for word,model in models.items():\n try:\n word_probs[word] = model.score(X, lengths)\n except ValueError: # The hmmlearn library may not be able to train or score all models.\n word_probs[word] = float('-inf')\n\n return word_probs\n\ndef best_guess(word_probs):\n return max(word_probs.keys(), key=lambda word: word_probs[word])\n \"\"\"", "def __init__(self, model, sd, sn, md, mn, contactNames, swingIds=None, swingPosNoise=None, swingVelNoise=None):\n self.model = model\n self.pin_model = model.state.pinocchio\n self.pin_data = self.pin_model.createData()\n self.nx, self.ndx, self.nu = model.state.nx, model.state.ndx, model.nu\n self.nq = self.pin_model.nq \n self.nv = self.pin_model.nv \n self.ny = self.ndx\n self.sd = sd\n self.sn = sn\n self.md = md\n self.mn = mn\n self.np = self.sd.shape[0]\n self.nm = self.md.shape[0]\n \n self.measurement = np.zeros(self.nx)\n self.MeasurementDataType = MeasurementDataFullState\n self.contact_names = contactNames\n self.contact_ids = [self.pin_model.getFrameId(name) for name in self.contact_names]\n self.nc = len(contactNames)\n self.state_names = []\n self.control_names = []\n self.branch_names = []\n self.branch_joints = []\n self.branch_ids = []\n self.parse_model()\n self.njoints = self.nv - 6 \n self.nq_base = 7 \n self.nv_base = 6\n self.swingIds = swingIds\n self.swingPosNoise = swingPosNoise\n self.swingVelNoise = swingVelNoise\n if self.swingIds is not None: \n assert len(self.swingIds) == len(self.swingPosNoise), \"swingPosNoise Dimension Missmatch\"\n assert len(self.swingIds) == len(self.swingVelNoise), \"swingVelNoise Dimension Missmatch\"\n # find active branches\n self.active_branches = []\n self.q_indices = []\n self.dq_indices = []\n\n if self.swingIds is not None:\n for fid in self.swingIds:\n for i, branch in enumerate(self.branch_ids):\n if fid in branch:\n self.active_branches += [i]\n # now collect state 
indeces \n \n for i in self.active_branches:\n q_inds = [self.state_names.index(jn) - 1 for jn in self.branch_joints[i]]\n dq_inds = [self.nv-1+self.state_names.index(jn) for jn in self.branch_joints[i]]\n self.q_indices += [q_inds]\n self.dq_indices += [dq_inds]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
change_index(list_split) > list, list. Get locations from index.
def change_index(self, list_split): "format index" temp = [k.split('|')[-1] for k in list_split] chr_list = [] index_list = [] for t in temp: index = t.split(':') chr_list.append(index[0]) index_list.append(index[1].split('-')) "prepare locations list" loc_list = [] for ind in index_list: loc = int(((int(ind[0]) + int(ind[1])) / 2) // 10000) loc_list.append(loc) return loc_list, chr_list
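A minimal usage sketch for the change_index document above (the instance name `prep` and the exact 5C label format are assumptions inferred from the split logic, not taken from the source):

labels = ["5C_299|chr1:47200000-47216000", "5C_300|chr11:33880000-33896000"]
loc_list, chr_list = prep.change_index(labels)  # prep: assumed instance of the preprocessing class
# each location is the fragment midpoint floor-divided into 10 kb bins
# loc_list -> [4720, 3388]; chr_list -> ['chr1', 'chr11']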
[ "def reindexObject(idxs=[]):", "def move_index(self):\n\n index = bpy.context.scene.list_index\n list_length = len (bpy.context.scene.my_list) - 1\n # (index starts at 0)\n new_index = index + (-1 if self.direction == 'UP' else 1)\n bpy.context.scene.list_index = max (0 , min (new_index , list_length))", "def split_indices(l,lookup):\n within,without = [],[]\n for (i,v) in enumerate(l):\n try:\n ind = lookup.index(v)\n within.append((i,ind))\n except ValueError: # v not found in lookup\n without.append((i,v))\n return within,without", "def replace_in_list(my_list, idx, element):\n if (idx + 1 > len(my_list) or idx < 0):\n return (my_list)\n else:\n my_list[idx] = element\n return(my_list)", "def partition_around_index(list_to_partition, index):\n list_len = len(list_to_partition)\n if list_len <= index:\n raise ValueError(\n \"Index out of range: {} ({} item list)\".format(index, list_len)\n )\n l1, l2 = [], []\n if index > 0:\n l1 = list_to_partition[0:index]\n if index < (list_len - 1):\n l2 = list_to_partition[(index + 1) :]\n return l1, l2", "def _update_index(self):\n self.current_index = (self.current_index + 1) % self.nb_intervals", "def doublelist2flatlistindex(listlist):\n flatlist = []\n indexlist = []\n for ind, entries in enumerate(listlist):\n flatlist += entries\n indexlist += [ind for j in entries]\n return flatlist, np.array(indexlist)", "def split_into_chunks(alist, indices):\n\n ret = []\n alist_c = copy.deepcopy(alist)\n if np.sum(indices) != len(alist):\n print('Error')\n\n for i in indices:\n ret += [alist_c[:i]]\n del alist_c[:i]\n\n return ret", "def split_dataset_by_indices():", "def separate_states_by_index(list_of_states, absorbing, nonabsorbing):\r\n for index, sublist in enumerate(list_of_states):\r\n absorbing.append(index) if sum(sublist) == 0 else nonabsorbing.append(index)", "def _list_split_helper(alist, *indices):\n list_length = len(alist)\n indices = [list_length + index if index < 0 else index for index in indices]\n pairs = izip(chain([0], indices), chain(indices, [None]))\n return (alist[i:j] for i, j in pairs)", "def get_indexes_of_split(split, Y_size, Z_size):\n index_list = []\n for i in range(0, split.split_x):\n for j in range(0, split.split_z):\n # calculate the indexes (in bytes) of each tile, add all the tiles in to data_dict that in the write range.\n write_index = int(split.split_pos[-3]) + (int(split.split_pos[-2]) + j) * Y_size + (int(\n split.split_pos[-1]) + i) * Y_size * Z_size\n index_list.append(write_index)\n return index_list", "def move_index(self, index):\n\n if not self.ff_list is self.parent.focus_get():\n self.ff_list.focus()\n\n self.ff_list.activate(index)\n self.ff_list.selection_clear(0, END)\n self.ff_list.selection_set(index)\n self.ff_list.see(index)\n\n self.update_image(0)", "def change_point(self, point, index):\r\n pass", "def list_replace(lst: list, old, new) -> None:\n idx = -1\n try:\n while True:\n i = lst.index(old, idx + 1)\n lst[i] = new\n except ValueError:\n pass", "def sorted_indices(full_list):\n \n return [i[0] for i in sorted(enumerate(full_list), key=lambda x:x[1])]", "def group_indices(indexes):\n\n diff_inds = np.where(np.abs(np.diff(indexes)) > 1)[0]\n diff_points = np.concatenate(([-1], diff_inds, [len(indexes) - 1]))\n length = diff_points.size\n pairs = np.hstack((diff_points[:-1].reshape(length - 1, 1) + 1, diff_points[1:].reshape(length - 1, 1)))\n # pairs = zip(diff_points[::]+1, diff_points[1::])\n segments = indexes[pairs]\n return np.array(segments)", "def replace_index(x, index, value):\n 
# assume x has a copy-constructor and can be interpreted as a list\n y = list(x)\n y[index] = value\n cctor = copy_constructor(x)\n result = cctor(y)\n return result", "def ListIdx( lst ):\n return dict( list(map( reversed, enumerate( lst ) )) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
convert_to_hic_format() > No return object. Assigns positions and chr. Converts 5C data to a Hi-C-like format.
def convert_to_hic_format(self): if self.cfg.tal_mode == "wt": hek_mat = pd.read_csv(self.hek_file, sep="\t") elif self.cfg.tal_mode == "tal1_ko": hek_mat = pd.read_csv(self.tal1ko_file, sep="\t") elif self.cfg.tal_mode == "lmo2_ko": hek_mat = pd.read_csv(self.lmo2ko_file, sep="\t") "get positions" index, chr_list = self.change_index(list(hek_mat.index)) columns, _ = self.change_index(hek_mat.columns) "assign rows, columns and chr" hek_mat.index = index hek_mat.columns = columns hek_mat["chr"] = chr_list "get matrices for TAL1 and LMO2" tal1_mat = hek_mat.loc[hek_mat["chr"] == "chr1"] tal1_mat = tal1_mat.iloc[:, 0:285] lmo2_mat = hek_mat.loc[hek_mat["chr"] == "chr11"] lmo2_mat = lmo2_mat.iloc[:, 286:632] tal1_mat = tal1_mat.groupby(level=0, axis=1).sum() tal1_mat = tal1_mat.groupby(level=0, axis=0).sum() lmo2_mat = lmo2_mat.groupby(level=0, axis=1).sum() lmo2_mat = lmo2_mat.groupby(level=0, axis=0).sum() "prepare data in the form of Hi-C" tal_i = list(tal1_mat.index) tal_j = tal1_mat.columns lmo2_i = list(lmo2_mat.index) lmo2_j = lmo2_mat.columns tal_df = pd.DataFrame(columns=["i", "j", "v"]) for i in tal_i: for j in tal_j: tal_df = tal_df.append({"i": i, "j": j, "v": tal1_mat.loc[i][j]}, ignore_index=True) lmo2_df = pd.DataFrame(columns=["i", "j", "v"]) for i in lmo2_i: for j in lmo2_j: lmo2_df = lmo2_df.append({"i": i, "j": j, "v": lmo2_mat.loc[i][j]}, ignore_index=True) "save data" if self.cfg.tal_mode == "wt": tal_df.to_csv(cfg.hic_path + cfg.cell + "/tal_df.txt", sep="\t") lmo2_df.to_csv(cfg.hic_path + cfg.cell + "/lmo2_df.txt", sep="\t") else: tal_df.to_csv(cfg.output_directory + "tal1_ko.txt", sep="\t") lmo2_df.to_csv(cfg.output_directory + "lmo2_ko.txt", sep="\t")
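A compatibility sketch for the convert_to_hic_format document above: DataFrame.append, used in its inner loops, was deprecated in pandas 1.4 and removed in pandas 2.0. Assuming the same variable names as the code, the TAL1 frame can be built without per-row appends:

rows = [{"i": i, "j": j, "v": tal1_mat.loc[i][j]} for i in tal_i for j in tal_j]
tal_df = pd.DataFrame(rows, columns=["i", "j", "v"])  # equivalent to the append loop above
# lmo2_df can be assembled the same way from lmo2_i and lmo2_j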
[ "def load_hic(cfg, chr):\r\n try:\r\n data = pd.read_csv(\"%s%s/%s/hic_chr%s.txt\" % (cfg.hic_path, cfg.cell, chr, chr), sep=\"\\t\",\r\n names=['i', 'j', 'v'])\r\n data = data.dropna()\r\n data[['i', 'j']] = data[['i', 'j']] / cfg.resolution\r\n data[['i', 'j']] = data[['i', 'j']].astype('int64')\r\n return data\r\n except Exception as e:\r\n print(\"Hi-C txt file does not exist or error during Juicer extraction\")", "def to_hcl(self):\n if self.color_space == 'hcl':\n return self.copy()\n elif self.color_space == 'xyz':\n return self._xyz_to_hcl(self)\n else:\n return self.to_xyz().to_hcl()", "def load_interchrom_hic(cfg, chrx, chry):\r\n try:\r\n data = pd.read_csv(\"%s%s/%s/hic_chr%s.txt\" % (cfg.hic_path, cfg.cell, chrx, chry), sep=\"\\t\",\r\n names=['i', 'j', 'v'])\r\n data = data.dropna()\r\n data[['i', 'j']] = data[['i', 'j']] / cfg.resolution\r\n data[['i', 'j']] = data[['i', 'j']].astype('int64')\r\n return data\r\n except Exception as e:\r\n print(\"Hi-C txt file does not exist or error during Juicer extraction\")", "def convert_to_HWC(tensor, input_format):\n assert (len(set(input_format)) == len(input_format)\n ), \"You can not use the same dimension shordhand twice. \\\n input_format: {}\".format(input_format)\n assert (len(tensor.shape) == len(input_format)\n ), \"size of input tensor and input format are different. \\\n tensor shape: {}, input_format: {}\".format(tensor.shape, input_format)\n input_format = input_format.upper()\n\n if len(input_format) == 4:\n index = [input_format.find(c) for c in 'NCHW']\n tensor_NCHW = tensor.transpose(index)\n tensor_CHW = make_grid(tensor_NCHW)\n return tensor_CHW.transpose(1, 2, 0)\n\n if len(input_format) == 3:\n index = [input_format.find(c) for c in 'HWC']\n tensor_HWC = tensor.transpose(index)\n if tensor_HWC.shape[2] == 1:\n tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC],\n 2)\n return tensor_HWC\n\n if len(input_format) == 2:\n index = [input_format.find(c) for c in 'HW']\n tensor = tensor.transpose(index)\n tensor = np.stack([tensor, tensor, tensor], 2)\n return tensor", "def as_string(self, output_format=\"pfm\", header=True):\r\n\t\tout_string = \"\"\r\n \r\n\t\tif output_format in [\"pfm\", \"jaspar\"]:\r\n\t\t\t# create id line\r\n\t\t\tout_string += f\">{self.id}\\t{self.name}\\n\"\r\n\r\n\t\t\t# add counts\r\n\t\t\tfor index, base in enumerate(self.bases):\r\n\t\t\t\trow = \" \".join(map(str, self.counts[index]))\r\n\r\n\t\t\t\tif output_format == \"jaspar\":\r\n\t\t\t\t\trow = f\"{base} [{row} ]\"\r\n\r\n\t\t\t\tout_string += row + \"\\n\"\r\n\t\t\t\r\n\t\telif output_format == \"meme\":\r\n\r\n\t\t\t# create meme header\r\n\t\t\tif header:\r\n\t\t\t\t# TODO read meme version from original file (or default to version 4)\r\n\t\t\t\tmeme_header = \"MEME version 4\\n\\n\"\r\n\t\t\t\tmeme_header += \"ALPHABET= {0}\\n\\n\".format(\"\".join(self.bases))\r\n\t\t\t\tmeme_header += \"strands: {0}\\n\\n\".format(self.strands)\r\n\t\t\t\tmeme_header += \"Background letter frequencies\\n\"\r\n\t\t\t\tmeme_header += \" \".join([f\"{self.bases[i]} {self.bg[i]}\" for i in range(4)]) + \"\\n\"\r\n\r\n\t\t\t\tout_string += meme_header\r\n\r\n\t\t\t# add motif information to string\r\n\t\t\tout_string += f\"\\nMOTIF {self.id} {self.name}\\n\"\r\n\t\t\tout_string += f\"letter-probability matrix: alength= {len(self.bases)} w= {self.length} nsites= {int(round(self.n))} E= 0\\n\" # omit e_value as it could be out of context\r\n\t\t\t\r\n\t\t\t# add pfm\r\n\t\t\tif self.pfm is 
None:\r\n\t\t\t\tself.get_pfm()\r\n\r\n\t\t\tprecision = 6\r\n\t\t\tfor row in self.pfm.T: #add frequency per position\r\n\t\t\t\tout_string += \" {0}\\n\".format(\" \".join(map(lambda f: format(round(f, precision), f\".{precision}f\"), row)))\r\n\r\n\t\t# TODO also implementation in from_file needed\r\n\t\t#elif output_format == \"transfac\":\r\n\t\t#\tout_string += self.get_gimmemotif().gimme_obj.to_transfac()\r\n\r\n\t\telse:\r\n\t\t\traise ValueError(\"Format \" + output_format + \" is not supported\")\r\n\r\n\t\treturn out_string", "def scHiC_preprocess(cfg):\r\n\r\n chr_list = [19, 20, 21, 22]\r\n columns = ['x1', 'y1', 'bar1', 'bar2']\r\n full_pairs_path = cfg.hic_path + cfg.cell + cfg.schic_pairs_file\r\n pairs = pd.read_csv(full_pairs_path, sep=\"\\t\",\r\n names=['chrA', 'x1', 'x2', 'chrB', 'y1', 'y2', 'a', 'b', 'c', 'd', 'e', 'bar1', 'bar2',\r\n 'l', 'i', 'j', 'k'])\r\n\r\n for chr in chr_list:\r\n pairs = pairs.loc[pairs[\"chrA\"] == \"human_chr\" + str(chr)]\r\n pairs = pairs.loc[pairs[\"chrB\"] == \"human_chr\" + str(chr)]\r\n pairs = pairs[columns]\r\n pairs.to_csv(cfg.hic_path + cfg.cell + '/' + str(chr) + '/' + \"pairs_\" + str(chr) + '.txt', sep=\"\\t\")\r\n\r\n full_read_path = cfg.hic_path + cfg.cell + cfg.schic_reads_file\r\n reads = pd.read_csv(full_read_path, sep=\"\\t\",\r\n names=['a', 'b', 'reads_hg19', 'd', 'e', 'f', 'bar1', 'bar2', 'i', 'j', 'k', 'l', 'm', 'n',\r\n 'o', 'p', 'q'])\r\n reads = reads[['reads_hg19', 'bar1', 'bar2']]\r\n\r\n for chr in chr_list:\r\n pairs = pd.read_csv(cfg.hic_path + cfg.cell + '/' + str(chr) + '/' + \"pairs_\" + str(chr) + '.txt', sep=\"\\t\")\r\n merged_pairs = pairs.merge(reads, on=[\"bar1\", \"bar2\"])\r\n merged_pairs = merged_pairs[[\"x1\", \"y1\", \"reads_hg19\"]]\r\n merged_pairs = merged_pairs.rename(columns={\"x1\": \"i\", \"y1\": \"j\", \"reads_hg19\": \"v\"})\r\n merged_pairs.to_csv(cfg.hic_path + cfg.cell + '/' + str(chr) + '/' + \"hic_chr\" + str(chr) + '.txt', sep=\"\\t\")", "def _convert_to_hhea_props(font, _to_funits):\n return dict(\n ascent=_to_funits(font.ascent),\n descent=-_to_funits(font.descent),\n lineGap=_to_funits(font.leading),\n # other values are compiled by fontTools\n )", "def test_augmented_inchi(self):\n mol = Molecule().from_adjacency_list(\"\"\"\n 1 C u1 p0 c0 {2,S}\n 2 C u1 p0 c0 {1,S}\n \"\"\", saturate_h=True)\n\n self.assertEqual(mol.to_augmented_inchi(), 'InChI=1S/C2H4/c1-2/h1-2H2/u1,2')", "def _loh_to_vcf(cur):\n cn = int(float(cur[\"C\"]))\n minor_cn = int(float(cur[\"M\"]))\n if cur[\"type\"].find(\"LOH\"):\n svtype = \"LOH\"\n elif cn > 2:\n svtype = \"DUP\"\n elif cn < 1:\n svtype = \"DEL\"\n else:\n svtype = None\n if svtype:\n info = [\"SVTYPE=%s\" % svtype, \"END=%s\" % cur[\"end\"],\n \"SVLEN=%s\" % (int(cur[\"end\"]) - int(cur[\"start\"])),\n \"CN=%s\" % cn, \"MajorCN=%s\" % (cn - minor_cn), \"MinorCN=%s\" % minor_cn]\n return [cur[\"chr\"], cur[\"start\"], \".\", \"N\", \"<%s>\" % svtype, \".\", \".\",\n \";\".join(info), \"GT\", \"0/1\"]", "def b2h(x):\n return hex(x if isinstance(x,int) else int(x,2))", "def get_aqi_co_8h(co_8h: float) -> (int, str, str):\n cp = __round_down(co_8h, 1)\n return __get_aqi_general_formula_texts(cp, US_CO_8H, US_CO_EFFECTS, US_CO_CAUTIONS, US_AQI)", "def get_sample_1940_hh():\n hh_line = \"H19400200024278096700000001000009100000000001198632410100102100000009999000260300026007000840199990012200020999999901223233100110101000000001000900000000100090\"\n return hh_line", "def hue_to_cmy(hue_value):\n # Red-Yellow range. M100 = red. 
M0 = Yelow.\n if hue_value in range(0, 60):\n c = 0\n m = int((60 - hue_value) / 60 * 100)\n y = 100\n print(\"C: \" + str(c) + \" M: \" + str(m) + \" Y: \" + str(y))\n # Yellow-Green range. C0 = Yellow. C=100 = green.\n elif hue_value in range(60, 120):\n hue_value = hue_value - 60\n c = int(hue_value / 60 * 100)\n m = 0\n y = 100\n print(\"C: \" + str(c) + \" M: \" + str(m) + \" Y: \" + str(y))\n # Green-Cyan range. Y100 = Green. Y0 = Cyan.\n elif hue_value in range(120, 180):\n c = 100\n m = 0\n y = int((180 - hue_value) / 60 * 100)\n print(\"C: \" + str(c) + \" M: \" + str(m) + \" Y: \" + str(y))\n # Cyan-Blue range. M0 = Cyan. M100 = Blue.\n elif hue_value in range(180, 240):\n hue_value = hue_value - 180\n c = 100\n m = int(hue_value / 60 * 100)\n y = 0\n print(\"C: \" + str(c) + \" M: \" + str(m) + \" Y: \" + str(y))\n #Blue-Magenta range. C100 = Blue. C0 = Magenta.\n elif hue_value in range(240, 300):\n c = int((300 - hue_value) / 60 * 100)\n m = 100\n y = 0\n print(\"C: \" + str(c) + \" M: \" + str(m) + \" Y: \" + str(y))\n #Magenta-Red range. Y0 = Magenta. Y100 = Red.\n elif hue_value in range(300, 361):\n hue_value = hue_value - 300\n c = 0\n m = 100\n y = int(hue_value / 60 * 100)\n print(\"C: \" + str(c) + \" M: \" + str(m) + \" Y: \" + str(y))\n else:\n pass\n return [c, m, y]", "def getHourFormat(self) -> str:\n ...", "def grayscale256_to_ascii(val: int, ascii_mapping: str) -> str:\n return ascii_mapping[round(val / 255 * (len(ascii_mapping) - 1))]", "def HSI2RGB(self, hsi_img):\n # save the shape of original image\n row = np.shape(hsi_img)[0]\n col = np.shape(hsi_img)[1]\n #copy the origin image\n rgb_img = hsi_img.copy()\n #split the channel\n H,S,I = cv2.split(hsi_img)\n #project the channel into [0,1]\n [H,S,I] = [ i/ 255.0 for i in ([H,S,I])]\n R,G,B = H,S,I\n for i in range(row):\n h = H[i]*2*np.pi\n #H is bigger than 0 but smaller than 120\n a1 = h >=0\n a2 = h < 2*np.pi/3\n a = a1 & a2 #index in first situation\n tmp = np.cos(np.pi / 3 - h)\n b = I[i] * (1 - S[i])\n r = I[i]*(1+S[i]*np.cos(h)/tmp)\n g = 3*I[i]-r-b\n B[i][a] = b[a]\n R[i][a] = r[a]\n G[i][a] = g[a]\n #H is bigger than 120 but smaller than 240\n a1 = h >= 2*np.pi/3\n a2 = h < 4*np.pi/3\n a = a1 & a2 #index in second situation\n tmp = np.cos(np.pi - h)\n r = I[i] * (1 - S[i])\n g = I[i]*(1+S[i]*np.cos(h-2*np.pi/3)/tmp)\n b = 3 * I[i] - r - g\n R[i][a] = r[a]\n G[i][a] = g[a]\n B[i][a] = b[a]\n #H is bigger than 180 but smaller than 360\n a1 = h >= 4 * np.pi / 3\n a2 = h < 2 * np.pi\n a = a1 & a2 \n tmp = np.cos(5 * np.pi / 3 - h)\n g = I[i] * (1-S[i])\n b = I[i]*(1+S[i]*np.cos(h-4*np.pi/3)/tmp)\n r = 3 * I[i] - g - b\n B[i][a] = b[a]\n G[i][a] = g[a]\n R[i][a] = r[a]\n rgb_img[:,:,0] = B*255\n rgb_img[:,:,1] = G*255\n rgb_img[:,:,2] = R*255\n return rgb_img", "def h3_to_string(x):\n return _cy.int2hex(x)", "def shess(hess, num_chars=10):\n n = hess.shape[0]\n s = 'Hessian:' + ('\\n' + '| {} ' * n + '|') * n\n return s.format(*[sfloat(h, num_chars)\n for h in np.array(hess).reshape(-1)])", "def toHLS(self):\n retVal = self.getEmpty()\n if( self._colorSpace == ColorSpace.BGR or\n self._colorSpace == ColorSpace.UNKNOWN ):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HLS)\n elif( self._colorSpace == ColorSpace.RGB):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HLS)\n elif( self._colorSpace == ColorSpace.HSV ):\n cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)\n cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)\n elif( self._colorSpace == ColorSpace.XYZ ):\n cv.CvtColor(self.getBitmap(), retVal, 
cv.CV_XYZ2RGB)\n cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)\n elif( self._colorSpace == ColorSpace.HLS ):\n retVal = self.getBitmap() \n else:\n warnings.warn(\"Image.toHSL: There is no supported conversion to HSL colorspace\")\n return None\n return Image(retVal, colorSpace = ColorSpace.HLS )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prepare_tal1_lmo2(cfg) > DataLoader. Prepares the dataloader used for training.
def prepare_tal1_lmo2(self): "load Hi-C like data" tal_df = pd.read_csv(cfg.hic_path + cfg.cell + "/tal_df.txt", sep="\t") lmo2_df = pd.read_csv(cfg.hic_path + cfg.cell + "/lmo2_df.txt", sep="\t") "preprocess" tal_df = tal_df.drop(['Unnamed: 0'], axis=1) lmo2_df = lmo2_df.drop(['Unnamed: 0'], axis=1) tal_df[['i', 'j']] = tal_df[['i', 'j']].astype('int64') lmo2_df[['i', 'j']] = lmo2_df[['i', 'j']].astype('int64') "prepare indices and values for TAL1 in chromosome 1" values = torch.empty(0, cfg.sequence_length) input_idx = torch.empty(0, cfg.sequence_length, 2) input_idx_tal1, values_tal1 = get_samples_sparse(tal_df, 1, cfg) values_tal1 = F.pad(input=values_tal1, pad=(0, 4, 0, 0), mode='constant', value=0) input_idx_tal1 = F.pad(input=input_idx_tal1, pad=(0, 0, 0, 4, 0, 0), mode='constant', value=0) values = torch.cat((values, values_tal1.float()), 0) input_idx = torch.cat((input_idx, input_idx_tal1), 0) if self.cfg.get_tal1_only: "create tal dataloader" dataset = torch.utils.data.TensorDataset(input_idx, values) data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True) return data_loader if self.cfg.get_lmo2_only: values = torch.empty(0, cfg.sequence_length) input_idx = torch.empty(0, cfg.sequence_length, 2) "prepare indices and values for LMO2 in chromosome 11" input_idx_lmo2, values_lmo2 = get_samples_sparse(lmo2_df, 11, cfg) values = torch.cat((values, values_lmo2.float()), 0) input_idx = torch.cat((input_idx, input_idx_lmo2), 0) "create dataloader" dataset = torch.utils.data.TensorDataset(input_idx, values) data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True) return data_loader
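A hedged sketch of how the DataLoader returned by prepare_tal1_lmo2 is consumed (the `experiment` instance and the loop body are illustrative assumptions; the tensor shapes follow from the TensorDataset built above):

data_loader = experiment.prepare_tal1_lmo2()
for input_idx, values in data_loader:
    # input_idx: (batch_size, sequence_length, 2) genomic position pairs
    # values:    (batch_size, sequence_length) observed contact values
    pass  # a forward pass and loss computation would go here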
[ "def train_tal1_lmo2(self, model):\n\n \"summary writer\"\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr)\n\n \"initialize optimizer and prepare dataloader\"\n self.cfg.get_tal1_only = False\n self.cfg.get_lmo2_only = False\n optimizer, criterion = model.compile_optimizer()\n data_loader = self.prepare_tal1_lmo2()\n\n \"train and save the model\"\n model.train_model(data_loader, criterion, optimizer, writer)\n torch.save(model.state_dict(), cfg.model_dir + cfg.model_name + '.pth')", "def _make_torch_data_loaders(opt, actions):\n train_dataset = Human36M(actions=actions, data_path=opt.data_dir,\n orthogonal_data_augmentation_prob=opt.orthogonal_data_augmentation_prob,\n z_rotations_only=opt.z_rotations_only, dataset_normalization=opt.dataset_normalization,\n flip_prob=opt.flip_prob, drop_joint_prob=opt.drop_joint_prob)\n test_dataset = Human36M(actions=actions, data_path=opt.data_dir,\n dataset_normalization=opt.dataset_normalization, is_train=False)\n\n if opt.use_horovod:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, num_replicas=hvd.size(),\n rank=hvd.rank())\n train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=opt.train_batch,\n sampler=train_sampler, # shuffle=True,#sampler=train_sampler,\n num_workers=args.workers,\n pin_memory=True)\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset, num_replicas=hvd.size(),\n rank=hvd.rank())\n test_loader = DataLoader(\n dataset=test_dataset,\n batch_size=opt.train_batch,\n sampler=test_sampler, # shuffle=True,#sampler=train_sampler,\n num_workers=args.workers,\n pin_memory=True)\n else:\n train_loader = DataLoader(\n dataset=train_dataset,\n batch_size=opt.train_batch_size,\n shuffle=True,\n num_workers=opt.workers,\n pin_memory=True)\n test_loader = DataLoader(\n dataset=test_dataset,\n batch_size=opt.test_batch_size,\n shuffle=False,\n num_workers=opt.workers,\n pin_memory=True)\n return train_dataset, train_loader, test_loader", "def prepare_data(self):\n self.tokenizer = custom_tokenizer_from_pretrained(\n self.tokenizer_name_or_path, self.cache_dir\n )\n try:\n self.train_examples = ExamplesBuilder(\n self.data_dir,\n Split.train,\n delimiter=self.delimiter,\n ).examples\n self.val_examples = ExamplesBuilder(\n self.data_dir,\n Split.dev,\n delimiter=self.delimiter,\n ).examples\n self.test_examples = ExamplesBuilder(\n self.data_dir,\n Split.test,\n delimiter=self.delimiter,\n ).examples\n\n if self.num_samples > 0:\n self.train_examples = self.train_examples[: self.num_samples]\n self.val_examples = self.val_examples[: self.num_samples]\n self.test_examples = self.test_examples[: self.num_samples]\n\n # create label vocabulary from dataset\n all_examples = self.train_examples + self.val_examples + self.test_examples\n all_labels = sorted(\n {\n tag.label\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n self.label_list = [BIO.O.value] + sorted(all_labels)\n label_types = sorted(\n {\n tag.tagtype.value\n for ex in all_examples\n for tag in ex.labels\n if tag.bio != BIO.O\n }\n )\n with open(self.labels_path, \"w\") as fp:\n for l in label_types:\n fp.write(l)\n fp.write(\"\\n\")\n\n self.label_to_id = {l: i for i, l in enumerate(self.label_list)}\n self.id_to_label = self.label_list\n\n start = time.time()\n self.train_dataset = self.create_dataset(\n self.train_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n 
logger.info(f\"DATASET TIME(train): {read_time}\")\n\n start = time.time()\n self.val_dataset = self.create_dataset(\n self.val_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(val): {read_time}\")\n\n start = time.time()\n self.test_dataset = self.create_dataset(\n self.test_examples, self.tokenizer, self.label_to_id\n )\n end = time.time()\n read_time = end - start\n logger.info(f\"DATASET TIME(test): {read_time}\")\n\n self.dataset_size = len(self.train_dataset)\n\n logger.info(self.val_examples[:3])\n logger.info(self.val_dataset[:3])\n\n except NoLocalFileError as e:\n logger.error(e)\n exit(1)", "def _create_dataloaders(config, dataset_class, tf1, tf2, partitions, target_transform=None, shuffle=False):\r\n train_imgs_list = []\r\n #original dataloader generation\r\n for train_partition in partitions:\r\n #specific case: STL10 has \"split\" argument instead of \"train\"\r\n if \"STL10\" == config.dataset:\r\n train_imgs_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf1,\r\n split=train_partition,\r\n target_transform=target_transform)\r\n else:\r\n train_imgs_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf1,\r\n train=train_partition,\r\n target_transform=target_transform)\r\n\r\n train_imgs_list.append(train_imgs_curr)\r\n\r\n train_imgs = ConcatDataset(train_imgs_list)\r\n train_dataloader = torch.utils.data.DataLoader(train_imgs, batch_size=config.iic_dataloader_bs, shuffle=shuffle, num_workers=0, drop_last=False)\r\n\r\n if not shuffle:\r\n assert (isinstance(train_dataloader.sampler, torch.utils.data.sampler.SequentialSampler))\r\n dataloaders = [train_dataloader]\r\n\r\n #a number of augmented dataloader are generated\r\n for d_i in range(config.num_dataloaders):\r\n train_tf_imgs_list = []\r\n for train_partition in partitions:\r\n if \"STL10\" == config.dataset:\r\n train_imgs_tf_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf2, # random per call\r\n split=train_partition,\r\n target_transform=target_transform)\r\n else:\r\n train_imgs_tf_curr = dataset_class(\r\n root=config.dataset_root,\r\n transform=tf2,\r\n train=train_partition,\r\n target_transform=target_transform)\r\n train_tf_imgs_list.append(train_imgs_tf_curr)\r\n train_imgs_tf = ConcatDataset(train_tf_imgs_list)\r\n train_tf_dataloader = torch.utils.data.DataLoader(train_imgs_tf, batch_size=config.iic_dataloader_bs, shuffle=shuffle, num_workers=0, drop_last=False)\r\n\r\n if not shuffle:\r\n assert (isinstance(train_tf_dataloader.sampler, torch.utils.data.sampler.SequentialSampler))\r\n assert (len(train_dataloader) == len(train_tf_dataloader))\r\n dataloaders.append(train_tf_dataloader)\r\n\r\n num_train_batches = len(dataloaders[0]) #Number of batches of first dataloader (and also others)\r\n return dataloaders", "def _initialize_dataloader(\n self, X_train: np.ndarray, y_train: np.ndarray, batch_size: int\n ):\n train_set = SimpleDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))\n self.train_loader = DataLoader(train_set, batch_size, shuffle=True)", "def create_loaders(self):\n self.spam_data.text_to_tensors()\n print('creating dataloaders')\n train_data = TensorDataset(self.spam_data.train_inputs, \n self.spam_data.train_masks, \n self.spam_data.train_labels)\n train_sampler = RandomSampler(train_data)\n self.train_dataloader = DataLoader(train_data, \n sampler=train_sampler, \n batch_size=self.batch_size)\n\n validation_data = 
TensorDataset(self.spam_data.validation_inputs, \n self.spam_data.validation_masks, \n self.spam_data.validation_labels)\n validation_sampler = SequentialSampler(validation_data)\n self.validation_dataloader = DataLoader(validation_data, \n sampler=validation_sampler, \n batch_size=self.batch_size)\n \n test_data = TensorDataset(self.spam_data.test_inputs, \n self.spam_data.test_masks, \n self.spam_data.test_labels)\n test_sampler = SequentialSampler(test_data)\n self.test_dataloader = DataLoader(test_data, \n sampler=test_sampler, \n batch_size=self.batch_size)\n print('finished creating dataloaders')", "def _init_labelled_data_generator(self):\n if self.conf.l_mix == 0:\n return\n\n log.info('Initialising labelled datagen. Loading %s data' % self.conf.dataset_name)\n self.data_labelled = \\\n self.loader.load_labelled_data(self.conf.split, 'training',\n modality=self.conf.modality,\n downsample=self.conf.image_downsample)\n # harric added modality and segmentation_option auguments\n self.data_labelled.sample_per_volume(-1,self.conf.pctg_per_volume, seed=self.conf.seed)\n self.data_labelled.sample_by_volume(int(self.conf.l_mix * self.data_labelled.num_volumes), seed=self.conf.seed)\n\n self.data_labelled.crop(self.conf.input_shape[:2]) # crop data to input shape: useful in transfer learning\n # self.conf.data_len = self.data.size()\n\n datagen_dict1 = self.get_datagen_params()\n datagen_dict2 = self.get_datagen_params()\n datagen_dict3 = self.get_datagen_params()\n img_gen = ImageDataGenerator(**datagen_dict1).flow(self.data_labelled.images, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n anato_msk_gen = ImageDataGenerator(**datagen_dict2).flow(self.data_labelled.anato_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n patho_msk_gen = ImageDataGenerator(**datagen_dict3).flow(self.data_labelled.patho_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n scn_gen = utils.data_utils.generator(self.conf.batch_size, self.conf.seed, 'no_overflow', self.data_labelled.scanner)\n return itertools.zip_longest(img_gen, anato_msk_gen, patho_msk_gen, scn_gen)", "def build_ss_train_loader(cfg, mapper):\n\n # Load labeled and unlabeled dataset dicts (either use two separate ones or perform a random split)\n labeled_dataset_dicts, unlabeled_dataset_dicts = build_ss_datasets(cfg)\n\n # Log the datasets sizes\n if comm.is_main_process():\n logger = setup_logger(name=__name__)\n logger.debug(\n \"Number of images in the labeled and unlabeled datasets: {}, {}\".format(\n len(labeled_dataset_dicts), len(unlabeled_dataset_dicts)\n )\n )\n\n # Print updated metadata counts\n print_instances_class_histogram(\n labeled_dataset_dicts, MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes\n )\n\n # Map metadata into actual objects (note: data augmentations also take place here)\n labeled_dataset = MapDataset(labeled_dataset_dicts, mapper)\n unlabeled_dataset = MapDataset(unlabeled_dataset_dicts, mapper)\n\n # Define data samplers\n assert cfg.DATALOADER.SAMPLER_TRAIN == \"TrainingSampler\", \"Unsupported training sampler: {}\".format(\n cfg.DATALOADER.SAMPLER_TRAIN\n )\n labeled_sampler = TrainingSampler(len(labeled_dataset))\n unlabeled_sampler = TrainingSampler(len(unlabeled_dataset))\n\n return (\n labeled_dataset_dicts,\n unlabeled_dataset_dicts,\n ), build_ss_batch_data_loader( # Initialize actual dataloaders\n (labeled_dataset, unlabeled_dataset),\n (labeled_sampler, unlabeled_sampler),\n cfg.SOLVER.IMS_PER_BATCH_LABELED,\n cfg.SOLVER.IMS_PER_BATCH_UNLABELED,\n 
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,\n num_workers=cfg.DATALOADER.NUM_WORKERS,\n )", "def set_dataloader(args):\n if args.dataset == 'mnist':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=True, download=True, \n transform=transform)\n test_dataset = torchvision.datasets.MNIST(root=args.loading_path, train=False, download=True, \n transform=transform)\n elif args.dataset == 'spatial':\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n ])\n train_dataset = SpatialDataset(args.data_root, args.data_file_name)\n test_dataset = SpatialDataset(args.data_root, args.data_file_name)\n\n else:\n raise NotImplemented(\"dataset {} is not implemented.\".format(args.dataset))\n # train loader\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n # test loader\n test_dataloader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n\n return train_dataloader, test_dataloader", "def init_loader(dataset):\n if dataset == 'liverct':\n return LiverCtLoader()\n elif dataset == 'kits':\n return KitsLoader()\n elif dataset == 'toy':\n return ToyLoader()\n return None", "def _construct_loader(self):\n # Get list of paths\n os.makedirs(self.path_to_data_dir, exist_ok=True)\n path_to_file = os.path.join(\n self.path_to_data_dir, f\"{self.ds_name}_{self.mode}.txt\"\n )\n if not os.path.exists(path_to_file) and self.ds_name != 'audioset':\n files = list(sorted(glob.glob(os.path.join(self.data_prefix, '*', '*')))) \n with open(path_to_file, 'w') as f:\n for item in files:\n f.write(\"%s\\n\" % item)\n\n self._path_to_videos = []\n self._labels = []\n self._spatial_temporal_idx = []\n self._vid_indices = []\n with open(path_to_file, \"r\") as f:\n for clip_idx, path in enumerate(f.read().splitlines()):\n for idx in range(self._num_clips):\n self._path_to_videos.append(\n os.path.join(self.data_prefix, path)\n )\n if self.ds_name != 'audioset':\n class_name = path.split('/')[-2]\n label = self.class_to_idx[class_name]\n self._labels.append(int(label))\n self._spatial_temporal_idx.append(idx)\n self._vid_indices.append(clip_idx)\n self._video_meta[clip_idx * self._num_clips + idx] = {}\n assert (\n len(self._path_to_videos) > 0\n ), \"Failed to load {} split {} from {}\".format(\n self.ds_name, self._split_idx, path_to_file\n )\n print(\n \"Constructing {} dataloader (size: {}) from {}\".format(\n self.ds_name, len(self._path_to_videos), path_to_file\n )\n )\n\n # Create / Load valid indices (has audio)\n vid_valid_file = f'{self.path_to_data_dir}/{self.ds_name}_valid.pkl'\n if os.path.exists(vid_valid_file):\n with open(vid_valid_file, 'rb') as handle:\n self.valid_indices = pickle.load(handle)\n else:\n self.valid_indices = filter_videos(self._path_to_videos)\n with open(vid_valid_file, 'wb') as handle:\n pickle.dump(\n self.valid_indices, \n handle, \n protocol=pickle.HIGHEST_PROTOCOL\n )\n if self.num_data_samples is not None:\n self.valid_indices = self.valid_indices[:self.num_data_samples]\n print(f\"Total number of videos: {len(self._path_to_videos)}, Valid videos: {len(self.valid_indices)}\", flush=True)\n\n # Make lists a Manager objects\n #self._path_to_videos = self.manager.list(self._path_to_videos)\n self.valid_indices = 
list(self.valid_indices)", "def __init__(self, dataloader: DataLoader, latent_dim: int = 2) -> None:\n self.dataloader = dataloader\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.encoder = Encoder(latent_dim).to(self.device)\n self.decoder = Decoder(latent_dim).to(self.device)\n self.criterion = nn.L1Loss()\n self.optim = torch.optim.Adam(\n (*self.encoder.parameters(), *self.decoder.parameters()),\n lr=config.LR,)", "def _init_unlabelled_data_generator(self):\n if self.conf.l_mix == 0:\n return\n\n log.info('Initialising labelled datagen. Loading %s data' % self.conf.dataset_name)\n self.data_unlabelled = \\\n self.loader.load_labelled_data(self.conf.split, 'training',\n modality=self.conf.modality,\n downsample=self.conf.image_downsample)\n\n self.data_unlabelled.sample_per_volume(-1, self.conf.pctg_per_volume, seed=self.conf.seed)\n\n self.data_unlabelled.crop(self.conf.input_shape[:2]) # crop data to input shape: useful in transfer learning\n self.conf.data_len = self.data_unlabelled.size()\n\n datagen_dict1 = self.get_datagen_params()\n datagen_dict2 = self.get_datagen_params()\n datagen_dict3 = self.get_datagen_params()\n img_gen = ImageDataGenerator(**datagen_dict1).flow(self.data_unlabelled.images, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n anato_msk_gen = ImageDataGenerator(**datagen_dict2).flow(self.data_unlabelled.anato_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n patho_msk_gen = ImageDataGenerator(**datagen_dict3).flow(self.data_unlabelled.patho_masks, batch_size=self.conf.batch_size,\n seed=self.conf.seed)\n scn_gen = utils.data_utils.generator(self.conf.batch_size, self.conf.seed, 'no_overflow', self.data_unlabelled.scanner)\n return itertools.zip_longest(img_gen, anato_msk_gen, patho_msk_gen, scn_gen)", "def read_preprocess(params):\n\n if params.dataset == 'mnist':\n pcha = 1\n plen = 28\n else:\n pcha = 3\n plen = 32\n\n ratioHyper = params.ratioHyper\n ratioValid = params.ratioValid\n preProcess = params.preProcess\n preContrast = params.preContrast\n sigmoid = lambda x: 1./(1.+ np.exp(-x))\n \n # read data\n t1Data, t1Label, vData, vLabel, testD, testL = read(params)\n\n # permuting data \n vData, vLabel = permute(vData, vLabel, params)\n t1Data, t1Label = permute(t1Data, t1Label, params)\n\n # form datasets T1 and T2 \n if params.meta_bw:\n nVSamples = vData.shape[0]\n # set up t2+validation\n if ratioHyper > 1.:\n tempIndex = int(round((ratioHyper - 1.)*nVSamples))\n tempData = t1Data[:tempIndex]\n tempLabel = t1Label[:tempIndex]\n vData = np.concatenate((vData, tempData))\n vLabel = np.concatenate((vLabel, tempLabel))\n t1Data = t1Data[tempIndex:]\n t1Label = t1Label[tempIndex:]\n elif ratioHyper < 1.:\n tempIndex = int(round((1.-ratioHyper)*nVSamples))\n tempData = vData[:tempIndex]\n tempLabel = vLabel[:tempIndex]\n t1Data = np.concatenate((t1Data, tempData))\n t1Label = np.concatenate((t1Label, tempLabel))\n vData = vData[tempIndex:]\n vLabel = vLabel[tempIndex:]\n # shuffle indices in t2+validation\n nVSamples = vData.shape[0]\n # set up t2 and validation\n if params.ratioValid > 0:\n tempIndex = int(round(nVSamples*(1.-ratioValid)))\n t2Data = vData[:tempIndex]\n t2Label = vLabel[:tempIndex]\n vData = vData[tempIndex:]\n vLabel = vLabel[tempIndex:]\n else: \n tempIndex = int(round(nVSamples*(1.-ratioValid)))\n t2Data = vData\n t2Label = vLabel\n vData = vData[tempIndex:]\n vLabel = vLabel[tempIndex:]\n\n else:\n t2Data = []\n t2Label = [] \n if not params.ratioValid > 0:\n t1Data = 
np.concatenate((vData, t1Data))\n t1Label = np.concatenate((vLabel, t1Label)) \n\n # global contrast normalization and ZCA \n if preProcess in ['global_contrast_norm', 'global_contrast_norm+zca', 'zca']:\n \n if not params.meta_bw:\n t2Data = t1Data[:5, :]\n #data = [t1Data, t2Data, testD, vData]\n if params.dataset == 'convnet':\n t1Data = t1Data.reshape(-1, pcha, plen, plen)\n t2Data = t2Data.reshape(-1, pcha, plen, plen)\n testD = testD.reshape(-1, pcha, pcha, plen)\n t1Data.astype(dtype=np.float64); t2Data.astype(dtype=np.float64); testD.astype(dtype=np.float64)\n \n #print np.max(t1Data), np.max(t2Data), np.max(testD), ' shapes:', t1Data.shape, t2Data.shape, testD.shape\n #print np.var(t1Data), np.var(t2Data), np.var(testD) \n \n if preProcess in ['global_contrast_norm', 'global_contrast_norm+zca']:\n gcn = ContrastNorm()\n t1Data = gcn.apply(t1Data/np.float64(255.))\n t2Data = gcn.apply(t2Data/np.float64(255.))\n testD = gcn.apply(testD/np.float64(255.))\n\n #print np.max(t1Data), np.max(t2Data), np.max(testD), ' shapes:', t1Data.shape, t2Data.shape, testD.shape\n #print np.var(t1Data), np.var(t2Data), np.var(testD) \n\n \n if preProcess in ['zca', 'global_contrast_norm+zca']: \n white = ZCA(3072, t1Data.copy())\n t1Data = white.apply(t1Data)\n t2Data = white.apply(t2Data)\n testD = white.apply(testD)\n \n #print np.max(t1Data), np.max(t2Data), np.max(testD), ' shapes:', t1Data.shape, t2Data.shape, testD.shape\n #print np.var(t1Data), np.var(t2Data), np.var(testD), \n \n # other kinds of preprocessing \n else: \n scaler = {\n 'm0': preprocessing.StandardScaler(with_std = False).fit(t1Data),\n 'm0s1': preprocessing.StandardScaler().fit(t1Data),\n 'minMax': preprocessing.MinMaxScaler().fit(t1Data),\n 'None': 1.\n }[preProcess] \n if preProcess != 'None':\n t1Data = scaler.transform(t1Data)\n if params.meta_bw: t2Data = scaler.transform(t2Data)\n vData = scaler.transform(vData)\n testD = scaler.transform(testD)\n\n # contrast \n contrastFun = {\n 'tanh': np.tanh,\n 'arcsinh': np.arcsinh,\n 'sig': sigmoid,\n 'None': 1.\n }[preContrast]\n if preContrast != 'None':\n t1Data = contrastFun(t1Data)\n if params.meta_bw: t2Data = contrastFun(t2Data)\n vData = contrastFun(vData)\n testD = contrastFun(testD)\n\n\n print '- size T1, valid, T2'\n print t1Data.shape, vData.shape\n if params.meta_bw: print t2Data.shape\n \n\n\n # reshape if convnet\n if params.model == 'convnet':\n if params.dataset in ['mnist', 'not_mnist']:\n t1Data = t1Data.reshape(-1, 1, 28, 28)\n vData = vData.reshape(-1, 1, 28, 28)\n testD = testD.reshape(-1, 1, 28, 28)\n if params.meta_bw: \n t2Data = t2Data.reshape(-1, 1, 28, 28) \n \n if params.dataset in ['cifar10', 'svhn']:\n t1Data = t1Data.reshape(-1, 3, 32, 32)\n vData = vData.reshape(-1, 3, 32, 32)\n testD = testD.reshape(-1, 3, 32, 32)\n if params.meta_bw: \n t2Data = t2Data.reshape(-1, 3, 32, 32)\n \n # final shape \n print 'Elementary Set data shape: ', t1Data.shape, t1Label.shape\n if np.sum(np.isinf(t1Data)) > 0 : print 'Nan in T1 data!!'\n if np.sum(np.isinf(t1Label)) > 0 : print 'Nan in T1 label!!'\n\n if params.meta_bw: \n print 'Hyper Set data shape: ', t2Data.shape, t2Label.shape\n if np.sum(np.isinf(t2Data)) > 0 : print 'Nan in T2 data!!'\n if np.sum(np.isinf(t2Label)) > 0 : print 'Nan in T2 label!!'\n \n# show_samples(t1Data[:100]/255., 50) \n \n return t1Data, t1Label, t2Data, t2Label, vData, vLabel, testD, testL", "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = 
True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n \"save predictions\"\n pred_df.to_csv(self.cfg.output_directory + \"hiclstm_%s_predictions_chr%s.csv\" % (self.cell, str(self.chr)),\n sep=\"\\t\")\n return pred_df", "def build_imagenet_train_dataloader(cfg_dataset, data_type='train'):\n cfg_train = cfg_dataset['train']\n # build dataset\n if cfg_dataset['use_dali']:\n # NVIDIA dali preprocessing\n assert cfg_train['transforms']['type'] == 'STANDARD', 'only support standard augmentation'\n dataset = ImageNetDataset(\n root_dir=cfg_train['root_dir'],\n meta_file=cfg_train['meta_file'],\n read_from=cfg_dataset['read_from'],\n )\n else:\n image_reader = cfg_dataset[data_type].get('image_reader', {})\n # PyTorch data preprocessing\n if isinstance(cfg_train['transforms'], list):\n transformer = build_transformer(cfgs=cfg_train['transforms'],\n image_reader=image_reader)\n else:\n transformer = build_common_augmentation(cfg_train['transforms']['type'])\n dataset = ImageNetDataset(\n root_dir=cfg_train['root_dir'],\n meta_file=cfg_train['meta_file'],\n transform=transformer,\n read_from=cfg_dataset['read_from'],\n image_reader_type=image_reader.get('type', 'pil'),\n )\n # build sampler\n cfg_train['sampler']['kwargs'] = {}\n cfg_dataset['dataset'] = dataset\n sampler = build_sampler(cfg_train['sampler'], cfg_dataset)\n if cfg_dataset['last_iter'] >= cfg_dataset['max_iter']:\n return {'loader': None}\n # build dataloader\n if cfg_dataset['use_dali']:\n # NVIDIA dali pipeline\n pipeline = ImageNetTrainPipeV2(\n data_root=cfg_train['root_dir'],\n data_list=cfg_train['meta_file'],\n sampler=sampler,\n crop=cfg_dataset['input_size'],\n colorjitter=[0.2, 0.2, 0.2, 0.1]\n )\n loader = DaliDataloader(\n pipeline=pipeline,\n batch_size=cfg_dataset['batch_size'],\n epoch_size=len(sampler),\n num_threads=cfg_dataset['num_workers'],\n last_iter=cfg_dataset['last_iter']\n )\n else:\n # PyTorch dataloader\n loader = DataLoader(\n dataset=dataset,\n batch_size=cfg_dataset['batch_size'],\n shuffle=False,\n num_workers=cfg_dataset['num_workers'],\n pin_memory=True,\n sampler=sampler\n )\n return {'type': 'train', 'loader': loader}", "def get_datasets(config):\n mod = config[\"data_loader\"][\"modifier\"]\n head = config[\"data_loader\"][\"head\"]\n if config[\"model\"][\"type\"] == \"tw_joint\":\n label_1 = config[\"data_loader\"][\"label_1\"]\n label_2 = config[\"data_loader\"][\"label_2\"]\n dataset_train = JointRankingDataset(data_path=config[\"train_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label_1=label_1, label_2=label_2, mod=mod, head=head)\n dataset_valid = JointRankingDataset(data_path=config[\"validation_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label_1=label_1, label_2=label_2, mod=mod, head=head)\n dataset_test = JointRankingDataset(data_path=config[\"test_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label_1=label_1, label_2=label_2, mod=mod, head=head)\n elif \"classifier\" in config[\"model\"][\"type\"]:\n if 
config[\"feature_extractor\"][\"contextualized_embeddings\"] is True:\n bert_parameter = config[\"feature_extractor\"][\"contextualized\"][\"bert\"]\n bert_model = bert_parameter[\"model\"]\n max_len = bert_parameter[\"max_sent_len\"]\n lower_case = bert_parameter[\"lower_case\"]\n batch_size = bert_parameter[\"batch_size\"]\n label = config[\"data_loader\"][\"label\"]\n load_bert = config[\"data_loader\"][\"load_bert_embeddings\"]\n all_labels = extract_all_labels(training_data=config[\"train_data_path\"],\n validation_data=config[\"validation_data_path\"],\n test_data=config[\"test_data_path\"], separator=config[\"data_loader\"][\"sep\"],\n label=label\n )\n label_encoder = create_label_encoder(all_labels)\n print(\"labelsize %d\" % len(set(all_labels)))\n if \"semclass\" in config[\"model\"][\"type\"]:\n semclass = config[\"data_loader\"][\"semclass\"]\n dataset_train = ContextualizedSemPhraseDataset(data_path=config[\"train_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0,\n top_layer=4,\n load_bert_embeddings=load_bert, semclass=semclass)\n dataset_valid = ContextualizedSemPhraseDataset(data_path=config[\"validation_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0,\n top_layer=4,\n load_bert_embeddings=load_bert, semclass=semclass)\n dataset_test = ContextualizedSemPhraseDataset(data_path=config[\"test_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert, semclass=semclass)\n else:\n dataset_train = ContextualizedPhraseDataset(data_path=config[\"train_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert)\n dataset_valid = ContextualizedPhraseDataset(data_path=config[\"validation_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert)\n dataset_test = ContextualizedPhraseDataset(data_path=config[\"test_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size, label_encoder=label_encoder,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert)\n\n else:\n\n label = config[\"data_loader\"][\"label\"]\n all_labels = extract_all_labels(training_data=config[\"train_data_path\"],\n validation_data=config[\"validation_data_path\"],\n test_data=config[\"test_data_path\"], separator=config[\"data_loader\"][\"sep\"],\n label=config[\"data_loader\"][\"label\"]\n )\n print(\"all labels\")\n print(all_labels)\n label_encoder = create_label_encoder(all_labels)\n print(\"labelsize %d\" % len(set(all_labels)))\n if \"semclass\" in config[\"model\"][\"type\"]:\n semclass = 
config[\"data_loader\"][\"semclass\"]\n dataset_train = JointClassificationDataset(data_path=config[\"train_data_path\"],\n embedding_path=config[\"feature_extractor\"][\n \"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder,\n feature=semclass)\n dataset_valid = JointClassificationDataset(data_path=config[\"validation_data_path\"],\n embedding_path=config[\"feature_extractor\"][\n \"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder,\n feature=semclass)\n dataset_test = JointClassificationDataset(data_path=config[\"test_data_path\"],\n embedding_path=config[\"feature_extractor\"][\n \"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder,\n feature=semclass)\n else:\n\n dataset_train = ClassificationDataset(data_path=config[\"train_data_path\"],\n embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder)\n dataset_valid = ClassificationDataset(data_path=config[\"validation_data_path\"],\n embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder)\n dataset_test = ClassificationDataset(data_path=config[\"test_data_path\"],\n embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head, label_encoder=label_encoder)\n\n else:\n label = config[\"data_loader\"][\"label\"]\n if config[\"feature_extractor\"][\"contextualized_embeddings\"] is True:\n bert_parameter = config[\"feature_extractor\"][\"contextualized\"][\"bert\"]\n bert_model = bert_parameter[\"model\"]\n max_len = bert_parameter[\"max_sent_len\"]\n lower_case = bert_parameter[\"lower_case\"]\n batch_size = bert_parameter[\"batch_size\"]\n load_bert = config[\"data_loader\"][\"load_bert_embeddings\"]\n load_labels = config[\"data_loader\"][\"load_labels\"]\n label_definition_path = config[\"feature_extractor\"][\"definition\"]\n dataset_train = ContextualizedRankingDataset(data_path=config[\"train_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size,\n label_definition_path=label_definition_path,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert,\n load_label_embeddings=load_labels)\n dataset_valid = ContextualizedRankingDataset(data_path=config[\"validation_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert,\n label_definition_path=label_definition_path,\n load_label_embeddings=load_labels)\n dataset_test = ContextualizedRankingDataset(data_path=config[\"test_data_path\"],\n bert_model=bert_model, lower_case=lower_case,\n max_len=max_len, separator=config[\"data_loader\"][\"sep\"],\n batch_size=batch_size,\n label=label, mod=mod, head=head, low_layer=0, top_layer=4,\n load_bert_embeddings=load_bert,\n label_definition_path=label_definition_path,\n load_label_embeddings=load_labels)\n else:\n dataset_train = 
RankingDataset(data_path=config[\"train_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head)\n dataset_valid = RankingDataset(data_path=config[\"validation_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head)\n dataset_test = RankingDataset(data_path=config[\"test_data_path\"],\n general_embedding_path=config[\"feature_extractor\"][\"general_embeddings\"],\n label_embedding_path=config[\"feature_extractor\"][\"label_embeddings\"],\n separator=config[\"data_loader\"][\"sep\"],\n label=label, mod=mod, head=head)\n\n return dataset_train, dataset_valid, dataset_test", "def get_loader(args, batch_size, vocab, shuffle, num_workers, use_video=False):\n\n if use_video:\n tasty_videos = TastyVideoDataset()\n data_loader = torch.utils.data.DataLoader(dataset=tasty_videos,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n else:\n recipe1m = Recipe1MDataset(args, vocab)\n data_loader = torch.utils.data.DataLoader(dataset=recipe1m,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def setup(self):\n if self.train or not os.path.isfile(\"B_M_I_Y_C_agent_cpu.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n\n self.trainings_model = Q_Net()\n self.trainings_model.cuda(0)\n self.trainings_model.train()\n self.trainings_model.double()\n\n self.optimizer = torch.optim.Adam(params=self.trainings_model.parameters(), lr=0.001)\n self.criterion = nn.SmoothL1Loss()\n self.criterion.cuda(0)\n\n self.target_model = Q_Net()\n self.target_model.cuda(0)\n self.target_model.double()\n\n self.actions = constants.ACTIONS\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"B_M_I_Y_C_agent_cpu.pt\", \"rb\") as file:\n self.trainings_model = pickle.load(file)\n self.trainings_model.eval()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
train_tal1_lmo2(model, cfg) > No return object Train model on 5C data from TAL1 and LMO2 regions.
def train_tal1_lmo2(self, model): "summary writer" timestr = time.strftime("%Y%m%d-%H%M%S") writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr) "initialize optimizer and prepare dataloader" self.cfg.get_tal1_only = False self.cfg.get_lmo2_only = False optimizer, criterion = model.compile_optimizer() data_loader = self.prepare_tal1_lmo2() "train and save the model" model.train_model(data_loader, criterion, optimizer, writer) torch.save(model.state_dict(), cfg.model_dir + cfg.model_name + '.pth')
[ "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n \"save predictions\"\n pred_df.to_csv(self.cfg.output_directory + \"hiclstm_%s_predictions_chr%s.csv\" % (self.cell, str(self.chr)),\n sep=\"\\t\")\n return pred_df", "def prepare_tal1_lmo2(self):\n\n \"load Hi-C like data\"\n tal_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/tal_df.txt\", sep=\"\\t\")\n lmo2_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n\n \"preprocess\"\n tal_df = tal_df.drop(['Unnamed: 0'], axis=1)\n lmo2_df = lmo2_df.drop(['Unnamed: 0'], axis=1)\n tal_df[['i', 'j']] = tal_df[['i', 'j']].astype('int64')\n lmo2_df[['i', 'j']] = lmo2_df[['i', 'j']].astype('int64')\n\n \"prepare indices and values for TAL1 in chromosome 1\"\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n input_idx_tal1, values_tal1 = get_samples_sparse(tal_df, 1, cfg)\n values_tal1 = F.pad(input=values_tal1, pad=(0, 4, 0, 0), mode='constant', value=0)\n input_idx_tal1 = F.pad(input=input_idx_tal1, pad=(0, 0, 0, 4, 0, 0), mode='constant', value=0)\n values = torch.cat((values, values_tal1.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_tal1), 0)\n\n if self.cfg.get_tal1_only:\n \"create tal dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n return data_loader\n\n if self.cfg.get_lmo2_only:\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n\n \"prepare indices and values for LMO2 in chromosome 11\"\n input_idx_lmo2, values_lmo2 = get_samples_sparse(lmo2_df, 11, cfg)\n values = torch.cat((values, values_lmo2.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_lmo2), 0)\n\n \"create dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n\n return data_loader", "def perform_tal1_ko(self, model):\n\n \"save representations\"\n self.chr = 1\n self.cfg.get_tal1_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"tal1\"\n _, ko_pred_df, _ = self.perform_ko(model)\n return ko_pred_df", "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_df", "def lacer(df, df1, train_start_date, train_end_date, test_start_date, test_end_date, request_type, CD, predictor_num): #Once model is ready, replace df with csv\n\n #Create Training and Testing Sets\n dftrain = preprocessing(df , train_start_date, train_end_date)\n dftrain = dftrain.reset_index(drop = True)\n dftest = preprocessing(df1, test_start_date, test_end_date)\n dftest = dftest.reset_index(drop = True)\n\n #Reserve test set for training on all 3 models. 
\n y_train, y_test = lc.CreateTestSet(dftest, predictor_num)\n y_test = y_test.reshape((-1, 1))\n\n\n## 2 Models\n #Model1: CD\n modelCD = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainCD = dftrain[dftrain['CD'] == CD].reset_index(drop = True)\n\n X_trainCD, X_testCD = lc.CreateTrainSet(dftrainCD, predictor_num)\n X_testCD = X_testCD.reshape((-1, 1))\n modelCD.fit(X_trainCD, X_testCD)\n\n y_predCD = modelCD.predict(y_train)\n\n #Model2: Request_type\n modelRT = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainRT = dftrain[dftrain['RequestType'] == request_type].reset_index(drop = True)\n\n X_trainRT, X_testRT = lc.CreateTrainSet(dftrainRT, predictor_num)\n X_testRT = X_testRT.reshape((-1, 1))\n\n modelRT.fit(X_trainRT, X_testRT)\n\n y_predRT = modelRT.predict(y_train)\n\n\n #Average out all predictions\n y_predFinal = (y_predCD + y_predRT )/2\n\n #Return metrics \n return lc.metrics(y_predFinal, y_test)", "def perform_ko(self, model):\n\n cfg = self.cfg\n\n \"load data\"\n if cfg.run_tal and cfg.hnisz_region == \"tal1\":\n self.cfg.get_tal1_only = True\n data_loader = self.prepare_tal1_lmo2()\n elif cfg.run_tal and cfg.hnisz_region == \"lmo2\":\n self.cfg.get_lmo2_only = True\n data_loader = self.prepare_tal1_lmo2()\n else:\n data_loader = get_data_loader_chr(cfg, self.chr, shuffle=False)\n\n \"get zero embed\"\n cfg.full_test = False\n cfg.compute_pca = False\n cfg.get_zero_pred = True\n zero_embed = test_model(model, cfg, self.chr)\n\n \"get knockout indices depending on experiment\"\n if cfg.run_tal:\n if cfg.hnisz_region == \"tal1\":\n cfg.ko_experiment = \"ctcf\"\n indices = cfg.tal1ko_indices\n elif cfg.hnisz_region == \"lmo2\":\n cfg.ko_experiment = \"ctcf\"\n indices = np.array(cfg.lmo2ko_indices) + get_cumpos(cfg, 11)\n else:\n if cfg.ko_experiment == \"ctcf\":\n if cfg.ctcf_indices == \"all\":\n indices = ko_ob.get_ctcf_indices()\n indices = sample(list(indices), 10)\n else:\n indices = ko_ob.cfg.ctcf_indices_22\n elif cfg.ko_experiment == \"foxg1\":\n indices = cfg.foxg1_indices\n elif cfg.ko_experiment == \"tadbs\":\n indices = ko_ob.get_tadbs()\n\n \"plotting and metrics\"\n n_indices = len(indices)\n diff_list = np.zeros((n_indices, 11))\n diff_mat = np.zeros((n_indices, 200, 200))\n \"run for all indices\"\n for i, indice in enumerate(indices):\n \"get representations\"\n representations, start, stop, pred_data = self.get_trained_representations(method=\"hiclstm\")\n\n \"alter representations\"\n representations, zero_embed = self.ko_representations(representations, start, indice, zero_embed,\n mode=cfg.ko_mode)\n\n if self.cfg.load_ko:\n ko_pred_df = pd.read_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n else:\n \"run through model using altered representations, save ko predictions\"\n _, ko_pred_df = model.perform_ko(data_loader, representations, start, zero_embed, mode=\"ko\")\n if self.cfg.save_kopred:\n ko_pred_df.to_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n\n \"compute difference between WT and KO predictions\"\n if self.cfg.compute_avg_diff:\n ko_diffs = self.compute_kodiff(pred_data, ko_pred_df, indice)\n diff_list[i] = ko_diffs\n\n \"get merged heatmap\"\n pred_data = pd.merge(pred_data, ko_pred_df, on=[\"i\", \"j\"])\n pred_data = pred_data.rename(columns={\"ko_pred\": \"v\"})\n hic_mat, st = get_heatmaps(pred_data, no_pred=False)\n # simple_plot(hic_mat, mode=\"reds\")\n\n \"get diff mat\"\n hic_win = indices_diff_mat(indice, st, 
hic_mat, mode=cfg.ko_experiment)\n n_win = len(hic_win)\n diff_mat[i, :n_win, :n_win] = hic_win\n\n diff_mat = diff_mat.mean(axis=0)\n ko = np.triu(diff_mat)\n pred = np.tril(diff_mat).T\n diff_mat = ko - pred\n simple_plot(diff_mat, mode=\"diff\")\n np.save(cfg.output_directory + \"tad_diff_zero_ctctn.npy\", diff_mat)\n mean_diff = np.mean(diff_list, axis=1)\n return mean_diff, ko_pred_df, pred_data", "def train_regression_model_for_bmi(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, x1, y1, y1label, feature_headers, mrns, agex_low, agex_high, months_from, months_to, modelType='lasso', percentile=False, filterSTR=['Gender:1'], filterSTRThresh=[0.5], variablesubset=['Vital'],variable_exclude=['Trend'], num_clusters=16, num_iters=100, dist_type='euclidean', corr_vars_exclude=['Vital'], return_data_for_error_analysis=False, return_data=False, return_data_transformed=False, do_impute=True, mrnForFilter=[], add_time=False, bin_ix=[], do_normalize=True, binarize_diagnosis=True, subset=np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False])): #filterSTR='Gender:0 male'\n\n if any([len(x)==0 for x in (x1,y1,y1label,feature_headers,mrns)]):\n print('At least one required data not provided out of x1, y1, y1label, feature_headers, or mrns.')\n print('Creating data from data dictionaries')\n x1, y1, y1label, feature_headers, mrns = build_features.call_build_function(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, agex_low, agex_high, months_from, months_to, percentile, mrnsForFilter=mrnForFilter)\n original_data = (x1, y1, y1label, feature_headers, mrns)\n else:\n print('Using prepared raw data')\n\n if binarize_diagnosis:\n bin_ix = np.array([(h.startswith('Diagnosis:') or h.startswith('Maternal Diagnosis:') or h.startswith('Newborn Diagnosis:')) for h in feature_headers])\n print(bin_ix.sum(), 'features are binary')\n x1[:,bin_ix] = (x1[:,bin_ix] > 0) * 1.0\n\n ix, x2, y2, y2label, mrns = filter_training_set_forLinear(x1, y1, y1label, feature_headers, filterSTR, percentile, mrns, filterSTRThresh)\n\n if do_impute or do_normalize or add_time:\n x2, mux, stdx, bin_ix, unobserved = normalize(x2, bin_ix=bin_ix)\n\n if do_impute:\n x2 = autoencoder_impute(x2, bin_ix)\n\n if add_time:\n x2, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = add_temporal_features(x2, feature_headers, num_clusters, num_iters, y2, y2label, dist_type, True, mux, stdx, do_impute, subset)\n else:\n centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = ['NaN']*7\n\n corr_headers = np.array(feature_headers)\n corr_matrix = np.corrcoef(x2.transpose())\n corr_headers_filtered, corr_matrix_filtered, ix_corr_headers = filter_correlations_via(corr_headers, corr_matrix, corr_vars_exclude)\n print('corr matrix is filtered to size', corr_matrix_filtered.shape)\n\n if len(variablesubset) != 0:\n x2, feature_headers = variable_subset(x2, variablesubset, feature_headers)\n\n print ('output is: average:{0:4.3f}'.format(y2.mean()), ' min:', y2.min(), ' max:', y2.max())\n print ('normalizing output.'); y2 = (y2-y2.mean())/y2.std()\n\n print ('Predicting BMI at age:'+str(agex_low)+ ' to '+str(agex_high)+ 'years, from data in ages:'+ str(months_from)+'-'+str(months_to) + ' months')\n if filterSTR != '':\n print ('filtering patients with: ' , filterSTR)\n\n print ('total size',ix.sum())\n if (ix.sum() < 50):\n print('Not enough subjects. 
Next.')\n return (filterSTR, [])\n\n if modelType == 'lasso' or modelType == 'randomforest' or modelType == 'gradientboost' or modelType == 'lars':\n iters = 10\n model_weights_array = np.zeros((iters, x2.shape[1]), dtype=float)\n auc_test_list=np.zeros((iters), dtype=float); r2testlist = np.zeros((iters), dtype=float);\n for iteration in range(0, iters):\n randix = list(range(0, x2.shape[0]))\n random.shuffle(randix)\n randix = randix[0:int(len(randix)*0.9)]\n datax = x2[randix,:]; datay=y2[randix]; dataylabel = y2label[randix]; mrnx = mrns[randix]\n (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, mrnstrain, mrnstest) = train_regression(datax, datay, dataylabel, percentile, modelType, feature_headers, mrnx)\n model_weights_array[iteration, :] = model.coef_ if ((modelType == 'lasso') or (modelType == 'lars')) else model.feature_importances_\n auc_test_list[iteration] = auc_test; r2testlist[iteration] = r2test\n\n model_weights = model_weights_array.mean(axis=0)\n model_weights_std = model_weights_array.std(axis=0)\n model_weights_conf_term = (1.96/np.sqrt(iters)) * model_weights_std\n test_auc_mean = auc_test_list.mean()\n test_auc_mean_ste = (1.96/np.sqrt(iters)) * auc_test_list.std()\n r2test_mean = r2testlist.mean()\n r2test_ste = (1.96/np.sqrt(iters)) * r2testlist.std()\n\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n\n if return_data_for_error_analysis == True:\n print('lets analyse this')\n return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)\n else:\n (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, mrnstrain, mrnstest) = train_regression(x2, y2, y2label, percentile, modelType, feature_headers, mrnx)\n model_weights_conf_term = np.zeros((x2.shape[1]), dtype=float)\n test_auc_mean = auc_test; r2test_mean= r2test;\n test_auc_mean_ste = 0; r2test_ste=0\n\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n if return_data_for_error_analysis == True:\n print('lets analyse this')\n return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)\n\n if modelType == 'mlp':\n print ('you need to implement gradient to get top weights. 
')\n return (filterSTR, [])\n\n sorted_ix = np.argsort(-1* abs(model_weights))\n weights = model_weights[sorted_ix]\n terms_sorted = model_weights_conf_term[sorted_ix]\n\n factors = np.array(feature_headers)[sorted_ix]\n x2_reordered = x2[:,sorted_ix]\n xtest_reordered = xtest[:, sorted_ix]\n\n ytestpred = model.predict(xtest)\n fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, ytestpred)\n operating_Thresholds = []\n operating_levels = [0, 0.0001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n ix_level = 0\n\n for ix, thr in enumerate(thresholds):\n if fpr[ix] >= operating_levels[ix_level]:\n operating_Thresholds.append(thr)\n ix_level += 1\n if ix_level == len(operating_levels):\n break\n\n operating_Thresholds = thresholds\n report_metrics = 'Test set metrics:\\n'\n prec_list = []\n recall_list = []\n spec_list = []\n for t in operating_Thresholds:\n tp = ((ytestlabel > 0) & (ytestpred.ravel() > t)).sum()*1.0\n tn = ((ytestlabel == 0) & (ytestpred.ravel() <= t)).sum()*1.0\n fn = ((ytestlabel > 0) & (ytestpred.ravel() <= t)).sum()*1.0\n fp = ((ytestlabel == 0) & (ytestpred.ravel() > t)).sum()*1.0\n\n sens = tp / (tp + fn)\n spec = tn / (tn + fp)\n ppv = tp / (tp + fp)\n acc = (tp + tn) / (tp + tn + fp + fn)\n f1 = 2*tp / (2*tp + fp + fn)\n\n report_metrics += '@threshold:{0:4.3f}, sens:{1:4.3f}, spec:{2:4.3f}, ppv:{3:4.3f}, acc:{4:4.3f}, f1:{5:4.3f} total+:{6:4.3f}\\n'.format(t, sens, spec, ppv, acc, f1, tp+fp)\n prec_list.append(ppv)\n recall_list.append(sens)\n spec_list.append(spec)\n\n print('total variables', x2.sum(axis=0).shape, ' and total subjects:', x2.shape[0])\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n print(report_metrics)\n\n occurances = (x2 != 0).sum(axis=0)[sorted_ix]\n zip_weights = {}\n sig_headers = []\n feature_categories = {}\n for i in range(0, (abs(model_weights)>0).sum()):\n fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, xtest_reordered[:,i].ravel())\n feature_auc_indiv = metrics.auc(fpr, tpr)\n corrs = corr_matrix_filtered[sorted_ix[i],:].ravel()\n top_corr_ix = np.argsort(-1*abs(corrs))\n corr_string = 'Correlated most with:\\n'+' '.join( [str(corr_headers_filtered[top_corr_ix[j]])+ ':' + \"{0:4.3f}\\n\".format(corrs[top_corr_ix[j]]) for j in range(0,10)] )\n\n tp = ((y2label > 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0\n tn = ((y2label == 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0\n fn = ((y2label > 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0\n fp = ((y2label == 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0\n\n if fp*fn*tp*tn == 0:\n oratio = np.nan\n low_OR = np.nan\n high_OR = np.nan\n else:\n oratio = tp*tn/(fp*fn)\n se = np.sqrt(1/tp + 1/fp + 1/tn + 1/fn)\n low_OR = np.exp(np.log(oratio) - 1.96 * se)\n high_OR = np.exp(np.log(oratio) + 1.96 * se)\n try:\n feature_categories[factors[i].split(':')[0]] += weights[i]\n except:\n feature_categories[factors[i].split(':')[0]] = weights[i]\n\n star = ' '\n if (low_OR > 1 or high_OR < 1): #or (weights[i]+terms_sorted[i]) < 0 or (weights[i]-terms_sorted[i]) > 0\n sig_headers.append(factors[i])\n star = '*'\n print(\"{8} {3} | coef {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}] | OR_adj {9:4.3f} [{10:4.3f} {11:4.3f}] | occ: {4} | OR_unadj: {5:4.3f} [{6:4.3f} {7:4.3f}] | indivs 
AUC:{12:4.3f}\".format(weights[i], weights[i]-terms_sorted[i], weights[i]+terms_sorted[i], factors[i], occurances[i], oratio, low_OR, high_OR, star, np.exp(weights[i]), np.exp(weights[i]-terms_sorted[i]), np.exp(weights[i]+terms_sorted[i]), feature_auc_indiv))\n print(corr_string)\n\n for k in feature_categories:\n print (k, \":\", feature_categories[k])\n\n if return_data and return_data_transformed:\n return (x2, y1, y1label, feature_headers, mrns, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)\n elif return_data and not return_data_transformed:\n return (original_data[0], original_data[1], original_data[2], original_data[3], original_data[4], filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)\n else:\n return (feature_headers, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)", "def train_lr_l2_model(args, data):\n acc_list = []\n data = np.array(data)\n data = data[data[:, 0].argsort()]\n x_data = data[:, 1:-1]\n y_data = data[:, -1]\n for random_num in range(0, 10):\n X_train, X_test, y_train, y_test = train_test_split(\n x_data,\n y_data,\n test_size=0.2,\n random_state=random_num + random_seed)\n\n # use the one vs rest to train the lr model with l2 \n pred_test = []\n for i in range(0, args.num_class):\n y_train_relabel = np.where(y_train == i, 1, 0)\n y_test_relabel = np.where(y_test == i, 1, 0)\n lr = LogisticRegression(C=10.0, random_state=0, max_iter=100)\n lr.fit(X_train, y_train_relabel)\n pred = lr.predict_proba(X_test)\n pred_test.append(pred[:, -1].tolist())\n pred_test = np.array(pred_test)\n pred_test = np.transpose(pred_test)\n c_index = np.argmax(pred_test, axis=1)\n acc = accuracy_score(y_test.flatten(), c_index)\n acc_list.append(acc)\n print(\"pass:{}-acc:{}\".format(random_num, acc))\n print(\"the avg acc is {}\".format(np.mean(acc_list)))", "def train_model_track1(train_pool, validation_pool, validation, test_private, features, data_path):\n\n cat = CatBoostClassifier(iterations=3000,\n loss_function='Logloss',\n l2_leaf_reg=2,\n random_seed=100,\n scale_pos_weight=11.92984045,\n eval_metric='AUC',\n use_best_model=True,\n early_stopping_rounds=100,\n max_depth=7,\n max_bin=100\n )\n\n cat.fit(train_pool, eval_set=validation_pool)\n valid_pred_prob = cat.predict_proba(validation.loc[:, features].values)[:, 1]\n valid_score_90 = scoring.rejection90(validation.label.values, valid_pred_prob,\n sample_weight=validation.weight.values)\n # 0.771923225\n print(f\"Score at rejection 90 {valid_score_90}\")\n predictions = cat.predict_proba(test_private.loc[:, features].values)[:, 1]\n prediction_file = os.path.join(data_path, \"test_private.csv\")\n print(f\"Track 1 prediction on private test data is present at {prediction_file}\")\n pd.DataFrame(data={\"prediction\": predictions}, index=test_private.index).to_csv(prediction_file,\n index_label=utils.ID_COLUMN)\n model_file = os.path.join(data_path, 'track_1_best_mode.cbm')\n print(f\"Track 1 best model is saved at {model_file}\")\n cat.save_model(model_file, format='cbm')", "def train(self):\n TM = TrainingMode()\n\n \"\"\"\n Training Arguments\n \"\"\"\n train_args = {'use_global_valid': 
False,\n 'use_custom_obj': False,\n 'show_importance': False,\n 'save_final_pred': True,\n 'save_final_pred_train': False,\n 'save_cv_pred': True,\n 'save_cv_pred_train': False,\n 'save_csv_log': True,\n 'loss_fuc': self.rmse,\n 'append_info': 'Yuanan Bike'}\n\n \"\"\"\n Cross Validation Arguments\n \"\"\"\n cv_args = {'n_cv': 10}\n\n \"\"\"\n Base Parameters\n \"\"\"\n base_parameters = self.get_base_params('dnn')\n\n \"\"\"\n Auto Train with Logs of Boost Round\n \"\"\"\n pg_list = [\n [['learning_rate', [0.05]]]\n ]\n train_seed_list = [68]\n cv_seed_list = [95]\n TM.auto_train_boost_round('dnn', num_boost_round=10, n_epoch=1, full_grid_search=True,\n train_seed_list=train_seed_list, cv_seed_list=cv_seed_list,\n base_parameters=base_parameters, parameter_grid_list=pg_list,\n save_final_pred=True, train_args=train_args, cv_args=cv_args)\n\n \"\"\"Train Different Rounds\"\"\"\n # num_boost_round_list = [83, 85, 87]\n # self.train_diff_round('xgb', TM, num_boost_round_list=num_boost_round_list, n_epoch=1, full_grid_search=True,\n # train_seed_list=train_seed_list, cv_seed_list=cv_seed_list,\n # base_parameters=base_parameters, parameter_grid_list=pg_list, save_final_pred=True,\n # train_args=train_args, cv_args=cv_args)", "def Model2():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n \n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*0.5\n \n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n tan.thresholds=0.5*np.ones(tan.inputs.shape)\n sn.thresholds = tan.GetActivationFunction()(np.zeros(sn.inputs.shape)-.5)\n sp.thresholds = tan.GetActivationFunction()(np.zeros(sp.inputs.shape)-.5)\n c2tan.weights = np.random.random(c2tan.weights.shape)/-100.0\n c2tan.learningEnabled = True\n\n c2tan.learningFunction = TAN_LearningRule\n \n return M2", "def train_model_track2(train_pool, validation_pool, validation, features, data_path):\n cat = CatBoostClassifier(iterations=1200,\n loss_function='Logloss',\n l2_leaf_reg=2,\n random_seed=100,\n scale_pos_weight=11.92984045,\n eval_metric='AUC',\n use_best_model=True,\n early_stopping_rounds=100,\n max_depth=7,\n max_bin=100)\n\n cat.fit(train_pool, eval_set=validation_pool)\n\n valid_pred_prob = cat.predict_proba(validation.loc[:, features].values)[:, 1]\n valid_score_90 = scoring.rejection90(validation.label.values, valid_pred_prob,\n sample_weight=validation.weight.values)\n print(f\"Score at rejection 90 for validation {valid_score_90}\")\n model_file = os.path.join(data_path, 'track_2_best_model.cbm')\n print(f\"Track 2 best model is being saved at {model_file}\")\n cat.save_model(model_file, format='cbm')", "def run_train_model(self,model,X_train,y_train,X_test,y_test,model_path,logs_path,plots_path,activate_tensorboard=0,run_id=0,tl_type='full_fine_tune'):\t\t\t\n\t\timport tensorflow as tf\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom tensorflow.keras.models import 
load_model\n\t\tfrom tensorflow.keras.callbacks import ModelCheckpoint\n\t\tfrom tensorflow.keras.callbacks import TensorBoard\n\n\t\tmodel_file_path=model_path+'/trained_model_'+str(run_id)+'.h5'\n\t\t\n\t\t#X_train, X_test, y_train, y_test = train_test_split(X_in, Y_out, test_size = self.split_ratio)\n\t\tprint(\"Data Split Completed\")\n\t\t\n\t\t#Checkpointer to save the best model\n\t\tcheckpointer = tf.keras.callbacks.ModelCheckpoint(model_file_path, verbose=1, save_best_only=True,monitor='val_loss',save_weights_only=True)\n\t\t\n\t\tcallbacks=[checkpointer]\n\t\t\n\t\tif(activate_tensorboard==1):\n\t\t\t#Activating Tensorboard for Visualization\n\t\t\ttensorboard = TensorBoard(log_dir=logs_path,histogram_freq=1, write_graph=True, write_images=True)\n\t\t\tcallbacks=[checkpointer,tensorboard]\n\t\t\n\t\t#tensorboard = TensorBoard(log_dir=logs_path,histogram_freq=1, write_graph=True, write_images=True)\n\n\t\thistory=model.fit(x=X_train, y=y_train, validation_data=(X_test,y_test), epochs=self.epochs, batch_size=self.batch_size,callbacks=callbacks)\n\t\t\n\t\ttrainviz=TrainViz()\n\t\t#trainviz.training_plot(history,plots_path,run_id)\n\t\t\n\t\tif(tl_type=='variable_lr'):\n\t\t\tinference_model=load_model(model_file_path, custom_objects={'LRMultiplier': LRMultiplier})\n\t\telse:\n\t\t\tmodel.load_weights(model_file_path)\n\t\t\n\t\tprint('Compiling test metrics...')\n\t\ty_pred=model.predict(X_test)\n\n\t\tmetrics_eval=MetricsEval()\n\t\teval_metrics_reg,accuracy_metrics_df_reg=metrics_eval.metrics_eval_base(y_pred[0],y_test[0],logs_path)\n\t\teval_metrics_cla,accuracy_metrics_df_cla=metrics_eval.metrics_eval_classification(y_pred[1],y_test[1],logs_path)\n\n\t\treturn model,eval_metrics_reg,accuracy_metrics_df_reg,eval_metrics_cla,accuracy_metrics_df_cla", "def train_lenet5(d, model, learning_rate=1e-3, epochs=50, batch=32, print_cost = True):\n path = os.getcwd()\n # Loading the dataset\n X_train, Y_train_orig = load_dataset(d, img_size=32, mode=\"train-test\")\n n_values = len(set(Y_train_orig))\n os.chdir(path)\n\n # Convert the labels to one hot matrix\n Y_train = np.eye(n_values)[Y_train_orig[:]]\n\n print(\"Number of training examples = \" + str(X_train.shape[0]))\n print(\"Total number of classes: \", n_values)\n print(\"X_train shape: \" + str(X_train.shape))\n print(\"Y_train shape: \" + str(Y_train.shape))\n\n (m, n_H0, n_W0, n_C0) = X_train.shape\n n_y = n_values\n\n # Create placeholders\n X = tf.placeholder(tf.float32, shape = (None, n_H0, n_W0, n_C0), name = \"X\")\n Y = tf.placeholder(tf.float32, shape = (None, n_y), name = \"Y\")\n batch_size = tf.placeholder(tf.int32)\n\n features, labels = (X, Y)\n\n dataset = tf.data.Dataset.from_tensor_slices((features,labels)).shuffle(buffer_size=1000).repeat().batch(batch)\n # dataset = tf.data.Dataset.from_tensor_slices((features,labels)).repeat().batch(BATCH_SIZE)\n\n # Creates and interator for the traning data\n iter = dataset.make_initializable_iterator()\n x, y = iter.get_next()\n\n # Create model folder\n if not os.path.exists(path+\"/models\"):\n os.makedirs(\"models\")\n\n # Model architecture\n if model == \"model1\":\n logits = lenet5(x, n_y)\n if not os.path.exists(path+\"/models/model1\"):\n os.makedirs(\"models/model1/saved\")\n save_dir = os.path.join(path, \"models\", \"model1\",\"saved\")\n save_path = os.path.join(save_dir, 'lenet5')\n elif model == \"model2\":\n logits = lenet5_improved(x, n_y)\n if not os.path.exists(path+\"/models/model2\"):\n os.makedirs(\"models/model2/saved\")\n save_dir = 
os.path.join(path, \"models\", \"model2\",\"saved\")\n save_path = os.path.join(save_dir, 'lenet5_improved')\n else:\n print(\"Model {} is not a valid model\".format(m))\n\n # Compute the cost\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = logits, labels = y))\n\n # Create the optimizer\n optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost)\n\n # Initialize variables\n init = tf.global_variables_initializer()\n\n costs = []\n acc = []\n num_minibatches = int(m / batch)+1\n\n print(\"#============ Training model ============#\")\n print(\"The total number of minibatches is: \", num_minibatches)\n\n saver = tf.train.Saver()\n\n # Train the neural netowrk\n with tf.Session() as sess:\n sess.run(init)\n sess.run(iter.initializer, feed_dict={ X: X_train, Y: Y_train, batch_size: batch})\n\n for epoch in range(epochs):\n epoch_cost = 0.\n epoch_acc = 0.\n print(\"Epoch:{}/{}\".format(epoch, epochs))\n\n for minibatch in tqdm(range(num_minibatches)):\n _, minibatch_cost = sess.run([optimizer, cost])\n epoch_cost += minibatch_cost / num_minibatches\n\n # Calculate the correct predictions\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n # Calculate accuracy on the test set\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n epoch_acc = sess.run(accuracy)\n epoch_acc += epoch_acc / num_minibatches\n\n if print_cost == True and epoch % 1 == 0:\n print (\"Loss:{}, Accuracy:{}\".format(epoch_cost, epoch_acc))\n costs.append(epoch_cost)\n acc.append(epoch_acc)\n\n if print_cost == True and epoch % 10 == 0:\n saver.save(sess=sess, save_path=save_path) # Save the model\n\n saver.save(sess=sess, save_path=save_path)\n print (\"\\nModel has been trained and saved in: \", save_path)\n\n # if print_cost:\n # # Plot the loss\n # plt.plot(np.squeeze(costs))\n # plt.ylabel('Loss')\n # plt.xlabel('Iterations')\n # plt.title(\"Learning rate =\" + str(learning_rate))\n # plt.show()\n\n return n_values", "def train_model(self):\n retrieved_planner_type, retrieved_path, final_planner_type, final_path, num_paths, num_NN_paths = self.retrieved_and_final_path\n # record stats\n self.total_num_paths.append(num_paths)\n self.total_num_paths_NN.append(num_NN_paths)\n self.total_new_nodes.append(self.total_new_node)\n self.total_new_nodes_NN.append(self.total_new_node_NN)\n self.plan_times.append(self.plan_time)\n if retrieved_planner_type is None:\n self.plan_mode.append(0) # 0 for pfs, 1 for rr\n else:\n self.plan_mode.append(1)\n # depending on retrieved_planner_type and final_planner, train the network\n if (retrieved_planner_type is None and final_planner_type == PlannerType.NEURAL) \\\n or (retrieved_planner_type == PlannerType.NEURAL and final_planner_type == PlannerType.NEURAL):\n self.train_sample.append(0) # no path trained\n to_save = {}\n to_save['loss'] = self.losses\n to_save['total_num_paths'] = self.total_num_paths\n to_save['total_num_paths_NN'] = self.total_num_paths_NN\n to_save['plan_time'] = self.plan_times\n to_save['plan_mode'] = self.plan_mode\n to_save['total_new_node'] = self.total_new_nodes\n to_save['total_new_node_NN'] = self.total_new_nodes_NN\n to_save['train_sample'] = self.train_sample\n utility.save_info(to_save, self.model_path+'lightning_res.pkl')\n return\n rospy.loginfo('Lightning: Training Neural Network...')\n # receive obstacle information\n obs = rospy.wait_for_message('obstacles/obs', Float64Array)\n obs = obs.values\n obs_i = rospy.wait_for_message('obstacles/obs_i', Int32)\n obs_i = 
obs_i.data\n # if it is a new obs, add to the obs list\n if len(self.obs_i) == 0 or obs_i != self.obs_i[-1]:\n self.obs_i.append(obs_i)\n self.obs.append(obs)\n\n obs = torch.FloatTensor(obs)\n\n dataset, targets, env_indices = plan_general.transformToTrain(final_path, len(final_path), obs, obs_i)\n self.data_all += list(zip(dataset, targets, env_indices))\n self.num_trained_samples += len(targets)\n added_data = list(zip(dataset,targets,env_indices))\n bi = np.concatenate( (obs.numpy().reshape(1,-1).repeat(len(dataset),axis=0), dataset), axis=1).astype(np.float32)\n targets = np.array(targets)\n bi = self.normalize_func(bi)\n targets = self.normalize_func(targets)\n bi = torch.FloatTensor(bi)\n bt = torch.FloatTensor(targets)\n self.model.zero_grad()\n bi=utility.to_var(bi, self.device)\n bt=utility.to_var(bt, self.device)\n self.model.observe(bi, 0, bt)\n self.num_path_trained += 1\n # record the number of samples trained\n train_sample = len(dataset)\n self.train_sample.append(train_sample)\n # rehersal\n if self.num_path_trained % self.freq_rehersal == 0 and len(self.data_all) > self.batch_rehersal:\n rospy.loginfo('Lightning: Rehersal...')\n print('rehersal...')\n sample = random.sample(self.data_all, self.batch_rehersal)\n dataset, targets, env_indices = list(zip(*sample))\n dataset, targets, env_indices = list(dataset), list(targets), list(env_indices)\n obs = np.array(self.obs)\n bi = np.concatenate( (obs[env_indices], dataset), axis=1).astype(np.float32)\n bt = targets\n bi = torch.FloatTensor(bi)\n bt = torch.FloatTensor(bt)\n bi, bt = self.normalize_func(bi), self.normalize_func(bt)\n self.model.zero_grad()\n bi=utility.to_var(bi, self.device)\n bt=utility.to_var(bt, self.device)\n self.model.observe(bi, 0, bt, False) # train but don't remember\n # obtain the loss after training:\n loss = self.model.loss(self.model.forward(bi), bt)\n loss = loss.data.cpu()\n print(\"loss: %f\" % (loss))\n print('planner type: %d' % (final_planner_type))\n self.losses.append(loss.data.cpu().item())\n\n if self.num_path_trained % self.freq_save == 0:\n # save loss and planner type\n to_save = {}\n to_save['loss'] = self.losses\n to_save['total_num_paths'] = self.total_num_paths\n to_save['total_num_paths_NN'] = self.total_num_paths_NN\n to_save['plan_time'] = self.plan_times\n to_save['plan_mode'] = self.plan_mode\n to_save['total_new_node'] = self.total_new_nodes\n to_save['total_new_node_NN'] = self.total_new_nodes_NN\n to_save['train_sample'] = self.train_sample\n utility.save_info(to_save, self.model_path+'lightning_res.pkl')\n\n # write trained model to file\n utility.save_state(self.model, self.torch_seed, self.np_seed, self.py_seed, self.model_path+self.model_name)\n # notify planners to update the model\n msg = UInt8(0)\n rospy.loginfo('Lightning: Notify planner to update network...')\n if self.use_pfs:\n self._notify_update('pfs')\n if self.use_rr:\n self._notify_update('rr')", "def load_soil_sample_data2(sl1):\n # soil\n sl1.cohesion = 30 # [Pa]\n sl1.phi = 0 # [degrees]\n sl1.unit_dry_weight = 17000 # [N/m3]", "def train_refinement_network():\n \n model_dict = {} # all the different models\n model_dict['UNet'] = UNet\n model_dict['UNetLite'] = UNetLite\n model_dict['UNetWide40'] = UNetWide40\n model_dict['UNetWide48'] = UNetWide48\n model_dict['UNetDS64'] = UNetDS64\n model_dict['UNetWide64'] = UNetWide64\n model_dict['MultiResUNet1D'] = MultiResUNet1D\n model_dict['MultiResUNetDS'] = MultiResUNetDS\n\n\n mdlName1 = 'UNetDS64' # approximation network\n mdlName2 = 'MultiResUNet1D' # 
refinement network\n \n length = 1024 # length of the signal\n\n # 10 fold cross validation\n for foldname in range(10):\n\n print('----------------')\n print('Training Fold {}'.format(foldname+1))\n print('----------------')\n # loading training data\n dt = pickle.load(open(os.path.join('data','train{}.p'.format(foldname)),'rb'))\n X_train = dt['X_train']\n Y_train = dt['Y_train']\n # loading validation data\n dt = pickle.load(open(os.path.join('data','val{}.p'.format(foldname)),'rb'))\n X_val = dt['X_val']\n Y_val = dt['Y_val']\n\n # loading metadata\n dt = pickle.load(open(os.path.join('data','meta{}.p'.format(foldname)),'rb'))\n max_ppg = dt['max_ppg']\n min_ppg = dt['min_ppg']\n max_abp = dt['max_abp']\n min_abp = dt['min_abp']\n\n\n Y_train = prepareLabel(Y_train) # prepare labels for training deep supervision\n \n Y_val = prepareLabel(Y_val) # prepare labels for training deep supervision\n \n \n mdl1 = model_dict[mdlName1](length) # load approximation network\n mdl1.load_weights(os.path.join('models','{}_model1_fold{}.h5'.format(mdlName1,foldname))) # load weights\n\n X_train = prepareDataDS(mdl1, X_train) # prepare training data for 2nd stage, considering deep supervision\n X_val = prepareDataDS(mdl1, X_val) # prepare validation data for 2nd stage, considering deep supervision\n\n mdl1 = None # garbage collection\n\n \n mdl2 = model_dict[mdlName2](length) # create refinement network\n\n # loss = mse\n mdl2.compile(loss='mean_squared_error',optimizer='adam',metrics=['mean_absolute_error'])\n\n checkpoint2_ = ModelCheckpoint(os.path.join('models','{}_model2_fold{}.h5'.format(mdlName2,foldname)), verbose=1, monitor='val_loss',save_best_only=True, mode='auto') \n\n # train refinement network for 100 epochs\n history2 = mdl2.fit(X_train,Y_train['out'],epochs=100,batch_size=192,validation_data=(X_val,Y_val['out']),callbacks=[checkpoint2_])\n\n pickle.dump(history2, open('History/{}_model2_fold{}.p'.format(mdlName2,foldname),'wb')) # save training history\n\n time.sleep(300) # pause execution for a while to free the gpu", "def run_optuna2():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n dataset = dataset.sample(frac=0.02)\n print(dataset.shape)\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n # X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n # dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n # dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n #\"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"boosting_type\": trial.suggest_categorical(\"boosting_type\", ['gbdt', 'rf']),\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n 
\"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n\n model = lgb.train(params, dtrain)\n vd_preds = model.predict(X_test)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. Returns max\n accuracy = accuracy_score(y_test, vd_preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def Model3():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n snr = M2.GetGroupByName(\"SNr/GPi\")\n\n \n \n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n\n sn.SetActivationFunction(neu.Tanh_plus)\n sp.SetActivationFunction(neu.Tanh_plus)\n snr.SetActivationFunction(neu.Tanh_plus)\n\n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*-0.25\n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n tan.thresholds=0.5*np.ones(tan.inputs.shape)\n hb = np.average(sn.thresholds)/-tan.size\n HB = np.ones(tan.inputs.shape)*hb\n sn.thresholds = 0.1*np.ones(sn.activations.shape)\n sp.thresholds = 0.1*np.ones(sp.activations.shape)\n #sn.thresholds = -1*tan.GetActivationFunction()(np.ones(sn.inputs.shape)-1)\n #sp.thresholds = -1*tan.GetActivationFunction()(np.ones(sp.inputs.shape)-1)\n #sn.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sn.inputs.shape))\n #sp.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sp.inputs.shape))\n #c2tan.weights = np.random.random(c2tan.weights.shape)\n c2tan.weights = np.ones(c2tan.weights.shape)*1.5\n c2tan.mask = np.dot(np.ones(tan.inputs.shape),\n np.array([[1,1,1,0,0,0,0,0,0]]))\n c2tan.learningEnabled = True\n c2tan.learningFunction = TAN_LearningRule\n\n M2.SetParameter(\"TRACE_TAN\", True)\n M2.SetParameter(\"HB\", HB)\n return M2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
test_tal1_lmo2(model) > DataFrame Test model on 5C data from TAL1 and LMO2 regions.
def test_tal1_lmo2(self, model): "prepare dataloader" data_loader = self.prepare_tal1_lmo2() "test model" self.cfg.full_test = True self.cfg.compute_pca = False self.cfg.get_zero_pred = False _, _, _, pred_df, _ = model.test(data_loader) "save predictions" pred_df.to_csv(self.cfg.output_directory + "hiclstm_%s_predictions_chr%s.csv" % (self.cell, str(self.chr)), sep="\t") return pred_df
[ "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_df", "def perform_tal1_ko(self, model):\n\n \"save representations\"\n self.chr = 1\n self.cfg.get_tal1_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"tal1\"\n _, ko_pred_df, _ = self.perform_ko(model)\n return ko_pred_df", "def prepare_tal1_lmo2(self):\n\n \"load Hi-C like data\"\n tal_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/tal_df.txt\", sep=\"\\t\")\n lmo2_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n\n \"preprocess\"\n tal_df = tal_df.drop(['Unnamed: 0'], axis=1)\n lmo2_df = lmo2_df.drop(['Unnamed: 0'], axis=1)\n tal_df[['i', 'j']] = tal_df[['i', 'j']].astype('int64')\n lmo2_df[['i', 'j']] = lmo2_df[['i', 'j']].astype('int64')\n\n \"prepare indices and values for TAL1 in chromosome 1\"\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n input_idx_tal1, values_tal1 = get_samples_sparse(tal_df, 1, cfg)\n values_tal1 = F.pad(input=values_tal1, pad=(0, 4, 0, 0), mode='constant', value=0)\n input_idx_tal1 = F.pad(input=input_idx_tal1, pad=(0, 0, 0, 4, 0, 0), mode='constant', value=0)\n values = torch.cat((values, values_tal1.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_tal1), 0)\n\n if self.cfg.get_tal1_only:\n \"create tal dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n return data_loader\n\n if self.cfg.get_lmo2_only:\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n\n \"prepare indices and values for LMO2 in chromosome 11\"\n input_idx_lmo2, values_lmo2 = get_samples_sparse(lmo2_df, 11, cfg)\n values = torch.cat((values, values_lmo2.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_lmo2), 0)\n\n \"create dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n\n return data_loader", "def train_tal1_lmo2(self, model):\n\n \"summary writer\"\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr)\n\n \"initialize optimizer and prepare dataloader\"\n self.cfg.get_tal1_only = False\n self.cfg.get_lmo2_only = False\n optimizer, criterion = model.compile_optimizer()\n data_loader = self.prepare_tal1_lmo2()\n\n \"train and save the model\"\n model.train_model(data_loader, criterion, optimizer, writer)\n torch.save(model.state_dict(), cfg.model_dir + cfg.model_name + '.pth')", "def example_bed_l2_h5():\n yield h5py.File(\"tests/test_data/example_test_2label.h5\", \"r\")", "def perform_ko(self, model):\n\n cfg = self.cfg\n\n \"load data\"\n if cfg.run_tal and cfg.hnisz_region == \"tal1\":\n self.cfg.get_tal1_only = True\n data_loader = self.prepare_tal1_lmo2()\n elif cfg.run_tal and cfg.hnisz_region == \"lmo2\":\n self.cfg.get_lmo2_only = True\n data_loader = self.prepare_tal1_lmo2()\n else:\n data_loader = get_data_loader_chr(cfg, self.chr, shuffle=False)\n\n \"get zero embed\"\n cfg.full_test = False\n cfg.compute_pca = False\n cfg.get_zero_pred = True\n zero_embed = test_model(model, cfg, self.chr)\n\n \"get knockout indices 
depending on experiment\"\n if cfg.run_tal:\n if cfg.hnisz_region == \"tal1\":\n cfg.ko_experiment = \"ctcf\"\n indices = cfg.tal1ko_indices\n elif cfg.hnisz_region == \"lmo2\":\n cfg.ko_experiment = \"ctcf\"\n indices = np.array(cfg.lmo2ko_indices) + get_cumpos(cfg, 11)\n else:\n if cfg.ko_experiment == \"ctcf\":\n if cfg.ctcf_indices == \"all\":\n indices = ko_ob.get_ctcf_indices()\n indices = sample(list(indices), 10)\n else:\n indices = ko_ob.cfg.ctcf_indices_22\n elif cfg.ko_experiment == \"foxg1\":\n indices = cfg.foxg1_indices\n elif cfg.ko_experiment == \"tadbs\":\n indices = ko_ob.get_tadbs()\n\n \"plotting and metrics\"\n n_indices = len(indices)\n diff_list = np.zeros((n_indices, 11))\n diff_mat = np.zeros((n_indices, 200, 200))\n \"run for all indices\"\n for i, indice in enumerate(indices):\n \"get representations\"\n representations, start, stop, pred_data = self.get_trained_representations(method=\"hiclstm\")\n\n \"alter representations\"\n representations, zero_embed = self.ko_representations(representations, start, indice, zero_embed,\n mode=cfg.ko_mode)\n\n if self.cfg.load_ko:\n ko_pred_df = pd.read_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n else:\n \"run through model using altered representations, save ko predictions\"\n _, ko_pred_df = model.perform_ko(data_loader, representations, start, zero_embed, mode=\"ko\")\n if self.cfg.save_kopred:\n ko_pred_df.to_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n\n \"compute difference between WT and KO predictions\"\n if self.cfg.compute_avg_diff:\n ko_diffs = self.compute_kodiff(pred_data, ko_pred_df, indice)\n diff_list[i] = ko_diffs\n\n \"get merged heatmap\"\n pred_data = pd.merge(pred_data, ko_pred_df, on=[\"i\", \"j\"])\n pred_data = pred_data.rename(columns={\"ko_pred\": \"v\"})\n hic_mat, st = get_heatmaps(pred_data, no_pred=False)\n # simple_plot(hic_mat, mode=\"reds\")\n\n \"get diff mat\"\n hic_win = indices_diff_mat(indice, st, hic_mat, mode=cfg.ko_experiment)\n n_win = len(hic_win)\n diff_mat[i, :n_win, :n_win] = hic_win\n\n diff_mat = diff_mat.mean(axis=0)\n ko = np.triu(diff_mat)\n pred = np.tril(diff_mat).T\n diff_mat = ko - pred\n simple_plot(diff_mat, mode=\"diff\")\n np.save(cfg.output_directory + \"tad_diff_zero_ctctn.npy\", diff_mat)\n mean_diff = np.mean(diff_list, axis=1)\n return mean_diff, ko_pred_df, pred_data", "def lacer(df, df1, train_start_date, train_end_date, test_start_date, test_end_date, request_type, CD, predictor_num): #Once model is ready, replace df with csv\n\n #Create Training and Testing Sets\n dftrain = preprocessing(df , train_start_date, train_end_date)\n dftrain = dftrain.reset_index(drop = True)\n dftest = preprocessing(df1, test_start_date, test_end_date)\n dftest = dftest.reset_index(drop = True)\n\n #Reserve test set for training on all 3 models. 
\n y_train, y_test = lc.CreateTestSet(dftest, predictor_num)\n y_test = y_test.reshape((-1, 1))\n\n\n## 2 Models\n #Model1: CD\n modelCD = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainCD = dftrain[dftrain['CD'] == CD].reset_index(drop = True)\n\n X_trainCD, X_testCD = lc.CreateTrainSet(dftrainCD, predictor_num)\n X_testCD = X_testCD.reshape((-1, 1))\n modelCD.fit(X_trainCD, X_testCD)\n\n y_predCD = modelCD.predict(y_train)\n\n #Model2: Request_type\n modelRT = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainRT = dftrain[dftrain['RequestType'] == request_type].reset_index(drop = True)\n\n X_trainRT, X_testRT = lc.CreateTrainSet(dftrainRT, predictor_num)\n X_testRT = X_testRT.reshape((-1, 1))\n\n modelRT.fit(X_trainRT, X_testRT)\n\n y_predRT = modelRT.predict(y_train)\n\n\n #Average out all predictions\n y_predFinal = (y_predCD + y_predRT )/2\n\n #Return metrics \n return lc.metrics(y_predFinal, y_test)", "def test_split_otu_table_on_sample_metadata(self):\n actual = list(split_otu_table_on_sample_metadata(self.otu_table_f1,\n self.mapping_f1,\n \"Treatment\"))\n for id_, e in actual:\n try:\n parse_biom_table(e)\n except:\n print e\n actual = [(id_,parse_biom_table(e)) for id_, e in actual]\n exp = [(id_,parse_biom_table(e)) for id_, e in otu_table_exp1]\n \n actual.sort()\n exp.sort()\n \n for a,e in zip(actual,exp):\n self.assertEqual(a,e,\"OTU tables are not equal:\\n%s\\n%s\" % \\\n (format_biom_table(a[1]),format_biom_table(e[1])))", "def test5_SingleObservationSelectByIntentNewModel(self):\n\n # print out some values for debugging\n debug=False\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test5\")\n self.inpms += \".test5\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n sjran = setjy(vis=self.inpms, field='', spw='', modimage='',\n selectdata=True, intent=\"*AMPLI*\",\n scalebychan=False, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2012', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n if 'MODEL_DATA' not in cols:\n #raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n raise AssertionError, \"setjy(field='Titan') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto3'] = tblocal.getcell('MODEL_DATA', 10)\n #record['long3'] = tblocal.getcell('MODEL_DATA', 11)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['med4'] = tblocal.getcell('MODEL_DATA', 4)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n # Titan\n if self.ismms:\n # row numbers for specific data changed...\n #record['auto2'] = tblocal.getcell('MODEL_DATA', 1892)\n #record['long2'] = tblocal.getcell('MODEL_DATA', 1930)\n #record['auto3'] = tblocal.getcell('MODEL_DATA', 2838)\n #record['med3'] = tblocal.getcell('MODEL_DATA', 2854)\n #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto2query = querystr+' AND DATA_DESC_ID==2 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(auto2query)\n record['auto2'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n 
long2query = querystr+' AND DATA_DESC_ID==2 AND ANTENNA1==5 AND ANTENNA2==7 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(long2query)\n record['long2'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==3 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n med3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==1 AND ANTENNA2==4 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(med3query)\n record['med3'] = subt.getcell('MODEL_DATA', 0)\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n else:\n\t\t record['auto2'] = tblocal.getcell('MODEL_DATA', 270)\n\t\t record['long2'] = tblocal.getcell('MODEL_DATA', 310)\n\t\t record['auto3'] = tblocal.getcell('MODEL_DATA', 408)\n\t\t record['med3'] = tblocal.getcell('MODEL_DATA', 424)\n\t\t record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n #record['history'] = self.get_last_history_line(self.inpms, origin='setjy', hint='Uranus')\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='setjy', hint='Titan')\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n if debug:\n if not self.ismms: print \"self.result['history']=\",self.result['history']\n print \"self.result['auto0']=\",self.result['auto0']\n print \"self.result['auto3']=\",self.result['auto3']\n\n #\"\"\"Flux density in HISTORY (Uranus)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus:\", \"V=0.0] Jy\"])\n #\"\"\"WVR spw\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[ 25.33798409+0.j,25.33798409+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n #self.check_eq(self.result['wvr'], numpy.array([[ 25.33490372+0.j, 25.33490372+0.j]]),0.0001)\n #\"\"\"Zero spacing of spw 3\"\"\"\n\t#self.check_eq(self.result['auto3'], numpy.array([[ 66.72530365+0.j],[ 66.72530365+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n\t#self.check_eq(self.result['auto3'], numpy.array([[ 66.71941376+0.j], [ 66.71941376+0.j]]),0.0001)\n #\"\"\"Zero spacing of spw 4\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[ 70.40153503+0.j],[ 70.40153503+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n #self.check_eq(self.result['auto4'], numpy.array([[ 70.39561462+0.j], [ 70.39561462+0.j]]), 0.0001)\n #Titan\n \"\"\"Zero spacing of spw 2\"\"\"\n self.check_eq(self.result['auto2'][0][0], (6.69543791+0.j),0.0001)\n\n \"\"\"Long spacing of spw 2\"\"\"\n self.check_eq(self.result['long2'][0][0],(6.09987020 +2.47228783e-11j),0.0001)\n\n \"\"\"Zero spacing of spw 3\"\"\"\n self.check_eq(self.result['auto3'][0][0], (3.13487768+0.j),0.0001)\n\n \"\"\"Medium spacing of spw 3\"\"\"\n self.check_eq(self.result['med3'][0][0],(3.09678578 -2.19477778e-12j) ,0.0001)\n\n \"\"\"Long spacing of spw 3\"\"\"\n self.check_eq(self.result['long3'][0][0], (2.66332293 +1.29327478e-11j),0.0001)\n\n return sjran", "def test_other_active_region(self):\r\n path = \"InputTestFilesSection3/TrainingSets/other_active_region\"\r\n tissue_for_cell_name, cell_name_for_tissue = \\\r\n Utilities.cell_to_tissue_matches(\"InputTestFilesSection3/TissueCellMatches\")\r\n motif_split_chr = 
\"../datafiles/Motifs/motifs_per_chr\"\r\n data = WeightFeatures.get_trainings_data_dirs(path, cell_name_for_tissue, tissue_for_cell_name,\r\n motif_split_chr)[0]\r\n other_act_reg_file = data[1] + '/output.bed'\r\n regions_df_outfile = data[1] + '/output_motifs.df'\r\n col_names = ['chr', 'motifstart', 'motifend', 'name', 'score', 'pval', 'strand']\r\n x = WeightFeatures.other_active_region(coordinates_infile=data[0], infile=data[2],\r\n other_act_reg_file=other_act_reg_file,\r\n cells_to_extract_info_from=data[3],\r\n regions_df_outfile=regions_df_outfile, db_name='funmotifsdb',\r\n db_user_name='mm99', col_names=col_names, cell_table='test_table')\r\n\r\n d = {'chr': 10, 'motifstart': 101, 'motifend': 200, 'name': 'Motif2', 'score': 2.222,\r\n 'pval': 0.0002, 'strand': '-', 'liver___tfexpr': 67.14, 'Activity_Score': '772.530134424223'}\r\n df = pd.DataFrame(d, index=[0])\r\n\r\n z = (x == df)\r\n\r\n for i in z.all():\r\n assert i is True\r\n\r\n return", "def test_two_dataset_two_models(self):\n fmod = self.fitter([self.model1d.copy(), self.model1d_2.copy()], [self.x1, self.x2], [self.y1, self.y2], err=[self.dy1, self.dy2])\n for ff, mm in zip(fmod, [self.tmodel1d, self.tmodel1d_2]):\n for nn, pp in enumerate(ff.param_names):\n assert_allclose(getattr(ff, pp), getattr(mm, pp), rtol=0.05)", "def test_LM(in_file, out_file, LM):\n print(\"testing language models...\")\n # This is an empty method\n # Pls implement your code in below\n \n results = []\n text = []\n # read text input and compute estimate\n with open(in_file, mode=\"r\", encoding=\"utf-8\") as f:\n textList = f.readlines()\n \n for line in textList:\n text.append( preprocess_line(line) )\n grams = convert_line_to_4gramList( text[-1] )\n resulting_lang = estimate_sentence(grams, LM)\n results.append(resulting_lang + ' ' + text[-1])\n \n # write the prediction\n with open(out_file, mode=\"w\", encoding=\"utf-8\") as f:\n for result in results:\n f.write(result + '\\n')", "def mc_stratum(y1_data,y1match_data,y2match_data,stratum,unit_convert=None):\n # Bootstrap Sample (Sampling Error)\n # Don't do bootstrap for core sites (non-subsampled) or strata with < 8 sites \n if len(y1_data) < 8 or stratum.analysis == \"core\":\n y1 = y1_data[:]\n y1m = y1match_data[:]\n y2m = y2match_data[:]\n else:\n y1 = svmp.bootstrap(y1_data)\n y1m = svmp.bootstrap(y1match_data)\n y2m = svmp.match_sites(y1m,y2match_data)\n # Measurement Error - Simulated Zm area\n me1 = svmp.measurement_error(y1)\n me1m = svmp.measurement_error(y1m)\n me2m = svmp.measurement_error(y2m)\n # Printng for DEBUG only\n #for d in me1:\n #yr1_string = ','.join([\"%s\" % i for i in d[0:2]])\n #yr1_string = \"%s,%s\" % (\"Y1\",yr1_string)\n ## print yr1_string # for debug only\n #for c,d in zip(me1m,me2m):\n #yr1m_string = ','.join([\"%s\" % i for i in c[0:2]])\n #yr1m_string = \"%s,%s\" % (\"Y1m\",yr1m_string)\n #yr2m_string = ','.join([\"%s\" % i for i in d[0:2]])\n #yr2m_string = \"%s,%s\" % (\"Y2m\",yr2m_string)\n #allmatch_string = \"%s,%s\" % (yr1m_string,yr2m_string)\n ## print allmatch_string # for debug only\n \n # Calculate Change Analysis stats with Simulated Data\n y1Samp = svmp.SampleStats(me1,stratum,unit_convert)\n y1mSamp = svmp.SampleStats(me1m,stratum,unit_convert)\n y2mSamp = svmp.SampleStats(me2m,stratum,unit_convert) \n change = svmp.ChangeStats(y1mSamp,y2mSamp,y1Samp)\n # Relative Change\n return (y1Samp,y1mSamp,y2mSamp,change)", "def test_create_train_X_y_output_when_lags_5_steps_1_and_exog_is_dataframe_of_category():\n series = pd.DataFrame({'l1': 
pd.Series(np.arange(10), dtype=float), \n 'l2': pd.Series(np.arange(50, 60), dtype=float)})\n exog = pd.DataFrame({'exog_1': pd.Categorical(range(10)),\n 'exog_2': pd.Categorical(range(100, 110))})\n \n forecaster = ForecasterAutoregMultiVariate(LinearRegression(), level='l2',\n lags=5, steps=1)\n results = forecaster.create_train_X_y(series=series, exog=exog)\n\n expected = (\n pd.DataFrame(\n data = np.array([[4., 3., 2., 1., 0., 54., 53., 52., 51., 50.],\n [5., 4., 3., 2., 1., 55., 54., 53., 52., 51.],\n [6., 5., 4., 3., 2., 56., 55., 54., 53., 52.],\n [7., 6., 5., 4., 3., 57., 56., 55., 54., 53.],\n [8., 7., 6., 5., 4., 58., 57., 56., 55., 54.]], \n dtype=float),\n index = pd.RangeIndex(start=5, stop=10, step=1),\n columns = ['l1_lag_1', 'l1_lag_2', 'l1_lag_3', 'l1_lag_4', 'l1_lag_5', \n 'l2_lag_1', 'l2_lag_2', 'l2_lag_3', 'l2_lag_4', 'l2_lag_5']\n ).assign(\n exog_1_step_1=pd.Categorical(range(5, 10), categories=range(10)),\n exog_2_step_1=pd.Categorical(range(105, 110), categories=range(100, 110))\n ),\n {1: pd.Series(\n data = np.array([55., 56., 57., 58., 59.], dtype=float), \n index = pd.RangeIndex(start=5, stop=10, step=1),\n name = \"l2_step_1\"\n )\n }\n )\n\n pd.testing.assert_frame_equal(results[0], expected[0])\n assert isinstance(results[1], dict)\n assert all(isinstance(x, pd.Series) for x in results[1].values())\n assert results[1].keys() == expected[1].keys()\n for key in expected[1]: \n pd.testing.assert_series_equal(results[1][key], expected[1][key])", "def test_cm2_lycopene_100():\n\n test_params = {\n \"name\": \"CM_2: Lycopene - 100 targets - Sanger - Threshold: 0.y\",\n \"id\": \"cm2-lycopene-100targets-sanger-th07\",\n \"targets\": {\n \"file\": f\"{TEST_DIR}/targets/target_lycopene_sanger_100.json\",\n },\n \"template\": f\"{TEST_DIR}/templates/template_lycopene_sanger.json\",\n \"nbloop\": 1,\n \"threshold\": 0.7,\n }\n\n run_test(test_params)", "def test_create_obsmodel_with_subgrid(self):\n print(\"running test_create_obsmodel_with_subgrid\")\n\n # run create_obsmodel\n create_obsmodel.create_obsmodel(\n self.settings_sg, use_sd=False, nsubs=self.settings_sg.n_subgrid, nprocs=1,\n )\n\n # check that files match\n compare_hdf5(\n self.noise_sub0_fname_cache,\n \"beast_metal_small_subgrids/beast_metal_small_subgrids_noisemodel.gridsub0.hd5\",\n )\n compare_hdf5(\n self.noise_sub1_fname_cache,\n \"beast_metal_small_subgrids/beast_metal_small_subgrids_noisemodel.gridsub1.hd5\",\n )", "def get_r2s_by_trial(hparams, model_types):\n\n dataset = _get_dataset_str(hparams)\n region_names = get_region_list(hparams)\n\n metrics = []\n model_idx = 0\n model_counter = 0\n for region in region_names:\n hparams['region'] = region\n for model_type in model_types:\n\n hparams['session_dir'], _ = get_session_dir(hparams)\n expt_dir = get_expt_dir(\n hparams,\n model_type=model_type,\n model_class=hparams['model_class'],\n expt_name=hparams['experiment_name'])\n\n # gather all versions\n try:\n versions = get_subdirs(expt_dir)\n except Exception:\n print('No models in %s; skipping' % expt_dir)\n\n # load csv files with model metrics (saved out from test tube)\n for i, version in enumerate(versions):\n # read metrics csv file\n model_dir = os.path.join(expt_dir, version)\n try:\n metric = pd.read_csv(\n os.path.join(model_dir, 'metrics.csv'))\n model_counter += 1\n except:\n continue\n with open(os.path.join(model_dir, 'meta_tags.pkl'), 'rb') as f:\n hparams = pickle.load(f)\n # append model info to metrics ()\n version_num = version[8:]\n metric['version'] = str('version_%i' % 
model_idx + version_num)\n metric['region'] = region\n metric['dataset'] = dataset\n metric['model_type'] = model_type\n for key, val in hparams.items():\n if isinstance(val, (str, int, float)):\n metric[key] = val\n metrics.append(metric)\n\n model_idx += 10000 # assumes no more than 10k model versions/expt\n # put everything in pandas dataframe\n metrics_df = pd.concat(metrics, sort=False)\n return metrics_df", "def run_LOO_validation(s: dict, df_set: pd.DataFrame, logging):\n logging.info(\"\\n--------------- starting run_LOO_validation ---------------\\n\")\n setname = s[\"setname\"]\n names_excel_path = os.path.join(s[\"base_dir\"], \"protein_names.xlsx\")\n\n # drop redundant proteins according to CD-HIT\n df_set = thoipapy.utils.drop_redundant_proteins_from_list(df_set, logging)\n\n # input\n train_data_after_first_feature_seln_csv = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/train_data/03_train_data_after_first_feature_seln.csv\"\n tuned_ensemble_parameters_csv = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/train_data/04_tuned_ensemble_parameters.csv\"\n # output\n LOO_crossvalidation_pkl = os.path.join(s[\"data_dir\"], \"results\", s[\"setname\"], \"crossvalidation\", \"data\", \"{}_LOO_crossvalidation.pkl\".format(s[\"setname\"]))\n bocurve_data_raw_csv = os.path.join(s[\"data_dir\"], \"results\", s[\"setname\"], \"crossvalidation\", \"data\", \"{}_loo_bocurve_data_raw.csv\".format(s[\"setname\"]))\n bocurve_data_xlsx: Union[Path, str] = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/crossvalidation/data/{s['setname']}_thoipa_loo_bo_curve_data.xlsx\"\n sim_matrix_xlsx = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/clusters/{setname}_sim_matrix.xlsx\"\n\n if not sim_matrix_xlsx.is_file():\n raise FileNotFoundError(f\"The similarity matrix with clusters of putative homologues could not be found ({sim_matrix_xlsx})\")\n\n thoipapy.utils.make_sure_path_exists(bocurve_data_xlsx, isfile=True)\n\n df_data = pd.read_csv(train_data_after_first_feature_seln_csv, index_col=0)\n assert \"Unnamed\" not in \", \".join(df_data.columns.tolist())\n\n # df_data = df_data.dropna()\n\n # drop training data (full protein) that don't have enough homologues\n if s[\"min_n_homol_training\"] != 0:\n df_data = df_data.loc[df_data.n_homologues >= s[\"min_n_homol_training\"]]\n\n acc_db_ser = pd.Series(df_data.index).apply(lambda x: x.split(\"_\")[0])\n acc_db_list = acc_db_ser.to_list()\n # df_data[\"acc_db\"] = acc_db_ser\n acc_db_unique_list = acc_db_ser.unique()\n logging.info(f\"Dataset has {len(acc_db_unique_list)} unique proteins for training.\")\n start = time.clock()\n pred_colname = \"THOIPA_{}_LOO\".format(s[\"set_number\"])\n\n n_features = thoipapy.validation.feature_selection.drop_cols_not_used_in_ML(logging, df_data, s[\"settings_path\"]).shape[1]\n forest = thoipapy.ML_model.train_model.return_classifier_with_loaded_ensemble_parameters(s, tuned_ensemble_parameters_csv)\n\n if s[\"use_multiprocessing\"]:\n # TURN LOGGING OFF BEFORE MULTIPROCESSING\n logger = LogOnlyToConsole()\n else:\n logger = logging\n\n putative_homologue_clusters = get_clusters_putative_homologues_in_protein_set(sim_matrix_xlsx)\n\n loo_validation_data_list = []\n\n val_list = []\n for i in df_set.index:\n acc = df_set.loc[i, \"acc\"]\n database = df_set.loc[i, \"database\"]\n acc_db = df_set.loc[i, \"acc_db\"]\n\n # find the cluster of putative homologues\n # each protein should only appear once in a single cluster\n clusters_containing_acc_db_of_interest = [c for c in putative_homologue_clusters if 
acc_db in c]\n if not len(clusters_containing_acc_db_of_interest) == 1:\n raise ValueError(f\"Protein of interest found in 0 or >1 clusters of putative homologues.\\nacc_db = '{acc_db}\\n'\" +\n f\"clusters_containing_acc_db_of_interest = {clusters_containing_acc_db_of_interest}\")\n\n acc_db_putative_homologues: List[str] = clusters_containing_acc_db_of_interest[0]\n row_filter_excluding_putative_homologues = [acc_db not in acc_db_putative_homologues for acc_db in acc_db_list]\n # index_excluding_putative_homologues = df_data.acc_db.apply(lambda x: x not in acc_db_putative_homologues)\n\n df_train = df_data.loc[row_filter_excluding_putative_homologues]\n\n filtered_index_acc_db = pd.Series(df_train.index).apply(lambda x: x.split(\"_\")[0]).to_list()\n assert acc_db not in filtered_index_acc_db\n\n loo_validation_data = LooValidationData()\n loo_validation_data.acc = acc\n loo_validation_data.acc_db = df_set.loc[i, \"acc_db\"]\n loo_validation_data.bind_column = s[\"bind_column\"]\n loo_validation_data.database = database\n loo_validation_data.df_train = df_train\n loo_validation_data.settings_path = s[\"settings_path\"]\n loo_validation_data.forest = forest\n loo_validation_data.i = i\n loo_validation_data.logger = logger\n loo_validation_data.pred_colname = pred_colname\n loo_validation_data.testdata_combined_file = os.path.join(s[\"data_dir\"], \"features\", \"combined\", database, \"{}.surr20.gaps5.combined_features.csv\".format(acc))\n loo_validation_data.THOIPA_LOO_prediction_csv = Path(s[\"data_dir\"]) / f\"results/{s['setname']}/predictions/THOIPA_LOO/{database}.{acc}.LOO.prediction.csv\"\n\n thoipapy.utils.make_sure_path_exists(loo_validation_data.THOIPA_LOO_prediction_csv, isfile=True)\n\n #######################################################################################################\n # #\n # Train data is based on large training csv, after dropping the protein of interest #\n # (positions without closedist/disruption data will be excluded) #\n # #\n #######################################################################################################\n\n if not loo_validation_data.acc_db in acc_db_unique_list:\n logging.warning(\"{} is in protein set, but not found in training data\".format(loo_validation_data.acc_db))\n # skip protein\n continue\n\n if s[\"use_multiprocessing\"]:\n loo_validation_data_list.append(loo_validation_data)\n else:\n auc_dict, BO_df = LOO_single_prot(loo_validation_data)\n val_tuple = (auc_dict, BO_df)\n val_list.append(val_tuple)\n\n if s[\"use_multiprocessing\"]:\n with Pool(processes=s[\"multiple_tmp_simultaneous\"]) as pool:\n val_list = pool.map(LOO_single_prot, loo_validation_data_list)\n\n #######################################################################################################\n # #\n # Get mean AUC etc for all proteins in list #\n # #\n #######################################################################################################\n duration = time.clock() - start\n sys.stdout.write(\"\\n\")\n\n # copied from original mean_tpr code\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n\n BO_all_df = pd.DataFrame()\n all_roc_auc = []\n all_pr_auc = []\n xv_dict = {}\n acc_db_unique_list = df_set.acc_db.tolist()\n\n # iterate through the output tuple (auc_dict, BO_df)\n for nn, val_tuple in enumerate(val_list):\n acc_db = acc_db_unique_list[nn]\n auc_dict = val_tuple[0]\n all_roc_auc.append(auc_dict[\"roc_auc\"])\n all_pr_auc.append(auc_dict[\"pr_auc\"])\n BO_df = val_tuple[1]\n # join the data for all BO curves 
together\n if nn == 0:\n BO_all_df = BO_df\n else:\n BO_all_df = pd.concat([BO_all_df, BO_df], axis=1, join=\"outer\")\n mean_tpr += interp(mean_fpr, auc_dict[\"fpr\"], auc_dict[\"tpr\"])\n mean_tpr[0] = 0.0\n\n xv_dict[acc_db] = {\"fpr\": auc_dict[\"fpr\"], \"tpr\": auc_dict[\"tpr\"], \"roc_auc\": auc_dict[\"roc_auc\"]}\n\n # copied from original mean_tpr code\n mean_tpr /= df_set.shape[0]\n mean_tpr[-1] = 1.0\n mean_roc_auc_from_joined_data = auc(mean_fpr, mean_tpr)\n\n # calculate mean of each protein AUC separately\n mean_roc_auc_all_prot = np.array(all_roc_auc).mean()\n xv_dict[\"mean_roc_auc_all_prot\"] = mean_roc_auc_all_prot\n mean_pr_auc_all_prot = np.array(all_pr_auc).mean()\n xv_dict[\"mean_pr_auc_all_prot\"] = mean_pr_auc_all_prot\n\n # add to dict that can be used for figure creation later\n xv_dict[\"true_positive_rate_mean\"] = mean_tpr\n xv_dict[\"false_positive_rate_mean\"] = mean_fpr\n xv_dict[\"mean_roc_auc_from_joined_data\"] = mean_roc_auc_from_joined_data\n xv_dict[\"mean_roc_auc_all_prot\"] = mean_roc_auc_all_prot\n\n # save dict as pickle\n thoipapy.utils.make_sure_path_exists(LOO_crossvalidation_pkl, isfile=True)\n with open(LOO_crossvalidation_pkl, \"wb\") as f:\n pickle.dump(xv_dict, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n #######################################################################################################\n # #\n # Processing BO CURVE data, saving to csv and running the BO curve analysis script #\n # #\n #######################################################################################################\n\n BO_all_df.to_csv(bocurve_data_raw_csv)\n # names_excel_path = os.path.join(s[\"base_dir\"], \"protein_names.xlsx\")\n\n # linechart_mean_obs_and_rand = thoipapy.figs.Create_Bo_Curve_files.analyse_bo_curve_underlying_data(bocurve_data_raw_csv, crossvalidation_folder, names_excel_path)\n thoipapy.validation.bocurve.parse_BO_data_csv_to_excel(bocurve_data_raw_csv, bocurve_data_xlsx, s[\"n_residues_AUBOC_validation\"], logging)\n\n logging.info('{} LOO crossvalidation. 
Time taken = {:.2f}.'.format(s[\"setname\"], duration))\n logging.info('---ROC_AUC(mean each protein : {:.2f})(from joined data {:.2f})---'.format(mean_roc_auc_all_prot, mean_roc_auc_from_joined_data))\n logging.info('---PR_AUC(mean each protein : {:.2f})---'.format(mean_pr_auc_all_prot))\n logging.info(\"\\n--------------- finished run_LOO_validation ---------------\\n\")", "def train_regression_model_for_bmi(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, x1, y1, y1label, feature_headers, mrns, agex_low, agex_high, months_from, months_to, modelType='lasso', percentile=False, filterSTR=['Gender:1'], filterSTRThresh=[0.5], variablesubset=['Vital'],variable_exclude=['Trend'], num_clusters=16, num_iters=100, dist_type='euclidean', corr_vars_exclude=['Vital'], return_data_for_error_analysis=False, return_data=False, return_data_transformed=False, do_impute=True, mrnForFilter=[], add_time=False, bin_ix=[], do_normalize=True, binarize_diagnosis=True, subset=np.array([True, False, False, False, False, False, False, False, False, False, False, False, False, False, False])): #filterSTR='Gender:0 male'\n\n if any([len(x)==0 for x in (x1,y1,y1label,feature_headers,mrns)]):\n print('At least one required data not provided out of x1, y1, y1label, feature_headers, or mrns.')\n print('Creating data from data dictionaries')\n x1, y1, y1label, feature_headers, mrns = build_features.call_build_function(data_dic, data_dic_mom, data_dic_hist_moms, lat_lon_dic, env_dic, agex_low, agex_high, months_from, months_to, percentile, mrnsForFilter=mrnForFilter)\n original_data = (x1, y1, y1label, feature_headers, mrns)\n else:\n print('Using prepared raw data')\n\n if binarize_diagnosis:\n bin_ix = np.array([(h.startswith('Diagnosis:') or h.startswith('Maternal Diagnosis:') or h.startswith('Newborn Diagnosis:')) for h in feature_headers])\n print(bin_ix.sum(), 'features are binary')\n x1[:,bin_ix] = (x1[:,bin_ix] > 0) * 1.0\n\n ix, x2, y2, y2label, mrns = filter_training_set_forLinear(x1, y1, y1label, feature_headers, filterSTR, percentile, mrns, filterSTRThresh)\n\n if do_impute or do_normalize or add_time:\n x2, mux, stdx, bin_ix, unobserved = normalize(x2, bin_ix=bin_ix)\n\n if do_impute:\n x2 = autoencoder_impute(x2, bin_ix)\n\n if add_time:\n x2, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = add_temporal_features(x2, feature_headers, num_clusters, num_iters, y2, y2label, dist_type, True, mux, stdx, do_impute, subset)\n else:\n centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew = ['NaN']*7\n\n corr_headers = np.array(feature_headers)\n corr_matrix = np.corrcoef(x2.transpose())\n corr_headers_filtered, corr_matrix_filtered, ix_corr_headers = filter_correlations_via(corr_headers, corr_matrix, corr_vars_exclude)\n print('corr matrix is filtered to size', corr_matrix_filtered.shape)\n\n if len(variablesubset) != 0:\n x2, feature_headers = variable_subset(x2, variablesubset, feature_headers)\n\n print ('output is: average:{0:4.3f}'.format(y2.mean()), ' min:', y2.min(), ' max:', y2.max())\n print ('normalizing output.'); y2 = (y2-y2.mean())/y2.std()\n\n print ('Predicting BMI at age:'+str(agex_low)+ ' to '+str(agex_high)+ 'years, from data in ages:'+ str(months_from)+'-'+str(months_to) + ' months')\n if filterSTR != '':\n print ('filtering patients with: ' , filterSTR)\n\n print ('total size',ix.sum())\n if (ix.sum() < 50):\n print('Not enough subjects. 
Next.')\n return (filterSTR, [])\n\n if modelType == 'lasso' or modelType == 'randomforest' or modelType == 'gradientboost' or modelType == 'lars':\n iters = 10\n model_weights_array = np.zeros((iters, x2.shape[1]), dtype=float)\n auc_test_list=np.zeros((iters), dtype=float); r2testlist = np.zeros((iters), dtype=float);\n for iteration in range(0, iters):\n randix = list(range(0, x2.shape[0]))\n random.shuffle(randix)\n randix = randix[0:int(len(randix)*0.9)]\n datax = x2[randix,:]; datay=y2[randix]; dataylabel = y2label[randix]; mrnx = mrns[randix]\n (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, mrnstrain, mrnstest) = train_regression(datax, datay, dataylabel, percentile, modelType, feature_headers, mrnx)\n model_weights_array[iteration, :] = model.coef_ if ((modelType == 'lasso') or (modelType == 'lars')) else model.feature_importances_\n auc_test_list[iteration] = auc_test; r2testlist[iteration] = r2test\n\n model_weights = model_weights_array.mean(axis=0)\n model_weights_std = model_weights_array.std(axis=0)\n model_weights_conf_term = (1.96/np.sqrt(iters)) * model_weights_std\n test_auc_mean = auc_test_list.mean()\n test_auc_mean_ste = (1.96/np.sqrt(iters)) * auc_test_list.std()\n r2test_mean = r2testlist.mean()\n r2test_ste = (1.96/np.sqrt(iters)) * r2testlist.std()\n\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n\n if return_data_for_error_analysis == True:\n print('lets analyse this')\n return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)\n else:\n (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, mrnstrain, mrnstest) = train_regression(x2, y2, y2label, percentile, modelType, feature_headers, mrnx)\n model_weights_conf_term = np.zeros((x2.shape[1]), dtype=float)\n test_auc_mean = auc_test; r2test_mean= r2test;\n test_auc_mean_ste = 0; r2test_ste=0\n\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n if return_data_for_error_analysis == True:\n print('lets analyse this')\n return (model, xtrain, ytrain, xtest, ytest, ytestlabel, ytrainlabel, auc_test, r2test, feature_headers, centroids, hnew, standardDevCentroids, cnt_clusters, distances, muxnew, stdxnew, mrnstrain, mrnstest, mrns)\n\n if modelType == 'mlp':\n print ('you need to implement gradient to get top weights. 
')\n return (filterSTR, [])\n\n sorted_ix = np.argsort(-1* abs(model_weights))\n weights = model_weights[sorted_ix]\n terms_sorted = model_weights_conf_term[sorted_ix]\n\n factors = np.array(feature_headers)[sorted_ix]\n x2_reordered = x2[:,sorted_ix]\n xtest_reordered = xtest[:, sorted_ix]\n\n ytestpred = model.predict(xtest)\n fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, ytestpred)\n operating_Thresholds = []\n operating_levels = [0, 0.0001, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\n ix_level = 0\n\n for ix, thr in enumerate(thresholds):\n if fpr[ix] >= operating_levels[ix_level]:\n operating_Thresholds.append(thr)\n ix_level += 1\n if ix_level == len(operating_levels):\n break\n\n operating_Thresholds = thresholds\n report_metrics = 'Test set metrics:\\n'\n prec_list = []\n recall_list = []\n spec_list = []\n for t in operating_Thresholds:\n tp = ((ytestlabel > 0) & (ytestpred.ravel() > t)).sum()*1.0\n tn = ((ytestlabel == 0) & (ytestpred.ravel() <= t)).sum()*1.0\n fn = ((ytestlabel > 0) & (ytestpred.ravel() <= t)).sum()*1.0\n fp = ((ytestlabel == 0) & (ytestpred.ravel() > t)).sum()*1.0\n\n sens = tp / (tp + fn)\n spec = tn / (tn + fp)\n ppv = tp / (tp + fp)\n acc = (tp + tn) / (tp + tn + fp + fn)\n f1 = 2*tp / (2*tp + fp + fn)\n\n report_metrics += '@threshold:{0:4.3f}, sens:{1:4.3f}, spec:{2:4.3f}, ppv:{3:4.3f}, acc:{4:4.3f}, f1:{5:4.3f} total+:{6:4.3f}\\n'.format(t, sens, spec, ppv, acc, f1, tp+fp)\n prec_list.append(ppv)\n recall_list.append(sens)\n spec_list.append(spec)\n\n print('total variables', x2.sum(axis=0).shape, ' and total subjects:', x2.shape[0])\n print('->AUC test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(test_auc_mean, test_auc_mean - test_auc_mean_ste, test_auc_mean + test_auc_mean_ste))\n print('->Explained Variance (R2) test: {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}]'.format(r2test_mean, r2test_mean - r2test_ste, r2test_mean + r2test_ste))\n print(report_metrics)\n\n occurances = (x2 != 0).sum(axis=0)[sorted_ix]\n zip_weights = {}\n sig_headers = []\n feature_categories = {}\n for i in range(0, (abs(model_weights)>0).sum()):\n fpr, tpr, thresholds = metrics.roc_curve(ytestlabel, xtest_reordered[:,i].ravel())\n feature_auc_indiv = metrics.auc(fpr, tpr)\n corrs = corr_matrix_filtered[sorted_ix[i],:].ravel()\n top_corr_ix = np.argsort(-1*abs(corrs))\n corr_string = 'Correlated most with:\\n'+' '.join( [str(corr_headers_filtered[top_corr_ix[j]])+ ':' + \"{0:4.3f}\\n\".format(corrs[top_corr_ix[j]]) for j in range(0,10)] )\n\n tp = ((y2label > 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0\n tn = ((y2label == 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0\n fn = ((y2label > 0) & (x2_reordered[:,i].ravel() <= 0)).sum()*1.0\n fp = ((y2label == 0) & (x2_reordered[:,i].ravel() > 0)).sum()*1.0\n\n if fp*fn*tp*tn == 0:\n oratio = np.nan\n low_OR = np.nan\n high_OR = np.nan\n else:\n oratio = tp*tn/(fp*fn)\n se = np.sqrt(1/tp + 1/fp + 1/tn + 1/fn)\n low_OR = np.exp(np.log(oratio) - 1.96 * se)\n high_OR = np.exp(np.log(oratio) + 1.96 * se)\n try:\n feature_categories[factors[i].split(':')[0]] += weights[i]\n except:\n feature_categories[factors[i].split(':')[0]] = weights[i]\n\n star = ' '\n if (low_OR > 1 or high_OR < 1): #or (weights[i]+terms_sorted[i]) < 0 or (weights[i]-terms_sorted[i]) > 0\n sig_headers.append(factors[i])\n star = '*'\n print(\"{8} {3} | coef {0:4.3f} 95% CI: [{1:4.3f} , {2:4.3f}] | OR_adj {9:4.3f} [{10:4.3f} {11:4.3f}] | occ: {4} | OR_unadj: {5:4.3f} [{6:4.3f} {7:4.3f}] | indivs 
AUC:{12:4.3f}\".format(weights[i], weights[i]-terms_sorted[i], weights[i]+terms_sorted[i], factors[i], occurances[i], oratio, low_OR, high_OR, star, np.exp(weights[i]), np.exp(weights[i]-terms_sorted[i]), np.exp(weights[i]+terms_sorted[i]), feature_auc_indiv))\n print(corr_string)\n\n for k in feature_categories:\n print (k, \":\", feature_categories[k])\n\n if return_data and return_data_transformed:\n return (x2, y1, y1label, feature_headers, mrns, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)\n elif return_data and not return_data_transformed:\n return (original_data[0], original_data[1], original_data[2], original_data[3], original_data[4], filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)\n else:\n return (feature_headers, filterSTR, sig_headers, centroids, hnew, standardDevCentroids, cnt_clusters, muxnew, stdxnew, mrns, prec_list, recall_list, spec_list, test_auc_mean, test_auc_mean_ste, r2test_mean, r2test_ste)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
perform_tal1_ko(model) > DataFrame
Performs knockout of selected sites in TAL1 and LMO2 regions.
def perform_tal1_ko(self, model):

    "save representations"
    self.chr = 1
    self.cfg.get_tal1_only = True
    ko_ob.test_tal1_lmo2(model)

    "perform ko"
    self.cfg.hnisz_region = "tal1"
    _, ko_pred_df, _ = self.perform_ko(model)
    return ko_pred_df
[ "def perform_lmo2_ko(self, model):\n\n \"save representations\"\n self.chr = 11\n self.cfg.get_lmo2_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"lmo2\"\n _, ko_pred_df, _ = self.perform_ko(model)\n\n return ko_pred_df", "def perform_ko(self, model):\n\n cfg = self.cfg\n\n \"load data\"\n if cfg.run_tal and cfg.hnisz_region == \"tal1\":\n self.cfg.get_tal1_only = True\n data_loader = self.prepare_tal1_lmo2()\n elif cfg.run_tal and cfg.hnisz_region == \"lmo2\":\n self.cfg.get_lmo2_only = True\n data_loader = self.prepare_tal1_lmo2()\n else:\n data_loader = get_data_loader_chr(cfg, self.chr, shuffle=False)\n\n \"get zero embed\"\n cfg.full_test = False\n cfg.compute_pca = False\n cfg.get_zero_pred = True\n zero_embed = test_model(model, cfg, self.chr)\n\n \"get knockout indices depending on experiment\"\n if cfg.run_tal:\n if cfg.hnisz_region == \"tal1\":\n cfg.ko_experiment = \"ctcf\"\n indices = cfg.tal1ko_indices\n elif cfg.hnisz_region == \"lmo2\":\n cfg.ko_experiment = \"ctcf\"\n indices = np.array(cfg.lmo2ko_indices) + get_cumpos(cfg, 11)\n else:\n if cfg.ko_experiment == \"ctcf\":\n if cfg.ctcf_indices == \"all\":\n indices = ko_ob.get_ctcf_indices()\n indices = sample(list(indices), 10)\n else:\n indices = ko_ob.cfg.ctcf_indices_22\n elif cfg.ko_experiment == \"foxg1\":\n indices = cfg.foxg1_indices\n elif cfg.ko_experiment == \"tadbs\":\n indices = ko_ob.get_tadbs()\n\n \"plotting and metrics\"\n n_indices = len(indices)\n diff_list = np.zeros((n_indices, 11))\n diff_mat = np.zeros((n_indices, 200, 200))\n \"run for all indices\"\n for i, indice in enumerate(indices):\n \"get representations\"\n representations, start, stop, pred_data = self.get_trained_representations(method=\"hiclstm\")\n\n \"alter representations\"\n representations, zero_embed = self.ko_representations(representations, start, indice, zero_embed,\n mode=cfg.ko_mode)\n\n if self.cfg.load_ko:\n ko_pred_df = pd.read_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n else:\n \"run through model using altered representations, save ko predictions\"\n _, ko_pred_df = model.perform_ko(data_loader, representations, start, zero_embed, mode=\"ko\")\n if self.cfg.save_kopred:\n ko_pred_df.to_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n\n \"compute difference between WT and KO predictions\"\n if self.cfg.compute_avg_diff:\n ko_diffs = self.compute_kodiff(pred_data, ko_pred_df, indice)\n diff_list[i] = ko_diffs\n\n \"get merged heatmap\"\n pred_data = pd.merge(pred_data, ko_pred_df, on=[\"i\", \"j\"])\n pred_data = pred_data.rename(columns={\"ko_pred\": \"v\"})\n hic_mat, st = get_heatmaps(pred_data, no_pred=False)\n # simple_plot(hic_mat, mode=\"reds\")\n\n \"get diff mat\"\n hic_win = indices_diff_mat(indice, st, hic_mat, mode=cfg.ko_experiment)\n n_win = len(hic_win)\n diff_mat[i, :n_win, :n_win] = hic_win\n\n diff_mat = diff_mat.mean(axis=0)\n ko = np.triu(diff_mat)\n pred = np.tril(diff_mat).T\n diff_mat = ko - pred\n simple_plot(diff_mat, mode=\"diff\")\n np.save(cfg.output_directory + \"tad_diff_zero_ctctn.npy\", diff_mat)\n mean_diff = np.mean(diff_list, axis=1)\n return mean_diff, ko_pred_df, pred_data", "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n \"save 
predictions\"\n pred_df.to_csv(self.cfg.output_directory + \"hiclstm_%s_predictions_chr%s.csv\" % (self.cell, str(self.chr)),\n sep=\"\\t\")\n return pred_df", "def train_tal1_lmo2(self, model):\n\n \"summary writer\"\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr)\n\n \"initialize optimizer and prepare dataloader\"\n self.cfg.get_tal1_only = False\n self.cfg.get_lmo2_only = False\n optimizer, criterion = model.compile_optimizer()\n data_loader = self.prepare_tal1_lmo2()\n\n \"train and save the model\"\n model.train_model(data_loader, criterion, optimizer, writer)\n torch.save(model.state_dict(), cfg.model_dir + cfg.model_name + '.pth')", "def prepare_tal1_lmo2(self):\n\n \"load Hi-C like data\"\n tal_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/tal_df.txt\", sep=\"\\t\")\n lmo2_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n\n \"preprocess\"\n tal_df = tal_df.drop(['Unnamed: 0'], axis=1)\n lmo2_df = lmo2_df.drop(['Unnamed: 0'], axis=1)\n tal_df[['i', 'j']] = tal_df[['i', 'j']].astype('int64')\n lmo2_df[['i', 'j']] = lmo2_df[['i', 'j']].astype('int64')\n\n \"prepare indices and values for TAL1 in chromosome 1\"\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n input_idx_tal1, values_tal1 = get_samples_sparse(tal_df, 1, cfg)\n values_tal1 = F.pad(input=values_tal1, pad=(0, 4, 0, 0), mode='constant', value=0)\n input_idx_tal1 = F.pad(input=input_idx_tal1, pad=(0, 0, 0, 4, 0, 0), mode='constant', value=0)\n values = torch.cat((values, values_tal1.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_tal1), 0)\n\n if self.cfg.get_tal1_only:\n \"create tal dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n return data_loader\n\n if self.cfg.get_lmo2_only:\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n\n \"prepare indices and values for LMO2 in chromosome 11\"\n input_idx_lmo2, values_lmo2 = get_samples_sparse(lmo2_df, 11, cfg)\n values = torch.cat((values, values_lmo2.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_lmo2), 0)\n\n \"create dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n\n return data_loader", "def test5_SingleObservationSelectByIntentNewModel(self):\n\n # print out some values for debugging\n debug=False\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test5\")\n self.inpms += \".test5\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n sjran = setjy(vis=self.inpms, field='', spw='', modimage='',\n selectdata=True, intent=\"*AMPLI*\",\n scalebychan=False, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2012', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n if 'MODEL_DATA' not in cols:\n #raise AssertionError, \"setjy(field='Uranus') did not add 
a MODEL_DATA column\"\n raise AssertionError, \"setjy(field='Titan') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto3'] = tblocal.getcell('MODEL_DATA', 10)\n #record['long3'] = tblocal.getcell('MODEL_DATA', 11)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['med4'] = tblocal.getcell('MODEL_DATA', 4)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n # Titan\n if self.ismms:\n # row numbers for specific data changed...\n #record['auto2'] = tblocal.getcell('MODEL_DATA', 1892)\n #record['long2'] = tblocal.getcell('MODEL_DATA', 1930)\n #record['auto3'] = tblocal.getcell('MODEL_DATA', 2838)\n #record['med3'] = tblocal.getcell('MODEL_DATA', 2854)\n #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto2query = querystr+' AND DATA_DESC_ID==2 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(auto2query)\n record['auto2'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long2query = querystr+' AND DATA_DESC_ID==2 AND ANTENNA1==5 AND ANTENNA2==7 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(long2query)\n record['long2'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==3 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n med3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==1 AND ANTENNA2==4 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(med3query)\n record['med3'] = subt.getcell('MODEL_DATA', 0)\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n else:\n\t\t record['auto2'] = tblocal.getcell('MODEL_DATA', 270)\n\t\t record['long2'] = tblocal.getcell('MODEL_DATA', 310)\n\t\t record['auto3'] = tblocal.getcell('MODEL_DATA', 408)\n\t\t record['med3'] = tblocal.getcell('MODEL_DATA', 424)\n\t\t record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n #record['history'] = self.get_last_history_line(self.inpms, origin='setjy', hint='Uranus')\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='setjy', hint='Titan')\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n if debug:\n if not self.ismms: print \"self.result['history']=\",self.result['history']\n print \"self.result['auto0']=\",self.result['auto0']\n print \"self.result['auto3']=\",self.result['auto3']\n\n #\"\"\"Flux density in HISTORY (Uranus)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus:\", \"V=0.0] Jy\"])\n #\"\"\"WVR spw\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[ 25.33798409+0.j,25.33798409+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n #self.check_eq(self.result['wvr'], numpy.array([[ 25.33490372+0.j, 25.33490372+0.j]]),0.0001)\n #\"\"\"Zero spacing of spw 3\"\"\"\n\t#self.check_eq(self.result['auto3'], numpy.array([[ 66.72530365+0.j],[ 66.72530365+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n\t#self.check_eq(self.result['auto3'], numpy.array([[ 66.71941376+0.j], [ 66.71941376+0.j]]),0.0001)\n #\"\"\"Zero spacing of spw 4\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[ 70.40153503+0.j],[ 70.40153503+0.j]]),0.0001)\n # new value after code and 
ephemeris data update 2012-10-03\n #self.check_eq(self.result['auto4'], numpy.array([[ 70.39561462+0.j], [ 70.39561462+0.j]]), 0.0001)\n #Titan\n \"\"\"Zero spacing of spw 2\"\"\"\n self.check_eq(self.result['auto2'][0][0], (6.69543791+0.j),0.0001)\n\n \"\"\"Long spacing of spw 2\"\"\"\n self.check_eq(self.result['long2'][0][0],(6.09987020 +2.47228783e-11j),0.0001)\n\n \"\"\"Zero spacing of spw 3\"\"\"\n self.check_eq(self.result['auto3'][0][0], (3.13487768+0.j),0.0001)\n\n \"\"\"Medium spacing of spw 3\"\"\"\n self.check_eq(self.result['med3'][0][0],(3.09678578 -2.19477778e-12j) ,0.0001)\n\n \"\"\"Long spacing of spw 3\"\"\"\n self.check_eq(self.result['long3'][0][0], (2.66332293 +1.29327478e-11j),0.0001)\n\n return sjran", "def test4_SingleObservationSelectByIntent(self):\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test4\")\n self.inpms += \".test4\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n sjran = setjy(vis=self.inpms, field='', spw='', modimage='',\n selectdata=True, intent=\"*AMPLI*\",\n scalebychan=True, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2010', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n if 'MODEL_DATA' not in cols:\n #raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n raise AssertionError, \"setjy(field='Titan') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto1'] = tblocal.getcell('MODEL_DATA', 18)\n #record['long1'] = tblocal.getcell('MODEL_DATA', 19)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n #Titan\n if self.ismms:\n #record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n #record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t #record['auto3'] = tblocal.getcell('MODEL_DATA', 2835)\n\t\t #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(auto0query)\n record['auto0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(long0query)\n record['long0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n\n else:\n record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n record['auto3'] = tblocal.getcell('MODEL_DATA', 405)\n record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n 
#record['history'] = self.get_last_history_line(self.inpms, origin='setjy::imager::setjy()', hint=\"V=0] Jy\")\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n \"\"\"Flux density in HISTORY (selectbyIntent)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus\", \"V=0] Jy\"])\n if not self.ismms: self.check_history(self.result['history'], [\"Titan\", \"V=0] Jy\"])\n\n #\"\"\"WVR spw with selectbyIntent\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[25.93320656+0.j,\n # 26.88228607+0.j]]),\n # 0.003)\n\n #\"\"\"Zero spacing of spw 1 with scalebychan\"\"\"\n # 8 (decreasing freq!) chans, XX & YY.\n #self.check_eq(self.result['auto1'],\n # numpy.array([[65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j],\n # [65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j]]),0.0001)\n\n #\"\"\"Long spacing of spw 1 with scalebychan\"\"\"\n #self.check_eq(self.result['long1'],\n # numpy.array([[4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j],\n # [4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j]]),0.0001)\n\n # spw 4 only has 1 chan, so it should be the same as without scalebychan.\n #\"\"\"Zero spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[69.33396912+0.j],[69.33396912+0.j]]),0.0001)\n #\"\"\"Long spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['long4'], numpy.array([[2.83933783+0.j],[2.83933783+0.j]]),0.0001)\n\n \"\"\"Zero spacing of spw 3 with scalebychan, selectbyintent\"\"\"\n self.check_eq(self.result['auto3'][1][0], (3.0934467+0j),0.0001)\n self.check_eq(self.result['auto3'][1][1920], (3.08946729+0j),0.0001)\n self.check_eq(self.result['auto3'][1][3839], (3.08549213+0j),0.0001)\n\n return sjran", "def run_optuna2():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n dataset = dataset.sample(frac=0.02)\n print(dataset.shape)\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n # X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n # dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n # dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n 
#\"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"boosting_type\": trial.suggest_categorical(\"boosting_type\", ['gbdt', 'rf']),\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n \"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n\n model = lgb.train(params, dtrain)\n vd_preds = model.predict(X_test)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. Returns max\n accuracy = accuracy_score(y_test, vd_preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def run_model(steps):\n model = ShoalModel()\n for j in range(steps):\n model.step()\n data = model.datacollector.get_model_vars_dataframe()\n return data", "def lacer(df, df1, train_start_date, train_end_date, test_start_date, test_end_date, request_type, CD, predictor_num): #Once model is ready, replace df with csv\n\n #Create Training and Testing Sets\n dftrain = preprocessing(df , train_start_date, train_end_date)\n dftrain = dftrain.reset_index(drop = True)\n dftest = preprocessing(df1, test_start_date, test_end_date)\n dftest = dftest.reset_index(drop = True)\n\n #Reserve test set for training on all 3 models. 
\n y_train, y_test = lc.CreateTestSet(dftest, predictor_num)\n y_test = y_test.reshape((-1, 1))\n\n\n## 2 Models\n #Model1: CD\n modelCD = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainCD = dftrain[dftrain['CD'] == CD].reset_index(drop = True)\n\n X_trainCD, X_testCD = lc.CreateTrainSet(dftrainCD, predictor_num)\n X_testCD = X_testCD.reshape((-1, 1))\n modelCD.fit(X_trainCD, X_testCD)\n\n y_predCD = modelCD.predict(y_train)\n\n #Model2: Request_type\n modelRT = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainRT = dftrain[dftrain['RequestType'] == request_type].reset_index(drop = True)\n\n X_trainRT, X_testRT = lc.CreateTrainSet(dftrainRT, predictor_num)\n X_testRT = X_testRT.reshape((-1, 1))\n\n modelRT.fit(X_trainRT, X_testRT)\n\n y_predRT = modelRT.predict(y_train)\n\n\n #Average out all predictions\n y_predFinal = (y_predCD + y_predRT )/2\n\n #Return metrics \n return lc.metrics(y_predFinal, y_test)", "def train_rb_system(self):\r\n\r\n train_case_no = pd.Series(0, index=self.train.index)\r\n # case_no in training and forecast samples are independent with each other\r\n # they are not of much use but merely for counting and comparing purposes\r\n gb_train = self.train.groupby([\"lu\", \"ld\", \"sp\", \"sn\", \"ud\", \"aud\", \"rsi1\", \"rsi2\", \"rsi3\", \"rsi4\"])\r\n for i, key in enumerate(gb_train.indices.keys()):\r\n train_case_no.loc[gb_train.groups[key]] = i\r\n train_ncase = gb_train.ngroups\r\n \r\n train_case_result = pd.Series(\"\", index=self.train.index)\r\n # store case_result for case observations in the training sample\r\n \r\n for i in range(train_ncase):\r\n case = self.train[train_case_no == i]\r\n if ((case.lu[0] == -1) & (case.ld[0] == -1) &\r\n (case.rsi1[0] == -1) & (case.rsi2[0] == -1)):\r\n train_case_result[case.index] = \"Trigger_OFF\"\r\n else:\r\n u1, u2, u3, u4, d1, d2, d3, d4 = (0.0,)*8\r\n if case.lu[0] == 1:\r\n u1 = self.train.ix[(self.train.lu == case.lu[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d1 = self.train.ix[(self.train.lu == case.lu[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n if case.ld[0] == 1:\r\n u2 = self.train.ix[(self.train.ld == case.ld[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d2 = self.train.ix[(self.train.ld == case.ld[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n if case.rsi1[0] == 1:\r\n u3 = self.train.ix[(self.train.rsi1 == case.rsi1[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d3 = self.train.ix[(self.train.rsi1 == case.rsi1[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == 
case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n if case.rsi2[0] == 1:\r\n u4 = self.train.ix[(self.train.rsi2 == case.rsi2[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d4 = self.train.ix[(self.train.rsi2 == case.rsi2[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n u = u1 + u2 + u3 + u4\r\n d = d1 + d2 + d3 + d4\r\n \r\n if u == d == 0.0:\r\n # This could happen it there is only one observation for this case,\r\n # and the Close of next day does not change, i.e. no up-day or down-day.\r\n # Assign it to be \"Obvious_WAIT\" by discretion.\r\n train_case_result[case.index] = \"Obvious_WAIT\"\r\n elif (u/(u+d)) >= .55:\r\n train_case_result[case.index] = \"Obvious_LONG\"\r\n elif (d/(u+d)) >= .55:\r\n train_case_result[case.index] = \"Obvious_SHORT\"\r\n elif u == d != 0.0:\r\n train_case_result[case.index] = \"Obvious_WAIT\"\r\n elif (.50 < (u/(u+d)) < .55) or (.45 < (u/(u+d)) < .50):\r\n train_case_result[case.index] = \"Non_Obvious\"\r\n self.trained_case = pd.concat([train_case_no, train_case_result], axis=1)\r\n self.trained_case.columns = [\"case_no\", \"case_result\"]", "def test2_SingleObservationScaleByChan(self):\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test2\")\n self.inpms += \".test2\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n #sjran = setjy(vis=self.inpms, field='Uranus', spw='', modimage='',\n sjran = setjy(vis=self.inpms, field='Titan', spw='', modimage='',\n scalebychan=True, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2010', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n \n if 'MODEL_DATA' not in cols:\n raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto1'] = tblocal.getcell('MODEL_DATA', 18)\n #record['long1'] = tblocal.getcell('MODEL_DATA', 19)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n # Titan\n if self.ismms:\n\t\t #record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n\t\t #record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t #record['auto3'] = tblocal.getcell('MODEL_DATA', 2835)\n\t\t #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(auto0query)\n record['auto0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==3 AND 
ANTENNA2==7 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(long0query)\n\t\t record['long0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n\t\t record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n\t\t record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n else:\n\t\t record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n\t\t record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t record['auto3'] = tblocal.getcell('MODEL_DATA', 405)\n\t\t record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n # record['history'] = self.get_last_history_line(self.inpms, origin='setjy::imager::setjy()', hint=\"V=0] Jy\")\n #record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n \"\"\"Flux density in HISTORY (scalebychan)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus\", \"V=0] Jy\"])\n if not self.ismms: self.check_history(self.result['history'], [\"Titan\", \"V=0] Jy\"])\n\n #\"\"\"WVR spw with scalebychan\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[25.93320656+0.j,\n # 26.88228607+0.j]]),\n # 0.003)\n\n \"\"\"Zero spacing of spw 1 with scalebychan\"\"\"\n # 8 (decreasing freq!) 
chans, XX & YY.\n #self.check_eq(self.result['auto1'],\n # numpy.array([[65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j],\n # [65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j]]),0.0001)\n # Titan ------------\n # check spw0, YY chan 0, 1920, 3839\n self.check_eq(self.result['auto0'][1][0], 3.30965233+0.j, 0.0001)\n self.check_eq(self.result['auto0'][1][1920], 3.31375313+0j, 0.0001)\n self.check_eq(self.result['auto0'][1][3839], 3.31785417+0j, 0.0001)\n\n \"\"\"Long spacing of spw 1 with scalebychan\"\"\"\n #self.check_eq(self.result['long1'],\n # numpy.array([[4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j],\n # [4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j]]),0.0001)\n # Titan\n self.check_eq(self.result['long0'][1][0],(2.77658414+6.98719121e-12j),0.0001)\n self.check_eq(self.result['long0'][1][1920],(2.77936244+6.99878090e-12j),0.0001)\n self.check_eq(self.result['long0'][1][3839],(2.78213906+7.01037362e-12j),0.0001)\n\n # spw 4 only has 1 chan, so it should be the same as without scalebychan.\n #\"\"\"Zero spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[69.33396912+0.j],[69.33396912+0.j]]),0.0001)\n \"\"\"Zero spacing of spw 3 with scalebychan\"\"\"\n self.check_eq(self.result['auto3'][1][0], (3.0934467+0j),0.0001)\n self.check_eq(self.result['auto3'][1][1920], (3.08946729+0j),0.0001)\n self.check_eq(self.result['auto3'][1][3839], (3.08549213+0j),0.0001)\n\n #\"\"\"Long spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['long4'], numpy.array([[2.83933783+0.j],[2.83933783+0.j]]),0.0001)\n\n \"\"\"Long spacing of spw 3 with scalebychan\"\"\"\n self.check_eq(self.result['long3'][1][0],(2.62812424+6.38091359e-12j) ,0.0001)\n self.check_eq(self.result['long3'][1][1920],(2.62534332+6.36981873e-12j) ,0.0001)\n self.check_eq(self.result['long3'][1][3839],(2.62256360+6.35873776e-12j) ,0.0001)\n\n return sjran", "def Model2():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n \n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*0.5\n \n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n tan.thresholds=0.5*np.ones(tan.inputs.shape)\n sn.thresholds = tan.GetActivationFunction()(np.zeros(sn.inputs.shape)-.5)\n sp.thresholds = tan.GetActivationFunction()(np.zeros(sp.inputs.shape)-.5)\n c2tan.weights = np.random.random(c2tan.weights.shape)/-100.0\n c2tan.learningEnabled = True\n\n c2tan.learningFunction = TAN_LearningRule\n \n return M2", "def 
ELRscript(model,mon,fday,fyr,day1,day2,nday,hdate_last,lit,liti,wk,nla1,sla1,wlo1,elo1,nla2,sla2,wlo2,elo2,fprefix,mpref,training_season,ntrain,rainfall_frequency,MOS):\n\n#%% model Hindcasts \n\tfh_xh = Dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc', mode='r')\n\tfh_yh = Dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc', mode='r')\n\n\tlons = fh_xh.variables['X'][:]\n\tlats = fh_xh.variables['Y'][:]\n\n\tx = fh_xh.variables['tp'][:]; x = np.squeeze(x)\n\ty = fh_yh.variables['tp'][:]\n\tndat1, nlat, nlon = np.shape(x)\n\tx1=x[:,1,1]\n\tI = np.where(x1>10000)\n\tbad_value_num=len(x1[I])\n\tndat=ndat1-bad_value_num\n\n#%% ELR: Train the models\n# Make a dictionary to contain the 'LogisticRegression' objects and terciles\n\telr_dict = {} # create an empty dictionary\n\telr_climo_dict = {} # create an empty dictionary for the climo forecast\n\n\tym = np.mean(y,axis=0)\n\tmsk = ma.getmask(ym)\n\tindex_land = np.empty((nlat,nlon),dtype=int)\n\txm0 = x\n\t#xm = xm0[0:int(ndat/2),:,:]\n\txm = xm0[0:lit,:,:]\n\n\tx0 = np.zeros(np.shape(xm)) # array of zeros to construct the climo forecast\n\tijland = -1\n\tfor j in range(nlat):\n\t# print(\"in j loop, j=\", j)\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\tindex_land[j,i] = ijland # index of land points\n\t\t\t\t#elr_dict[ijland] = elr_fit(xm[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\t#elr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\telr_dict[ijland] = elr_fit(xm[:,j,i], y[0:lit,j,i])\n\t\t\t\telr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:lit,j,i])\n\t\t\t# ijland is the dictionary key that can be used to assess the entries, like this\n\t\t\t# mymodel, mytercs = mydict[0]\n\t\t\t# mymodel.coef_\n\tnland = ijland+1\n\t#print('ELR training done with total landpoints = ',nland)\n\n\t#%% Make set of ELR in-sample hindcasts (no XV)\n\t#elr_hc = np.empty((ndat,nlat,nlon,3)); elr_hc.fill(np.nan)\n\t#elr_hc = np.empty((int(ndat/2),nlat,nlon)); elr_hc.fill(np.nan)\n\telr_hc = np.empty((lit,nlat,nlon)); elr_hc.fill(np.nan)\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\t#elr_hc[:,j,i,:] = elr_tercilesPredict(xm[:,j,i], terciles, elrmodel)\n\t\t\t\telr_hc[:,j,i] = elr_quantilePredict(xm[:,j,i], elrmodel)\n\n# ijland = index_land[lat1, lon1]\n# elrmodel, terciles = elr_dict[ijland]\n# elrmodel_climo, terciles = elr_climo_dict[ijland]\n# poe, q_fcst, q_clim, = elr_poe( xm[idat,lat1,lon1], elrmodel, elrmodel_climo )\n# plt.figure()\n\n\t#print('Set of ELR hindcasts made on a map of xy gridpoints')\n#---------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\t#T=int(ndat/2)\n\tT=lit\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(elr_hc, axis=1)\n\tvar[np.isnan(var)]=-999. 
#use CPT missing value\n\tdss=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tTarr=np.empty(ndat,dtype=int)\n\tfor it in range(ndat):\n\t\tTarr[it]=1901+it\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(y[0:lit,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tT1=lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\tndat_fc = ndat-lit\n\txf = x[lit:ndat,:,:]\n\tyf = y[lit:ndat,:,:]\n\n#%% Verification period\n########################################\n\n\telr_fc = np.empty((ndat_fc,nlat,nlon,3)); elr_fc.fill(np.nan)\n\trpss_ELR_fc = np.ma.array(np.empty((nlat,nlon)), mask=msk, fill_value=np.nan)\n\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\telr_fc[:,j,i,:] = elr_tercilesPredict(xf[:,j,i], terciles, elrmodel)\n\t#print('Set of ELR forcasts made on a map of xy gridpoints')\n\n#----------------------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_verification.txt'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tf.write(\"cpt:ncats=3\\n\")\n\tW=nlon\n\tH=nlat\n\tds=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\tT=ndat-lit\n\tTarr1=Tarr[lit:]\n\tXarr=lons\n\tYarr1=lats\n\tYarr=Yarr1[::-1] #Y should from N to S\n\tvari='tp'\n\tvar=np.flip(elr_fc, axis=1)*100\n\tvar[np.isnan(var)]=-1.0 #use CPT missing value\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:C=1, cpt:clim_prob=0.33333333333300003, cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=probability (%), cpt:missing=-1.0000000000000000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,0]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=2, 
cpt:clim_prob=0.33333333333400000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,1]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=3, cpt:clim_prob=0.33333333333299997\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,2]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_verification.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\t#var=np.flip(y[int(ndat/2):,:,:], axis=1)\n\tvar=np.flip(y[lit:,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\t#T1=int(ndat/2)\n\tT1=ndat-lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()", "def run_optuna():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n \"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n \"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n 
\"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n pruning_callback = optuna.integration.LightGBMPruningCallback(trial,\n \"multi_logloss\")\n model = lgb.train(params, dtrain,\n num_boost_round=1000,\n early_stopping_rounds=30,\n valid_sets=dvalid,\n callbacks=[pruning_callback]\n )\n vd_preds = model.predict(X_test, num_iteration=model.best_iteration)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. Returns max\n\n accuracy = accuracy_score(y_test, vd_preds)\n # gbm = lgb.train(param, dtrain)\n # pred_labels = np.rint(preds)\n # rmse = sklearn.metrics.mean_squared_error(valid_y, preds, squared=False)\n #accuracy = accuracy_score(valid_y, preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def main():\n sound_path = []\n textgrid_path = []\n # change the data to your own recorded sounds, make sure you have wav file and TextGrid file which\n # mark every vowel with some character, and every word end must be marked with \"wordend\"\n # theses recordings are only [a], [e], [i], [o], [u] sequence to test the pipeline flow,\n # the phonological rule learner tests are in the \"phonological_learner\" py file.\n for i in range(1, 5):\n sound_path.append(\"recordings\\\\aeiou{}.wav\".format(str(i)))\n textgrid_path.append(\"recordings\\\\aeiou{}.TextGrid\".format(str(i)))\n data = signal_parser.parse_input_sound(sound_path, textgrid_path) # from sound to vowel objects with f1 and f2\n clustered_data = mdl_clustering.mdl_cluster(data) # cluster into main values\n final_tagged_data = phonology_learner.extract_features(clustered_data.cluster) # add phonological features\n update_data(data, final_tagged_data) # update the input data with the phonological features\n lexicon = find_lexicon(data) # get lexicon from data\n data = separate_data_into_words(data)\n model = phonology_learner.MdlPhonology(final_tagged_data.keys(), lexicon, data, POSSIBLE_FEATURES)\n print model\n model = phonology_learner.mdl_phonology_learner(model)\n print model", "def Model3():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n snr = M2.GetGroupByName(\"SNr/GPi\")\n\n \n \n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n\n sn.SetActivationFunction(neu.Tanh_plus)\n sp.SetActivationFunction(neu.Tanh_plus)\n snr.SetActivationFunction(neu.Tanh_plus)\n\n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*-0.25\n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n 
tan.thresholds=0.5*np.ones(tan.inputs.shape)\n hb = np.average(sn.thresholds)/-tan.size\n HB = np.ones(tan.inputs.shape)*hb\n sn.thresholds = 0.1*np.ones(sn.activations.shape)\n sp.thresholds = 0.1*np.ones(sp.activations.shape)\n #sn.thresholds = -1*tan.GetActivationFunction()(np.ones(sn.inputs.shape)-1)\n #sp.thresholds = -1*tan.GetActivationFunction()(np.ones(sp.inputs.shape)-1)\n #sn.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sn.inputs.shape))\n #sp.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sp.inputs.shape))\n #c2tan.weights = np.random.random(c2tan.weights.shape)\n c2tan.weights = np.ones(c2tan.weights.shape)*1.5\n c2tan.mask = np.dot(np.ones(tan.inputs.shape),\n np.array([[1,1,1,0,0,0,0,0,0]]))\n c2tan.learningEnabled = True\n c2tan.learningFunction = TAN_LearningRule\n\n M2.SetParameter(\"TRACE_TAN\", True)\n M2.SetParameter(\"HB\", HB)\n return M2", "def model_run(request):\n user_name = request.user.username\n\n # Defaults\n test_string = \"Test_string_default\"\n test_variable = \"Test_variable_default\"\n fac_L_form= \"\"\n simulation_name = \"\"\n outlet_y = \"\"\n outlet_x = \"\"\n\n hydrograph_series_obs = None\n hydrograph_series_sim = None\n hydrograph_opacity =0.1\n observed_hydrograph = \"\"\n observed_hydrograph2 = ''\n observed_hydrograph3 = ''\n vol_bal_graphs = ''\n\n observed_hydrograph_userModified = \"\"\n observed_hydrograph_userModified2 = \"\"\n observed_hydrograph_userModified3 = \"\"\n vol_bal_graphs_userModified = ''\n \n observed_hydrograph_loaded = \"\"\n observed_hydrograph_loaded2 = \"\"\n observed_hydrograph_loaded3 = \"\"\n vol_bal_graphs_loaded = ''\n \n eta_ts_obj = eta_ts_obj_modified = eta_ts_obj_loaded = ''\n vo_ts_obj = vo_ts_obj_modified = vo_ts_obj_loaded = ''\n vc_ts_obj = vc_ts_obj_modified = vc_ts_obj_loaded = ''\n vs_ts_obj = vs_ts_obj_modified = vs_ts_obj_loaded = ''\n ppt_ts_obj = ppt_ts_obj_modified = ppt_ts_obj_loaded = ''\n\n model_run_hidden_form = ''\n model_input_prepare_request = None\n hs_resource_id_created = ''\n hs_resource_id_loaded = ''\n hs_resource_id_modified = ''\n\n\n simulation_loaded_id = \"\"\n current_model_inputs_table_id = 0\n model_inputs_table_id_from_another_html = 0 #:TODO need to make it point to last sim by default\n\n\n # if user wants to download the file only\n download_response = {}\n hs_res_downloadfile = ''\n download_status = download_response['download_status'] = None #False\n download_link = download_response['download_link'] = 'http://link.to.zipped.files'\n hs_res_created = download_response['hs_res_created'] = ''\n files_created_dict = 'No dict created'\n download_choice = []\n\n # initial values\n fac_L_init = fac_Ks_init = fac_n_o_init = fac_n_c_init = fac_th_s_init = 1.0\n pvs_t0_init = 10.0\n vo_t0_init = 5.0\n qc_t0_init = 1.0\n kc_init = 1.0\n\n # test\n if request.is_ajax and request.method == 'POST':\n pass\n\n '''\n model_run can receive request from three sources:\n 1) model_input, prepare model (if model_input_prepare_request != None)\n 2) model_input, load model (if model_input_load_request != None)\n 3) model_run, calibrate and change the result seen. i.e. 
passes to itself (if model_run_calib_request != None)\n '''\n\n\n # # check to see if the request is from method (1)\n try:\n model_input_prepare_request = request.POST['simulation_name']\n # if request.POST['download_choice'] != None:\n # model_input_prepare_request = None\n print \"MSG from I: Preparing model simulation, simulation name is: \", model_input_prepare_request\n except:\n\n model_input_prepare_request = None\n\n\n # # check to see if the request is from method (2)\n try:\n # for the input text\n try:\n model_input_load_request = hs_resource_id_created = request.POST['existing_sim_res_id']\n print \"MSG from II: Previous simulation is loaded.the simulation loaded from hs_res_id from text box is.\", hs_resource_id_created\n\n # chose dropdown if the field is blank. :TODO need to get rid of the except part below:\n if hs_resource_id_created == \"\":\n model_input_load_request = hs_resource_id_created = request.POST['simulation_names_list']\n print \"MSG from II: Previous simulation is loaded. The name of simulation loaded is: \", hs_resource_id_created\n\n # for the drop down list\n except:\n model_input_load_request = hs_resource_id_created = request.POST['simulation_names_list'] # from drop down menu\n b = request.POST['load_simulation_name']\n print 'MSG from II: The name of simulation loaded from dropdown menu is: ',hs_resource_id_created\n print \"MSG from II: Previous simulation is loaded. The name of simulation loaded is: \", hs_resource_id_created\n except:\n model_input_load_request = None\n\n\n # # check to see if the request is from method (3)\n try:\n model_run_calib_request = request.POST['fac_L']\n print 'MSG: Calibration parameters are modified'\n except:\n model_run_calib_request = None\n\n\n # Method (1), request from model_input-prepare model\n if model_input_prepare_request != None:\n print 'MSG: Method I initiated.'\n\n # Checks the model chosen\n model_engine_chosen = request.POST['model_engine']\n\n if model_engine_chosen.lower() == 'download' : #request.POST.getlist('download_choice2[]') != []:\n print 'User action: DOWNLOAD'\n\n download_choice = request.POST.getlist('download_choice2[]')\n\n print 'download_choice(s)=', download_choice\n\n inputs_dictionary = app_utils.create_model_input_dict_from_request(request)\n\n download_request_response = {\n u'output_response_txt': u'http://129.123.9.159:20199/files/data/user_6/metadata.txt',\n u'output_zipfile': u'http://129.123.9.159:20199/files/data/user_6/output.zip',\n u'output_json_string': {'hs_res_id_created': '12456'}}\n\n download_request_response = app_utils.download_geospatial_and_forcing_files(inputs_dictionary,\n download_request=download_choice)\n\n print \"Downloading all the files successfully completed\"\n\n if download_request_response != {}:\n download_status = True\n download_link = download_request_response['output_zipfile']\n hs_res_downloadfile = download_request_response['output_json_string']['hs_res_id_created']\n\n elif model_engine_chosen.lower() == 'topnet':\n print 'User action: TOPNET'\n\n inputs_dictionary = app_utils.create_model_input_dict_from_request(request)\n run_request = app_utils.run_topnet(inputs_dictionary)\n\n elif model_engine_chosen.lower() == 'topkapi':\n print 'User action: topkapi'\n\n # # Method (1), STEP (1): get input dictionary from request ( request I)\n inputs_dictionary = app_utils.create_model_input_dict_from_request(request)\n test_string = str(\"Prepared Values: \")+str(inputs_dictionary)\n simulation_name = inputs_dictionary['simulation_name']\n print 
\"MSG: Inputs from user read\"\n\n\n\n # # Method (1), STEP (2):call_runpytopkapi function\n # response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/e14239bf38bc490cae63e131c822a17d/pytopkpai_responseJSON.txt'\n # response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/a3c75f158ad44fe1a46ceb8a67224aae/pytopkpai_responseJSON.txt'\n response_JSON_file = app_utils.call_runpytopkapi(inputs_dictionary= inputs_dictionary)\n\n json_data = app_utils.read_data_from_json(response_JSON_file)\n\n print 'MSG: Prepared Simulation Hydrograph ...'\n\n hs_resource_id_created = json_data['hs_res_id_created']\n hydrograph_series_obs = json_data['hydrograph_series_obs']\n hydrograph_series_sim = json_data['hydrograph_series_sim']\n\n\n eta = json_data['eta']\n vo = json_data['vo']\n vc = json_data['vc']\n vs = json_data['vs']\n ppt= json_data['ppt']\n\n ppt_cum = json_data['ppt_cum'] # cumulative\n eta_cum = json_data['eta_cum']\n q_obs_cum = json_data['q_obs_cum']\n q_sim_cum = json_data['q_sim_cum']\n\n # initial values\n # calib_parameter= {\"fac_l\": 1.0, \"fac_n_o\": 1.0, \"fac_n_c\": 1.0, \"fac_th_s\": 1.0, \"fac_ks\": 1.0},\n # numeric_param= {\"pvs_t0\": 50, \"vo_t0\": 750.0, \"qc_t0\": 0.0, \"kc\": 1.0},\n if json_data['calib_parameter'] != None:\n fac_L_init = json_data['calib_parameter']['fac_l']\n fac_Ks_init = json_data['calib_parameter']['fac_ks']\n fac_n_o_init = json_data['calib_parameter']['fac_n_o']\n fac_n_c_init = json_data['calib_parameter']['fac_n_c']\n fac_th_s_init = json_data['calib_parameter']['fac_th_s']\n if json_data['numeric_param'] != None:\n pvs_t0_init = json_data['numeric_param']['pvs_t0']\n vo_t0_init = json_data['numeric_param']['vo_t0']\n qc_t0_init = json_data['numeric_param']['qc_t0']\n kc_init = json_data['numeric_param']['kc']\n\n print '*****************', hs_resource_id_created\n # print [i[-1] for i in hydrograph_series_sim]\n # hydrograph_series_obs = np.nan_to_num(hydrograph_series_obs).tolist()\n\n # replace nan values to 0 because Tethys timeseries cannot display nan\n hydrograph_series_obs = [[item[0], 0] if np.isnan(item[-1]) else item for item in hydrograph_series_obs]\n\n\n try:\n try:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim],\n [i[-1] for i in hydrograph_series_obs])\n except:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim])\n\n # Writing to model_inputs_table\n current_model_inputs_table_id = app_utils.write_to_model_input_table(inputs_dictionary=inputs_dictionary, hs_resource_id= hs_resource_id_created)\n\n # Writing to model_calibraiton_table (Because it is first record of the simulation)\n # IF the model did not run, or if user just wants the files, we don't write to calibration table\n current_model_calibration_table_id = app_utils.write_to_model_calibration_table( model_input_table_id=current_model_inputs_table_id,\n numeric_parameters_list=[pvs_t0_init, vo_t0_init,qc_t0_init, kc_init],\n calibration_parameters_list=[fac_L_init,fac_Ks_init, fac_n_o_init,fac_n_c_init,fac_th_s_init])\n\n # Writing to model_result_table\n current_model_result_table_id = app_utils.write_to_model_result_table(model_calibration_table_id=current_model_calibration_table_id,\n timeseries_discharge_list=data_qsim_qobs)\n except Exception, e:\n print \"Error ---> Writing to DB\", e\n\n\n\n\n observed_hydrograph3 = TimeSeries(\n 
height='300px', width='500px', engine='highcharts',\n title=\"Simulated and Observed Hydrographs\",\n subtitle='Nash value: %s, R2: %s'%(json_data['nash_value'], json_data['r2_value']),\n y_axis_title='Discharge ',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Hydrograph',\n 'data': hydrograph_series_sim,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Hydrograph',\n 'data': hydrograph_series_obs,\n 'fillOpacity': hydrograph_opacity,\n }])\n\n\n vol_bal_graphs = TimeSeries(\n height='600px', width='500px', engine='highcharts',\n title=\"Cumulative volume of water in the basin\",\n y_axis_title='Volume of water ',\n y_axis_units='mm',\n series=[{\n 'name': 'Simulated Q',\n 'data': q_sim_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Q',\n 'data': q_obs_cum,\n 'fillOpacity': hydrograph_opacity,\n },{\n 'name': 'ETa',\n 'data': eta_cum,\n 'fillOpacity': hydrograph_opacity,\n } , {\n 'name': 'PPT',\n 'data': ppt_cum,\n 'fillOpacity': hydrograph_opacity,\n }\n ])\n\n eta_ts_obj =app_utils.create_1d(timeseries_list=eta, label='Actual Evapotranspiration', unit='mm/day')\n vc_ts_obj = app_utils.create_1d(timeseries_list=vc, label='Average Water Volume in Channel Cells', unit='mm/day')\n vs_ts_obj = app_utils.create_1d(timeseries_list=vs, label='Average Water Volume in Soil Cells', unit='mm/day')\n vo_ts_obj = app_utils.create_1d(timeseries_list=vo, label='Average Water Volume in Overland Cells', unit='mm/day')\n ppt_ts_obj = app_utils.create_1d(timeseries_list=ppt, label='Rainfall', unit='mm/day')\n\n\n\n # Method (2), request from model_input-load simulation\n if model_input_load_request != None:\n hs_resource_id = model_input_load_request\n\n print 'MSG: Method II initiated.'\n print 'MSG: Model run for HydroShare resource ID ', hs_resource_id , \" is being retreived..\"\n\n\n # # STEP1: Retrieve simulation information (files stored in HydroShare) from db in a dict\n # inputs_dictionary = app_utils.create_model_input_dict_from_db( hs_resource_id= hs_resource_id,user_name= user_name )\n # test_string = str(\"Loaded Values: \")+str(inputs_dictionary)\n\n\n\n ######### START: need to get two variables: i) hs_resource_id_created, and ii) hydrograph series ##############\n response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/16ea0402dd4c403bbb4e5b23ed597728/pytopkpai_responseJSON.txt'\n response_JSON_file = app_utils.loadpytopkapi(hs_res_id=hs_resource_id, out_folder='')\n json_data = app_utils.read_data_from_json(response_JSON_file)\n\n hs_resource_id_created = hs_resource_id_loaded =hs_resource_id #json_data['hs_res_id_created']\n\n\n hydrograph_series_sim = json_data['hydrograph_series_sim']\n hydrograph_series_obs = json_data['hydrograph_series_obs']\n eta = json_data['eta']\n vo = json_data['vo']\n vc = json_data['vc']\n vs = json_data['vs']\n ppt = json_data['ppt']\n\n ppt_cum = json_data['ppt_cum'] # cumulative\n eta_cum = json_data['eta_cum']\n q_obs_cum = json_data['q_obs_cum']\n q_sim_cum = json_data['q_sim_cum']\n \n \n # init values in the form\n if json_data['calib_parameter'] != None:\n fac_L_init = json_data['calib_parameter']['fac_l']\n fac_Ks_init = json_data['calib_parameter']['fac_ks']\n fac_n_o_init = json_data['calib_parameter']['fac_n_o']\n fac_n_c_init = json_data['calib_parameter']['fac_n_c']\n fac_th_s_init = json_data['calib_parameter']['fac_th_s']\n if json_data['numeric_param'] != None:\n pvs_t0_init = json_data['numeric_param']['pvs_t0']\n vo_t0_init = 
json_data['numeric_param']['vo_t0']\n qc_t0_init = json_data['numeric_param']['qc_t0']\n kc_init = json_data['numeric_param']['kc']\n\n observed_hydrograph_loaded = TimeSeries(\n height='300px',width='500px', engine='highcharts',title=' Simulated Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge',y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Flow',\n 'data': hydrograph_series_sim\n }])\n\n observed_hydrograph_loaded2 = TimeSeries(\n height='500px',width='500px', engine='highcharts',title='Observed (actual) Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge',y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Flow',\n 'data': hydrograph_series_obs\n }])\n\n observed_hydrograph_loaded3 = TimeSeries(\n height='300px',\n width='500px',\n engine='highcharts',\n title= \"Simulated and Observed flow \" ,\n subtitle='Nash value: %s, R2: %s'%(json_data['nash_value'], json_data['r2_value']),\n y_axis_title='Discharge',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Hydrograph',\n 'data': hydrograph_series_sim,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Hydrograph',\n 'data': hydrograph_series_obs,\n 'fillOpacity': hydrograph_opacity,\n }]\n )\n\n vol_bal_graphs_loaded = TimeSeries(\n height='600px',\n width='500px',\n engine='highcharts',\n title=\"Cumulative volume of water in the basin\",\n y_axis_title='Volume of water ',\n y_axis_units='mm',\n series=[{\n 'name': 'Simulated Q',\n 'data': q_sim_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Q',\n 'data': q_obs_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'ETa',\n 'data': eta_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'PPT',\n 'data': ppt_cum,\n 'fillOpacity': hydrograph_opacity,\n }\n ])\n\n vc_ts_obj_loaded = app_utils.create_1d(timeseries_list=vc, label='Average Water Volume in Channel Cells',unit='mm/day')\n vs_ts_obj_loaded = app_utils.create_1d(timeseries_list=vs, label='Average Water Volume in Soil Cells', unit='mm/day')\n vo_ts_obj_loaded = app_utils.create_1d(timeseries_list=vo, label='Average Water Volume in Overland Cells',unit='mm/day')\n ppt_ts_obj_loaded = app_utils.create_1d(timeseries_list=ppt, label='Rainfall', unit='mm/day')\n eta_ts_obj_loaded = app_utils.create_1d(timeseries_list=eta, label='Actual Evapotranspiration', unit='mm/day')\n # STEP2: Because in this part we load previous simulation, Load the model from hydroshare to hydroDS,\n # STEP2: And from the prepeared model, if the result is not available, run. Otherwise just give the result\n # hydrograph2, table_id = app_utils.run_model_with_input_as_dictionary(inputs_dictionary, False)\n #* STEP3: Make sure a string/variable/field remains that contains the id of the model. SO when user modifies it, that model is modifed\n # # STEP4B: Write to db\n # current_model_inputs_table_id = app_utils.write_to_model_input_table(inputs_dictionary,simulation_folder)\n # print \"MSG: Inputs from model_input form written to db. 
Model RAN already\"\n # STEP5: get the revised hydrographs, and plot it\n # preparing timeseries data in the format shown in: http://docs.tethysplatform.org/en/latest/tethys_sdk/gizmos/plot_view.html#time-series\n\n # hydrograph2 = []\n # observed_hydrograph_loaded = ''\n\n\n # Method (3), request from model_run, change calibration parameters\n if model_run_calib_request != None :\n\n fac_L_form = float(request.POST['fac_L'])\n fac_Ks_form = float(request.POST['fac_Ks'])\n fac_n_o_form = float(request.POST['fac_n_o'])\n fac_n_c_form = float(request.POST['fac_n_c'])\n fac_th_s_form = float(request.POST['fac_th_s'])\n\n pvs_t0_form = float(request.POST['pvs_t0'])\n vo_t0_form = float(request.POST['vo_t0'])\n qc_t0_form = float(request.POST['qc_t0'])\n kc_form = float(request.POST['kc'])\n\n # model_inputs_table_id_from_another_html = request.POST['model_inputs_table_id_from_another_html']\n hs_resource_id_from_previous_simulation = request.POST['model_inputs_table_id_from_another_html']\n # current_model_inputs_table_id =hs_resource_id_from_previous_simulation\n hs_resource_id_created = hs_resource_id_from_previous_simulation\n\n print 'MSG: Method III initiated. The model id we are looking at is: ', hs_resource_id_from_previous_simulation\n\n\n ######### START: need to get at leaset two variables: i) hs_resource_id_created, and ii) hydrograph series #####\n response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/16ea0402dd4c403bbb4e5b23ed597728/pytopkpai_responseJSON.txt'\n response_JSON_file = app_utils.modifypytopkapi(hs_res_id=hs_resource_id_created, out_folder='',\n fac_l=fac_L_form, fac_ks=fac_Ks_form, fac_n_o=fac_n_o_form,\n fac_n_c=fac_n_c_form, fac_th_s=fac_th_s_form,\n pvs_t0=pvs_t0_form, vo_t0=vo_t0_form, qc_t0=qc_t0_form,\n kc=kc_form )\n json_data = app_utils.read_data_from_json(response_JSON_file)\n\n hs_resource_id_created = hs_resource_id_modified = json_data['hs_res_id_created']\n hydrograph_series_sim = json_data['hydrograph_series_sim']\n hydrograph_series_obs = json_data['hydrograph_series_obs']\n eta = json_data['eta']\n ppt = json_data['ppt']\n vo = json_data['vo']\n vc = json_data['vc']\n vs = json_data['vs']\n\n ppt_cum = json_data['ppt_cum'] # cumulative\n eta_cum = json_data['eta_cum']\n q_obs_cum = json_data['q_obs_cum']\n q_sim_cum = json_data['q_sim_cum']\n\n print 'hydrograph_series_sim is ',[item[-1] for item in hydrograph_series_sim]\n\n # init values in the form\n if json_data['calib_parameter'] != None:\n fac_L_init = json_data['calib_parameter']['fac_l']\n fac_Ks_init = json_data['calib_parameter']['fac_ks']\n fac_n_o_init = json_data['calib_parameter']['fac_n_o']\n fac_n_c_init = json_data['calib_parameter']['fac_n_c']\n fac_th_s_init = json_data['calib_parameter']['fac_th_s']\n if json_data['numeric_param'] != None:\n pvs_t0_init = json_data['numeric_param']['pvs_t0']\n vo_t0_init = json_data['numeric_param']['vo_t0']\n qc_t0_init = json_data['numeric_param']['qc_t0']\n kc_init = json_data['numeric_param']['kc']\n print '***hs_resource_id_created', hs_resource_id_created\n # print [i[-1] for i in hydrograph_series_sim]\n ######### END : ###############\n\n # # # -------DATABASE STUFFS <start>----- # #\n # # retreive the model_inputs_table.id of this entry to pass it to the next page (calibration page)\n # from .model import engine, SessionMaker, Base, model_calibration_table\n # session = SessionMaker() # Make session\n #\n # # STEP1: retrieve the model_inputs_table.id of this entry to 
pass it to the next page (calibration page)\n # current_model_inputs_table_id = str(len(session.query(model_inputs_table).filter(\n # model_inputs_table.user_name == user_name).all())) # because PK is the same as no of rows, i.e. length\n #\n # # STEP2: use the id retrieved in STEP1 to get all the remaining parameters\n # print 'model_input ID for which rest of the inputs are being retrieved: ', current_model_inputs_table_id\n #\n # all_rows = session.query(model_inputs_table).filter(model_inputs_table.id == current_model_inputs_table_id).all()\n #\n # # retrieve the parameters and write to a dictionary\n # inputs_dictionary = {}\n #\n # for row in all_rows:\n # inputs_dictionary['id'] = row.id\n # inputs_dictionary['user_name'] = row.user_name\n # inputs_dictionary['simulation_name'] = row.simulation_name\n # inputs_dictionary['simulation_folder'] = row.simulation_folder\n # inputs_dictionary['simulation_start_date'] = row.simulation_start_date\n # inputs_dictionary['simulation_end_date'] = row.simulation_end_date\n # inputs_dictionary['USGS_gage'] = row.USGS_gage\n #\n # inputs_dictionary['outlet_x'] = row.outlet_x\n # inputs_dictionary['outlet_y'] = row.outlet_y\n # inputs_dictionary['box_topY'] = row.box_topY\n # inputs_dictionary['box_bottomY'] = row.box_bottomY\n # inputs_dictionary['box_rightX'] = row.box_rightX\n # inputs_dictionary['box_leftX'] = row.box_leftX\n #\n # timeseries_source,threshold, cell_size,timestep = row.other_model_parameters.split(\"__\")\n # inputs_dictionary['timeseries_source'] = timeseries_source\n # inputs_dictionary['threshold'] = threshold\n # inputs_dictionary['cell_size'] = cell_size\n # inputs_dictionary['timestep'] = timestep\n #\n # inputs_dictionary['model_engine'] = row.model_engine\n\n\n observed_hydrograph_userModified3 = TimeSeries(\n height='300px',\n width='500px',\n engine='highcharts',\n title= \"Simulated and Observed flow \" ,\n subtitle='Nash value: %s, R2: %s' % (json_data['nash_value'], json_data['r2_value']),\n y_axis_title='Discharge ',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Hydrograph',\n 'data': hydrograph_series_sim,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Hydrograph',\n 'data': hydrograph_series_obs,\n 'fillOpacity': hydrograph_opacity,\n }]\n )\n\n observed_hydrograph_userModified = TimeSeries(\n height='300px',\n width='500px',\n engine='highcharts',\n title=' Corrected Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Flow',\n 'data': hydrograph_series_sim\n }]\n )\n\n observed_hydrograph_userModified2 = TimeSeries(\n height='500px', width='500px', engine='highcharts', title=' Observed (Actual) Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge', y_axis_units='cfs',\n series=[{\n 'name': 'Observed Flow',\n 'data': hydrograph_series_obs\n }])\n\n vol_bal_graphs_userModified = TimeSeries(\n height='600px',\n width='500px',\n engine='highcharts',\n title=\"Cumulative volume of water in the basin\",\n y_axis_title='Volume of water ',\n y_axis_units='mm',\n series=[{\n 'name': 'Simulated Q',\n 'data': q_sim_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Q',\n 'data': q_obs_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'ETa',\n 'data': eta_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'PPT',\n 'data': ppt_cum,\n 'fillOpacity': hydrograph_opacity,\n }\n ])\n\n vc_ts_obj_modified = app_utils.create_1d(timeseries_list=vc, 
label='Average Water Volume in Channel Cells',unit='mm/day')\n vs_ts_obj_modified = app_utils.create_1d(timeseries_list=vs, label='Average Water Volume in Soil Cells', unit='mm/day')\n vo_ts_obj_modified = app_utils.create_1d(timeseries_list=vo, label='Average Water Volume in Overland Cells',unit='mm/day')\n ppt_ts_obj_modified = app_utils.create_1d(timeseries_list=ppt, label='Rainfall', unit='mm/day')\n eta_ts_obj_modified = app_utils.create_1d(timeseries_list=eta, label='Actual Evapotranspiration', unit='mm/day')\n\n # create input_dictionary for the last run. Because we are modifying, we need to load the last run\n inputs_dictionary = app_utils.create_model_input_dict_from_db(hs_resource_id = hs_resource_id_from_previous_simulation, user_name= user_name )\n\n # Writing to db\n try:\n try:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim],\n [i[-1] for i in hydrograph_series_obs])\n except:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim])\n\n # Writing to model_calibration_table\n current_model_calibration_table_id= app_utils.write_to_model_calibration_table(hs_resource_id=hs_resource_id_from_previous_simulation,\n numeric_parameters_list=[pvs_t0_init, vo_t0_init, qc_t0_init,\n kc_init],\n calibration_parameters_list=[fac_L_init, fac_Ks_init,\n fac_n_o_init, fac_n_c_init,\n fac_th_s_init])\n # Writing to model_result_table\n current_model_result_table_id = app_utils.write_to_model_result_table(\n model_calibration_table_id=current_model_calibration_table_id,\n timeseries_discharge_list =data_qsim_qobs)\n\n\n except Exception, e:\n print \"Error ---> Writing to DB\", e\n \n\n # # # -------DATABASE STUFFS <ends> ----- # #\n\n\n\n\n print 'simulation_loaded_id',simulation_loaded_id # probably useless\n print 'hs_resource_id_created', hs_resource_id_created\n\n print 'hs_resource_id_prepared', model_input_prepare_request\n print 'hs_resource_id_loaded', model_input_load_request\n print 'hs_resource_id_modified', model_run_calib_request\n\n\n\n # gizmo settings\n fac_L = TextInput(display_text='Soil depth across all model cells', name='fac_L', initial=float(fac_L_init))\n fac_Ks = TextInput(display_text='Saturated hydraulic conductivity', name='fac_Ks', initial=float(fac_Ks_init))\n fac_n_o = TextInput(display_text=\"Manning's n for overland\", name='fac_n_o', initial=float(fac_n_o_init))\n fac_n_c = TextInput(display_text=\"Manning's n for channel\", name='fac_n_c', initial=float(fac_n_c_init))\n fac_th_s = TextInput(display_text='Soil saturation', name='fac_th_s', initial=float(fac_th_s_init))\n\n pvs_t0 = TextInput(display_text=\"Soil cell's saturation %\", name='pvs_t0', initial=float(pvs_t0_init))\n vo_t0 = TextInput(display_text=\"Water volume in Overland cells (m3)\", name='vo_t0', initial=float(vo_t0_init))\n qc_t0 = TextInput(display_text='Flow in channel cells (m3/s)', name='qc_t0', initial=float(qc_t0_init))\n kc = TextInput(display_text='Crop coefficient across all model cells', name='kc', initial=float(kc_init))\n\n\n\n\n context = {'simulation_name':simulation_name,\n 'outlet_y': outlet_y,\n 'outlet_x': outlet_x,\n\n\n 'fac_L': fac_L, 'fac_Ks': fac_Ks, 'fac_n_o': fac_n_o, \"fac_n_c\": fac_n_c, \"fac_th_s\": fac_th_s,\n 'pvs_t0': pvs_t0, 'vo_t0': vo_t0, 'qc_t0': qc_t0, \"kc\": kc,\n\n 'fac_L_form': fac_L_form,\n 'user_name':user_name,\n\n #'Iwillgiveyou_model_inputs_table_id_from_another_html':model_inputs_table_id_from_another_html,\n # 
\"current_model_inputs_table_id\":current_model_inputs_table_id, # model_inputs_table_id\n\n 'observed_hydrograph3': observed_hydrograph3,\n 'observed_hydrograph': observed_hydrograph,\n 'observed_hydrograph2': observed_hydrograph2,\n\n\n \"observed_hydrograph_userModified\":observed_hydrograph_userModified,\n \"observed_hydrograph_userModified2\": observed_hydrograph_userModified2,\n \"observed_hydrograph_userModified3\": observed_hydrograph_userModified3,\n\n \"observed_hydrograph_loaded\":observed_hydrograph_loaded,\n \"observed_hydrograph_loaded2\": observed_hydrograph_loaded2,\n \"observed_hydrograph_loaded3\": observed_hydrograph_loaded3,\n\n 'eta_ts_obj': eta_ts_obj,\n 'vs_ts_obj': vs_ts_obj,\n 'vc_ts_obj': vc_ts_obj,\n 'vo_ts_obj': vo_ts_obj,\n 'ppt_ts_obj': ppt_ts_obj,\n 'vol_bal_graphs':vol_bal_graphs,\n\n 'eta_ts_obj_modified': eta_ts_obj_modified,\n 'vs_ts_obj_modified': vs_ts_obj_modified,\n 'vc_ts_obj_modified': vc_ts_obj_modified,\n 'vo_ts_obj_modified': vo_ts_obj_modified,\n 'ppt_ts_obj_modified': ppt_ts_obj_modified,\n 'vol_bal_graphs_userModified':vol_bal_graphs_userModified,\n\n 'eta_ts_obj_loaded': eta_ts_obj_loaded,\n 'vs_ts_obj_loaded': vs_ts_obj_loaded,\n 'vc_ts_obj_loaded': vc_ts_obj_loaded,\n 'vo_ts_obj_loaded': vo_ts_obj_loaded,\n 'ppt_ts_obj_loaded': ppt_ts_obj_loaded,\n 'vol_bal_graphs_loaded':vol_bal_graphs_loaded,\n\n\n \"simulation_loaded_id\":simulation_loaded_id,\n 'test_string':simulation_loaded_id, #test_string\n 'test_variable':test_variable,\n\n 'hs_resource_id_created':hs_resource_id_created,\n\n 'hs_resource_id_prepared': model_input_prepare_request,\n 'hs_resource_id_loaded': model_input_load_request,\n 'hs_resource_id_modified': model_run_calib_request,\n\n # fow download request\n 'hs_res_downloadfile':hs_res_downloadfile,\n 'download_status': download_status,\n 'download_link': download_link,\n 'hs_res_created': hs_res_created,\n 'dict_files_created': files_created_dict,\n }\n\n return render(request, 'hydrologic_modeling/model-run.html', context)", "def send_to_mco(self, model, kpi_results):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
perform_tal1_ko(model) -> DataFrame Performs knockout of selected sites in TAL1 and LMO2 regions.
def perform_lmo2_ko(self, model):

    "save representations"
    self.chr = 11
    self.cfg.get_lmo2_only = True
    ko_ob.test_tal1_lmo2(model)

    "perform ko"
    self.cfg.hnisz_region = "lmo2"
    _, ko_pred_df, _ = self.perform_ko(model)
    return ko_pred_df
[ "def perform_tal1_ko(self, model):\n\n \"save representations\"\n self.chr = 1\n self.cfg.get_tal1_only = True\n ko_ob.test_tal1_lmo2(model)\n\n \"perform ko\"\n self.cfg.hnisz_region = \"tal1\"\n _, ko_pred_df, _ = self.perform_ko(model)\n return ko_pred_df", "def perform_ko(self, model):\n\n cfg = self.cfg\n\n \"load data\"\n if cfg.run_tal and cfg.hnisz_region == \"tal1\":\n self.cfg.get_tal1_only = True\n data_loader = self.prepare_tal1_lmo2()\n elif cfg.run_tal and cfg.hnisz_region == \"lmo2\":\n self.cfg.get_lmo2_only = True\n data_loader = self.prepare_tal1_lmo2()\n else:\n data_loader = get_data_loader_chr(cfg, self.chr, shuffle=False)\n\n \"get zero embed\"\n cfg.full_test = False\n cfg.compute_pca = False\n cfg.get_zero_pred = True\n zero_embed = test_model(model, cfg, self.chr)\n\n \"get knockout indices depending on experiment\"\n if cfg.run_tal:\n if cfg.hnisz_region == \"tal1\":\n cfg.ko_experiment = \"ctcf\"\n indices = cfg.tal1ko_indices\n elif cfg.hnisz_region == \"lmo2\":\n cfg.ko_experiment = \"ctcf\"\n indices = np.array(cfg.lmo2ko_indices) + get_cumpos(cfg, 11)\n else:\n if cfg.ko_experiment == \"ctcf\":\n if cfg.ctcf_indices == \"all\":\n indices = ko_ob.get_ctcf_indices()\n indices = sample(list(indices), 10)\n else:\n indices = ko_ob.cfg.ctcf_indices_22\n elif cfg.ko_experiment == \"foxg1\":\n indices = cfg.foxg1_indices\n elif cfg.ko_experiment == \"tadbs\":\n indices = ko_ob.get_tadbs()\n\n \"plotting and metrics\"\n n_indices = len(indices)\n diff_list = np.zeros((n_indices, 11))\n diff_mat = np.zeros((n_indices, 200, 200))\n \"run for all indices\"\n for i, indice in enumerate(indices):\n \"get representations\"\n representations, start, stop, pred_data = self.get_trained_representations(method=\"hiclstm\")\n\n \"alter representations\"\n representations, zero_embed = self.ko_representations(representations, start, indice, zero_embed,\n mode=cfg.ko_mode)\n\n if self.cfg.load_ko:\n ko_pred_df = pd.read_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n else:\n \"run through model using altered representations, save ko predictions\"\n _, ko_pred_df = model.perform_ko(data_loader, representations, start, zero_embed, mode=\"ko\")\n if self.cfg.save_kopred:\n ko_pred_df.to_csv(cfg.output_directory + \"hiclstm_%s_afko_chr%s.csv\" % (cfg.cell, str(chr)),\n sep=\"\\t\")\n\n \"compute difference between WT and KO predictions\"\n if self.cfg.compute_avg_diff:\n ko_diffs = self.compute_kodiff(pred_data, ko_pred_df, indice)\n diff_list[i] = ko_diffs\n\n \"get merged heatmap\"\n pred_data = pd.merge(pred_data, ko_pred_df, on=[\"i\", \"j\"])\n pred_data = pred_data.rename(columns={\"ko_pred\": \"v\"})\n hic_mat, st = get_heatmaps(pred_data, no_pred=False)\n # simple_plot(hic_mat, mode=\"reds\")\n\n \"get diff mat\"\n hic_win = indices_diff_mat(indice, st, hic_mat, mode=cfg.ko_experiment)\n n_win = len(hic_win)\n diff_mat[i, :n_win, :n_win] = hic_win\n\n diff_mat = diff_mat.mean(axis=0)\n ko = np.triu(diff_mat)\n pred = np.tril(diff_mat).T\n diff_mat = ko - pred\n simple_plot(diff_mat, mode=\"diff\")\n np.save(cfg.output_directory + \"tad_diff_zero_ctctn.npy\", diff_mat)\n mean_diff = np.mean(diff_list, axis=1)\n return mean_diff, ko_pred_df, pred_data", "def test_tal1_lmo2(self, model):\n\n \"prepare dataloader\"\n data_loader = self.prepare_tal1_lmo2()\n\n \"test model\"\n self.cfg.full_test = True\n self.cfg.compute_pca = False\n self.cfg.get_zero_pred = False\n _, _, _, pred_df, _ = model.test(data_loader)\n\n \"save 
predictions\"\n pred_df.to_csv(self.cfg.output_directory + \"hiclstm_%s_predictions_chr%s.csv\" % (self.cell, str(self.chr)),\n sep=\"\\t\")\n return pred_df", "def train_tal1_lmo2(self, model):\n\n \"summary writer\"\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n writer = SummaryWriter('./tensorboard_logs/' + cfg.model_name + timestr)\n\n \"initialize optimizer and prepare dataloader\"\n self.cfg.get_tal1_only = False\n self.cfg.get_lmo2_only = False\n optimizer, criterion = model.compile_optimizer()\n data_loader = self.prepare_tal1_lmo2()\n\n \"train and save the model\"\n model.train_model(data_loader, criterion, optimizer, writer)\n torch.save(model.state_dict(), cfg.model_dir + cfg.model_name + '.pth')", "def prepare_tal1_lmo2(self):\n\n \"load Hi-C like data\"\n tal_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/tal_df.txt\", sep=\"\\t\")\n lmo2_df = pd.read_csv(cfg.hic_path + cfg.cell + \"/lmo2_df.txt\", sep=\"\\t\")\n\n \"preprocess\"\n tal_df = tal_df.drop(['Unnamed: 0'], axis=1)\n lmo2_df = lmo2_df.drop(['Unnamed: 0'], axis=1)\n tal_df[['i', 'j']] = tal_df[['i', 'j']].astype('int64')\n lmo2_df[['i', 'j']] = lmo2_df[['i', 'j']].astype('int64')\n\n \"prepare indices and values for TAL1 in chromosome 1\"\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n input_idx_tal1, values_tal1 = get_samples_sparse(tal_df, 1, cfg)\n values_tal1 = F.pad(input=values_tal1, pad=(0, 4, 0, 0), mode='constant', value=0)\n input_idx_tal1 = F.pad(input=input_idx_tal1, pad=(0, 0, 0, 4, 0, 0), mode='constant', value=0)\n values = torch.cat((values, values_tal1.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_tal1), 0)\n\n if self.cfg.get_tal1_only:\n \"create tal dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n return data_loader\n\n if self.cfg.get_lmo2_only:\n values = torch.empty(0, cfg.sequence_length)\n input_idx = torch.empty(0, cfg.sequence_length, 2)\n\n \"prepare indices and values for LMO2 in chromosome 11\"\n input_idx_lmo2, values_lmo2 = get_samples_sparse(lmo2_df, 11, cfg)\n values = torch.cat((values, values_lmo2.float()), 0)\n input_idx = torch.cat((input_idx, input_idx_lmo2), 0)\n\n \"create dataloader\"\n dataset = torch.utils.data.TensorDataset(input_idx, values)\n data_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=cfg.batch_size, shuffle=True)\n\n return data_loader", "def test5_SingleObservationSelectByIntentNewModel(self):\n\n # print out some values for debugging\n debug=False\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test5\")\n self.inpms += \".test5\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n sjran = setjy(vis=self.inpms, field='', spw='', modimage='',\n selectdata=True, intent=\"*AMPLI*\",\n scalebychan=False, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2012', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n if 'MODEL_DATA' not in cols:\n #raise AssertionError, \"setjy(field='Uranus') did not add 
a MODEL_DATA column\"\n raise AssertionError, \"setjy(field='Titan') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto3'] = tblocal.getcell('MODEL_DATA', 10)\n #record['long3'] = tblocal.getcell('MODEL_DATA', 11)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['med4'] = tblocal.getcell('MODEL_DATA', 4)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n # Titan\n if self.ismms:\n # row numbers for specific data changed...\n #record['auto2'] = tblocal.getcell('MODEL_DATA', 1892)\n #record['long2'] = tblocal.getcell('MODEL_DATA', 1930)\n #record['auto3'] = tblocal.getcell('MODEL_DATA', 2838)\n #record['med3'] = tblocal.getcell('MODEL_DATA', 2854)\n #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto2query = querystr+' AND DATA_DESC_ID==2 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(auto2query)\n record['auto2'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long2query = querystr+' AND DATA_DESC_ID==2 AND ANTENNA1==5 AND ANTENNA2==7 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(long2query)\n record['long2'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==3 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n med3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==1 AND ANTENNA2==4 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(med3query)\n record['med3'] = subt.getcell('MODEL_DATA', 0)\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME<2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n else:\n\t\t record['auto2'] = tblocal.getcell('MODEL_DATA', 270)\n\t\t record['long2'] = tblocal.getcell('MODEL_DATA', 310)\n\t\t record['auto3'] = tblocal.getcell('MODEL_DATA', 408)\n\t\t record['med3'] = tblocal.getcell('MODEL_DATA', 424)\n\t\t record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n #record['history'] = self.get_last_history_line(self.inpms, origin='setjy', hint='Uranus')\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='setjy', hint='Titan')\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n if debug:\n if not self.ismms: print \"self.result['history']=\",self.result['history']\n print \"self.result['auto0']=\",self.result['auto0']\n print \"self.result['auto3']=\",self.result['auto3']\n\n #\"\"\"Flux density in HISTORY (Uranus)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus:\", \"V=0.0] Jy\"])\n #\"\"\"WVR spw\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[ 25.33798409+0.j,25.33798409+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n #self.check_eq(self.result['wvr'], numpy.array([[ 25.33490372+0.j, 25.33490372+0.j]]),0.0001)\n #\"\"\"Zero spacing of spw 3\"\"\"\n\t#self.check_eq(self.result['auto3'], numpy.array([[ 66.72530365+0.j],[ 66.72530365+0.j]]),0.0001)\n # new value after code and ephemeris data update 2012-10-03\n\t#self.check_eq(self.result['auto3'], numpy.array([[ 66.71941376+0.j], [ 66.71941376+0.j]]),0.0001)\n #\"\"\"Zero spacing of spw 4\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[ 70.40153503+0.j],[ 70.40153503+0.j]]),0.0001)\n # new value after code and 
ephemeris data update 2012-10-03\n #self.check_eq(self.result['auto4'], numpy.array([[ 70.39561462+0.j], [ 70.39561462+0.j]]), 0.0001)\n #Titan\n \"\"\"Zero spacing of spw 2\"\"\"\n self.check_eq(self.result['auto2'][0][0], (6.69543791+0.j),0.0001)\n\n \"\"\"Long spacing of spw 2\"\"\"\n self.check_eq(self.result['long2'][0][0],(6.09987020 +2.47228783e-11j),0.0001)\n\n \"\"\"Zero spacing of spw 3\"\"\"\n self.check_eq(self.result['auto3'][0][0], (3.13487768+0.j),0.0001)\n\n \"\"\"Medium spacing of spw 3\"\"\"\n self.check_eq(self.result['med3'][0][0],(3.09678578 -2.19477778e-12j) ,0.0001)\n\n \"\"\"Long spacing of spw 3\"\"\"\n self.check_eq(self.result['long3'][0][0], (2.66332293 +1.29327478e-11j),0.0001)\n\n return sjran", "def test4_SingleObservationSelectByIntent(self):\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test4\")\n self.inpms += \".test4\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n sjran = setjy(vis=self.inpms, field='', spw='', modimage='',\n selectdata=True, intent=\"*AMPLI*\",\n scalebychan=True, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2010', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n if 'MODEL_DATA' not in cols:\n #raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n raise AssertionError, \"setjy(field='Titan') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto1'] = tblocal.getcell('MODEL_DATA', 18)\n #record['long1'] = tblocal.getcell('MODEL_DATA', 19)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n #Titan\n if self.ismms:\n #record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n #record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t #record['auto3'] = tblocal.getcell('MODEL_DATA', 2835)\n\t\t #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(auto0query)\n record['auto0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(long0query)\n record['long0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n\n else:\n record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n record['auto3'] = tblocal.getcell('MODEL_DATA', 405)\n record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n 
#record['history'] = self.get_last_history_line(self.inpms, origin='setjy::imager::setjy()', hint=\"V=0] Jy\")\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n \"\"\"Flux density in HISTORY (selectbyIntent)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus\", \"V=0] Jy\"])\n if not self.ismms: self.check_history(self.result['history'], [\"Titan\", \"V=0] Jy\"])\n\n #\"\"\"WVR spw with selectbyIntent\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[25.93320656+0.j,\n # 26.88228607+0.j]]),\n # 0.003)\n\n #\"\"\"Zero spacing of spw 1 with scalebychan\"\"\"\n # 8 (decreasing freq!) chans, XX & YY.\n #self.check_eq(self.result['auto1'],\n # numpy.array([[65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j],\n # [65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j]]),0.0001)\n\n #\"\"\"Long spacing of spw 1 with scalebychan\"\"\"\n #self.check_eq(self.result['long1'],\n # numpy.array([[4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j],\n # [4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j]]),0.0001)\n\n # spw 4 only has 1 chan, so it should be the same as without scalebychan.\n #\"\"\"Zero spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[69.33396912+0.j],[69.33396912+0.j]]),0.0001)\n #\"\"\"Long spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['long4'], numpy.array([[2.83933783+0.j],[2.83933783+0.j]]),0.0001)\n\n \"\"\"Zero spacing of spw 3 with scalebychan, selectbyintent\"\"\"\n self.check_eq(self.result['auto3'][1][0], (3.0934467+0j),0.0001)\n self.check_eq(self.result['auto3'][1][1920], (3.08946729+0j),0.0001)\n self.check_eq(self.result['auto3'][1][3839], (3.08549213+0j),0.0001)\n\n return sjran", "def run_optuna2():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n dataset = dataset.sample(frac=0.02)\n print(dataset.shape)\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n # X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n # dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n # dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n 
#\"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"boosting_type\": trial.suggest_categorical(\"boosting_type\", ['gbdt', 'rf']),\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n \"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n \"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n\n model = lgb.train(params, dtrain)\n vd_preds = model.predict(X_test)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. Returns max\n accuracy = accuracy_score(y_test, vd_preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def run_model(steps):\n model = ShoalModel()\n for j in range(steps):\n model.step()\n data = model.datacollector.get_model_vars_dataframe()\n return data", "def lacer(df, df1, train_start_date, train_end_date, test_start_date, test_end_date, request_type, CD, predictor_num): #Once model is ready, replace df with csv\n\n #Create Training and Testing Sets\n dftrain = preprocessing(df , train_start_date, train_end_date)\n dftrain = dftrain.reset_index(drop = True)\n dftest = preprocessing(df1, test_start_date, test_end_date)\n dftest = dftest.reset_index(drop = True)\n\n #Reserve test set for training on all 3 models. 
\n y_train, y_test = lc.CreateTestSet(dftest, predictor_num)\n y_test = y_test.reshape((-1, 1))\n\n\n## 2 Models\n #Model1: CD\n modelCD = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainCD = dftrain[dftrain['CD'] == CD].reset_index(drop = True)\n\n X_trainCD, X_testCD = lc.CreateTrainSet(dftrainCD, predictor_num)\n X_testCD = X_testCD.reshape((-1, 1))\n modelCD.fit(X_trainCD, X_testCD)\n\n y_predCD = modelCD.predict(y_train)\n\n #Model2: Request_type\n modelRT = SparseGaussianCRF(lamL=0.1, lamT=0.1, n_iter=10000)\n dftrainRT = dftrain[dftrain['RequestType'] == request_type].reset_index(drop = True)\n\n X_trainRT, X_testRT = lc.CreateTrainSet(dftrainRT, predictor_num)\n X_testRT = X_testRT.reshape((-1, 1))\n\n modelRT.fit(X_trainRT, X_testRT)\n\n y_predRT = modelRT.predict(y_train)\n\n\n #Average out all predictions\n y_predFinal = (y_predCD + y_predRT )/2\n\n #Return metrics \n return lc.metrics(y_predFinal, y_test)", "def train_rb_system(self):\r\n\r\n train_case_no = pd.Series(0, index=self.train.index)\r\n # case_no in training and forecast samples are independent with each other\r\n # they are not of much use but merely for counting and comparing purposes\r\n gb_train = self.train.groupby([\"lu\", \"ld\", \"sp\", \"sn\", \"ud\", \"aud\", \"rsi1\", \"rsi2\", \"rsi3\", \"rsi4\"])\r\n for i, key in enumerate(gb_train.indices.keys()):\r\n train_case_no.loc[gb_train.groups[key]] = i\r\n train_ncase = gb_train.ngroups\r\n \r\n train_case_result = pd.Series(\"\", index=self.train.index)\r\n # store case_result for case observations in the training sample\r\n \r\n for i in range(train_ncase):\r\n case = self.train[train_case_no == i]\r\n if ((case.lu[0] == -1) & (case.ld[0] == -1) &\r\n (case.rsi1[0] == -1) & (case.rsi2[0] == -1)):\r\n train_case_result[case.index] = \"Trigger_OFF\"\r\n else:\r\n u1, u2, u3, u4, d1, d2, d3, d4 = (0.0,)*8\r\n if case.lu[0] == 1:\r\n u1 = self.train.ix[(self.train.lu == case.lu[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d1 = self.train.ix[(self.train.lu == case.lu[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n if case.ld[0] == 1:\r\n u2 = self.train.ix[(self.train.ld == case.ld[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d2 = self.train.ix[(self.train.ld == case.ld[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n if case.rsi1[0] == 1:\r\n u3 = self.train.ix[(self.train.rsi1 == case.rsi1[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d3 = self.train.ix[(self.train.rsi1 == case.rsi1[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == 
case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n if case.rsi2[0] == 1:\r\n u4 = self.train.ix[(self.train.rsi2 == case.rsi2[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"upd\"].sum()\r\n d4 = self.train.ix[(self.train.rsi2 == case.rsi2[0]) &\r\n (self.train.sp == case.sp[0]) &\r\n (self.train.sn == case.sn[0]) &\r\n (self.train.ud == case.ud[0]) &\r\n (self.train.aud == case.aud[0]) &\r\n (self.train.rsi3 == case.rsi3[0]) &\r\n (self.train.rsi4 == case.rsi4[0]), \"dnd\"].sum()\r\n u = u1 + u2 + u3 + u4\r\n d = d1 + d2 + d3 + d4\r\n \r\n if u == d == 0.0:\r\n # This could happen it there is only one observation for this case,\r\n # and the Close of next day does not change, i.e. no up-day or down-day.\r\n # Assign it to be \"Obvious_WAIT\" by discretion.\r\n train_case_result[case.index] = \"Obvious_WAIT\"\r\n elif (u/(u+d)) >= .55:\r\n train_case_result[case.index] = \"Obvious_LONG\"\r\n elif (d/(u+d)) >= .55:\r\n train_case_result[case.index] = \"Obvious_SHORT\"\r\n elif u == d != 0.0:\r\n train_case_result[case.index] = \"Obvious_WAIT\"\r\n elif (.50 < (u/(u+d)) < .55) or (.45 < (u/(u+d)) < .50):\r\n train_case_result[case.index] = \"Non_Obvious\"\r\n self.trained_case = pd.concat([train_case_no, train_case_result], axis=1)\r\n self.trained_case.columns = [\"case_no\", \"case_result\"]", "def test2_SingleObservationScaleByChan(self):\n\n os.system(\"mv \" + self.inpms + \" \" + self.inpms + \".test2\")\n self.inpms += \".test2\"\n record = {}\n\n tblocal = tbtool()\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n tblocal.close()\n if 'MODEL_DATA' in cols:\n raise ValueError, \"The input MS, \" + self.inpms + \" already has a MODEL_DATA col\" + str(cols)\n\n try:\n #print \"\\nRunning setjy(field='Uranus').\"\n print \"\\nRunning setjy(field='Titan').\"\n #sjran = setjy(vis=self.inpms, field='Uranus', spw='', modimage='',\n sjran = setjy(vis=self.inpms, field='Titan', spw='', modimage='',\n scalebychan=True, fluxdensity=-1,\n standard='Butler-JPL-Horizons 2010', usescratch=True)\n except Exception, e:\n #print \"\\nError running setjy(field='Uranus')\"\n print \"\\nError running setjy(field='Titan')\"\n raise e\n try:\n tblocal.open(self.inpms)\n cols = tblocal.colnames()\n \n if 'MODEL_DATA' not in cols:\n raise AssertionError, \"setjy(field='Uranus') did not add a MODEL_DATA column\"\n else:\n #record['wvr'] = tblocal.getcell('MODEL_DATA', 0)\n #record['auto1'] = tblocal.getcell('MODEL_DATA', 18)\n #record['long1'] = tblocal.getcell('MODEL_DATA', 19)\n #record['auto4'] = tblocal.getcell('MODEL_DATA', 2)\n #record['long4'] = tblocal.getcell('MODEL_DATA', 3)\n # Titan\n if self.ismms:\n\t\t #record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n\t\t #record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t #record['auto3'] = tblocal.getcell('MODEL_DATA', 2835)\n\t\t #record['long3'] = tblocal.getcell('MODEL_DATA', 2868)\n querystr = 'FIELD_ID==1'\n auto0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(auto0query)\n record['auto0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long0query = querystr+' AND DATA_DESC_ID==0 AND ANTENNA1==3 AND 
ANTENNA2==7 AND TIME/(24*3600) IN [{MJD(2011/04/22/00:07:03),MJD(2011/04/22/00:07:13)}]'\n subt = tblocal.query(long0query)\n\t\t record['long0'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n auto3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==0 AND ANTENNA2==0 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(auto3query)\n\t\t record['auto3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n long3query = querystr+' AND DATA_DESC_ID==3 AND ANTENNA1==3 AND ANTENNA2==7 AND TIME < 2011/04/22/00:07:03'\n subt = tblocal.query(long3query)\n\t\t record['long3'] = subt.getcell('MODEL_DATA', 0)\n subt.close()\n else:\n\t\t record['auto0'] = tblocal.getcell('MODEL_DATA', 45)\n\t\t record['long0'] = tblocal.getcell('MODEL_DATA', 78)\n\t\t record['auto3'] = tblocal.getcell('MODEL_DATA', 405)\n\t\t record['long3'] = tblocal.getcell('MODEL_DATA', 438)\n tblocal.close()\n # record['history'] = self.get_last_history_line(self.inpms, origin='setjy::imager::setjy()', hint=\"V=0] Jy\")\n #record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n if not self.ismms: record['history'] = self.get_last_history_line(self.inpms, origin='imager::setjy()', hint=\"V=0] Jy\")\n self.result = record\n except AssertionError, e:\n print \"\\nError accesing MODEL_DATA\"\n tblocal.close()\n raise e\n\n \"\"\"Flux density in HISTORY (scalebychan)?\"\"\"\n #self.check_history(self.result['history'], [\"Uranus\", \"V=0] Jy\"])\n if not self.ismms: self.check_history(self.result['history'], [\"Titan\", \"V=0] Jy\"])\n\n #\"\"\"WVR spw with scalebychan\"\"\"\n #self.check_eq(self.result['wvr'], numpy.array([[25.93320656+0.j,\n # 26.88228607+0.j]]),\n # 0.003)\n\n \"\"\"Zero spacing of spw 1 with scalebychan\"\"\"\n # 8 (decreasing freq!) 
chans, XX & YY.\n #self.check_eq(self.result['auto1'],\n # numpy.array([[65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j],\n # [65.49415588+0.j, 65.42105865+0.j,\n # 65.34798431+0.j, 65.27491760+0.j,\n # 65.20187378+0.j, 65.12883759+0.j,\n # 65.05581665+0.j, 64.98281097+0.j]]),0.0001)\n # Titan ------------\n # check spw0, YY chan 0, 1920, 3839\n self.check_eq(self.result['auto0'][1][0], 3.30965233+0.j, 0.0001)\n self.check_eq(self.result['auto0'][1][1920], 3.31375313+0j, 0.0001)\n self.check_eq(self.result['auto0'][1][3839], 3.31785417+0j, 0.0001)\n\n \"\"\"Long spacing of spw 1 with scalebychan\"\"\"\n #self.check_eq(self.result['long1'],\n # numpy.array([[4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j],\n # [4.92902184+0.j, 4.96826363+0.j,\n # 5.00747252+0.j, 5.04664850+0.j,\n # 5.08579159+0.j, 5.12490082+0.j,\n # 5.16397619+0.j, 5.20301771+0.j]]),0.0001)\n # Titan\n self.check_eq(self.result['long0'][1][0],(2.77658414+6.98719121e-12j),0.0001)\n self.check_eq(self.result['long0'][1][1920],(2.77936244+6.99878090e-12j),0.0001)\n self.check_eq(self.result['long0'][1][3839],(2.78213906+7.01037362e-12j),0.0001)\n\n # spw 4 only has 1 chan, so it should be the same as without scalebychan.\n #\"\"\"Zero spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['auto4'], numpy.array([[69.33396912+0.j],[69.33396912+0.j]]),0.0001)\n \"\"\"Zero spacing of spw 3 with scalebychan\"\"\"\n self.check_eq(self.result['auto3'][1][0], (3.0934467+0j),0.0001)\n self.check_eq(self.result['auto3'][1][1920], (3.08946729+0j),0.0001)\n self.check_eq(self.result['auto3'][1][3839], (3.08549213+0j),0.0001)\n\n #\"\"\"Long spacing of spw 4 with scalebychan\"\"\"\n #self.check_eq(self.result['long4'], numpy.array([[2.83933783+0.j],[2.83933783+0.j]]),0.0001)\n\n \"\"\"Long spacing of spw 3 with scalebychan\"\"\"\n self.check_eq(self.result['long3'][1][0],(2.62812424+6.38091359e-12j) ,0.0001)\n self.check_eq(self.result['long3'][1][1920],(2.62534332+6.36981873e-12j) ,0.0001)\n self.check_eq(self.result['long3'][1][3839],(2.62256360+6.35873776e-12j) ,0.0001)\n\n return sjran", "def Model2():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n \n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*0.5\n \n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n tan.thresholds=0.5*np.ones(tan.inputs.shape)\n sn.thresholds = tan.GetActivationFunction()(np.zeros(sn.inputs.shape)-.5)\n sp.thresholds = tan.GetActivationFunction()(np.zeros(sp.inputs.shape)-.5)\n c2tan.weights = np.random.random(c2tan.weights.shape)/-100.0\n c2tan.learningEnabled = True\n\n c2tan.learningFunction = TAN_LearningRule\n \n return M2", "def 
ELRscript(model,mon,fday,fyr,day1,day2,nday,hdate_last,lit,liti,wk,nla1,sla1,wlo1,elo1,nla2,sla2,wlo2,elo2,fprefix,mpref,training_season,ntrain,rainfall_frequency,MOS):\n\n#%% model Hindcasts \n\tfh_xh = Dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc', mode='r')\n\tfh_yh = Dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc', mode='r')\n\n\tlons = fh_xh.variables['X'][:]\n\tlats = fh_xh.variables['Y'][:]\n\n\tx = fh_xh.variables['tp'][:]; x = np.squeeze(x)\n\ty = fh_yh.variables['tp'][:]\n\tndat1, nlat, nlon = np.shape(x)\n\tx1=x[:,1,1]\n\tI = np.where(x1>10000)\n\tbad_value_num=len(x1[I])\n\tndat=ndat1-bad_value_num\n\n#%% ELR: Train the models\n# Make a dictionary to contain the 'LogisticRegression' objects and terciles\n\telr_dict = {} # create an empty dictionary\n\telr_climo_dict = {} # create an empty dictionary for the climo forecast\n\n\tym = np.mean(y,axis=0)\n\tmsk = ma.getmask(ym)\n\tindex_land = np.empty((nlat,nlon),dtype=int)\n\txm0 = x\n\t#xm = xm0[0:int(ndat/2),:,:]\n\txm = xm0[0:lit,:,:]\n\n\tx0 = np.zeros(np.shape(xm)) # array of zeros to construct the climo forecast\n\tijland = -1\n\tfor j in range(nlat):\n\t# print(\"in j loop, j=\", j)\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\tindex_land[j,i] = ijland # index of land points\n\t\t\t\t#elr_dict[ijland] = elr_fit(xm[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\t#elr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:int(ndat/2),j,i])\n\t\t\t\telr_dict[ijland] = elr_fit(xm[:,j,i], y[0:lit,j,i])\n\t\t\t\telr_climo_dict[ijland] = elr_fit(x0[:,j,i], y[0:lit,j,i])\n\t\t\t# ijland is the dictionary key that can be used to assess the entries, like this\n\t\t\t# mymodel, mytercs = mydict[0]\n\t\t\t# mymodel.coef_\n\tnland = ijland+1\n\t#print('ELR training done with total landpoints = ',nland)\n\n\t#%% Make set of ELR in-sample hindcasts (no XV)\n\t#elr_hc = np.empty((ndat,nlat,nlon,3)); elr_hc.fill(np.nan)\n\t#elr_hc = np.empty((int(ndat/2),nlat,nlon)); elr_hc.fill(np.nan)\n\telr_hc = np.empty((lit,nlat,nlon)); elr_hc.fill(np.nan)\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\t#elr_hc[:,j,i,:] = elr_tercilesPredict(xm[:,j,i], terciles, elrmodel)\n\t\t\t\telr_hc[:,j,i] = elr_quantilePredict(xm[:,j,i], elrmodel)\n\n# ijland = index_land[lat1, lon1]\n# elrmodel, terciles = elr_dict[ijland]\n# elrmodel_climo, terciles = elr_climo_dict[ijland]\n# poe, q_fcst, q_clim, = elr_poe( xm[idat,lat1,lon1], elrmodel, elrmodel_climo )\n# plt.figure()\n\n\t#print('Set of ELR hindcasts made on a map of xy gridpoints')\n#---------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\t#T=int(ndat/2)\n\tT=lit\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(elr_hc, axis=1)\n\tvar[np.isnan(var)]=-999. 
#use CPT missing value\n\tdss=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tTarr=np.empty(ndat,dtype=int)\n\tfor it in range(ndat):\n\t\tTarr[it]=1901+it\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_training.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\tvar=np.flip(y[0:lit,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\tT1=lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\tndat_fc = ndat-lit\n\txf = x[lit:ndat,:,:]\n\tyf = y[lit:ndat,:,:]\n\n#%% Verification period\n########################################\n\n\telr_fc = np.empty((ndat_fc,nlat,nlon,3)); elr_fc.fill(np.nan)\n\trpss_ELR_fc = np.ma.array(np.empty((nlat,nlon)), mask=msk, fill_value=np.nan)\n\n\tijland = -1\n\tfor j in range(nlat):\n\t\tfor i in range(nlon):\n\t\t\tif msk[j,i] == False: # fit model just for landpoints\n\t\t\t\tijland = ijland + 1\n\t\t\t\telrmodel, terciles = elr_dict[ijland]\n\t\t\t\telr_fc[:,j,i,:] = elr_tercilesPredict(xf[:,j,i], terciles, elrmodel)\n\t#print('Set of ELR forcasts made on a map of xy gridpoints')\n\n#----------------------------------------------------------\n\t#Now write the CPT file\n\toutfile=model+'_precip_'+mon+'_wk'+str(wk)+'_elr_verification.txt'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tf.write(\"cpt:ncats=3\\n\")\n\tW=nlon\n\tH=nlat\n\tds=xr.open_dataset('../input/'+model+'_precip_'+mon+'_wk'+str(wk)+'.nc',decode_times=False)\n\tT=ndat-lit\n\tTarr1=Tarr[lit:]\n\tXarr=lons\n\tYarr1=lats\n\tYarr=Yarr1[::-1] #Y should from N to S\n\tvari='tp'\n\tvar=np.flip(elr_fc, axis=1)*100\n\tvar[np.isnan(var)]=-1.0 #use CPT missing value\n\n\tfor it in range(T):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:C=1, cpt:clim_prob=0.33333333333300003, cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=probability (%), cpt:missing=-1.0000000000000000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,0]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=2, 
cpt:clim_prob=0.33333333333400000\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,1]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\t\tf.write(\"cpt:C=3, cpt:clim_prob=0.33333333333299997\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:,2]],fmt=\"%.1f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()\n\n\t#write CPT for observation\n\toutfile='obs_precip_'+mon+'_wk'+str(wk)+'_verification.tsv'\n\tf = open(outfile, 'w')\n\tf.write(\"xmlns:cpt=http://iri.columbia.edu/CPT/v10/\\n\")\n\tf.write(\"cpt:nfields=1\\n\")\n\tW=nlon\n\tH=nlat\n\tXarr=lons\n\tYarr=lats[::-1]\n\tvari='tp'\n\t#var=np.flip(y[int(ndat/2):,:,:], axis=1)\n\tvar=np.flip(y[lit:,:,:], axis=1)\n\tvar[np.isnan(var)]=-999. #use CPT missing value\n\tdss=xr.open_dataset('../input/obs_precip_'+mon+'_wk'+str(wk)+'_hc.nc',decode_times=False)\n\ta=list(dss)\n\tunits=dss[a[0]].units\n\t#T1=int(ndat/2)\n\tT1=ndat-lit\n\tfor it in range(T1):\n\t\tf.write(\"cpt:field=\"+vari+\", cpt:T=\"+str(Tarr1[it])+\"-01, cpt:nrow=\"+str(H)+\", cpt:ncol=\"+str(W)+\", cpt:row=Y, cpt:col=X, cpt:units=\"+units+\", cpt:missing=-999.\\n\")\n\t\tf.write(\"\\t\")\n\t\tnp.savetxt(f, Xarr, fmt=\"%.1f\",newline='\\t')\n\t\tf.write(\"\\n\") #next line\n\t\tfor iy in range(H):\n\t\t\tnp.savetxt(f,np.r_[Yarr[iy],var[it,iy,0:]],fmt=\"%.4f\", newline='\\t') #excise extra line\n\t\t\tf.write(\"\\n\") #next line\n\tf.close()", "def run_optuna():\n # rf_params = {\"max_depth\": [5, 15, None],\n # \"max_features\": [5, 9, \"auto\"],\n # \"min_samples_split\": [6, 8, 15],\n # \"n_estimators\": [150, 200, 300]}\n import optuna\n import lightgbm as lgb\n import sklearn.datasets\n import sklearn.metrics\n from sklearn.model_selection import train_test_split\n\n # FYI: Objective functions can take additional arguments\n # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\n def objective(trial):\n dataset = df.copy()\n\n data = dataset.drop(['Cover_Type'], axis=1)\n target = dataset['Cover_Type']\n\n X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.20)\n X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.25)\n\n dtrain = lgb.Dataset(X_train, label=y_train)\n dvalid = lgb.Dataset(X_valid, label=y_valid, reference=dtrain)\n dtest = lgb.Dataset(X_test)\n\n\n params = {\n 'num_class': 8, ## We have 7 tree types...\n #\"objective\": \"regression\",\n #\"objective\": \"binary\",\n \"objective\": \"multiclass\",\n #\"metric\": 'f1_macro',\n #\"metric\": 'multi_error',\n \"metric\": 'multi_logloss',\n \"verbosity\": -1,\n \"boosting_type\": \"gbdt\",\n #\"boosting_type\": \"rf\",\n \"lambda_l1\": trial.suggest_float(\"lambda_l1\", 1e-8, 10.0, log=True),\n \"lambda_l2\": trial.suggest_float(\"lambda_l2\", 1e-8, 10.0, log=True),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 2, 256),\n \"feature_fraction\": trial.suggest_float(\"feature_fraction\", 0.4, 1.0),\n \"bagging_fraction\": trial.suggest_float(\"bagging_fraction\", 0.4, 1.0),\n \"bagging_freq\": trial.suggest_int(\"bagging_freq\", 1, 7),\n \"min_child_samples\": trial.suggest_int(\"min_child_samples\", 5, 100),\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 0.001, 0.1),\n 
\"max_depth\": trial.suggest_int(\"max_depth\", 1, 110),\n \"num_leaves\": trial.suggest_int(\"num_leaves\", 31, 128),\n }\n pruning_callback = optuna.integration.LightGBMPruningCallback(trial,\n \"multi_logloss\")\n model = lgb.train(params, dtrain,\n num_boost_round=1000,\n early_stopping_rounds=30,\n valid_sets=dvalid,\n callbacks=[pruning_callback]\n )\n vd_preds = model.predict(X_test, num_iteration=model.best_iteration)\n vd_preds = np.argmax(vd_preds, axis=1) # since its a multiclass we need the most freq. Returns max\n\n accuracy = accuracy_score(y_test, vd_preds)\n # gbm = lgb.train(param, dtrain)\n # pred_labels = np.rint(preds)\n # rmse = sklearn.metrics.mean_squared_error(valid_y, preds, squared=False)\n #accuracy = accuracy_score(valid_y, preds)\n return 1 - round(accuracy, 2) # we need to minimize\n\n if __name__ == \"__main__\":\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=250)\n\n print(\"Number of finished trials: {}\".format(len(study.trials)))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: {}\".format(trial.value))\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))", "def main():\n sound_path = []\n textgrid_path = []\n # change the data to your own recorded sounds, make sure you have wav file and TextGrid file which\n # mark every vowel with some character, and every word end must be marked with \"wordend\"\n # theses recordings are only [a], [e], [i], [o], [u] sequence to test the pipeline flow,\n # the phonological rule learner tests are in the \"phonological_learner\" py file.\n for i in range(1, 5):\n sound_path.append(\"recordings\\\\aeiou{}.wav\".format(str(i)))\n textgrid_path.append(\"recordings\\\\aeiou{}.TextGrid\".format(str(i)))\n data = signal_parser.parse_input_sound(sound_path, textgrid_path) # from sound to vowel objects with f1 and f2\n clustered_data = mdl_clustering.mdl_cluster(data) # cluster into main values\n final_tagged_data = phonology_learner.extract_features(clustered_data.cluster) # add phonological features\n update_data(data, final_tagged_data) # update the input data with the phonological features\n lexicon = find_lexicon(data) # get lexicon from data\n data = separate_data_into_words(data)\n model = phonology_learner.MdlPhonology(final_tagged_data.keys(), lexicon, data, POSSIBLE_FEATURES)\n print model\n model = phonology_learner.mdl_phonology_learner(model)\n print model", "def Model3():\n M2 = Model()\n tan = M2.GetGroupByName(\"TAN\")\n sn = M2.GetGroupByName(\"SN\")\n sp = M2.GetGroupByName(\"SP\")\n da = M2.GetGroupByName(\"Da\")\n context = M2.GetGroupByName(\"Context\")\n snr = M2.GetGroupByName(\"SNr/GPi\")\n\n \n \n c2tan = M2.GetProjectionsBetweenGroups(context, tan)[0]\n tan2sn = M2.GetProjectionsBetweenGroups(tan, sn)[0]\n tan2sp = M2.GetProjectionsBetweenGroups(tan, sp)[0]\n da2tan = M2.GetProjectionsBetweenGroups(da, tan)[0]\n\n sn.SetActivationFunction(neu.Tanh_plus)\n sp.SetActivationFunction(neu.Tanh_plus)\n snr.SetActivationFunction(neu.Tanh_plus)\n\n W = np.zeros((6, 3))\n W[0:2,0] = W[2:4,1] = W[4:6,2] = 1.0\n tan2sn.mask = np.copy(W)\n tan2sp.mask = np.copy(W)\n tan2sn.weights = W*-1\n tan2sp.weights = W*-1\n\n sn2tan = sn.ConnectTo(tan)\n sp2tan = sp.ConnectTo(tan)\n sn2tan.weights = W.T/-10\n sp2tan.weights = W.T/-10\n da2tan.weights = np.ones(da2tan.weights.shape)*-0.25\n \n tan.SetActivationFunction(np.vectorize(lambda x: SSigmoid(x, tgain)))\n 
tan.thresholds=0.5*np.ones(tan.inputs.shape)\n hb = np.average(sn.thresholds)/-tan.size\n HB = np.ones(tan.inputs.shape)*hb\n sn.thresholds = 0.1*np.ones(sn.activations.shape)\n sp.thresholds = 0.1*np.ones(sp.activations.shape)\n #sn.thresholds = -1*tan.GetActivationFunction()(np.ones(sn.inputs.shape)-1)\n #sp.thresholds = -1*tan.GetActivationFunction()(np.ones(sp.inputs.shape)-1)\n #sn.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sn.inputs.shape))\n #sp.thresholds = -0.1*tan.GetActivationFunction()(np.zeros(sp.inputs.shape))\n #c2tan.weights = np.random.random(c2tan.weights.shape)\n c2tan.weights = np.ones(c2tan.weights.shape)*1.5\n c2tan.mask = np.dot(np.ones(tan.inputs.shape),\n np.array([[1,1,1,0,0,0,0,0,0]]))\n c2tan.learningEnabled = True\n c2tan.learningFunction = TAN_LearningRule\n\n M2.SetParameter(\"TRACE_TAN\", True)\n M2.SetParameter(\"HB\", HB)\n return M2", "def model_run(request):\n user_name = request.user.username\n\n # Defaults\n test_string = \"Test_string_default\"\n test_variable = \"Test_variable_default\"\n fac_L_form= \"\"\n simulation_name = \"\"\n outlet_y = \"\"\n outlet_x = \"\"\n\n hydrograph_series_obs = None\n hydrograph_series_sim = None\n hydrograph_opacity =0.1\n observed_hydrograph = \"\"\n observed_hydrograph2 = ''\n observed_hydrograph3 = ''\n vol_bal_graphs = ''\n\n observed_hydrograph_userModified = \"\"\n observed_hydrograph_userModified2 = \"\"\n observed_hydrograph_userModified3 = \"\"\n vol_bal_graphs_userModified = ''\n \n observed_hydrograph_loaded = \"\"\n observed_hydrograph_loaded2 = \"\"\n observed_hydrograph_loaded3 = \"\"\n vol_bal_graphs_loaded = ''\n \n eta_ts_obj = eta_ts_obj_modified = eta_ts_obj_loaded = ''\n vo_ts_obj = vo_ts_obj_modified = vo_ts_obj_loaded = ''\n vc_ts_obj = vc_ts_obj_modified = vc_ts_obj_loaded = ''\n vs_ts_obj = vs_ts_obj_modified = vs_ts_obj_loaded = ''\n ppt_ts_obj = ppt_ts_obj_modified = ppt_ts_obj_loaded = ''\n\n model_run_hidden_form = ''\n model_input_prepare_request = None\n hs_resource_id_created = ''\n hs_resource_id_loaded = ''\n hs_resource_id_modified = ''\n\n\n simulation_loaded_id = \"\"\n current_model_inputs_table_id = 0\n model_inputs_table_id_from_another_html = 0 #:TODO need to make it point to last sim by default\n\n\n # if user wants to download the file only\n download_response = {}\n hs_res_downloadfile = ''\n download_status = download_response['download_status'] = None #False\n download_link = download_response['download_link'] = 'http://link.to.zipped.files'\n hs_res_created = download_response['hs_res_created'] = ''\n files_created_dict = 'No dict created'\n download_choice = []\n\n # initial values\n fac_L_init = fac_Ks_init = fac_n_o_init = fac_n_c_init = fac_th_s_init = 1.0\n pvs_t0_init = 10.0\n vo_t0_init = 5.0\n qc_t0_init = 1.0\n kc_init = 1.0\n\n # test\n if request.is_ajax and request.method == 'POST':\n pass\n\n '''\n model_run can receive request from three sources:\n 1) model_input, prepare model (if model_input_prepare_request != None)\n 2) model_input, load model (if model_input_load_request != None)\n 3) model_run, calibrate and change the result seen. i.e. 
passes to itself (if model_run_calib_request != None)\n '''\n\n\n # # check to see if the request is from method (1)\n try:\n model_input_prepare_request = request.POST['simulation_name']\n # if request.POST['download_choice'] != None:\n # model_input_prepare_request = None\n print \"MSG from I: Preparing model simulation, simulation name is: \", model_input_prepare_request\n except:\n\n model_input_prepare_request = None\n\n\n # # check to see if the request is from method (2)\n try:\n # for the input text\n try:\n model_input_load_request = hs_resource_id_created = request.POST['existing_sim_res_id']\n print \"MSG from II: Previous simulation is loaded.the simulation loaded from hs_res_id from text box is.\", hs_resource_id_created\n\n # chose dropdown if the field is blank. :TODO need to get rid of the except part below:\n if hs_resource_id_created == \"\":\n model_input_load_request = hs_resource_id_created = request.POST['simulation_names_list']\n print \"MSG from II: Previous simulation is loaded. The name of simulation loaded is: \", hs_resource_id_created\n\n # for the drop down list\n except:\n model_input_load_request = hs_resource_id_created = request.POST['simulation_names_list'] # from drop down menu\n b = request.POST['load_simulation_name']\n print 'MSG from II: The name of simulation loaded from dropdown menu is: ',hs_resource_id_created\n print \"MSG from II: Previous simulation is loaded. The name of simulation loaded is: \", hs_resource_id_created\n except:\n model_input_load_request = None\n\n\n # # check to see if the request is from method (3)\n try:\n model_run_calib_request = request.POST['fac_L']\n print 'MSG: Calibration parameters are modified'\n except:\n model_run_calib_request = None\n\n\n # Method (1), request from model_input-prepare model\n if model_input_prepare_request != None:\n print 'MSG: Method I initiated.'\n\n # Checks the model chosen\n model_engine_chosen = request.POST['model_engine']\n\n if model_engine_chosen.lower() == 'download' : #request.POST.getlist('download_choice2[]') != []:\n print 'User action: DOWNLOAD'\n\n download_choice = request.POST.getlist('download_choice2[]')\n\n print 'download_choice(s)=', download_choice\n\n inputs_dictionary = app_utils.create_model_input_dict_from_request(request)\n\n download_request_response = {\n u'output_response_txt': u'http://129.123.9.159:20199/files/data/user_6/metadata.txt',\n u'output_zipfile': u'http://129.123.9.159:20199/files/data/user_6/output.zip',\n u'output_json_string': {'hs_res_id_created': '12456'}}\n\n download_request_response = app_utils.download_geospatial_and_forcing_files(inputs_dictionary,\n download_request=download_choice)\n\n print \"Downloading all the files successfully completed\"\n\n if download_request_response != {}:\n download_status = True\n download_link = download_request_response['output_zipfile']\n hs_res_downloadfile = download_request_response['output_json_string']['hs_res_id_created']\n\n elif model_engine_chosen.lower() == 'topnet':\n print 'User action: TOPNET'\n\n inputs_dictionary = app_utils.create_model_input_dict_from_request(request)\n run_request = app_utils.run_topnet(inputs_dictionary)\n\n elif model_engine_chosen.lower() == 'topkapi':\n print 'User action: topkapi'\n\n # # Method (1), STEP (1): get input dictionary from request ( request I)\n inputs_dictionary = app_utils.create_model_input_dict_from_request(request)\n test_string = str(\"Prepared Values: \")+str(inputs_dictionary)\n simulation_name = inputs_dictionary['simulation_name']\n print 
\"MSG: Inputs from user read\"\n\n\n\n # # Method (1), STEP (2):call_runpytopkapi function\n # response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/e14239bf38bc490cae63e131c822a17d/pytopkpai_responseJSON.txt'\n # response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/a3c75f158ad44fe1a46ceb8a67224aae/pytopkpai_responseJSON.txt'\n response_JSON_file = app_utils.call_runpytopkapi(inputs_dictionary= inputs_dictionary)\n\n json_data = app_utils.read_data_from_json(response_JSON_file)\n\n print 'MSG: Prepared Simulation Hydrograph ...'\n\n hs_resource_id_created = json_data['hs_res_id_created']\n hydrograph_series_obs = json_data['hydrograph_series_obs']\n hydrograph_series_sim = json_data['hydrograph_series_sim']\n\n\n eta = json_data['eta']\n vo = json_data['vo']\n vc = json_data['vc']\n vs = json_data['vs']\n ppt= json_data['ppt']\n\n ppt_cum = json_data['ppt_cum'] # cumulative\n eta_cum = json_data['eta_cum']\n q_obs_cum = json_data['q_obs_cum']\n q_sim_cum = json_data['q_sim_cum']\n\n # initial values\n # calib_parameter= {\"fac_l\": 1.0, \"fac_n_o\": 1.0, \"fac_n_c\": 1.0, \"fac_th_s\": 1.0, \"fac_ks\": 1.0},\n # numeric_param= {\"pvs_t0\": 50, \"vo_t0\": 750.0, \"qc_t0\": 0.0, \"kc\": 1.0},\n if json_data['calib_parameter'] != None:\n fac_L_init = json_data['calib_parameter']['fac_l']\n fac_Ks_init = json_data['calib_parameter']['fac_ks']\n fac_n_o_init = json_data['calib_parameter']['fac_n_o']\n fac_n_c_init = json_data['calib_parameter']['fac_n_c']\n fac_th_s_init = json_data['calib_parameter']['fac_th_s']\n if json_data['numeric_param'] != None:\n pvs_t0_init = json_data['numeric_param']['pvs_t0']\n vo_t0_init = json_data['numeric_param']['vo_t0']\n qc_t0_init = json_data['numeric_param']['qc_t0']\n kc_init = json_data['numeric_param']['kc']\n\n print '*****************', hs_resource_id_created\n # print [i[-1] for i in hydrograph_series_sim]\n # hydrograph_series_obs = np.nan_to_num(hydrograph_series_obs).tolist()\n\n # replace nan values to 0 because Tethys timeseries cannot display nan\n hydrograph_series_obs = [[item[0], 0] if np.isnan(item[-1]) else item for item in hydrograph_series_obs]\n\n\n try:\n try:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim],\n [i[-1] for i in hydrograph_series_obs])\n except:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim])\n\n # Writing to model_inputs_table\n current_model_inputs_table_id = app_utils.write_to_model_input_table(inputs_dictionary=inputs_dictionary, hs_resource_id= hs_resource_id_created)\n\n # Writing to model_calibraiton_table (Because it is first record of the simulation)\n # IF the model did not run, or if user just wants the files, we don't write to calibration table\n current_model_calibration_table_id = app_utils.write_to_model_calibration_table( model_input_table_id=current_model_inputs_table_id,\n numeric_parameters_list=[pvs_t0_init, vo_t0_init,qc_t0_init, kc_init],\n calibration_parameters_list=[fac_L_init,fac_Ks_init, fac_n_o_init,fac_n_c_init,fac_th_s_init])\n\n # Writing to model_result_table\n current_model_result_table_id = app_utils.write_to_model_result_table(model_calibration_table_id=current_model_calibration_table_id,\n timeseries_discharge_list=data_qsim_qobs)\n except Exception, e:\n print \"Error ---> Writing to DB\", e\n\n\n\n\n observed_hydrograph3 = TimeSeries(\n 
height='300px', width='500px', engine='highcharts',\n title=\"Simulated and Observed Hydrographs\",\n subtitle='Nash value: %s, R2: %s'%(json_data['nash_value'], json_data['r2_value']),\n y_axis_title='Discharge ',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Hydrograph',\n 'data': hydrograph_series_sim,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Hydrograph',\n 'data': hydrograph_series_obs,\n 'fillOpacity': hydrograph_opacity,\n }])\n\n\n vol_bal_graphs = TimeSeries(\n height='600px', width='500px', engine='highcharts',\n title=\"Cumulative volume of water in the basin\",\n y_axis_title='Volume of water ',\n y_axis_units='mm',\n series=[{\n 'name': 'Simulated Q',\n 'data': q_sim_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Q',\n 'data': q_obs_cum,\n 'fillOpacity': hydrograph_opacity,\n },{\n 'name': 'ETa',\n 'data': eta_cum,\n 'fillOpacity': hydrograph_opacity,\n } , {\n 'name': 'PPT',\n 'data': ppt_cum,\n 'fillOpacity': hydrograph_opacity,\n }\n ])\n\n eta_ts_obj =app_utils.create_1d(timeseries_list=eta, label='Actual Evapotranspiration', unit='mm/day')\n vc_ts_obj = app_utils.create_1d(timeseries_list=vc, label='Average Water Volume in Channel Cells', unit='mm/day')\n vs_ts_obj = app_utils.create_1d(timeseries_list=vs, label='Average Water Volume in Soil Cells', unit='mm/day')\n vo_ts_obj = app_utils.create_1d(timeseries_list=vo, label='Average Water Volume in Overland Cells', unit='mm/day')\n ppt_ts_obj = app_utils.create_1d(timeseries_list=ppt, label='Rainfall', unit='mm/day')\n\n\n\n # Method (2), request from model_input-load simulation\n if model_input_load_request != None:\n hs_resource_id = model_input_load_request\n\n print 'MSG: Method II initiated.'\n print 'MSG: Model run for HydroShare resource ID ', hs_resource_id , \" is being retreived..\"\n\n\n # # STEP1: Retrieve simulation information (files stored in HydroShare) from db in a dict\n # inputs_dictionary = app_utils.create_model_input_dict_from_db( hs_resource_id= hs_resource_id,user_name= user_name )\n # test_string = str(\"Loaded Values: \")+str(inputs_dictionary)\n\n\n\n ######### START: need to get two variables: i) hs_resource_id_created, and ii) hydrograph series ##############\n response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/16ea0402dd4c403bbb4e5b23ed597728/pytopkpai_responseJSON.txt'\n response_JSON_file = app_utils.loadpytopkapi(hs_res_id=hs_resource_id, out_folder='')\n json_data = app_utils.read_data_from_json(response_JSON_file)\n\n hs_resource_id_created = hs_resource_id_loaded =hs_resource_id #json_data['hs_res_id_created']\n\n\n hydrograph_series_sim = json_data['hydrograph_series_sim']\n hydrograph_series_obs = json_data['hydrograph_series_obs']\n eta = json_data['eta']\n vo = json_data['vo']\n vc = json_data['vc']\n vs = json_data['vs']\n ppt = json_data['ppt']\n\n ppt_cum = json_data['ppt_cum'] # cumulative\n eta_cum = json_data['eta_cum']\n q_obs_cum = json_data['q_obs_cum']\n q_sim_cum = json_data['q_sim_cum']\n \n \n # init values in the form\n if json_data['calib_parameter'] != None:\n fac_L_init = json_data['calib_parameter']['fac_l']\n fac_Ks_init = json_data['calib_parameter']['fac_ks']\n fac_n_o_init = json_data['calib_parameter']['fac_n_o']\n fac_n_c_init = json_data['calib_parameter']['fac_n_c']\n fac_th_s_init = json_data['calib_parameter']['fac_th_s']\n if json_data['numeric_param'] != None:\n pvs_t0_init = json_data['numeric_param']['pvs_t0']\n vo_t0_init = 
json_data['numeric_param']['vo_t0']\n qc_t0_init = json_data['numeric_param']['qc_t0']\n kc_init = json_data['numeric_param']['kc']\n\n observed_hydrograph_loaded = TimeSeries(\n height='300px',width='500px', engine='highcharts',title=' Simulated Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge',y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Flow',\n 'data': hydrograph_series_sim\n }])\n\n observed_hydrograph_loaded2 = TimeSeries(\n height='500px',width='500px', engine='highcharts',title='Observed (actual) Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge',y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Flow',\n 'data': hydrograph_series_obs\n }])\n\n observed_hydrograph_loaded3 = TimeSeries(\n height='300px',\n width='500px',\n engine='highcharts',\n title= \"Simulated and Observed flow \" ,\n subtitle='Nash value: %s, R2: %s'%(json_data['nash_value'], json_data['r2_value']),\n y_axis_title='Discharge',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Hydrograph',\n 'data': hydrograph_series_sim,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Hydrograph',\n 'data': hydrograph_series_obs,\n 'fillOpacity': hydrograph_opacity,\n }]\n )\n\n vol_bal_graphs_loaded = TimeSeries(\n height='600px',\n width='500px',\n engine='highcharts',\n title=\"Cumulative volume of water in the basin\",\n y_axis_title='Volume of water ',\n y_axis_units='mm',\n series=[{\n 'name': 'Simulated Q',\n 'data': q_sim_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Q',\n 'data': q_obs_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'ETa',\n 'data': eta_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'PPT',\n 'data': ppt_cum,\n 'fillOpacity': hydrograph_opacity,\n }\n ])\n\n vc_ts_obj_loaded = app_utils.create_1d(timeseries_list=vc, label='Average Water Volume in Channel Cells',unit='mm/day')\n vs_ts_obj_loaded = app_utils.create_1d(timeseries_list=vs, label='Average Water Volume in Soil Cells', unit='mm/day')\n vo_ts_obj_loaded = app_utils.create_1d(timeseries_list=vo, label='Average Water Volume in Overland Cells',unit='mm/day')\n ppt_ts_obj_loaded = app_utils.create_1d(timeseries_list=ppt, label='Rainfall', unit='mm/day')\n eta_ts_obj_loaded = app_utils.create_1d(timeseries_list=eta, label='Actual Evapotranspiration', unit='mm/day')\n # STEP2: Because in this part we load previous simulation, Load the model from hydroshare to hydroDS,\n # STEP2: And from the prepeared model, if the result is not available, run. Otherwise just give the result\n # hydrograph2, table_id = app_utils.run_model_with_input_as_dictionary(inputs_dictionary, False)\n #* STEP3: Make sure a string/variable/field remains that contains the id of the model. SO when user modifies it, that model is modifed\n # # STEP4B: Write to db\n # current_model_inputs_table_id = app_utils.write_to_model_input_table(inputs_dictionary,simulation_folder)\n # print \"MSG: Inputs from model_input form written to db. 
Model RAN already\"\n # STEP5: get the revised hydrographs, and plot it\n # preparing timeseries data in the format shown in: http://docs.tethysplatform.org/en/latest/tethys_sdk/gizmos/plot_view.html#time-series\n\n # hydrograph2 = []\n # observed_hydrograph_loaded = ''\n\n\n # Method (3), request from model_run, change calibration parameters\n if model_run_calib_request != None :\n\n fac_L_form = float(request.POST['fac_L'])\n fac_Ks_form = float(request.POST['fac_Ks'])\n fac_n_o_form = float(request.POST['fac_n_o'])\n fac_n_c_form = float(request.POST['fac_n_c'])\n fac_th_s_form = float(request.POST['fac_th_s'])\n\n pvs_t0_form = float(request.POST['pvs_t0'])\n vo_t0_form = float(request.POST['vo_t0'])\n qc_t0_form = float(request.POST['qc_t0'])\n kc_form = float(request.POST['kc'])\n\n # model_inputs_table_id_from_another_html = request.POST['model_inputs_table_id_from_another_html']\n hs_resource_id_from_previous_simulation = request.POST['model_inputs_table_id_from_another_html']\n # current_model_inputs_table_id =hs_resource_id_from_previous_simulation\n hs_resource_id_created = hs_resource_id_from_previous_simulation\n\n print 'MSG: Method III initiated. The model id we are looking at is: ', hs_resource_id_from_previous_simulation\n\n\n ######### START: need to get at leaset two variables: i) hs_resource_id_created, and ii) hydrograph series #####\n response_JSON_file = '/home/prasanna/tethysdev/hydrologic_modeling/tethysapp/hydrologic_modeling/workspaces/user_workspaces/16ea0402dd4c403bbb4e5b23ed597728/pytopkpai_responseJSON.txt'\n response_JSON_file = app_utils.modifypytopkapi(hs_res_id=hs_resource_id_created, out_folder='',\n fac_l=fac_L_form, fac_ks=fac_Ks_form, fac_n_o=fac_n_o_form,\n fac_n_c=fac_n_c_form, fac_th_s=fac_th_s_form,\n pvs_t0=pvs_t0_form, vo_t0=vo_t0_form, qc_t0=qc_t0_form,\n kc=kc_form )\n json_data = app_utils.read_data_from_json(response_JSON_file)\n\n hs_resource_id_created = hs_resource_id_modified = json_data['hs_res_id_created']\n hydrograph_series_sim = json_data['hydrograph_series_sim']\n hydrograph_series_obs = json_data['hydrograph_series_obs']\n eta = json_data['eta']\n ppt = json_data['ppt']\n vo = json_data['vo']\n vc = json_data['vc']\n vs = json_data['vs']\n\n ppt_cum = json_data['ppt_cum'] # cumulative\n eta_cum = json_data['eta_cum']\n q_obs_cum = json_data['q_obs_cum']\n q_sim_cum = json_data['q_sim_cum']\n\n print 'hydrograph_series_sim is ',[item[-1] for item in hydrograph_series_sim]\n\n # init values in the form\n if json_data['calib_parameter'] != None:\n fac_L_init = json_data['calib_parameter']['fac_l']\n fac_Ks_init = json_data['calib_parameter']['fac_ks']\n fac_n_o_init = json_data['calib_parameter']['fac_n_o']\n fac_n_c_init = json_data['calib_parameter']['fac_n_c']\n fac_th_s_init = json_data['calib_parameter']['fac_th_s']\n if json_data['numeric_param'] != None:\n pvs_t0_init = json_data['numeric_param']['pvs_t0']\n vo_t0_init = json_data['numeric_param']['vo_t0']\n qc_t0_init = json_data['numeric_param']['qc_t0']\n kc_init = json_data['numeric_param']['kc']\n print '***hs_resource_id_created', hs_resource_id_created\n # print [i[-1] for i in hydrograph_series_sim]\n ######### END : ###############\n\n # # # -------DATABASE STUFFS <start>----- # #\n # # retreive the model_inputs_table.id of this entry to pass it to the next page (calibration page)\n # from .model import engine, SessionMaker, Base, model_calibration_table\n # session = SessionMaker() # Make session\n #\n # # STEP1: retrieve the model_inputs_table.id of this entry to 
pass it to the next page (calibration page)\n # current_model_inputs_table_id = str(len(session.query(model_inputs_table).filter(\n # model_inputs_table.user_name == user_name).all())) # because PK is the same as no of rows, i.e. length\n #\n # # STEP2: use the id retrieved in STEP1 to get all the remaining parameters\n # print 'model_input ID for which rest of the inputs are being retrieved: ', current_model_inputs_table_id\n #\n # all_rows = session.query(model_inputs_table).filter(model_inputs_table.id == current_model_inputs_table_id).all()\n #\n # # retrieve the parameters and write to a dictionary\n # inputs_dictionary = {}\n #\n # for row in all_rows:\n # inputs_dictionary['id'] = row.id\n # inputs_dictionary['user_name'] = row.user_name\n # inputs_dictionary['simulation_name'] = row.simulation_name\n # inputs_dictionary['simulation_folder'] = row.simulation_folder\n # inputs_dictionary['simulation_start_date'] = row.simulation_start_date\n # inputs_dictionary['simulation_end_date'] = row.simulation_end_date\n # inputs_dictionary['USGS_gage'] = row.USGS_gage\n #\n # inputs_dictionary['outlet_x'] = row.outlet_x\n # inputs_dictionary['outlet_y'] = row.outlet_y\n # inputs_dictionary['box_topY'] = row.box_topY\n # inputs_dictionary['box_bottomY'] = row.box_bottomY\n # inputs_dictionary['box_rightX'] = row.box_rightX\n # inputs_dictionary['box_leftX'] = row.box_leftX\n #\n # timeseries_source,threshold, cell_size,timestep = row.other_model_parameters.split(\"__\")\n # inputs_dictionary['timeseries_source'] = timeseries_source\n # inputs_dictionary['threshold'] = threshold\n # inputs_dictionary['cell_size'] = cell_size\n # inputs_dictionary['timestep'] = timestep\n #\n # inputs_dictionary['model_engine'] = row.model_engine\n\n\n observed_hydrograph_userModified3 = TimeSeries(\n height='300px',\n width='500px',\n engine='highcharts',\n title= \"Simulated and Observed flow \" ,\n subtitle='Nash value: %s, R2: %s' % (json_data['nash_value'], json_data['r2_value']),\n y_axis_title='Discharge ',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Hydrograph',\n 'data': hydrograph_series_sim,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Hydrograph',\n 'data': hydrograph_series_obs,\n 'fillOpacity': hydrograph_opacity,\n }]\n )\n\n observed_hydrograph_userModified = TimeSeries(\n height='300px',\n width='500px',\n engine='highcharts',\n title=' Corrected Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge',\n y_axis_units='cfs',\n series=[{\n 'name': 'Simulated Flow',\n 'data': hydrograph_series_sim\n }]\n )\n\n observed_hydrograph_userModified2 = TimeSeries(\n height='500px', width='500px', engine='highcharts', title=' Observed (Actual) Hydrograph ',\n subtitle=\"Simulated and Observed flow \" ,\n y_axis_title='Discharge', y_axis_units='cfs',\n series=[{\n 'name': 'Observed Flow',\n 'data': hydrograph_series_obs\n }])\n\n vol_bal_graphs_userModified = TimeSeries(\n height='600px',\n width='500px',\n engine='highcharts',\n title=\"Cumulative volume of water in the basin\",\n y_axis_title='Volume of water ',\n y_axis_units='mm',\n series=[{\n 'name': 'Simulated Q',\n 'data': q_sim_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'Observed Q',\n 'data': q_obs_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'ETa',\n 'data': eta_cum,\n 'fillOpacity': hydrograph_opacity,\n }, {\n 'name': 'PPT',\n 'data': ppt_cum,\n 'fillOpacity': hydrograph_opacity,\n }\n ])\n\n vc_ts_obj_modified = app_utils.create_1d(timeseries_list=vc, 
label='Average Water Volume in Channel Cells',unit='mm/day')\n vs_ts_obj_modified = app_utils.create_1d(timeseries_list=vs, label='Average Water Volume in Soil Cells', unit='mm/day')\n vo_ts_obj_modified = app_utils.create_1d(timeseries_list=vo, label='Average Water Volume in Overland Cells',unit='mm/day')\n ppt_ts_obj_modified = app_utils.create_1d(timeseries_list=ppt, label='Rainfall', unit='mm/day')\n eta_ts_obj_modified = app_utils.create_1d(timeseries_list=eta, label='Actual Evapotranspiration', unit='mm/day')\n\n # create input_dictionary for the last run. Because we are modifying, we need to load the last run\n inputs_dictionary = app_utils.create_model_input_dict_from_db(hs_resource_id = hs_resource_id_from_previous_simulation, user_name= user_name )\n\n # Writing to db\n try:\n try:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim],\n [i[-1] for i in hydrograph_series_obs])\n except:\n data_qsim_qobs = zip([i[0] for i in hydrograph_series_sim], [i[-1] for i in hydrograph_series_sim])\n\n # Writing to model_calibration_table\n current_model_calibration_table_id= app_utils.write_to_model_calibration_table(hs_resource_id=hs_resource_id_from_previous_simulation,\n numeric_parameters_list=[pvs_t0_init, vo_t0_init, qc_t0_init,\n kc_init],\n calibration_parameters_list=[fac_L_init, fac_Ks_init,\n fac_n_o_init, fac_n_c_init,\n fac_th_s_init])\n # Writing to model_result_table\n current_model_result_table_id = app_utils.write_to_model_result_table(\n model_calibration_table_id=current_model_calibration_table_id,\n timeseries_discharge_list =data_qsim_qobs)\n\n\n except Exception, e:\n print \"Error ---> Writing to DB\", e\n \n\n # # # -------DATABASE STUFFS <ends> ----- # #\n\n\n\n\n print 'simulation_loaded_id',simulation_loaded_id # probably useless\n print 'hs_resource_id_created', hs_resource_id_created\n\n print 'hs_resource_id_prepared', model_input_prepare_request\n print 'hs_resource_id_loaded', model_input_load_request\n print 'hs_resource_id_modified', model_run_calib_request\n\n\n\n # gizmo settings\n fac_L = TextInput(display_text='Soil depth across all model cells', name='fac_L', initial=float(fac_L_init))\n fac_Ks = TextInput(display_text='Saturated hydraulic conductivity', name='fac_Ks', initial=float(fac_Ks_init))\n fac_n_o = TextInput(display_text=\"Manning's n for overland\", name='fac_n_o', initial=float(fac_n_o_init))\n fac_n_c = TextInput(display_text=\"Manning's n for channel\", name='fac_n_c', initial=float(fac_n_c_init))\n fac_th_s = TextInput(display_text='Soil saturation', name='fac_th_s', initial=float(fac_th_s_init))\n\n pvs_t0 = TextInput(display_text=\"Soil cell's saturation %\", name='pvs_t0', initial=float(pvs_t0_init))\n vo_t0 = TextInput(display_text=\"Water volume in Overland cells (m3)\", name='vo_t0', initial=float(vo_t0_init))\n qc_t0 = TextInput(display_text='Flow in channel cells (m3/s)', name='qc_t0', initial=float(qc_t0_init))\n kc = TextInput(display_text='Crop coefficient across all model cells', name='kc', initial=float(kc_init))\n\n\n\n\n context = {'simulation_name':simulation_name,\n 'outlet_y': outlet_y,\n 'outlet_x': outlet_x,\n\n\n 'fac_L': fac_L, 'fac_Ks': fac_Ks, 'fac_n_o': fac_n_o, \"fac_n_c\": fac_n_c, \"fac_th_s\": fac_th_s,\n 'pvs_t0': pvs_t0, 'vo_t0': vo_t0, 'qc_t0': qc_t0, \"kc\": kc,\n\n 'fac_L_form': fac_L_form,\n 'user_name':user_name,\n\n #'Iwillgiveyou_model_inputs_table_id_from_another_html':model_inputs_table_id_from_another_html,\n # 
\"current_model_inputs_table_id\":current_model_inputs_table_id, # model_inputs_table_id\n\n 'observed_hydrograph3': observed_hydrograph3,\n 'observed_hydrograph': observed_hydrograph,\n 'observed_hydrograph2': observed_hydrograph2,\n\n\n \"observed_hydrograph_userModified\":observed_hydrograph_userModified,\n \"observed_hydrograph_userModified2\": observed_hydrograph_userModified2,\n \"observed_hydrograph_userModified3\": observed_hydrograph_userModified3,\n\n \"observed_hydrograph_loaded\":observed_hydrograph_loaded,\n \"observed_hydrograph_loaded2\": observed_hydrograph_loaded2,\n \"observed_hydrograph_loaded3\": observed_hydrograph_loaded3,\n\n 'eta_ts_obj': eta_ts_obj,\n 'vs_ts_obj': vs_ts_obj,\n 'vc_ts_obj': vc_ts_obj,\n 'vo_ts_obj': vo_ts_obj,\n 'ppt_ts_obj': ppt_ts_obj,\n 'vol_bal_graphs':vol_bal_graphs,\n\n 'eta_ts_obj_modified': eta_ts_obj_modified,\n 'vs_ts_obj_modified': vs_ts_obj_modified,\n 'vc_ts_obj_modified': vc_ts_obj_modified,\n 'vo_ts_obj_modified': vo_ts_obj_modified,\n 'ppt_ts_obj_modified': ppt_ts_obj_modified,\n 'vol_bal_graphs_userModified':vol_bal_graphs_userModified,\n\n 'eta_ts_obj_loaded': eta_ts_obj_loaded,\n 'vs_ts_obj_loaded': vs_ts_obj_loaded,\n 'vc_ts_obj_loaded': vc_ts_obj_loaded,\n 'vo_ts_obj_loaded': vo_ts_obj_loaded,\n 'ppt_ts_obj_loaded': ppt_ts_obj_loaded,\n 'vol_bal_graphs_loaded':vol_bal_graphs_loaded,\n\n\n \"simulation_loaded_id\":simulation_loaded_id,\n 'test_string':simulation_loaded_id, #test_string\n 'test_variable':test_variable,\n\n 'hs_resource_id_created':hs_resource_id_created,\n\n 'hs_resource_id_prepared': model_input_prepare_request,\n 'hs_resource_id_loaded': model_input_load_request,\n 'hs_resource_id_modified': model_run_calib_request,\n\n # fow download request\n 'hs_res_downloadfile':hs_res_downloadfile,\n 'download_status': download_status,\n 'download_link': download_link,\n 'hs_res_created': hs_res_created,\n 'dict_files_created': files_created_dict,\n }\n\n return render(request, 'hydrologic_modeling/model-run.html', context)", "def send_to_mco(self, model, kpi_results):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This script takes in the congressional records CSV file and removes any records dated 2001. These somehow appeared along the way even though our data should only contain years 2006 and onward.
def main(): cong = pd.read_csv(sys.argv[1], parse_dates = ["date"]) cong = cong[cong["date"].dt.year != 2001] # Removes about 1400 instances cong.to_csv("congressionalRecords.csv")
[ "def createFileByYearIgnoreMissingColumn(year, destinationFolder='Formatted Files Without Missing'):\n\trows = []\n\tallRecords = 0\n\t\n\t# Setup database connection\n\tcur = db.cursor()\n\n\t\n\t# Create a header row\n\tindicatorList = ['Country']\n\tindicatorIDs = []\n\n\tcur.execute(\"SELECT DISTINCT indicator_key FROM record_fact WHERE year = %d ORDER BY indicator_key ASC;\" % (year))\t\n\tfor row in cur.fetchall():\n\t\tindicatorIDs.append(row[0])\n\t\n\ti = 0\t\n\tfor indicator_key in indicatorIDs:\n\t\tcur.execute(\"SELECT * FROM indicator_dim WHERE indicator_key = %d;\" % (indicator_key))\n\t\trow = cur.fetchone()\n\t\tindicator = str(row[0]).zfill(4) + ': ' + row[1]\n\t\tif row[2] != None:\n\t\t\tindicator += ' - ' + row[2]\n\t\tif row[3] != None:\n\t\t\tindicator += ' (' + row[3] + ')'\n\t\tindicator += ' - ' + row[6]\n\t\tindicator += ' - ' + str(i)\n\t\tindicator = indicator.replace(',', ';')\n\t\t# print indicator\n\t\tindicatorList.append(indicator)\n\t\t# indicatorIDs.append(row[0])\n\t\ti += 1\n\t\n\t# if year == 1919:\n\t# \tonlyIndicators = indicatorList[1:]\n\t# \tfor i in range(len(onlyIndicators)):\n\t# \t\tprint onlyIndicators[i] + '==>' + str(i)\n\n\trows.append(indicatorList)\n\n\t# Create a row for each country\n\tcountryList = []\n\tcur.execute(\"SELECT * FROM country_dim ORDER BY country_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tcountryList.append((row[0],row[1],row[2]))\n\t# countryList = [(3,'AFG','Afghanistan')]\n\tfor (cid, abbr, cname) in countryList:\n\t\tcur.execute(\"SELECT * FROM record_fact WHERE country_key = %d AND year = %d ORDER BY indicator_key ASC;\" % (cid,year))\t\n\t\tfacts = []\n\t\tfor row in cur.fetchall():\n\t\t\t# print (row[0],row[1],row[2],row[4])\n\t\t\tfacts.append((row[0],row[1],row[2],row[4]))\n\t\tallRecords += len(facts)\n\t\tarow = [str(cid).zfill(3) + ': ' + cname + ' (' + abbr + ')']\n\t\tfor i in range(len(indicatorIDs)):\n\t\t\tif len(facts) == 0:\n\t\t\t\tarow.append('')\n\t\t\telif facts[0][1] == indicatorIDs[i]:\n\t\t\t\tarow.append(facts[0][3])\n\t\t\t\tfacts.pop(0)\n\t\t\telse:\n\t\t\t\tarow.append('')\n\t\t# print arow\n\t\trows.append(arow)\n\t\t# print (cid, abbr, cname)\n\n\tdb.close()\n\n\t# CWrite to file\n\tfilename = './'+destinationFolder+'/'+str(year)+'_'+time.strftime(\"%Y-%m-%d-%H-%M-%S\")+'.csv'\n\twith open(filename,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(rows)\n\tw.close()\n\n\treturn (filename, 1.0*allRecords/(len(countryList)*len(indicatorIDs)))", "def __read_and_filter(self):\n\n data = pd.read_csv('data/us_bills.csv', delimiter=';')\n data = data.filter(['Title', 'Major'])\n # data = data.drop(x for x in data.Major if x == 'nan')\n data = data.mask(data.Major == 'NaN').dropna()\n self.data = data", "def filter_years():\n years = sys.argv[1:]\n for year in years:\n infile = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME1)\n outfile1 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME2)\n outfile2 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME3)\n print year\n filter_terms(infile, outfile1, outfile2)\n print", "def map_xlsx_to_csv(clean_data, yr, src):\n with open('myanmar_clean_data.csv', 'wb') as csvfile:\n write = csv.writer(csvfile, delimiter=',')\n #region, flow, entity, budget, source, values, yr, src\n write.writerow(['Region','Flow','Entity','Budget','Sources', 'Values', 'Year', 'Source Contents'])\n \n for row in clean_data:\n #print row\n write.writerow(row)", "def removeperiods(filename):\n import csv\n with open(filename 
+ '.csv', 'rU') as inf, \\\n open(filename + 'X.csv', 'wb') as outf:\n outwriter = csv.writer(outf)\n for entry in csv.reader(inf):\n outwriter.writerow((entry[0], stripperiods(entry[1])))", "def remove_broken_lines(): # old_file, new_file, delimiter, delete=False):\n with open('book-data/BX-Book-Ratings.csv', 'r', encoding=\"latin-1\") as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=';')\n line_count = 0\n\n # read rwos in the csv file\n for row in csv_reader:\n if line_count == 0:\n fieldnames = list(row)\n\n try:\n int(row['ISBN'])\n\n # write proper values into new csv document\n with open('book-data/FX-Book-Ratings.csv', 'a') as fixed_csv:\n writer = csv.DictWriter(fixed_csv, fieldnames=fieldnames, delimiter=',')\n if line_count == 0:\n writer.writeheader()\n writer.writerow(row)\n\n except Exception as e:\n continue\n\n line_count += 1\n return line_count", "def createFileToTheNextYearIgnoreMissingColumn(year, destinationFolder='NewFormattedFilesWithoutMissingToNextYear'):\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\trows = []\n\tallRecords = 0\n\t\n\t# Setup database connection\n\tcur = db.cursor()\n\n\t\n\t# Create a header row\n\tindicatorList = ['Country']\n\tindicatorIDs = []\n\n\tfor yrIndex in range(len(yearList)):\n\t\tif year >= yearList[yrIndex][0]-1 and year < yearList[yrIndex][1]:\n\t\t\tindicatorIDs.append(targetList[yrIndex])\n\n\tnextYearIndicator = len(indicatorIDs)\n\n\tcur.execute(\"SELECT DISTINCT indicator_key FROM record_fact WHERE year = %d ORDER BY indicator_key ASC;\" % (year))\t\n\tfor row in cur.fetchall():\n\t\tindicatorIDs.append(row[0])\n\t\n\ti = 0\t\n\tfor indicator_key in indicatorIDs:\n\t\tisN = 'N' if i < nextYearIndicator else ''\n\t\tcur.execute(\"SELECT * FROM indicator_dim WHERE indicator_key = %d;\" % (indicator_key))\n\t\trow = cur.fetchone()\n\t\tindicator = str(row[0]).zfill(4) + isN + ': ' + row[1]\n\t\tif row[2] != None:\n\t\t\tindicator += ' - ' + row[2]\n\t\tif row[3] != None:\n\t\t\tindicator += ' (' + row[3] + ')'\n\t\tindicator += ' - ' + row[6]\n\t\tindicator += ' - ' + str(i)\n\t\tindicator = indicator.replace(',', ';')\n\t\t# print indicator\n\t\tindicatorList.append(indicator)\n\t\t# indicatorIDs.append(row[0])\n\t\ti += 1\n\t\n\t# if year == 1919:\n\t# \tonlyIndicators = indicatorList[1:]\n\t# \tfor i in range(len(onlyIndicators)):\n\t# \t\tprint onlyIndicators[i] + '==>' + str(i)\n\n\trows.append(indicatorList)\n\n\t# Create a row for each country\n\tcountryList = []\n\tcur.execute(\"SELECT * FROM country_dim ORDER BY country_key ASC;\")\t\n\tfor row in cur.fetchall():\n\t\tcountryList.append((row[0],row[1],row[2]))\n\t# countryList = [(3,'AFG','Afghanistan')]\n\tfor (cid, abbr, cname) in countryList:\n\t\tarow = [str(cid).zfill(3) + ': ' + cname + ' (' + abbr + ')']\n\n\t\tfor yrIndex in range(len(yearList)):\n\t\t\tif year >= yearList[yrIndex][0]-1 and year < yearList[yrIndex][1]:\n\t\t\t\tcur.execute(\"SELECT * FROM record_fact WHERE country_key = %d AND year = %d AND indicator_key = %d;\" % (cid,year+1,targetList[yrIndex]))\n\t\t\t\trow_count = cur.rowcount\n\t\t\t\tif row_count == 0:\n\t\t\t\t\tarow.append('')\n\t\t\t\telse:\n\t\t\t\t\trow = cur.fetchone()\n\t\t\t\t\tarow.append(row[4])\n\t\t\t\t\tallRecords += 1\n\n\t\tcur.execute(\"SELECT * FROM record_fact WHERE country_key = %d AND year = %d ORDER BY indicator_key ASC;\" % 
(cid,year))\t\n\t\tfacts = []\n\t\tfor row in cur.fetchall():\n\t\t\t# print (row[0],row[1],row[2],row[4])\n\t\t\tfacts.append((row[0],row[1],row[2],row[4]))\n\t\tallRecords += len(facts)\n\t\t\n\t\tfor i in range(nextYearIndicator,len(indicatorIDs)):\n\t\t\tif len(facts) == 0:\n\t\t\t\tarow.append('')\n\t\t\telif facts[0][1] == indicatorIDs[i]:\n\t\t\t\tarow.append(facts[0][3])\n\t\t\t\tfacts.pop(0)\n\t\t\telse:\n\t\t\t\tarow.append('')\n\t\t# print arow\n\t\trows.append(arow)\n\t\t# print (cid, abbr, cname)\n\n\tdb.close()\n\n\t# CWrite to file\n\tfilename = './'+destinationFolder+'/'+str(year)+'_'+time.strftime(\"%Y-%m-%d-%H-%M-%S\")+'.csv'\n\twith open(filename,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(rows)\n\tw.close()\n\n\treturn (filename, 1.0*allRecords/(len(countryList)*len(indicatorIDs)), nextYearIndicator)", "def diff(year):\n f1 = open(\"data/old/data\" + year + \".csv\")\n oldFile1 = csv.reader(f1)\n oldList1 = list(oldFile1)\n\n f2 = open(\"data/new/data\" + year + \".csv\")\n newFile2 = csv.reader(f2)\n newList2 = list(newFile2)\n\n #Close files\n f1.close()\n f2.close()\n\n newRows = [row for row in newList2 if row not in oldList1]\n return newRows", "def read_and_clean_yearly_stats(fname, year, veteran_ids, previous_rookie_ids):\n df = parse_bball_ref_common_cols(pd.read_csv(fname))\n df = add_additional_stats(df)\n df['Year'] = int(year) #datetime.datetime(year, 6, 1)\n \n if year < 2019:\n champ = finals_team_data['Champion'][year]\n runnerup = finals_team_data['Runner-Up'][year]\n\n champ_players = df['Team'] == champ\n ru_players = df['Team'] == runnerup \n \n if not champ_players.any():\n print(\"No players on championship team in {}\".format(year))\n if not ru_players.any():\n print(\"No players on runner-up team in {}\".format(year))\n\n champ_leaders = get_leader_stats(df, msk=champ_players)\n ru_leaders = get_leader_stats(df, msk=ru_players)\n \n dpoy = dpoys['PlayerID'][year]\n sixth_man = sixth_man_winners['PlayerID'][year]\n mvpid = mvps['PlayerID'][year]\n finals_mvp = finals_team_data['Finals MVP'][year]\n all_nba_players = all_nba_players_by_year[year]\n else:\n champ = None\n runnerup = None\n \n mvpid = None\n finals_mvp = None\n dpoy = None\n sixth_man = None\n all_nba_players = {'1st':[], '2nd':[], '3rd':[]}\n\n all_stars = all_star_pids[year] \n league_leaders = get_leader_stats(df)\n\n def calculate_regseason_value(row): \n if row['Team'] in [champ, runnerup]:\n ## did you play significant minutes on a team that made it to the finals?\n champ_value = finals_minutes_multiplier * (\n row['MinutesPlayed']/3000 + \n row['GamesStarted']/82 + \n 0.33 * row['GamesPlayed']/82)\n \n ## did you contribute significantly in terms of pts, rbs, etc?\n if row['Team'] == champ:\n multiplier = champ_multiplier\n leader_values = champ_leaders \n else:\n multiplier = ru_multiplier\n leader_values = ru_leaders\n \n champ_value += add_weighted_stat_values(row, leader_values)\n champ_value *= multiplier\n else:\n champ_value = 0\n \n league_value = add_weighted_stat_values(row, league_leaders)\n return champ_value + league_value\n\n def calculate_playoff_value(row):\n ### no credit if you weren't with the team at the end of the season\n if not row['EndOfSeason']:\n return 0\n\n playoff_stats_by_round = playoff_stats_by_year[year]\n pid = row['PlayerID']\n\n total_value = 0\n for playoff_round in range(1, 5):\n # 1 = first round\n # 2 = conference semifinals\n # 3 = east/west finals\n # 4 = nba finals\n playoff_round = str(playoff_round)\n\n 
multiplier = playoff_multipliers(playoff_round)\n round_stats = playoff_stats_by_year[year][playoff_round]\n loc = round_stats['PlayerID'] == pid\n \n if np.count_nonzero(loc):\n round_leader_stats = get_leader_stats(round_stats)\n player_round_stats = round_stats.loc[loc] \n to_add = add_weighted_stat_values(player_round_stats, round_leader_stats).values[0] * multiplier\n \n if np.isnan(to_add):\n print(\"Going to add a NaN for pid = {}, year = {}, round = {}\".format(pid, year, playoff_round))\n vals = round_leader_stats.values()\n if pd.isnull(vals):\n print('got a NaN in leader stats, year {}, round {}'.format(year, playoff_round))\n print(round_leader_stats)\n if pd.isnull(player_round_stats).any(axis=None):\n print(\"got a NaN in player stats, pid = {}, year = {}, round = {}\".format(pid, year, playoff_round))\n for colname in stat_keys:\n print(colname, player_round_stats[colname])\n# if pd.isnull(player_round_stats[colname]):\n# print(colname, player_round_stats[colname])\n raise TypeError(\"got a nan\")\n total_value += to_add\n return total_value\n \n def calculate_awards_value(row):\n \"\"\"\n how much do we award a player in terms of all stars, mvps, and finals mvps?\n \"\"\"\n \n if not row['EndOfSeason']:\n ## only get credit for awards once\n ## (on the team you end the season with)\n return 0\n \n awards_value = 0\n if row['PlayerID'] in all_stars:\n awards_value += all_star_value\n \n for team in ['1st', '2nd', '3rd']:\n if row['isAllNBA_{}'.format(team)]:\n awards_value += all_nba_values[team]\n \n if row['PlayerID'] == mvpid:\n awards_value += mvp_value\n \n if row['PlayerID'] == dpoy:\n awards_value += dpoy_value\n \n if row['PlayerID'] == sixth_man:\n awards_value += sixth_man_value\n \n if row['isFMVP']:\n awards_value += finals_mvp_value\n \n return awards_value\n \n def set_veteran_status(pid):\n if pid in previous_rookie_ids:\n return 1\n elif pid in veteran_ids:\n return 2\n else:\n return 0\n \n def set_isFMVP(row):\n pname = row['PlayerName']\n team = row['Team']\n name = pname.rsplit(maxsplit=1)\n name = name[0][0] + '. 
' + name[1]\n if name == finals_mvp and team == champ:\n return True\n else:\n return False\n \n def set_allNBAteam(pname, team):\n if pname in all_nba_players[team]:\n return True\n else:\n return False\n \n \n ## drop the \"total\" values of players now (not earlier, since we want \n ## to use total stats to normalize our value added above)\n ## will sum-up player values later, \n ## but a player gets value from their contribution to each team\n df = df[df['Team'] != 'TOT']\n \n ## then a player only gets credit for the team they're with at the\n ## end of the season, which is the first one to appear in the list\n with_at_eos = np.zeros(df.shape[0])\n msk = np.logical_not(df.duplicated('PlayerID', keep='first'))\n with_at_eos[msk] = True\n df['EndOfSeason'] = with_at_eos\n \n ## set whether a player was the finals mvp:\n df['isFMVP'] = df.apply(set_isFMVP, axis=1)\n num_fmvp = np.count_nonzero(df['isFMVP'].values)\n if num_fmvp != 1:\n print(\"Wrong number of FMVPs ({}) in year {}\".format(num_fmvp, year))\n \n ## set whether a player made each of the all NBA teams:\n for team in ['1st', '2nd', '3rd']:\n dset_name = 'isAllNBA_{}'.format(team)\n df[dset_name] = df['PlayerName'].apply(set_allNBAteam, args=(team,))\n num_on_team = np.count_nonzero(df[dset_name].values)\n if num_on_team != 5:\n print(\"Wrong number of players ({}) on {} All NBA {} Team\".format(num_on_team, year, team))\n ### note -- these datasets will get used later to calculate awards value\n \n df['YearlyRegularSeasonValue'] = df.apply(calculate_regseason_value, axis=1)\n if year < 2019:\n df['YearlyAwardsValue'] = df.apply(calculate_awards_value, axis=1)\n df['YearlyPlayoffsValue'] = df.apply(calculate_playoff_value, axis=1)\n else:\n df['YearlyAwardsValue'] = np.zeros(df.shape[0])\n df['YearlyPlayoffsValue'] = np.zeros(df.shape[0])\n \n df['VeteranStatus'] = df['PlayerID'].apply(set_veteran_status)\n df['isYoungPlayer'] = df['Age'] <= 23\n \n # everyone who was a rookie last year will be a veteran next year\n next_veteran_ids = np.union1d(veteran_ids, previous_rookie_ids)\n rookie_ids = np.array(df['PlayerID'].loc[df['VeteranStatus']==0].values)\n \n df['TotalValue'] = df['YearlyRegularSeasonValue'] + df['YearlyAwardsValue'] + df['YearlyPlayoffsValue']\n\n ## no longer need to know whether it's the EndOfSeason row\n df.drop(columns=['EndOfSeason'], inplace=True)\n \n ## now handle players that are duplicated (i.e. 
that were on multiple teams in a given year because of trades)\n ## I'm going to just sum those up basically...\n is_a_duplicate_row = df.duplicated('PlayerID', keep=False)\n \n players_traded = np.unique(df['PlayerID'].loc[is_a_duplicate_row])\n print(\"Now dealing with {} players that were traded and appear more than once...\".format(\n players_traded.size))\n \n df_with_no_dupes = df.drop_duplicates('PlayerID', keep=False, inplace=False)\n ### now add the total values back on to df_with_no_dupes\n to_append = []\n for pid in players_traded:\n rows = df[df['PlayerID']==pid]\n assert rows.shape[0] > 1, \"Got a dupilicate PlayerID but only one row...\"\n new_row = combine_traded_player(rows)\n to_append.append(new_row)\n df_with_no_dupes = df_with_no_dupes.append(to_append, ignore_index=True, sort=False)\n\n return df_with_no_dupes, rookie_ids, next_veteran_ids", "def clean_csv( source, target ):\n with open( source ) as sfh:\n reader = csv.reader( sfh )\n with open( target, 'w' ) as tfh:\n writer = csv.writer( tfh )\n for record in reader:\n writer.writerow( map( lambda x : x.strip(), record ) )", "def _get_data_pre2007(date): \r\n \r\n # build the url based on year\r\n url = '{}/Environmental_Data_{}.txt'.format(BASE_URL, date.year)\r\n print('Fetching online data for {} (full year)'.format(date.year))\r\n \r\n try:\r\n year_data = request.urlopen(url).read().decode(encoding='utf_8').split('\\n') \r\n except:\r\n raise ValueError(date) # error accessing website\r\n else:\r\n year_data.pop(0) # remove first item which contain column header info\r\n \r\n for line in year_data:\r\n \r\n elements = line.split()\r\n yield dict(Date = elements[0],\r\n Time = elements[1],\r\n Status = 'COMPLETE', # all data from pre2007 will be complete\r\n Air_Temp = elements[5],\r\n Barometric_Press = elements[7],\r\n Wind_Speed = elements[2])", "def drop_unwanted_data(row):\n if not row[\"PatientDOB\"]:\n raise StopProcessing()\n if row[\"SpecialtyCode\"] not in [\"600\", \"180\"]:\n raise StopProcessing()", "def drop_least_ap(csv):\n number = csv.ap_id.value_counts()\n ap_to_drop = number[number < 2000].index\n idx = csv[csv.ap_id.isin(list(ap_to_drop))].index\n parsed_csv = csv.drop(idx).reset_index(drop=True)\n return parsed_csv", "def xlsx_to_csv(years):\n \n start_time = time.time()\n \n s3_resource = boto3.resource('s3')\n s3 = boto3.client('s3')\n for year in years:\n file = 'all_data_M_{}'.format(year)\n obj = s3.get_object(Bucket='dataeng-capstone-1', Key='all_data_M_{}.xlsx'.format(year))\n df = pd.read_excel(obj['Body'].read())\n df.insert(0, 'FY_YEAR', int('{}'.format(year))) \n csv_buffer = StringIO()\n df.to_csv(csv_buffer, sep=\"|\",index=False,quoting=csv.QUOTE_NONNUMERIC)\n s3_resource.Object('dataeng-capstone-1', 'clean/all_data_M_{}.dat'.format(year)).put(Body=csv_buffer.getvalue())\n print('converted ',file,' from .xlsx to .dat')\n \n \n file = '2017_NAICS_Descriptions' \n obj = s3.get_object(Bucket='dataeng-capstone-1', Key='2017_NAICS_Descriptions.xlsx')\n df = pd.read_excel(obj['Body'].read())\n df = df[['Code','Title']] \n\n csv_buffer = StringIO()\n df.to_csv(csv_buffer, sep=\"|\",index=False)\n s3_resource.Object('dataeng-capstone-1', 'clean/all_naics_codes.dat').put(Body=csv_buffer.getvalue())\n print('converted ',file,' from .xlsx to .dat')\n\n end_time = time.time()\n \n runtime = end_time - start_time\n \n print('\\n')\n print('runtime: ',runtime)\n print('\\n')\n dataend_bucket = s3_resource.Bucket('dataeng-capstone-1')\n\n print('List files in clean bucket: ')\n for objct in 
dataend_bucket.objects.filter(Delimiter='/',Prefix='clean/all'):\n print(objct.key)\n \n print('\\n')", "def remove_person(path, name):\n csv_db = pd.read_csv(path)\n csv_db.set_index(['Unnamed: 0'], inplace=True)\n if 'Unnamed: 0.1' in csv_db.columns:\n del csv_db['Unnamed: 0.1']\n name = str(name)\n try:\n csv_db.drop(name, axis=0).to_csv(path)\n except ValueError:\n csv_db.to_csv(path)", "def trimCSV(csv1,start,end):\n s = datetime.strptime( start,'%Y-%m-%d')\n e = datetime.strptime( end,'%Y-%m-%d')\n dateRange = []\n\n while(s != e):\n dateRange.append(s)\n s += timedelta(days=1)\n\n with open(csv1, mode='r') as inp, open('Trim.csv', mode='w') as out:\n writer = csv.writer(out)\n count = 0\n for row in csv.reader(inp):\n if count == 0:\n writer.writerow(row)\n count = 1\n continue\n if datetime.strptime( row[0],'%Y-%m-%d') in dateRange:\n writer.writerow(row)\n continue", "def clean_cary_uvvis_data(filename):\n df = pd.read_csv(filename, skiprows=1)\n\n time_rows = df['Wavelength (nm)'].values == '[Time] '\n time_indices = np.argwhere(time_rows).flatten()\n time_vals = df['Abs'][time_indices].values.astype(float)\n\n non_data_cols = df['Wavelength (nm)'].values == '[Wavelength] '\n n_cols_to_delete = len(np.argwhere(non_data_cols).flatten())\n\n last_wavelength_index = np.argwhere(pd.isnull(df['Abs'].values))[0][0]\n\n df = df.drop(df.index[last_wavelength_index:])\n df = df.drop(df.columns[2::2], axis = 1)\n df = df.set_index('Wavelength (nm)')\n if n_cols_to_delete > 0:\n df = df.drop(df.columns[-n_cols_to_delete:], axis = 1)\n\n n_rows = df.shape[1]\n df.columns = range(n_rows)\n\n df = df.sort_index()\n\n return df, time_vals", "def read_words_years(file):\n reader = csv.reader(open(file))\n dict1 = {}\n for row in reader:\n if row[0] in dict1:\n temp = dict1[row[0]]\n temp.append(Year(\n year=int(row[1]),\n occ=int(row[2]),\n ))\n dict1[row[0]] = temp\n else:\n temp = []\n temp.append(Year(\n year=int(row[1]),\n occ=int(row[2]),\n ))\n dict1[row[0]] = temp\n return dict1", "def analyze(filename):\n start = datetime.datetime.now()\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n # Skip the header rows so y\n next(reader, None)\n\n # Return year_count in a dictionary\n year_count = {}\n\n # Return 'ao' count start at 0\n found = 0\n\n # Loop through date strings between 2012 and 2018 and return count for each year\n # Date column is index 5 in file and [-4:] parseson just year of the date string\n for row in reader:\n if \"2012\" < row[5][-4:] < \"2019\":\n year_count[row[5][-4:]] = year_count.get(row[5][-4:], 0) + 1\n\n # Counts how many times 'ao' appears in file which\n if 'ao' in row[6]:\n found += 1\n\n print(year_count)\n print(f\"'ao' was found {found} times\")\n\n end = datetime.datetime.now()\n\n return start, end, year_count, found" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to handle a user command. Receives parsed input and sends data to the Central Server (CS) according to the specified command.
def handleUserCommand(cmd): """ Receive and parse input. """ cmd = cmd.split() if cmd and cmd[0] in client.getCommands(): task = cmd[0] client.connect() # Exit command. if task == "exit": client.disconnect() os._exit(0) # List command. elif task == "list": cs_cmd = client.getCommands()[task] client.sendData(cs_cmd) handleCSResponses(None, None) # Request command. elif task == "request": if len(cmd) == 3 and cmd[1] in client.getPtcs(): args = cmd[1:] # Checks if file exists. if not os.path.isfile(args[1]): print 'ERROR: File Not found' return data = open(args[1]).read() cs_cmd = client.getCommands()[task] data_size = str(len(data)) print '\t', data_size, 'bytes to transmit.' client.sendData(cs_cmd + " " + args[0] + " " + data_size + " " + data) handleCSResponses(args[1], args[0]) else: print 'Invalid request format.' else: print "No such command."
[ "def handle_command(self, data):\n if data is not None:\n command, input = data\n if command == CommandTypes.GUI:\n self.exec_gui(input)\n elif command == CommandTypes.CONNECT:\n self.exec_connect(input)\n elif command == CommandTypes.REFRESH:\n self.exec_refresh()\n elif command == CommandTypes.BACK:\n self.exec_back()", "def handle_client_msg(self,msg):\n command=msg.split(\" \")\n command=command[0]\n\n if self.is_not_included_in_options(command):\n input_msg = '(failure on last attempt)--Enter your command: ' \n elif command == 'EXIT':\n self.socket.sendall(msg.encode('utf-8'))\n time.sleep(3)\n self.socket.close()\n sys.exit()\n elif command == 'HELP':\n input_msg='Enter your command: '\n print('\\nREAD usage: READ <code>\\n'+\n 'WRITE usage WRITE <code> <status> <time>\\n'+\n 'DELETE usage DELETE <code>\\n'+\n 'MODIFY usage MODIFY <code> <status> <time>\\n'+\n 'DATA usage DATA\\n'+\n 'EXIT usage EXIT\\n')\n\n else:\n input_msg='Enter your command: '\n print(f'Your message: {msg}')\n print('Message sent \\t time: ' + str(datetime.datetime.now().time()))\n self.socket.sendall(msg.encode('utf-8'))\n data = self.socket.recv(1024).decode('utf-8')\n print('reply received \\t time: '+str(datetime.datetime.now().time()))\n print(data+'\\n')\n return input_msg", "def process_line(self, line):\n args = line.split(' ')\n command = args[0]\n try:\n handler = getattr(self, f'c_{command}')\n except AttributeError:\n log.warning(f'command {command!r} not found')\n\n try:\n handler(args)\n except ShutdownClient as err:\n self.shutdown(err.args[0])\n except Exception:\n log.exception('error executing command')", "def ProcessPlayerInput(self):\r\n\r\n #--------------------------\r\n # Parse Player's Command(s)\r\n #--------------------------\r\n \r\n # This takes the typed command and performs a full parse. By the time\r\n # this line is done the typed data has been converted to a known \r\n # state.\r\n #\r\n # The WHILE statement handles the case where the player typed multiple \r\n # commands on one line.\r\n\r\n P.AP().GetPlayerInput()\r\n while P.AP().CommandsList:\r\n \r\n #------------------\r\n # Clear Command Box\r\n #------------------\r\n \r\n # Once the typed command has been processed we erase the player's\r\n # command box to ready it for the next command.\r\n \r\n Terminal.Frame.TInput.Clear()\r\n\r\n #-----------------------\r\n # Handle Pre-Turn Events\r\n #-----------------------\r\n \r\n # These are any events (usually fuses, daemons, etc) that happen\r\n # so quickly they occur EVEN IF THE PLAYER'S COMMAND WAS NOT \r\n # UNDERSTOOD!\r\n #\r\n # This includes combat, poison, events that take seconds, etc.\r\n \r\n Engine.PreTurnHandler()\r\n\r\n #-------------------------------\r\n # If Command Successfully Parsed\r\n #-------------------------------\r\n \r\n # If the command was successfully parsed the engine calls the \r\n # turn handler is called. 
If the turn completed successfully then\r\n # the AFTER turn handler is called.\r\n \r\n if P.AP().Parser() == SUCCESS:\r\n if Engine.TurnHandler() == TURN_ENDS:\r\n Engine.AfterTurnHandler()\r\n\r\n #---------------------------\r\n # Display The Command Prompt\r\n #--------------------------- \r\n\r\n # Leave the prompt out for now, as it complicates input retrieval.\r\n # Save this for the future.\r\n\r\n Say(\"~p \" + P.AP().Prompt())\r\n\r\n #---------------\r\n # Game Finished?\r\n #---------------\r\n\r\n # If the player ended the game by typing quit, or if the game itself\r\n # decided to end, the Global.GameState property changes from RUNNING\r\n # to FINISHED.\r\n #\r\n # Once the game is finished the post game wrap up is called, then \r\n # the terminal session is terminated (closing the terminal).\r\n \r\n if Global.GameState == FINISHED:\r\n Engine.PostGameWrapUp()\r\n Terminal.Terminate()", "def handle_command():\n # Get the fully populated argparser\n parser = _construct_parser()\n # Parse the arguments\n args = parser.parse_args()\n # Execute the handler. Every subparser has handler so either it is set or the\n # ArgumentParser informs the user that the given command does not exist and this code\n # isn't reached.\n args.handler(args)", "def processCommand(self, message):\n if message.startswith(\"/\"):\n # meant to be a command\n message = message.replace('-', '_')\n command = message.split(\" \")\n if 'spawn' in message:\n self.game.Monster(self.game, command[1], self.game.unoff(list(pygame.mouse.get_pos())), 1)\n elif 'place' in message:\n self.game.Item(self.game, command[1], self.game.unoff(list(pygame.mouse.get_pos())), world=1)\n elif 'get' in message:\n self.game.Invent.add(command[1], self.game.Invent.nextFreeSlot())\n elif 'map' in message:\n self.game.blit_list = mapLoader.load(command[1], self.game)", "def process_command(self, sender, cmd, args):\n if cmd:\n self.cmd_processor.process_command(sender, cmd, args)", "def command_run_input(data, buffer, command):\n if command == \"/input return\": # As in enter was pressed.\n\n # Get input contents\n input_s = w.buffer_get_string(buffer, 'input')\n\n # Skip modification of settings\n if input_s.startswith('/set '):\n return w.WEECHAT_RC_OK\n\n # Iterate transformation pairs\n for replace_item in w.config_get_plugin('replacement_pairs').split(','):\n if replace_item:\n orig, replaced = replace_item.split('=')\n input_s = input_s.replace(orig, replaced)\n # Iterate words\n for replace_item in w.config_get_plugin('replacement_words').split(','):\n if replace_item:\n orig, replaced = replace_item.split('=')\n # Search for whitespace+word+whitespace and replace the word\n input_s = re.sub('(\\s+|^)%s(\\s+|$)' %orig, '\\\\1%s\\\\2' %replaced, input_s)\n\n # Spit it out\n w.buffer_set(buffer, 'input', input_s)\n return w.WEECHAT_RC_OK", "def parseCmd(self, arg):\n args = arg.split(';')\n if len(args) > 1:\n for a in args:\n self.parseCmd(a)\n args = arg.split()\n log.debug(str(len(args))+\" arguments: \"+str(args))\n\n # Skip one-word commands.\n # They would be commands for this app and they should've been\n # already handled.\n if len(args) <= 1:\n return\n\n action = args[0]\n target = \"clip\"\n\n if self.reader.isGroupCommand(args[1]):\n target = args[1]\n args.pop(1)\n\n # translate the words to commands\n # first argument is the command\n cmd = self.reader.getCommand(action, target)\n if cmd == None:\n return\n # the rest are parameters\n args.pop(0)\n args = \" \".join(args)\n\n osc.send(cmd, args)\n\n # execute 
translation as if it had been typed\n # self.onecmd(str(cmd))", "def dispatch_client_command(self, conn, data):\n\n # First determine the package length\n package_length_bytes = data[0:2]\n package_length = determine_package_length(package_length_bytes)\n\n # Extract the complete data\n data = data[2:package_length+2]\n\n # The first three chars are the command the rest is payload\n command, information = data[0:3], data[3:]\n\n # Put the command to the player queue\n player = self.online_player_system.get_player_on_connection(conn)\n player.add_command(command, information)", "def input_listener(self):\n while self.socket:\n line = self.read()\n if line:\n if line[0] == \"/\":\n # All commands start with a slash.\n args = line.split(\" \")\n\n # Grab the command without the leading slash\n command = args[0][1:]\n\n if command == \"list\" or command == \"l\":\n self.list_connected_users()\n\n elif command == \"message\" or command == \"m\":\n to_username = args[1]\n message = \" \".join(args[2:])\n self.send_message(message, to_username=to_username)\n\n elif command == \"status\" or command == \"s\":\n status = args[1]\n self.set_status(status)\n\n elif command == \"quit\" or command == \"q\":\n self.session_end()\n\n else:\n self.print_help()\n\n else:\n self.send_message(line)\n\n gevent.sleep(self.tick)", "def input_command(self, cmd):\n self._write_line(cmd)", "def handleCommand(command):\n try:\n print(f'Processing {command}')\n\n # Send command and its options to the command class in commands.py\n c = Command(command['command'], command['options'])\n\n # Run corresponding handle command function\n out = c.handleCommand()\n print(out)\n\n # Send result of the command to redis key value store\n r.set(out['key'], out['result'])\n\n # Send result of command over redis pubsub\n r.publish('data', json.dumps(out))\n except json.JSONDecodeError as error:\n print(error.msg)\n except redis.exceptions.TimeoutError:\n print('Redis connection timed out')\n except redis.exceptions.ConnectionError:\n print('Could not establish Redis connection')\n except Exception as e:\n print(e)", "def handle(self, message):\n for command in self.commands:\n response = Response(message.chat.id)\n if message.text:\n first_word = message.text.split()[0].split('@')[0]\n else:\n first_word = None\n if command.listen(message):\n try:\n if first_word == '/cancel':\n response = command.cancel(response)\n elif first_word == '/done':\n response = command.done(response)\n elif command.requires_arguments and not command.arguments and not command.is_active():\n response = Response(message.chat.id)\n response.send_message.text = self.dialogs['input'] % command.name\n elif command.arguments and command.arguments.lower() == 'help' and message.text.split()[0] == command.name:\n response = command.get_help(response)\n else:\n response = command.reply(response)\n except:\n response.send_message.text = self.dialogs['command_failed'] % command.name\n self.log(traceback.format_exc(), 'error')\n if isinstance(response, list):\n for rsp in response:\n self.reply(rsp)\n elif response:\n self.reply(response)\n scheduled_responses = command.get_scheduled()\n if scheduled_responses:\n for response in scheduled_responses:\n self.reply(response)\n if message.contains_command() and message.command.lower() not in self.command_names:\n response = Response(message.chat.id)\n response.send_message.text = self.dialogs['no_such_command']\n self.reply(response)", "def parse_input(sock, data, vlog):\n words = data.split()\n\n cmd = words[0]\n cmd = 
cmd.upper()\n\n msg = cmd + \" \" + \" \".join(words[1:]) + LINESEP\n vlog(\"SENDING: \" + msg)\n ircsocket.send(sock, msg, vlog)\n\n return cmd != \"QUIT\"", "def HandleCommand(message):\n global _commandHandlers\n ClientAPI.Log(\"HandleCommand: %s\" % message);\n if len(message) == 0:\n return\n if not message.startswith(\"/\"):\n message = \"/say \" + message\n # Handle some client side commands\n tokens = message.split()\n if len(tokens) <= 0:\n return\n args = \"\"\n if len(message) > len(tokens[0]):\n args = message[len(tokens[0])+1:]\n command = tokens[0][1:]\n if _commandHandlers.has_key(command):\n # We have a local handler for this command on the client.\n func = _commandHandlers[command]\n try:\n func(args)\n except Exception, e:\n ClientAPI.LogWarn(\"Failed to run command handler '%s' for command line: '%s'\" % (str(command), message))\n ClientAPI.LogWarn(\"Exception: %s\" % str(e))\n ClientAPI.LogWarn(\"Backtrace: %s\" % e.clsException.StackTrace)\n else:\n # This command is not handled on the client. Send it to the server.\n target = MarsTarget.GetCurrentTarget()\n if target is None:\n target = ClientAPI.GetPlayerObject()\n ClientAPI.Network.SendTargetedCommand(target.OID, message)", "def execute_cmd(self, raw_string, sessid=None):\r\n raw_string = utils.to_unicode(raw_string)\r\n raw_string = self.nicks.nickreplace(raw_string,\r\n categories=(\"inputline\", \"channel\"), include_player=False)\r\n if not sessid and _MULTISESSION_MODE in (0, 1):\r\n # in this case, we should either have only one sessid, or the sessid\r\n # should not matter (since the return goes to all of them we can\r\n # just use the first one as the source)\r\n try:\r\n sessid = self.get_all_sessions()[0].sessid\r\n except IndexError:\r\n # this can happen for bots\r\n sessid = None\r\n return cmdhandler.cmdhandler(self.typeclass, raw_string,\r\n callertype=\"player\", sessid=sessid)", "def handle_service(self, args):\n if not args or args.count(' ') < 5:\n self.error(IRC.ERR_NEEDMOREPARAMS)\n return\n self.error(IRC.ERR_UNKNOWNCOMMAND)", "def do_action_for_input(self, user_input):\n try:\n if user_input == CommandLineProgram.ACTION.HELP:\n self.print_help()\n elif user_input == CommandLineProgram.ACTION.ADD_USER:\n self.input_and_create_user()\n elif user_input == CommandLineProgram.ACTION.LIST_USERS:\n self.print_users()\n elif user_input == CommandLineProgram.ACTION.ADD_TRANSACTION:\n self.select_user_and_add_transaction()\n elif user_input == CommandLineProgram.ACTION.GENERATE_REPORT:\n self.select_user_and_print_report()\n except Exception:\n print(\"Try again\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to handle Central Server responses. Handles answers received from the Central Server (CS) and displays them.
def handleCSResponses(filename, ptc): cmd = client.receiveData(3) # List command CS response. if cmd == "FPT": # Read space. client.receiveData(1) ptc_count = '' message = '' while True: char = client.receiveData(1) if char.isdigit(): ptc_count += char else: message += char break if ptc_count == '': while True: char = client.receiveData(1) if char == '\n': if message == 'ERR': print "Error: FPT ERR." client.disconnect() return elif message == 'EOF': print "Error: FPT EOF." client.disconnect() return else: message += char ptcs = [] for i in range(int(ptc_count)): ptcs.append(client.receiveData(4).split()[0]) for i in range(int(ptc_count)): print str(i + 1) + " - " + ptcs[i] # Request command CS response. elif cmd == "REP": # Read space. client.receiveData(1) rt = client.receiveData(1) if rt not in ['F', 'R']: err = rt + client.receiveData(2) if err == 'ERR': print "Error: REP ERR." client.disconnect() return elif err == 'EOF': print "Error: REP EOF." client.disconnect() return # Read space. client.receiveData(1) data_size = '' while True: char = client.receiveData(1) if char.isdigit(): data_size += char else: break # Receive CS data. data = client.receiveData(int(data_size)) # Upper or Lower command output. if rt == 'F': received_file = filename[:-4] + "_" + ptc + '.txt' print 'received file ' + received_file #print '\t' + data_size + ' Bytes expected. Received: ' + str(len(data)) # Write result to file. file = open(received_file, 'w') file.write(data) file.flush() os.fsync(file.fileno()) file.close() # Word count command output. elif ptc == 'WCT': print 'Number of words: ' + data # Find longest Word command output. elif ptc == 'FLW': print 'Longest word: ' + data client.disconnect()
[ "def handle_server_response(my_socket, cmd):\r\n valid_msg, response = protocol.get_msg(my_socket)\r\n if valid_msg:\r\n list_cmd = cmd.split()\r\n if list_cmd[0] == 'DIR':\r\n print(\"The files in {} are: \".format(list_cmd[1]))\r\n print(response)\r\n if list_cmd[0] == 'DELETE':\r\n if not os.path.isfile(list_cmd[1]):\r\n print(\"Server response: \", response)\r\n else:\r\n print(\"The server didn't delete the file\")\r\n if list_cmd[0] == 'COPY':\r\n if os.path.isfile(list_cmd[2]):\r\n if filecmp.cmp(list_cmd[1], list_cmd[2]):\r\n print(\"Server response: \", response)\r\n else:\r\n print(\"The server didn't copy the file\")\r\n else:\r\n print(\"The server didn't copy the file\")\r\n if list_cmd[0] == 'EXECUTE' or list_cmd[0] == 'TAKE_SCREENSHOT':\r\n print(response)\r\n if list_cmd[0] == 'SEND_PHOTO':\r\n count = 0\r\n size_file = int(response)\r\n with open(SAVED_PHOTO_LOCATION, 'ab') as hfile:\r\n while count < size_file:\r\n packet = my_socket.recv(1024)\r\n hfile.write(packet)\r\n count += 1024\r\n print(\"The photo was sent and saved\")\r\n elif list_cmd[0] == 'EXIT':\r\n print(\"The server response: \", response)\r\n print(\"Pls close your socket too\")", "def handle_reply(self, msg):\n print msg", "def get_reply(self) -> str:\n data = bytearray()\n while True:\n # resetting data to the next line:\n data = data[data.find(b\"\\n\") + 1:]\n while b\"\\n\" not in data:\n # reading until there is a newline\n data.extend(self.conn.recv(256))\n replies = data.decode(\"utf-8\").split(\"\\n\")\n for r in replies:\n if not r:\n pass # server replied with an empty line\n elif CAPIReply.RUNNING.value in r:\n print(r)\n elif CAPIReply.COMPLETE.value in r:\n print(r)\n return r\n elif CAPIReply.INVALID.value in r or CAPIReply.ERROR.value in r:\n raise ValueError(\"Server replied with {}\".format(r))\n else:\n raise ValueError(\"Received an unknown reply from the server:\\n {}\".format(r))", "def read_response(self, read_packet):\n pass", "def handle(self):\n request_data = parse_request_json(self.request)\n response = None\n if request_data[SC.MSG_TITLE] == SC.MESSAGE_GET_ROLE:\n response = self.handle_get_role(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_BROADCAST_ROLES:\n response = self.handle_get_network_information(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_PRODUCE_VOTES:\n response = self.handle_produce_votes(request_data)\n elif request_data[SC.MSG_TITLE] == SC.MESSAGE_DISTRIBUTE_VOTES:\n response = self.handle_distribute_votes(request_data)\n else:\n response = self.handle_unexpected_request()\n send_response_json(self.request, response, request_data[SC.MSG_ORIGIN])", "def handle_client_msg(self,msg):\n command=msg.split(\" \")\n command=command[0]\n\n if self.is_not_included_in_options(command):\n input_msg = '(failure on last attempt)--Enter your command: ' \n elif command == 'EXIT':\n self.socket.sendall(msg.encode('utf-8'))\n time.sleep(3)\n self.socket.close()\n sys.exit()\n elif command == 'HELP':\n input_msg='Enter your command: '\n print('\\nREAD usage: READ <code>\\n'+\n 'WRITE usage WRITE <code> <status> <time>\\n'+\n 'DELETE usage DELETE <code>\\n'+\n 'MODIFY usage MODIFY <code> <status> <time>\\n'+\n 'DATA usage DATA\\n'+\n 'EXIT usage EXIT\\n')\n\n else:\n input_msg='Enter your command: '\n print(f'Your message: {msg}')\n print('Message sent \\t time: ' + str(datetime.datetime.now().time()))\n self.socket.sendall(msg.encode('utf-8'))\n data = self.socket.recv(1024).decode('utf-8')\n print('reply received \\t time: 
'+str(datetime.datetime.now().time()))\n print(data+'\\n')\n return input_msg", "def read_handler_responses(self):\r\n try:\r\n while True:\r\n if self.pending_responses:\r\n resp = self.pending_responses.popleft()\r\n handler = None\r\n else:\r\n resp = None\r\n handler = self.disp_sock.recv(zmq.NOBLOCK)\r\n delim = self.disp_sock.recv(zmq.NOBLOCK)\r\n assert delim == \"\", \"non-empty msg delimiter: \"+delim\r\n resp = self.disp_sock.recv(zmq.NOBLOCK)\r\n if resp == \"X\":\r\n self.mark_handler_disconnecting(handler)\r\n else:\r\n if handler is not None:\r\n self.mark_handler_alive(handler)\r\n if resp:\r\n self.recv_sock.send(resp,zmq.NOBLOCK)\r\n resp = None\r\n except zmq.ZMQError, e:\r\n if resp is not None:\r\n self.pending_responses.appendleft(resp)\r\n if e.errno not in (errno.EINTR,zmq.EAGAIN,):\r\n raise", "def response( self, msg ):\n\t\tPROTOCOL.info( 'Sending UMCP RESPONSE %s' % msg.id )\n\t\tdata = str( msg )\n\t\tself.__queue += str(msg)\n\n\t\tif self._do_send( self.__comm ):\n\t\t\tnotifier.socket_add( self.__comm, self._do_send, notifier.IO_WRITE )", "def translate_response(self, s=None, force_reply=False):\n \n if not s:\n if not self.s:\n print \"Error translate_response: Connection required.\"\n return None\n else:\n s = self.s\n \n result = {}\n head = s.recv(4)\n if not head:\n print \"Error translate_response: Connection closed.\"\n s.close()\n \n return None\n elif head == \"\\xFF\\xFF\\xFF\\xFF\":\n body = s.recv(18)\n \n result[\"packet_id\"] = head\n result[\"status_code\"] = body[0:2]\n result[\"command\"] = body[2:13].replace(\"\\x00\", \"\")\n result[\"body_type\"] = body[13:14]\n result[\"body_length\"] = struct.unpack(\"I\", body[14:18])[0]\n result[\"body\"] = decode_all(s.recv(result[\"body_length\"]))[0]\n \n return result\n else:\n encrypted_body_length = struct.unpack(\"I\", head)[0]\n \n encrypted_body = \"\"\n recv_encrypted_body_length = 0\n while recv_encrypted_body_length < encrypted_body_length:\n new = s.recv(encrypted_body_length - recv_encrypted_body_length)\n encrypted_body += new\n recv_encrypted_body_length += len(new)\n total_body = self.dec_aes(encrypted_body)\n \n total_body_length = struct.unpack(\"I\", total_body[18:22])[0]\n recv_total_body_length = len(total_body[22:])\n while recv_total_body_length < total_body_length:\n encrypted_body_length = struct.unpack(\"I\", s.recv(4))[0]\n \n encrypted_body = \"\"\n recv_encrypted_body_length = 0\n while recv_encrypted_body_length < encrypted_body_length:\n new = s.recv(encrypted_body_length - recv_encrypted_body_length)\n encrypted_body += new\n recv_encrypted_body_length += len(new)\n \n body = self.dec_aes(encrypted_body)\n total_body += body\n recv_total_body_length += len(body)\n \n result[\"packet_id\"] = total_body[0:4]\n result[\"status_code\"] = total_body[4:6]\n result[\"command\"] = total_body[6:17].replace(\"\\x00\", \"\")\n result[\"body_type\"] = total_body[17:18]\n result[\"body_length\"] = struct.unpack(\"I\", total_body[18:22])[0]\n \n result[\"body\"] = decode_all(total_body[22:])[0]\n \n if result[\"packet_id\"] != \"\\xFF\\xFF\\xFF\\xFF\" and force_reply:\n self.handle_packet(result)\n \n return self.translate_response(s, force_reply)\n else:\n return result", "def __on_request_response__(self, ch, method, props, body):\r\n\t\ttry:\r\n\t\t\tself.last_message = json.loads(body)\r\n\t\texcept ValueError:\r\n\t\t\tprint 'encountered an error while decoding the message'\r\n\t\t\tself.last_message = body\r\n\r\n\t\tself.response = 'received'", "def handle_WHATSAT(self, 
args):\n args[-1] = args[-1].strip() # get rid of \\n\n client = args[1]\n stamp = ServerClientProtocol.ClientTimeStamps[client] # get most recent time stamp\n\n # get parameters\n radius = args[2]\n days_ahead = int(args[3])\n\n # break up response\n stampData = stamp.split(' ')\n loc = stampData[3]\n \n # handle location signs\n if '+' in loc[0]:\n if '-' in loc: \n loc = ''.join(loc.split('+')).split('-')\n loc[0], loc[1] = loc[0], '-' + loc[1]\n else:\n loc = loc.split('+')[1:]\n loc[0], loc[1] = loc[0], loc[1]\n else:\n if '+' in loc:\n loc = ''.join(loc.split('-')).split('+')\n loc[0], loc[1] = '-' + loc[0], loc[1]\n else:\n loc = loc.split('-')[1:]\n loc[0], loc[1] = '-' + loc[0], '-' + loc[1]\n\n \n latitude, longitude = loc[0], loc[1]\n \n # now we send out HTTP request\n req = self.gatherHTTPRequest(latitude, longitude, radius, days_ahead, API_KEY)\n \n # we can create an SSL context to base our connections\n ssl_context = ssl.create_default_context()\n ssl_context.check_hostname = False\n ssl_context.verify_mode = ssl.CERT_NONE\n\n # create new coroutine\n log_write('Sent HTTP request: {}'.format(req))\n coro = loop.create_connection(\n lambda: HTTPClientProtocol(self.transport, args, req, stamp),\n 'app.ticketmaster.com', # Host\n 443, # HTTPS Port\n ssl=ssl_context\n )\n loop.create_task(coro)", "def __verify_answer(self, rcv):\n debug('ReplyHandler.receive: %r', rcv)\n if self.__send_from_queue(rcv):\n return ''\n if self.__send_from_static(rcv):\n return ''\n if len(self.queue) == 0:\n self.__seterror('svr queue is empty but received %r' % rcv)\n else:\n self.__seterror('svr expected %r but received %r' % (self.queue[0]['cmd'], rcv))\n if rcv.find('?') > -1:\n self.request.sendall('%s\\n' % self.server.RequestHandlerClass.errorbuf) # to prevent GCS timeout error\n return ''", "def handleRequest(s, acl2):\n\n\t# Receive a command from the socket\n\tbuff = socketFuncs.reliableRecv(s)\n\tprint(f\"Buff: {buff}\")\n\tif buff == []: return False\n\n\t# Convert the buffer: [bytes] -> [string] -> string -> bytes\n\tbuff = [b.decode(\"utf-8\") for b in buff]\n\tbuff = ''.join(buff)\n\tbuff = bytes(buff, \"utf-8\")\n\n\t# Send the request to the ACL2 server\n\tresponse = interactFromPrompt(buff, acl2)\n\tprint(f\"Response:\\n{response}\")\n\n\t# Send response back\n\tsocketFuncs.reliableSend(response, s)\n\n\treturn True", "def status_callback(self, response):\n print response", "def handle_one_request(self):\n\n request = self.rfile.readline()\n response = \"\"\n try:\n # Access to protected member; pylint: disable=W0212\n response = self.server._marshaled_dispatch(request)\n # No exception type specified; pylint: disable=W0702\n except:\n # The server had an unexpected exception.\n # dump the error to stderr\n print(traceback.format_exc(), file=sys.stderr)\n\n # Return the error to the caller.\n err_lines = traceback.format_exc().splitlines()\n trace_string = '{0} | {1}'.format(\n err_lines[-3], err_lines[-1])\n fault = rpclib.Fault(-32603,\n 'Server error: {0}'.format(trace_string))\n response = fault.response()\n\n # tell the server to exit\n self.server.initiate_shutdown()\n\n self.wfile.write(response)\n self.wfile.flush()", "def cb(self, future):\r\n try:\r\n res = future.result()\r\n resp = res.message\r\n print(\"Response received: \", resp)\r\n self.process_response(resp)\r\n except Exception as ex:\r\n print(future.exception())", "def handle_response( resp):\n if resp.status_code != 200:\n msg = \"OSRM HTTP {0}: {1}\".format(resp.status_code, resp.content)\n print msg\n 
return False\n resp_json = resp.json()\n if 'code' not in resp_json:\n msg = 'OSRM RESPONSE: Missing \\'code\\' from JSON response.'\n print msg\n return False\n if resp_json['code'] != 'Ok':\n msg = ('OSRM RESPONSE: Code is NOT \\'Ok\\'. Code: \\'{}\\''\n .format(resp_json['code']))\n print msg\n return False\n return True", "def respond(client):\n response = input(\"Enter a value: \")\n client.send(bytes(response, 'utf8'))\n client.close()", "def process_cleared_requests(msg):\n \"\"\"\n This is the HTTP response message with \n\n HTTP/1.0 200 OK --- status line [index 0]\n Content-Type:Application/json -- headers [index 1]\n Content-Length:2\n Host:127.0.0.1\n Date:2019-04-21 00:51:56.592347\n User-Agent:Custom HTTP endpoint written for CSE5306 lab [index 5]\n [index 6]\n [[\"get/cleared/requests\"], [\"sss\", \"ccc\", true]] actual data [index 7]\n \"\"\"\n # see above that index 7 is the line we care about\n msg = msg.split(\"\\n\")\n response_body = msg[7]\n import json\n\n clearance_requests = json.loads(response_body)\n # if there's more than one entry, we have some data to process\n if len(clearance_requests) > 1:\n # start from entry 2 as first entry is dummy entry\n clearance_requests = clearance_requests[1:]\n\n for request in clearance_requests:\n if request[2]:\n status = \"Approved\"\n else:\n status = \"Rejected\"\n # show approval status in UI\n add_msg_to_scrollbox(\n \"Student name {} \\nCourse Requested: {} \\nAdvisor Decision: {}\\n\".format(\n request[0], request[1], status\n )\n )\n else:\n add_msg_to_scrollbox(\"No message found\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the smallest number n that is a multiple of both a and b.

>>> multiple(3, 4)
12
>>> multiple(14, 21)
42
def multiple(a, b):
    import math
    return a * b // math.gcd(a, b)
[ "def least_common_multiple(a, b):\n a, b = min(a, b), max(a, b)\n\n if b % a == 0:\n return b\n\n found = False\n number_i = 1\n multiple_i = a\n while not found:\n if multiple_i % b == 0:\n return multiple_i\n number_i += 1\n multiple_i = a * number_i", "def smallest_multiple(N):", "def lowest_common_multiple(num_a, num_b):\n validate_integers(num_a, num_b)\n gcd_result = greatest_common_divisor(num_a, num_b)\n result = (num_a * num_b) / gcd_result\n return int(result)", "def _next_multiple(n: int, k: int) -> int:\n div, mod = divmod(n, k)\n if mod > 0:\n div += 1\n return div * k", "def closest_multiple(target, number):\n if number > target:\n return number - target, 1, number\n z = int(number / 2)\n new_target = target + z\n new_target -= new_target % number\n multiplier = new_target // number\n return abs(target - new_target), multiplier, number", "def smallest_mult(n_mult):\n # comment for testing\n return reduce(scm, range(1, n_mult + 1))", "def lcm(*args):\r\n return reduce(lambda a, b: a * b / gcd(a, b), args)", "def lcm(x):\n# \"\"\"Return the least common multiple of a set of numbers. Call as\n# 'lcm(2, 3, 4, 5)', e.g.\n# \"\"\"\n from fractions import gcd\n z = x[0]\n for y in x[1:]:\n z = z*y//gcd(z, y)\n return z", "def multiples(n):\n if n % 3 == 0 or n % 5 == 0:\n return True\n return False", "def multiples_of_3_and_5():\n return (x for x in itertools.count(1) if x % 3 == 0 or x % 5 == 0)", "def get_smallest_divisible_number(max_factor):\n res = 1\n for factor_i in range(1, max_factor + 1):\n res = least_common_multiple(res, factor_i)\n return res", "def mult(a,b):\n if b==1:\n return a\n else:\n return a + mult(a, b -1)", "def modular_linear_equation_solver(a, b, n):\n d, x_prime, _ = recur_extended_euclid(a, n)\n if not b % d:\n x = x_prime*(b//d) % n\n return True, [(x + i*(n//d)) % n for i in range(d)]\n return False, []", "def maxDivide(a, b):\n\t\twhile a % b == 0:\n\t\t\ta = a / b\n\t\treturn a", "def round_to_multiple(number, multiple) -> int:\n return multiple * round(number / multiple)", "def find_next_multiple_of_power_two(number, initial=3):\n msb = number.bit_length()\n return 3 if number <= 1 else initial << msb - 2 << (1 & number >> msb - 2)", "def sumOfMultiples(m, b):\n u = ( b - 1 ) / m\n result = m * u * ( u + 1 ) / 2\n return result", "def son_congruentes_modulo(a,b,n):\n\treturn n_esmultiplode_m(a-b,n)", "def divide_ceil(a, b):\n q, r = divmod(a, b)\n if r > 0:\n q += 1\n return q" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of unique digits in positive integer n

>>> unique_digits(8675309) All are unique
7
>>> unique_digits(1313131) 1 and 3
2
>>> unique_digits(13173131) 1, 3, and 7
3
>>> unique_digits(10000) 0 and 1
2
>>> unique_digits(101) 0 and 1
2
>>> unique_digits(10) 0 and 1
2
def unique_digits(n):
    uni = 0
    found = {i: False for i in range(10)}
    while n != 0:
        d = n % 10
        if not found[d]:
            found[d] = True
            uni += 1
        n = n // 10
    return uni
[ "def unique_digits(n):\n \"*** YOUR CODE HERE ***\"\n s = []\n while n>0:\n s.append(n%10)\n n=n//10\n set(s)\n return len(set(s))\n\n \"\"\"Bonus Code that counts how many repeating digits in a number\n ud=0\n while n>0:\n x=1\n #print (\"n:\",n)\n nc = n//10\n while x<len(str(n)):\n #print (\"nc:\", nc)\n print(\"n\",n%10)\n print( \"nc\",nc%10)\n if ((n%10) == (nc%10)):\n print(\"worng\")\n ud += 1\n nc = nc // 10\n x += 1\n print (\"x:\", x)\n n = n // 10\n return ud\n \"\"\"", "def digit_count(n):\n return len(str(n))", "def ndigits(number):\n count=1\n if abs(number//10) == 0:return 1\n else:return count+1*ndigits(abs(number//10))", "def p63():\n count = 0\n for n in range(1000):\n for i in itertools.count(1):\n digits = len(str(i ** n))\n if digits == n:\n count += 1\n print(\"%d: %d\" % (n, i ** n))\n elif digits > n:\n break\n return count", "def get_number_of_digits(number):\n return int(math.log10(number))+1", "def repeated_digit_counts(digits):\n result = []\n\n i, j = 0, 0\n while i < len(digits):\n while j < len(digits) and digits[j] == digits[i]:\n j += 1\n result.append(j-i)\n i = j\n return result", "def find_digit_factorial_sum(n):\n return sum(FACT_DIGITS[digit] for digit in gen_reverse_digits(n))", "def n_unique(self) -> int:\n return self._s.n_unique()", "def num_of_digits(test_number):\n return int(math.log(test_number,10))+1", "def sum_digits(n):\n\tif n < 10:\n\t\treturn n\n\n\treturn sum_digits(n//10) + n % 10", "def iter_digits(n):\n return (digit(n, i) for i in range(digit_count(n)))", "def get_digit_sum(n):\n return sum(int(digit) for digit in str(n))", "def __num_digits(num: int):\n return len(str(num))", "def digits_of(n, base=10):\n # Early exit for base 10, does divmod stuff in C.\n if base == 10:\n return [int(d) for d in str(n)]\n\n if n == 0:\n return [0]\n digits = []\n while n > 0:\n n, d = divmod(n, base)\n digits.append(d)\n digits.reverse()\n return digits", "def make_unqique_sorted_random_numbers(n):\n lower_bound = 0\n upper_bound = n * 10\n\n already_used_numers = set()\n\n accumulator = []\n\n while len(accumulator) < n:\n random_number = random.randint(lower_bound, upper_bound)\n if random_number not in already_used_numers:\n accumulator.append(random_number)\n already_used_numers.add(random_number)\n\n return list(sorted(accumulator))", "def digits(num):\n\treturn set(map(int, str(num)))", "def ndigits(x):\n if abs(x) == 0:\n return 0\n else:\n return 1 + ndigits(abs(x) / 10)", "def random_with_n_digits(self, n):\n\n range_start = 10 ** (n - 1)\n range_end = (10 ** n) - 1\n return randint(range_start, range_end)", "def champernowne(n):\n digit_count, next_integer = 0, 1\n while digit_count + len(str(next_integer)) < n:\n digit_count += len(str(next_integer))\n next_integer += 1\n return int(str(next_integer)[n - digit_count - 1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract data from the summarizer and dump it to the reporter
def dump(self, summarizer):
    pass
[ "def summariseSuiteResult(self, suite):", "def summaries(self, data):\n return data", "def summariseResult(self, test):", "def exportAnalysisDataAfterIteration(self):\n\n\n # Metrics output\n df = pd.DataFrame(self.data[\"Diversity\"])\n df.to_pickle(self.outfolder + \"/metrics analysis.pkl\")\n\n # Topics distribution output\n df = pd.DataFrame(self.data[\"Distribution\"])\n df.to_pickle(self.outfolder + \"/metrics distribution.pkl\")", "def summarize(self):\n for stats in self.stats_pool.values():\n _ = stats.summarize()", "def summariseResult(self, test):\n assert not test.isSuite\n if self.mode not in [\"FAIL-SUMMARY\"]:\n return\n result = test.result\n itemType = \"test\"\n if test.isSuite: ## TODO???\n itemType = \"suite\"\n lines, textLen, dotLen = self.formatAnnouncement(test.summary,\n number=test.number, testID=test.testID)\n text = \"\\n\".join(lines)\n writer = sNormal.write\n writer(\"%s%s%12s\\n\" % (text, \".\" * dotLen, result.state))", "def summariseSuiteResult(self, suite):\n try:\n if self.mode not in [\"FAIL-SUMMARY\"]:\n return\n result = suite.result\n itemType = \"suite\"\n lines, textLen, dotLen = self.formatAnnouncement(suite.summary)\n text = \"\\n\".join(lines)\n writer = sNormal.write\n writer(\"%s%s%12s\\n\" % (text, \".\" * dotLen, result.state))\n finally:\n self.level += 1", "def _summarize_expt(self):\n\n print('\\nCURRENT EXPERIMENT:\\n{line}'.format(line='-' * 50))\n print('Training percentage : {:.2}'.format(self.train_perc))\n print('Number of CV repetitions : {}'.format(self.num_rep_cv))\n print('Number of processors : {}'.format(self.num_procs))\n print('Dim reduction method : {}'.format(self.dim_red_method))\n print('Dim reduction size : {}'.format(self.reduced_dim))\n print('Predictive model chosen : {}'.format(self.pred_model))\n print('Grid search level : {}\\n'.format(self.grid_search_level))\n\n if len(self.covariates) > 0:\n print('Covarites selected : {}'.format(', '.join(self.covariates)))\n print('Deconfoudning method : {}\\n'.format(self.deconfounder))\n\n if self._workflow_type == 'classify':\n self._target_sizes = list(self.datasets.target_sizes.values())\n self._chance_accuracy = chance_accuracy(self._target_sizes, 'balanced')\n print('Estimated chance accuracy : {:.3f}\\n'\n ''.format(self._chance_accuracy))", "def summary(self):\n\n failedNumber = 0\n successfulNumber = 0\n\n self.logUtil.log(\"Test Summary\")\n\n for runName in self.summaries:\n\n successfulCases = self.summaries[runName]['successfulCases']\n failedCases = self.summaries[runName]['failedCases']\n\n successfulNumber += len(successfulCases)\n failedNumber += len(failedCases)\n\n self.logUtil.log(\"++++++++++++++++++++++++++++++\")\n self.logUtil.log(\"Test Run:\" + runName)\n self.logUtil.log(\"++++++++++++++++++++++++++++++\")\n\n self.logUtil.log(\"Successful cases (\" + str(len(successfulCases)) + '):', 'success')\n\n for case in successfulCases:\n self.logUtil.log('+ ' + case['id'] + ' (' + str(case['testResult']['actualTime']) + ' ms)')\n\n self.logUtil.log(\"\")\n self.logUtil.log(\"Failed cases (\" + str(len(failedCases)) + '):', \"warning\")\n\n for case in failedCases:\n testResult = case['testResult']\n self.logUtil.log('+ ' + case['id'] + ' (' + str(testResult['actualTime']) + ' ms)')\n\n self.logUtil.log(\"-----------------------------\")\n self.logUtil.log(\"Reason: \")\n\n if not testResult['expectedValueTestResult'] and not (testResult['expectedReturn'] is None or testResult['expectedReturn'] == 'any'):\n self.logUtil.log(\"Value dosn't match: (expect 
- \" + str(testResult['expectedReturn']) + \" | actual - \" + str(testResult['actualReturn']) + ')', 'warning')\n\n if not testResult['expectedTimeTestResult'] and not (testResult['expectedTime'] is None or testResult['expectedTime'] == 'any'):\n self.logUtil.log(\"Unexpected time consumption (ms): (expect - \" + str(testResult['expectedTime']) + \" | actual - \" + str(testResult['actualTime']) + ')', 'warning')\n\n self.logUtil.log(\"-----------------------------\")\n\n return {\n 'successNumber': successfulNumber,\n 'failedNumber': failedNumber,\n }", "def analyze_minerals():\n datapath = os.path.join(DATA_DIR, 'minerals.json')\n with open(datapath) as datafile:\n\n fields = defaultdict(dict)\n valuesets = defaultdict(set)\n occurences = defaultdict(int)\n\n mineralsjson = json.load(datafile)\n\n for mineral in mineralsjson:\n for key, value in mineral.items():\n if value != '':\n occurences[key] += 1\n valuesets[key].add(value)\n if 'length' in fields[key].keys():\n if len(value) < fields[key]['length']:\n continue\n fields[key]['length'] = len(value)\n fields[key]['example'] = value\n\n with open('data_details.txt', 'w') as resultfile:\n for key in sorted(occurences,\n key=occurences.get,\n reverse=True):\n resultfile.write(\n (\"{4}\\nField: {0:25s}\\n{4}\\noccurence: #{1:3d}, \"\n \"max_length: {2:3d} \\nValues: {3}\\n\")\n .format(\n key,\n occurences[key],\n fields[key]['length'],\n valuesets[key],\n 80 * '-',\n )\n )\n\n with open('data_summary.txt', 'w') as resultfile:\n resultfile.write(\"{0:25s}|{1:15s}|{2:15s}|{3:15s}\\n\".format(\n 'Fieldname',\n 'occurence count',\n 'distinct count',\n 'max length',\n ))\n resultfile.write(\"{0:25s}|{1:15s}|{1:15s}|{1:15s}\\n\".format(\n 25 * '-',\n 15 * '-',\n ))\n for key in sorted(occurences, key=occurences.get,\n reverse=True):\n\n resultfile.write(\"{0:25s}|{1:15d}|{2:15d}|{3:15d}\\n\".format(\n key,\n occurences[key],\n len(valuesets[key]),\n fields[key]['length'],\n ))", "def summarise(self) -> None:\n with open(self.parsed_replay_file, \"r\") as f:\n self.parsed_replay = [line for line in f]\n\n # Some parsing stuff here\n\n self.match_summary = {\n \"match_id\": 123345,\n \"match_date\": \"2019-07-07\", #If we can get it, otherwise upload timestamp from the meta file\n \"radiant\": \"Radiant team name\",\n \"dire\": \"Dire team name\",\n \"radiant_won\": True,\n \"radiant_kills\": 22,\n \"dire_kills\": 3,\n \"duration\": 3600, # Time in seconds,\n \"first_blood_time\": 120, # Time in seconds\n \"first_blood_hero\": \"Hero name\",\n \"picks\": {\n \"radiant\": {\n \"pick_1\": \"Hero name\",\n \"pick_2\": \"Hero name\"\n # etc\n },\n \"dire\": {\n \"pick_1\": \"Hero name\",\n \"pick_2\": \"Hero name\"\n }\n },\n \"bans\": {\n \"radiant\": {\n \"ban_1\": \"Hero name\",\n \"ban_2\": \"Hero name\"\n },\n \"dire\": {\n \"ban_1\": \"Hero name\",\n \"ban_2\": \"Hero name\"\n }\n }\n }\n\n # A list of player summaries\n self.player_summaries = [\n {\n \"match_id\": 123345,\n \"hero\": \"Hero name\",\n \"player\": \"Player name\",\n \"team\": \"Team name\",\n \"side\": \"Radiant\",\n \"won\": True,\n \"kills\": 30,\n \"deaths\": 5,\n \"assists\": 6,\n \"net_worth\": 31493, # At end of game\n \"level\": 25,\n \"gpm\": 800,\n \"xpm\": 400,\n \"last_hits\": 200,\n \"denies\": 30,\n \"hero_damage\": 10000,\n \"building_damage\": 20000,\n \"damage_taken\": 5000,\n \"biggest_kill_streak\": 4,\n \"bounty_runes\": 4,\n \"wards_placed\": 5,\n \"items\": { # Not sure on this data structure\n \"slot_1\": {\n \"name\": \"BKB\",\n \"time\": 900 # Game 
time item bought in seconds\n } # repeat for other item slots and backpack\n },\n \"timings\": {\n \"gold\": {\n 0: 600,\n 1: 800\n # per minute net worth total\n },\n \"xp\": {\n 0: 0,\n 1: 150\n # per minute xp total\n }\n }\n\n }\n ]", "def __ExecuteSummarize(self):\n\n # If no results file is specified, use a default value.\n if len(self.__arguments) == 0:\n results_path = \"results.qmr\"\n else:\n results_path = self.__arguments[0]\n\n # The remaining arguments, if any, are test and suite IDs.\n id_arguments = self.__arguments[1:]\n # Are there any?\n if len(id_arguments) > 0:\n filter = 1\n # Expand arguments into test IDs.\n try:\n test_ids, suite_ids \\\n = self.GetDatabase().ExpandIds(id_arguments)\n except (qm.test.database.NoSuchTestError,\n qm.test.database.NoSuchSuiteError), exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n except ValueError, exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n else:\n # No IDs specified. Show all test and resource results.\n # Don't show any results by test suite though.\n filter = 0\n suite_ids = []\n\n # Get an iterator over the results.\n try:\n results = base.load_results(open(results_path, \"rb\"),\n self.GetDatabase())\n except (IOError, xml.sax.SAXException), exception:\n raise QMException, \\\n qm.error(\"invalid results file\",\n path=results_path,\n problem=str(exception))\n\n any_unexpected_outcomes = 0\n\n # Compute the list of result streams to which output should be\n # written. Results path only used for HTML/NexTest\n streams = self.__GetResultStreams(results_path)\n \n # Send the annotations through.\n for s in streams:\n s.WriteAllAnnotations(results.GetAnnotations())\n\n # Get the expected outcomes.\n outcomes = self.__GetExpectedOutcomes()\n\n # Our filtering function. 
Should use itertools.ifilter, once\n # we can depend on having Python 2.3.\n def good(r):\n return r.GetKind() == Result.TEST \\\n and r.GetId() in test_ids\n\n # Simulate the events that would have occurred during an\n # actual test run.\n for r in results:\n if not filter or good(r):\n for s in streams:\n s.WriteResult(r)\n if (r.GetOutcome()\n != outcomes.get(r.GetId(), Result.PASS)):\n any_unexpected_outcomes = 1\n for s in streams:\n s.Summarize()\n\n if any_unexpected_outcomes:\n return 1\n \n return 0", "def write_summary(self):\n start = datetime.fromtimestamp(self.startUpTime)\n end = datetime.now()\n dtformat = '%Y-%m-%d %H:%M:%S'\n utcformat = '%Y-%m-%dT%H:%M:%SZ'\n output_count = 0\n output_size = 0\n\n if self.outputFilePath is not None:\n output_count = 1\n output_size = self.get_size(self.outputFilePath)\n elif self.outputDir is not None:\n output_count = len(self.get_files(self.outputDir, 'xml'))\n output_size = self.get_size(self.outputDir)\n\n harvest_frequency = 'once'\n if 'harvest_frequency' in self.harvestInfo and self.harvestInfo['harvest_frequency'] != '':\n harvest_frequency = self.harvestInfo['harvest_frequency']\n\n summary = {\n 'id': self.harvestInfo['harvest_id'],\n 'batch': self.harvestInfo['batch_number'],\n # 'mode': self.harvestInfo['mode'],\n 'method': self.harvestInfo['harvest_method'],\n 'advanced_harvest_mode': self.harvestInfo['advanced_harvest_mode'],\n 'crosswalk': 'xsl_file' in self.harvestInfo and self.harvestInfo['xsl_file'] != \"\",\n 'frequency': harvest_frequency,\n 'url': self.harvestInfo['uri'],\n 'error': {\n 'log': str.strip(self.errorLog),\n 'errored': self.errored\n },\n 'completed': self.completed,\n 'start_utc': datetime.fromtimestamp(self.startUpTime, timezone.utc).strftime(utcformat),\n 'end_utc': datetime.now(timezone.utc).strftime(utcformat),\n 'start': start.strftime(dtformat),\n 'end': end.strftime(dtformat),\n 'duration': (end - start).seconds,\n 'output': {\n 'file': self.outputFilePath,\n 'dir': self.outputDir,\n 'count': output_count,\n 'size': output_size\n }\n }\n self.write_to_field(summary, 'summary')", "def gen_report_data(self):\n pass", "def create_report():\n donations_list.get_summary", "def test_get_all_summaries(self):\n summaries = get_all_summaries(self.rec)\n self.assertEqual(len(summaries), 2)\n self.assertEqual(isinstance(summaries,list), True)\n self.assertEqual(len(summaries[0]), 18)\n self.assertEqual(summaries[0][0], '>FIQU8OX05GCVRO')\n self.assertEqual(summaries[1][0], '>FIQU8OX05F8ILF')", "def record_summary(self, t):\n\n fd = {\n self.avg_reward_placeholder: self.avg_reward,\n self.avg_collsions_placeholder: self.avg_collisions,\n self.avg_distance_placeholder: self.avg_distance,\n #self.eval_reward_placeholder: self.eval_reward,\n }\n summary = self.sess.run(self.merged, feed_dict=fd)\n # tensorboard stuff\n self.file_writer.add_summary(summary, t)", "def summarize(usl_fit):\n print\n print '----- Summary -----'\n print\n print usl_fit.fit_report()", "def test_get_report_data(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract oblique slice from SimpleITK image. Efficient, because it rotates the grid and only samples the desired slice.
def extract_slice_from_sitk_image(sitk_image, point, Z, X, new_size, fill_value=0):
    num_dim = sitk_image.GetDimension()
    orig_pixelid = sitk_image.GetPixelIDValue()
    orig_direction = sitk_image.GetDirection()
    orig_spacing = np.array(sitk_image.GetSpacing())
    new_size = [int(el) for el in new_size]  # SimpleITK expects lists, not ndarrays
    point = [float(el) for el in point]
    rotation_center = sitk_image.TransformContinuousIndexToPhysicalPoint(point)
    X = X / np.linalg.norm(X)
    Z = Z / np.linalg.norm(Z)
    assert np.dot(X, Z) < 1e-12, 'the two input vectors are not perpendicular!'
    Y = np.cross(Z, X)
    orig_frame = np.array(orig_direction).reshape(num_dim, num_dim)
    new_frame = np.array([X, Y, Z])
    # important: when resampling images, the transform is used to map points from the output image space into the input image space
    rot_matrix = np.dot(orig_frame, np.linalg.pinv(new_frame))
    transform = sitk.AffineTransform(rot_matrix.flatten(), np.zeros(num_dim), rotation_center)
    phys_size = new_size * orig_spacing
    new_origin = rotation_center - phys_size / 2
    resample_filter = sitk.ResampleImageFilter()
    resampled_sitk_image = resample_filter.Execute(sitk_image, new_size, transform, sitk.sitkLinear, new_origin, orig_spacing, orig_direction, fill_value, orig_pixelid)
    return resampled_sitk_image
[ "def slice_image(image):\n rz_image = resize_image(image)\n\n # new image size\n (new_height, new_width) = rz_image.shape\n\n # 3% height\n startY = int(0.03 * new_height)\n\n # 60% height\n endY = int(20 * startY)\n\n # 10% width\n startX = int(0.05 * new_width)\n\n endX = int(new_width - startX)\n\n sliced_image = rz_image[startY:endY, startX:endX]\n\n # cv2.imshow('sliced image', sliced_image)\n # cv2.waitKey()\n\n return sliced_image", "def get_slice_from_volume(image, view, slice_id):\n if(view == 1):\n image = np.transpose(image, [2, 0, 1])\n elif(view == 2):\n image = np.transpose(image, [1, 0, 2])\n return image[slice_id]", "def _get_image_slice_around_atom(\n self,\n image_data,\n slice_size):\n x0 = self.pixel_x - slice_size/2\n x1 = self.pixel_x + slice_size/2\n y0 = self.pixel_y - slice_size/2\n y1 = self.pixel_y + slice_size/2\n\n if x0 < 0.0:\n x0 = 0\n if y0 < 0.0:\n y0 = 0\n if x1 > image_data.shape[1]:\n x1 = image_data.shape[1]\n if y1 > image_data.shape[0]:\n y1 = image_data.shape[0]\n x0, x1, y0, y1 = int(x0), int(x1), int(y0), int(y1)\n data_slice = copy.deepcopy(image_data[y0:y1, x0:x1])\n return data_slice, x0, y0", "def test_slice(self):\r\n img = Image(np.random.randint(0, 255, size=(100, 500, 3), dtype=np.uint8))\r\n\r\n sliced = img[...]\r\n assert np.allclose(sliced, img)\r\n assert sliced.name == img.name\r\n assert not _is_ref_unequal(sliced, img)", "def _get_clipping_slices(cost_fpath, sc_point_idx, radius=None):\n with ExclusionLayers(cost_fpath) as f:\n shape = f.shape\n\n if radius is not None:\n row, col = sc_point_idx\n row_min = max(row - radius, 0)\n row_max = min(row + radius, shape[0])\n col_min = max(col - radius, 0)\n col_max = min(col + radius, shape[1])\n\n start_indices = (row - row_min, col - col_min)\n else:\n start_indices = sc_point_idx\n row_min, row_max = None, None\n col_min, col_max = None, None\n\n row_slice = slice(row_min, row_max)\n col_slice = slice(col_min, col_max)\n\n return start_indices, row_slice, col_slice", "def slice(self, view, numpy_order=True):\n if hasattr(view, \"__len__\") and len(view) > self.wcs.naxis:\n raise ValueError(\"Must have # of slices <= # of WCS axes\")\n elif not hasattr(view, \"__len__\"): # view MUST be an iterable\n view = [view]\n\n if not all(isinstance(x, slice) for x in view):\n # We need to drop some dimensions, but this may not always be\n # possible with .sub due to correlated axes, so instead we use the\n # generalized slicing infrastructure from astropy.wcs.wcsapi.\n return SlicedFITSWCS(self, view)\n\n # NOTE: we could in principle use SlicedFITSWCS as above for all slicing,\n # but in the simple case where there are no axes dropped, we can just\n # create a full WCS object with updated WCS parameters which is faster\n # for this specific case and also backward-compatible.\n\n wcs_new = self.deepcopy()\n if wcs_new.sip is not None:\n sip_crpix = wcs_new.sip.crpix.tolist()\n\n for i, iview in enumerate(view):\n if iview.step is not None and iview.step < 0:\n raise NotImplementedError(\"Reversing an axis is not implemented.\")\n\n if numpy_order:\n wcs_index = self.wcs.naxis - 1 - i\n else:\n wcs_index = i\n\n if iview.step is not None and iview.start is None:\n # Slice from \"None\" is equivalent to slice from 0 (but one\n # might want to downsample, so allow slices with\n # None,None,step or None,stop,step)\n iview = slice(0, iview.stop, iview.step)\n\n if iview.start is not None:\n if iview.step not in (None, 1):\n crpix = self.wcs.crpix[wcs_index]\n cdelt = self.wcs.cdelt[wcs_index]\n # 
equivalently (keep this comment so you can compare eqns):\n # wcs_new.wcs.crpix[wcs_index] =\n # (crpix - iview.start)*iview.step + 0.5 - iview.step/2.\n crp = (\n (crpix - iview.start - 1.0) / iview.step\n + 0.5\n + 1.0 / iview.step / 2.0\n )\n wcs_new.wcs.crpix[wcs_index] = crp\n if wcs_new.sip is not None:\n sip_crpix[wcs_index] = crp\n wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step\n else:\n wcs_new.wcs.crpix[wcs_index] -= iview.start\n if wcs_new.sip is not None:\n sip_crpix[wcs_index] -= iview.start\n\n try:\n # range requires integers but the other attributes can also\n # handle arbitrary values, so this needs to be in a try/except.\n nitems = len(builtins.range(self._naxis[wcs_index])[iview])\n except TypeError as exc:\n if \"indices must be integers\" not in str(exc):\n raise\n warnings.warn(\n f\"NAXIS{wcs_index} attribute is not updated because at \"\n f\"least one index ('{iview}') is no integer.\",\n AstropyUserWarning,\n )\n else:\n wcs_new._naxis[wcs_index] = nitems\n\n if wcs_new.sip is not None:\n wcs_new.sip = Sip(\n self.sip.a, self.sip.b, self.sip.ap, self.sip.bp, sip_crpix\n )\n\n return wcs_new", "def _slic(image,\n n_segments=15000,\n compactness=0.1,\n verbose=True):\n \n if len(image.shape) > 4:\n err = 'Sorry, 2D 3D or 4D numpy array expected!'\n raise RuntimeError(err)\n if len(image.shape) == 4:\n if verbose:\n print 'Multi-modal supervoxel calculation!'\n \n start = time.time()\n min_size_factor = 0.5\n int_mask = skimage_slic(image,\n n_segments=n_segments,\n compactness=compactness,\n multichannel=False,\n min_size_factor=min_size_factor)\n # within slic, multichannel will automatically be set to True\n # if 4D input\n \n if verbose:\n print 'SLIC RAN w PARAMS: '\n print '\\t compactness ' + str(compactness)\n print '\\t n_segments ' + str(n_segments)\n print '\\t segmented areas ' + str(np.max(int_mask))\n print '\\t computation time: ' + str(time.time() - start)\n \n int_max = np.max(int_mask)\n while int_max > 10 * n_segments:\n if verbose:\n print 'Too many supervoxels, increasing min_size_factor...'\n min_size_factor *= 10\n int_mask = skimage_slic(image,\n n_segments=n_segments,\n compactness=compactness,\n multichannel=False,\n min_size_factor=min_size_factor)\n\n int_max = np.max(int_mask)\n \n if int_max < n_segments / 2.:\n err = 'Supervoxel calculation problem here...'\n raise RuntimeError(err)\n \n return int_mask", "def crop(img, side):\n side = side // 2\n y = img.shape[0] // 2\n x = img.shape[1] // 2\n print(y, x)\n print(img.shape)\n return img[y - side:y +side, x - side : x+side]", "def get_slice(self, ref, sci):\n\n sci_shape = sci.shape\n sci_nx = sci_shape[-1]\n sci_ny = sci_shape[-2]\n\n # These are limits of slices.\n sci_x1 = sci.meta.subarray.xstart - 1\n sci_y1 = sci.meta.subarray.ystart - 1\n\n ref_x1 = ref.meta.subarray.xstart - 1\n ref_y1 = ref.meta.subarray.ystart - 1\n if ref_x1 is None:\n ref_x1 = sci_x1\n if ref_y1 is None:\n ref_y1 = sci_y1\n\n # Compute the slicing indexes\n xstart = sci_x1 - ref_x1\n ystart = sci_y1 - ref_y1\n xstop = xstart + sci_nx\n ystop = ystart + sci_ny\n\n # Check for errors in the slice indexes\n if (xstart < 0 or ystart < 0 or\n xstop > ref.data.shape[-1] or ystop > ref.data.shape[-2]):\n log.error(\"Science and reference file arrays not compatible\")\n raise ValueError(\"Can't extract matching subarray from \"\n \"reference data\")\n\n return slice(ystart, ystop), slice(xstart, xstop)", "def getXZSlice(self, nslice=0):\n from .surface import Plane\n slice = []\n if self.outRadius is None:\n 
return slice\n if nslice <= 0:\n if isinstance(self.surface, Plane):\n nslice = 2\n else:\n nslice = 50\n # Calculate (x,z) slice in local coordinates for x <= 0.\n x = np.linspace(-self.outRadius, -self.inRadius, nslice)\n y = np.zeros_like(x)\n z = self.surface.sag(x, y)\n # Transform slice to global coordinates.\n transform = CoordTransform(self.coordSys, globalCoordSys)\n xneg, yneg, zneg = transform.applyForward(x, y, z)\n if np.any(yneg != 0):\n print('WARNING: getXZSlice used for rotated surface \"{0}\".'\n .format(self.name)\n )\n # Calculate (x,z) slice in local coordinates for x >= 0.\n x *= -1\n x = x[::-1]\n z[:] = self.surface.sag(x, y)\n # Transform slice to global coordinates.\n xpos, ypos, zpos = transform.applyForward(x, y, z)\n if np.any(ypos != 0):\n print('WARNING: getXZSlice used for rotated surface \"{0}\".'\n .format(self.name)\n )\n slice.append(np.stack((xpos, zpos), axis=0))\n # Combine x <= 0 and x >= 0 half slices when inner = 0.\n if self.inRadius == 0:\n assert xneg[-1] == xpos[0] and zneg[-1] == zpos[0]\n return (\n np.stack((\n np.hstack((xneg, xpos[1:])),\n np.hstack((zneg, zpos[1:]))\n ), axis=0),\n )\n else:\n return (\n np.stack((xneg, zneg), axis=0),\n np.stack((xpos, zpos), axis=0)\n )", "def cut_from_rect(img, rect):\n return img[int(rect[0][1]):int(rect[1][1]), int(rect[0][0]):int(rect[1][0])]", "def DrawReslice(self, nrow, ncol, image, image_plane, slice_plane, \\\n xyzsize=None, scaling='local', xlabels=None, ylabels=None, \\\n x_label=None, y_label=None, fig_title=None, middle96=None):\n# Encode actions: (slice_axis, flip_slice, fliplr, flipud, transpose)\n axes_defs = {\\\n 'RAI':{'axial':(0,0,1,1,0), 'sagittal':(2,1,0,0,0), 'coronal':(1,0,1,0,0)}, \\\n 'ASL':{'axial':(1,1,0,1,1), 'sagittal':(0,0,0,1,0), 'coronal':(2,0,0,1,1)}, \\\n 'RSA':{'axial':(1,1,1,1,0), 'sagittal':(2,1,0,1,1), 'coronal':(0,0,1,1,0)}, \\\n 'RAS':{'axial':(0,1,1,1,0), 'sagittal':(2,0,0,0,0), 'coronal':(1,0,0,0,0)}, \\\n 'ASR':{'axial':(1,0,0,1,1), 'sagittal':(0,1,0,1,0), 'coronal':(2,1,0,1,1)}, \\\n 'RSP':{'axial':(1,0,1,1,0), 'sagittal':(2,0,0,1,1), 'coronal':(0,1,1,1,0)}}\n\n if image.ndim == 3:\n zdim, ydim, xdim = image.shape\n tdim = 1\n elif image.ndim == 4:\n tdim, zdim, ydim, xdim = image.shape\n shp = (tdim, zdim, ydim, xdim)\n\n images = []\n for entry in slice_plane:\n# Loop through each 2D image\n plane, slc = entry[:2]\n if len(entry) == 3:\n frm = entry[2]\n else:\n frm = 0\n axis_def = axes_defs[image_plane][plane]\n img = self.GetSlice(image.reshape(shp), axis_def, frm, slc, xyzsize)\n images.append(img)\n images = self.PadImages(images)\n self.DrawMany(images, nrow, ncol, xlabels, ylabels, x_label, y_label, \\\n scaling=scaling, fig_title=fig_title, middle96=middle96)", "def get_slice_mp(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n\n # pylint: disable=too-many-locals\n def work_get_slice(block, array_name, offset, s3_bucket, s3_key, shape, dtype):\n result = sa.attach(array_name)\n cell, sub_range = block\n\n item_size = np.dtype(dtype).itemsize\n s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size\n s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size\n data = self.s3io.get_byte_range(s3_bucket, s3_key, s3_start, s3_end)\n\n t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in\n zip(cell + tuple(sub_range), offset)]\n if data.dtype != dtype:\n data = np.frombuffer(data, dtype=dtype, count=-1, 
offset=0)\n # data = data.reshape([s.stop - s.start for s in sub_range])\n\n result[t] = data.reshape([s.stop - s.start for s in sub_range])\n\n if self.enable_compression:\n return self.get_slice_by_bbox(array_slice, shape, dtype, s3_bucket, s3_key)\n\n cdim = self.cdims(array_slice, shape)\n\n try:\n end = cdim[::-1].index(False) + 1\n except ValueError:\n end = len(shape)\n\n start = len(shape) - end\n\n outer = array_slice[:-end]\n outer_ranges = [range(s.start, s.stop) for s in outer]\n outer_cells = list(product(*outer_ranges))\n blocks = list(zip(outer_cells, repeat(array_slice[start:])))\n offset = [s.start for s in array_slice]\n\n array_name = generate_array_name('S3AIO')\n sa.create(array_name, shape=[s.stop - s.start for s in array_slice], dtype=dtype)\n shared_array = sa.attach(array_name)\n\n self.pool.map(work_get_slice, blocks, repeat(array_name), repeat(offset), repeat(s3_bucket),\n repeat(s3_key), repeat(shape), repeat(dtype))\n\n sa.delete(array_name)\n return shared_array", "def test_get_slice_dense(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n\n bn, bm = 5, 5\n x = np.random.randint(100, size=(30, 30))\n ds_data = ds.array(x=x, block_size=(bn, bm))\n data = ds.array(x=x, block_size=(bn, bm))\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n\n slice_indices = [(7, 22, 7, 22), # many row-column\n (6, 8, 6, 8), # single block row-column\n (6, 8, None, None), # single-block rows, all columns\n (None, None, 6, 8), # all rows, single-block columns\n (15, 16, 15, 16), # single element\n # (-10, -5, -10, -5), # out-of-bounds (not\n # implemented)\n # (-10, 5, -10, 5), # out-of-bounds (not implemented)\n (21, 40, 21, 40)] # out-of-bounds (correct)\n\n for top, bot, left, right in slice_indices:\n got = data[top:bot, left:right].collect()\n expected = ds_data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))\n\n # Try slicing with irregular array\n x = data[1:, 1:]\n data = ds_data[1:, 1:]\n\n for top, bot, left, right in slice_indices:\n got = x[top:bot, left:right].collect()\n expected = data[top:bot, left:right].collect()\n\n self.assertTrue(equal(got, expected))", "def __getslice__(self, i, j): \n ids = numpy.where((self.id_list >= i) & (self.id_list < j))[0]\n return self.id_slice(ids)", "def get_slice_by_bbox(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n # Todo:\n # - parallelise reads and writes\n # - option 1. use get_byte_range_mp\n # - option 2. 
smarter byte range subsets depending on:\n # - data size\n # - data contiguity\n\n item_size = np.dtype(dtype).itemsize\n s3_begin = (np.ravel_multi_index(tuple([s.start for s in array_slice]), shape)) * item_size\n s3_end = (np.ravel_multi_index(tuple([s.stop - 1 for s in array_slice]), shape) + 1) * item_size\n\n # if s3_end-s3_begin <= 5*1024*1024:\n # d = self.s3io.get_byte_range(s3_bucket, s3_key, s3_begin, s3_end)\n # else:\n # d = self.s3io.get_byte_range_mp(s3_bucket, s3_key, s3_begin, s3_end, 5*1024*1024)\n\n d = self.s3io.get_bytes(s3_bucket, s3_key)\n\n if self.enable_compression:\n cctx = zstd.ZstdDecompressor()\n d = cctx.decompress(d)\n\n d = np.frombuffer(d, dtype=np.uint8, count=-1, offset=0)\n d = d[s3_begin:s3_end]\n\n cdim = self.cdims(array_slice, shape)\n\n try:\n end = cdim[::-1].index(False) + 1\n except ValueError:\n end = len(shape)\n\n start = len(shape) - end\n\n outer = array_slice[:-end]\n outer_ranges = [range(s.start, s.stop) for s in outer]\n outer_cells = list(product(*outer_ranges))\n blocks = list(zip(outer_cells, repeat(array_slice[start:])))\n item_size = np.dtype(dtype).itemsize\n\n results = []\n for cell, sub_range in blocks:\n s3_start = (np.ravel_multi_index(cell + tuple([s.start for s in sub_range]), shape)) * item_size\n s3_end = (np.ravel_multi_index(cell + tuple([s.stop - 1 for s in sub_range]), shape) + 1) * item_size\n data = d[s3_start - s3_begin:s3_end - s3_begin]\n results.append((cell, sub_range, data))\n\n result = np.empty([s.stop - s.start for s in array_slice], dtype=dtype)\n offset = [s.start for s in array_slice]\n\n for cell, sub_range, data in results:\n t = [slice(x.start - o, x.stop - o) if isinstance(x, slice) else x - o for x, o in\n zip(cell + tuple(sub_range), offset)]\n if data.dtype != dtype:\n data = np.frombuffer(data, dtype=dtype, count=-1, offset=0)\n result[tuple(t)] = data.reshape([s.stop - s.start for s in sub_range])\n\n return result", "def slicer(array, idx, axis):\n sl = [slice(None)] * array.ndim\n sl[axis] = idx\n return array[tuple(sl)]", "def _slice(self, X):\n XTranspose = np.array(X).transpose()\n slices = []\n nbFeatures, nbGroups, nbFeaturePerGroup = self._groupsInfo(X.shape[1])\n imgIncluded = self._convolExtractor.isImageIncluded()\n for i in xrange(nbGroups):\n Xtmp = XTranspose[i*nbFeaturePerGroup:(i+1)*nbFeaturePerGroup]\n slices.append(Xtmp.transpose())\n if imgIncluded and not self._compressImage:\n slices = slices[1:]\n self._imgSize = nbFeaturePerGroup\n return slices", "def plot_slice(image: sitk.Image):\n img_arr = sitk.GetArrayFromImage(image)\n plt.figure()\n plt.imshow(img_arr[80, :, :], cmap='gray')\n plt.colorbar()\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a timer at the end of which a new vessel will be generated.
def _create_vessel_generation_timer(self):
    inter_arrival_time = self.inter_arrival_time_sampler()
    self.generation_timer = SimulationTimer(
        duration=inter_arrival_time,
        target_function=self.generate_vessel)
    TimerScheduler.get_instance().schedule(self.generation_timer)
[ "def handle_create_timer(self, message):\n if self.neon_in_request(message):\n content = self._extract_alert_params(message, AlertType.TIMER)\n content[\"kind\"] = int(AlertType.TIMER)\n LOG.info(content)\n self.confirm_alert(\"timer\", content, message)", "def _create(self, duration_s, name, context):\n # Add a new timer\n uid = str(uuid.uuid1())\n context.minuteur_timers[uid] = {'uid': uid}\n context.minuteur_timers[uid]['name'] = name\n context.minuteur_timers[uid]['start'] = time()\n context.minuteur_timers[uid]['end'] = time() + duration_s\n context.minuteur_timers[uid]['active'] = True\n Minuteur._ActiveTimers[uid] = NeoTimer(duration_s = duration_s, user_cbk = self._timeout_cbk, user_param = {'context': context, 'uid': uid})", "def _start_timer(self):\r\n self.timer.Start(50)", "def set_timer(self, timer):\n self.timer = timer", "def startTimer(self, description):\r\n pbfProject = GetCurrentTogglProject()\r\n timeEntry = pytoggl.TimeEntry(description=description, pid=pbfProject.togglProject.id, created_with=\"TogglPBF\")\r\n pbfProject.togglAPI.timer.startTimer(timeEntry)", "def create_timer_objects(streamer_obj, viewer_name, timer, curr_time) -> None:\r\n\r\n viewer_object = streamer_obj.viewer_objects[viewer_name]\r\n viewer_object.join_time = curr_time\r\n viewer_object.timer_obj = timer()\r\n\r\n viewer_object.time_passed_obj = timer()\r\n\r\n class_definition_and_manipulation.set_active_viewer(viewer_object)", "def deadlineTimer(deadline):", "def update_timer(self, time):\n self.timer += time", "def randomise(self):\n self.timer = self.period * random.random()", "def start_timer(self):\n\t\tself.start_time = time.clock()", "def createPlayTimers(self):\n # Destroy all old timers\n self.destroyPlayTimers()\n\n # Create the timers\n self.timSec = gobject.timeout_add_seconds(1, self.secondTimer)", "def createIdleTimer(self):\n self._idleTimer = gobject.timeout_add(globals.IDLE_TIMEOUT, self.hideFullscreenControls)", "def simulate( self, finishTime ):\n ...", "def start_timer():\n TIMERS[\"procedure worker\"] = Timer(1, worker)\n TIMERS[\"procedure worker\"].start()", "def start_timing(self):\n self._time = time()\n self._timer = time()", "def init_timer(self):\n self._start_time = time.time()", "def tween_timer(self, duration: float) -> TimerTweener:\n tweener = TimerTweener(duration=duration)\n self._tweeners.append(tweener)\n return tweener", "def test_dummy_timer (self):\n with Timeout(None):\n sleep(DELAY)\n sleep(DELAY)", "def _set_up_timer(self):\r\n timer_frame = tk.Frame(self)\r\n timer_frame.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)\r\n tk.Label(timer_frame, text='Timer').pack()\r\n\r\n self._time_label = tk.Label(timer_frame)\r\n self._time_label.pack()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialize the vessel and its components and add them to the world.
def generate_vessel(self):
    # FIXME: This should be dependent on the vessel type
    vessel_velocity = self.default_speed_knots
    vessel = self.world.create_entity()
    spawn_point = random_point_in_polygon(self.spawn_area)
    vessel_info = self.vessel_info_sampler()
    velocity = Velocity(velocity=vessel_velocity)
    self.world.add_component(vessel, Position(lonlat=spawn_point))
    self.world.add_component(vessel, FrameCounter())
    self.world.add_component(vessel, Course())
    self.world.add_component(vessel, velocity)
    self.world.add_component(vessel, vessel_info)
    self.world.add_component(vessel, VesselPath())
    if random.random() <= self.anomalous_vessels_percent:
        # Add a markov model for creating speed anomalies
        speed_fsm = SpeedStateMachine(
            double_p=self.speed_model_probabilities["double"],
            halve_p=self.speed_model_probabilities["half"],
            normal_p=self.speed_model_probabilities["reset"])
        self.world.add_component(vessel, speed_fsm)
    vessel_state_machine = VesselStateMachine()
    vessel_state_machine.generate()
    vessel_state_machine.fsm.onchangestate = lambda x: self._log_vessel_event(
        vessel, vessel_info, vessel_state_machine, velocity, f"{x.src} → {x.dst}")
    self.world.add_component(
        vessel, vessel_state_machine)
[ "def place_vessel(type, x, y):\n vessel = scene.addObject(type, \"gameLogic\")\n vessel.worldPosition = (x, y, 0.0)\n return vessel", "def initGL(self):\t\t\n\n\t\tpass", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoGLVBOElement_init(self, state)", "def __init__(self):\n this = _coin.new_SoVRMLSphere()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoSFVec2d()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoVRMLWorldInfo()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinateSphere()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoTextureCoordinateEnvironment()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def initialize_enter(self):\n GL.glClearColor(0.25, 0.28, 0.31, 1.0)\n GL.glClearDepth(1.0)\n\n # Polygon Rasterization\n GL.glPolygonMode( GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n\n # Polygon Anti-Aliasing\n GL.glEnable( GL.GL_POLYGON_SMOOTH )\n\n # Light Shading\n GL.glShadeModel( GL.GL_SMOOTH )\n\n # Enable Back Face Culling\n GL.glEnable( GL.GL_CULL_FACE )\n GL.glCullFace( GL.GL_BACK )\n\n # Enable Depth Testing\n GL.glEnable( GL.GL_DEPTH_TEST )\n GL.glDepthFunc( GL.GL_LEQUAL )\n GL.glDepthMask( GL.GL_TRUE )\n\n # Misc\n GL.glDisable( GL.GL_FOG )\n GL.glDisable( GL.GL_TEXTURE_2D )", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoGLUpdateAreaElement_init(self, state)", "def __init__(self):\n this = _coin.new_SoVRMLSphereSensor()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n # Define vector displacement from COM to Sun\n self.r_s = np.array([-constants.MASS_JUPITER * constants.R / (constants.MASS_JUPITER + constants.MASS_SUN), 0,\n 0])\n\n # Define vector displacement from COM to Jupiter\n self.r_j = np.array([constants.MASS_SUN * constants.R / (constants.MASS_JUPITER + constants.MASS_SUN), 0,\n 0])", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoGLViewportRegionElement_init(self, state)", "def __init__(self):\n this = _coin.new_SoVRMLCylinder()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoSFVec3d()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def _init_vectors(self):\r\n self.vectors = VectorsDict(self.modhandler)\r\n self.support_vectors = VectorsDict(self.modhandler)", "def __init__(self):\n this = _coin.new_SoSFVec4ub()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def initialize_panel_vortices(self):\n # Find the freestream direction in geometry axes.\n freestream_direction = (\n self.operating_point.calculate_freestream_direction_geometry_axes()\n )\n\n # Iterate through the current_airplane's wings.\n for wing in self.airplane.wings:\n\n # Find a suitable length for the \"infinite\" legs of the horseshoe\n # vortices on this wing. 
At twenty-times the wing's span, these legs are\n # essentially infinite.\n infinite_leg_length = wing.span * 20\n\n # Iterate through the wing's chordwise and spanwise panel positions.\n for chordwise_position in range(wing.num_chordwise_panels):\n for spanwise_position in range(wing.num_spanwise_panels):\n # Pull the panel object out of the wing's list of panels.\n panel = wing.panels[chordwise_position, spanwise_position]\n\n # Find the location of the panel's front and right vortex vertices.\n front_left_vortex_vertex = panel.front_left_vortex_vertex\n front_right_vortex_vertex = panel.front_right_vortex_vertex\n\n # Initialize the horseshoe vortex at this panel.\n panel.horseshoe_vortex = aerodynamics.HorseshoeVortex(\n finite_leg_origin=front_right_vortex_vertex,\n finite_leg_termination=front_left_vortex_vertex,\n strength=None,\n infinite_leg_direction=freestream_direction,\n infinite_leg_length=infinite_leg_length,\n )", "def __init__(self):\n this = _coin.new_SoSFVec2s()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes counts and proportions for assignments of tokens to topics.
def write_token_topic_freqs(fpath, topic_counts):
    topic_props = topic_counts / np.sum(topic_counts)
    with open(fpath, 'w', newline='') as outfile:
        fwriter = csv.writer(outfile)
        fwriter.writerow(['topic', 'token_count', 'token_proportion'])
        for (t_index, (t_count, t_prop)) in enumerate(zip(topic_counts, topic_props)):
            fwriter.writerow([t_index, t_count, t_prop])
[ "def _update_topic_size(self, documents: pd.DataFrame):\n self.topic_sizes_ = collections.Counter(documents.Topic.values.tolist())\n self.topics_ = documents.Topic.astype(int).tolist()", "def _write_topic(self, topic):\n index_of = self._index\n startElement, endElement, newline = self._writer.startElement, self._writer.endElement, self._writer.newline\n startElement(u'topic', {u'number': index_of(topic)})\n newline()\n self._write_locators(u'subjectIdentifiers', topic.sids)\n self._write_locators(u'subjectLocators', topic.slos)\n self._write_iids(topic)\n write_name = self._write_name\n for pos, name in enum(self._names(topic)):\n write_name(name, pos)\n write_occurrence = self._write_occurrence\n for pos, occ in enum(self._occs(topic)):\n write_occurrence(occ, pos)\n emptyElement = self._writer.emptyElement\n for role in sorted(topic.roles_played, self._cmp_role):\n emptyElement(u'rolePlayed', {u'ref': u'association.%s.role.%s' % (index_of(role.parent), index_of(role))})\n newline()\n endElement(u'topic')\n newline()", "def sample_counts(self):\n\n self.A_dk.fill(0)\n self.B_kw.fill(0)\n\n if self.do_test:\n self.A_dk_test.fill(0)\n self.B_kw_test.fill(0)\n\n # todo: sample a topic for each (doc, word) and update A_dk, B_kw correspondingly\n self.docs_topic = np.zeros((self.n_docs,self.n_words)) #np.zeros((self.n_docs, self.n_words))\n for docs in range(self.n_docs):\n for word in range(self.n_words):\n for _ in range(self.docs_words[docs, word]): #for number of occurance of the word\n sample_topic = np.random.choice(self.topics_space,p = self.topic_doc_words_distr[:,docs,word]) #sample topic for each word in each document\n self.A_dk[docs, sample_topic] += 1\n self.B_kw[sample_topic, word] += 1\n #update A_dk\n #print(self.docs_topic[docs,:])\n #unique , counts = np.unique(self.docs_topic[docs,:],return_counts = True)\n #for topic in range(self.n_topics):\n # self.A_dk[docs,topic] = np.count_nonzero(self.docs_topic[docs,:] == topic)\n\n #update B_kw np.zeros((self.n_topics, self.n_words))\n\n #for topic in range(self.n_topics):\n # for word in range(self.n_words):\n # self.B_kw[topic , word] = np.count_nonzero(self.docs_topic[:,word] == topic )\n\n pass", "def get_topic_distributions(model_info, corpus_name, subreddit_list):\r\n\t\r\n # initialize where topic counts will be stored for each model indicated in model_info\r\n model_dict = initialize_model_counters(model_info, subreddit_list)\r\n print()\r\n\r\n # iterate through each subreddit, each of its documents, and each word type in its documents to get counts.\r\n for subreddit in subreddit_list:\r\n\r\n current_time = datetime.datetime.now()\r\n print(str(current_time) + ' : starting ' + subreddit)\r\n print('--------------------')\r\n\r\n corpus_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name)\r\n corpus_metadata_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name + '_metadata.csv')\r\n corpus = gensim.corpora.MmCorpus(corpus_fpath)\r\n\r\n for doc in corpus:\r\n if len(doc) < 25:\r\n continue\r\n\r\n # For each model, get theta for the document.\r\n model_theta_dict = {}\r\n for model_key in model_dict.keys():\r\n doc_dist_gensim = model_dict[model_key]['model'][doc]\r\n k = model_key[3]\r\n doc_dist_numpy = np.zeros(k, dtype='float64')\r\n for (topic, val) in doc_dist_gensim:\r\n doc_dist_numpy[topic] = val\r\n\r\n # now that we have this document's theta, add it to the sum.\r\n model_dict[model_key]['doc_topic_sums'][subreddit] += doc_dist_numpy\r\n\r\n # From Thompson & Blei 
(2018):\r\n nz_theta_d = doc_dist_numpy > 0\r\n log_theta_d = xlogy(nz_theta_d, doc_dist_numpy)\r\n\r\n model_theta_dict[model_key] = {'nz_theta_d': nz_theta_d, 'log_theta_d': log_theta_d}\r\n\r\n # For each word type that occurs in doc, iterate through each model to make topic assignments.\r\n model_doc_token_topics = {model_key: np.zeros(model_key[3]) for model_key in model_dict}\r\n for (word_id, word_count) in doc:\r\n\r\n # Estimate topics for each model.\r\n for model_key in model_dict:\r\n k = model_key[3]\r\n #topic_assingments = assign_type_to_topic()\r\n\r\n # From Thompson & Blei (2018). Basically for the current word, get its\r\n # probability in each topic (nz_phis.T[word_id]). Multiply each element in this k-dimensional\r\n # vector by the corresponding elements in the document's nonzero theta vector. For each element\r\n # that is nonzero, return exponent(log phi values of the word in each topic + log theta values\r\n # of the document. Otherwise, return 0. Not sure why the .ravel() at the end--it seems that\r\n # this will return a k-dimensional vector with or without it. The resulting distribution\r\n # provides the distribution p(topic | word) from which we can make an assignment of the token\r\n # to a topic.\r\n topic_dist = np.where(model_dict[model_key]['nz_phis'].T[word_id] * model_theta_dict[model_key]['nz_theta_d'] != 0,\r\n np.exp(model_dict[model_key]['log_phis'].T[word_id] + model_theta_dict[model_key]['log_theta_d']),\r\n 0.0).ravel()\r\n\r\n # Normalize distribution p(topic | word, phi, theta):\r\n topic_dist = topic_dist / topic_dist.sum()\r\n\r\n # Draw a topic from topic_dist for however many times the word occurs in the document.\r\n topics = np.random.choice(k, size=int(word_count), p=topic_dist)\r\n\r\n for topic_i in topics:\r\n model_doc_token_topics[model_key][topic_i] += 1\r\n\r\n # now we have token-topic assingment counts for each word type present in the current document.\r\n # START HERE -->\r\n # update token-topic assignment counts\r\n for model_key in model_dict:\r\n model_doc_topic_counts = model_doc_token_topics[model_key]\r\n\r\n model_dict[model_key]['token_topic_counts'][subreddit] += model_doc_topic_counts\r\n\r\n # also make the token-topic distribution and add it to ongoing count\r\n model_doc_token_dist = model_doc_topic_counts / model_doc_topic_counts.sum()\r\n model_dict[model_key]['doc_topic_tokens_sums'][subreddit] += model_doc_token_dist\r\n\r\n model_dict[model_key]['doc_counts'][subreddit] += 1\r\n\r\n # Now we are done with all documents in a subreddit. 
Summary stats for the subreddit can now be calculated\r\n # including the average theta distribution, the distribution of token-topic assignments, & the average\r\n # token-topic document distribution.\r\n for model_key in model_dict.keys():\r\n\r\n # All token-topic assignments have been counted for this subreddit, so store those counts in\r\n # token_assignment_counts for later use and write them to file.\r\n token_topic_freqs_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'token_topic_freqs_' + subreddit + '.csv')\r\n write_token_topic_freqs(token_topic_freqs_fpath,\r\n model_dict[model_key]['token_topic_counts'][subreddit])\r\n\r\n # Find average theta distribution by dividing the summed thetas by the number of documents.\r\n avg_doc_topic_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_' + subreddit + '.txt')\r\n avg_doc_topic = model_dict[model_key]['doc_topic_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_fpath, avg_doc_topic)\r\n\r\n # Find the average topic distribution of each document from token-topic assignments by dividing the sum of the\r\n # document distributions by the number of documents.\r\n avg_doc_topic_tokens_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_from_tokens_' + subreddit + '.txt')\r\n avg_doc_topic_from_tokens = model_dict[model_key]['doc_topic_tokens_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_tokens_fpath, avg_doc_topic_from_tokens)\r\n\r\n # topic model summary files can now be written\r\n # Topic summary file. Possible things to include:\r\n # - entropy of the topic's word distribution (what does this really tell us that is useful?)\r\n # - entropy of topic over subreddits\r\n # - top N words & probabilities OR top words & probabilities up to some cumulative probability (eg, the\r\n # topic words needed to account for at least 50% of the topic's word distribution.\r\n # - number of tokens assigned to each subreddit. 
Can also do as a proportion of a subreddit's tokens\r\n # assigned to each topic.\r\n for model_key in model_dict:\r\n subreddit_entropy_counts, subreddit_entropy_props = get_subreddit_entropy(model_dict[model_key]['token_topic_counts'])\r\n\r\n phis = model_dict[model_key]['model'].get_topics()\r\n k = model_key[3]\r\n topic_entropies = [measures.entropy(phis[topic_i]) for topic_i in range(k)]\r\n\r\n max_subreddit_count, max_subreddit_prop = get_subreddits_w_max_topics(model_dict[model_key]['token_topic_counts'])\r\n\r\n # model_key = (training_corpus_type, sample_name, corpus_name, k)\r\n topic_summary_fpath = os.path.join(cons.lda_dir,\r\n model_key[0],\r\n model_key[1],\r\n model_key[1] + '-' + str(k),\r\n 'topics_summary.csv')\r\n write_topics_summary_file(topic_summary_fpath,\r\n subreddit_entropy_counts, subreddit_entropy_props,\r\n topic_entropies,\r\n max_subreddit_count, max_subreddit_prop,\r\n model_dict[model_key]['model'],\r\n k)", "def write(self, topicmap):\n remove_duplicates(topicmap)\n self._create_index(topicmap)\n writer = self._writer\n writer.startDocument()\n writer.startElement(u'topicMap', self._add_reifier({}, topicmap))\n writer.newline()\n self._write_iids(topicmap)\n write_topic = self._write_topic\n for topic in self._topics:\n write_topic(topic)\n write_assoc = self._write_association\n for assoc in self._assocs:\n write_assoc(assoc)\n writer.endElement(u'topicMap')\n writer.newline()\n writer.endDocument()\n self._topics = None\n self._assocs = None\n self._assoc2roles = None\n self._tmc2id = None\n self._iri2norm = None", "def __init__(self, n, ngram_counts, vocab, unk=False):\n\n self.n = n\n\n self.vocab = vocab\n\n self.V = len(vocab)\n\n self.ngram_counts = ngram_counts\n\n # YOUR CODE HERE\n # START BY MAKING THE RIGHT COUNTS FOR THIS PARTICULAR self.n\n # for unigrams, we only need total word count\n if n == 1:\n self.total_count = sum(self.ngram_counts.values())\n # for bigrams, we need total count wrt each word. In our language, it is history count.\n elif n == 2:\n self.history_count = Counter()\n for k, v in self.ngram_counts.items():\n self.history_count[k[0]] = self.history_count[k[0]] + v\n # since we only count for the first word in the tuple, we will always\n # miss counting </s>. However, since the frequency of </s> is the same\n # as the frequency of <s>, we can simply assign it equal to it.\n self.history_count['</s>'] = self.history_count['<s>']", "def get_subreddits_w_max_topics(token_assignment_counts):\r\n max_topic_counts = []\r\n max_topic_props = []\r\n\r\n sub_list = [sub for sub in token_assignment_counts.keys()]\r\n\r\n k = len(token_assignment_counts[sub_list[0]])\r\n\r\n for topic_index in range(k):\r\n sub_topic_counts = []\r\n sub_topic_props = []\r\n\r\n for subreddit in sub_list:\r\n\r\n # Number of tokens from subreddit assigned to topic.\r\n subreddit_topic_count = token_assignment_counts[subreddit][topic_index]\r\n\r\n # Count of all tokens from the subreddit (sums over how many tokens from the subreddit were assigned to each\r\n # topic).\r\n subreddit_count = sum(token_assignment_counts[subreddit])\r\n\r\n subreddit_topic_prop = subreddit_topic_count / subreddit_count\r\n\r\n sub_topic_counts.append((subreddit, subreddit_topic_count))\r\n sub_topic_props.append((subreddit, subreddit_topic_prop))\r\n\r\n # Sort the tuples of (subreddit, topic count) from highest to lowest topic counts. Then take the top 3. 
Do the\r\n # same for proportions.\r\n top_3_counts = sorted(sub_topic_counts, key=lambda x: x[1], reverse=True)[:3]\r\n top_3_props = sorted(sub_topic_props, key=lambda x: x[1], reverse=True)[:3]\r\n\r\n max_topic_counts.append(top_3_counts)\r\n max_topic_props.append(top_3_props)\r\n\r\n return max_topic_counts, max_topic_props", "def score(self, tokens, *args, **kwargs):\n\n weights = { token: tokens.count(token) for token in tokens }\n return weights", "def add_tokens_count(article):\n article['tokens_count'] = Counter(article['text'])\n return article", "def topic_counts(model, top_n):\n counts = {}\n threshold = 2.0 / model.k\n for k in range(model.k):\n counts[k] = 0\n for doc in tqdm(model.docs):\n for k, v in doc.get_topics(top_n=top_n):\n if v < threshold: next\n counts[k] += 1\n return counts", "def topicPartitionCount(self, topic):\n raise NotImplementedError", "def tweet_stats(twitter_content, language, outfile):\r\n\ttoken_without_frequencies, token_frequencies, upos_frequencies, ner_frequencies, num_sentences, max_sentence, min_sentence, max_tweet, min_tweet = calculate_stats(twitter_content, language)\r\n\tprint(\"Number of types:\" + str(len(token_frequencies.keys())))\r\n\tprint(\"Number of tokens:\" + str(sum(token_frequencies.values())))\r\n\tprint(\"Type/token ratio:\" + str((len(token_frequencies.keys()) / sum(token_frequencies.values()))))\r\n\tprint()\r\n\tprint(\"Average number of tokens per sentence:\" + str(sum(token_frequencies.values()) / num_sentences))\r\n\tprint(\"Highest number of tokens in a sentence:\" + str(max_sentence))\r\n\tprint(\"Lowest number of tokens in a sentence:\" + str(min_sentence))\r\n\tprint()\r\n\tprint(\"Average number of tokens per tweet:\" + str(sum(token_frequencies.values()) / len(twitter_content['Text'])))\r\n\tprint(\"Highest number of tokens in a tweet:\" + str(max_tweet))\r\n\tprint(\"Lowest number of tokens in a tweet:\" + str(min_tweet))\r\n\tprint()\r\n\tprint(\"Number of types without stopwords and punctuation:\" + str(len(token_without_frequencies.keys())))\r\n\tprint(\"Number of tokens without stopwords and punctuation:\" + str(sum(token_without_frequencies.values())))\r\n\tprint(\"Type/token ratio without stopwords and punctuation:\" + str(\r\n\t\t(len(token_without_frequencies.keys()) / sum(token_frequencies.values()))))\r\n\tprint(\"50 most common tokens without stopwords and punctuation:\" + str(token_without_frequencies.most_common(50)))\r\n\tprint()\r\n\tprint(\"Most common pos-tags:\" + str(upos_frequencies.most_common()))\r\n\tprint()\r\n\tprint(\"Most common named entity tags:\" + str(ner_frequencies.most_common()))\r\n\r\n\t#writes the results to the outfile\r\n\toutfile.write(\"\\n\\nNumber of types:\" + str(len(token_frequencies.keys())))\r\n\toutfile.write(\"\\nNumber of tokens:\" + str(sum(token_frequencies.values())))\r\n\toutfile.write(\"\\nType/token ratio:\" + str((len(token_frequencies.keys()) / sum(token_frequencies.values()))))\r\n\toutfile.write(\"\\n\\nAverage number of tokens per sentence:\" + str(sum(token_frequencies.values()) / num_sentences))\r\n\toutfile.write(\"\\nHighest number of tokens in a sentence:\" + str(max_sentence))\r\n\toutfile.write(\"\\nLowest number of tokens in a sentence:\" + str(min_sentence))\r\n\toutfile.write(\"\\n\\nAverage number of tokens per tweet:\" + str(sum(token_frequencies.values()) / len(twitter_content['Text'])))\r\n\toutfile.write(\"\\nHighest number of tokens in a tweet:\" + str(max_tweet))\r\n\toutfile.write(\"\\nLowest number of tokens in a tweet:\" + 
str(min_tweet))\r\n\toutfile.write(\"\\n\\nNumber of types without stopwords and punctuation:\" + str(len(token_without_frequencies.keys())))\r\n\toutfile.write(\"\\nNumber of tokens without stopwords and punctuation:\" + str(sum(token_without_frequencies.values())))\r\n\toutfile.write(\"\\nType/token ratio without stopwords and punctuation:\" + str(\r\n\t\t(len(token_without_frequencies.keys()) / sum(token_frequencies.values()))))\r\n\toutfile.write(\"\\n50 most common tokens without stopwords and punctuation:\" + str(token_without_frequencies.most_common(50)))\r\n\toutfile.write(\"\\n\\nMost common pos-tags:\" + str(upos_frequencies.most_common()))\r\n\toutfile.write(\"\\nMost common named entity tags:\" + str(ner_frequencies.most_common()))\r\n\tplot_general_freqs(upos_frequencies, 'Part of Speech', language)\r\n\tplot_general_freqs(ner_frequencies, 'Named Entity', language)", "def split():\n with open(SPEECHES_FILE, 'rb') as pickle_file:\n speeches = pickle.load(pickle_file)\n\n for politician, speeches in speeches.items():\n filename = './data/{}.txt'.format(politician)\n with open(filename, 'wt', encoding='utf8') as speeches_file:\n num_char = 0\n num_words = 0\n for speech in speeches:\n # write header and text of speech to file\n session = speech['session']\n header = '# {period:} - {title:} am {published:} ({url:})\\n'.format(\n **session)\n speeches_file.write(header)\n\n # write speech\n speech_text = speech['speech'].replace('- - ', '') # parenthesis artifcat\n speeches_file.write(speech_text + '\\n\\n')\n\n # count metrics\n num_char += len(speech['speech'])\n num_words += len(speech['speech'].split())\n\n logger.info('Metrics of {}: chars: {}, words: {}'.format(\n politician, num_char, num_words))", "def send_metrics(self) -> None:\n\t\tself.get_cpu_metrics()\n\t\tself.get_memory_metrics()\n\t\tmessage = {\n\t\t\t'type': 'log',\n\t\t\t'content': {\n\t\t\t\t'mac_id': self.mac_id,\n\t\t\t\t'producer_id': self.client_id,\n\t\t\t\t'cpu_metrics': self.cpu_percentages,\n\t\t\t\t'memory_metrics': self.memory_percentages\n\t\t\t}\n\t\t}\n\t\tself.producer.send(self.kafka_topic, json.dumps(message).encode(\"utf-8\"))\n\t\tself.producer.flush()", "def task_report(self, topics):\n topic_action_target = {\n \"size\": (self.write_size, self.target_size_path),\n \"mtime\": (self.write_mtime, self.target_mtime_path),\n \"alldata\": (self.write_alldata, self.target_alldata_path),\n }\n return {\n \"name\": self.source.name,\n \"file_dep\": [self.source],\n \"actions\": [self.ensure_target_dir] +\n [topic_action_target[topic][0] for topic in topics],\n \"targets\": [topic_action_target[topic][1] for topic in topics],\n \"clean\": [clean_targets, self.remove_target_dir],\n }", "def _count_frequencies(self, tokens: list) -> dict:\n frequencies = defaultdict(lambda: 0)\n\n for token in tokens:\n frequencies[token] += 1\n\n return frequencies", "def report(result, filename=\"default\", limit=25):\n topicsfile = open(filename + \".txt\", 'w')\n for topic in result:\n topicsfile.write(\"------------\\nTopic %i\\n------------\\n\" % \\\n (topic[0]))\n\n word = 0\n words_list = topic[1].split(\"*\")\n for i in range(len(words_list) - 1):\n first_ind = words_list[i + 1].find('\"')\n second_ind = words_list[i + 1].find('\"', first_ind + 1)\n topicsfile.write(\"%0.5f\\t%s\\n\" % \\\n (float(words_list[i][-5:]),\n words_list[i + 1][first_ind + 1:second_ind]))\n\n word += 1\n if word > limit:\n break\n topicsfile.close()", "def add_tokens_to_namespace(self, tokens, namespace):\n\n if namespace not in 
self.vocab:\n self.__namespace_init(namespace)\n logger.error('Add Namespace {} into vocabulary.'.format(namespace))\n\n for token in tokens:\n if token not in self.vocab[namespace]:\n self.vocab[namespace][token] = len(self.vocab[namespace])", "def store_phrase_topics(document_phrase_topics, path=\"topmine/intermediate_output/phrase_topics.txt\"):\n f = open(path, 'w')\n for document in document_phrase_topics:\n f.write(\",\".join(str(phrase) for phrase in document))\n f.write(\"\\n\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get entropy of topics' spread over subreddits to see how subreddit-specific they are.
def get_subreddit_entropy(token_assignment_counts):
    sub_entropy_counts = []
    sub_entropy_props = []

    sub_list = [sub for sub in token_assignment_counts.keys()]

    k = len(token_assignment_counts[sub_list[0]])

    for topic_index in range(k):
        topic_counts = []
        topic_props = []
        for subreddit in sub_list:
            subreddit_topic_count = token_assignment_counts[subreddit][topic_index]
            topic_counts.append(subreddit_topic_count)

            subreddit_topic_prop = subreddit_topic_count / float(sum(token_assignment_counts[subreddit]))
            topic_props.append(subreddit_topic_prop)

        topic_counts = np.array(topic_counts)
        topic_props = np.array(topic_props)

        topic_counts_dist = topic_counts / np.sum(topic_counts)
        topic_props_dist = topic_props / np.sum(topic_props)

        sub_entropy_counts.append(measures.entropy(topic_counts_dist))
        sub_entropy_props.append(measures.entropy(topic_props_dist))

    return sub_entropy_counts, sub_entropy_props
[ "def corrected_discrete_entropy(topTopics):\n uniqueTopTopics = set()\n uniqueTopTopics.update(topTopics)\n sumVar = 0\n for topic in uniqueTopTopics:\n pTopic = topTopics.count(topic)/float(len(topTopics))\n sumVar += pTopic*np.log(pTopic)\n if len(topTopics) > 1: # handles edge case of having just 1 source and 1 top topic\n correction = len(topTopics)/(len(topTopics)-1) # N/(N-1)\n else:\n correction = 1\n return -1*sumVar*correction", "def get_subreddits_w_max_topics(token_assignment_counts):\r\n max_topic_counts = []\r\n max_topic_props = []\r\n\r\n sub_list = [sub for sub in token_assignment_counts.keys()]\r\n\r\n k = len(token_assignment_counts[sub_list[0]])\r\n\r\n for topic_index in range(k):\r\n sub_topic_counts = []\r\n sub_topic_props = []\r\n\r\n for subreddit in sub_list:\r\n\r\n # Number of tokens from subreddit assigned to topic.\r\n subreddit_topic_count = token_assignment_counts[subreddit][topic_index]\r\n\r\n # Count of all tokens from the subreddit (sums over how many tokens from the subreddit were assigned to each\r\n # topic).\r\n subreddit_count = sum(token_assignment_counts[subreddit])\r\n\r\n subreddit_topic_prop = subreddit_topic_count / subreddit_count\r\n\r\n sub_topic_counts.append((subreddit, subreddit_topic_count))\r\n sub_topic_props.append((subreddit, subreddit_topic_prop))\r\n\r\n # Sort the tuples of (subreddit, topic count) from highest to lowest topic counts. Then take the top 3. Do the\r\n # same for proportions.\r\n top_3_counts = sorted(sub_topic_counts, key=lambda x: x[1], reverse=True)[:3]\r\n top_3_props = sorted(sub_topic_props, key=lambda x: x[1], reverse=True)[:3]\r\n\r\n max_topic_counts.append(top_3_counts)\r\n max_topic_props.append(top_3_props)\r\n\r\n return max_topic_counts, max_topic_props", "def get_topic_distributions(model_info, corpus_name, subreddit_list):\r\n\t\r\n # initialize where topic counts will be stored for each model indicated in model_info\r\n model_dict = initialize_model_counters(model_info, subreddit_list)\r\n print()\r\n\r\n # iterate through each subreddit, each of its documents, and each word type in its documents to get counts.\r\n for subreddit in subreddit_list:\r\n\r\n current_time = datetime.datetime.now()\r\n print(str(current_time) + ' : starting ' + subreddit)\r\n print('--------------------')\r\n\r\n corpus_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name)\r\n corpus_metadata_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name + '_metadata.csv')\r\n corpus = gensim.corpora.MmCorpus(corpus_fpath)\r\n\r\n for doc in corpus:\r\n if len(doc) < 25:\r\n continue\r\n\r\n # For each model, get theta for the document.\r\n model_theta_dict = {}\r\n for model_key in model_dict.keys():\r\n doc_dist_gensim = model_dict[model_key]['model'][doc]\r\n k = model_key[3]\r\n doc_dist_numpy = np.zeros(k, dtype='float64')\r\n for (topic, val) in doc_dist_gensim:\r\n doc_dist_numpy[topic] = val\r\n\r\n # now that we have this document's theta, add it to the sum.\r\n model_dict[model_key]['doc_topic_sums'][subreddit] += doc_dist_numpy\r\n\r\n # From Thompson & Blei (2018):\r\n nz_theta_d = doc_dist_numpy > 0\r\n log_theta_d = xlogy(nz_theta_d, doc_dist_numpy)\r\n\r\n model_theta_dict[model_key] = {'nz_theta_d': nz_theta_d, 'log_theta_d': log_theta_d}\r\n\r\n # For each word type that occurs in doc, iterate through each model to make topic assignments.\r\n model_doc_token_topics = {model_key: np.zeros(model_key[3]) for model_key in model_dict}\r\n for (word_id, word_count) in 
doc:\r\n\r\n # Estimate topics for each model.\r\n for model_key in model_dict:\r\n k = model_key[3]\r\n #topic_assingments = assign_type_to_topic()\r\n\r\n # From Thompson & Blei (2018). Basically for the current word, get its\r\n # probability in each topic (nz_phis.T[word_id]). Multiply each element in this k-dimensional\r\n # vector by the corresponding elements in the document's nonzero theta vector. For each element\r\n # that is nonzero, return exponent(log phi values of the word in each topic + log theta values\r\n # of the document. Otherwise, return 0. Not sure why the .ravel() at the end--it seems that\r\n # this will return a k-dimensional vector with or without it. The resulting distribution\r\n # provides the distribution p(topic | word) from which we can make an assignment of the token\r\n # to a topic.\r\n topic_dist = np.where(model_dict[model_key]['nz_phis'].T[word_id] * model_theta_dict[model_key]['nz_theta_d'] != 0,\r\n np.exp(model_dict[model_key]['log_phis'].T[word_id] + model_theta_dict[model_key]['log_theta_d']),\r\n 0.0).ravel()\r\n\r\n # Normalize distribution p(topic | word, phi, theta):\r\n topic_dist = topic_dist / topic_dist.sum()\r\n\r\n # Draw a topic from topic_dist for however many times the word occurs in the document.\r\n topics = np.random.choice(k, size=int(word_count), p=topic_dist)\r\n\r\n for topic_i in topics:\r\n model_doc_token_topics[model_key][topic_i] += 1\r\n\r\n # now we have token-topic assingment counts for each word type present in the current document.\r\n # START HERE -->\r\n # update token-topic assignment counts\r\n for model_key in model_dict:\r\n model_doc_topic_counts = model_doc_token_topics[model_key]\r\n\r\n model_dict[model_key]['token_topic_counts'][subreddit] += model_doc_topic_counts\r\n\r\n # also make the token-topic distribution and add it to ongoing count\r\n model_doc_token_dist = model_doc_topic_counts / model_doc_topic_counts.sum()\r\n model_dict[model_key]['doc_topic_tokens_sums'][subreddit] += model_doc_token_dist\r\n\r\n model_dict[model_key]['doc_counts'][subreddit] += 1\r\n\r\n # Now we are done with all documents in a subreddit. 
Summary stats for the subreddit can now be calculated\r\n # including the average theta distribution, the distribution of token-topic assignments, & the average\r\n # token-topic document distribution.\r\n for model_key in model_dict.keys():\r\n\r\n # All token-topic assignments have been counted for this subreddit, so store those counts in\r\n # token_assignment_counts for later use and write them to file.\r\n token_topic_freqs_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'token_topic_freqs_' + subreddit + '.csv')\r\n write_token_topic_freqs(token_topic_freqs_fpath,\r\n model_dict[model_key]['token_topic_counts'][subreddit])\r\n\r\n # Find average theta distribution by dividing the summed thetas by the number of documents.\r\n avg_doc_topic_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_' + subreddit + '.txt')\r\n avg_doc_topic = model_dict[model_key]['doc_topic_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_fpath, avg_doc_topic)\r\n\r\n # Find the average topic distribution of each document from token-topic assignments by dividing the sum of the\r\n # document distributions by the number of documents.\r\n avg_doc_topic_tokens_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_from_tokens_' + subreddit + '.txt')\r\n avg_doc_topic_from_tokens = model_dict[model_key]['doc_topic_tokens_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_tokens_fpath, avg_doc_topic_from_tokens)\r\n\r\n # topic model summary files can now be written\r\n # Topic summary file. Possible things to include:\r\n # - entropy of the topic's word distribution (what does this really tell us that is useful?)\r\n # - entropy of topic over subreddits\r\n # - top N words & probabilities OR top words & probabilities up to some cumulative probability (eg, the\r\n # topic words needed to account for at least 50% of the topic's word distribution.\r\n # - number of tokens assigned to each subreddit. 
Can also do as a proportion of a subreddit's tokens\r\n # assigned to each topic.\r\n for model_key in model_dict:\r\n subreddit_entropy_counts, subreddit_entropy_props = get_subreddit_entropy(model_dict[model_key]['token_topic_counts'])\r\n\r\n phis = model_dict[model_key]['model'].get_topics()\r\n k = model_key[3]\r\n topic_entropies = [measures.entropy(phis[topic_i]) for topic_i in range(k)]\r\n\r\n max_subreddit_count, max_subreddit_prop = get_subreddits_w_max_topics(model_dict[model_key]['token_topic_counts'])\r\n\r\n # model_key = (training_corpus_type, sample_name, corpus_name, k)\r\n topic_summary_fpath = os.path.join(cons.lda_dir,\r\n model_key[0],\r\n model_key[1],\r\n model_key[1] + '-' + str(k),\r\n 'topics_summary.csv')\r\n write_topics_summary_file(topic_summary_fpath,\r\n subreddit_entropy_counts, subreddit_entropy_props,\r\n topic_entropies,\r\n max_subreddit_count, max_subreddit_prop,\r\n model_dict[model_key]['model'],\r\n k)", "def get_sub_topics(model,\n doc2bow,\n corpus,\n n_topics=params.lda_params_default['n_topics']):\n # Gets dominant topic for each subreddit (hard clustering)\n sub_topics_array = [sorted(doc,\n key=itemgetter(1),\n reverse=True,\n )[0] for doc in model.get_document_topics(doc2bow)]\n top_topics_df = pd.DataFrame(sub_topics_array,\n columns=['topic_number', 'topic_percentage'])\n top_topics_df = top_topics_df.join(corpus.loc[:, ['subreddit', 'health']],\n how='left',\n )\n top_topics_df = top_topics_df[['subreddit', 'health', 'topic_number', 'topic_percentage']]\n all_topic_terms = model.show_topics(num_topics=n_topics,\n formatted=False,\n )\n terms_df = pd.concat([pd.DataFrame(all_topic_terms[i][1],\n columns=['terms', 'term_probability'],\n index=[i] * len(all_topic_terms[i][1])) for i in range(0, n_topics)])\n terms_df['terms_list'] = terms_df.groupby(terms_df.index)['terms'].apply(lambda x: x.to_list())\n terms_df['term_probabilities'] = terms_df.groupby(terms_df.index)['term_probability'].apply(lambda x: x.to_list())\n terms_df.drop(['terms', 'term_probability'],\n axis=1,\n inplace=True,\n )\n terms_df = terms_df.rename_axis('topic_number').reset_index()\n terms_df = terms_df.drop_duplicates(subset='topic_number',\n ).set_index('topic_number')\n top_topics_df = pd.merge(top_topics_df, terms_df, how='left', on='topic_number')\n print('LDA topics data: \\n{}'.format(top_topics_df))\n\n top_health_topics = top_topics_df.loc[top_topics_df['health'] == 1, ['subreddit', 'topic_number']]\n top_health_topics = top_health_topics['topic_number'].value_counts().rename('subreddit_count')\n print('Health-related topics: \\n{}'.format(top_health_topics))\n\n pd.to_pickle(top_topics_df, 'lda_topic_data_{}'.format(n_topics))\n\n return top_topics_df, terms_df", "def findTopicSimilarity(self, bug, topics):\n for turn in bug.get_turns():\n for sent in turn.get_sentences():\n temp_tags = sent.get_tags()\n if 'OT' not in temp_tags and 'Code' not in temp_tags and 'URL' not in temp_tags:\n sent_words = [sent.get_cleaned_text().split()]\n sent2bow = [self.id2word.doc2bow(word) for word in sent_words]\n \n #print('{} : {}'.format(sent.get_id(),self.lda_model.get_document_topics(sent2bow)))\n for prob in self.lda_model.get_document_topics(sent2bow):\n print('{} : {}'.format(sent.get_id(),prob))", "def trendingTopics():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for topic in trending_topics:\n util.safe_print(topic.name)", "def 
general_analysis(ciphertext):\n print('Total length of ciphertext:', len(ciphertext))\n print('Unique letters:',len(find_letter_distribution(ciphertext)))", "def get_word_distribution():\n db = DataParser.get_connection()\n cursor = db.cursor()\n config = DataParser.get_config()\n cursor.execute(\"use %s\" % config[\"database\"][\"database_name\"])\n cursor.execute(\"select article_id, word_id from words_articles order by article_id, word_id\")\n article_words = cursor.fetchall()\n article_words = list(map(lambda t: (t[0] - 1, t[1] - 1), article_words))\n cursor.execute(\"select word_id, count(*) as word_count from words_articles group by word_id order by word_id\")\n word_count = cursor.fetchall()\n word_count = list(map(lambda t: (t[0] - 1, t[1]), word_count))\n cursor.execute(\"select article_id, word_id, count(*) as word_count \"\n \"from words_articles group by word_id, article_id order by article_id, word_id\")\n word_article_count = cursor.fetchall()\n word_article_count = list(map(lambda t: (t[0] - 1, t[1] - 1, t[2]), word_article_count))\n return word_count, article_words, word_article_count", "def getEntropyOfReaction(self, T):\n cython.declare(dSrxn=cython.double, reactant=Species, product=Species)\n dSrxn = 0.0\n for reactant in self.reactants:\n dSrxn -= reactant.thermo.getEntropy(T)\n for product in self.products:\n dSrxn += product.thermo.getEntropy(T)\n return dSrxn", "def get_topics(topics, mode='p', top=0.5):\n t = sorted(topics, key=lambda x: x[1], reverse=True)\n t2 = []\n s = 0\n i = 0\n if mode == 'p':\n while s < top and i < len(t):\n t2.append(t[i])\n s += t[i][1]\n i += 1\n elif mode == 'n':\n while i < top and i < len(t):\n t2.append(t[i])\n i += 1\n return t2", "def get_doc_ids_with_non_zero_topic_proportions(topic):\n\n tuples = {}\n\n for number, dist in get_doc_topic_dists().items():\n if topic in dist:\n tuples[number] = dist[topic]\n\n return sorted(tuples.items(), key=lambda x: x[1], reverse=True)", "def ReturnEntropyDensity(self):\n return self.ReturnEntropy()/float(self.n)", "def partition_entropy(subsets):\n total_count = sum(len(subset) for subset in subsets)\n return sum( ent.data_entropy(subset) * len(subset) / total_count\n for subset in subsets )", "def sampleNewTopicForWords(amount_of_topics, doc_idx, document_topic_count,document_topic_sum,\n topic_term_count,topic_term_sum, word,alpha_sum):\n sample_list = list()\n val = 0\n for topic_check in range(amount_of_topics):\n first_fraction=(document_topic_count[doc_idx][topic_check]+alpha)/(document_topic_sum[doc_idx]+alpha_sum)\n # second fraction\n second_fraction=0\n if(word in topic_term_count[topic_check]):\n second_fraction=(topic_term_count[topic_check][word]+beta)/(topic_term_sum[topic_check]+precalc_beta[topic_check])\n else:\n second_fraction = (beta) / (\n topic_term_sum[topic_check] + precalc_beta[topic_check])\n val += first_fraction*second_fraction\n sample_list.append(val)\n # normalised_sample_list = [float(i) / sum(sample_list) for i in sample_list]\n return random.choices(index_list, cum_weights=sample_list)", "def condensed_topics(self, request):\n if request.method == 'POST':\n user_id = request.data['id']\n user = get_user_model().objects.get(id=user_id)\n else:\n user = request.user\n profile = BaseUserProfile.objects.get(user=user)\n query = profile.topics_subscribed_to\n serializer = self.get_serializer(query, many=True)\n return Response(serializer.data)", "def number_of_articles():", "def topics(self):\n return str(self.__topics)", "def get_popular_subreddits(self, *args, 
**kwargs):\n url = self.config['popular_subreddits']\n return self.get_content(url, *args, **kwargs)", "def entropy (distr):\n return -sum([p * np.log(p) for p in distr.values()])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For each topic, return which subreddit had the most tokens assigned to the topic. Do this based on count of tokens as well as on which subreddit had the highest proportion of its tokens assigned to the topic.
def get_subreddits_w_max_topics(token_assignment_counts):
    max_topic_counts = []
    max_topic_props = []

    sub_list = [sub for sub in token_assignment_counts.keys()]

    k = len(token_assignment_counts[sub_list[0]])

    for topic_index in range(k):
        sub_topic_counts = []
        sub_topic_props = []

        for subreddit in sub_list:
            # Number of tokens from subreddit assigned to topic.
            subreddit_topic_count = token_assignment_counts[subreddit][topic_index]

            # Count of all tokens from the subreddit (sums over how many tokens from the subreddit
            # were assigned to each topic).
            subreddit_count = sum(token_assignment_counts[subreddit])

            subreddit_topic_prop = subreddit_topic_count / subreddit_count

            sub_topic_counts.append((subreddit, subreddit_topic_count))
            sub_topic_props.append((subreddit, subreddit_topic_prop))

        # Sort the tuples of (subreddit, topic count) from highest to lowest topic counts. Then take
        # the top 3. Do the same for proportions.
        top_3_counts = sorted(sub_topic_counts, key=lambda x: x[1], reverse=True)[:3]
        top_3_props = sorted(sub_topic_props, key=lambda x: x[1], reverse=True)[:3]

        max_topic_counts.append(top_3_counts)
        max_topic_props.append(top_3_props)

    return max_topic_counts, max_topic_props
[ "def most_frequent_words(subreddit):\n freq_dists = []\n names = []\n titles_all = []\n for name, data in subreddit.items()[-1:]:\n titles_subs = []\n all_words = ['']\n for sub_id, sub in data.items():\n all_words = \" \".join([fixer(comment, True, False) \n for comment in sub.comments]).split()\n \n titles_subs.append(sub.title) \n \n freq_dist = nltk.probability.FreqDist(all_words)\n names.append(name)\n titles_all.append(titles_subs)\n freq_dists.append(freq_dist)\n return names, freq_dists, titles_all", "def topic_counts(model, top_n):\n counts = {}\n threshold = 2.0 / model.k\n for k in range(model.k):\n counts[k] = 0\n for doc in tqdm(model.docs):\n for k, v in doc.get_topics(top_n=top_n):\n if v < threshold: next\n counts[k] += 1\n return counts", "def get_sub_topics(model,\n doc2bow,\n corpus,\n n_topics=params.lda_params_default['n_topics']):\n # Gets dominant topic for each subreddit (hard clustering)\n sub_topics_array = [sorted(doc,\n key=itemgetter(1),\n reverse=True,\n )[0] for doc in model.get_document_topics(doc2bow)]\n top_topics_df = pd.DataFrame(sub_topics_array,\n columns=['topic_number', 'topic_percentage'])\n top_topics_df = top_topics_df.join(corpus.loc[:, ['subreddit', 'health']],\n how='left',\n )\n top_topics_df = top_topics_df[['subreddit', 'health', 'topic_number', 'topic_percentage']]\n all_topic_terms = model.show_topics(num_topics=n_topics,\n formatted=False,\n )\n terms_df = pd.concat([pd.DataFrame(all_topic_terms[i][1],\n columns=['terms', 'term_probability'],\n index=[i] * len(all_topic_terms[i][1])) for i in range(0, n_topics)])\n terms_df['terms_list'] = terms_df.groupby(terms_df.index)['terms'].apply(lambda x: x.to_list())\n terms_df['term_probabilities'] = terms_df.groupby(terms_df.index)['term_probability'].apply(lambda x: x.to_list())\n terms_df.drop(['terms', 'term_probability'],\n axis=1,\n inplace=True,\n )\n terms_df = terms_df.rename_axis('topic_number').reset_index()\n terms_df = terms_df.drop_duplicates(subset='topic_number',\n ).set_index('topic_number')\n top_topics_df = pd.merge(top_topics_df, terms_df, how='left', on='topic_number')\n print('LDA topics data: \\n{}'.format(top_topics_df))\n\n top_health_topics = top_topics_df.loc[top_topics_df['health'] == 1, ['subreddit', 'topic_number']]\n top_health_topics = top_health_topics['topic_number'].value_counts().rename('subreddit_count')\n print('Health-related topics: \\n{}'.format(top_health_topics))\n\n pd.to_pickle(top_topics_df, 'lda_topic_data_{}'.format(n_topics))\n\n return top_topics_df, terms_df", "def get_max_topic_similarity(a: list, b: list) -> float:\n similarities = compare_topic_sets(a, b)\n similarities = [s for s in similarities if s]\n return max(similarities)", "def get_subreddit_entropy(token_assignment_counts):\r\n sub_entropy_counts = []\r\n sub_entropy_props = []\r\n\r\n sub_list = [sub for sub in token_assignment_counts.keys()]\r\n\r\n k = len(token_assignment_counts[sub_list[0]])\r\n\r\n for topic_index in range(k):\r\n topic_counts = []\r\n topic_props = []\r\n for subreddit in sub_list:\r\n subreddit_topic_count = token_assignment_counts[subreddit][topic_index]\r\n topic_counts.append(subreddit_topic_count)\r\n\r\n subreddit_topic_prop = subreddit_topic_count / float(sum(token_assignment_counts[subreddit]))\r\n topic_props.append(subreddit_topic_prop)\r\n\r\n topic_counts = np.array(topic_counts)\r\n topic_props = np.array(topic_props)\r\n\r\n topic_counts_dist = topic_counts / np.sum(topic_counts)\r\n topic_props_dist = topic_props / np.sum(topic_props)\r\n\r\n 
sub_entropy_counts.append(measures.entropy(topic_counts_dist))\r\n sub_entropy_props.append(measures.entropy(topic_props_dist))\r\n\r\n return sub_entropy_counts, sub_entropy_props", "def get_topic_distributions(model_info, corpus_name, subreddit_list):\r\n\t\r\n # initialize where topic counts will be stored for each model indicated in model_info\r\n model_dict = initialize_model_counters(model_info, subreddit_list)\r\n print()\r\n\r\n # iterate through each subreddit, each of its documents, and each word type in its documents to get counts.\r\n for subreddit in subreddit_list:\r\n\r\n current_time = datetime.datetime.now()\r\n print(str(current_time) + ' : starting ' + subreddit)\r\n print('--------------------')\r\n\r\n corpus_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name)\r\n corpus_metadata_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name + '_metadata.csv')\r\n corpus = gensim.corpora.MmCorpus(corpus_fpath)\r\n\r\n for doc in corpus:\r\n if len(doc) < 25:\r\n continue\r\n\r\n # For each model, get theta for the document.\r\n model_theta_dict = {}\r\n for model_key in model_dict.keys():\r\n doc_dist_gensim = model_dict[model_key]['model'][doc]\r\n k = model_key[3]\r\n doc_dist_numpy = np.zeros(k, dtype='float64')\r\n for (topic, val) in doc_dist_gensim:\r\n doc_dist_numpy[topic] = val\r\n\r\n # now that we have this document's theta, add it to the sum.\r\n model_dict[model_key]['doc_topic_sums'][subreddit] += doc_dist_numpy\r\n\r\n # From Thompson & Blei (2018):\r\n nz_theta_d = doc_dist_numpy > 0\r\n log_theta_d = xlogy(nz_theta_d, doc_dist_numpy)\r\n\r\n model_theta_dict[model_key] = {'nz_theta_d': nz_theta_d, 'log_theta_d': log_theta_d}\r\n\r\n # For each word type that occurs in doc, iterate through each model to make topic assignments.\r\n model_doc_token_topics = {model_key: np.zeros(model_key[3]) for model_key in model_dict}\r\n for (word_id, word_count) in doc:\r\n\r\n # Estimate topics for each model.\r\n for model_key in model_dict:\r\n k = model_key[3]\r\n #topic_assingments = assign_type_to_topic()\r\n\r\n # From Thompson & Blei (2018). Basically for the current word, get its\r\n # probability in each topic (nz_phis.T[word_id]). Multiply each element in this k-dimensional\r\n # vector by the corresponding elements in the document's nonzero theta vector. For each element\r\n # that is nonzero, return exponent(log phi values of the word in each topic + log theta values\r\n # of the document. Otherwise, return 0. Not sure why the .ravel() at the end--it seems that\r\n # this will return a k-dimensional vector with or without it. 
The resulting distribution\r\n # provides the distribution p(topic | word) from which we can make an assignment of the token\r\n # to a topic.\r\n topic_dist = np.where(model_dict[model_key]['nz_phis'].T[word_id] * model_theta_dict[model_key]['nz_theta_d'] != 0,\r\n np.exp(model_dict[model_key]['log_phis'].T[word_id] + model_theta_dict[model_key]['log_theta_d']),\r\n 0.0).ravel()\r\n\r\n # Normalize distribution p(topic | word, phi, theta):\r\n topic_dist = topic_dist / topic_dist.sum()\r\n\r\n # Draw a topic from topic_dist for however many times the word occurs in the document.\r\n topics = np.random.choice(k, size=int(word_count), p=topic_dist)\r\n\r\n for topic_i in topics:\r\n model_doc_token_topics[model_key][topic_i] += 1\r\n\r\n # now we have token-topic assingment counts for each word type present in the current document.\r\n # START HERE -->\r\n # update token-topic assignment counts\r\n for model_key in model_dict:\r\n model_doc_topic_counts = model_doc_token_topics[model_key]\r\n\r\n model_dict[model_key]['token_topic_counts'][subreddit] += model_doc_topic_counts\r\n\r\n # also make the token-topic distribution and add it to ongoing count\r\n model_doc_token_dist = model_doc_topic_counts / model_doc_topic_counts.sum()\r\n model_dict[model_key]['doc_topic_tokens_sums'][subreddit] += model_doc_token_dist\r\n\r\n model_dict[model_key]['doc_counts'][subreddit] += 1\r\n\r\n # Now we are done with all documents in a subreddit. Summary stats for the subreddit can now be calculated\r\n # including the average theta distribution, the distribution of token-topic assignments, & the average\r\n # token-topic document distribution.\r\n for model_key in model_dict.keys():\r\n\r\n # All token-topic assignments have been counted for this subreddit, so store those counts in\r\n # token_assignment_counts for later use and write them to file.\r\n token_topic_freqs_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'token_topic_freqs_' + subreddit + '.csv')\r\n write_token_topic_freqs(token_topic_freqs_fpath,\r\n model_dict[model_key]['token_topic_counts'][subreddit])\r\n\r\n # Find average theta distribution by dividing the summed thetas by the number of documents.\r\n avg_doc_topic_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_' + subreddit + '.txt')\r\n avg_doc_topic = model_dict[model_key]['doc_topic_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_fpath, avg_doc_topic)\r\n\r\n # Find the average topic distribution of each document from token-topic assignments by dividing the sum of the\r\n # document distributions by the number of documents.\r\n avg_doc_topic_tokens_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_from_tokens_' + subreddit + '.txt')\r\n avg_doc_topic_from_tokens = model_dict[model_key]['doc_topic_tokens_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_tokens_fpath, avg_doc_topic_from_tokens)\r\n\r\n # topic model summary files can now be written\r\n # Topic summary file. Possible things to include:\r\n # - entropy of the topic's word distribution (what does this really tell us that is useful?)\r\n # - entropy of topic over subreddits\r\n # - top N words & probabilities OR top words & probabilities up to some cumulative probability (eg, the\r\n # topic words needed to account for at least 50% of the topic's word distribution.\r\n # - number of tokens assigned to each subreddit. 
Can also do as a proportion of a subreddit's tokens\r\n # assigned to each topic.\r\n for model_key in model_dict:\r\n subreddit_entropy_counts, subreddit_entropy_props = get_subreddit_entropy(model_dict[model_key]['token_topic_counts'])\r\n\r\n phis = model_dict[model_key]['model'].get_topics()\r\n k = model_key[3]\r\n topic_entropies = [measures.entropy(phis[topic_i]) for topic_i in range(k)]\r\n\r\n max_subreddit_count, max_subreddit_prop = get_subreddits_w_max_topics(model_dict[model_key]['token_topic_counts'])\r\n\r\n # model_key = (training_corpus_type, sample_name, corpus_name, k)\r\n topic_summary_fpath = os.path.join(cons.lda_dir,\r\n model_key[0],\r\n model_key[1],\r\n model_key[1] + '-' + str(k),\r\n 'topics_summary.csv')\r\n write_topics_summary_file(topic_summary_fpath,\r\n subreddit_entropy_counts, subreddit_entropy_props,\r\n topic_entropies,\r\n max_subreddit_count, max_subreddit_prop,\r\n model_dict[model_key]['model'],\r\n k)", "def get_topics_in_ranked_order(request):\n topics = Topic.objects.order_by('-amount_of_posts_linked')[:8]\n if request.user.is_authenticated:\n return {'top_topics': topics}\n else:\n return {'top_topics': topics}", "def get_topics(topics, mode='p', top=0.5):\n t = sorted(topics, key=lambda x: x[1], reverse=True)\n t2 = []\n s = 0\n i = 0\n if mode == 'p':\n while s < top and i < len(t):\n t2.append(t[i])\n s += t[i][1]\n i += 1\n elif mode == 'n':\n while i < top and i < len(t):\n t2.append(t[i])\n i += 1\n return t2", "def get_max_tokens_per_dialogue(self, tokenizer):\n \n curr_max = {'num_tokens': 0,\\\n 'id': '',\\\n 'num_sentences': 0}\n\n for dialogue in self._dataset:\n curr_tokens = 0\n for utt in dialogue['utterances']:\n curr_tokens += len(tokenizer.tokenize(utt))\n if curr_tokens > curr_max['num_tokens']:\n curr_max['num_tokens'] = curr_tokens\n curr_max['id'] = dialogue['id']\n curr_max['num_sentences'] = len(dialogue['utterances'])\n\n return curr_max['num_tokens'], curr_max['num_sentences'], curr_max['id']", "def print_topics_by_org(filename, num_topics):\n if os.path.exists(filename):\n with open(filename, \"rb\") as file_in:\n top_topics_by_org = pickle.load(file_in)\n for organization in top_topics_by_org.keys():\n print(f\"Organization: {organization}\")\n for topic, count in top_topics_by_org[organization].most_common(num_topics):\n print(f\"Topic {topic}: {count} papers\")\n print(\"---------------------\")", "def get_max_tokens_per_sentence(self, tokenizer):\n curr_max = {'num_tokens': 0,\\\n 'sentence': '',\\\n 'id': ''}\n\n for dialogue in self._dataset:\n for utt in dialogue['utterances']:\n curr_len = len(tokenizer.tokenize(utt))\n if curr_len > curr_max['num_tokens']:\n curr_max['num_tokens'] = curr_len\n curr_max['sentence'] = utt\n curr_max['id'] = dialogue['id']\n \n return curr_max['num_tokens'], curr_max['sentence'], curr_max['id']", "def get_matrix_with_most_used_topics(cls, users_topics_matrix):\n sums = []\n for x in range(users_topics_matrix.shape[1]):\n sums.append(sum(users_topics_matrix.getcol(x).data))\n\n required_index = []\n value = np.percentile(sums, 95)\n for x in range(users_topics_matrix.shape[1]):\n if sum(users_topics_matrix.getcol(x).data) >= value:\n required_index.append(x)\n return users_topics_matrix.transpose()[required_index].transpose()", "def genreClassifier(test_tokens, genre_models):\n tokens = test_tokens\n most_common = Counter(test_tokens).most_common()\n top100 = [x[0] for x in most_common]\n top100 = top100[:100]\n\n models = {\n 'children': 
genre_models['children']['good_turing_uni'], \n 'history': genre_models['history']['good_turing_uni'], \n 'crime': genre_models['crime']['good_turing_uni']\n }\n\n probs = {'children':1, 'history': 1, 'crime': 1}\n for word in top100:\n for genre in probs:\n if word in models[genre]:\n probs[genre] *= models[genre][word]\n print probs\n return max(probs, key=probs.get)", "def most_frequent(data: list) -> str:\n #return max(Counter(data), key=lambda i: Counter(data)[i])\n return max(data, key=data.count)", "def get_related_terms(token, topn=20):\r\n\r\n for word, similarity in model.most_similar(positive=[token], topn=topn):\r\n print(word, round(similarity, 3))", "def most_probable_words(model, vocabulary, num_words):\n ## create array of vocabulary, sorted by topic\n ## probabilities, one row for each topic.\n vocab = np.asarray(vocabulary)[np.argsort(model.topic_word_)]\n wp = np.sort(model.topic_word_)\n\n ## select n most probable words, which are the right-most\n ## columns in the vocab array.\n words = vocab[:, -num_words:-1]\n\n words = pd.DataFrame(words.T)\n words['rank'] = words.index\n words = pd.melt(words, id_vars='rank')\n\n word_probs = wp[:, -num_words:-1]\n word_probs = pd.DataFrame(word_probs.T)\n word_probs['rank'] = word_probs.index\n word_probs = pd.melt(word_probs, id_vars='rank')\n\n ww = words.merge(word_probs, on=['rank', 'variable'])\n\n ww.columns = ['rank', 'topic', 'word', 'prob']\n return ww", "def max_frequency(self):\n max = 0\n for term, frequency in self.vocabulary.items() :\n if frequency > max :\n max = frequency\n return max", "def optimal_topic_number(filename_1, top_idf_number):\n common_texts = process_doc(filename_1, top_idf_number)\n common_dictionary = Dictionary(common_texts)\n common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]\n coherence_score = []\n for i in range(20, 40):\n lda = LdaModel(common_corpus, id2word=common_dictionary, iterations=50, num_topics=i,\n random_state=np.random.RandomState(23455))\n coherence_model_lda = CoherenceModel(model=lda, texts=common_texts, dictionary=common_dictionary,\n coherence='u_mass')\n coherence_lda = coherence_model_lda.get_coherence()\n print('\\nCoherence Score: ', coherence_lda)\n coherence_score.append(coherence_lda)\n plt.plot(range(20, 40, 1), coherence_score)\n plt.xlabel(\"Num Topics\")\n plt.ylabel(\"Coherence score\")\n plt.legend(\"coherence_values\", loc='best')\n plt.show()", "def get_doc_topic_max_df(doc_topic,cols,rnd=3):\n \n doc_topic_df = pd.DataFrame(doc_topic.round(rnd),columns=cols)\n # below code is for each paragraph, but actually want for each transcript\n# doc_topic_df['topic_max'] = doc_topic_df.max(axis=1)\n# doc_topic_df['cluster'] = doc_topic_df.idxmax(axis=1)\n \n return doc_topic_df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates and returns a dictionary for each LDA model with relevant model data and initialized places to store counts for each model.
def initialize_model_counters(model_info, subreddit_list):
    model_counts_dict = {}
    for training_corpus_type, sample_name, corpus_name, k_list in model_info:
        for k in k_list:
            model_name = sample_name + '-' + str(k)
            model_dir = os.path.join(cons.lda_dir, training_corpus_type, sample_name, model_name)
            model_tdists_dir = cons.makedir(os.path.join(model_dir, 'topic_analysis'))
            model_path = os.path.join(model_dir, 'model_files')
            lda_model = gensim.models.LdaModel.load(model_path)

            # Get model phi as a topic-word matrix where each row is the word distribution that constitutes the topic.
            phis = lda_model.get_topics()

            # Thompson & Blei (2018) do this, but I think phis is already normalized when calling .get_topics().
            phis = phis / phis.sum(axis=1, keepdims=True)

            # This is from Thompson & Blei (2018). Gives log of each phi value, but if phi value is zero, keeps it zero.
            nz_phis = phis > 0
            log_phis = xlogy(nz_phis, phis)

            model_dict = {'tdists_dir': model_tdists_dir,
                          'model': lda_model,
                          'nz_phis': nz_phis,
                          'log_phis': log_phis,
                          'token_topic_counts': {subreddit: np.zeros(k) for subreddit in subreddit_list},
                          'doc_topic_sums': {subreddit: np.zeros(k) for subreddit in subreddit_list},
                          'doc_topic_tokens_sums': {subreddit: np.zeros(k) for subreddit in subreddit_list},
                          'doc_counts': {subreddit: 0 for subreddit in subreddit_list}}

            model_counts_dict[(training_corpus_type, sample_name, corpus_name, k)] = model_dict

            print(training_corpus_type + '\\' + sample_name + '-' + str(k))

    return model_counts_dict
[ "def load_models_and_predictors(self):\n self.models = {}\n self.predictors = {}\n model_paths = [path.join(self.data_dir, timepoint + self.embedding_suffix) for timepoint in self.timepoints]\n predictor_handles = [timepoint for timepoint in self.timepoints]\n loaded_models = Parallel(n_jobs=self.workers)(delayed(self.load_model)(model_path) for model_path in model_paths)\n for i, timepoint in enumerate(self.timepoints):\n self.models[timepoint] = loaded_models[i]\n self.predictors[timepoint] = self.load_predictor(predictor_handles[i])\n print \"Done loading predictors\"", "def predict_all():\n \n # Loads the serialised analytic models. \n lrm = joblib.load(\"app/mod_stat/model_linear.pkl\") \n log = joblib.load(\"app/mod_stat/model_binary.pkl\")\n \n # Queries each unique associated count value from the database.\n results = Counts.select(Counts.counts_associated).distinct()\n \n count_values = []\n for result in results:\n if result.get_result()[\"counts_associated\"] != \"None\":\n count_values.append(result.get_result()[\"counts_associated\"])\n\n # For each unique associated count value:\n for count in count_values:\n # Updates every row of the database having that value with a corresponding predicted count. \n query = Counts.update(counts_predicted=int(lrm.predict(int(count))[0])).where(Counts.counts_associated == count)\n query.execute()\n\n # Updates every row of the database having that value with a corresponding binary estimation. \n query = Counts.update(counts_predicted_is_occupied=log.predict(int(count))[0]).where(Counts.counts_associated == count)\n query.execute()", "def _run_analysis(self):\n\n count = {}\n frequencies = {}\n relatives = {}\n\n for document in self.corpus:\n count[document] = Counter()\n frequencies[document] = {}\n relatives[document] = {}\n for gender in self.genders:\n count[document][gender] = document.get_count_of_words(gender.identifiers)\n frequencies[document][gender] = document.get_word_frequencies(gender.identifiers)\n relatives[document] = _get_gender_word_frequencies_relative(count[document])\n\n return count, frequencies, relatives", "def setup_score(embedding_model_name):\n global dic_score\n if not embedding_model_name in dic_score:\n dic_score[embedding_model_name] = {}\n\n dic_score[embedding_model_name][\"davies_bouldin\"] = []\n dic_score[embedding_model_name][\"calinski_harabasz\"] = []\n dic_score[embedding_model_name][\"silhouette_score\"] = []", "def create_mutual_info_dicts(self):\n res = dict(zip(self.vocab, mutual_info_classif(self.matrix, self.labels)))\n self.ig_dict = res\n self.indexes_dict = dict(zip(self.vocab, range(len(self.vocab))))", "def read_models():\n unigram = open(unigram_file, 'rb')\n bigram = open(bigram_file, 'rb')\n delete = open(del_file, 'rb')\n insert = open(ins_file, 'rb')\n subs = open(sub_file, 'rb')\n trans = open(trans_file, 'rb')\n count = open(count_file, 'rb')\n unigram_counts = cPickle.load(unigram)\n bigram_counts = cPickle.load(bigram)\n term_count = len(unigram_counts)\n del_dic = cPickle.load(delete)\n ins_dic = cPickle.load(insert)\n sub_dic = cPickle.load(subs)\n trans_dic = cPickle.load(trans)\n count_dic = cPickle.load(count)", "def create_dicts(self):\n print(\"There are \" + str(self.matrix.shape[1]) + \" features and \")\n print(str(self.matrix.shape[0]) + \" instances to consider\")\n possible_labels = list(set(self.labels))\n matricies = {}\n ig_dict = {}\n indexes_dict = {}\n sums = {}\n probabilities = {}\n total_sum = float(self.matrix.sum())\n ig_term1 = 0\n for label in 
possible_labels:\n row_slice = [True if val == label else False for val in self.labels]\n matricies[label] = self.matrix[row_slice, :]\n sums[label] = float(matricies[label].sum())\n probabilities[label] = max(sums[label] / total_sum, 0.00000000001)\n ig_term1 += probabilities[label] * log(probabilities[label])\n\n ig_term1 *= -1\n print(\"Calculating information gain for feature: \")\n print(\"\\r0\", end='')\n for col_index in range(len(self.vocab)):\n if col_index % 100 == 0:\n print(\"\\r\" + str(col_index), end=\"\")\n term = self.vocab[col_index]\n t_count = max(float(self.matrix[:, col_index].sum()), 0.00000000001)\n label_counts = {}\n ig_term2 = 0\n ig_term3 = 0\n p_t = float(t_count) / total_sum\n p_tbar = 1 - p_t\n for label in possible_labels:\n try:\n label_counts[label] = float(a_matrix[:, col_index].sum())\n except:\n label_counts[label] = 0.0\n p_c1_t = max(label_counts[label] / t_count, 0.00000000001)\n ig_term2 += p_c1_t * log(p_c1_t)\n p_c1_tbar = max((sums[label] - label_counts[label]) / (total_sum - t_count), 0.00000000001)\n ig_term3 += p_c1_tbar * log(p_c1_tbar)\n\n ig_term2 *= p_t\n ig_term3 *= p_tbar\n ig = ig_term1 + ig_term2 + ig_term3\n # print ig\n ig_dict[term] = ig\n indexes_dict[term] = col_index\n\n self.ig_dict = ig_dict\n self.indexes_dict = indexes_dict", "def process_data():\r\n print('Preparing data for model ready')\r\n build_vocab('train.enc')\r\n build_vocab('train.dec')\r\n token2id('train', 'enc')\r\n token2id('train', 'dec')\r\n token2id('test', 'enc')\r\n token2id('test', 'dec')", "def get_models():\n models = {}\n models['LR'] = LogisticRegression()\n models['LDA'] = LinearDiscriminantAnalysis()\n models['KNN'] = KNeighborsClassifier()\n models['CART'] = DecisionTreeClassifier()\n models['NB'] = GaussianNB()\n models['SVM'] = SVC()\n return models", "def get_dictionaries(self):\n\n model = Word2Vec.load(self.output_directory + '/word2vec')\n index2word = model.index2word\n index_dict = {}\n word_vectors = {}\n\n for word in index2word:\n index_dict[word] = index2word.index(word) + 1 # +1 to use index 0 as the unknown token or no token index\n word_vectors[word] = model[word]\n with open(self.output_directory + '/index_dict.pk', 'wb') as f:\n pickle.dump(index_dict, f)\n with open(self.output_directory + '/word_vectors.pk', 'wb') as f:\n pickle.dump(word_vectors, f)\n\n print('lenght of dictionary (voc_dim):', len(index_dict))\n return index_dict, word_vectors", "def __model_dic(self):\n model_dic = {}\n for model in self.models:\n model_dic[model.id] = IModel(model)\n return model_dic", "def init_dic(self):\n self.word_dic = {}\n self.bigram = 0 # count counts the number of bigrams for Laplace smoothing\n for i in range(len(self.corpus)):\n ch = self.corpus[i]\n if ch not in self.word_dic:\n self.word_dic[ch] = {}\n # The number of times the word appears independently\n self.word_dic[ch][ch] = 1 + self.word_dic[ch].get(ch, 0)\n if i != len(self.corpus) - 1:\n ch_next = self.corpus[i + 1]\n # Count the frequency of occurrence of the word and the following word\n self.word_dic[ch][ch_next] = 1 + self.word_dic[ch].get(ch_next, 0)\n\n for key in self.word_dic.keys():\n self.bigram += len(self.word_dic[key].keys()) - 1 # Count the total number of all bigrams", "def build_bag_of_words_model(self):\n\n lda_dictionary = Dictionary(self.cleaned_data.values())\n lda_bag_of_words = [lda_dictionary.doc2bow(c, allow_update=True) for c in self.cleaned_data.values()]\n\n return lda_dictionary, lda_bag_of_words", "def _extract_models(self, name, 
from_dict):\n\n # Extract all the model list\n mlist = self._obj.get(name, [])\n\n # Convert the model from dictionary to concreate\n # python class for the model.\n mlist = [from_dict(d) for d in mlist]\n\n # Dictionaries for file mappings\n mmap = {}\n\n # For each experiment, check the model is not specified by\n # a path, if it is then get the dictionary of the model\n # and insert it into the list. Replace the path reference\n # with an index\n for eobj in self._obj[\"experiment\"]:\n value = eobj.get(name)\n if value is None:\n continue\n elif isinstance(value, str):\n if value not in mmap:\n mmap[value] = len(mlist)\n mlist.append(\n from_dict(_experimentlist_from_file(value, self._directory))\n )\n eobj[name] = mmap[value]\n elif not isinstance(value, int):\n raise TypeError(\"expected int or str, got %s\" % type(value))\n\n return mlist", "def get_topic_distributions(model_info, corpus_name, subreddit_list):\r\n\t\r\n # initialize where topic counts will be stored for each model indicated in model_info\r\n model_dict = initialize_model_counters(model_info, subreddit_list)\r\n print()\r\n\r\n # iterate through each subreddit, each of its documents, and each word type in its documents to get counts.\r\n for subreddit in subreddit_list:\r\n\r\n current_time = datetime.datetime.now()\r\n print(str(current_time) + ' : starting ' + subreddit)\r\n print('--------------------')\r\n\r\n corpus_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name)\r\n corpus_metadata_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name + '_metadata.csv')\r\n corpus = gensim.corpora.MmCorpus(corpus_fpath)\r\n\r\n for doc in corpus:\r\n if len(doc) < 25:\r\n continue\r\n\r\n # For each model, get theta for the document.\r\n model_theta_dict = {}\r\n for model_key in model_dict.keys():\r\n doc_dist_gensim = model_dict[model_key]['model'][doc]\r\n k = model_key[3]\r\n doc_dist_numpy = np.zeros(k, dtype='float64')\r\n for (topic, val) in doc_dist_gensim:\r\n doc_dist_numpy[topic] = val\r\n\r\n # now that we have this document's theta, add it to the sum.\r\n model_dict[model_key]['doc_topic_sums'][subreddit] += doc_dist_numpy\r\n\r\n # From Thompson & Blei (2018):\r\n nz_theta_d = doc_dist_numpy > 0\r\n log_theta_d = xlogy(nz_theta_d, doc_dist_numpy)\r\n\r\n model_theta_dict[model_key] = {'nz_theta_d': nz_theta_d, 'log_theta_d': log_theta_d}\r\n\r\n # For each word type that occurs in doc, iterate through each model to make topic assignments.\r\n model_doc_token_topics = {model_key: np.zeros(model_key[3]) for model_key in model_dict}\r\n for (word_id, word_count) in doc:\r\n\r\n # Estimate topics for each model.\r\n for model_key in model_dict:\r\n k = model_key[3]\r\n #topic_assingments = assign_type_to_topic()\r\n\r\n # From Thompson & Blei (2018). Basically for the current word, get its\r\n # probability in each topic (nz_phis.T[word_id]). Multiply each element in this k-dimensional\r\n # vector by the corresponding elements in the document's nonzero theta vector. For each element\r\n # that is nonzero, return exponent(log phi values of the word in each topic + log theta values\r\n # of the document. Otherwise, return 0. Not sure why the .ravel() at the end--it seems that\r\n # this will return a k-dimensional vector with or without it. 
The resulting distribution\r\n # provides the distribution p(topic | word) from which we can make an assignment of the token\r\n # to a topic.\r\n topic_dist = np.where(model_dict[model_key]['nz_phis'].T[word_id] * model_theta_dict[model_key]['nz_theta_d'] != 0,\r\n np.exp(model_dict[model_key]['log_phis'].T[word_id] + model_theta_dict[model_key]['log_theta_d']),\r\n 0.0).ravel()\r\n\r\n # Normalize distribution p(topic | word, phi, theta):\r\n topic_dist = topic_dist / topic_dist.sum()\r\n\r\n # Draw a topic from topic_dist for however many times the word occurs in the document.\r\n topics = np.random.choice(k, size=int(word_count), p=topic_dist)\r\n\r\n for topic_i in topics:\r\n model_doc_token_topics[model_key][topic_i] += 1\r\n\r\n # now we have token-topic assingment counts for each word type present in the current document.\r\n # START HERE -->\r\n # update token-topic assignment counts\r\n for model_key in model_dict:\r\n model_doc_topic_counts = model_doc_token_topics[model_key]\r\n\r\n model_dict[model_key]['token_topic_counts'][subreddit] += model_doc_topic_counts\r\n\r\n # also make the token-topic distribution and add it to ongoing count\r\n model_doc_token_dist = model_doc_topic_counts / model_doc_topic_counts.sum()\r\n model_dict[model_key]['doc_topic_tokens_sums'][subreddit] += model_doc_token_dist\r\n\r\n model_dict[model_key]['doc_counts'][subreddit] += 1\r\n\r\n # Now we are done with all documents in a subreddit. Summary stats for the subreddit can now be calculated\r\n # including the average theta distribution, the distribution of token-topic assignments, & the average\r\n # token-topic document distribution.\r\n for model_key in model_dict.keys():\r\n\r\n # All token-topic assignments have been counted for this subreddit, so store those counts in\r\n # token_assignment_counts for later use and write them to file.\r\n token_topic_freqs_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'token_topic_freqs_' + subreddit + '.csv')\r\n write_token_topic_freqs(token_topic_freqs_fpath,\r\n model_dict[model_key]['token_topic_counts'][subreddit])\r\n\r\n # Find average theta distribution by dividing the summed thetas by the number of documents.\r\n avg_doc_topic_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_' + subreddit + '.txt')\r\n avg_doc_topic = model_dict[model_key]['doc_topic_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_fpath, avg_doc_topic)\r\n\r\n # Find the average topic distribution of each document from token-topic assignments by dividing the sum of the\r\n # document distributions by the number of documents.\r\n avg_doc_topic_tokens_fpath = os.path.join(model_dict[model_key]['tdists_dir'],\r\n 'avg_doc_topic_from_tokens_' + subreddit + '.txt')\r\n avg_doc_topic_from_tokens = model_dict[model_key]['doc_topic_tokens_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit])\r\n np.savetxt(avg_doc_topic_tokens_fpath, avg_doc_topic_from_tokens)\r\n\r\n # topic model summary files can now be written\r\n # Topic summary file. Possible things to include:\r\n # - entropy of the topic's word distribution (what does this really tell us that is useful?)\r\n # - entropy of topic over subreddits\r\n # - top N words & probabilities OR top words & probabilities up to some cumulative probability (eg, the\r\n # topic words needed to account for at least 50% of the topic's word distribution.\r\n # - number of tokens assigned to each subreddit. 
Can also do as a proportion of a subreddit's tokens\r\n # assigned to each topic.\r\n for model_key in model_dict:\r\n subreddit_entropy_counts, subreddit_entropy_props = get_subreddit_entropy(model_dict[model_key]['token_topic_counts'])\r\n\r\n phis = model_dict[model_key]['model'].get_topics()\r\n k = model_key[3]\r\n topic_entropies = [measures.entropy(phis[topic_i]) for topic_i in range(k)]\r\n\r\n max_subreddit_count, max_subreddit_prop = get_subreddits_w_max_topics(model_dict[model_key]['token_topic_counts'])\r\n\r\n # model_key = (training_corpus_type, sample_name, corpus_name, k)\r\n topic_summary_fpath = os.path.join(cons.lda_dir,\r\n model_key[0],\r\n model_key[1],\r\n model_key[1] + '-' + str(k),\r\n 'topics_summary.csv')\r\n write_topics_summary_file(topic_summary_fpath,\r\n subreddit_entropy_counts, subreddit_entropy_props,\r\n topic_entropies,\r\n max_subreddit_count, max_subreddit_prop,\r\n model_dict[model_key]['model'],\r\n k)", "def init_models():\n\n return {\n 'KNN': (KNeighborsClassifier(weights='uniform',\n algorithm='auto',\n p=2,\n metric='minkowski'),\n {'n_neighbors': [3, 5, 7]}),\n 'Naive-Bayes': (GaussianNB(), {'var_smoothing': np.logspace(-12, 0, 11)}),\n 'Logistic-Regression': (\n LogisticRegression(penalty='l2',\n dual=False,\n tol=1e-4,\n fit_intercept=True,\n class_weight='balanced',\n random_state=SEED,\n solver='sag', # fast for large dataset\n max_iter=10000,\n verbose=1),\n {\n 'C': np.logspace(-3, 3, 11),\n 'n_jobs': [5]\n }),\n 'SVM': (\n LinearSVC(class_weight='balanced',\n # random folds so class frequencies are unexpected\n dual=False, # n_samples > n_features\n random_state=SEED,\n max_iter=10000,\n verbose=1),\n {'C': np.logspace(-3, 3, 11)}),\n 'Random-Forest': (\n RandomForestClassifier(criterion='gini',\n bootstrap=True,\n verbose=1,\n max_depth=25,\n min_samples_split=2,\n min_samples_leaf=4,\n random_state=SEED,\n max_features='auto'),\n # will do sqrt at each split\n {\n 'n_estimators': [10, 50, 100, 500, 1000],\n 'n_jobs': [5]\n }),\n 'Neural-Network': (\n MLPClassifier(solver='adam',\n learning_rate='adaptive',\n learning_rate_init=0.001,\n max_iter=10000,\n random_state=SEED,\n verbose=True,\n activation='relu',\n early_stopping=True),\n {\n 'hidden_layer_sizes': [(size,) for size in [1, 5, 20, 80, 320, 1280]],\n 'alpha': np.logspace(-3, 3, 11),\n }),\n }", "def term_counts(self):\n self.term_counts = dict()\n\n for i in range(self.num_docs):\n # set to remove duplicates\n\n true_idx = self.shuffled_docs[i]\n text = set(normalize(tokenize(self.get_tokens(true_idx))))\n c = self.get_relation(true_idx)\n for p in range(self.partitions):\n if i < self.folds[p][\"start_index\"] or (p < self.partitions-1 and i >= self.folds[p+1][\"start_index\"]):\n for t in text:\n if t not in self.term_counts:\n self.term_counts[t] = dict()\n for cl in CLASSES:\n for par in range(self.partitions):\n self.term_counts[t][(cl, par)] = 0\n self.term_counts[t][cl] = 0\n self.term_counts[t][(c, p)] += 1\n\n # compute for whole training dataset in addition to each partition\n for t in text:\n if t not in self.term_counts:\n self.term_counts[t] = dict()\n for cl in CLASSES:\n self.term_counts[t][(cl, par)] = 0\n self.term_counts[t][cl] = 0\n\n self.term_counts[t][c] += 1", "def speaker_dict(self):\n train_dict = collections.defaultdict(list)\n if self._one_hot == True:\n tmp_label = np.argmax(self._train_label, axis=1)\n else:\n tmp_label = self._train_label\n \n for d, l in zip(self._train_data, tmp_label):\n train_dict[l].append(d) \n self._train_dict = 
[np.vstack(train_dict[k]) for k in range(self._n_class)]\n self._dict = True\n print('Creating dict')", "def recognize(models: dict, test_set: SinglesData):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n sequences = test_set.get_all_sequences()\n XLenghts = test_set.get_all_Xlengths()\n\n for s in sequences:\n X, length = XLenghts[s]\n p = {}\n guess = \"\"\n for word, model in models.items():\n try:\n p[word] = model.score(X, length)\n except:\n p[word] = float('-inf')\n probabilities.append(p)\n values = list(p.values())\n keys = list(p.keys())\n guesses.append(keys[values.index(max(values))])\n\n return probabilities, guesses\n\n \"\"\"\n valid_models = {word: model for word,model in models.items() if model is not None}\n probabilities = [word_probabilities(valid_models, *test_set.get_item_Xlengths(i))\n for i,_ in enumerate(test_set.wordlist)]\n guesses = [best_guess(word_probs) for word_probs in probabilities]\n return probabilities, guesses\n\ndef word_probabilities(models, X, lengths):\n word_probs = {}\n\n for word,model in models.items():\n try:\n word_probs[word] = model.score(X, lengths)\n except ValueError: # The hmmlearn library may not be able to train or score all models.\n word_probs[word] = float('-inf')\n\n return word_probs\n\ndef best_guess(word_probs):\n return max(word_probs.keys(), key=lambda word: word_probs[word])\n \"\"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Probabilistically assigns each token to a topic from the specified models and writes the assignment statistics to file.
def get_topic_distributions(model_info, corpus_name, subreddit_list): # initialize where topic counts will be stored for each model indicated in model_info model_dict = initialize_model_counters(model_info, subreddit_list) print() # iterate through each subreddit, each of its documents, and each word type in its documents to get counts. for subreddit in subreddit_list: current_time = datetime.datetime.now() print(str(current_time) + ' : starting ' + subreddit) print('--------------------') corpus_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name) corpus_metadata_fpath = os.path.join(cons.corpora_dir, subreddit, corpus_name, corpus_name + '_metadata.csv') corpus = gensim.corpora.MmCorpus(corpus_fpath) for doc in corpus: if len(doc) < 25: continue # For each model, get theta for the document. model_theta_dict = {} for model_key in model_dict.keys(): doc_dist_gensim = model_dict[model_key]['model'][doc] k = model_key[3] doc_dist_numpy = np.zeros(k, dtype='float64') for (topic, val) in doc_dist_gensim: doc_dist_numpy[topic] = val # now that we have this document's theta, add it to the sum. model_dict[model_key]['doc_topic_sums'][subreddit] += doc_dist_numpy # From Thompson & Blei (2018): nz_theta_d = doc_dist_numpy > 0 log_theta_d = xlogy(nz_theta_d, doc_dist_numpy) model_theta_dict[model_key] = {'nz_theta_d': nz_theta_d, 'log_theta_d': log_theta_d} # For each word type that occurs in doc, iterate through each model to make topic assignments. model_doc_token_topics = {model_key: np.zeros(model_key[3]) for model_key in model_dict} for (word_id, word_count) in doc: # Estimate topics for each model. for model_key in model_dict: k = model_key[3] #topic_assingments = assign_type_to_topic() # From Thompson & Blei (2018). Basically for the current word, get its # probability in each topic (nz_phis.T[word_id]). Multiply each element in this k-dimensional # vector by the corresponding elements in the document's nonzero theta vector. For each element # that is nonzero, return exponent(log phi values of the word in each topic + log theta values # of the document. Otherwise, return 0. Not sure why the .ravel() at the end--it seems that # this will return a k-dimensional vector with or without it. The resulting distribution # provides the distribution p(topic | word) from which we can make an assignment of the token # to a topic. topic_dist = np.where(model_dict[model_key]['nz_phis'].T[word_id] * model_theta_dict[model_key]['nz_theta_d'] != 0, np.exp(model_dict[model_key]['log_phis'].T[word_id] + model_theta_dict[model_key]['log_theta_d']), 0.0).ravel() # Normalize distribution p(topic | word, phi, theta): topic_dist = topic_dist / topic_dist.sum() # Draw a topic from topic_dist for however many times the word occurs in the document. topics = np.random.choice(k, size=int(word_count), p=topic_dist) for topic_i in topics: model_doc_token_topics[model_key][topic_i] += 1 # now we have token-topic assingment counts for each word type present in the current document. 
# START HERE --> # update token-topic assignment counts for model_key in model_dict: model_doc_topic_counts = model_doc_token_topics[model_key] model_dict[model_key]['token_topic_counts'][subreddit] += model_doc_topic_counts # also make the token-topic distribution and add it to ongoing count model_doc_token_dist = model_doc_topic_counts / model_doc_topic_counts.sum() model_dict[model_key]['doc_topic_tokens_sums'][subreddit] += model_doc_token_dist model_dict[model_key]['doc_counts'][subreddit] += 1 # Now we are done with all documents in a subreddit. Summary stats for the subreddit can now be calculated # including the average theta distribution, the distribution of token-topic assignments, & the average # token-topic document distribution. for model_key in model_dict.keys(): # All token-topic assignments have been counted for this subreddit, so store those counts in # token_assignment_counts for later use and write them to file. token_topic_freqs_fpath = os.path.join(model_dict[model_key]['tdists_dir'], 'token_topic_freqs_' + subreddit + '.csv') write_token_topic_freqs(token_topic_freqs_fpath, model_dict[model_key]['token_topic_counts'][subreddit]) # Find average theta distribution by dividing the summed thetas by the number of documents. avg_doc_topic_fpath = os.path.join(model_dict[model_key]['tdists_dir'], 'avg_doc_topic_' + subreddit + '.txt') avg_doc_topic = model_dict[model_key]['doc_topic_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit]) np.savetxt(avg_doc_topic_fpath, avg_doc_topic) # Find the average topic distribution of each document from token-topic assignments by dividing the sum of the # document distributions by the number of documents. avg_doc_topic_tokens_fpath = os.path.join(model_dict[model_key]['tdists_dir'], 'avg_doc_topic_from_tokens_' + subreddit + '.txt') avg_doc_topic_from_tokens = model_dict[model_key]['doc_topic_tokens_sums'][subreddit] / float(model_dict[model_key]['doc_counts'][subreddit]) np.savetxt(avg_doc_topic_tokens_fpath, avg_doc_topic_from_tokens) # topic model summary files can now be written # Topic summary file. Possible things to include: # - entropy of the topic's word distribution (what does this really tell us that is useful?) # - entropy of topic over subreddits # - top N words & probabilities OR top words & probabilities up to some cumulative probability (eg, the # topic words needed to account for at least 50% of the topic's word distribution. # - number of tokens assigned to each subreddit. Can also do as a proportion of a subreddit's tokens # assigned to each topic. for model_key in model_dict: subreddit_entropy_counts, subreddit_entropy_props = get_subreddit_entropy(model_dict[model_key]['token_topic_counts']) phis = model_dict[model_key]['model'].get_topics() k = model_key[3] topic_entropies = [measures.entropy(phis[topic_i]) for topic_i in range(k)] max_subreddit_count, max_subreddit_prop = get_subreddits_w_max_topics(model_dict[model_key]['token_topic_counts']) # model_key = (training_corpus_type, sample_name, corpus_name, k) topic_summary_fpath = os.path.join(cons.lda_dir, model_key[0], model_key[1], model_key[1] + '-' + str(k), 'topics_summary.csv') write_topics_summary_file(topic_summary_fpath, subreddit_entropy_counts, subreddit_entropy_props, topic_entropies, max_subreddit_count, max_subreddit_prop, model_dict[model_key]['model'], k)
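The sampling step documented in the comments above — forming p(topic | word) from the element-wise product of a word's phi column with the document's theta, then drawing one topic per occurrence — can be exercised in isolation. A minimal self-contained sketch, using made-up phi and theta values rather than the gensim model objects and model_dict bookkeeping above, might look like:

import numpy as np
from scipy.special import xlogy

rng = np.random.default_rng(0)

# Toy model: 3 topics over a 5-word vocabulary, plus one document's theta.
phis = rng.dirichlet(np.ones(5), size=3)      # shape (k, vocab), rows sum to 1
theta = rng.dirichlet(np.ones(3))             # shape (k,)

nz_phis = phis > 0
log_phis = xlogy(nz_phis, phis)
nz_theta_d = theta > 0
log_theta_d = xlogy(nz_theta_d, theta)

word_id, word_count = 2, 4                    # a word that occurs 4 times in the doc
topic_dist = np.where(nz_phis.T[word_id] * nz_theta_d != 0,
                      np.exp(log_phis.T[word_id] + log_theta_d),
                      0.0).ravel()
topic_dist = topic_dist / topic_dist.sum()    # p(topic | word, phi, theta)

topics = rng.choice(len(theta), size=word_count, p=topic_dist)
counts = np.bincount(topics, minlength=len(theta))
print(counts)                                 # token-topic assignment counts for this word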
[ "def initialize_model_counters(model_info, subreddit_list):\r\n model_counts_dict = {}\r\n\r\n for training_corpus_type, sample_name, corpus_name, k_list in model_info:\r\n for k in k_list:\r\n model_name = sample_name + '-' + str(k)\r\n model_dir = os.path.join(cons.lda_dir, training_corpus_type, sample_name, model_name)\r\n model_tdists_dir = cons.makedir(os.path.join(model_dir, 'topic_analysis'))\r\n model_path = os.path.join(model_dir, 'model_files')\r\n\r\n lda_model = gensim.models.LdaModel.load(model_path)\r\n\r\n # Get model phi as a topic-word matrix where each row is the word distribution that constitutes the topic.\r\n phis = lda_model.get_topics()\r\n # Thompson & Blei (2018) do this, but I think phis is already normalized when calling .get_topics().\r\n phis = phis / phis.sum(axis=1, keepdims=True)\r\n # This is from Thompson & Blei (2018). Gives log of each phi value, but if phi value is zero, keeps it zero.\r\n nz_phis = phis > 0\r\n log_phis = xlogy(nz_phis, phis)\r\n\r\n model_dict = {'tdists_dir': model_tdists_dir,\r\n 'model': lda_model,\r\n 'nz_phis': nz_phis,\r\n 'log_phis': log_phis,\r\n 'token_topic_counts': {subreddit: np.zeros(k) for subreddit in subreddit_list},\r\n 'doc_topic_sums': {subreddit: np.zeros(k) for subreddit in subreddit_list},\r\n 'doc_topic_tokens_sums': {subreddit: np.zeros(k) for subreddit in subreddit_list},\r\n 'doc_counts': {subreddit: 0 for subreddit in subreddit_list}}\r\n\r\n model_counts_dict[(training_corpus_type, sample_name, corpus_name, k)] = model_dict\r\n\r\n print(training_corpus_type + '\\\\' + sample_name + '-' + str(k))\r\n return model_counts_dict", "def write_token_topic_freqs(fpath, topic_counts):\r\n\r\n topic_props = topic_counts / np.sum(topic_counts)\r\n\r\n with open(fpath, 'w', newline='') as outfile:\r\n fwriter = csv.writer(outfile)\r\n fwriter.writerow(['topic', 'token_count', 'token_proportion'])\r\n\r\n for (t_index, (t_count, t_prop)) in enumerate(zip(topic_counts, topic_props)):\r\n fwriter.writerow([t_index, t_count, t_prop])", "def nn_pf(path_to_run, model, topics, index, idx_to_docid, docid_to_doc, rel_docs=5, k=20):\n\n\n run = loadRun(path_to_run)\n for topic in run:\n passages = []\n for docid,_ in run[topic][:rel_docs]:\n passages += docid_to_doc[docid]\n\n\n encoded_passages = model.encode(passages)\n scores = {}\n labels, distances = index.knn_query(encoded_passages, k=k)\n for i in range(len(encoded_passages)):\n for docidx, dist in zip(labels[i], distances[i]):\n docid = idx_to_docid[docidx]\n if docid not in scores:\n scores[docid] = 0\n scores[docid] += 1-dist\n sorted_scores = sorted([(docidx, scores[docidx]) for docidx in scores], reverse=True, key=lambda x: x[1])\n run[topic] = sorted_scores\n return run", "def create_parallel_files(infilename, outfile_prefix, output_blank_for_failure=False):\n data = load_serialized_from_file(infilename)\n sys.stderr.write('Deserializing and processing {} graphs.'.format(len(data)))\n sys.stderr.write('Using Moses tokenization from the nltk package.\\n')\n with io.open(get_src_filename(outfile_prefix), 'w', encoding='utf8') as outfile_src, \\\n io.open(get_tgt_filename(outfile_prefix), 'w', encoding='utf8') as outfile_tgt, \\\n io.open(get_anon_filename(outfile_prefix), 'w', encoding='utf8') as outfile_anon, \\\n io.open(get_orig_filename(outfile_prefix), 'w', encoding='utf8') as outfile_orig:\n sys.stderr.write(\n 'Writing serialized graphs to {}.\\n'.format(os.path.abspath(outfile_src.name)))\n sys.stderr.write(\n 'Writing tokenized sentences to 
{}.\\n'.format(os.path.abspath(outfile_tgt.name)))\n sys.stderr.write(\n 'Writing anonymization map to {}.\\n'.format(os.path.abspath(outfile_anon.name)))\n sys.stderr.write(\n 'Writing original sentences to {}.\\n'.format(os.path.abspath(outfile_orig.name)))\n num_written = 0\n num_skipped = 0\n for label, penman_serialized in data:\n try:\n # treat unknowns same as named tokens so they'll be copied exactly\n penman_serialized = re.sub(r'_([^\\s]+)\\/(.*?_unknown)', r'UNK\\1 :carg \"\\1\"', penman_serialized)\n # simplify, linearize, and anonymize graphs\n linearized, anon_map = preprocess_penman(penman_serialized)\n # tokenize and anonymize sentences (assumes last comment is sentence)\n sentence = label.split('# ::snt ')[-1].strip()\n outfile_tgt.write('{}\\n'.format(preprocess_sentence(sentence, anon_map))) # modifies anon_map\n outfile_src.write('{}\\n'.format(linearized))\n # store anonymization info for use in postprocessing\n outfile_anon.write('{}\\n'.format(json.dumps(anon_map)))\n # also write original sentence, which will be compared against during eval\n outfile_orig.write('{}\\n'.format(_normalize_sentence(sentence)))\n num_written += 1\n except Exception as e:\n sys.stderr.write(\n 'Deserialization failed for {}, skipping. Error was: {}\\n'.format(label, e))\n num_skipped += 1\n if output_blank_for_failure:\n outfile_src.write('\\n')\n outfile_tgt.write('\\n')\n outfile_anon.write('[]\\n')\n outfile_orig.write('\\n')\n ratio_skipped = float(num_skipped) / num_written\n sys.stderr.write(\n 'Linearized {} graphs. Skipped {} due to deserialization errors ({}).\\n'.format(\n num_written, num_skipped, ratio_skipped))", "def process_data():\r\n print('Preparing data for model ready')\r\n build_vocab('train.enc')\r\n build_vocab('train.dec')\r\n token2id('train', 'enc')\r\n token2id('train', 'dec')\r\n token2id('test', 'enc')\r\n token2id('test', 'dec')", "def topics(fondz_dir, \n num_topics=20, \n num_top_words=15,\n num_threads=2, \n iterations=1000, \n doc_topics_threshold='0.1', \n optimize_interval=10):\n\n if not mallet:\n raise Exception(\"mallet not found on PATH\")\n\n text_dir = abspath(join(fondz_dir, \"derivatives\"))\n tmp_dir = tempfile.mkdtemp()\n data_file = join(tmp_dir, 'data.mallet')\n state_file = join(tmp_dir, 'state.gz')\n topics_file = join(tmp_dir, 'topics.txt')\n topic_keys_file = join(tmp_dir, 'topic_keys.txt')\n xml_topic_report = join(tmp_dir, 'report.xml')\n\n import_cmd = [\n mallet, \n 'import-dir', \n '--input', text_dir, \n '--output', data_file, \n '--keep-sequence', \n '--skip-html',\n '--remove-stopwords'\n ]\n logger.debug(\"running topic modeling import: %s\", import_cmd)\n run(import_cmd)\n\n train_cmd = [mallet, 'train-topics',\n '--input', data_file,\n '--output-state', state_file, \n '--num-topics', str(num_topics),\n '--num-threads', str(num_threads), \n '--num-iterations', str(iterations),\n '--doc-topics-threshold', str(doc_topics_threshold), \n '--optimize-interval', str(optimize_interval),\n '--num-top-words', str(num_top_words),\n '--output-doc-topics', topics_file,\n '--output-topic-keys', topic_keys_file\n ]\n logger.debug(\"running topic model training %s\", train_cmd)\n rc, stdout = run(train_cmd)\n\n results = summarize(text_dir, topics_file, topic_keys_file, import_cmd,\n train_cmd)\n\n shutil.rmtree(tmp_dir)\n\n return results", "def generate_topics(documents, store_path, nbr_topics=TOPIC_NBR, tfidf_on=False):\n logging.info(\"Start generating topics\")\n dictionary = corpora.Dictionary(documents)\n corpus = 
[dictionary.doc2bow(document) for document in documents]\n\n # Generate a tf idf model\n if tfidf_on:\n tfidf = models.TfidfModel(corpus)\n corpus = tfidf[corpus]\n topic_model = models.LdaModel(corpus, id2word=dictionary, num_topics=nbr_topics)\n\n dictionary.save(os.path.join(store_path, \"dictionary.dict\"))\n topic_model.save(os.path.join(store_path, \"model.lda\"))\n logging.info(\"Done generating topics\")", "def evaluate_models_1():\n df = prepare_general_dataset()\n X = df.loc[:, df.columns != 'bikes']\n Y = df['bikes']\n scores = []\n\n for key, value in MODELS.items():\n X = df.loc[:, df.columns != 'bikes']\n temp_scores = []\n for file in tqdm(glob.glob(value)):\n model = pd.read_csv(file)\n features = model['feature']\n\n # weights\n intercept = model['weight'].values[0]\n weights = model['weight'][1:]\n\n features_used = features.values[1:]\n X = X.filter(items=features_used)\n\n # reindex to perform series multiplication\n weights.index = X.iloc[1, :].index\n\n predictions = X.apply(lambda row: intercept + row.dot(weights), axis=1).astype('int64')\n temp_scores.append(mean_absolute_error(predictions, Y))\n\n print(f'\\nModel {key} performance:')\n print(mean(temp_scores))\n print(min(temp_scores))\n print('\\n')\n\n scores.append(temp_scores)\n\n with open('scores.txt', 'wb') as file:\n pickle.dump(scores, file)\n\n plot_scores_1(scores)", "def optimal_topic_number(filename_1, top_idf_number):\n common_texts = process_doc(filename_1, top_idf_number)\n common_dictionary = Dictionary(common_texts)\n common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]\n coherence_score = []\n for i in range(20, 40):\n lda = LdaModel(common_corpus, id2word=common_dictionary, iterations=50, num_topics=i,\n random_state=np.random.RandomState(23455))\n coherence_model_lda = CoherenceModel(model=lda, texts=common_texts, dictionary=common_dictionary,\n coherence='u_mass')\n coherence_lda = coherence_model_lda.get_coherence()\n print('\\nCoherence Score: ', coherence_lda)\n coherence_score.append(coherence_lda)\n plt.plot(range(20, 40, 1), coherence_score)\n plt.xlabel(\"Num Topics\")\n plt.ylabel(\"Coherence score\")\n plt.legend(\"coherence_values\", loc='best')\n plt.show()", "def init(self, trainfiles):\n for filepaths in trainfiles:\n\n # load files and tokenize words in sentences\n with open(filepaths, \"r\") as text:\n sent_list = tokenize_sentence(text.read())\n\n for sentences in sent_list:\n word_list = sentence_to_word(sentences)\n\n # check unknown words\n for index, words in enumerate(word_list):\n if words not in self.token_list:\n word_list[index] = \"<UNK>\"\n\n # add word to vocab\n self.token_list.append(words)\n\n word_list.insert(0, \"<s>\")\n word_list.append(\"</s>\")\n\n for i in range(len(word_list)-1):\n self.lang_model.append((word_list[i], word_list[i+1]))\n\n for (word1, word2) in self.lang_model:\n self.bigram_dict[(word1, word2)] += 1\n self.words_dict[word1] += 1", "def perform_lda(id2word,\n doc2bow,\n corpus,\n *n_topics):\n\n summary_all = []\n sub_topics_all = []\n health_topics_all = []\n\n print(n_topics)\n\n for n in n_topics:\n print('\\nLDA with {} topics...'.format(n))\n model = lda_gensim(id2word, doc2bow, n)\n\n # Gets coherence scores for trained LDA model\n # coherence_umass = None\n # coherence_umass = coh_model_umass.get_coherence()\n # print('Coherence score (u_mass): {}'.format(coherence_umass))\n # coherence_cv = coh_model_cv.get_coherence()\n # print('Coherence score (c_v): {}'.format(coherence_cv))\n\n # Gets document topics and 
key terms and health topics\n top_topics_df, terms_df = get_sub_topics(model, doc2bow, corpus, n)\n\n sub_topics = top_topics_df[['subreddit', 'health', 'topic_number']]\n sub_topics.set_index(['subreddit',\n 'health',\n ], inplace=True,\n )\n sub_topics.columns = pd.MultiIndex.from_product([sub_topics.columns,\n [n],\n ])\n sub_topics_all.append(sub_topics)\n\n health_sub_topics = sub_topics.loc[sub_topics.index.get_level_values(level=1) == 1].transpose()\n health_sub_topics = health_sub_topics.droplevel(level=0,\n )\n health_sub_topics = health_sub_topics.droplevel('health',\n axis=1,\n )\n health_sub_topics.index.name = 'topic_label'\n health_topics_all.append(health_sub_topics)\n\n health_topics_top = top_topics_df.loc[top_topics_df['health'] == 1, 'topic_number'].value_counts().rename(\n 'subreddit_count')\n\n # top_health = health_topics.idxmax()\n # possible_health_subs = top_topictops_df.loc[top_topics_df['topic_number'] == top_health]\n\n # Saves results to dictionary\n summary = {\n 'n_topics': n,\n # 'coherence_score_umass': coherence_umass,\n # 'coherence_score_cv': coherence_cv,\n 'top_health_topics': health_topics_top.to_dict(),\n 'topic_terms': terms_df['terms_list'].apply(lambda x: ', '.join(x)).to_dict()\n }\n summary_all.append(summary)\n # sub_topics_file = pd.read_pickle('all_lda_topics.pkl')\n # sub_topics_all.append(sub_topics_file)\n sub_topics_all = pd.concat(sub_topics_all,\n axis=1,\n )\n sub_topics_all.to_pickle('all_lda_topics.pkl')\n health_topics_all = pd.concat(health_topics_all)\n health_topics_all.to_pickle('health_lda_topics.pkl')\n\n return summary_all, health_topics_all", "def print_topics_by_org(filename, num_topics):\n if os.path.exists(filename):\n with open(filename, \"rb\") as file_in:\n top_topics_by_org = pickle.load(file_in)\n for organization in top_topics_by_org.keys():\n print(f\"Organization: {organization}\")\n for topic, count in top_topics_by_org[organization].most_common(num_topics):\n print(f\"Topic {topic}: {count} papers\")\n print(\"---------------------\")", "def write_output(options, models):\n writer = select_writer(options.aprolog)\n map(writer, models)", "def parallel_model(trees,argsints,argsflts,windowsize):\n print(\"inside model run function\")\n import numpy as np\n store_counts_parallel = np.empty([windowsize,16,16])\n for idx in xrange(windowsize):\n treenum, sourcebr, destbr, Ne, nsnps, seed = argsints[idx,:]\n mtimerecent, mtimedistant, mrate, mut = argsflts[idx,:]\n mod = Model(tree = trees[treenum],\n admixture_edges = [(sourcebr,destbr,mtimerecent,mtimedistant,mrate)],\n Ne = Ne,\n nsnps = nsnps,\n mut = mut,\n seed = seed,\n ntests = 1)\n mod.run()\n store_counts_parallel[idx,:,:]=mod.counts\n return store_counts_parallel", "def save_training(tokens_all, file_name='training_affiliation.json'):\n training = [tag_pos_token(tokens)[:-1] for tokens in tokens_all]\n training_json = [token2json(tokens) for tokens in training]\n json.dump(training_json, open(file_name, 'w'))", "def split():\n with open(SPEECHES_FILE, 'rb') as pickle_file:\n speeches = pickle.load(pickle_file)\n\n for politician, speeches in speeches.items():\n filename = './data/{}.txt'.format(politician)\n with open(filename, 'wt', encoding='utf8') as speeches_file:\n num_char = 0\n num_words = 0\n for speech in speeches:\n # write header and text of speech to file\n session = speech['session']\n header = '# {period:} - {title:} am {published:} ({url:})\\n'.format(\n **session)\n speeches_file.write(header)\n\n # write speech\n speech_text = 
speech['speech'].replace('- - ', '') # parenthesis artifcat\n speeches_file.write(speech_text + '\\n\\n')\n\n # count metrics\n num_char += len(speech['speech'])\n num_words += len(speech['speech'].split())\n\n logger.info('Metrics of {}: chars: {}, words: {}'.format(\n politician, num_char, num_words))", "def main():\n #load data needed for ploting topicXtime\n print('loading data...')\n model_name = str(sys.argv[1])\n num_topics = str(sys.argv[2])\n file_name = get_document_topic_distribution(model_name,num_topics)\n topic_matrix = pd.read_csv(file_name,index_col=0)\n print('data loaded!!')\n #transform topic_matrix according to different model\n print('transforming data...')\n get_document_item_vectorize = np.vectorize(get_document_item)\n if (model_name=='Dc_v1'):\n topic_matrix['dealer'] = get_document_item_vectorize(topic_matrix.index,0)\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Dc_v2'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,2)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Dc_v3'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,2)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Dc_v4'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,2)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,1))\n if (model_name=='Tc_v1'):\n topic_matrix['dealer'] = pd.Series(list(zip(get_document_item_vectorize(topic_matrix.index,0),get_document_item_vectorize(topic_matrix.index,1)))).values\n topic_matrix.index = pd.to_datetime(get_document_item_vectorize(topic_matrix.index,2))\n \"\"\"\n #transform 0-based index to 1-based indexing for readability\n increment_topic_dict = {}\n for i in range(len(topic_matrix.columns)):\n increment_topic_dict[str(i)] = str(i+1)\n topic_matrix.rename(columns=increment_topic_dict,inplace=True)\n \"\"\"\n print('data transformed!!')\n print('creating plots...')\n dealer_df_list = list(map(lambda x: get_dealer_by_ID(topic_matrix,x,model_name),list(topic_matrix['dealer'].unique())))\n cpu_cores = multiprocessing.cpu_count() - 1\n pool = multiprocessing.Pool(cpu_cores)\n #pool.map(topicXtime_plotly_parallel,dealer_df_list)\n pool.map(topicXtime_matplotlib,dealer_df_list)\n pool.close()\n print('plots created!!')", "def create_omniglot_allcharacters_task_distribution(path_to_pkl,\n num_training_samples_per_class=10,\n num_test_samples_per_class=-1,\n num_training_classes=20,\n meta_batch_size=5):\n\n with open(path_to_pkl, 'rb') as f:\n d = pickle.load(f)\n trainX_ = d['trainX']\n trainY_ = d['trainY']\n testX_ = d['testX']\n testY_ = d['testY']\n trainX_.extend(testX_)\n trainY_.extend(testY_)\n\n global charomniglot_trainX\n global charomniglot_trainY\n global charomniglot_testX\n global charomniglot_testY\n\n cutoff = 36\n charomniglot_trainX = trainX_[:cutoff]\n charomniglot_trainY = trainY_[:cutoff]\n charomniglot_testX = trainX_[cutoff:]\n charomniglot_testY = trainY_[cutoff:]\n\n # Create a single large dataset with all characters, each for train and test, and rename the targets appropriately\n trX = []\n trY = []\n teX = []\n teY = 
[]\n\n cur_label_start = 0\n for alphabet_i in range(len(charomniglot_trainY)):\n charomniglot_trainY[alphabet_i] += cur_label_start\n trX.extend(charomniglot_trainX[alphabet_i])\n trY.extend(charomniglot_trainY[alphabet_i])\n cur_label_start += len(set(charomniglot_trainY[alphabet_i]))\n\n cur_label_start = 0\n for alphabet_i in range(len(charomniglot_testY)):\n charomniglot_testY[alphabet_i] += cur_label_start\n teX.extend(charomniglot_testX[alphabet_i])\n teY.extend(charomniglot_testY[alphabet_i])\n cur_label_start += len(set(charomniglot_testY[alphabet_i]))\n\n trX = np.asarray(trX, dtype=np.float32) / 255.0\n trY = np.asarray(trY, dtype=np.float32)\n teX = np.asarray(teX, dtype=np.float32) / 255.0\n teY = np.asarray(teY, dtype=np.float32)\n\n charomniglot_trainX = trX\n charomniglot_testX = teX\n charomniglot_trainY = trY\n charomniglot_testY = teY\n\n print('Loaded ', len(trY), 'training classes and ', len(teY), 'test classes.')\n\n metatrain_tasks_list = [ClassificationTask(charomniglot_trainX,\n charomniglot_trainY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=-1)] # defaults to num_train / (num_train+num_test)\n metatest_tasks_list = [ClassificationTask(charomniglot_testX,\n charomniglot_testY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=-1)]\n\n metatrain_task_distribution = TaskDistribution(tasks=metatrain_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n metatest_task_distribution = TaskDistribution(tasks=metatest_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n # TODO: split into validation and test!\n return metatrain_task_distribution, metatest_task_distribution, metatest_task_distribution", "def recognize(models: dict, test_set: SinglesData):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n probabilities = []\n guesses = []\n\n all_sequences = test_set.get_all_sequences()\n all_Xlenghts = test_set.get_all_Xlengths()\n\n print('Started recognizing ...')\n\n for i, test_word in zip( range(0,len(all_sequences) ), test_set.wordlist):\n\n bestLogL = float(\"-inf\")\n bestWord = ''\n\n myProbs = {}\n \n for word in models.keys():\n\n model = models[word]\n\n try: \n\n logL = model.score(all_sequences[i][0],all_Xlenghts[i][1] )\n\n if logL > bestLogL:\n bestLogL = logL\n bestWord = word\n\n except Exception:\n pass\n \n myProbs[word] = logL \n \n \n guesses.append(bestWord)\n probabilities.append(myProbs)\n \n print('Finished analyzing {} words '.format(len(all_sequences)))\n\n return probabilities, guesses" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a units_dict from the environment instance if the numerical value of 'factor' matches a derived unit defined in the environment instance and the dimensions stored in the units_dict are equal to 'dims'. Returns an empty dict otherwise.
def _get_units_by_factor( factor: float, dims: Dimensions, units_env: Callable, power: Union[int, float] ) -> dict: ## TODO Write a pow() to handle fractions and rationals new_factor = fraction_pow(factor, -Fraction(1 / power)) units_match = _match_factors(new_factor, units_env()) try: units_name = tuple(units_match.keys())[0] except IndexError: units_name = "" retrieved_dims = units_match.get(units_name, dict()).get("Dimension", dict()) if dims != retrieved_dims: return dict() return units_match
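As a rough illustration of the lookup above: the accumulated factor is "un-powered", compared against the factors recorded in the units environment, and a candidate is rejected unless its dimensions also agree. The sketch below is only an assumption-laden stand-in — a hypothetical one-entry environment with an invented kip factor, a plain math.isclose comparison instead of the module's Fraction-based fraction_pow and _match_factors machinery, and a simplified exponent-sign convention:

import math

# Hypothetical mini environment: unit name -> {"Dimension": ..., "Factor": ...}
UNITS_ENV = {
    "kip": {"Dimension": (1, 1, -2, 0, 0, 0, 0), "Factor": 1 / 4448.2216},
}

def get_units_by_factor(factor, dims, units_env, power):
    """Return the matching entry if factor**(1/power) is (approximately) a known
    unit factor and the dimensions agree; otherwise an empty dict."""
    new_factor = factor ** (1 / power)
    for name, defn in units_env.items():
        if math.isclose(defn["Factor"], new_factor) and defn["Dimension"] == dims:
            return {name: defn}
    return {}

# A squared-kip quantity carries the kip factor squared; the base (force)
# dimensions are passed in for the dimension check:
print(get_units_by_factor((1 / 4448.2216) ** 2,
                          (1, 1, -2, 0, 0, 0, 0), UNITS_ENV, power=2))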
[ "def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]:\n derived = units_env()[\"derived\"]\n defined = units_env()[\"defined\"]\n all_units = ChainMap(defined, derived)\n potential_inv = None # A flag to catch a -1 value (an inversion)\n quotient = None\n quotient_result = None\n for dimension_key in all_units.keys():\n if _check_dims_parallel(dimension_key, dimensions):\n quotient = cache_vec_divide(dimensions, dimension_key, ignore_zeros=True)\n mean = cache_vec_mean(quotient, ignore_empty=True)\n if mean == -1:\n potential_inv = quotient\n elif -1 < mean < 1:\n return (\n None # Ignore parallel dimensions if they are fractional dimensions\n )\n else:\n quotient_result = quotient\n return quotient_result or potential_inv # Inversion ok, if only option", "def get_dimensions(self, units):\n return self.id.to(units), self.od.to(units)", "def utility_characterization_factors(self) -> dict[tuple[str, str], tuple[float, AbsoluteUnitsOfMeasure]]:\n return bst.HeatUtility.characterization_factors", "def units_dict(cls, module_name: str) -> Dict[str, str]:\n return _UNITS.setdefault(module_name, dict())", "def _unit_dict(suffix):\n\n matched_unit = _get_unit(suffix)\n \n return {\n 'f' + matched_unit : 1e-15,\n 'p' + matched_unit : 1e-12,\n 'n' + matched_unit : 1e-9,\n 'u' + matched_unit : 1e-6,\n 'm' + matched_unit : 1e-3,\n '' + matched_unit : 1e0,\n 'k' + matched_unit : 1e3,\n 'M' + matched_unit : 1e6\n }", "def _dimensions_check(self, element):\n if not (\"dimensions\" in self.attributes and \"dimensions\" in element.attributes):\n return True\n elif \"dimensions\" in self.attributes and \"dimensions\" in element.attributes:\n #The dimension text has to match perfectly. If variables names are specified\n #for bounds, we have no way of knowing whether the sizes are the same before\n #runtime. 
However, we can do some cleanup befor comparing.\n match = True\n selfdim = self.attributes[\"dimensions\"].lower().split(\",\")\n eldim = element.attributes[\"dimensions\"].lower().split(\",\")\n\n i = 0\n #We only need to compare dimensions until one fails\n while match and i < len(selfdim):\n if selfdim[i].strip() != eldim[i].strip():\n match = False\n i += 1\n \n return match\n else:\n return False", "def canonical_units(self):\n return {k: self.units[k].canonical_unit for k in self.units.keys()}", "def _get_required_units_and_dtype(key):\n try:\n unit = DEFAULT_UNITS[key][\"unit\"]\n except KeyError:\n # hold the error and check for valid substrings\n key = _find_dict_key(key)\n unit = DEFAULT_UNITS[key][\"unit\"]\n\n try:\n dtype = DEFAULT_UNITS[key][\"dtype\"]\n except KeyError:\n dtype = np.float32\n\n return unit, dtype", "def getdimensionunits(self, dname, vname=None):\n x = self.dimensionobject(dname, vname)\n return x.units", "def _powers_of_derived(dims: Dimensions, units_env: Callable) -> Union[int, float]:\n quotient_1 = _dims_quotient(dims, units_env)\n quotient_2 = _dims_basis_multiple(dims)\n quotient_1_mean = None\n if quotient_1 is not None:\n quotient_1_mean = cache_vec_mean(quotient_1, ignore_empty=True)\n\n if quotient_1 is not None and quotient_1_mean != -1:\n power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True)\n return ((power_of_derived or 1), base_dimensions)\n elif quotient_1_mean == -1 and quotient_2 is not None: # Situations like Hz and s\n power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True)\n return ((power_of_basis or 1), base_dimensions)\n elif quotient_1_mean == -1: # Now we can proceed with an inverse unit\n power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True)\n return ((power_of_derived or 1), base_dimensions)\n elif quotient_2 is not None:\n power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True)\n return ((power_of_basis or 1), base_dimensions)\n else:\n return (1, dims)", "def check_units(self, ds):\n ret_val = []\n\n deprecated = ['level', 'layer', 'sigma_level']\n\n for k, v in ds.dataset.variables.iteritems():\n\n # skip climatological vars, boundary vars\n if v in self._find_clim_vars(ds) or \\\n v in self._find_boundary_vars(ds).itervalues() or \\\n v.shape == ():\n continue\n\n # skip string type vars\n if v.dtype.char == 'S':\n continue\n\n units = getattr(v, 'units', None)\n\n # 1) \"units\" attribute must be present\n presence = Result(BaseCheck.HIGH, units is not None, ('units', k, 'present'))\n if not presence.value:\n presence.msgs = ['units attribute required']\n ret_val.append(presence)\n continue\n\n # 2) units attribute must be a string\n astring = Result(BaseCheck.HIGH, isinstance(units, basestring), ('units', k, 'string'))\n if not astring.value:\n astring.msgs = [\"units not a string (%s)\" % type(units)]\n ret_val.append(astring)\n continue\n\n # now, units are present and string\n # 3) units are not deprecated\n resdeprecated = Result(BaseCheck.LOW, not units in deprecated, ('units', k, 'deprecated'))\n if not resdeprecated.value:\n resdeprecated.msgs = ['units (%s) is deprecated' % units]\n ret_val.append(resdeprecated)\n continue\n\n # 4) units are known\n\n knownu = Result(BaseCheck.HIGH, 
units_known(units), ('units', k, 'known'))\n if not knownu.value:\n knownu.msgs = ['unknown units type (%s)' % units]\n ret_val.append(knownu)\n #continue\n # units look ok so far, check against standard name / cell methods\n std_name = getattr(v, 'standard_name', None)\n std_name_modifier = None\n\n if isinstance(std_name, basestring):\n if ' ' in std_name:\n std_name, std_name_modifier = std_name.split(' ', 1)\n\n\n # if no standard name or cell_methods, nothing left to do\n if std_name is None and not hasattr(v, 'cell_methods'):\n #ret_val.append(Result(BaseCheck.HIGH, True, ('units', k, 'ok')))\n continue\n\n # 5) if a known std_name, use the units provided\n if std_name is not None and std_name in self._std_names:\n\n std_units = self._std_names[std_name].canonical_units\n\n #@TODO modifiers changes units\n msgs = []\n valid = True\n if units is not None:\n if units.split(\" \")[0] in ['day', 'days', 'd', 'hour', 'hours', 'hr', 'hrs', 'h', 'year', 'years', 'minute', 'minutes', 'm', 'min', 'mins', 'second', 'seconds', 's', 'sec', 'secs']:\n if len(units.split(\" \"))>1:\n if units.split(\" \")[1] == 'since':\n std_units = units\n else:\n std_units = units\n \n if std_units == 'm' and units in ['meter', 'meters']:\n std_units = units\n \n if units != std_units and units not in ['degrees_north', 'degree_N', 'degreeN', 'degreesN', 'degrees_east', 'degree_E', 'degreeE', 'degreesE'] and not units_convertible(units, std_units):\n msgs = ['units are %s, standard_name units should be %s' % (units, std_units)]\n valid = False\n else:\n valid = False\n msgs = ['The unit for variable %s in of type None.'%name]\n\n ret_val.append(Result(BaseCheck.HIGH, valid, ('units', k, 'standard_name'), msgs))\n\n # 6) cell methods @TODO -> Isnt this in the check_cell_methods section?\n #if hasattr(v, 'cell_methods'):\n # cell_methods = v.cell_methods\n#\n # # placemarker for future check\n # ret_val.append(Result(BaseCheck.HIGH, False, ('units', k, 'cell_methods'), ['TODO: implement cell_methods check']))\n\n return ret_val", "def _build_cache(self):\n self._dimensional_equivalents = dict()\n\n deps = dict((name, set(definition.reference.keys() if definition.reference else {}))\n for name, definition in self._units.items())\n\n for unit_names in solve_dependencies(deps):\n for unit_name in unit_names:\n if '[' in unit_name:\n continue\n parsed_names = tuple(self.parse_unit_name(unit_name))\n _prefix = None\n if parsed_names:\n _prefix, base_name, _suffix = parsed_names[0]\n else:\n base_name = unit_name\n prefixed = True if _prefix else False\n try:\n uc = ParserHelper.from_word(base_name)\n\n bu = self._get_root_units(uc)\n di = self._get_dimensionality(uc)\n\n self._root_units_cache[uc] = bu\n self._dimensionality_cache[uc] = di\n\n if not prefixed:\n if di not in self._dimensional_equivalents:\n self._dimensional_equivalents[di] = set()\n\n self._dimensional_equivalents[di].add(self._units[base_name]._name)\n\n except Exception as e:\n logger.warning('Could not resolve {0}: {1!r}'.format(unit_name, e))", "def construct_ingredient_dict(self, scale_factor):\n ingredient_dict = {}\n for item in self.ingredients_list:\n quantity_string = \"\"\n item_name_string = \"\"\n for token in item.split(' '):\n if token in Recipe.measurement_set or Recipe.is_int(token):\n if Recipe.is_int(token):\n token = str(int(token) * scale_factor)\n quantity_string += token + ' '\n else:\n item_name_string += token + ' '\n ingredient_dict[item_name_string.strip()] = quantity_string.strip()\n return ingredient_dict", "def 
_compare_units(dic):\n # the lookup variable is a list of list, each list contains all the\n # possible name for a units\n lookup = [['meter', 'metre'], ['meters', 'metres'], ['kilometer',\n 'kilometre'], ['kilometers', 'kilometres']]\n for l in lookup:\n for n in range(len(dic['unit'])):\n if dic['unit'][n].lower() in l:\n dic['unit'][n] = l[0]\n for n in range(len(dic['units'])):\n if dic['units'][n].lower() in l:\n dic['units'][n] = l[0]\n return dic", "def units(self) -> int:\n units_mapping = dict(\n system=shared_enum.UnitSystem,\n flow=shared_enum.FlowUnits,\n pollutant=shared_enum.ConcUnits,\n )\n return {attribute: units_mapping[attribute](unit).name for unit, attribute in zip(\n output.get_units(self.handle),\n units_mapping.keys())}", "def _update_dimensions(self, dimensions):\n if self._dimensions:\n new_dimensions = self._dimensions.copy()\n else:\n new_dimensions = {}\n if dimensions:\n new_dimensions.update(dimensions)\n\n return new_dimensions", "def _handle_parameter_units(model, fitted_parameters_cube, param_units):\n\n fitted_parameters_dict = {}\n\n for index in range(len(model.parameters)):\n key = model.param_names[index]\n _ary = fitted_parameters_cube[index, :, :]\n fitted_parameters_dict[key] = u.Quantity(_ary, param_units[index])\n\n return fitted_parameters_dict", "def active_observation_shape(self):\r\n if not isinstance(self._env.observation_space, spaces.Dict):\r\n return super(GymAdapter, self).active_observation_shape\r\n\r\n observation_keys = (\r\n self.observation_keys\r\n or list(self._env.observation_space.spaces.keys()))\r\n\r\n active_size = sum(\r\n np.prod(self._env.observation_space.spaces[key].shape)\r\n for key in observation_keys)\r\n\r\n active_observation_shape = (active_size, )\r\n\r\n return active_observation_shape", "def test_environment_specs_roundtrip(self):\n # Each spec has a different shape, type and name\n observation_spec = specs.Array((1, 2, 3), np.float32, 'spec1')\n action_spec = specs.Array((4, 5), np.float64, 'spec2')\n reward_spec = specs.Array((1,), np.int32, 'spec3')\n discount_spec = specs.Array((2,), np.int64, 'spec4')\n\n env = CustomSpecsEnvironment(observation_spec, action_spec, reward_spec,\n discount_spec)\n\n env_specs = spec_codec.encode_environment_specs(env)\n\n decoded_specs = spec_codec.decode_environment_specs(env_specs)\n self.assertEqual(decoded_specs['observation_spec'], observation_spec)\n self.assertEqual(decoded_specs['action_spec'], action_spec)\n self.assertEqual(decoded_specs['reward_spec'], reward_spec)\n self.assertEqual(decoded_specs['discount_spec'], discount_spec)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Part of the __str__ and __repr__ process. Returns a string representing the SI unit components of the Physical instance, extracted from the list of tuples 'unit_components', using the 'repr_format' passed in by the calling _repr_x_ function. If 'repr_format' is not given, terminal output is assumed.
def _get_unit_string(unit_components: list, repr_format: str) -> str: dot_operator = "·" # new: · , # old: ⋅ pre_super = "" post_super = "" pre_symbol = "" post_symbol = "" if repr_format == "html": dot_operator = "&#8901;" pre_super = "<sup>" post_super = "</sup>" elif repr_format == "latex": dot_operator = " \\cdot " pre_symbol = "\\mathrm{" post_symbol = "}" pre_super = "^{" post_super = "}" str_components = [] kg_only = "" for symbol, exponent in unit_components: if exponent: kg_only = symbol if exponent == 1: this_component = f"{pre_symbol}{symbol}{post_symbol}" else: if not repr_format: exponent = _get_superscript_string(str(exponent)) this_component = ( f"{pre_symbol}{symbol}{post_symbol}" f"{pre_super}{exponent}{post_super}" ) str_components.append(this_component) if kg_only == "kg": # Hack for lone special case of a kg only Physical return dot_operator.join(str_components).replace("kg", "g") return dot_operator.join(str_components)
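A small standalone demo of the same joining behaviour can make the three output modes concrete. Here a simple str.translate helper stands in for _get_superscript_string (defined elsewhere in the module), zero exponents are simply skipped, and the lone-kg special case is omitted:

SUPERSCRIPTS = str.maketrans("0123456789-.", "⁰¹²³⁴⁵⁶⁷⁸⁹⁻·")

def to_superscript(exponent: str) -> str:
    return exponent.translate(SUPERSCRIPTS)

def unit_string(components, repr_format=""):
    dot, pre_sup, post_sup, pre_sym, post_sym = "·", "", "", "", ""
    if repr_format == "html":
        dot, pre_sup, post_sup = "&#8901;", "<sup>", "</sup>"
    elif repr_format == "latex":
        dot, pre_sup, post_sup = " \\cdot ", "^{", "}"
        pre_sym, post_sym = "\\mathrm{", "}"
    parts = []
    for symbol, exponent in components:
        if not exponent:
            continue
        if exponent == 1:
            parts.append(f"{pre_sym}{symbol}{post_sym}")
        else:
            exp = str(exponent) if repr_format else to_superscript(str(exponent))
            parts.append(f"{pre_sym}{symbol}{post_sym}{pre_sup}{exp}{post_sup}")
    return dot.join(parts)

components = [("kg", 1), ("m", 1), ("s", -2)]
print(unit_string(components))           # kg·m·s⁻²
print(unit_string(components, "latex"))  # \mathrm{kg} \cdot \mathrm{m} \cdot \mathrm{s}^{-2}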
[ "def __str__(self):\n return unit_format.Generic.to_string(self)", "def _repr_latex_(self):\n return unit_format.Latex.to_string(self)", "def __str__(self):\n return str(self.unitName + ' (' + self.hexLocation + ')')", "def __str__(self):\r\n return str(self.value) + ' ' + self.units", "def get_str(self, unit=None, **vargsd):\n # logging.critical(f'{unit}, {vargsd}')\n if unit is None:\n unit = self._displayUnit\n value = self.convert2unit(self._value, unit)\n if self._str_quantization is None:\n ret = qnt.quant(value, rettype='string', **DEFAULT_STR_QUANTIZATION)\n else:\n ret = qnt.quant(value, rettype='string', **self._str_quantization)\n if not vargsd.get('alignment', True):\n ret = ret.strip() #IGNORE:E1103\n if vargsd.get('withUnit', True):\n if vargsd.get('alignment', True):\n ret = '%s %-7s' % (ret, unit)\n if vargsd.get('withQuantity', True):\n ret = '%s (%s)' % (ret, self.__class__.__name__)\n else:\n ret = '%s %s' % (ret, unit)\n if vargsd.get('withQuantity', True):\n ret = '%s (%s)' % (ret, self.__class__.__name__)\n return ret", "def units_html_list():\n from IPython.display import HTML\n table = \"<table>\"\n table += \"<tr><th>Name</th><th>Base Unit</th><th>Quantity</th></tr>\"\n for name in unit_table:\n unit = unit_table[name]\n if isinstance(unit, PhysicalUnit):\n if unit.prefixed is False:\n if isinstance(unit.baseunit, PhysicalUnit):\n baseunit = '$ %s $' % unit.baseunit\n else:\n baseunit = '$ %s $' % _pretty(unit.baseunit.name)\n table += \"<tr><td>\" + unit.name + '</td><td>' + baseunit + \\\n '</td><td><a href=\"' + unit.url + '\" target=\"_blank\">' + unit.verbosename + \\\n '</a></td></tr>'\n table += \"</table>\"\n return HTML(table)", "def Units(self) -> str:", "def cits_unit_Object(self, unit=None):\n this_unit = \" \\0\"\n if unit != None:\n this_unit = unit + \"\\0\"\n brickByte = self.convStringToByte(\"unitstr\\0\")\n brickByte += self.convCharToByte(\"s\")\n brickByte += self.convStringToByte(this_unit)\n return brickByte, len(brickByte)", "def __str__(self):\n s = \"{0:15s} {1:30s}\".format(self.type, self.name)\n if (self.quantity):\n s += \" {0:10s}\".format(str(self.quantity))\n if (self.pct):\n s += \" ({0:5.1f}%)\".format(self.pct)\n if (len(self.properties) > 0):\n prop_strs = []\n for e in sorted(self.properties.keys()):\n prop_strs.append(self.properties[e].short_str())\n s += \" (\" + \", \".join(prop_strs) + \")\"\n return s", "def __str__(self):\n import abjad\n items = [str(_) for _ in self]\n separator = ' '\n if self.item_class is abjad.NumberedPitchClass:\n separator = ', '\n return 'PC<{}>'.format(separator.join(items))", "def __repr__(self) -> str:\r\n string = f\"Material = {self.mat}\\n\\n\"\r\n string += f\"temperature = {self.temperature}\\npressure = {self.pressure}\\n\"\r\n return string", "def __display_unit_name(self, amount: float, unit: str) -> str:\n unit = self.__pluralize_unit(unit) if amount != 1 else unit\n return self.display_names.get(unit, unit)", "def __str__(self):\n if self.__orientation :\n ori = \"vertically\"\n else :\n ori = \"horizontally\"\n return \"Vehicle {} of size {} and positioned {}.\".format(self.__name, self.__size, ori)", "def test_Units_formatted(self):\n u = Units(\"W\")\n self.assertEqual(u.units, \"W\")\n self.assertEqual(u.formatted(names=True), \"watt\")\n self.assertEqual(u.formatted(definition=True), \"m2.kg.s-3\")\n self.assertEqual(\n u.formatted(names=True, definition=True),\n \"meter^2-kilogram-second^-3\",\n )\n self.assertEqual(u.formatted(), \"W\")\n\n u = Units(\"tsp\")\n 
self.assertEqual(u.formatted(names=True), \"4.928921875e-06 meter^3\")\n u = Units(\"tsp\", names=True)\n self.assertEqual(u.units, \"4.928921875e-06 meter^3\")\n\n u = Units(\"m/s\", formatted=True)\n self.assertEqual(u.units, \"m.s-1\")\n\n u = Units(\"Watt\", formatted=True)\n self.assertEqual(u.units, \"W\")\n u = Units(\"Watt\", names=True)\n self.assertEqual(u.units, \"watt\")\n u = Units(\"Watt\", definition=True)\n self.assertEqual(u.units, \"m2.kg.s-3\")\n u = Units(\"Watt\", names=True, definition=True)\n self.assertEqual(u.units, \"meter^2-kilogram-second^-3\")\n\n u = Units(\"days since 1900-1-1 03:05\", names=True)\n self.assertEqual(u.units, \"day since 1900-01-01 03:05:00\")\n u = Units(\"days since 1900-1-1 03:05\", formatted=True)\n self.assertEqual(u.units, \"d since 1900-01-01 03:05:00\")\n u = Units(\"days since 1900-1-1 03:05\")\n self.assertEqual(u.formatted(), \"d since 1900-01-01 03:05:00\")\n\n u = Units(\"hours since 2100-1-1\", calendar=\"noleap\", names=True)\n self.assertEqual(u.units, \"hour since 2100-01-01 00:00:00\")\n u = Units(\"hours since 2100-1-1\", calendar=\"noleap\", formatted=True)\n self.assertEqual(u.units, \"h since 2100-01-01 00:00:00\")\n u = Units(\"hours since 2100-1-1\", calendar=\"noleap\")\n self.assertEqual(u.formatted(), \"h since 2100-01-01 00:00:00\")", "def __repr__(self):\n\n if not isinstance(self.value, dict):\n return super().__repr__()\n\n values = \",\".join([f\"{key}={value:.6f}\" for key, value in self.value.items()])\n return f\"{self.name}_meter({values})\"", "def __str__(self):\r\n representation_string = '{:^5}\\t{:^20}\\n\\n'.format('S. No.', 'Disk Library')\r\n\r\n for index, library in enumerate(self._libraries):\r\n sub_str = '{:^5}\\t{:20}\\n'.format(index + 1, library)\r\n representation_string += sub_str\r\n\r\n return representation_string.strip()", "def formatInternalValue(self, *args) -> \"std::string\" :\n return _core.UnitsManager_formatInternalValue(self, *args)", "def formatted_result(self) -> str:\n units = self.__display_unit_name(self.to_amount, self.to_unit)\n return f\"{self.__format_float(self.to_amount)} {units}\"", "def render(calculator) -> str:\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns 'symbol' formatted appropriately for the 'repr_format' output.
def _format_symbol(prefix: str, symbol: str, repr_format: str = "") -> str: # if r"\text" or "^" in symbol: # in case pre-formatted latex from unit_string # return symbol symbol_string_open = "" symbol_string_close = "" dot_operator = "·" ohm = "Ω" if repr_format == "html": dot_operator = "&#8901;" ohm = "&#0937;" elif repr_format == "latex": dot_operator = " \\cdot " ohm = "$\\Omega$" symbol_string_open = "\\mathrm{" symbol_string_close = "}" symbol = ( symbol.replace("·", symbol_string_close + dot_operator + symbol_string_open) .replace("*", symbol_string_close + dot_operator + symbol_string_open) .replace("Ω", ohm) ) formatted_symbol = f"{symbol_string_open}{prefix}{symbol}{symbol_string_close}" if symbol.startswith( "\\mathrm{" ): # special case for 'single dimension' Physicals... formatted_symbol = f"{symbol[0:8]}{prefix}{symbol[8:]}" return formatted_symbol
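To make the behaviour concrete, the snippet below is a trimmed restatement of the logic above (omitting the final special case for symbols that already start with "\mathrm{"), with the expected strings worked out by tracing the code:

def format_symbol(prefix, symbol, repr_format=""):
    # Trimmed restatement, for illustration only.
    dot, ohm, open_, close = "·", "Ω", "", ""
    if repr_format == "html":
        dot, ohm = "&#8901;", "&#0937;"
    elif repr_format == "latex":
        dot, ohm, open_, close = " \\cdot ", "$\\Omega$", "\\mathrm{", "}"
    symbol = (symbol.replace("·", close + dot + open_)
                    .replace("*", close + dot + open_)
                    .replace("Ω", ohm))
    return f"{open_}{prefix}{symbol}{close}"

assert format_symbol("k", "Ω") == "kΩ"
assert format_symbol("k", "Ω", "html") == "k&#0937;"
assert format_symbol("k", "N·m", "latex") == "\\mathrm{kN} \\cdot \\mathrm{m}"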
[ "def __repr__(self):\r\n return \"<Symbol({0}, {1})>\".format(self.id, self.language.name)", "def symbol(self):\n\t\tif self.rot == \"x\":\n\t\t\treturn self.symbol_tx\n\t\telif self.rot == \"z\":\n\t\t\treturn self.symbol_tz\n\t\telse:\n\t\t\treturn self.symbol_ty", "def priceToString(price, symbol=\"symbol\", position=\"after\"):", "def name(self):\n return self._symbol.name", "def __repr__(self):\n self.strg = replace_sym(self.strg, '___', self.frame+3)\n return replace_sym(self.strg, self.player.symbol, self.frame)", "def currency_symbol():\n if settings.CONVERT_XPF:\n return u\"XPF\"\n return get_format('CURRENCY_SYMBOL')", "def _symbol(label, flags, taillen=1):\n tail = \"\\n\" if (flags & ISLEAF) else \"-\" * taillen\n isonlychild = (flags & ISFIRST) and (flags & ISLAST)\n if isonlychild:\n stem = \"-\"\n elif flags & ISFIRST:\n stem = \"+\"\n elif flags & ISLAST:\n stem = \"`\"\n else:\n stem = \"|\"\n return \"%s-%s%s\" % (stem, label, tail)", "def __repr__(self):\n k = self._k\n if k == 1:\n kth = 'First'\n elif k == 2:\n kth = 'Second'\n elif k == 3:\n kth = 'Third'\n else:\n kth = '%s-th'%k\n return \"%s derivative of %s\"%(kth, self._lseries)", "def latex(self, rate = False):\n return \"{}: {} {} {}\".format(sp.latex(self.reactionid),\n sp.latex(self.reactant.symp()),\n str(\"\\\\xrightarrow{\" + sp.latex(self.rate if rate else self.kinetic_param) + \"}\") if self.rate else str(\"\\\\rightarrow\"),\n sp.latex(self.product.symp()))", "def quote_table_name(self, symbol: [str, Symbol]):\n if isinstance(symbol, Symbol):\n name = symbol.name\n else:\n name = symbol\n return SYMBOL_PREFIX+name+TICK_SUFFIX", "def getSymbol(id):", "def _sympy_(self) -> SympyFormat:\n return self.as_sympy()", "def symbol_name_or_value(val):\n if isinstance(val, symbol):\n return val.name\n return str(val)", "def label(self) -> str:\n label = self.expression.replace(\"_\", \"\\\\;\")\n if self.units_kind:\n symbol = wt_units.get_symbol(self.units)\n if symbol is not None:\n for v in self.variables:\n vl = \"%s_{%s}\" % (symbol, v.label)\n vl = vl.replace(\"_{}\", \"\") # label can be empty, no empty subscripts\n label = label.replace(v.natural_name, vl)\n val = round(self.value, self.round_spec) if self.round_spec is not None else self.value\n label += r\"\\,=\\,{}\".format(format(val, self.format_spec))\n if self.units_kind:\n label += rf\"\\,{wt_units.ureg.Unit(self.units):~}\"\n label = r\"$\\mathsf{%s}$\" % label\n return label", "def _repr_latex_(self):\n return unit_format.Latex.to_string(self)", "def _repr_latex_(self):\n return f\"${self._reprlatex}$\"", "def atomicNumberToSymbol(number):\n\tif(number==1):\n\t\treturn 'H'\n\telif(number==6):\n\t\treturn 'C'\n\telif(number==8):\n\t\treturn 'O'\n\telif(number==7):\n\t\treturn 'N'\n\telif(number==16):\n\t\treturn 'S'\n\telif(number==14):\n\t\treturn 'Si'\n\telse:\n\t\treturn 'Err'", "def __repr__(self):\n return f'<Stock:{self.stock_id},{self.stock_name}>'", "def symbol(self) -> Optional[str]:\n if (\n self.current_command_type == Command.A\n or self.current_command_type == Command.L\n ):\n return self._symbol" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number in 'power' as a formatted exponent for text display.
def _format_exponent(power: Union[int, float], repr_format: str = "", eps=1e-7) -> str: if power == 1: return "" if abs((abs(power) - round(abs(power)))) <= eps: power = int(round(power)) exponent = str(power) if not repr_format: exponent = _get_superscript_string(exponent) return exponent
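A worked sketch of the same flow, with a str.translate table standing in for _get_superscript_string (which is defined elsewhere in the module):

SUPERSCRIPTS = str.maketrans("0123456789-", "⁰¹²³⁴⁵⁶⁷⁸⁹⁻")

def format_exponent(power, repr_format="", eps=1e-7):
    if power == 1:
        return ""
    if abs(abs(power) - round(abs(power))) <= eps:
        power = int(round(power))
    exponent = str(power)
    if not repr_format:
        exponent = exponent.translate(SUPERSCRIPTS)
    return exponent

print(format_exponent(2))              # ²
print(format_exponent(2.0000000001))   # ² (snapped to an integer within eps)
print(format_exponent(-1, "latex"))    # -1 (the caller wraps it in ^{...})
print(format_exponent(1))              # '' (a power of 1 is left implicit)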
[ "def power(number, exp=2):\n return number ** exp", "def exponent(a, b):\n result_exp = round(a ** b, 4)\n print(\"The result of \" + str(a) + \" raised to the \" + str(b) + \" is \" + str(result_exp))\n return str(a) + \" ** \" + str(b) + \" = \" + str(result_exp)", "def pow_to_mul_string(expr):\n pows = list(expr.atoms(Pow))\n if any(not e.is_Integer for b, e in (i.as_base_exp() for i in pows)):\n raise ValueError(\"A power contains a non-integer exponent\")\n s = str(expr)\n repl = zip(pows, (Mul(* [b] * e, evaluate=False)\n for b, e in (i.as_base_exp() for i in pows)))\n for fr, to in repl:\n s = s.replace(str(fr), str(to))\n return s", "def to_latex(value : Union[int, float]) -> str:\n fstr = f\"{value:.2E}\"\n val, power = fstr[:4], fstr[-1]\n return fr\"${val}\\,\\times\\,10^{int(power)}$\"", "def get_power(self):\r\n return self._power", "def get_power(self):\n return self.power_total", "def eisen_pow(base, exponent):\n result = Eisen(1,0)\n for i in range(exponent):\n result *= base\n return result", "def latex_exp(f):\n str = \"%.1e\" % f\n mantissa, exponent = str.split(\"e\")\n return r'%.1f \\times 10^{%d}' % (float(mantissa), int(exponent))", "def exponentiate(self, base, exponent):\n result = float (base) ** float (exponent)\n return result", "def pretty_exponent(string):\n\n # TODO: to be improved...\n\n def make_exp(string):\n # There must be a better way...\n replace_table = ('0⁰', '1¹', '2²', '3³', '4⁴', '5⁵', '6⁶', '7⁷', '8⁸', '9⁹')\n for sub in replace_table:\n string = string.replace(sub[0], sub[1])\n return string\n\n number_exp = re.compile('\\^[0-9]*')\n matches = number_exp.findall(string)\n\n for match in matches:\n string = string.replace(match, make_exp(match[1:]))\n\n return string", "def power(self):\n return self.curr * self.emf", "def getTerminalPower(self):\n return float(self.instr.query(\"MEAS:POW?\"))", "def format_scientific(self, number):\n return __format_obj().scientific(number)", "def getTerminalPower(self):\n return float(self.query(\"MEAS:POW?\"))", "def to_unicode(self) -> str:\n if self.is_zero: return \"0\"\n elif self.is_unknown: return \"Unknown\"\n f = self.floatfactor\n for node in self.phasenodes:\n f *= 1+cexp(node)\n phase = Fraction(self.phase)\n if self.phase >= 1:\n f *= -1\n phase -= 1\n\n if abs(f+1) > 0.001 and abs(f-1) > 0.001:\n return str(f)\n\n s = \"\"\n if abs(f+1) < 0.001: #f \\approx -1\n s += \"-\"\n if self.power2 != 0:\n s += r\"√2\"\n if self.power2 < 0:\n s += \"⁻\"\n val = str(abs(self.power2))\n s += \"\".join([unicode_superscript[i] for i in val])\n if phase != 0:\n s += \"exp(i\"\n if phase in unicode_fractions:\n s += unicode_fractions[phase] + \"π)\"\n else:\n s += \"{:d}/{:d}π)\".format(phase.numerator,phase.denominator)\n return s", "def _pow(phase):\n pow = int(np.round(phase * _pow_scale))\n return b\",\".join(map(_ascii_numerals, _2_bytes.pack(pow)))", "def two_pow(pow):\n\treturn 2**pow", "def latex_monomial(exponent, coef, var):\n if exponent == 0:\n return str(coef)\n if coef == 1:\n coef = ''\n if coef == -1:\n coef = '-'\n if exponent == 1:\n return f'{coef}{var}'\n return f'{coef}{var}^{{{exponent}}}'", "def __pow__(self, power):\n value = power * (self.val) ** (power - 1)\n der = {k: value * v for k, v in self.der.items()}\n return AutoDiff(self.var, self.val ** power, der)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an integer value that represents the exponent of a unit if the dimensions array is a multiple of one of the defined derived units in dimension_keys. Returns None, otherwise. e.g. a force would have dimensions = [1,1,2,0,0,0,0] so a Physical object that had dimensions = [2,2,4,0,0,0,0] would really be a force to the power of 2. This function returns the 2, stating that `dims` is the second power of a derived dimension in `units_env`.
def _powers_of_derived(dims: Dimensions, units_env: Callable) -> Union[int, float]: quotient_1 = _dims_quotient(dims, units_env) quotient_2 = _dims_basis_multiple(dims) quotient_1_mean = None if quotient_1 is not None: quotient_1_mean = cache_vec_mean(quotient_1, ignore_empty=True) if quotient_1 is not None and quotient_1_mean != -1: power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True) base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True) return ((power_of_derived or 1), base_dimensions) elif quotient_1_mean == -1 and quotient_2 is not None: # Situations like Hz and s power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True) base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True) return ((power_of_basis or 1), base_dimensions) elif quotient_1_mean == -1: # Now we can proceed with an inverse unit power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True) base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True) return ((power_of_derived or 1), base_dimensions) elif quotient_2 is not None: power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True) base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True) return ((power_of_basis or 1), base_dimensions) else: return (1, dims)
[ "def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]:\n derived = units_env()[\"derived\"]\n defined = units_env()[\"defined\"]\n all_units = ChainMap(defined, derived)\n potential_inv = None # A flag to catch a -1 value (an inversion)\n quotient = None\n quotient_result = None\n for dimension_key in all_units.keys():\n if _check_dims_parallel(dimension_key, dimensions):\n quotient = cache_vec_divide(dimensions, dimension_key, ignore_zeros=True)\n mean = cache_vec_mean(quotient, ignore_empty=True)\n if mean == -1:\n potential_inv = quotient\n elif -1 < mean < 1:\n return (\n None # Ignore parallel dimensions if they are fractional dimensions\n )\n else:\n quotient_result = quotient\n return quotient_result or potential_inv # Inversion ok, if only option", "def get_key_from_dimensions(derived):\n\n return tuple((i[\"base\"], i[\"power\"]) for i in derived)", "def _dims_basis_multiple(dims: Dimensions) -> Optional[Dimensions]:\n count = 0\n for dim in dims:\n if dim:\n count += 1\n if count > 1:\n return None\n return dims", "def getDimension(self, unit: 'int const'=0) -> \"int32_t\":\n return _coin.SoMultiTextureCoordinateElement_getDimension(self, unit)", "def searchDimension(self):\n\t\t\t\treturn pow(2, self._ppmResolution-1)", "def dimension(self, monomial_ideal):\n frobby_input = self._ideal_to_string(monomial_ideal)\n frobby_output = self('dimension', input=frobby_input)\n return int(frobby_output)", "def get_dimensions(self, units):\n return self.id.to(units), self.od.to(units)", "def _get_units_by_factor(\n factor: float, dims: Dimensions, units_env: Callable, power: Union[int, float]\n) -> dict:\n ## TODO Write a pow() to handle fractions and rationals\n new_factor = fraction_pow(factor, -Fraction(1 / power))\n units_match = _match_factors(new_factor, units_env())\n try:\n units_name = tuple(units_match.keys())[0]\n except IndexError:\n units_name = \"\"\n retrieved_dims = units_match.get(units_name, dict()).get(\"Dimension\", dict())\n if dims != retrieved_dims:\n return dict()\n return units_match", "def get_topology_dims(comm, ndims):\n return mpi().Compute_dims(comm.size, ndims)", "def get_dim(self, key):\n return self.dim.get(key, None)", "def get_dimension(self, dimension, default=None, strict=False):\n all_dims = self.dimensions()\n if isinstance(dimension, Dimension):\n dimension = dimension.name\n if isinstance(dimension, int):\n if 0 <= dimension < len(all_dims):\n return all_dims[dimension]\n elif strict:\n raise KeyError(\"Dimension %s not found\" % dimension)\n else:\n return default\n name_map = {dim.name: dim for dim in all_dims}\n if strict and dimension not in name_map:\n raise KeyError(\"Dimension %s not found\" % dimension)\n else:\n return name_map.get(dimension, default)", "def energyMultiplier(self) -> float:\n return self._getMultiplier('energy')", "def _hilbert_space_dims(oper):\n if isinstance(oper, list):\n return oper[0].dims\n elif oper.type == 'oper': # interpret as unitary quantum channel\n return oper.dims\n elif oper.type == 'super' and oper.superrep in ['choi', 'chi', 'super']:\n return [oper.dims[0][1], oper.dims[1][0]]\n else:\n raise TypeError('oper is not a valid quantum channel!')", "def space_dimensions(observation_space) -> int:\n if hasattr(observation_space, 'spaces'):\n # If we're a Tuple space, count the inputs across each space in the Tuple\n return sum([ int(np.prod(s.shape)) for s in observation_space.spaces ])\n else:\n # Otherwise just look at the shape of the space directly\n return 
int(np.prod(observation_space.shape))", "def _dimensions(ds):\n ds = dshape(ds)\n if isdimension(ds[0]):\n return 1 + _dimensions(ds.subarray(1))\n if isinstance(ds[0], Record):\n return 1 + max(map(_dimensions, ds[0].types))\n if len(ds) == 1 and isunit(ds[0]):\n return 0\n raise NotImplementedError('Can not compute dimensions for %s' % ds)", "def getDimension():\n ierr = c_int()\n api__result__ = lib.gmshModelGetDimension(\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGetDimension returned non-zero error code: \",\n ierr.value)\n return api__result__", "def flat_dim_with_categories(self):\n if self.global_bounds is False:\n return None\n return int(np.prod(self.shape) * self.global_bounds[1])", "def entity_dim(self) -> int:\n return self.entity[0]", "def num_dimensions(self):\n return self.numDim.value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a Dimensions object representing the elementwise quotient between 'dimensions' and a defined unit if 'dimensions' is a scalar multiple of a defined unit in the global environment variable. Returns None otherwise.
def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]: derived = units_env()["derived"] defined = units_env()["defined"] all_units = ChainMap(defined, derived) potential_inv = None # A flag to catch a -1 value (an inversion) quotient = None quotient_result = None for dimension_key in all_units.keys(): if _check_dims_parallel(dimension_key, dimensions): quotient = cache_vec_divide(dimensions, dimension_key, ignore_zeros=True) mean = cache_vec_mean(quotient, ignore_empty=True) if mean == -1: potential_inv = quotient elif -1 < mean < 1: return ( None # Ignore parallel dimensions if they are fractional dimensions ) else: quotient_result = quotient return quotient_result or potential_inv # Inversion ok, if only option
[ "def get_dimensions(self, units):\n return self.id.to(units), self.od.to(units)", "def _powers_of_derived(dims: Dimensions, units_env: Callable) -> Union[int, float]:\n quotient_1 = _dims_quotient(dims, units_env)\n quotient_2 = _dims_basis_multiple(dims)\n quotient_1_mean = None\n if quotient_1 is not None:\n quotient_1_mean = cache_vec_mean(quotient_1, ignore_empty=True)\n\n if quotient_1 is not None and quotient_1_mean != -1:\n power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True)\n return ((power_of_derived or 1), base_dimensions)\n elif quotient_1_mean == -1 and quotient_2 is not None: # Situations like Hz and s\n power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True)\n return ((power_of_basis or 1), base_dimensions)\n elif quotient_1_mean == -1: # Now we can proceed with an inverse unit\n power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True)\n return ((power_of_derived or 1), base_dimensions)\n elif quotient_2 is not None:\n power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True)\n return ((power_of_basis or 1), base_dimensions)\n else:\n return (1, dims)", "def div_w_units(var_a, var_b):\n da_out = var_a / var_b\n if not isinstance(da_out, xr.DataArray):\n raise ValueError(\"quotient of var_a and var_b must be an xr.DataArray\")\n a_units = _get_units(var_a)\n b_units = _get_units(var_b)\n da_out.attrs[\"units\"] = cf_units.Unit(f\"({a_units})/({b_units})\").format()\n return da_out", "def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TemplateQuotaDimensionArgs']]]]:\n return pulumi.get(self, \"dimensions\")", "def getdimensionunits(self, dname, vname=None):\n x = self.dimensionobject(dname, vname)\n return x.units", "def dimensions(self) -> DimensionGraph:\n base = self.universe.empty\n if len(self) == 0:\n return base\n return base.union(*[datasetType.dimensions for datasetType in self.keys()])", "def test_compute_divisors_quantities_correctly(self):\n\t\tself.assertEqual(1, compute_divisor_quantity(1))\n\t\tself.assertEqual(2, compute_divisor_quantity(3))\n\t\tself.assertEqual(4, compute_divisor_quantity(15))\n\t\tself.assertEqual(6, compute_divisor_quantity(28))", "def div(self):\n raise ValueError(\"Cannot compute the div of a scalar-valued function.\")", "def deck_space_utilizations(self):\n\n try:\n max_deck_space = self.storage.max_deck_space\n return np.array(self.deck_space_list) / max_deck_space\n\n except MissingComponent:\n return np.array(np.NaN)", "def Division(self):\n return self._with_axiom('Division')", "def _determine_num_dimensions(self, total_dimensions: int) -> int:\n if self._dimensions_varied == \"all\":\n dimensions = total_dimensions\n elif isinstance(self._dimensions_varied, float):\n dimensions = int(np.ceil(self._dimensions_varied * total_dimensions))\n elif isinstance(self._dimensions_varied, int):\n dimensions = self._dimensions_varied\n else:\n dimensions = 0\n if dimensions <= 0 or dimensions > total_dimensions:\n raise BenchmarkError(\n f\"Rook design optimizer cannot vary {dimensions} dimensions \"\n f\"for a dataset that has {total_dimensions} dimensions\"\n )\n return dimensions", "def test_unit_div():\n\n length = Unit(\"10cm\")\n shorter_length = length / 2\n assert length.unit == 'cm'\n 
assert_almost_equal(shorter_length.value, 5)\n\n shorter_length = length / 2.\n assert length.unit == 'cm'\n assert_almost_equal(shorter_length.value, 5.)", "def test_division():\n\n # Same units\n a1 = unyt_array([1.0, 2.0, 3.0], \"cm\")\n a2 = unyt_array([4.0, 5.0, 6.0], \"cm\")\n a3 = [4 * cm, 5 * cm, 6 * cm]\n answer1 = unyt_array([0.25, 0.4, 0.5])\n answer2 = unyt_array([4, 2.5, 2])\n op = operator.truediv\n\n operate_and_compare(a1, a2, op, answer1)\n operate_and_compare(a2, a1, op, answer2)\n operate_and_compare(a1, a3, op, answer1)\n operate_and_compare(a3, a1, op, answer2)\n operate_and_compare(a1, a2, np.divide, answer1)\n operate_and_compare(a2, a1, np.divide, answer2)\n operate_and_compare(a1, a3, np.divide, answer1)\n operate_and_compare(a3, a1, np.divide, answer2)\n\n # different units, same dimension\n a1 = unyt_array([1.0, 2.0, 3.0], \"cm\")\n a2 = unyt_array([4.0, 5.0, 6.0], \"m\")\n a3 = [4 * m, 5 * m, 6 * m]\n answer1 = unyt_array([0.0025, 0.004, 0.005])\n answer2 = unyt_array([400, 250, 200])\n\n operate_and_compare(a1, a2, op, answer1)\n operate_and_compare(a2, a1, op, answer2)\n operate_and_compare(a1, a3, op, answer1)\n operate_and_compare(a3, a1, op, answer2)\n operate_and_compare(a1, a2, np.divide, answer1)\n operate_and_compare(a2, a1, np.divide, answer2)\n operate_and_compare(a1, a3, np.divide, answer1)\n operate_and_compare(a3, a1, np.divide, answer2)\n\n # different dimensions\n a1 = unyt_array([1.0, 2.0, 3.0], \"cm\")\n a2 = unyt_array([4.0, 5.0, 6.0], \"g\")\n a3 = [4 * g, 5 * g, 6 * g]\n answer1 = unyt_array([0.25, 0.4, 0.5], \"cm/g\")\n answer2 = unyt_array([4, 2.5, 2], \"g/cm\")\n\n operate_and_compare(a1, a2, op, answer1)\n operate_and_compare(a2, a1, op, answer2)\n operate_and_compare(a1, a3, op, answer1)\n operate_and_compare(a3, a1, op, answer2)\n operate_and_compare(a1, a2, np.divide, answer1)\n operate_and_compare(a2, a1, np.divide, answer2)\n operate_and_compare(a1, a3, np.divide, answer1)\n operate_and_compare(a3, a1, np.divide, answer2)\n\n # One dimensionless, one unitful\n a1 = unyt_array([1.0, 2.0, 3.0], \"cm\")\n a2 = array([4.0, 5.0, 6.0])\n a3 = [4, 5, 6]\n answer1 = unyt_array([0.25, 0.4, 0.5], \"cm\")\n answer2 = unyt_array([4, 2.5, 2], \"1/cm\")\n\n operate_and_compare(a1, a2, op, answer1)\n operate_and_compare(a2, a1, op, answer2)\n operate_and_compare(a1, a3, op, answer1)\n operate_and_compare(a3, a1, op, answer2)\n operate_and_compare(a1, a2, np.divide, answer1)\n operate_and_compare(a2, a1, np.divide, answer2)\n operate_and_compare(a1, a3, np.divide, answer1)\n operate_and_compare(a3, a1, np.divide, answer2)\n\n # Both dimensionless quantities\n a1 = unyt_array([1.0, 2.0, 3.0])\n a2 = array([4.0, 5.0, 6.0])\n a3 = [4, 5, 6]\n answer1 = unyt_array([0.25, 0.4, 0.5])\n answer2 = unyt_array([4, 2.5, 2])\n\n operate_and_compare(a1, a2, op, answer1)\n operate_and_compare(a2, a1, op, answer2)\n operate_and_compare(a1, a3, op, answer1)\n operate_and_compare(a3, a1, op, answer2)\n operate_and_compare(a1, a3, np.divide, answer1)\n operate_and_compare(a3, a1, np.divide, answer2)\n operate_and_compare(a1, a3, np.divide, answer1)\n operate_and_compare(a3, a1, np.divide, answer2)\n\n # With np.multiply.reduce\n a = unyt_array([3.0, 2.0, 1.0], \"cm\")\n answer = unyt_quantity(1.5, \"cm**-1\")\n assert_equal(np.divide.reduce(a), answer)\n a = unyt_array([[3.0, 2.0, 1.0], [6.0, 5.0, 4.0]], \"cm\")\n answer = unyt_array([1.5, 0.3], \"cm**-1\")\n assert_equal(np.divide.reduce(a, axis=1), answer)", "def divide(engine):\n divisor = engine.pop()\n dividend 
= engine.pop()\n engine.push(1. * dividend / divisor)", "def divide(divident: int, divisor: int) -> float:\n click.echo(f\"{divident} / {divisor} = {divident/divisor}\")", "def _get_units_by_factor(\n factor: float, dims: Dimensions, units_env: Callable, power: Union[int, float]\n) -> dict:\n ## TODO Write a pow() to handle fractions and rationals\n new_factor = fraction_pow(factor, -Fraction(1 / power))\n units_match = _match_factors(new_factor, units_env())\n try:\n units_name = tuple(units_match.keys())[0]\n except IndexError:\n units_name = \"\"\n retrieved_dims = units_match.get(units_name, dict()).get(\"Dimension\", dict())\n if dims != retrieved_dims:\n return dict()\n return units_match", "def normalize_dimensions(img, dimensions, return_ratio=False):\n max_width, max_height = dimensions\n dst_width, dst_height = max_width, max_height\n src_width, src_height = img.size\n\n if dst_width > 0 and dst_height > 0:\n pass\n elif dst_width <= 0:\n dst_width = float(src_width * max_height) / float(src_height)\n elif dst_height <= 0:\n dst_height = float(src_height * max_width) / float(src_width)\n else:\n raise ValueError(\"Width and height must be greater than zero\")\n\n if return_ratio:\n dst_ratio = float(dst_width) / float(dst_height)\n return (int(dst_width), int(dst_height), dst_ratio)\n else:\n return (int(dst_width), int(dst_height))", "def dimensions(self) -> DimensionGraph:\n base = self.universe.empty\n if len(self) == 0:\n return base\n return base.union(*[scaffolding.dimensions for scaffolding in self.values()])", "def __getitem__(self,units):\r\n if '/' in units:\r\n top_units, bottom_units = units.split('/')\r\n return self[top_units]/self[bottom_units]\r\n if '_' in units:\r\n prefix, unit = units.split('_')\r\n if prefix not in self.valid_prefixes:\r\n raise ValueError('Prefix {0} is not recognized'.format(prefix))\r\n \r\n if unit not in self.valid_units:\r\n raise ValueError(\"Units {0} are not recognized\".format(unit))\r\n \r\n return float(self.prefix_db[prefix]*self.unit_db[unit])\r\n else:\r\n if units not in self.valid_units:\r\n raise ValueError(\"Units {0} are not recognized\".format(units))\r\n return float(self.unit_db[units])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps vec.divide with an lru_cache
def cache_vec_divide(tuple_a, tuple_b, ignore_zeros): return vec.divide(tuple_a, tuple_b, ignore_zeros)
[ "def divide(key, it):\n def accumulate(acc, el):\n if key(el):\n acc[0].append(el)\n else:\n acc[1].append(el)\n\n return acc\n\n\n return reduce(accumulate, it, ([], []))", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorUL___div__(self, *args)", "def div3(a, vec, result):\n\tresult[0] = vec[0] / a\n\tresult[1] = vec[1] / a\n\tresult[2] = vec[2] / a", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorSL___div__(self, *args)", "def vm_impl_div(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n return Tensor(x / y)\n\n return vm_impl", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorLD___div__(self, *args)", "def seq_divide1(seq, mod):\r\n return [seq[x:x+mod] for x in xrange(0, len(seq), mod)]", "def divide(engine):\n divisor = engine.pop()\n dividend = engine.pop()\n engine.push(1. * dividend / divisor)", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorSC___div__(self, *args)", "def __div__(self, vec: 'itkVersorD') -> \"itkVersorD\":\n return _itkVersorPython.itkVersorD___div__(self, vec)", "def vm_impl_real_div(self):\n\n def vm_impl(x, y):\n x = x.asnumpy()\n y = y.asnumpy()\n out = x / y\n out = np.array(out, x.dtype)\n return Tensor(out)\n\n return vm_impl", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorF___div__(self, *args)", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorD___div__(self, *args)", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorUS___div__(self, *args)", "def list_element_wise_division(a, b):\n return numpy.divide(a, b, out=numpy.zeros_like(a), where=b != 0.)", "def divide(base, array):\n return [base / item for item in array]", "def __div__(self, *args):\n return _vnl_vectorPython.vnl_vectorSI___div__(self, *args)", "def fast_divide(array, divisor):\n if divisor == 1:\n return array\n elif divisor == 2:\n return numpy.right_shift(array, 1)\n elif divisor == 4:\n return numpy.right_shift(array, 2)\n elif divisor == 8:\n return numpy.right_shift(array, 3)\n else:\n return numpy.floor_divide(array, divisor)", "def divide(self, delta):\n if uwsgi_loaded:\n uwsgi.metric_div(self._metric_name, delta)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wraps vec.mean with an lru_cache
def cache_vec_mean(tuple_a, ignore_empty): return vec.mean(tuple_a, ignore_empty)
[ "def vm_impl_reduce_mean(self):\n\n def vm_impl(x, axis):\n x = x.asnumpy()\n out = vm.mean(x, axis)\n return Tensor(out)\n\n return vm_impl", "def lru_cache(maxsize=100):\n \n def decorating_function(user_function):\n stats = [0, 0] # Hits, misses\n data = {}\n lastused = {}\n lock = Lock()\n \n @functools.wraps(user_function)\n def wrapper(*args):\n with lock:\n try:\n result = data[args]\n stats[0] += 1 # Hit\n except KeyError:\n stats[1] += 1 # Miss\n if len(data) == maxsize:\n for k, _ in nsmallest(maxsize // 10 or 1,\n iteritems(lastused),\n key=itemgetter(1)):\n del data[k]\n del lastused[k]\n data[args] = user_function(*args)\n result = data[args]\n finally:\n lastused[args] = time()\n return result\n \n def cache_info():\n with lock:\n return stats[0], stats[1], maxsize, len(data)\n \n def cache_clear():\n with lock:\n data.clear()\n lastused.clear()\n stats[0] = stats[1] = 0\n \n wrapper.cache_info = cache_info\n wrapper.cache_clear = cache_clear\n return wrapper\n return decorating_function", "def running_mean(x, N):\n # cumsum = np.cumsum(np.insert(x, 0, 0))\n # return (cumsum[N:] - cumsum[:-N]) / float(N)\n return uniform_filter1d(x, size=N)", "def running_mean(l, N):\r\n sum = 0\r\n result = list( 0 for x in l)\r\n\r\n for i in range( 0, N ):\r\n sum = sum + l[i]\r\n result[i] = sum / (i+1)\r\n\r\n for i in range( N, len(l) ):\r\n sum = sum - l[i-N] + l[i]\r\n result[i] = sum / N\r\n\r\n return result", "def lru_cache_for_sample_dict():\n maxsize = cache_maxsize()\n raw_decorating_function = functools.lru_cache(maxsize=maxsize, typed=False)\n\n def decorating_function(user_function):\n def wrapped_user_function(sender, *args, **kwargs):\n new_args = list(args)\n new_kwargs = dict(kwargs)\n for i in range(len(args)):\n if isinstance(args[i], FrozenSampleDict):\n new_args[i] = args[i].dict\n for key in kwargs.keys():\n if isinstance(kwargs[key], FrozenSampleDict):\n new_kwargs[key] = kwargs[key].dict\n return user_function(sender, *new_args, **new_kwargs)\n\n def frozen(wrapper):\n def frozen_wrapper(sender, *args, **kwargs):\n new_args = list(args)\n new_kwargs = dict(kwargs)\n for i in range(len(args)):\n if isinstance(args[i], list):\n new_args[i] = tuple(args[i])\n elif isinstance(args[i], dict):\n new_args[i] = FrozenSampleDict(args[i])\n for key in kwargs.keys():\n if isinstance(kwargs[key], list):\n new_kwargs[key] = tuple(kwargs[key])\n elif isinstance(kwargs[key], dict):\n new_kwargs[key] = FrozenSampleDict(kwargs[key])\n result = wrapper(sender, *new_args, **new_kwargs)\n return result\n return frozen_wrapper\n return frozen(raw_decorating_function(wrapped_user_function))\n return decorating_function", "def update_mean(X):\n\n return X.sum(axis=0) / X.shape[0]", "def batch_mean(fun, in_axes):\n mapped_fun = jax.vmap(fun, in_axes=in_axes)\n\n def batch_fun(*args):\n return jnp.mean(mapped_fun(*args))\n\n return batch_fun", "def _cache_average_attempts():\n games = Game.query(Game.game_over == False).fetch()\n if games:\n count = len(games)\n total_attempts_remaining = sum([game.attempts_remaining_player1 +\n game.attempts_remaining_player2\n for game in games])\n average = float(total_attempts_remaining)/count\n memcache.set(MEMCACHE_MOVES_REMAINING,\n 'The average moves remaining is {:.2f}'.format(average))", "def running_mean(x, N): \n cumsum = np.cumsum(np.insert(x, 0, 0)) \n return (cumsum[N:] - cumsum[:-N]) / float(N)", "def mean(self) -> float:\n return mean(self.iterable)", "def vector_mean(*args):\n sz = len(args)\n mean_vector = [0.0 for _ in 
range(len(args[0]))]\n for input_vector in args:\n mean_vector = [a+b for a, b in zip(mean_vector, input_vector)]\n mean_vector = [a / sz for a in mean_vector]\n return mean_vector", "def getMean(self):\r\n return np.mean(self.members)", "def add_mean(self):\n with tf.variable_scope(self._get_layer_str()):\n prev_shape = self.get_output().get_shape()\n reduction_indices = list(range(len(prev_shape)))\n assert len(reduction_indices) > 2 and \"Can't average a (batch, activation) tensor\"\n reduction_indices = reduction_indices[1:-1]\n out = tf.reduce_mean(self.get_output(), reduction_indices=reduction_indices)\n self.outputs.append(out)\n return self", "def get_mean(original_points):\n\n global points\n mean_result = []\n\n for i in range(total_number_of_features):\n sum = 0\n for index in original_points:\n sum += points[index][i]\n\n mean_result.append(sum / len(original_points))\n return mean_result", "def get_average(self):\n # compute the mean\n self.average_fit = statistics.mean([self.fitness_dict[key] for key in self.fitness_dict])\n self.average_age = statistics.mean([self.age_dict[key] for key in self.age_dict])\n\n # Add average fitness at each time step to the collector\n self.average_fit_list.append(self.average_fit)\n self.average_age_list.append(self.average_age)", "def mean(lst):\n return sum(lst) / float(len(lst))", "def update_mean(clusters: list) -> list:\n new_means = []\n for cluster in clusters:\n new_means.append(mean(cluster))\n return new_means", "def _update_stats_running_means(\n iter_: int,\n means: dict[str, float],\n new_vals: dict[str, float],\n):\n if iter_ == 1:\n means.update({key: float(val) for key, val in new_vals.items()})\n else:\n for key, val in new_vals.items():\n means[key] += (float(val) - means[key]) / iter_", "def mean_vector(self, tokens, remove_oov=False, norm=False):\n return self.vectorize(tokens, remove_oov, norm).mean(0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if d1 and d2 are parallel vectors. False otherwise.
def _check_dims_parallel(d1: Dimensions, d2: Dimensions) -> bool: return vec.multiply(d1, vec.dot(d2, d2)) == vec.multiply(d2, vec.dot(d1, d2))
[ "def parallel(self, vector):\n if self.cross(vector) == 0:\n return True\n return False", "def non_parallel(self, vector):\n if (self.parallel(vector) is not True and\n self.perpendicular(vector) is not True):\n return True\n return False", "def __eq__(self, *args):\n return _vnl_vectorPython.vnl_vectorD___eq__(self, *args)", "def equal_vectors(self, v1, v2):\n v1.expand()\n v2.expand()\n v1_2, v2_2 = v1._v2, v2._v2\n if v1_2 == v2_2 == 24:\n return True # then v1 = v1 = zero\n if v1_2 != v2_2:\n return False\n data1, data2 = v1.int_data, v2.int_data\n if v1.shift > v2.shift:\n data1 = data1 >> (v1.shift - v2.shift)\n if v2.shift > v1.shift:\n data2 = data2 >> (v2.shift - v1.shift)\n return (data1 == data2).all()", "def is_parallel(self, other: 'ConstructionRay') -> bool:\n\n if self._is_vertical:\n return other._is_vertical\n\n if other._is_vertical:\n return False\n\n return math.isclose(self._slope, other._slope, abs_tol=1e-12)", "def __eq__(self, other):\n if not isinstance(other, Vector):\n return False\n\n return all([x == y for x, y in zip(self, other)])", "def is_vector(self) -> bool:\n if self.real == 0.0 and (\n self.i != 0.0 or self.j != 0.0 or self.k != 0.0):\n return True\n\n return False", "def __eq__(self, *args):\n return _vnl_vectorPython.vnl_vectorLD___eq__(self, *args)", "def vectorCompare(v1, v2):\n\tx = abs(v1[0] - v2[0]) < LINEAR_TOLLERANCE\n\ty = abs(v1[1] - v2[1]) < LINEAR_TOLLERANCE\n\tz = abs(v1[2] - v2[2]) < LINEAR_TOLLERANCE\n\treturn (x and y and z)", "def _is_collinear(self, other):\n # type: (Segment) -> bool\n if almostequal(other, self) or almostequal(other, -self):\n return True\n a = self.p1 - other.p1\n b = self.p1 - other.p2\n angle_between = a.cross(b)\n if almostequal(angle_between, Vector3D(0, 0, 0)):\n return True\n a = self.p2 - other.p1\n b = self.p2 - other.p2\n angle_between = a.cross(b)\n if almostequal(angle_between, Vector3D(0, 0, 0)):\n return True\n return False", "def orthogonal_to_each_other(A, B):\n\tif A.ndim != 1 or B.ndim != 1:\n\t\tprint(\"At least one of the numpy array is not a vector!\")\n\t\treturn False\n\telse:\n\t\tif dot_product(A, B) == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def check_vector_inbetween(v1, v2, point):\n if (np.dot(np.cross(v1, point), np.cross(v1, v2))) >= 0 and (np.dot(np.cross(v2, point), np.cross(v2, v1))) >= 0:\n return True\n else:\n return False", "def is_vector(block):\n \n return is_1d_vector(block) or is_2d_vector(block)", "def isDisjoint(self, other) -> bool:\n if not isinstance(other, BoundingBox):\n raise ValueError(\"other must be a vector instead other is \"\n \"{}\".format(type(other)))\n\n result = any(r1 > r2 for r1, r2 in zip(self.getEdges()[0],\n other.getEdges()[1]))\n result |= any(r1 < r2 for r1, r2 in zip(self.getEdges()[1],\n other.getEdges()[0]))\n\n return result", "def are_orthonormal(A, B):\n\tif orthogonal_to_each_other(A, B):\n\t\tif lp_norm(A, 2) == 1 and lp_norm(B, 2) == 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\telse:\n\t\treturn False", "def do_intersect(p1, q1, p2, q2):\n # first finds orientations\n o1 = orientation(p1, q1, p2)\n o2 = orientation(p1, q1, q2)\n o3 = orientation(p2, q2, p1)\n o4 = orientation(p2, q2, q1)\n\n # general case\n if o1 != o2 and o3 != o4:\n return True\n\n # p1, q1 and p2 colinear and p2 on p1q1\n if o1 == 0 and on_segment(p1, p2, q1):\n return True\n\n # p1, q1 and q2 colinear and q2 on p1q1\n if o2 == 0 and on_segment(p1, q2, q1):\n return True\n\n # p2, q2 and p1 colinear and q1 on p2q2\n if o3 == 0 and 
on_segment(p2, p1, q2):\n return True\n \n # p2, q2 and q1 colinear and q1 on p2q2\n if o4 == 0 and on_segment(p2, q1, q2):\n return True\n \n return False", "def same_side(self, p1, p2, a, b):\n\n cp1 = np.cross(np.asarray(b)-np.asarray(a), np.asarray(p1)-np.asarray(a))\n cp2 = np.cross(np.asarray(b)-np.asarray(a), np.asarray(p2)-np.asarray(a))\n if np.dot(cp1, cp2) >= 0:\n return True\n else:\n return False", "def __eq__(self, plane):\n if self.normal_vector.is_zero():\n if not plane.normal_vector.is_zero():\n return False\n else:\n diff = self.constant_term - plane.constant_term\n return MyDecimal(diff).is_near_zero()\n elif plane.normal_vector.is_zero():\n return False\n\n if not self.is_parallel_to(plane):\n return False\n\n connecting_vector = self.basepoint - plane.basepoint # just subtract basepoints to find a connecting vector\n return connecting_vector.is_orthogonal_to(self.normal_vector) # because we already know they are parallel, we don't have to compare to each normal vector, just one", "def __eq__(self, vec2):\n return (self.data[X] == vec2.x() and self.data[Y] == vec2.y()\n and self.data[Z] == vec2.z() and self.data[W] == vec2.w())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns `dims` if `dims` is a scalar multiple of one of the basis vectors. Returns None, otherwise. This is used as a check to see if `dims` contains only a single dimension, even if that single dimension is to a higher power. e.g. if `dims` equals Dimensions(2, 0, 0, 0, 0, 0, 0) then `dims` will be returned. if `dims` equals Dimensions(0, 1, 1, 0, 0, 0, 0) then None will be returned. if `dims` equals Dimensions(0, 14, 0, 0, 0, 0, 0) then `dims` will be returned.
def _dims_basis_multiple(dims: Dimensions) -> Optional[Dimensions]: count = 0 for dim in dims: if dim: count += 1 if count > 1: return None return dims
[ "def only(self, dims: str or tuple or list or 'Shape'):\n if isinstance(dims, str):\n dims = parse_dim_order(dims)\n if isinstance(dims, (tuple, list)):\n return self[[i for i in range(self.rank) if self.names[i] in dims]]\n elif isinstance(dims, Shape):\n return self[[i for i in range(self.rank) if self.names[i] in dims.names]]\n elif dims is None: # keep all\n return self\n else:\n raise ValueError(dims)", "def _dims_quotient(dimensions: Dimensions, units_env: Callable) -> Optional[Dimensions]:\n derived = units_env()[\"derived\"]\n defined = units_env()[\"defined\"]\n all_units = ChainMap(defined, derived)\n potential_inv = None # A flag to catch a -1 value (an inversion)\n quotient = None\n quotient_result = None\n for dimension_key in all_units.keys():\n if _check_dims_parallel(dimension_key, dimensions):\n quotient = cache_vec_divide(dimensions, dimension_key, ignore_zeros=True)\n mean = cache_vec_mean(quotient, ignore_empty=True)\n if mean == -1:\n potential_inv = quotient\n elif -1 < mean < 1:\n return (\n None # Ignore parallel dimensions if they are fractional dimensions\n )\n else:\n quotient_result = quotient\n return quotient_result or potential_inv # Inversion ok, if only option", "def _valid_dimensions(self, dimensions):\n if dimensions is None:\n dimensions = self.kdims\n elif not isinstance(dimensions, list):\n dimensions = [dimensions]\n\n valid_dimensions = []\n for dim in dimensions:\n if isinstance(dim, Dimension): dim = dim.name\n if dim not in self.kdims:\n raise Exception(\"Supplied dimensions %s not found.\" % dim)\n valid_dimensions.append(dim)\n return valid_dimensions", "def has_dimension(self,name):\n return name in self.dims.keys()", "def has_var_dim(ds):\n test = []\n if isinstance(ds, (coretypes.Ellipsis, coretypes.Var)):\n return True\n elif isinstance(ds, coretypes.Record):\n test = ds.types\n elif isinstance(ds, coretypes.Mono):\n test = ds.parameters\n elif isinstance(ds, (list, tuple)):\n test = ds\n for ds_t in test:\n if has_var_dim(ds_t):\n return True\n return False", "def _get_axis_dims(self, element):\n dims = element.dimensions()[:2]\n if len(dims) == 1:\n return dims + [None, None]\n else:\n return dims + [None]", "def _check_dimensionality(self, array, dims_dict):\n if self.feature_type in [FeatureType.DATA, FeatureType.MASK]:\n return self._reshape_array(array, dims_dict)\n elif self.feature_type in [FeatureType.DATA_TIMELESS, FeatureType.MASK_TIMELESS]:\n array = array.squeeze(axis=0)\n return self._reshape_array(array, dims_dict)\n return array", "def without(self, dims: str or tuple or list or 'Shape') -> 'Shape':\n if isinstance(dims, str):\n return self[[i for i in range(self.rank) if self.names[i] != dims]]\n if isinstance(dims, (tuple, list)):\n return self[[i for i in range(self.rank) if self.names[i] not in dims]]\n elif isinstance(dims, Shape):\n return self[[i for i in range(self.rank) if self.names[i] not in dims.names]]\n # elif dims is None: # subtract all\n # return EMPTY_SHAPE\n else:\n raise ValueError(dims)", "def get_topology_dims(comm, ndims):\n return mpi().Compute_dims(comm.size, ndims)", "def _check_only_climpred_dims(pe):\n additional_dims = set(pe.get_initialized().dims) - set(CLIMPRED_DIMS)\n if len(additional_dims) != 0:\n raise DimensionError(\n f\"{type(pe.__name__)}.plot() does not allow dimensions other \"\n f\"than {CLIMPRED_DIMS}, found {additional_dims}. 
\"\n f\"Please use .mean({additional_dims}) \"\n f\"or .isel() before plot.\"\n )", "def _dimensions_check(self, element):\n if not (\"dimensions\" in self.attributes and \"dimensions\" in element.attributes):\n return True\n elif \"dimensions\" in self.attributes and \"dimensions\" in element.attributes:\n #The dimension text has to match perfectly. If variables names are specified\n #for bounds, we have no way of knowing whether the sizes are the same before\n #runtime. However, we can do some cleanup befor comparing.\n match = True\n selfdim = self.attributes[\"dimensions\"].lower().split(\",\")\n eldim = element.attributes[\"dimensions\"].lower().split(\",\")\n\n i = 0\n #We only need to compare dimensions until one fails\n while match and i < len(selfdim):\n if selfdim[i].strip() != eldim[i].strip():\n match = False\n i += 1\n \n return match\n else:\n return False", "def fits_into_dims(block_size):\n for md, bs in zip(max_dims, block_size):\n if md < bs:\n return False\n return True", "def dimensions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TemplateQuotaDimensionArgs']]]]:\n return pulumi.get(self, \"dimensions\")", "def _dimensions(ds):\n ds = dshape(ds)\n if isdimension(ds[0]):\n return 1 + _dimensions(ds.subarray(1))\n if isinstance(ds[0], Record):\n return 1 + max(map(_dimensions, ds[0].types))\n if len(ds) == 1 and isunit(ds[0]):\n return 0\n raise NotImplementedError('Can not compute dimensions for %s' % ds)", "def embedding_dim(self) -> Optional[int]:\n return None", "def _powers_of_derived(dims: Dimensions, units_env: Callable) -> Union[int, float]:\n quotient_1 = _dims_quotient(dims, units_env)\n quotient_2 = _dims_basis_multiple(dims)\n quotient_1_mean = None\n if quotient_1 is not None:\n quotient_1_mean = cache_vec_mean(quotient_1, ignore_empty=True)\n\n if quotient_1 is not None and quotient_1_mean != -1:\n power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True)\n return ((power_of_derived or 1), base_dimensions)\n elif quotient_1_mean == -1 and quotient_2 is not None: # Situations like Hz and s\n power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True)\n return ((power_of_basis or 1), base_dimensions)\n elif quotient_1_mean == -1: # Now we can proceed with an inverse unit\n power_of_derived = cache_vec_mean(quotient_1, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_1, ignore_zeros=True)\n return ((power_of_derived or 1), base_dimensions)\n elif quotient_2 is not None:\n power_of_basis = cache_vec_mean(quotient_2, ignore_empty=True)\n base_dimensions = cache_vec_divide(dims, quotient_2, ignore_zeros=True)\n return ((power_of_basis or 1), base_dimensions)\n else:\n return (1, dims)", "def guess_dim_type(dimension):\n\n dimclasses = {'T':_possiblet,\n 'Z':_possiblez,\n 'Y':_possibley,\n 'X':_possiblex}\n\n for dcname, dcvals in dimclasses.iteritems():\n if dimension in dcvals:\n return dcname\n\n return None", "def _check_dims_parallel(d1: Dimensions, d2: Dimensions) -> bool:\n return vec.multiply(d1, vec.dot(d2, d2)) == vec.multiply(d2, vec.dot(d1, d2))", "def _check_dimensions(self, states):\n if not states.shape[1] == self.ndim:\n raise DimensionError('the input argument has the wrong '\n 'dimensions.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string "prefix" of an appropriate value if self.value should be prefixed i.e. it is a big enough number (e.g. 5342 >= 1000; returns "k" for "kilo")
def _auto_prefix(value: float, power: Union[int, float], kg: bool = False) -> str: if value == 0: return "" kg_factor = 0 if kg: kg_factor = 3 prefixes = _prefixes abs_val = abs(value) value_power_of_ten = math.log10(abs_val) value_power_of_1000 = value_power_of_ten // (3 * power) prefix_power_of_1000 = value_power_of_1000 * 3 + kg_factor try: return _prefix_lookups[prefix_power_of_1000] except KeyError: return None
[ "def _auto_prefix_kg(value: float, power: Union[int, float]) -> str:\n prefixes = _prefixes\n if abs(value) >= 1:\n for prefix, power_of_ten in prefixes.items():\n if abs(value) >= (power_of_ten / 1000.) ** abs(power):\n return prefix\n else:\n reverse_prefixes = sorted(prefixes.items(), key=lambda prefix: prefix[0])\n # Get the smallest prefix to start...\n previous_prefix = reverse_prefixes[0][0]\n for prefix, power_of_ten in reversed(list(prefixes.items())):\n if abs(value) < (power_of_ten / 1000.) ** abs(power):\n return previous_prefix\n else:\n previous_prefix = prefix", "def prefix_from_value(value, short=True, tex=False): # FIXME: this could use a more sophisticated system as present for SI Units (or would this mean overkill?)\n\n import si.prefixes\n\n def mycmp(a, b): # FIXME: decorate/sort/undecorate is probably faster\n return cmp(len(a), len(b))\n variants = sorted((k for (k,v) in vars(si.prefixes).iteritems() if v==value), cmp=mycmp)\n\n if tex:\n return variants[0].replace('u','\\\\mu{}')\n\n if short:\n return variants[0]\n else:\n return variants[-1]", "def get_si_prefix(value: float) -> (float, str):\n prefixes = [\n \"a\",\n \"f\",\n \"p\",\n \"n\",\n \"μ\",\n \"m\",\n \"\",\n \"k\",\n \"M\",\n \"G\",\n \"T\",\n \"P\",\n \"E\",\n \"Z\",\n \"Y\",\n ]\n if abs(value) < 1e-18:\n return 0, \"\"\n i = int(math.floor(math.log10(abs(value))))\n i = int(i / 3)\n p = math.pow(1000, i)\n s = round(value / p, 2)\n ind = i + 6\n return s, prefixes[ind]", "def apply_prefix(value: float, unit: str) -> float:\n downfactors = {\"p\": 1e12, \"n\": 1e9, \"u\": 1e6, \"µ\": 1e6, \"m\": 1e3}\n upfactors = {\"k\": 1e3, \"M\": 1e6, \"G\": 1e9}\n if not unit:\n return value\n if unit[0] in downfactors:\n return value / downfactors[unit[0]]\n elif unit[0] in upfactors:\n return value * upfactors[unit[0]]\n else:\n raise Exception(\"Could not understand units: {u}\".format(u=unit))", "def getPrefix(prefix):\n if not prefix:\n return 1\n\n prefix = prefix.lower()\n\n if prefix in PREFIX_PICO:\n return 1.0e-12\n if prefix in PREFIX_NANO:\n return 1.0e-9\n if prefix in PREFIX_MICRO:\n return 1.0e-6\n if prefix in PREFIX_MILLI:\n return 1.0e-3\n if prefix in PREFIX_KILO:\n return 1.0e3\n if prefix in PREFIX_MEGA:\n return 1.0e6\n if prefix in PREFIX_GIGA:\n return 1.0e9\n\n return 1", "def __prefixNumber(num, leading):\n length = int(leading)+1\n num = str(num)\n while len(num) < length:\n num = '0' + num\n return num", "def _getPrefix(self) -> str:\n return 'CHAPTER' + ('0' if int(self.number) < 10 else '') + str(self.number)", "def starting_with(value, prefix):\n return str(value).startswith(str(prefix))", "def format_value(self, value: float) -> str:\n return f\"{value:.{self.display_decimals}f}{self.letter or ''}\"", "def get_human_number(num):\n if num < 1000:\n return num\n\n magnitude = 0\n while abs(num) >= 1000:\n magnitude += 1\n num /= 1000.0\n # add more suffixes if you need them\n return '%.2f%s' % (num, ['', 'K', 'M', 'G', 'T', 'P'][magnitude])", "def gpa_scale_emoji(value):\n if value is None:\n return u'\\U00002753'\n elif value <= 0.5:\n return u'\\U00002620'\n elif value <= 1.5:\n return u'\\U0001f621'\n elif value <= 2.5:\n return u'\\U0001f641'\n elif value <= 3.5:\n return u'\\U0001f610'\n elif value <= 4.0:\n return u'\\U0001f600'\n else:\n return 'ERROR'", "def name_from_value( self, value ):\n\t\tfor ( n, v ) in self.definitions.items():\n\t\t\tif value == v:\n\t\t\t\treturn n\n\t\treturn \"\"", "def message_prefix(self):\n prefixes = self.labels + ['']\n return 
\":\\n\".join(\" \"*i+label for i, label in enumerate(prefixes))", "def _stringify_time_unit(value: int, unit: str) -> str:\n if unit == \"seconds\" and value == 0:\n return \"0 seconds\"\n if value == 1:\n return f\"{value} {unit[:-1]}\"\n if value == 0:\n return f\"less than a {unit[:-1]}\"\n return f\"{value} {unit}\"", "def get_unit_suffix(unit):\n\n order = int(math.log10(max(unit, 1)))\n if order < 3:\n unitstr = \"\"\n unit2 = 1\n elif 3 <= order < 6:\n unitstr = \"K\"\n unit2 = 1000\n elif 6 <= order < 9:\n unitstr = \"M\"\n unit2 = 1e6\n elif 9 <= order < 12:\n unitstr = \"G\"\n unit2 = 1e9\n elif 12 <= order < 15:\n unitstr = \"T\"\n unit2 = 1e12\n elif 15 <= order:\n unitstr = \"e\" + str(order)\n unit2 = unit\n\n return unit2, unitstr", "def format_constant(self, value):\n return str(value)", "def devilry_verbosenumber(value, number):\n numbers = {\n 1: gettext_lazy('first'),\n 2: gettext_lazy('second'),\n 3: gettext_lazy('third'),\n 4: gettext_lazy('fourth'),\n 5: gettext_lazy('fifth'),\n 6: gettext_lazy('sixth'),\n 7: gettext_lazy('seventh'),\n 8: gettext_lazy('eighth'),\n 9: gettext_lazy('ninth'),\n 10: gettext_lazy('tenth')\n }\n\n if number <= 10:\n # use numbers dictionary\n # to get verbose result\n return numbers[number]\n return '{}.'.format(number)", "def long_name(self):\n value_translations = {1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\", 6: \"six\",7: \"seven\", 8: \"eight\", 9: \"nine\",\n 10: \"ten\", 'J': \"Jack\", \"Q\": \"Queen\", \"K\": \"King\", \"A\":\"Ace\"}\n return \"{} of {}\".format(value_translations[self.rank], self.suit)", "def get_cardinal_name(num):\n numbers = {\n 0: \"zero\", 1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 10: \"ten\",\n 11: \"eleven\", 12: \"twelve\", 13: \"thirteen\", 14: \"fourteen\",\n 15: \"fifteen\", 16: \"sixteen\", 17: \"seventeen\", 18: \"eighteen\",\n 19: \"nineteen\", 20: \"twenty\", 30: \"thirty\", 40: \"forty\",\n 50: \"fifty\", 60: \"sixty\", 70: \"seventy\", 80: \"eighty\", 90: \"ninety\",\n }\n def _get_tens(n):\n a, b = divmod(n, 10)\n return (numbers[n] if (n in numbers) else \"%s-%s\" % (numbers[10*a], numbers[b]))\n def _get_hundreds(n):\n tens = n % 100\n hundreds = (n // 100) % 10\n return list(compact([\n hundreds > 0 and numbers[hundreds],\n hundreds > 0 and \"hundred\",\n hundreds > 0 and tens and \"and\",\n (not hundreds or tens > 0) and _get_tens(tens),\n ]))\n\n # This needs some refactoring\n if not (0 <= num < 1e6):\n raise ValueError(\"value not supported: %s\" % num)\n thousands = (num // 1000) % 1000\n strings = compact([\n thousands and (_get_hundreds(thousands) + [\"thousand\"]),\n (num % 1000 or not thousands) and _get_hundreds(num % 1000),\n ])\n return \" \".join(flatten(strings))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just like _auto_prefix but handles the one special case for "kg" because it already has a prefix of "k" as an SI base unit. The difference is the comparison of 'power_of_ten'/1000 vs 'power_of_ten'.
def _auto_prefix_kg(value: float, power: Union[int, float]) -> str: prefixes = _prefixes if abs(value) >= 1: for prefix, power_of_ten in prefixes.items(): if abs(value) >= (power_of_ten / 1000.) ** abs(power): return prefix else: reverse_prefixes = sorted(prefixes.items(), key=lambda prefix: prefix[0]) # Get the smallest prefix to start... previous_prefix = reverse_prefixes[0][0] for prefix, power_of_ten in reversed(list(prefixes.items())): if abs(value) < (power_of_ten / 1000.) ** abs(power): return previous_prefix else: previous_prefix = prefix
[ "def _auto_prefix(value: float, power: Union[int, float], kg: bool = False) -> str:\n if value == 0:\n return \"\"\n kg_factor = 0\n if kg:\n kg_factor = 3\n prefixes = _prefixes\n abs_val = abs(value)\n value_power_of_ten = math.log10(abs_val)\n value_power_of_1000 = value_power_of_ten // (3 * power)\n prefix_power_of_1000 = value_power_of_1000 * 3 + kg_factor\n try:\n return _prefix_lookups[prefix_power_of_1000]\n except KeyError:\n return None", "def apply_prefix(value: float, unit: str) -> float:\n downfactors = {\"p\": 1e12, \"n\": 1e9, \"u\": 1e6, \"µ\": 1e6, \"m\": 1e3}\n upfactors = {\"k\": 1e3, \"M\": 1e6, \"G\": 1e9}\n if not unit:\n return value\n if unit[0] in downfactors:\n return value / downfactors[unit[0]]\n elif unit[0] in upfactors:\n return value * upfactors[unit[0]]\n else:\n raise Exception(\"Could not understand units: {u}\".format(u=unit))", "def prefix_from_value(value, short=True, tex=False): # FIXME: this could use a more sophisticated system as present for SI Units (or would this mean overkill?)\n\n import si.prefixes\n\n def mycmp(a, b): # FIXME: decorate/sort/undecorate is probably faster\n return cmp(len(a), len(b))\n variants = sorted((k for (k,v) in vars(si.prefixes).iteritems() if v==value), cmp=mycmp)\n\n if tex:\n return variants[0].replace('u','\\\\mu{}')\n\n if short:\n return variants[0]\n else:\n return variants[-1]", "def getPrefix(prefix):\n if not prefix:\n return 1\n\n prefix = prefix.lower()\n\n if prefix in PREFIX_PICO:\n return 1.0e-12\n if prefix in PREFIX_NANO:\n return 1.0e-9\n if prefix in PREFIX_MICRO:\n return 1.0e-6\n if prefix in PREFIX_MILLI:\n return 1.0e-3\n if prefix in PREFIX_KILO:\n return 1.0e3\n if prefix in PREFIX_MEGA:\n return 1.0e6\n if prefix in PREFIX_GIGA:\n return 1.0e9\n\n return 1", "def is_prefix(self, current_prefix, original, debug=DEBUG):\n if (current_prefix == original): #exit conditions\n return \"*\";\n else:\n #go backwards\n # 3 conditions for possible suffix\n split = (len(original)-len(current_prefix)) #the position at which the word is split 12 - 11 = 11 or -1\n first_part = original[0:split] #STILL Bb\n second_part = original[split:];\n second_part_cut = second_part[1:]; \n second_part_uncut = original[split-1:len(original)];\n if ((second_part in self.words_check) ): #and (not (second_part == original))\n second_condition = self.backward_trie.probability( reverse(second_part), reverse(second_part_cut), DEBUG) #could be switch cut and normal way round?\n if ((second_condition > 1 - threshold) and (second_condition < 1 + threshold)): #close to 1 (#TODO: Test closer values)\n third_condition = self.backward_trie.probability( reverse(second_part), reverse(second_part_uncut), DEBUG)\n if (third_condition < 1):\n if (first_part in self.word_score_prefix):\n self.word_score_prefix[first_part] = self.word_score_prefix.get(first_part, 0) + (reward) + 1 #20 instead of 19 because they'll be -1'd anyway. 
It avoids a few elses #morphemes might not in the original wordlist \n self.word_score_prefix[first_part] = self.word_score_prefix.get(first_part, 0) + punish;#self.word_score_prefix[first_part] -= 1; #if second part is not in words we don't care\n prefix_length = len(current_prefix)\n self.is_prefix(current_prefix + original[prefix_length :prefix_length+1], original, DEBUG) #recursively add on a new letter", "def get_si_prefix(value: float) -> (float, str):\n prefixes = [\n \"a\",\n \"f\",\n \"p\",\n \"n\",\n \"μ\",\n \"m\",\n \"\",\n \"k\",\n \"M\",\n \"G\",\n \"T\",\n \"P\",\n \"E\",\n \"Z\",\n \"Y\",\n ]\n if abs(value) < 1e-18:\n return 0, \"\"\n i = int(math.floor(math.log10(abs(value))))\n i = int(i / 3)\n p = math.pow(1000, i)\n s = round(value / p, 2)\n ind = i + 6\n return s, prefixes[ind]", "def koiname(k, star=False, koinum=False):\n name = ''\n if type(k) in (type(1),np.int64):\n name = 'K%08.2f' % (k+0.01)\n elif type(k) in (type(1.),np.float64,np.float32):\n name = 'K%08.2f' % k\n else:\n if type(k) == type(''):\n k = k.strip()\n m = re.search('^(\\d+)$',k)\n if m:\n name = 'K%08.2f' % (int(m.group(1)) + 0.01)\n m = re.search('^(\\d+\\.\\d+)$',k)\n if m:\n name = 'K%08.2f' % (float(m.group(1)))\n m = re.search('(K\\d\\d\\d\\d\\d)',k)\n if m:\n name = '{}.01'.format(m.group(1))\n m = re.search('(K\\d\\d\\d\\d\\d[A-Z]?$)',k)\n if m:\n name = '%s.01' % m.group(1)\n m = re.search('(K\\d\\d\\d\\d\\d\\.\\d\\d)',k)\n if m:\n name = '%s' % m.group(1)\n m = re.search('[Kk][Oo][Ii][-_]?(\\d+)$',k)\n if m:\n name = 'K%05i.01' % int(m.group(1))\n m = re.search('[Kk][Oo][Ii][-_]?((\\d+)\\.(\\d+))',k)\n if m:\n name = 'K%08.2f' % float(m.group(1))\n if name == '':\n raise KeyError('\"%s\" not a valid KOI name' % k)\n if star:\n name = name[:-3]\n if koinum:\n m = re.search('K(\\d\\d\\d\\d\\d)',name)\n name = int(m.group(1))\n else:\n if koinum:\n m = re.search('K(\\d\\d\\d\\d\\d\\.\\d\\d)',name)\n name = float(m.group(1))\n return name", "def get_unit_suffix(unit):\n\n order = int(math.log10(max(unit, 1)))\n if order < 3:\n unitstr = \"\"\n unit2 = 1\n elif 3 <= order < 6:\n unitstr = \"K\"\n unit2 = 1000\n elif 6 <= order < 9:\n unitstr = \"M\"\n unit2 = 1e6\n elif 9 <= order < 12:\n unitstr = \"G\"\n unit2 = 1e9\n elif 12 <= order < 15:\n unitstr = \"T\"\n unit2 = 1e12\n elif 15 <= order:\n unitstr = \"e\" + str(order)\n unit2 = unit\n\n return unit2, unitstr", "def lookup_prefix(digits: str) -> Tuple[int, int]:\n return lookup_ismn_prefix(digits)", "def my_kn1(x):\n return kn(1, x) if x<=600 else 1e-100", "def test_compute_prefixes():\n assert src.compute_prefixes([\"foo\", \"bar\", \"baz\"]) == {\n \"foo\": \"f\",\n \"bar\": \"bar\",\n \"baz\": \"baz\",\n }\n assert src.compute_prefixes([\"foo\", \"foo\"]) == {}\n assert src.compute_prefixes([\"foo\", \"foobar\"]) == {\"foobar\": \"foob\"}", "def ensure_starts_with(s: str, prefix: str) -> str:\n if not s.startswith(prefix):\n return prefix + s\n return s", "def get_letter_for_units(self, units):\n return 'F' if units == 'imperial' else 'C' if units == 'metric' else 'K'", "def poundsToKilograms(pounds):\n return(pounds * .453592)", "def __prefixNumber(num, leading):\n length = int(leading)+1\n num = str(num)\n while len(num) < length:\n num = '0' + num\n return num", "def test_get_suffix():\n from .divider import get_suffix\n\n # less than milliohms\n assert get_suffix(-20) == '* 10^-20 Ohm'\n assert get_suffix(-4) == '* 10^-4 Ohm'\n\n # milliohms\n assert get_suffix(-3) == 'mOhm'\n assert get_suffix(-2) == 'mOhm'\n assert 
get_suffix(-1) == 'mOhm'\n\n # ohms\n assert get_suffix(0) == 'Ohm'\n assert get_suffix(1) == 'Ohm'\n assert get_suffix(2) == 'Ohm'\n\n # kiloohms\n assert get_suffix(3) == 'kOhm'\n assert get_suffix(4) == 'kOhm'\n assert get_suffix(5) == 'kOhm'\n\n # megaohms\n assert get_suffix(6) == 'MOhm'\n assert get_suffix(7) == 'MOhm'\n assert get_suffix(8) == 'MOhm'\n\n # gigaohms\n assert get_suffix(9) == 'GOhm'\n assert get_suffix(10) == 'GOhm'\n assert get_suffix(11) == 'GOhm'\n\n # larger than gigaohms\n assert get_suffix(12) == '* 10^12 Ohm'\n assert get_suffix(20) == '* 10^20 Ohm'", "def max_prefix_match(str1, str2):\n result = 0\n for (char1, char2) in zip(str1, str2):\n assert char1 in \"ACGT\"\n if char1 in DEGENERACY_MAP[char2]:\n result += 1\n else:\n break\n return result", "def lookup_prefix(digits: str) -> int:\n if digits.startswith('977'):\n return 3\n raise ValueError(\"ISSN prefix must be '977'.\")", "def test_string_like_behavior():\n prefix = Prefix(\"/usr\")\n\n assert prefix == \"/usr\"\n assert isinstance(prefix, str)\n\n assert prefix + \"/bin\" == \"/usr/bin\"\n assert \"--prefix=%s\" % prefix == \"--prefix=/usr\"\n assert \"--prefix={0}\".format(prefix) == \"--prefix=/usr\"\n\n assert prefix.find(\"u\", 1)\n assert prefix.upper() == \"/USR\"\n assert prefix.lstrip(\"/\") == \"usr\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if 'value' is some form of NaN, whether float('nan') or a numpy or pandas Nan.
def is_nan(value: Any) -> bool: # Test for numpy.nan and float('nan') if not value == value: return True else: return False
[ "def is_nan(value: Any) -> bool:\n import numpy as np\n\n try:\n return np.isnan(value)\n except TypeError:\n return True", "def is_nan(val):\n return isinstance(val, float) and isnan(val)", "def has_nan_values(self):\n import numpy as np\n return np.any(np.isnan(self.as_vector()))", "def isNan(self):\n # Iterate over each component.\n for i in range(len(self)):\n # If this component is a NaN, return True.\n if math.isnan(self[i]):\n return True\n\n # Didn't find any NaNs, so return False.\n return False", "def is_finite(val):\n return type(val) in (float,int) and val not in (infinity, -infinity, nan)", "def is_missing(data, missing_value):\n if is_float(data) and isnan(missing_value):\n return isnan(data)\n elif is_datetime(data) and isnat(missing_value):\n return isnat(data)\n elif is_object(data) and missing_value is None:\n # XXX: Older versions of numpy returns True/False for array ==\n # None. Work around this by boxing None in a 1x1 array, which causes\n # numpy to do the broadcasted comparison we want.\n return data == np.array([missing_value])\n return (data == missing_value)", "def detect_na(data):\r\n \r\n df = pd.DataFrame(data)\r\n has_na = pd.isnull(df).values.any()\r\n\r\n return has_na", "def is_nan(self):\n if self.coordinates is None:\n return False\n elif self.singular:\n return np.all(np.isnan(self.coordinates))\n elif self.coordinates.ndim == 1:\n return np.isnan(self.coordinates)\n else:\n return self.apply_coordinate_mask_function(\n self.coordinates, csnf.check_nan)", "def test_sesgo_not_nan(self, df):\n self.assertFalse(df.isnull().values.any(), note=\"Las métricas de sesgo e inequidad contienen nulos\")", "def test_no_nans(self):\n self.assertTrue(read_dataframe().isnull().values.any(), \"There are NaNs!\")", "def is_name_null(name):\n if isinstance(name,str):\n if (name.strip()=='') | (name.lower()=='nan'):\n return True\n else: return False\n elif isinstance(name,float):\n if np.isnan(name):\n return True\n else: return False\n else: return False", "def check_for_missing(df):\n\n numeric_df = df.select_dtypes(include='number')\n assert not (numeric_df.isna().values.any() or np.isinf(numeric_df.values).any())", "def is_nan_model(model: RawGPModelType) -> bool:\n return np.isnan(model.param_array).any()", "def isnan(quat):\n if math.isnan(quat.s) or math.isnan(quat.v[0,0]) or math.isnan(quat.v[0,1]) or math.isnan(quat.v[0,2]):\n return True\n else:\n return False", "def nan(self, x):\n return math.isnan(x)", "def value_in_df(df: pd.DataFrame, *, value: Any) -> bool:\n return (pd.isnull(value) and df.isna().any().any()) or (df == value).any().any()", "def check_missing_values(data):\n if data.isnull().values.any():\n missing_values = data[data.isna().any(axis=1)]\n raise TwiFileErrorMissingValues(missing_values)", "def isfinite(x) -> bool:\n pass", "def check_nan(tensor):\n\tassert(not(torch.isnan(tensor).any()))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raises 'a' to the power of 'b' with the intention of returning a Fraction if the result can be expressed as a Fraction. Returns a float otherwise.
def fraction_pow(a: Fraction, b: Fraction) -> Union[Fraction, float]: if isinstance(b, int): return a**b else: c = a**b if isinstance(c, Fraction): return 1 / c x, y = c.as_integer_ratio() d = Decimal(str(x / y)) m, n = d.as_integer_ratio() return Fraction(n, m)
[ "def rational_div(a,b):\r\n if isinstance(a,Rational) and isinstance(b,Rational):\r\n return Fraction(a,b)\r\n return div(a,b)", "def power(a, b):\n\n if b == 0:\n return 1\n\n return a * power(a, (b - 1))", "def log_frac(a, b):\n return safe_log(a) - safe_log(b)", "def percent_fraction(a, b):\n return a / b * 100", "def smart_division(a, b):\n try:\n return float(a) / float(b)\n except ZeroDivisionError:\n return 0.0", "def exponent(a, b):\n result_exp = round(a ** b, 4)\n print(\"The result of \" + str(a) + \" raised to the \" + str(b) + \" is \" + str(result_exp))\n return str(a) + \" ** \" + str(b) + \" = \" + str(result_exp)", "def divide(self, a, b):\r\n return a.__div__(b, context=self)", "def percent_difference_fraction(a, b):\n return (a - b) / a * 100", "def maxDivide(a, b):\n\t\twhile a % b == 0:\n\t\t\ta = a / b\n\t\treturn a", "def divisor(a, b):\n try:\n ans = a / b\n except ZeroDivisionError as e:\n ans = None\n err = e\n except TypeError as e:\n ans = None\n err = e\n else:\n err = None\n finally:\n return ans, err", "def mult(a,b):\n if b==1:\n return a\n else:\n return a + mult(a, b -1)", "def calculate_root(f: Polynomial, a, b, eps):\n assert f(a)*f(b) < 0\n\n df = f.deriv()\n\n def newtons_lambda(x):\n return -1 / df(x)\n\n return sim.calculate_root(f, newtons_lambda, a, b, eps)", "def a_plus_abs_b(a, b):\n\tif b < 0:\n\t\tf = sub\n\telse:\n\t\tf = add\n\treturn f(a, b)", "def _safe_divide(a: int, b: int):\n\n if a < 0:\n raise ValueError('a ({}) < 0'.format(a))\n elif a > b:\n raise ValueError('a ({}) > b ({})'.format(a, b))\n\n return a / b", "def multiple(a, b):\n import math\n return a * b // math.gcd(a, b)", "def multiply(self,f):\n n=self.numerator*f.numerator\n d=self.denominator*f.denominator\n return(Fraction(n,d))", "def div(a, b):\n\n def divide(a, b):\n \"\"\"Division\"\"\"\n return a / b\n\n return op_with_scalar_cast(a, b, divide)", "def percent_difference_fraction_log(a, b):\n import numpy as np\n\n return (np.log10(a) - np.log10(b)) / np.log10(a) * 100", "def __mul__(self, other):\n self.__validate_type(other)\n return Fraction(\n self.numerator * other.numerator,\n self.denominator * other.denominator\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initialises an Agent object. _y, _x from web scraping passed into constructor function. Agent gets copy of the environment. Agent gets copy of list of all other agents. Sets (y,x) randomly in [0,300] if (y,x) arguments missing. Store attribute set to 0.
def __init__(self,environment, agents,_y= None ,_x = None):#doesnt need _y and _x setting if generating random values below
    #self._x = random.randint(0,10) #changed from 300 to check share_with_neighbour
    #self._y = random.randint(0,10)
    if (_x == None):
        self._x = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape
    else:
        self._x = _x

    if (_y == None):
        self._y = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape
    else:
        self._y = _y

    self.environment = environment
    self.store = 0
    self.agents = agents

    # Move the agents.
[ "def reset(self,**kwargs):\n self.rng = np.random.default_rng()\n try: \n # self.nb_agents = kwargs['nb_agents']\n self.nb_targets = kwargs['nb_targets']\n except:\n # self.nb_agents = np.random.random_integers(1, self.num_agents)\n self.nb_targets = np.random.random_integers(1, self.num_targets)\n obs_dict = {}\n init_pose = self.get_init_pose(**kwargs)\n # Initialize agents\n for ii in range(self.nb_agents):\n self.agents[ii].reset(init_pose['agents'][ii])\n obs_dict[self.agents[ii].agent_id] = []\n\n # Initialize targets and beliefs\n for nn in range(self.nb_targets):\n self.belief_targets[nn].reset(\n init_state=np.concatenate((init_pose['belief_targets'][nn], np.zeros(2))),\n init_cov=self.target_init_cov)\n t_init = np.concatenate((init_pose['targets'][nn], [self.target_init_vel[0], 0.0]))\n self.targets[nn].reset(t_init)\n # For nb agents calculate belief of targets assigned\n for jj in range(self.nb_targets):\n for kk in range(self.nb_agents):\n r, alpha = util.relative_distance_polar(self.belief_targets[jj].state[:2],\n xy_base=self.agents[kk].state[:2], \n theta_base=self.agents[kk].state[2])\n logdetcov = np.log(LA.det(self.belief_targets[jj].cov))\n obs_dict[self.agents[kk].agent_id].append([r, alpha, 0.0, 0.0, logdetcov, \n 0.0, 0.0, 0.0, self.sensor_r, np.pi])\n for agent_id in obs_dict:\n obs_dict[agent_id] = np.asarray(obs_dict[agent_id])\n return obs_dict", "def __init__(self,agent,agentinstance,relaxed=0):\n self.agent=agent\n self.agentinstance=agentinstance\n self.relaxed=relaxed\n self.depmap={}\n self.valmap={}\n #self.varorder=[] #the order in which variables must be instantiated to account for referential dependencies\n #self.mode=\"dummy\" #dummy means that a generic object will be returned just to account for dependencies", "def assign_attributes(self):\n\t\tfor agent in self.agents_list:\n\t\t\tagent.number_of_states = self.number_of_states\n\t\t\tagent.state = random.choice(self.states_list)", "def __init__(self, agent):\n self.agent: Agent = agent", "def _initialize_agents(self):\n\n for agent in self.agents:\n agent.fill_with_binary()\n\n self.best_agent = copy.deepcopy(self.agents[0])", "def generate_random_agents(self):\n\n agents = []\n for _ in range(self.num_agents):\n agent = deepcopy(self.agent)\n for param in agent.parameters():\n param.requires_grad = False\n agents.append(agent)\n return agents", "def create_agents_list(self):\n\t\tself.agents_list = [agents.Agent() for count in xrange(self.number_of_agents)]", "def createAgents(self, number):\r\n # initial empty agents list\r\n agents = []\r\n \r\n # create agents and append to agents list\r\n for _ in range(self.number):\r\n agentParams = self.randomizeAgentParams()\r\n agents.append(Agent(self.params, agentParams, agents))\r\n \r\n # return created agents\r\n return agents", "def __init_category_agents(self, num_category_agents):\n for i in range(num_category_agents):\n unique_id = \"category\" + i\n agents = []\n high_quality = 10\n low_quality = 1\n\n # 产品种类高低价格区间\n high_cost = random.randint(50,100)\n low_cost = random.randint(5,25)\n category_agent = CategoryAgent(unique_id, self, agents, high_quality, low_quality, high_cost, low_cost)\n self.category_schedule.add(category_agent)", "def create_world(num_agents):\n world_radius = 5 + 2 * num_agents\n zero_pose = Pose(0,0,0)\n object_list = [APFAgent(pose=zero_pose, goal_pose=zero_pose, radius=2, id=0)]\n delta_angle = 2*math.pi/num_agents\n \n agent_list = []\n for i in range(num_agents):\n pose_angle = i*delta_angle\n pose_opposite_angle= 
angle_diff(pose_angle + math.pi)\n pose_radius = world_radius - 1\n pose = Pose(pose_radius*math.cos(pose_angle), pose_radius*math.sin(pose_angle), pose_opposite_angle)\n goal_pose = Pose(pose_radius*math.cos(pose_opposite_angle), pose_radius*math.sin(pose_opposite_angle), pose_opposite_angle)\n agent = APFAgent(pose, goal_pose, default_agent_radius, i)\n \n agent_list.append(agent)\n \n return agent_list, object_list, world_radius", "def random_agent(env):\n start_state = AirplaneState(randint(0, env.grid_size), randint(0, env.grid_size),\n randint(10, env.max_height), 0, randint(0, 7), randint(env.min_speed, env.max_speed))\n goal_state = AirplaneState(randint(0, env.grid_size), randint(0, env.grid_size),\n randint(10, env.max_height), 0, randint(0, 7), randint(env.min_speed, env.max_speed))\n name = ''.join(choice(string.ascii_uppercase + string.digits) for _ in range(6))\n return AirplaneAgent(start_state, goal_state, name)", "def __init__(self, crawler, sensitive_data, test_vectors, slow_time, random):\n\t\tself.crawler = crawler\n\t\tself.sensitive_data = sensitive_data\n\t\tself.vectors = test_vectors\n\t\tself.slow_time = slow_time / 1000.0\n\t\t\n\t\tself.s = crawler.s", "def __init__(self, env, agent, interact, params):\n \n self.env = env\n self.agent = agent\n self.interact = interact\n self.params = params\n self.num_agents = params['num_agents']\n self.runs = params['runs']\n self.timesteps = params['timesteps']\n self.top_parents = params['top_parents']\n self.generations = params['generations']\n self.mutation_power = params['mutation_power']", "def __init__(\n self,\n time_step_spec,\n action_spec,\n # Specific to multi-agent case\n n_agents,\n learning_rate=1e-4,\n # Specific to multi-grid agents\n actor_fc_layers=(32, 32),\n value_fc_layers=(32, 32),\n lstm_size=(128,),\n conv_filters=8,\n conv_kernel=3,\n direction_fc=5,\n # Modifying agents\n inactive_agent_ids=tuple(),\n non_learning_agents=tuple(),\n # PPO Clip agent params\n importance_ratio_clipping=0.0,\n lambda_value=0.95,\n discount_factor=0.99,\n entropy_regularization=0.05,\n policy_l2_reg=0.0,\n value_function_l2_reg=0.0,\n shared_vars_l2_reg=0.0,\n value_pred_loss_coef=0.5,\n num_epochs=25,\n use_gae=False,\n use_td_lambda_return=False,\n normalize_rewards=True,\n reward_norm_clipping=10.0,\n normalize_observations=True,\n log_prob_clipping=0.0,\n gradient_clipping=None,\n check_numerics=False,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n train_step_counter=None,\n network_build_fn=multigrid_networks.construct_multigrid_networks,\n policy_class=multiagent_ppo_policy.MultiagentPPOPolicy,\n agent_class=ppo_clip_agent.PPOClipAgent,\n name='MultiagentPPO'):\n self.n_agents = n_agents\n self.inactive_agent_ids = inactive_agent_ids\n self.non_learning_agents = non_learning_agents\n\n # Get single-agent specs\n (single_obs_spec, single_time_step_spec,\n single_action_spec) = self.get_single_agent_specs(time_step_spec,\n action_spec)\n\n # Make baby agents\n self.agents = [None] * self.n_agents\n self.optimizers = [None] * self.n_agents\n for agent_id in range(self.n_agents):\n with tf.name_scope('agent_' + str(agent_id)):\n self.optimizers[agent_id] = tf.compat.v1.train.AdamOptimizer(\n learning_rate=learning_rate)\n\n # Build actor and critic networks\n actor_net, value_net = network_build_fn(\n single_obs_spec,\n single_action_spec,\n actor_fc_layers=actor_fc_layers,\n value_fc_layers=value_fc_layers,\n lstm_size=lstm_size,\n conv_filters=conv_filters,\n conv_kernel=conv_kernel,\n 
scalar_fc=direction_fc)\n\n logging.info('Creating agent %d...', agent_id)\n self.agents[agent_id] = agent_class(\n single_time_step_spec,\n single_action_spec,\n self.optimizers[agent_id],\n actor_net=actor_net,\n value_net=value_net,\n entropy_regularization=entropy_regularization,\n importance_ratio_clipping=0.2,\n normalize_observations=False,\n normalize_rewards=False,\n use_gae=True,\n num_epochs=num_epochs,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter,\n compute_value_and_advantage_in_train=True)\n self.agents[agent_id].initialize()\n\n with tf.name_scope('meta_agent'):\n # Initialize policies\n self._policies = [self.agents[a].policy for a in range(self.n_agents)]\n policy = policy_class(\n self._policies,\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n clip=False,\n collect=False,\n inactive_agent_ids=inactive_agent_ids)\n\n self._collect_policies = [\n self.agents[a].collect_policy for a in range(self.n_agents)\n ]\n collect_policy = policy_class(\n self._collect_policies,\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n clip=False,\n collect=True,\n inactive_agent_ids=inactive_agent_ids)\n\n super(MultiagentPPO, self).__init__(\n time_step_spec,\n action_spec,\n policy,\n collect_policy,\n train_sequence_length=None,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter)\n\n self._global_step = train_step_counter\n self.update_normalizers_in_train = False\n print('Finished constructing multi-agent PPO')", "def __init__(self, n_rows=3, n_columns=16, n_obs=2, agents_r=[0,2],\n agents_c=[16,16], n_agents=1, max_steps=50):\n assert(n_rows % 2 == 1)\n assert(n_columns % 2 == 0)\n # Only n_rows and n_columns have green and orange squares\n self.n_rows = n_rows\n self.n_columns = n_columns\n self.n_obs = n_obs\n # Total grid size is larger so that agents' observations are valid\n # when they are located on the boundary\n self.total_rows = self.n_rows + 2*self.n_obs\n self.total_columns = self.n_columns + 2*self.n_obs + 1\n\n # Used to determine episode termination\n self.max_collectible = self.n_rows * self.n_columns\n\n self.n_agents = n_agents\n self.max_steps = max_steps\n\n # Initial agent locations, situated in expanded grid \n self.agents_r = np.array(agents_r) + self.n_obs\n self.agents_c = np.array(agents_c) + self.n_obs", "def initialize(self, num_agents: int) -> None:\n #add sites\n if config['aggregation']['site']:\n object_loc=config['aggregation']['location']\n filename = (\"experiments/aggregation/images/greyc1.png\")\n self.objects.add_object(\n file=filename, pos=object_loc, scale=config['aggregation']['scale'], obj_type=\"site\")\n if config['aggregation']['site2']:\n object_loc=config['aggregation']['location2']\n filename = (\"experiments/aggregation/images/greyc1.png\")\n self.objects.add_object(\n file=filename, pos=object_loc, scale=config['aggregation']['scale2'], obj_type=\"site\")\n #add outside borders\n if config['aggregation']['outside']:\n scale = [800, 800]\n filename = (\"experiments/flocking/images/convex.png\")\n#(\"experiments/aggregation/images/greyc2.png\")\n object_loc = config[\"base\"][\"object_location\"]\n self.objects.add_object(\n file=filename, pos=object_loc, scale=scale, obj_type=\"obstacle\"\n )\n min_x, max_x = area(object_loc[0], scale[0])\n min_y, max_y = area(object_loc[1], scale[1])\n #add agents\n for index, agent in enumerate(range(num_agents)):\n 
coordinates = generate_coordinates(self.screen)\n if config['aggregation']['outside']:\n while (\n coordinates[0] >= max_x\n or coordinates[0] <= min_x\n or coordinates[1] >= max_y\n or coordinates[1] <= min_y\n ):\n coordinates = generate_coordinates(self.screen)\n\n\n self.add_agent(Cockroach(pos=np.array(coordinates), v=None, aggregation=self, index=index))", "def initialize(self):\n\n \"*** YOUR CODE HERE\"\n #agent가 생성될때마다 agentNum을 하나씩 증가시킨다.\n MyAgent.agentNum = MyAgent.agentNum+1", "def __init__(self):\n self.y = random.randint(0, 99)\n self.x = random.randint(0, 99)", "def graph_init(G, agent):\n agent_dict = {}\n i = 0\n for agent in agent:\n agent_dict[i] = {'agent': agent}\n i += 1\n nx.set_node_attributes(G, agent_dict)\n return G" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines move() behaviour of Agent. Agent's y and x move randomly by +/-1; torus solution to deal with boundary effects.
def move(self):
    if random.random() < 0.5:
        self._y = (self._y + 1) % 300
    else:
        self._y = (self._y - 1) % 300

    if random.random() < 0.5:
        self._x = (self._x + 1) % 300
    else:
        self._x = (self._x - 1) % 300
[ "def plane_move(self): \r\n \r\n #Move bacteria in xy plane \r\n # Generate random number from which xy movement will be decided\r\n randnum = random.random()\r\n # 5% chance of bacteria moving in -ve x direction\r\n if randnum <= self.prob_west:\r\n self.bomb_origin_x -= 1#\r\n # 10% chance of bacteria moving in -ve y direction \r\n elif randnum <= (self.prob_west + self.prob_south):\r\n self.bomb_origin_y -= 1\r\n # 10% chance of bacteria moving in +ve y direction \r\n elif randnum <= (self.prob_west + self.prob_south + self.prob_north):\r\n self.bomb_origin_y += 1\r\n # 75% chance of bacteria moving in ve x direction \r\n else:\r\n self.bomb_origin_x += 1", "def move(agent, firefly, network, hx):\n #network_output = network(encode(firefly - agent), hx)\n network_output = network(encode(firefly - agent))\n x_step = network_output[0,0]\n y_step = network_output[0,1]\n x = agent[0,0]\n y = agent[0,1]\n new_x = x + x_step\n new_y = y + y_step\n new_agent = torch.stack([new_x, new_y], dim=1)\n return new_agent", "def move(self):\n if random.randint(0, 1) == 0:\n if self.position > self.left_limit:\n self.position -= 1\n\n else:\n if self.position < self.right_limit:\n self.position += 1\n self.steps += 1", "def move(self):\r\n # move agents\r\n for agent in self.agents:\r\n agent.move(self.agents)", "def reset_agent(self, mode=None):\n if self.start_ind is not None:\n # Spawn the agent at the start state\n self.x = self.get_coords(self.start_ind)\n else:\n # Spawn the agent not too close to the goal\n self.x = self.get_random_pos(self.grid_free_index)\n while np.sum(np.square(self.x - self.g[0,:])) < 0.5:\n self.x = self.get_random_pos(self.grid_free_index)", "def __init__(self,environment, agents,_y= None ,_x = None):#doesnt need _y and _x setting if generating random values below \r\n #self._x = random.randint(0,10) #changed from 300 to check share_with_neighbour\r\n #self._y = random.randint(0,10) \r\n if (_x == None):\r\n self._x = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape\r\n else:\r\n self._x = _x\r\n \r\n if (_y == None):\r\n self._y = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape\r\n else:\r\n self._y = _y\r\n \r\n self.environment = environment\r\n self.store = 0\r\n self.agents = agents\r\n \r\n\r\n \r\n # Move the agents.\r", "def move_arm(self):\n target_dx = np.random.uniform(-self._hp.x_range, self._hp.x_range) - self._previous_target_qpos[0]\n target_dy = np.random.uniform(0.12, self.high_bound[2]) - self._previous_target_qpos[1]\n self.step(np.array([target_dx, target_dy, -1]))", "def place_agent(self):\n if self.initial_position is None:\n indx = np.random.randint(0, len(self.possible_locations))\n self.position = self.possible_locations[indx]\n else:\n self.position = np.copy(self.initial_position)", "def random_walk(self):\n\n\t\tnow = pygame.time.get_ticks()\n\t\t\t\n\t\tif now - self.last_move_update > 1000:\n\t\t\t\n\t\t\tself.last_move_update = now\n\t\t\tbehaviour_init(self)\n\t\t\t# self.vel.x, self.vel.y = 0, 0\n\t\t\tself.direction = random.randint(0, 7)\n\t\t\tself.walk()", "def _move(self, i, action):\n x,y = self.agent_pos[i]\n if action == 0 or action == 'left':\n if x > 0:\n x = x - 1\n elif action == 1 or action == 'right':\n if x < self.grid_size[0]-1:\n x = x + 1\n elif action == 2 or action == 'down':\n if y > 0:\n y = y - 1\n elif action == 3 or action == 'up':\n if y < self.grid_size[1]-1:\n y = y + 1\n else:\n raise ValueError()\n self.agent_pos[i] = 
(x,y)", "def _move_randomly(self):\n entity = self.entity\n pos = scripts.engine.core.matter.get_entitys_component(entity, Position)\n knowledge = scripts.engine.core.matter.get_entitys_component(entity, Knowledge)\n move = knowledge.skills[\"Move\"]\n cardinals = [Direction.UP, Direction.DOWN, Direction.LEFT, Direction.RIGHT]\n\n # check spaces are free\n poss_directions = []\n for _dir in cardinals:\n x = pos.x + _dir[0]\n y = pos.y + _dir[1]\n\n tile = world.get_tile((x, y))\n has_tags = world.tile_has_tag(entity, tile, TileTag.OPEN_SPACE)\n if has_tags:\n poss_directions.append((_dir[0], _dir[1]))\n\n # if any space we can move to, do so\n if poss_directions:\n move_dir = random.choice(poss_directions)\n target_tile = world.get_tile((pos.x, pos.y)) # target tile for Move is current pos\n\n scripts.engine.core.matter.use_skill(entity, move, target_tile, move_dir)\n\n logging.debug(\n f\"'{scripts.engine.core.matter.get_name(entity)}' couldnt see a target so moved randomly from ({pos.x},\"\n f\"{pos.y}) to ({pos.x + move_dir[0]},{pos.y + move_dir[1]}).\"\n )\n\n hourglass.end_turn(entity, move.time_cost)", "def simulate_move(self):\n for atom in self.list_of_atoms:\n atom.move(self.grid)", "def move(self):\n self.steps += 1\n direction = uniform(0, 1)\n if direction < 0.5:\n self.position -= 1\n else:\n self.position += 1", "def move(self):\n\n # If red car can move, move red car\n if [\"X\"] in list(self.move_car.keys()) and self.move_car[\"X\"] == 1:\n self.random_car = \"X\"\n else:\n # Randomly pick one of the possibilities\n self.random_car = random.choice(list(self.move_car.keys()))\n \n # Get and then change coordinates \n car_orientation = self.cars[self.random_car].orientation\n if car_orientation == \"V\":\n self.temp_coordinates = copy.deepcopy(self.cars[self.random_car].row)\n self.cars[self.random_car].row = self.cars[self.random_car].row + self.move_car[self.random_car]\n else:\n self.temp_coordinates = copy.deepcopy(self.cars[self.random_car].col)\n self.cars[self.random_car].col = self.cars[self.random_car].col + self.move_car[self.random_car]", "def move(self, rows, cols, move_cost):\n if random.random() < (1/3):\n self.y = (self.y + 1) % (rows) \n self.store -= move_cost\n elif random.random() < 0.5:\n self.y = (self.y - 1) % (rows)\n self.store -= move_cost\n if random.random() < (1/3):\n self.x = (self.x + 1) % (cols)\n self.store -= move_cost\n elif random.random() < 0.5:\n self.x = (self.x - 1) % (cols)\n self.store -= move_cost", "def random_move(self):\n\t\toptions = [90, 180, 270]\n\t\tang = randint(0,2)\n\t\tn = randint(2, self.length - 1)\n\t\tself.rotate(n, radians(options[ang]))", "def move():\r\n if randrange(40) == 0: # Crea un target de manera aleatoria\r\n y = randrange(-150, 150)\r\n target = vector(200, y)\r\n targets.append(target) # Agrega el target a la lista de targets\r\n\r\n for target in targets: # Loop de movimiento en x de targets\r\n target.x -= 2.5 # Estaba en 0.5\r\n\r\n if inside(ball): # Movimiento de la pelota\r\n # Cambia velocidad en y\r\n speed.y -= 1 # Estaba en 0.35\r\n # Realiza el movimiento de la pelota con el parámetro speed\r\n ball.move(speed)\r\n\r\n # Crea una copia de targets\r\n dupe = targets.copy()\r\n # Elimina todos los targets\r\n targets.clear()\r\n\r\n for target in dupe: # Loop de targets\r\n # Verifica que la pelota esté a mayor distancia de 13 al target\r\n if abs(target - ball) > 13:\r\n # Agrega el target\r\n targets.append(target)\r\n\r\n # Dibuja los obstáculos y la pelota\r\n draw()\r\n\r\n for i in 
range(len(targets)): # Itera sobre la lista de targets\r\n # Verifica si el target se ha salido de la ventana\r\n if not inside(targets[i]):\r\n y = targets[i].y # En caso afirmativo, copia la coordenada y\r\n # Finalmente reubica el target en la posicion más a la derecha\r\n targets[i] = vector(200, y)\r\n # El juego nunca termina al no existir return\r\n\r\n ontimer(move, 50) # Llama a la función de movimiento cada 50 milisegundos\r", "def _move_actor(self,actor):\n px = actor.center_x\n vx = actor.change_x\n actor.center_x = 1 + (px + vx - 1) % (constants.MAX_X - 1)\n py = actor.center_y\n vy = actor.change_y\n actor.center_y = 1 + (py + vy - 1) % (constants.MAX_Y - 1)", "def step(self, action):\n\n if self.environment_type == 'deterministic':\n # Describing the outcomes of the various possible actions.\n if action == 0:\n self.agent_pos[0] += 1 # This action causes the agent to go right.\n if action == 1:\n self.agent_pos[0] -= 1 # This action causes the agent to go left.\n if action == 2:\n self.agent_pos[1] += 1 # This action causes the agent to go up.\n if action == 3:\n self.agent_pos[1] -= 1 # This action causes the agent to go down.\n\n if self.environment_type == 'stochastic':\n # Describing the outcomes of the various possible actions.\n if action == 0: # This action causes the agent to go right with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[0] += 1\n if action == 1: # This action causes the agent to go left with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[0] -= 1\n if action == 2: # This action causes the agent to go up with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[1] += 1\n if action == 3: # This action causes the agent to go down with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[1] -= 1\n\n # Ensuring that the agent doesn't go out of the environment.\n self.agent_pos = np.clip(self.agent_pos, a_min=0, a_max=3)\n\n new_gold_distance = self.compute_distance(self.agent_pos, self.gold_pos) # Computing the new distance of the\n # agent from the Gold.\n\n # Giving the agent different rewards if its distance to the Gold increases, decreases or remains the same.\n if new_gold_distance > self.gold_distance: # If the agent moves away from the Gold it gets reward -1.\n reward = -1\n self.gold_distance = new_gold_distance\n\n elif new_gold_distance < self.gold_distance: # If the agent moves closer to the Gold it gets reward 1.\n reward = 1\n self.gold_distance = new_gold_distance\n\n else: # If the agent's distance to the Gold doesn't change it gets reward 0.\n reward = 0\n\n observation = self.render()\n\n self.timesteps += 1 # Increasing the total number of steps taken by the agent.\n\n # Setting the reward to 10 if the agent reaches the gold.\n if (self.agent_pos == self.gold_pos).all() and self.gold_quantity > 0:\n self.gold_quantity -= 1\n reward = 10\n\n for i in range(len(self.pit_pos)): # Setting the reward to -1 if the agent falls in the pit.\n if (self.agent_pos == self.pit_pos[i]).all():\n reward = -1\n\n if (self.agent_pos == self.wumpus_pos).all(): # Setting the reward to -1 if the agent is killed by Wumpus.\n reward = -1\n\n # The episode 
terminates when the agent reaches the Gold, or is killed by the Wumpus, falls into the pit, or\n # takes more than 10 steps.\n if self.gold_quantity == 0 or \\\n (self.agent_pos == self.wumpus_pos).all():\n done = True\n else:\n done = False\n for i in range(len(self.pit_pos)):\n if (self.agent_pos == self.pit_pos[i]).all():\n done = True\n if self.timesteps == self.max_timesteps:\n done = True\n info = {}\n\n return observation, reward, done, info" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Agent eats 10 if environment is >10 at (y,x), or eats the remainder if environment is <=10. Amount eaten is added to store.
def eat(self):
    if self.environment[self._y][self._x] > 10:
        self.environment[self._y][self._x] -= 10
        self.store += 10
    else:
        self.store += self.environment[self.y][self.x]
        self.environment[self.y][self.x] = 0
[ "def eat(self, store_capacity, consumption_rate):\n \n if self.environment[self.y][self.x] > consumption_rate:\n self.environment[self.y][self.x] -= consumption_rate\n self.environment[self.y][self.x] = round(\n self.environment[self.y][self.x],1)\n self.store += consumption_rate\n self.store = round(self.store,1)\n else:\n self.store += self.environment[self.y][self.x]\n self.store = round(self.store,1)\n self.environment[self.y][self.x] = float(0)\n \n # 'int' used to ensure all store values are floats. Division in 'share \n # with neighbours' could create floats from integers.\n if self.store > store_capacity:\n sicked_up = random.randint(int(0.25 * self.store),\n int(0.75 * self.store))\n self.environment[self.y][self.x] += sicked_up \n self.store -= sicked_up", "def step(self, action):\n\n if self.environment_type == 'deterministic':\n # Describing the outcomes of the various possible actions.\n if action == 0:\n self.agent_pos[0] += 1 # This action causes the agent to go right.\n if action == 1:\n self.agent_pos[0] -= 1 # This action causes the agent to go left.\n if action == 2:\n self.agent_pos[1] += 1 # This action causes the agent to go up.\n if action == 3:\n self.agent_pos[1] -= 1 # This action causes the agent to go down.\n\n if self.environment_type == 'stochastic':\n # Describing the outcomes of the various possible actions.\n if action == 0: # This action causes the agent to go right with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[0] += 1\n if action == 1: # This action causes the agent to go left with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[0] -= 1\n if action == 2: # This action causes the agent to go up with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[1] += 1\n if action == 3: # This action causes the agent to go down with 0.9 probability and remain in the same\n # position with 0.1 probability.\n probability = random.uniform(0, 1)\n if probability > 0.1:\n self.agent_pos[1] -= 1\n\n # Ensuring that the agent doesn't go out of the environment.\n self.agent_pos = np.clip(self.agent_pos, a_min=0, a_max=3)\n\n new_gold_distance = self.compute_distance(self.agent_pos, self.gold_pos) # Computing the new distance of the\n # agent from the Gold.\n\n # Giving the agent different rewards if its distance to the Gold increases, decreases or remains the same.\n if new_gold_distance > self.gold_distance: # If the agent moves away from the Gold it gets reward -1.\n reward = -1\n self.gold_distance = new_gold_distance\n\n elif new_gold_distance < self.gold_distance: # If the agent moves closer to the Gold it gets reward 1.\n reward = 1\n self.gold_distance = new_gold_distance\n\n else: # If the agent's distance to the Gold doesn't change it gets reward 0.\n reward = 0\n\n observation = self.render()\n\n self.timesteps += 1 # Increasing the total number of steps taken by the agent.\n\n # Setting the reward to 10 if the agent reaches the gold.\n if (self.agent_pos == self.gold_pos).all() and self.gold_quantity > 0:\n self.gold_quantity -= 1\n reward = 10\n\n for i in range(len(self.pit_pos)): # Setting the reward to -1 if the agent falls in the pit.\n if (self.agent_pos == self.pit_pos[i]).all():\n reward = -1\n\n if (self.agent_pos == self.wumpus_pos).all(): # 
Setting the reward to -1 if the agent is killed by Wumpus.\n reward = -1\n\n # The episode terminates when the agent reaches the Gold, or is killed by the Wumpus, falls into the pit, or\n # takes more than 10 steps.\n if self.gold_quantity == 0 or \\\n (self.agent_pos == self.wumpus_pos).all():\n done = True\n else:\n done = False\n for i in range(len(self.pit_pos)):\n if (self.agent_pos == self.pit_pos[i]).all():\n done = True\n if self.timesteps == self.max_timesteps:\n done = True\n info = {}\n\n return observation, reward, done, info", "def earn_money(self, amount):\n self.earned_money += amount", "def energyRequirement(world, action):", "def eat(self, amount):\n self.__weight += amount", "def __init__(self,environment, agents,_y= None ,_x = None):#doesnt need _y and _x setting if generating random values below \r\n #self._x = random.randint(0,10) #changed from 300 to check share_with_neighbour\r\n #self._y = random.randint(0,10) \r\n if (_x == None):\r\n self._x = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape\r\n else:\r\n self._x = _x\r\n \r\n if (_y == None):\r\n self._y = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape\r\n else:\r\n self._y = _y\r\n \r\n self.environment = environment\r\n self.store = 0\r\n self.agents = agents\r\n \r\n\r\n \r\n # Move the agents.\r", "def loop(env, agent, training):\n reward = 0\n done = False\n score = 0\n special_data = {}\n special_data['ale.lives'] = 3\n ob = env.reset()\n while not done:\n \n action = agent.act(ob, reward, done, training=training)\n ob, reward, done, _ = env.step(action)\n score += reward\n # env.render()\n \n # Close the env and write monitor result info to disk\n # print (\"Your score: %d\" % score)\n return score", "def apply_earned_money(self):\n if self.earned_money != 0:\n self.money += self.earned_money\n\n if self.profile[\"show-earned-money\"]:\n ea = EarnMoney(self.earned_money, self.x+2,\n self.y-22-MONEY_FONT_SIZE,\n font_color=self.profile[\"skin\"])\n self.field.add(ea, layer=TOP_LAYER)\n\n if self.profile[\"money-info\"]:\n self.setup_money_info()\n\n self.earned_money = 0\n\n # Possible auto-buy weapon\n for ab in self.profile[\"autobuy-list\"]:\n if (not self.has_weapon(ab)) and \\\n (self.money >= WEAPONS[ab][\"price\"]):\n self.buy_weapon(ab)\n break\n\n if self.profile[\"show-hud\"]:\n self.hud.update_money(self.money)", "def _EstimateRewards(self,agents,foods,rwrdschem,world,AES,Terminated):\n # Check Agents in Foods Range\n def ResetagentReward(ID):\n #Punish for step \n agents[ID].CurrentReward= rwrdschem[2] # -1 # rwrdschem[2] if len(agents[ID].NextAction)>0 else 0\n \n for x in agents:\n ResetagentReward(x)\n\n AvailableFoods = world[(world>2000)&(world<=3000)]\n if len(AvailableFoods)==0:\n AES[0]-=1\n Terminated[0]= True if AES[0]<=0 else Terminated[0]\n #If Food Could be eaten without being in agent vision activate this\n #for ID in AvailableFoods:\n # foodcenter = World._GetElementCoords(ID,world)\n # fborder = World._GetVisionBorders(foodcenter,foods[ID].Range,world.shape)\n # crff = world[fborder[0]:fborder[1],fborder[2]:fborder[3]]\n #Find location of all elements between 0 and Food ID (2000 as default)\n # agnts = crff[(crff>1000)&(crff<=2000)]\n \n # for aID in agnts:\n # agents[aID].CurrentReward+= foods[ID].Energy* rwrdschem[1]\n # world[world==ID]=0\n \n for ID in agents.keys():\n if agents[ID].IAteFoodID >-1:\n agents[ID].CurrentReward+= foods[agents[ID].IAteFoodID].Energy* rwrdschem[1]\n 
agntcenter = World._GetElementCoords(ID,agents[ID].FullEgoCentric)\n aborder = World._GetVisionBorders(agntcenter,agents[ID].ControlRange,agents[ID].FullEgoCentric.shape)\n #print 'Control Range For Agent ID:',ID\n #Critical Area For Agent\n crfa = agents[ID].FullEgoCentric[aborder[0]:aborder[1],aborder[2]:aborder[3]]\n # List of Agents in Control Rane + In Vision Range\n \n for EnemyID in crfa[(crfa>1000)&(crfa<=2000)&(crfa!=ID)]:\n #If I have more power I punish\n if agents[ID].Power>agents[EnemyID].Power:\n agents[EnemyID].CurrentReward+= rwrdschem[0]*agents[ID].Power\n\n #Activate this if Food reward when in vision ", "def eat(self):\n if self.food > 0:\n self.food -= 1\n self.hunger -=random.randint(1,4)\n print(f\"Yum! {self.name} ate a great meal!\")\n else:\n print(f\"{self.name} doesn't have any food! Better forage for some.\")\n\n # If the hunger is less than zero set it to zero.\n\n if self.hunger< 0:\n self.hunger = 0", "def perform_strategy(self):\r\n number = -1\r\n while self._envelopeList[number].used or number == -1:\r\n number = randint(0, 101)\r\n\r\n my_envelope = self._envelopeList[number]\r\n self._envelopeList[number].used = True\r\n\r\n print(my_envelope.money)", "def incr_energy(self, nb):\n self.map.board[self.x, self.y, 10] += nb", "def MakanSteakAndBeans():\n global energy\n if (not CekStatusLebih(tambah_energi = 15)):\n energy += 15", "def forage(self):\n\n # Randomly find food from 0 to 4 pieces\n food_found = random.randint(0,4)\n self.food += food_found\n\n # Creatures get dirty from foraging\n self.dirtiness +=2\n\n print(f\"{self.name} found {food_found} pieces of food!!\")", "def attack(self, other_team):\n print(self.name + \" attacks \" + other_team.name +\"!\")\n physical_attack_total = 0\n magical_attack_total= 0\n for hero in self.heroes:\n if(hero.is_alive):\n attack_total = hero.attack()\n physical_attack_total += attack_total[0]\n magical_attack_total += attack_total[1]\n\n attack_total = list()\n attack_total.append(physical_attack_total)\n attack_total.append(magical_attack_total)\n #print(attack_total)\n kills = other_team.defend(attack_total)\n print(self.name + \" has killed \" + str(kills) + \" opponent(s)\")\n self.update_kills(kills)", "async def forage(self, ctx):\r\n #Get player location and materials.\r\n location = await AssetCreation.getLocation(self.client.pg_con, ctx.author.id)\r\n try:\r\n biome = location_dict[location]['Biome']\r\n except TypeError:\r\n await ctx.reply('Move to a location before foraging!')\r\n return\r\n\r\n if biome == 'City' or biome == 'Town':\r\n await ctx.reply(f'You can\\'t forage here at {location}! 
Get outside of an urban area.')\r\n return\r\n elif location == 'Fernheim' or location == 'Croire':\r\n mat = 'wheat'\r\n amount = random.randint(20,50)\r\n elif location == 'Sunset Prairie' or location =='Glakelys':\r\n mat = 'oat'\r\n amount = random.randint(5,20)\r\n elif biome == 'Forest':\r\n mat = 'wood'\r\n amount = random.randint(10,30)\r\n elif biome == 'Marsh':\r\n mat = 'reeds'\r\n amount = random.randint(40,80)\r\n elif biome == 'Taiga':\r\n material = random.choices(['pine', 'moss'], [66, 33])\r\n mat = material[0]\r\n if mat == 'pine':\r\n amount = random.randint(30,40)\r\n else:\r\n amount = random.randint(10,20)\r\n elif biome == 'Hills':\r\n mat = 'iron'\r\n amount = random.randint(5,10)\r\n elif biome == 'Jungle':\r\n mat = 'cacao'\r\n amount = random.randint(7,12)\r\n\r\n #Modify result given player's role and weapon type\r\n role = await AssetCreation.getClass(self.client.pg_con, ctx.author.id)\r\n if role == 'Traveler':\r\n amount *= 2\r\n\r\n if await AssetCreation.check_for_map_control_bonus(self.client.pg_con, ctx.author.id):\r\n amount = int(amount * 1.5)\r\n\r\n item_id = await AssetCreation.getEquippedItem(self.client.pg_con, ctx.author.id)\r\n item_info = await AssetCreation.getItem(self.client.pg_con, item_id)\r\n if item_info['Type'] == 'Dagger':\r\n amount = int(amount * 1.1)\r\n\r\n await AssetCreation.giveMat(self.client.pg_con, mat, amount, ctx.author.id)\r\n\r\n await ctx.reply(f'You received `{amount} {mat}` while foraging in `{location}`.')", "def MakanPizza():\n global energy\n if (not CekStatusLebih(tambah_energi = 10)):\n energy += 10", "def heal(self):\n if self.rage >= 10:\n self.rage = max(self.rage - 10, 0)\n self.health = min(self.health + 5, 100)", "def run_episode():\n obs = env.reset()\n total_reward = 0\n while True:\n env.render()\n obs, reward, done, _ = env.step(agent.predict(obs))\n total_reward += reward\n if done:\n break\n return total_reward" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks distance between each agent using distance_between. Takes "neighbourhood" argument. Prevents agent checking against itself.
def share_with_neighbours(self, neighbourhood):
    #print(neighbourhood) #testing initial setup
    for agent in self.agents:
        if agent != self:
            distance = self.distance_between(agent)
            #print("distance between", self._x, self._y, " : ",
            #      agent._x, agent._y," : ", distance) #testing distance function
            if distance <= neighbourhood:
                average_store = (self.store + agent.store)/2
                #print("store", self.store, "neighbour store", agent.store)
                self.store = average_store
                agent.store = average_store
                #print("average store: ", average_store)
[ "def share_with_neighbours(self, neighbourhood):\n for agent in self.agents:\n if agent == self:\n continue\n else:\n distance = self.distance_between(agent) \n if distance <= neighbourhood:\n sum = self.store + agent.store\n average = sum / 2\n self.store = average\n agent.store = average", "def test_to_check_dhcp_agents_work(self):\n # max difference in % between max and min value for agents quantity\n # according @kkuznetsova 50% is OK.\n perc_diff = 50 # like diff in % between 100 and 65\n\n self.set_neutron_quota(network=50, router=50, subnet=50, port=250)\n self.networks = self.create_max_networks_with_instances(self.router)\n\n # Count networks for each dhcp agent\n # Each agent should contain networks\n # And amount of networks for each agent should be nearly equal\n networks_amount_on_each_agt = []\n for agt_id in self.dhcp_agent_ids:\n amount = len(self.os_conn.neutron.list_networks_on_dhcp_agent(\n agt_id)['networks'])\n err_msg = \"The dhcp agent {} has no networks!\".format(agt_id)\n assert amount > 0, err_msg\n networks_amount_on_each_agt.append(amount)\n logger.info('the dhcp agent {0} has {1} networks'.\n format(agt_id, amount))\n\n max_val = max(networks_amount_on_each_agt)\n min_val = min(networks_amount_on_each_agt)\n perc_diff_betw_vals = self.count_percent_difference(max_val, min_val)\n logger.info('Difference between {0} and {1} is: {2}%'.format(\n max_val, min_val, perc_diff_betw_vals))\n\n err_msg = (\"Amounts of networks for each agent are not nearly equal. \"\n \"More then {0}%\").format(perc_diff)\n assert perc_diff_betw_vals <= perc_diff, err_msg", "def scan_neighbors(self, positions):\n # Calculate potential generated by topology while removing the current drone\n v = pygame.math.Vector2(0,0) \n for position in positions:\n distance = (position - self.location).magnitude()\n if (self.mode == TRAINING and 0 < distance) or (self.mode == EVALUATION and 0 < distance):# < OBSERVABLE_RADIUS):\n # Get normalized direction of neighbor \n direction = position - self.location \n # Proporcional to the distance. The closer the stronger needs to be\n #direction = direction / distance \n v += direction\n # Save last non-zero neighbours position\n if self.neighbors.magnitude() > 0:\n self.last_neighbors = self.neighbors \n # This gives the direction of the resulting potential \n self.neighbors = v.copy() / (len(positions) - 1)", "def evaluate(self, obstacles):\n slam_obstacles = self.slam.get_landmarks()\n min_distances = [self.__find_min_distance(slam_obstacle, obstacles) for slam_obstacle in slam_obstacles]\n self.average_distances.append(sum(min_distances) / len(min_distances))", "def neighbors(self):\n hood = (self.x, self.y, self.neighborhood_radius) # neighborhood\n n = collide_single(hood, self.others)\n return n", "def test_total_neighbors(st: SpaceTime):\n # This is actually only true if the space_time is large enough. 
WHen it is small enough one node may be two different neighors reducing the total number of neighbors.\n for n in events(st):\n assert len(n.neighbors) >= 4", "def _recalculateListsAndAverages(self, parentAgent, otherAgents, neighbourhoodSize, \r\n crowdedRegionSize, collisionRegionSize, blindRegionAngle, forwardRegionAngle):\r\n visibleAreaAngle = 180 - (blindRegionAngle * 0.5)\r\n forwardAreaAngle = forwardRegionAngle * 0.5\r\n neighbourhoodRegionSquared = neighbourhoodSize **2\r\n crowdedRegionSquared = crowdedRegionSize **2\r\n collisionRegionSquared = collisionRegionSize **2\r\n \r\n for otherAgent in otherAgents:\r\n otherAgentParticleId = otherAgent.agentId\r\n otherAgentState = otherAgent.state\r\n otherAgentPosition = otherAgentState.position\r\n \r\n if(otherAgentParticleId != self._agentId and\r\n not otherAgentState.isInFreefall and\r\n otherAgentParticleId not in self._reciprocalNearbyChecks and\r\n self.withinCrudeRadiusOfPoint(otherAgentPosition, neighbourhoodSize)):\r\n \r\n directionToOtherAgent = otherAgentPosition - self._position # slightly more efficient than using self.withinPreciseRadius,\r\n distanceToOtherAgentSquared = directionToOtherAgent.magnitudeSquared(True) # as this way we can reuse locally created Vectors\r\n if(distanceToOtherAgentSquared < neighbourhoodRegionSquared):\r\n angleToOtherAgent = abs(self._velocity.angleTo(directionToOtherAgent, True))\r\n \r\n if(angleToOtherAgent < visibleAreaAngle):\r\n # otherAgent is \"nearby\" if we're here\r\n self.nearbyList.append(otherAgent)\r\n weighting = self._calculateWeighting(directionToOtherAgent, neighbourhoodSize, \r\n angleToOtherAgent, forwardAreaAngle, visibleAreaAngle)\r\n self._otherAgentWeightingLookup[otherAgentParticleId] = weighting\r\n \r\n self._avVelocity.add(otherAgentState.velocity * weighting)\r\n self._avPosition.add(otherAgentPosition * weighting)\r\n self._nearbyWeightedTotal += weighting\r\n \r\n if(distanceToOtherAgentSquared < crowdedRegionSquared):\r\n # \"crowded\" if we're here\r\n self.crowdedList.append(otherAgent)\r\n self._avCrowdedPos.add(otherAgentPosition * weighting)\r\n self._crowdingWeightedTotal += weighting\r\n \r\n if(distanceToOtherAgentSquared < collisionRegionSquared and angleToOtherAgent < 90):\r\n # \"collided\" if we're here\r\n self._isCollided = True\r\n self.collisionList.append(otherAgent)\r\n self._avCollisionDirection.add(otherAgentPosition)\r\n \r\n directionToOtherAgent.invert()\r\n otherAgentState._makeReciprocalCheck(parentAgent, distanceToOtherAgentSquared, directionToOtherAgent)\r\n \r\n elif(otherAgentParticleId != self._agentId and not otherAgent.isInFreefall):\r\n otherAgentState._makeReciprocalCheck(parentAgent)\r\n # end - for loop\r\n \r\n if(self.nearbyList):\r\n self._avVelocity.divide(self._nearbyWeightedTotal)\r\n self._avPosition.divide(self._nearbyWeightedTotal)\r\n \r\n if(self.crowdedList):\r\n self._avCrowdedPos.divide(self._crowdingWeightedTotal)\r\n if(self.collisionList):\r\n self._avCollisionDirection.divide(len(self.collisionList))\r\n else:\r\n self._avVelocity.resetToVector(self._velocity)\r\n self._avPosition.resetToVector(self._position)", "def enough_neighbours():\n log.info(\"Checking if there are enough neighbours to mesh with\")\n amount = count_neighbours()\n\n enough = amount >= 2\n if not enough:\n log.warning(\"Not enough machines to bootstrap meshnet. 
\"\n \"Need {} more.\".format(2 - amount))\n elif amount == 2:\n log.info(\"New meshnet will be established\")\n return enough", "def agent_proximity_stats(self, num_of_agents):\n max = -math.inf\n min = math.inf\n min_i = 0\n min_j = 0\n max_i = 0\n max_j = 0\n agents = self.agents\n for i in range (num_of_agents): \n for j in range (num_of_agents):\n if i < j:\n distance = (((agents[i].x - agents[j].x)**2) + \n ((agents[i].y - agents[j].y)**2))**0.5 \n if distance > max:\n max = round(distance, 2)\n max_i = i\n max_j = j\n if distance < min: \n min = round(distance, 2)\n min_i = i\n min_j = j \n return [min_i, min_j, min, max_i, max_j, max]", "def tag_neighbors(actor, objs, dist):\n## print('ATTEMPT to tag_neighbors')\n #clear tags\n for o in objs:\n o.untag()\n\n if actor == o:\n continue\n \n to = o.exact_pos - actor.exact_pos\n #account for the bounding radius of the other object search range\n rng = dist + o.bounding_radius\n \n## print('{tag_neighbors.to} ', to, str(to.length_squared()),str(rng*rng) )\n\n if to.length_squared() < ( rng * rng ):\n o.tag()", "def is_met_a_bound(self, maze, distance_from_wall):\n # generate a number between -1 to 1\n if uniform(0, 1) > 0.65:\n for lidar in self.lidars:\n if len(lidar.detected_list) < lidar.radius // 2:\n return True\n else:\n if len(self.lidars[0].detected_list) < self.lidars[0].radius // 2:\n return True\n return False", "def test_neighbor_locations():\n des = Desert((2, 4))\n\n nt.assert_list_equal([(1, 4), (2, 5), (3, 4), (2, 3)],\n des.neighbour_locations(),\n \"Returns wrong locations for neighboring cells\")", "def _get_checks_in_near_lane(self, trial_data, direction, car_in_view=False):\n\t\t\n\t\tchecks_per_entry = []\n\t\t\n\t\troad_entries = trial_data.get_road_details_forward_entrance()\n\t\tif not road_entries:\n\t\t\treturn None\n\t\t\n\t\tfor road_entry in road_entries:\n\t\t\tstart_index = road_entry.get_enter_index()\n\t\t\tend_index = road_entry.get_exit_near_lane_index()\n\t\t\t\n\t\t\tif not start_index:\n\t\t\t\tchecks_per_entry.append(None)\n\t\t\t\tcontinue\n\t\t\tif not end_index:\n\t\t\t\tmoment_got_hit = trial_data.get_first_hit()\n\t\t\t\tif not moment_got_hit:\n\t\t\t\t\tchecks_per_entry.append(None)\n\t\t\t\t\tcontinue\n\t\t\t\tend_index = moment_got_hit.get_index()\n\t\t\tif car_in_view is True:\n\t\t\t\tif direction == Direction.LEFT:\n\t\t\t\t\tchecks = self._get_times_with_closest_left_facing_car_in_view(trial_data, start_index, end_index, direction)\n\t\t\t\telif direction == Direction.RIGHT: \n\t\t\t\t\tchecks = self._get_times_with_closest_right_facing_car_in_view(trial_data, start_index, end_index, direction)\n\t\t\t\t\t#print \"Has gone \" , self._get_times_with_closest_car_in_view_middle_road(trial_data, start_index, end_index, direction)\n\t\t\t\t\t# otherwise, return 9999\n\t\t\t\telse:\n\t\t\t\t\tchecks = NO_VALUE_NUM\n\t\t\tchecks_per_entry.append(checks)\t\n\t\t\t\t\n\t\treturn checks_per_entry", "def drivers_within_distance(drivers):\n return all([driven_distance(driver) <= MAX_DISTANCE for driver in drivers])", "def __init__(self,environment, agents,_y= None ,_x = None):#doesnt need _y and _x setting if generating random values below \r\n #self._x = random.randint(0,10) #changed from 300 to check share_with_neighbour\r\n #self._y = random.randint(0,10) \r\n if (_x == None):\r\n self._x = random.randint(0,300)#use this without if loop to generate random numbers rather than scrape\r\n else:\r\n self._x = _x\r\n \r\n if (_y == None):\r\n self._y = random.randint(0,300)#use this without if loop 
to generate random numbers rather than scrape\r\n else:\r\n self._y = _y\r\n \r\n self.environment = environment\r\n self.store = 0\r\n self.agents = agents\r\n \r\n\r\n \r\n # Move the agents.\r", "def get_nearest_agent(self, location, agents):\n distance = -1\n nearest = 0\n for agent in agents:\n agent_distance = self.get_distance_squared(agent.location, location)\n if agent_distance < distance or distance == -1:\n nearest = agent\n return nearest", "def find_bridges_in_radius(bridge_data: List[list], lat: float, long: float,\n distance: float) -> List[int]:\n \n bridges_in_radius = []\n for bridge in bridge_data:\n if calculate_distance(lat, long, bridge[3], bridge[4]) <= distance:\n bridges_in_radius.append(bridge[0])\n return bridges_in_radius", "def evaluate_euclidean_cell_utilities(self):\n for row in self.grid:\n for cell in row:\n cell.distance_utility = get_euclidean_distance(cell, self.target)", "def distance(board: np.array) -> int:\n total_distance = 0\n boxes_pos = np.argwhere(board == TYPE_LOOKUP['box not on target'])\n targets_pos = np.argwhere(board == TYPE_LOOKUP['box target']).tolist()\n\n for box in boxes_pos:\n distance_from_each_target = []\n for target in targets_pos:\n # Compute Manhattan distance with every empty target\n distance_from_each_target.append(np.sum(abs(box - target)))\n targets_pos.remove(targets_pos[np.argmin(distance_from_each_target)])\n total_distance += np.min(distance_from_each_target)\n\n return total_distance" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Overrides __str__ to return the _x and _y coordinates of the agent and its store.
def __str__(self):
    return "agent-_x: {0}, agent-_y: {1}, store-agent: {2}".format(self._x, self._y, self.store)
[ "def __str__(self):\n return 'The agents (x,y) coordinates are (' + str(self.x) + ',' + \\\n str(self.y) + ') and the agent is storing ' + str(round(self.store,1))\\\n + ' units'", "def __str__(self) -> str:\n\n # Builds a string representation of the point and returns it\n return \"({0}, {1})\".format(self.x, self.y)", "def position(self):\n out_string=\"({:.1f},{:.1f})\".format(self.x,self.y)\n return out_string", "def __str__(self) -> str:\n # Use *p2d* instead of *self*:\n p2d: P2D = self\n x_text: str = \"{0:.3f}\".format(p2d.x)\n y_text: str = \"{0:.3f}\".format(p2d.y)\n x_text = \"0.000\" if x_text == \"-0.000\" else x_text\n y_text = \"0.000\" if y_text == \"-0.000\" else y_text\n return f\"P2D({x_text},{y_text})\"", "def __str__(self):\n return 'LookAt: \\n\\t\\t- eye: {0} \\n\\t\\t- point: {1} \\n\\t\\t- up: {2}'.format(\n self.eye, self.point, self.up)", "def __str__(self):\n return str(self.unitName + ' (' + self.hexLocation + ')')", "def __str__(self):\n agents_symbols = '*^'\n assert len(self.agents) <= len(agents_symbols)\n if not self.agents:\n return str(self.map)\n map_repr = list(str(self.map))\n legends = []\n for h, agent in enumerate(self.agents):\n line = 1 + agent.i * 2\n col = 3 + agent.j * 4 - h * 2 # |^x*| when both agents are on the same cell\n map_repr[line * (self.m * 4 + 2) + col] = agents_symbols[h]\n legends.append('{}: agent {}\\'s position'.format(agents_symbols[h], h))\n\n legend = '\\n'.join(legends)\n return ''.join(map_repr) + legend # + '\\n'", "def __str__(self):\n return 'text \"%s\" at (%g,%g)' % (self._text, self._position.x, self._position.y)", "def __str__(self):\n return f'The water droplet is at: ({self.pos[0]}, {self.pos[1]})'", "def __str__(self):\n if self.__orientation :\n ori = \"vertically\"\n else :\n ori = \"horizontally\"\n return \"Vehicle {} of size {} and positioned {}.\".format(self.__name, self.__size, ori)", "def __str__(self):\n return \"{}:{}:\".format(str(timestamp), str(location))", "def __str__(self):\n try:\n return '{0} {1} ({2}, index={3}, type={4})'.format(self.__class__.__name__,\n self.name,\n self.group_and_variation,\n self.index,\n self.point_type)\n except UnicodeEncodeError as err:\n _log.error('Unable to convert point definition to string, err = {}'.format(err))\n return ''", "def print_point(self):\n print \"({}, {})\".format(self.x,self.y)", "def __str__(self):\n outs = [\n '\\nAbstract Geometry with {n} points :'.format(n=len(self.coords))]\n for pp in self.coords:\n outs.append(' {pp}'.format(pp=pp))\n if self.centering_type == 'standard':\n if self.include_central_site_in_centroid:\n outs.append(\n 'Points are referenced to the central site for coordination numbers < 5'\n ' and to the centroid (calculated with the central site) for coordination'\n ' numbers >= 5 : {c}\\n'.format(c=self.centre))\n else:\n outs.append(\n 'Points are referenced to the central site for coordination numbers < 5'\n ' and to the centroid (calculated without the central site) for coordination'\n ' numbers >= 5 : {c}\\n'.format(c=self.centre))\n elif self.centering_type == 'central_site':\n outs.append(\n 'Points are referenced to the central site : {c}\\n'.format(\n c=self.centre))\n elif self.centering_type == 'centroid':\n if self.include_central_site_in_centroid:\n outs.append('Points are referenced to the centroid'\n ' (calculated with the central site) :\\n {c}\\n'.format(\n c=self.centre))\n else:\n outs.append('Points are referenced to the centroid'\n ' (calculated without the central site) :\\n {c}\\n'.format(\n 
c=self.centre))\n return '\\n'.join(outs)", "def __repr__(self):\n x, y, z = self.coord\n return f\"Atom({self.label}, {x:.4f}, {y:.4f}, {z:.4f})\"", "def __str__(self):\n text = \"Attractor \" + self.label + \"\\n\"\n text += \"\\tLength: \"+ str(len(self.states)) + \"\\n\"\n text += \"\\tBasin: \"+ str(self.basin) + \"\\n\"\n text += \"\\tWith nodes: \"+ ', '.join(self.node_names) + \"\\n\" \n text += \"\\tWith states: \"\n for a in self.states: text += \" -> \" + state_to_str(a)\n return text.strip()", "def print_pos(self):\n return (pos, x, y)", "def __str__(self):\n return \"<RefinePoint: point=({}, {}, {}) size={}, create_mesh_point={}\".format(\n self.point[0], self.point[1], self.point[2],\n self.size,\n self.create_mesh_point,\n )", "def GetPositionStr(self):\n return \"Line %i.%i\" % (self.lineNum, self.linePos)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wolf randomly traverses +/-3 along x-axis direction.
def traverse(self):
    if random.random() < 0.5:
        self._x = (self._x + 3) % 300
    else:
        self._x = (self._x - 3) % 300
[ "def move(self):\r\n if random.random() < 0.5:\r\n self._y = (self._y + 1) % 300\r\n else:\r\n self._y = (self._y - 1) % 300\r\n \r\n if random.random() < 0.5:\r\n self._x = (self._x + 1) % 300\r\n else:\r\n self._x = (self._x - 1) % 300", "def wandering_turtle():\n u = turtle.Turtle()\n u.shape(\"turtle\")\n u.color(\"green\")\n t.color(\"red\")\n for i in [t, u]:\n i.penup()\n i.setpos(random.randrange(-300, 300), random.randrange(-300, 300))\n i.pendown()\n\n while True:\n for t1, t2 in [(t, u), (u, t)]:\n coin = random.randrange(2)\n angle = random.randrange(360)\n if coin:\n t1.left(angle)\n else:\n t1.right(angle)\n t1.forward(50)\n if t1.distance(0, 0) > 390 or t1.distance(t2) < 25:\n t1.setpow(0, 0)\n return wandering_turtle()", "def random_walk(self):\n\n\t\tnow = pygame.time.get_ticks()\n\t\t\t\n\t\tif now - self.last_move_update > 1000:\n\t\t\t\n\t\t\tself.last_move_update = now\n\t\t\tbehaviour_init(self)\n\t\t\t# self.vel.x, self.vel.y = 0, 0\n\t\t\tself.direction = random.randint(0, 7)\n\t\t\tself.walk()", "def WolffMoveRecursive(self):\n i = scipy.random.randint(0,self.N)\n j = scipy.random.randint(0,self.N)\n\toldSpin = self.lattice[i,j]\n\tself.lattice[i,j] = (self.lattice[i,j]+1)%2\n\tspinsFlipped = 1 + self.FlipNeighbors(i,j,oldSpin)\n\treturn spinsFlipped", "def set_horizontal_wind(self):\n self.base_wind_horizontal = round(random.gauss(0, 0.5), 2)\n pass", "def wang(x=0, y=0, z=0):\n # Next step coordinates:\n x_out = x - y*z\n y_out = x - y + x*z\n z_out = -3*z + x*y\n return x_out, y_out, z_out", "def _random_towards(current, target, radius):", "def random_move(self):\n\t\toptions = [90, 180, 270]\n\t\tang = randint(0,2)\n\t\tn = randint(2, self.length - 1)\n\t\tself.rotate(n, radians(options[ang]))", "def pickDirection():\n turtle.right(random.randrange(-1*MAX_ANGLE(),MAX_ANGLE()))", "def random(self):\r\n if self.ate_apple:\r\n self.x = 20 * random.randint(0, 23)\r\n self.y = 20 * random.randint(3, 23)", "def move(self):\n\n # Move the fish according to its speed and direction\n self.x += self.x_speed\n self.y += self.y_speed\n self.z += self.z_speed\n \n if self.z > 2.0:\n self.z = 2.0\n self.z_speed = 0\n elif self.z < 0.0:\n self.z = 0.0\n self.z_speed = 0\n\n # Fish changes direction left to right once in a while, at most every 3 seconds\n if time.time() > self.last_direction_change + 3.0:\n \n r = random.randint(0, 100)\n \n # Don't change direction towards end of tank when close to it\n if self.x_speed > 0 and self.x < 200:\n r = 0\n elif self.x_speed < 0 and self.x > 1000:\n r = 0\n \n if r == 1:\n \n # Change direction\n self.x_speed = -self.x_speed\n \n # Also lose a little speed with each turn\n if self.x_speed > 0:\n self.x_speed -= 1\n elif self.x_speed < 0:\n self.x_speed += 1\n \n # Keep track of time we turned\n self.last_direction_change = time.time()\n \n # Fish speeds up periodically in X direction but at most every 3 seconds\n if time.time() > self.last_x_speed_change + 3.0:\n r = random.randint(0, 30)\n if r == 1:\n new_speed = random.randint(5, 9)\n if new_speed > abs(self.x_speed):\n if self.x_speed < 0:\n self.x_speed = -new_speed\n else:\n self.x_speed = new_speed\n self.last_x_speed_change = time.time()\n\n # Fish drifts up and down periodically but changes direction at most once every 3 seconds\n if time.time() > self.last_y_speed_change + 3.0:\n r = random.randint(0, 100)\n if r == 1:\n self.y_speed = -1\n self.last_y_speed_change = time.time()\n elif r == 2:\n self.y_speed = 1\n self.last_y_speed_change = time.time()\n elif r == 3:\n 
self.y_speed = 0\n self.last_y_speed_change = time.time()\n \n # Fish loses momentum and slows down in X direction at rate of one unit of speed per second\n if time.time() > self.last_slow_down + 1.0:\n if self.x_speed < 0:\n self.x_speed += 1\n else:\n self.x_speed -= 1\n self.last_slow_down = time.time()\n \n r = random.randint(0, 100)\n if r == 1:\n self.z_speed += 0.01\n elif r == 2:\n self.z_speed -= 0.01\n \n # Bounce off the edges by reversing direction\n if self.x > 1050 and self.x_speed > 0:\n self.x_speed = -self.x_speed\n elif self.x < 0 and self.x_speed < 0:\n self.x_speed = -self.x_speed\n if self.y > 400 and self.y_speed > 0:\n self.y_speed = -self.y_speed\n elif self.y < 0 and self.y_speed < 0:\n self.y_speed = -self.y_speed\n \n # Change to using correct fish image\n if self.x_speed > 0:\n self.current_fish = self.fish_right\n elif self.x_speed < 0:\n self.current_fish = self.fish_left", "def sweep_haar_random_right_to_left(self, **kwargs) -> None:\n for i in range(self._nqudits - 2, 0, -2):\n self.haar_random(i - 1, i, keep_left_canonical=False, **kwargs)", "def WolffMove(self):\n i = scipy.random.randint(0,self.N)\n j = scipy.random.randint(0,self.N)\n oldSpin = self.lattice[i,j]\n toFlip = [(i,j)]\n spinsFlipped = 0\n while len(toFlip) > 0:\n\t i, j = toFlip.pop(0)\n\t # Check if flipped in between\n\t if self.lattice[i,j] == oldSpin:\n\t self.lattice[i,j] = (self.lattice[i,j]+1)%2\n\t spinsFlipped += 1 \n ip1 = (i+1)%self.N\n im1 = (i-1)%self.N\n jp1 = (j+1)%self.N\n jm1 = (j-1)%self.N\n\t neighbors = [(ip1,j),(im1,j),(i,jp1),(i,jm1)]\n for m, n in neighbors:\n\t\t if self.lattice[m,n] == oldSpin:\n\t\t if scipy.random.random() < self.p:\n\t\t toFlip.append((m,n))\n return spinsFlipped", "def rightWing(s, obj):\n\n lift = s.lift(obj)/2\n return lift.scale(s.x+1)\n #return s.rigid.lift.scale(-s.lift(obj) * (-s.x + 1))", "def rw_generator(N: int, x_0 = 0, p_left=0.5):\n steps = [x_0] + [ 1 if (i>p_left) else -1 for i in np.random.random(N-1)]\n return np.add.accumulate(steps)", "def sweep_haar_random_left_to_right(self, **kwargs) -> None:\n for i in range(0, self._nqudits - 1, 2):\n self.haar_random(i, i + 1, keep_left_canonical=True, **kwargs)", "def move(self, n=1):\r\n for bins in range(n):\r\n if self.price > 0:\r\n self.update()\r\n temp_movement = 0\r\n if self.state[0, 0] == 1:\r\n temp_movement = -0.001\r\n while temp_movement < 0:\r\n temp_movement = levy.random(self.alpha, self.beta, self.delta, self.gamma)\r\n if self.state[0, 0] == 0:\r\n temp_movement = 0.001\r\n while temp_movement > 0 or temp_movement < -1:\r\n temp_movement = levy.random(self.alpha, self.beta, self.delta, self.gamma)\r\n\r\n self.movement = temp_movement\r\n self.price = self.price * (1 + self.movement)", "def leftWing(s, obj):\n\n lift = s.lift(obj)/2 # Two wings so divide by 2\n return lift.scale(-s.x + 1)", "def doppler_shift(r_wave, r_vel):\n\treturn r_wave * (1 + (r_vel / ap.constants.c))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If the wolf is closer than 10 units in both the x and y directions and has eaten fewer than 3 sheep in the 24-iteration period, the wolf moves to the agent's (y, x) position and deletes (eats) the agent. Prints the number of sheep left and the total sheep eaten
def chase(self, num_of_sheep, sheep, total_sheep_eaten): for sheep_i in sheep: if abs(sheep_i._x - self._x)< 10 and abs(sheep_i._y - self._y) < 10: #print("before eating, total sheep eaten: ", total_sheep_eaten) if self.sheep_eaten < 3: print("wolf eats sheep") #print("wolf(y,x): ", self._y, self._x) self._y = sheep_i._y self._x = sheep_i._x sheep.remove(sheep_i) #print("sheep(y,x): ", sheep_i._y, sheep_i._x) #print("After chasing, wolf(y,x): ", self._y, self._x) print("Number of sheep left: ",len(sheep)) total_sheep_eaten[0] +=1 self.sheep_eaten +=1 print("total sheep eaten: ", total_sheep_eaten)
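A note on the chase document above, with a small illustrative sketch that is not part of the dataset row: the routine removes sheep from the same list it is iterating over, and in Python that silently skips the element that follows each removal. The variant below reuses the names from the row above (_x, _y, sheep_eaten, the unused num_of_sheep parameter and the one-element total_sheep_eaten list) purely as assumptions; it iterates over a copy so removal is safe and stops once the three-sheep quota for the period is reached.

    def chase(self, num_of_sheep, sheep, total_sheep_eaten):
        # num_of_sheep is kept only to mirror the original signature; it is unused.
        # Iterate over a shallow copy so sheep.remove() cannot skip entries.
        for sheep_i in list(sheep):
            if self.sheep_eaten >= 3:
                break  # quota for the 24-iteration period already reached
            if abs(sheep_i._x - self._x) < 10 and abs(sheep_i._y - self._y) < 10:
                # Move onto the sheep's position and eat it.
                self._y, self._x = sheep_i._y, sheep_i._x
                sheep.remove(sheep_i)
                total_sheep_eaten[0] += 1
                self.sheep_eaten += 1
                print("Number of sheep left:", len(sheep))
                print("Total sheep eaten:", total_sheep_eaten[0])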
[ "def healTeamates(self):\n x, y, z = es.getplayerlocation(self.userid)\n team = es.getplayerteam(self.userid)\n player = sourcerpg.players[self.userid]\n if team not in (2, 3):\n return\n if player is not None:\n level = player[skillName]\n if level:\n \"\"\" The user is at least level one in the medic skill \"\"\"\n distance = ( int(minDistance) + (level - 1) * float(distanceInc))\n healing = int(healingInc) * level\n armor = 0\n\n for teamPlayer in filter(lambda x: es.getplayerteam(x) == team and not es.getplayerprop(x, 'CBasePlayer.pl.deadflag'), es.getUseridList()):\n \"\"\" Loop through all the living players on their team \"\"\"\n xx, yy, zz = es.getplayerlocation(teamPlayer)\n if ( (x - xx) ** 2 + (y - yy) ** 2 + (z - zz) ** 2 ) ** 0.5 <= distance:\n health = es.getplayerprop(teamPlayer, 'CBasePlayer.m_iHealth')\n sourcerpgPlayer = sourcerpg.players[teamPlayer]\n if health < sourcerpgPlayer['maxHealth']:\n if health + healing > sourcerpgPlayer['maxHealth']:\n armor = sourcerpgPlayer['maxHealth'] - health - healing\n es.setplayerprop(teamPlayer, 'CBasePlayer.m_iHealth', sourcerpgPlayer['maxHealth'])\n else:\n es.setplayerprop(teamPlayer, 'CBasePlayer.m_iHealth', healing + health)\n else:\n armor = healing\n\n if armor and self.gameName == \"cstrike\":\n \"\"\" if we're playing CSS and we have armor to increment, do the task \"\"\"\n maxArmor = sourcerpgPlayer['maxArmor']\n currentArmor = es.getplayerprop(teamPlayer, 'CCSPlayer.m_ArmorValue')\n currentArmor += armor\n if currentArmor > maxArmor:\n currentArmor = maxArmor\n es.setplayerprop(teamPlayer, 'CCSPlayer.m_ArmorValue', currentArmor)", "def dispose_meat_floor(self):\n x = self.x\n y = self.y\n\n while self.g_energy() > 0:\n meat = random.randint(1, self.g_energy())\n self.incr_energy(-meat)\n\n if meat % 4 == 0 and self.pos_valid(x + 1, y):\n c_x = x + 1\n c_y = y\n\n elif meat % 4 == 1 and self.pos_valid(x - 1, y):\n c_x = x - 1\n c_y = y\n\n elif meat % 4 == 2 and self.pos_valid(x, y + 1):\n c_x = x\n c_y = y + 1\n\n elif meat % 4 == 3 and self.pos_valid(x, y - 1):\n c_x = x\n c_y = y - 1\n else:\n c_x = x\n c_y = y\n\n self.map.board[c_x, c_y, 21] = meat # TODO petre mettre dans un z a part", "def sheep_features(field, figure):\n\n # playfield, sheeps and wolfs\n playfield = Playfield(field)\n wolf = playfield.get_wolf(figure)\n sheep = playfield.get_sheep(figure)\n enemy_wolf = playfield.get_wolf(figure % 2 + 1)\n enemy_sheep = playfield.get_sheep(figure % 2 + 1)\n\n # Score features\n ###########################################################################\n\n # best score\n best_score = None\n\n # best move\n best_move = None\n\n # score if sheep stays\n score = PENALTY_NEAR_WOLF if is_near(sheep, enemy_wolf) else 0.0\n score += evaluate_playfield_sheep(playfield, figure)\n if playfield.items:\n score += PENALTY_MOVE_NONE\n best_score = score\n best_move = (0, 0)\n\n print(GREY + COORD_TO_STRING[best_move], \": \", best_score, ENDC)\n\n # try each direction\n for move in shuffle(MOVE_COORDS):\n coord = tuple(map(add, sheep, move))\n\n if not playfield.is_coord_available(coord):\n continue\n if coord in playfield.figures:\n continue\n\n new_playfield = playfield.move_figure(sheep, coord)\n score = new_playfield.items.pop(coord, 0.0) * FACTOR_EAT\n if is_near(coord, enemy_wolf):\n score += PENALTY_NEAR_WOLF\n score += evaluate_playfield_sheep(new_playfield, figure)\n\n print(GREY + COORD_TO_STRING[move], \": \", score, ENDC)\n\n if score > best_score:\n best_score = score\n best_move = move\n\n # create feature array 
for this game state\n features = [\n\n # current score is highest\n 1 if (0, 0) == best_move else 0,\n\n # score left is highest\n 1 if (-1, 0) == best_move else 0,\n\n # score right is highest\n 1 if (1, 0) == best_move else 0,\n\n # score above is highest\n 1 if (0, -1) == best_move else 0,\n\n # score below is highest\n 1 if (0, 1) == best_move else 0]\n\n # assert all features have been inserted\n assert len(features) == 5\n\n return features", "def simulate_round(self, course_info):\r\n import numpy as np\r\n\r\n\r\n #McIlroy = Player.player(name = 'Rory McIlroy', year = 2020, df = df_total)\r\n total_strokes = 0\r\n course_lengths = list(course_info.keys())\r\n for i in range(len(course_info)):\r\n self.distance_from_hole = course_lengths[i]\r\n self.par = course_info[course_lengths[i]]\r\n self.location = 'Tee box'\r\n self.number_of_strokes = 0\r\n self.in_hole = False\r\n\r\n while self.in_hole == False:\r\n if self.location == 'Tee box':\r\n print('1st')\r\n if self.par == 4 or self.par == 5:\r\n # use the fir method\r\n tee_shot_probs = self.fairway_in_reg() # this is a list of probabilities\r\n tee_shot_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n tee_shot = np.random.choice(tee_shot_outcomes, size = 1, p = tee_shot_probs)\r\n self.distance_from_hole = self.stroke(tee_shot)\r\n self.location = tee_shot\r\n else:\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n\r\n\r\n elif self.location == 'Fairway':\r\n if (self.distance_from_hole <= 280) and (self.distance_from_hole > 120):\r\n # use the gir method\r\n\r\n print('2nd')\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n elif self.distance_from_hole > 280:\r\n # use the fir method\r\n\r\n print('3rd')\r\n layup_probs = self.fairway_in_reg() # this is a list of probabilities\r\n layup_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n layup = np.random.choice(layup_outcomes, size = 1, p = layup_probs)\r\n self.distance_from_hole = self.stroke(layup)\r\n self.location = layup\r\n\r\n elif (self.distance_from_hole >= 30) and (self.distance_from_hole <= 120):\r\n # use the pitch method\r\n\r\n print('4th')\r\n pitch_probs = self.pitch() # this is a list of probabilities\r\n pitch_outcomes = ['Green', 'First Cut', 'Second Cut']\r\n pitch = np.random.choice(pitch_outcomes, size = 1, p = pitch_probs)\r\n self.distance_from_hole = self.stroke(pitch)\r\n self.location = pitch\r\n\r\n else:\r\n # use the chip method\r\n\r\n print('5th')\r\n chip_probs = self.chip() # this is a list of probabilities\r\n chip_outcomes = ['Make Chip', 'Miss Chip']\r\n chip = np.random.choice(chip_outcomes, size = 1, p = chip_probs)\r\n self.distance_from_hole = self.stroke(chip)\r\n self.location = 'Green'\r\n\r\n elif self.location == 'First Cut':\r\n # The lie will adjust the maximum distance the player can reach the green in two.\r\n # If poorer the lie is, the shorter the maximum distance becomes.\r\n if (self.distance_from_hole <= 260) and (self.distance_from_hole > 120):\r\n\r\n print('6th')\r\n # use the gir 
method\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n elif self.distance_from_hole > 260:\r\n\r\n print('7th')\r\n # use the fir method\r\n layup_probs = self.fairway_in_reg() # this is a list of probabilities\r\n layup_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n layup = np.random.choice(layup_outcomes, size = 1, p = layup_probs)\r\n self.distance_from_hole = self.stroke(layup)\r\n self.location = layup\r\n\r\n elif (self.distance_from_hole >= 30) and (self.distance_from_hole <= 120):\r\n\r\n print('8th')\r\n # use the pitch method\r\n pitch_probs = self.pitch() # this is a list of probabilities\r\n pitch_outcomes = ['Green', 'First Cut', 'Second Cut']\r\n pitch = np.random.choice(pitch_outcomes, size = 1, p = pitch_probs)\r\n self.distance_from_hole = self.stroke(pitch)\r\n self.location = pitch\r\n\r\n else:\r\n # use the chip method\r\n\r\n print('9th')\r\n chip_probs = self.chip() # this is a list of probabilities\r\n chip_outcomes = ['Make Chip', 'Miss Chip']\r\n chip = np.random.choice(chip_outcomes, size = 1, p = chip_probs)\r\n self.distance_from_hole = self.stroke(chip)\r\n self.location = 'Green'\r\n\r\n elif self.location == 'Second Cut':\r\n # The lie will adjust the maximum distance the player can reach the green in two.\r\n # If poorer the lie is, the shorter the maximum distance becomes.\r\n if self.distance_from_hole <= 230 and self.distance_from_hole > 120:\r\n\r\n print('10th')\r\n # use the gir method\r\n approach_shot_probs = self.green_in_reg()\r\n approach_shot_outcomes = ['Green', 'Fairway', 'First Cut', 'Second Cut']\r\n approach_shot = np.random.choice(approach_shot_outcomes, size = 1, p = approach_shot_probs)\r\n self.distance_from_hole = self.stroke(approach_shot)\r\n self.location = approach_shot\r\n\r\n elif self.distance_from_hole > 230:\r\n\r\n print('11th')\r\n # use the fir method\r\n layup_probs = self.fairway_in_reg() # this is a list of probabilities\r\n layup_outcomes = ['Fairway', 'First Cut', 'Second Cut']\r\n layup = np.random.choice(layup_outcomes, size = 1, p = layup_probs)\r\n self.distance_from_hole = self.stroke(layup)\r\n self.location = layup\r\n\r\n elif (self.distance_from_hole >= 30) and (self.distance_from_hole <= 120):\r\n\r\n print('12th')\r\n # use the pitch method\r\n pitch_probs = self.pitch() # this is a list of probabilities\r\n pitch_outcomes = ['Green', 'First Cut', 'Second Cut']\r\n pitch = np.random.choice(pitch_outcomes, size = 1, p = pitch_probs)\r\n self.distance_from_hole = self.stroke(pitch)\r\n self.location = pitch\r\n\r\n else:\r\n\r\n print('13th')\r\n # use the chip method\r\n chip_probs = self.chip() # this is a list of probabilities\r\n chip_outcomes = ['Make Chip', 'Miss Chip']\r\n chip = np.random.choice(chip_outcomes, size = 1, p = chip_probs)\r\n self.distance_from_hole = self.stroke(chip)\r\n self.location = 'Green'\r\n\r\n elif self.location == 'Green':\r\n # use the putt method\r\n\r\n print('14th')\r\n putt_probs = self.putt()\r\n putt_outcomes = ['Make', 'Miss']\r\n #putt = np.mean(np.random.choice([1, 0], size = 10, p = putt_probs)).round()\r\n putt = np.random.choice(putt_outcomes, size = 1, p = putt_probs)\r\n print(putt, putt_probs)\r\n if putt == 'Make':\r\n self.in_hole == True\r\n self.number_of_strokes += 1\r\n break\r\n 
else:\r\n self.distance_from_hole = self.stroke(putt)\r\n\r\n self.number_of_strokes += 1\r\n print('Number of strokes: ', self.number_of_strokes)\r\n total_strokes += self.number_of_strokes\r\n print('Total Number of Strokes', total_strokes)\r\n pass", "def POWW():\r\n global h, POW, mario, xyz\r\n xyz=False\r\n # Golpe 3\r\n if((h==2 and c.coords(luigi)[0]> c.coords(POW)[0]-50 and c.coords(luigi)[0]< c.coords(POW)[0]+50 and c.coords(luigi)[1]> c.coords(POW)[1]-50 and c.coords(luigi)[1]< c.coords(POW)[1]+100) or (h==2 and c.coords(mario)[0]> c.coords(POW)[0]-50 and c.coords(mario)[0]< c.coords(POW)[0]+50 and c.coords(mario)[1]> c.coords(POW)[1]-50 and c.coords(mario)[1]< c.coords(POW)[1]+100)):\r\n c.coords(POW, -2000, -2000)\r\n xyz=True\r\n h+=1\r\n # Golpe 2\r\n elif((h==1 and c.coords(luigi)[0]> c.coords(POW)[0]-50 and c.coords(luigi)[0]< c.coords(POW)[0]+50 and c.coords(luigi)[1]> c.coords(POW)[1]-50 and c.coords(luigi)[1]< c.coords(POW)[1]+100) or (h==1 and c.coords(mario)[0]> c.coords(POW)[0]-50 and c.coords(mario)[0]< c.coords(POW)[0]+50 and c.coords(mario)[1]> c.coords(POW)[1]-50 and c.coords(mario)[1]< c.coords(POW)[1]+100)):\r\n c.delete(POW)\r\n POW= c.create_image(750, 450, image=powimg3, anchor=NW)\r\n xyz=True\r\n h+=1\r\n # Golpe 1\r\n elif((h==0 and c.coords(luigi)[0]> c.coords(POW)[0]-50 and c.coords(luigi)[0]< c.coords(POW)[0]+50 and c.coords(luigi)[1]> c.coords(POW)[1]-50 and c.coords(luigi)[1]< c.coords(POW)[1]+100) or (h==0 and c.coords(mario)[0]> c.coords(POW)[0]-50 and c.coords(mario)[0]< c.coords(POW)[0]+50 and c.coords(mario)[1]> c.coords(POW)[1]-50 and c.coords(mario)[1]< c.coords(POW)[1]+100)):\r\n c.delete(POW)\r\n POW= c.create_image(750, 450, image=powimg2, anchor=NW)\r\n xyz=True\r\n h+=1\r\n ventana.after(30, POWW)", "def eat(self):\r\n if self.environment[self._y][self._x] > 10:\r\n self.environment[self._y][self._x] -= 10\r\n self.store += 10\r\n else:\r\n self.store += self.environment[self.y][self.x] \r\n self.environment[self.y][self.x] = 0", "def main(turns, num_ships):\n round = 0\n while turns > 0:\n print(f'Round {round}')\n create_sea(guess_board)\n row, column = get_ship_location()\n if guess_board[row][column] == 'X' or guess_board[row][column] == '-':\n print('You have already blown up theese coordinates!')\n elif unseen_board[row][column] == 'X':\n guess_board[row][column] = 'X'\n round += 1\n print('HIT! You sank a ship!')\n print(f'You have {turns} left before the enemy fleet sinks you!')\n else:\n print('MISS! Try again!')\n guess_board[row][column] = '-'\n round += 1\n turns -= 1\n print(f'You have {turns} turns left')\n print('Before the enemy fleet sinks you!')\n if count_hits(guess_board) == num_ships:\n print('You have WON!')\n print(f'It took you {round} rounds to sink {num_ships} ships')\n break\n elif turns == 0:\n print('Sorry. 
Your crew is sleeping with the fish!')\n print('In theese location you could of hit the ships.')\n create_sea(unseen_board)\n break", "def is_suicide_for_win_better_then_defend(game):\n \n # need rework!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n my_castle = game.get_my_castle()\n enemy_castle = game.get_enemy_castle()\n enemy_most_dangrous_elf = get_closest_enemy_elf(game, my_castle)\n my_most_dangrous_elf = get_closest_my_elf(game, enenemy_castle)\n \n if enemy_most_dangrous_elf.distance(my_castle) > my_most_dangrous_elf.distance(enemy_castle) and mmy_most_dangrous_elf.current_health > game.elf_max_health > 3 :\n if len(game.get_my_mana_fountains()) > len(game.get_enemy_mana_fountains()) or game.get_my_mana() > game.get_enemy_mana():\n return True\n if count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) < count_obstacles_in_enemy_elf_way_to_castle(game, enemy_most_dangrous_elf) and \\\n enemy_most_dangrous_elf.distance(my_castle) - my_most_dangrous_elf.distance(enemy_castle) < count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) * game.elf_max_speed / game.speed_up_multiplier:\n if len(game.get_my_mana_fountains()) > len(game.get_enemy_mana_fountains()) or game.get_my_mana() > game.get_enemy_mana():\n return True\n if enemy_most_dangrous_elf.distance(my_castle) > my_most_dangrous_elf.distance(enemy_castle) and mmy_most_dangrous_elf.current_health > game.elf_max_health > 3:\n if count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) < count_obstacles_in_enemy_elf_way_to_castle(game, enemy_most_dangrous_elf) and \\\n enemy_most_dangrous_elf.distance(my_castle) - my_most_dangrous_elf.distance(enemy_castle) < count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) * game.elf_max_speed / game.speed_up_multiplier:\n return True\n \n return False", "def elephant(self, mf_board_row, mf_board_column, mt_board_row, mt_board_column):\n\n\n #ensures piece to be moved is a elephant & sets piece owner info to variables\n if self._XiangqiGame._game_state == \"UNFINISHED\" and self._XiangqiGame._player_1._turn[0] == \\\n self._XiangqiGame._board_1._board[mf_board_row][mf_board_column][0] and \\\n self._XiangqiGame._board_1._board[mf_board_row][mf_board_column][1] == \"e\":\n\n current_space_player = self._XiangqiGame._board_1._board[mf_board_row][mf_board_column][0]\n next_space_player = self._XiangqiGame._board_1._board[mt_board_row][mt_board_column][0]\n\n\n #ensures the elephant will either go into an empty space or the other opponents piece\n if (self._XiangqiGame._player_1._turn[0] != next_space_player) \\\n or (self._XiangqiGame._board_1._board[mt_board_row][mt_board_column] == \" \"):\n\n\n #stops elephant from being on wrong sid of board for red piece\n if current_space_player == \"r\" and mt_board_row > 4:\n\n return False\n\n # stops elephant from being on wrong sid of board for black piece\n if current_space_player == \"b\" and mt_board_row < 5:\n\n return False\n\n # up diagonal left\n if (mt_board_row == mf_board_row - 2) and (mt_board_column == mf_board_column - 2):\n\n\n if self._XiangqiGame._board_1._board[mf_board_row - 1][mf_board_column - 1] != \" \":\n return False\n\n else:\n return True\n\n # up diagonal right\n if (mt_board_row == mf_board_row - 2) and (mt_board_column == mf_board_column + 2):\n\n\n if self._XiangqiGame._board_1._board[mf_board_row - 1][mf_board_column + 1] != \" \":\n return False\n\n else:\n return True\n\n # down diagonal left\n if (mt_board_row == mf_board_row + 2) and (mt_board_column == mf_board_column - 2):\n\n\n 
if self._XiangqiGame._board_1._board[mf_board_row + 1][mf_board_column - 1] != \" \":\n return False\n\n else:\n return True\n\n # down diagonal right\n if (mt_board_row == mf_board_row + 2) and (mt_board_column == mf_board_column + 2):\n\n if self._XiangqiGame._board_1._board[mf_board_row + 1][mf_board_column + 1] != \" \":\n return False\n\n else:\n return True", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n foodList = currentGameState.getFood().asList()\n pacPos = currentGameState.getPacmanPosition()\n ghostStateList = currentGameState.getGhostStates()\n scaredTimeList = [ghostState.scaredTimer for ghostState in ghostStateList]\n ghostKillers = currentGameState.getCapsules()\n\n # The closer the food better the score\n foodScore = 0\n distanceFoodScore = []\n if len(foodList) > 0:\n for food in foodList:\n distanceFoodScore.append(util.manhattanDistance(pacPos, food))\n minFoodDist = min(distanceFoodScore)\n foodScore = (1.0 / minFoodDist) * 10.0 \n\n # The closer a Ghost is the worse the score\n ghostPos= []\n if len(ghostStateList) > 0:\n for ghost in ghostStateList:\n ghostPos.append(util.manhattanDistance(pacPos, ghost.getPosition()))\n minGhostPos = min(ghostPos)\n\n # if the ghost is closer amplify the score\n if minGhostPos < 1:\n ghostScore = 15.0\n elif minGhostPos < 4:\n ghostScore = (1.0 / minGhostPos) * 15.0\n else:\n ghostScore = (1.0 / minGhostPos) * 5.0\n\n # can make ghosts killable if he is near us\n killerScore = 0\n if minGhostPos < 3 and len(ghostKillers) > 0:\n killerPos = []\n for killer in ghostKillers:\n killerPos.append(util.manhattanDistance(pacPos, killer))\n minKiller = min(killerPos)\n killerScore = 1.0 / minKiller\n if minKiller < 1:\n killerScore = 20.0\n\n # ghost is eatable!\n scaredScore = 0\n for scared in scaredTimeList:\n if scared > minGhostPos:\n scaredScore += scared\n ghostScore = 0\n\n return foodScore + currentGameState.getScore() - ghostScore + killerScore + scaredScore", "def get_threat_killer_move(self, threat: ThreatObj, searchTurns, negativeTiles):\n killTiles = [threat.path.start.next.tile, threat.path.start.tile]\n armyAmount = threat.threatValue + 1\n saveTile = None\n largestTile = None\n source = None\n for threatSource in killTiles:\n for tile in threatSource.movable:\n if tile.player == self._map.player_index and tile not in threat.path.tileSet:\n if tile.army > 1 and (largestTile is None or tile.army > largestTile.army):\n largestTile = tile\n source = threatSource\n threatModifier = 3\n if (self._map.turn - 1) in self.history.attempted_threat_kills:\n logging.info(\"We attempted a threatKill last turn, using 1 instead of 3 as threatKill modifier.\")\n threatModifier = 1\n\n if largestTile is not None:\n if threat.threatValue - largestTile.army + threatModifier < 0:\n logging.info(\"reeeeeeeeeeeeeeeee\\nFUCK YES KILLING THREAT TILE {},{}\".format(largestTile.x, largestTile.y))\n saveTile = largestTile\n else:\n # else see if we can save after killing threat tile\n negativeTilesIncludingThreat = set()\n negativeTilesIncludingThreat.add(largestTile)\n dict = {}\n dict[self.general] = (0, threat.threatValue, 0)\n for tile in negativeTiles:\n negativeTilesIncludingThreat.add(tile)\n for tile in threat.path.tileSet:\n negativeTilesIncludingThreat.add(tile)\n if threat.saveTile is not None:\n dict[threat.saveTile] = (0, threat.threatValue, -0.5)\n self.viewInfo.add_targeted_tile(threat.saveTile, TargetStyle.GREEN)\n logging.info(\"(killthreat) dict[threat.saveTile] = (0, {}) -- threat.saveTile 
{},{}\".format(threat.saveTile.army, threat.saveTile.x, threat.saveTile.y))\n savePathSearchModifier = 2\n if largestTile in threat.path.start.tile.movable:\n logging.info(\"largestTile was adjacent to the real threat tile, so savepath needs to be 1 turn shorter for this to be safe\")\n # then we have to be prepared for this move to fail the first turn. Look for savePath - 1\n savePathSearchModifier = 3\n threatKillSearchAmount = armyAmount + threatModifier - largestTile.army #- 1\n postThreatKillSearchTurns = searchTurns - savePathSearchModifier\n logging.info(\"Searching post-threatKill path with threatKillSearchAmount {} for postThreatKillSearchTurns {}\".format(threatKillSearchAmount, postThreatKillSearchTurns))\n bestPath = dest_breadth_first_target(self._map, dict, threatKillSearchAmount, 0.1, postThreatKillSearchTurns, negativeTilesIncludingThreat, searchingPlayer = self.general.player, ignoreGoalArmy=True)\n if bestPath is not None and bestPath.length > 0:\n self.viewInfo.color_path(PathColorer(bestPath, 250, 250, 250, 200, 12, 100))\n if largestTile.army > 7 or threat.threatValue <= largestTile.army:\n logging.info(\"reeeeeeeeeeeeeeeee\\nkilling threat tile with {},{}, we still have time for defense after with path {}:\".format(largestTile.x, largestTile.y, bestPath.toString()))\n saveTile = largestTile\n else:\n logging.info(\"threatKill {},{} -> {},{} not worthwhile?\".format(largestTile.x, largestTile.y, source.x, source.y))\n else:\n logging.info(\"largestTile {} couldn't save us because no bestPath save path found post-kill\".format(largestTile.toString()))\n\n\n if saveTile is not None:\n self.history.attempted_threat_kills.add(self._map.turn)\n return Move(saveTile, source)\n return None", "def stroke(self, shot):\r\n \r\n import numpy as np\r\n \r\n if self.location == 'Tee box':\r\n # par 4 or par 5 tee shot\r\n if self.distance_from_hole >= 280:\r\n if shot == 'Fairway':\r\n # can potentially add a random choice component to vary the driving distance\r\n self.distance_from_hole -= self.drive_distance\r\n elif shot == 'First Cut':\r\n self.distance_from_hole = self.distance_from_hole - self.drive_distance + 5\r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = self.distance_from_hole - self.drive_distance + 15\r\n \r\n else:\r\n # par 3 tee shot\r\n if shot == 'Green':\r\n # if the player is a great iron player then they hit it to within 21 feet\r\n if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([7], size = 10))\r\n # if the player is a decent iron player then they hit it to within 30 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([10], size = 10))\r\n else:\r\n # if the player is a poor iron player then they hit it to within 45 feet\r\n self.distance_from_hole = np.mean(np.random.choice([15], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be close (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n \r\n elif self.location == 'Fairway':\r\n if (self.distance_from_hole > 220) and (self.distance_from_hole < 280):\r\n if shot == 'Green':\r\n # if the player is a great approach player then they hit it to within 30 feet\r\n 
if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([10], size = 10))\r\n # if the player is a decent approach player then they hit it to within 60 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([20], size = 10))\r\n else:\r\n # if the player is a poor approach player then they hit it to within 90 feet\r\n self.distance_from_hole = np.mean(np.random.choice([30], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be closer (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n elif (self.distance_from_hole <= 220) and (self.distance_from_hole > 120):\r\n if shot == 'Green':\r\n # if the player is a great iron player then they hit it to within 21 feet\r\n if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([7], size = 10))\r\n # if the player is a decent iron player then they hit it to within 30 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([10], size = 10))\r\n else:\r\n # if the player is a poor iron player then they hit it to within 45 feet\r\n self.distance_from_hole = np.mean(np.random.choice([15], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be closer (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n elif (self.distance_from_hole <= 120) and (self.distance_from_hole > 30):\r\n if shot == 'Green':\r\n # if the player is a great iron player then they hit it to within 10 feet\r\n if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([3.334], size = 10))\r\n # if the player is a decent iron player then they hit it to within 20 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([7], size = 10))\r\n else:\r\n # if the player is a poor iron player then they hit it to within 30 feet\r\n self.distance_from_hole = np.mean(np.random.choice([10], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be closer (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n elif shot == 'Make Chip':\r\n self.distance_from_hole = 0\r\n elif shot == 'Miss Chip':\r\n self.distance_from_hole = 6\r\n \r\n elif self.location == 'First Cut' or self.location == 'Second Cut':\r\n # for now I'm just going to group second and first cut together, I will\r\n # end up making helper functions to make this process easier to read and \r\n # write out. 
Then I will separate these two cases.\r\n if (self.distance_from_hole > 220) and (self.distance_from_hole < 280):\r\n if shot == 'Green':\r\n # if the player is a great approach player then they hit it to within 45 feet\r\n if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([15], size = 10))\r\n # if the player is a decent approach player then they hit it to within 75 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([25], size = 10))\r\n else:\r\n # if the player is a poor approach player then they hit it to within 120 feet\r\n self.distance_from_hole = np.mean(np.random.choice([40], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be closer (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n elif (self.distance_from_hole <= 220) and (self.distance_from_hole > 120):\r\n if shot == 'Green':\r\n # if the player is a great iron player then they hit it to within 30 feet\r\n if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([10], size = 10))\r\n # if the player is a decent iron player then they hit it to within 45 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([15], size = 10))\r\n else:\r\n # if the player is a poor iron player then they hit it to within 60 feet\r\n self.distance_from_hole = np.mean(np.random.choice([20], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be closer (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n elif (self.distance_from_hole <= 120) and (self.distance_from_hole > 30):\r\n if shot == 'Green':\r\n # if the player is a great iron player then they hit it to within 15 feet\r\n if self.sg_apr > .5:\r\n self.distance_from_hole = np.mean(np.random.choice([5], size = 10))\r\n # if the player is a decent iron player then they hit it to within 25 feet\r\n elif self.sg_apr > 0:\r\n self.distance_from_hole = np.mean(np.random.choice([8.5], size = 10))\r\n else:\r\n # if the player is a poor iron player then they hit it to within 40 feet\r\n self.distance_from_hole = np.mean(np.random.choice([13.5], size = 10))\r\n \r\n elif shot == 'Fairway':\r\n self.distance_from_hole = 30\r\n \r\n elif shot == 'First Cut':\r\n # need to work out a way to have some fringe shots be closer (pin locations)\r\n # this will come when I have some course layout data to work with\r\n # but this will be fine for now as shots on the green tend to be closer to the\r\n # pin than shots on the fringe.\r\n self.distance_from_hole = 20\r\n \r\n elif shot == 'Second Cut':\r\n self.distance_from_hole = 25\r\n \r\n elif shot == 'Make Chip':\r\n self.distance_from_hole = 0\r\n elif shot == 'Miss Chip':\r\n self.distance_from_hole = 6\r\n \r\n \r\n #elif self.location == 'Second Cut':\r\n elif self.location == 'Green':\r\n # Need 
to set the distance form the hole after mssing a putt\r\n if self.distance_from_hole < (5/3):\r\n # Putting inside 5 feet\r\n remaining_distance = np.linspace(.1/3, 5/3)\r\n self.distance_from_hole = np.mean(np.random.choice(remaining_distance, size = 10))\r\n \r\n elif (self.distance_from_hole < (10/3)) and (self.distance_from_hole >= (5/3)):\r\n # Putting from 5 to 10 feet\r\n remaining_distance = np.linspace(.1/3, 5/3)\r\n self.distance_from_hole = np.mean(np.random.choice(remaining_distance, size = 10))\r\n \r\n elif (self.distance_from_hole < (15/3)) and (self.distance_from_hole >= (10/3)):\r\n # Puttin from 10 to 15 feet\r\n remaining_distance = np.linspace(.1/3, 5/3)\r\n self.distance_from_hole = np.mean(np.random.choice(remaining_distance, size = 10))\r\n \r\n elif (self.distance_from_hole < (20/3)) and (self.distance_from_hole >= (15/3)):\r\n # Putting from 15 to 20 feet\r\n remaining_distance = np.linspace(.1/3, 5/3)\r\n self.distance_from_hole = np.mean(np.random.choice(remaining_distance, size = 10))\r\n \r\n elif (self.distance_from_hole < (25/3)) and (self.distance_from_hole >= (20/3)):\r\n # Putting from 20 to 25 feet\r\n remaining_distance = np.linspace(.1/3, 6/3)\r\n self.distance_from_hole = np.mean(np.random.choice(remaining_distance, size = 10))\r\n \r\n else:\r\n # Putting outside 25 feet\r\n remaining_distance = np.linspace(.1/3, 7/3)\r\n self.distance_from_hole = np.mean(np.random.choice(remaining_distance, size = 10))\r\n \r\n \r\n return self.distance_from_hole", "def check_easykill(self):\n killable_enemy_pos = [bot.current_pos\n for bot in self.enemy_bots if (bot.is_harvester and not bot.noisy)]\n\n # easy kill (kind of tested)\n for killable in killable_enemy_pos:\n if killable in self.legal_moves.values():\n move = graph.diff_pos(self.current_pos, killable)\n return move", "def calculate_throwoff(shooter, gun, recoil_full, recoil_empty):\n # Ek = 0.5 * m * v^2 = recoil energy\n # Ek = (p^2)/2m\n # Ek*2m = p^2\n # p = math.sqrt(Ek*2m)\n shooterdata = SHOOTER[shooter]\n h_shooter = shooterdata.get(\"height\") / 1000 # m\n m_shooter = shooterdata.get(\"weight\") / 1000 # kg\n s_shooter = shooterdata.get(\"strength\")\n h_shoulder = h_shooter * 4 / 5 # shoulder height\n print(f\"\\n#### {shooter} ({h_shooter:.2f} m, {m_shooter:.2f} kg, \"\n f\"STR: {s_shooter})\")\n # angular momentum is proportional to the moment of inertia and angular\n # spead in radians/s\n # L = Iω\n # L = rmv\n # the shooter is given a backwards rotation around the center of mass,\n # located at shooter's height / 2\n # where r = shooter's height / (4/5) (4 5ths being around shoulder height)\n print(f\"\\n* Shoulder height: {h_shoulder:.2f} m\")\n # rifles have three point anchoring (two hands + shoulder)\n if (gun.get(\"type\") == \"rifle\" and not shooterdata.get(\"injured_hand\")):\n print(\"* Using rifle shooting stance\")\n stance_factor = 3\n else:\n print(\"* Using one-handed pistol shooting stance\")\n stance_factor = 1\n\n for recoil in [recoil_full, recoil_empty]:\n if recoil == recoil_full:\n print(\"\\n##### Full Magazine\")\n else:\n print(\"\\n##### Empty Magazine\")\n # (backwards) velocity given to the shooter:\n v_shooter = recoil / m_shooter\n # theoretically, it would be possible to place the pivot point\n # at the centre of mass in the shooter, i.e\n # shoulder height - shooter_height / 2, which would lessen the\n # effect of angular momentum\n orbital_angular_momentum = h_shoulder * m_shooter * v_shooter\n print(f\"\\n* Shooter backwards velocity: {v_shooter:.2f} m/s\")\n 
print(f\"* Shooter raw orbital angular momentum: \"\n f\"{orbital_angular_momentum:.2f} rad/s\")\n\n # print(\"\\nSkill vs. Throw-off (radians | quarter degrees):\\n\")\n print(\"\\n##### Throw-off, radians (quarter degrees)\\n\")\n print(\"S = standing, C = crouching, P = prone, r = radians, ¼d = quarter degrees\\n\")\n print(\"| Skill | S (r) | S (¼d) | C (r) | C (¼d) | P (r) | P (¼d) |\")\n print(\"|------:|------:|-------:|------:|-------:|------:|-------:|\")\n\n for skill in range(11):\n skill_factor = skill * 0.1 # 10 represents full handling ability\n handling_factor = 1 + (s_shooter * skill_factor * stance_factor)\n # reaction time = 0.15 s for touch, we use this as a base\n # scaling factor to compensate for recoil management\n # this could theoretically be made worse being\n # dazed/injured/drugged/confused/less intelligent/dexterious\n throwoff = (orbital_angular_momentum / handling_factor) * 0.15\n throwoff_qd = ((throwoff*180)/math.pi)/4\n throwoff_c = throwoff / 2\n throwoff_c_qd = ((throwoff_c*180)/math.pi)/4\n throwoff_p = throwoff / 5\n throwoff_p_qd = ((throwoff_p*180)/math.pi)/4\n print(f\"| {skill} | {throwoff:.4f} | {throwoff_qd:.0f} \"\n f\"| {throwoff_c:.4f} | {throwoff_c_qd:.0f} \"\n f\"| {throwoff_p:.4f} | {throwoff_p_qd:.0f} |\")", "def flee_success_calculator(character):\n flee_roll = roll_d10()\n\n if flee_roll > 1:\n time.sleep(1)\n print(\"You successfully escape the encounter!\")\n\n else:\n flee_damage = roll_d4()\n character[2] -= flee_damage\n time.sleep(1)\n print(\"The enemy slices your back as you run away, you take %d damage!\" % flee_damage)", "def move_cheeses(model, n, source, intermediate, destination):\n count = 0\n if n > 1: # fill this in!\n #move_cheeses(model, n - 1, source, destination, intermediate)\n #move_cheeses(model, 1, source, intermediate, destination)\n #move_cheeses(model, n - 1, intermediate, source, destination)\n count += move_cheeses(model, n - 1, source, destination, intermediate)\n #print(model._stools)\n count += move_cheeses(model, 1, source, intermediate, destination)\n count += move_cheeses(model, n - 1, intermediate, source, destination)\n\n else: # just 1 cheese --- leave this out for now!\n # this is the only change\n #print(\"{} -> {}\".format(source, destination))\n model.move(source, destination)\n count += 1\n return count", "def move(self, knight, direction):\n print (knight)\n knight.cell.knight = None\n try: \n new_cell = self.change_position(knight.cell, direction)\n print (new_cell.knight)\n print (\"New Cell: \", new_cell, \"\\n\")\n\n except:\n item, position = self.kill_knight(knight, 2)\n\n # item = knight.equipped\n # last_pos = knight.cell\n # knight.status = 'DROWNED'\n # knight.cell = None\n # knight.equipped = None\n # knight.base_attack = 0\n # knight.base_defence = 0\n\n if item:\n item.cell = position\n position.items.append(item)\n position.items.sort(key=attrgetter('value'))\n\n else: \n print (\"Defender: \", new_cell.knight)\n if (new_cell.knight is not None):\n print (\"BATTLE\")\n winner, loser = self.attack(knight, new_cell.knight)\n print (\"Winner: \", winner)\n print (\"Loser: \", loser ,\"\\n\")\n\n winner.cell = new_cell\n new_cell.knight = winner\n if winner.equipped:\n winner.equipped.cell = new_cell\n item, position = self.kill_knight(loser, 1)\n # item_ = loser.equipped\n # last_pos = loser.cell\n # loser.status = \"DEAD\"\n # loser.cell = None\n # loser.equipped = None\n # loser.attack_score = 0\n # loser.defense_score = 0\n\n if item:\n item.cell = position\n 
position.items.append(item)\n position.items.sort(key=attrgetter('value'))\n\n return winner\n \n if (new_cell.knight is None and len(new_cell.items) == 0):\n knight.cell = new_cell\n new_cell.knight = knight\n if knight.equipped:\n knight.equipped.cell = new_cell\n\n elif (len(new_cell.items) > 0):\n knight.cell = new_cell\n new_cell.knight = knight\n if knight.equipped:\n knight.equipped.cell = new_cell\n\n new_cell.items.sort(key=attrgetter('value'))\n if not knight.equipped:\n knight.equipped = new_cell.items.pop()\n\n return knight", "def tower_of_hanoi(num, source, target, helper):\n if num == 0:\n return\n else:\n tower_of_hanoi(num - 1, source, helper, target)\n print(\"move the disk -\", num, \"from\", source, \"to\", target)\n tower_of_hanoi(num - 1, helper, target, source)", "def makeMove(board, chests, x, y):\n smallestDistance = 100 # Any chest will be closer than 100.\n for cx, cy in chests:\n distance = math.sqrt((cx - x) * (cx - x) + (cy - y) * (cy - y))\n\n if distance < smallestDistance: # Use the closest chest.\n smallestDistance = distance\n\n smallestDistance = round(smallestDistance)\n\n if smallestDistance == 0:\n # xy is directly on a treasure chest!\n chests.remove([x, y])\n return 'You have found a sunken treasure chest!'\n else:\n if smallestDistance < 10:\n board[(x, y)] = str(smallestDistance)\n return 'Treasure detected at a distance of {} from the sonar device.'.format(smallestDistance)\n else:\n board[(x, y)] = 'X'\n return 'Sonar did not detect anything. All treasure chests out of range.'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
....Total products in top 5 common brands for every Retailer......
def runQueryatBrandLevel(): df = pd.DataFrame() query1 = "SELECT brand,count(id) AS totalProduct from productinfo where date=%s group by brand ORDER BY count(id) DESC " results1 = sql.read_sql(query1, con=conn, params=[date1]) results1['retailer']=retName[0] df = df.append(results1) query1 = "SELECT brand,count(id) AS totalProduct from bub_productinfo where date=%s group by brand ORDER BY count(id) DESC " results2 = sql.read_sql(query1, con=conn, params=[date1]) results2['retailer'] = retName[1] df = df.append(results2) query1 = "SELECT brand,count(id) AS totalProduct from boo_productinfo where date=%s group by brand ORDER BY count(id) DESC " results3 = sql.read_sql(query1, con=conn, params=[date1]) results3['retailer'] = retName[2] df = df.append(results3) list1 = results1['brand'].tolist() list2 = results2['brand'].tolist() list3 = results3['brand'].tolist() for brand in list1: if brand in list2 and brand in list3: brandName.append(brand) topBrand=brandName[:5] df.set_index('brand',inplace=True) df = df.ix[topBrand, :] df.reset_index(inplace=True) header = df.dtypes.index graphs.multipleBar(df, header[0], header[1], header[2]) print(df) print('\n') """.........No of offered products in top 5 common brands for every Retailer.......... """ df = pd.DataFrame() for o, i, z in zip(var, var1, retName): query1 = "select o.brand,count(DISTINCT i.id) as offeredProduct from %s as o INNER JOIN %s as i on o.id=i.id " % (o, i) query2 = query1 + "WHERE o.date=%s AND i.date=%s AND (o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s) AND i.discountPercentage >0 GROUP BY o.brand ORDER BY offeredProduct DESC " results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4]]) results['retailer'] = z df = df.append(results) header = df.dtypes.index graphs.multipleBar(df, header[0], header[1], header[2]) print(df) print('\n') """..........Offer Percentage in top 5 common brands for every Retailer..........""" df = pd.DataFrame() for o, i, z in zip(var, var1, retName): query1 = "select o.brand,AVG (i.discountPercentage) as offeredPercentage from %s as o INNER JOIN %s as i on o.id=i.id" % (o, i) query2 = query1 + " WHERE o.date=%s AND i.date=%s AND (o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s OR o.brand=%s) AND i.discountPercentage >0 " \ "GROUP BY o.brand ORDER BY offeredPercentage DESC " results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4]]) results['retailer'] = z df = df.append(results) header = df.dtypes.index graphs.multipleBar(df, header[0], header[1], header[2]) print(df) print('\n') """.........Color Variation in top 5 common brands for every Retailer.........""" df = pd.DataFrame() for o, i,z in zip(var, var1, retName): query1 = "select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s" \ " AS o inner join %s AS i on o.id=i.id" % (o, i) query2 = query1 + " where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo" results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[0]]) results['brand'] = topBrand[0] results['retailer'] = z df = df.append(results.ix[0:3, :]) query1 = "select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s" \ " AS o inner join %s AS i on o.id=i.id" % (o, i) query2 
= query1 + " where i.date=%s and o.date=%s AND o.brand=%s) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo" results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[1]]) results['brand'] = topBrand[1] results['retailer'] = z df = df.append(results.ix[0:3, :]) query1 = "select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s" \ " AS o inner join %s AS i on o.id=i.id" % (o, i) query2 = query1 + " where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo" results = sql.read_sql(query2, con=conn, params=[date1, date1,topBrand[2]]) results['brand'] = topBrand[2] results['retailer'] = z df = df.append(results.ix[0:3, :]) query1 = "select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s" \ " AS o inner join %s AS i on o.id=i.id" % (o, i) query2 = query1 + " where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo" results = sql.read_sql(query2, con=conn, params=[date1, date1, topBrand[3]]) results['brand'] = topBrand[3] results['retailer'] = z df = df.append(results.ix[0:3, :]) query1 = "select result.colorcount as colorNo,count(result.colorcount) AS products from (select infocolor.id,count(infocolor.id) as colorcount FROM (select o.id from %s" \ " AS o inner join %s AS i on o.id=i.id" % (o, i) query2 = query1 + " where i.date=%s and o.date=%s AND o.brand=%s ) as infocolor GROUP BY infocolor.id) as result GROUP BY colorNo" results = sql.read_sql(query2, con=conn, params=[date1, date1, topBrand[4]]) results['brand'] = topBrand[4] results['retailer'] = z df = df.append(results.ix[0:3, :]) header = df.dtypes.index graphs.brandStackedMultiBar(df, header[0], header[1], header[2], header[3]) print(df) print('\n') """..........Size Variation in top 5 common brands for every Retailer...........""" df = pd.DataFrame() for o, i, p, z in zip(var, var1, var2, retName): query1= "select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size" results = sql.read_sql(query2, con=conn, params=[topBrand[0],date1, date1, date1]) results['brand'] = topBrand[0] results['retailer'] = z df = df.append(results) query1 = "select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size" results = sql.read_sql(query2, con=conn, params=[topBrand[1],date1, date1, date1]) results['brand'] = topBrand[1] results['retailer'] = z df = df.append(results) query1 = "select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size" results = sql.read_sql(query2, con=conn, params=[topBrand[2],date1, date1, date1]) results['brand'] = topBrand[2] results['retailer'] = z df = df.append(results) query1 = "select p.size,count(o.id) as products 
from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size" results = sql.read_sql(query2, con=conn, params=[topBrand[3], date1, date1, date1]) results['brand'] = topBrand[3] results['retailer'] = z df = df.append(results) query1 = "select p.size,count(o.id) as products from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where o.brand=%s and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') GROUP BY p.size" results = sql.read_sql(query2, con=conn, params=[topBrand[4], date1, date1, date1]) results['brand'] = topBrand[4] results['retailer'] = z df = df.append(results) header = df.dtypes.index graphs.brandStackedMultiBar(df, header[0], header[1], header[2], header[3]) print(df) print('\n') """....Items Sold in top 5 common brands.... """ df = pd.DataFrame() df1 = pd.DataFrame() df2 = pd.DataFrame() """....Yesterday.....""" for o, i, p, z in zip(var[1:], var1[1:], var2[1:], retName[1:]): query1 = "select o.brand,p.size,p.sku,p.quantity from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')" results = sql.read_sql(query2, con=conn, params=[topBrand[0],topBrand[1],topBrand[2],topBrand[3],topBrand[4],date1, date1, date1]) results['retailer'] = z df1 = df1.append(results) """....Before Yesterday.....""" query1 = "select o.brand,p.size,p.sku,p.quantity from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query2 = query1 + " where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')" results = sql.read_sql(query2, con=conn,params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4], date2, date2,date2]) results['retailer'] = z df2 = df2.append(results) query = "SELECT o.brand as brand_x,p.size as size_x,p.sku as sku,q.itemQuantity as itemsold from productsize2 AS q INNER JOIN productsize AS p ON p.sku=q.sku INNER JOIN productcolor AS i on p.colorId=i.colorId " \ "INNER JOIN productinfo as o ON o.id=i.id WHERE o.date=%s AND i.date=%s AND p.date=%s AND q.date=%s AND (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and " \ "(p.size='X' OR p.size='S' OR p.size='M' OR p.size='L') " results = sql.read_sql(query, con=conn, params=[date1, date1, date1, date1,topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4]]) results['retailer_x'] = retName[0] df = pd.merge(df1, df2, on='sku', how='inner') df['itemsold'] = df['quantity_y'] - df['quantity_x'] df = df[df['itemsold'] > 0] df = df.append(results) #df1['itemsold'] = df1['quantity'] - df2['quantity'] #df = df1.ix[:, ['category', 'retailer', 'itemsold', 'size']].copy() header = df.dtypes.index graphs.brandStackedMultiBar(df, header[7], header[2], header[0], header[5]) print(df) print('\n') """....Revenue in top 5 common brands.... 
""" df = pd.DataFrame() df1 = pd.DataFrame() df2 = pd.DataFrame() """....Yesterday.....""" for o, i, p, z in zip(var[1:], var1[1:], var2[1:], retName[1:]): query1 = "select o.brand,p.size,p.sku,p.quantity,i.originalPrice,i.discountPercentage from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query = query1 + " where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')" results = sql.read_sql(query, con=conn, params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4],date1, date1, date1]) results['retailer'] = z df1 = df1.append(results) """........Before Yesterday......""" query1 = "select o.brand,p.size,p.sku,p.quantity,i.originalPrice,i.discountPercentage from %s as o INNER JOIN %s as i on o.id=i.id INNER JOIN %s p on p.colorId=i.colorId" % (o, i, p) query = query1 + " where (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) and o.date=%s and i.date=%s AND p.date=%s AND (p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')" results = sql.read_sql(query, con=conn,params=[topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4], date2, date2,date2]) results['retailer'] = z df2 = df2.append(results) query = "SELECT o.brand as brand_x,p.size as size_x,p.sku,q.itemQuantity as itemsold,q.itemRevenue as revenue from productsize2 AS q INNER JOIN productsize AS p ON p.sku=q.sku INNER JOIN productcolor AS i on p.colorId=i.colorId " \ "INNER JOIN productinfo as o ON o.id=i.id WHERE o.date=%s AND i.date=%s AND p.date=%s AND q.date=%s AND (o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s or o.brand=%s) AND " \ "(p.size='X' OR p.size='S' OR p.size='M' OR p.size='L')" results = sql.read_sql(query, con=conn, params=[date1, date1, date1, date1,topBrand[0], topBrand[1], topBrand[2], topBrand[3], topBrand[4]]) results['retailer_x'] = retName[0] df = pd.merge(df1, df2, on='sku', how='inner') df['itemsold'] = df['quantity_y'] - df['quantity_x'] df = df[df['itemsold'] > 0] df['price'] = df['originalPrice_x'] - (df['discountPercentage_x'] / 100) df['revenue'] = df['price'] * df['itemsold'] df = df.append(results) header = df.dtypes.index graphs.brandStackedMultiBar(df, header[13], header[12], header[0], header[10])
[ "def reduce_products(data, top_percent): \n # number of products\n n_of_products = data.product_id.nunique()\n\n # output\n print('Total Number of Products: {0}'.format(n_of_products))\n\n # 20% is the regular percentage of reducing the products\n top_20 = int(n_of_products * top_percent)\n\n # select the top products\n n_of_products_bought = data.product_id.value_counts()\n prod_f = n_of_products_bought.nlargest(top_20)\n top_products = prod_f.index\n\n # filter the transactions only for the top products\n data = data[(data.product_id.isin(top_products))]\n\n # output\n print('Number of Products after reduction: {0}'.format(top_20))\n\n return data", "def best_and_worst_selling_items(data): \n\n sorted_list = data.groupby('item_nbr', as_index=False).sum()[\"units\"].sort_values()\n\n lowest_5 = sorted_list[:5]\n highest_5 = sorted_list[106:]\n\n\n #Combining data in dataframe\n new_item_data = pd.DataFrame(dict(lowest_5 = lowest_5, \n highest_5=highest_5)).reset_index()\n new_item_data = new_item_data.fillna(0)\n new_item_data['Total number sold'] = new_item_data['lowest_5'] + new_item_data['highest_5']\n new_item_data = new_item_data.drop(columns = ['lowest_5', 'highest_5'])\n\n\n #Renaming columns, sorting dataframe and adjusting formatting\n\n new_item_data = new_item_data.rename(columns={\"index\": \"Item_Number\"})\n new_item_data = new_item_data.sort_values(['Total number sold'])\n new_item_data['Total number sold'] = new_item_data['Total number sold'].apply(lambda x: '%.1f' % x)\n new_item_data = new_item_data.reset_index(drop=True)\n\n #Showing table\n return new_item_data", "def top_5_after_transaction(book, book_to_recommend, items, items_ratings, similarity_measure):\n \n ## Selecting books based on transactions\n items_temp = items.loc[np.isin(items['itemID'], book_to_recommend)]\n \n ## Selecting books based on the same language and topic\n temp = items[items['itemID'] == book]\n temp_title = items.loc[items['itemID'] == book, 'title']\n \n items_temp = items_temp[~np.isin(items_temp['title'], temp_title)]\n items_temp = pd.concat([temp, items_temp]).reset_index(drop = True)\n \n ## Selecting books based on language\n items_temp = items_temp[np.isin(items_temp['language'], temp['language'])].reset_index(drop = True)\n \n ## Selecting variables of interest\n to_remove = ['itemID', 'title', 'author', 'publisher', 'subtopics', 'general_topic', 'general_topic_2', 'general_topic_3', 'language', 'main topic']\n variables_of_interest = items.columns[~np.isin(items.columns, to_remove)]\n items_temp_1 = items_temp[variables_of_interest]\n \n if (items_temp.shape[0] >= 20):\n \n ## Selecting top 20 similar books and their corresponding ratings\n if (similarity_measure == 'Euclidean'):\n \n D = euclidean_distances(items_temp_1)\n to_select = np.argsort(D[:, 0])[1:21]\n \n elif (similarity_measure == 'Cosine'):\n \n D = cosine_similarity(items_temp_1)\n to_select = np.argsort(-D[:, 0])[1:21]\n \n elif (similarity_measure == 'Manhattan'):\n \n D = manhattan_distances(items_temp_1)\n to_select = np.argsort(D[:, 0])[1:21]\n \n return items_and_ratings(to_select, items_temp, items_ratings) \n\n else:\n\n knn_top_5 = top_5(book, items, items_ratings, similarity_measure)\n \n return knn_top_5", "def employees_with_most_sales():\n\n print('--- Show employees with most items sold ---\\n')\n\n employees = get_employees()\n sales = get_sales()\n\n products_by_employee = {}\n\n # fill the dictionary with initial values\n for e in employees:\n products_by_employee[e['id']] = 0\n\n # add the number 
of products sold by each employee\n for s in sales:\n employee_id = s['employee_id']\n products_by_employee[employee_id] += s['num_products']\n\n top_employees = []\n\n # get the top 3\n for i in range(3):\n top_employee = {\"employee_id\": -1, \"products\": -1}\n\n for k in products_by_employee:\n total_products = products_by_employee[k]\n # if the current employee has more sales, replace top_employee\n if total_products > top_employee['products']:\n top_employee = {\n \"employee_id\": k,\n \"products\": total_products\n }\n # add to podium\n top_employees.append(top_employee)\n # delete from list to calculate other placees\n del products_by_employee[top_employee['employee_id']]\n\n print('Top 3 employees:')\n place = 0\n for el in top_employees:\n place += 1\n employee = find_by_key(employees, 'id', el['employee_id'])\n print(\"%s) %s %s with %s items sold\" % (\n place,\n employee['name'],\n employee['last_name'],\n el['products']\n ))", "def best_broaders(supers_for_all_entities: Dict,\n per_candidate_links_and_supers: List[Dict],\n num_best: int = 5,\n super_counts_field: str = \"broader_counts\",\n doprint=False,\n representativeness_threshold=0.1):\n result = []\n global_counts = dict()\n for ent, bros in supers_for_all_entities.items():\n for bro in bros:\n global_counts[bro] = global_counts.get(bro, 0) + 1\n\n onlytopmost = []\n for can in per_candidate_links_and_supers:\n\n # For this entity, the following dictionaries have an element for every possible super\n # Using notation from the paper\n # T_cc : The number of entities narrower to a candidate which are tagged with NER typeT\n T_cc = {x: y for x, y in can[super_counts_field].items()\n if y > representativeness_threshold * len(can[\"entities\"])}\n if len(T_cc) == 0:\n T_cc = {x: y for x, y in can[super_counts_field].items()}\n # T_w : is the number of entities in the wholecorpus tagged with T\n T_w = {y: global_counts[y] for y in T_cc.keys()}\n # w : the total number of entities in the whole corpus\n w = float(len(supers_for_all_entities))\n # cc : the total number of entities in this candidate\n cc = float(len(can[\"entities\"]))\n\n # dict of the form super : log_odds\n log_odds_per_super = {x: math.log((T_cc[x] / cc) / (T_w[x] / w))\n for x in T_cc.keys()}\n\n logslist = list(log_odds_per_super.items())\n logslist.sort(key=lambda x: x[1])\n logslist.reverse()\n\n maxbroads = min(len(logslist), num_best)\n logodds = []\n for bi in range(maxbroads):\n logodds.append({\"candidatesbroader\": logslist[bi][0],\n \"loggods\": logslist[bi][1]})\n can[\"log_odds\"] = logodds\n if doprint:\n print(\"\\t\\t---\", \", \".join([str(x[1]) for x in logslist[:maxbroads]]))\n if len(logslist) > 0:\n onlytopmost.append(logslist[0][1])\n can[\"best_match_broader\"] = logslist[0][0]\n else:\n onlytopmost.append(None)\n can[\"best_match_broader\"] = None\n\n return onlytopmost", "def top_5(book, items, items_ratings, similarity_measure):\n \n ## Filter out books with same title but different publisher\n temp = items[items['itemID'] == book]\n temp_title = items.loc[items['itemID'] == book, 'title']\n items = items[~np.isin(items['title'], temp_title)]\n items = pd.concat([temp, items]).reset_index(drop = True)\n \n ## Selecting books based on the same language and topic\n items = items[np.isin(items['language'], temp['language'])].reset_index(drop = True)\n \n if (items[np.isin(items['general_topic'], temp['general_topic'])].shape[0] > 5): \n if (sum(items['general_topic'] == 'Y') > 15000):\n\n if (all(temp['general_topic_2'] == 'YF') == 
True):\n\n items = items[np.isin(items['general_topic_3'], temp['general_topic_3'])].reset_index(drop = True)\n\n else:\n \n if (items[np.isin(items['general_topic_2'], temp['general_topic_2'])].shape[0] >= 6):\n \n items = items[np.isin(items['general_topic_2'], temp['general_topic_2'])].reset_index(drop = True) \n \n else:\n \n items = items[np.isin(items['general_topic'], temp['general_topic'])].reset_index(drop = True)\n \n ## Selecting variables of interest \n to_remove = ['itemID', 'title', 'author', 'publisher', 'subtopics', 'general_topic', 'general_topic_2', 'general_topic_3', 'language', 'main topic']\n variables_of_interest = items.columns[~np.isin(items.columns, to_remove)]\n items_temp = items[variables_of_interest]\n \n ## Selecting top similar books\n if (similarity_measure == 'Euclidean'):\n \n D = euclidean_distances(items_temp)\n to_select = np.argsort(D[:, 0])[1:21]\n \n elif (similarity_measure == 'Cosine'):\n \n D = cosine_similarity(items_temp)\n to_select = np.argsort(-D[:, 0])[1:21]\n\n elif (similarity_measure == 'Manhattan'):\n \n D = manhattan_distances(items_temp)\n to_select = np.argsort(D[:, 0])[1:21]\n \n return items_and_ratings(to_select, items, items_ratings)", "def top_amenity():\n #pipeline = {$group:{\"_id\":\"$amenity\",\"count\":{\"$sum\":1}}} #[{$group:{\"_id\":\"$amenity\",\"count\":{\"$sum\":1}}},{\"$sort\":{\"count\":-1}}]\n pipeline = [{\"$match\":{\"amenity\":{\"$exists\":1}}},\n {\"$group\":{\"_id\":\"$amenity\",\"count\":{\"$sum\":1}}},\n {\"$sort\":{\"count\":-1}},{\"$limit\":10}]\n return pipeline", "def q5(data):\n\t# Try using dictionaries for this question, and make use of the sorted function available for list and dictionaries\n\t# https://docs.python.org/2/tutorial/datastructures.html\n\tn=0\n zipcode=0\n tot=0\n bottle=0\n res=''\n maxm=0\n cate=0\n sales={}\n for line in data:\n n+=1\n if n==1:\n i=-1\n for item in line:\n i+=1\n if item.lower() =='zipcode':\n zipcode=i\n elif item.lower() =='bottle qty':\n bottle=i\n elif item.lower() =='category name':\n cate=i\n elif line[cate]=='TEQUILA':\n\t\t\tnum=int(line[bottle])\n\t\t\tz=line[zipcode]\n\t\t\tif z in sales:\n\t\t\t\tsales[z]=sales[z]+num\n\t\t\telse:\n\t\t\t\tsales[z]=num\n\tmaxm=0\n\tzips=sales.keys()\n for item in zips:\n if maxm==0:\n res=item\n maxm=sales[item]\n if maxm<sales[item]:\n res=item\n maxm=sales[item]\n return res", "def get_popular_products(self,count):\r\n popular_products = self.data.groupby(\"product_category_name\") \\\r\n .count().sort(desc(\"count\")).limit(count)\r\n popular_products.toPandas().to_csv(\"popular_products.csv\",encoding = 'utf-8',index = False)", "def most_popular(self, n):\n return dict(collections.Counter([x[2] for x in self.tags]).most_common(n))", "def get_most_sold_item(df):\n\n\n top = df.groupby('Item').Units.sum().nlargest(1)\n\n return list(top.items())[0]", "def calculate(self):\n for company in self.active_offers:\n operations = self.active_offers[company]\n sellers = [seller for seller in operations if seller[2] < 0]\n buyers = [buyer for buyer in operations if buyer[2] > 0]\n prices = []\n sellers.sort(key=lambda x: float(x[1]))\n for seller in sellers:\n for buyer in buyers:\n if buyer[1] >= float(seller[1]):\n sell = abs(seller[2])\n buy = buyer[2]\n if sell > buy:\n quant = sell - buy\n else:\n quant = sell\n\n prices.append(seller[1])\n if seller[0] is None:\n if buyer[0].money >= buyer[1] * buyer[2]:\n seller[2] += quant\n buyer[0].money -= quant * float(seller[1])\n buyer[2] -= quant\n buyer[0].stocks[company] += 
quant\n else:\n if buyer[0].money >= buyer[1] * buyer[2]:\n seller[0].money += quant * float(seller[1])\n seller[2] += quant\n seller[0].stocks[company] -= quant\n buyer[0].money -= quant * float(seller[1])\n buyer[2] -= quant\n buyer[0].stocks[company] += quant\n\n if buyer[2] == 0:\n buyers.remove(buyer)\n\n if seller[2] == 0:\n sellers.remove(seller)\n\n del self.basic[company][0]\n if len(prices) > 0:\n self.basic[company].append(min(prices))\n else:\n self.basic[company].append(self.basic[company][-1])", "def resourcesByApplication(applicationResources):\n applicationsDepartment = applicationResources\n applicationsDepartment = applicationsDepartment.sort_values(['DEPARTMENT'])\n appsGroupedByDepartment = applicationsDepartment.groupby(['DEPARTMENT', 'APPLICATION'])[[\"CPU CORES\", \"RAM (MB)\"]].sum()\n print(appsGroupedByDepartment)", "def summarize_brands_data(brands_list=[], site_domain=None):\n\n if not isinstance(brands_list, list) or len(brands_list) == 0:\n return\n\n from debra.models import ProductModel, ProductModelShelfMap\n from collections import defaultdict, OrderedDict\n from urlparse import urlparse\n\n # 0.\n product_ids = []\n product_inf_ids = []\n product_data = []\n brands_mentioned = []\n\n parameters = {\n u'keyword_types': [u'all', u'all', u'all'],\n u'and_or_filter_on': True,\n u'order_by': {u'field': u'_score', u'order': u'desc'},\n u'filters': {u'priceranges': [],\n u'popularity': [],\n u'comments': None,\n u'tags': [],\n u'gender': [],\n u'brand': [],\n u'engagement': None,\n u'shares': None,\n u'source': [],\n u'location': [],\n u'social': None,\n u'activity': None,\n u'categories': [],\n u'likes': None},\n u'keyword': brands_list,\n u'group_concatenator': u'and_same',\n u'sub_tab': u'main_search',\n 'no_artificial_blogs': True,\n u'groups': [0 for _ in brands_list],\n u'type': u'all',\n u'page': 1}\n\n # Part 1. Number of influencers\n index_name = ELASTICSEARCH_INDEX\n endpoint = \"/%s/influencer/_search\" % index_name\n url = ELASTICSEARCH_URL\n\n # building base query for influencer\n # TODO: need to be adapted for new flattened schema\n query = es_influencer_query_builder_v2(parameters, page_size=1, page=0)\n query['query']['filtered']['filter'] = get_query_filter(False, True, False)\n\n # getting number of influencers in total\n rq = make_es_get_request(\n es_url=url + endpoint,\n es_query_string=json.dumps(query)\n )\n\n resp = rq.json()\n infs_total = resp.get(\"hits\", {}).get(\"total\", 0)\n\n # Part 2. Number of posts\n index_name = ELASTICSEARCH_INDEX\n endpoint = \"/%s/post/_search\" % index_name\n url = ELASTICSEARCH_URL\n\n # building base query for influencer\n # TODO: need to be adapted for new flattened schema\n query = es_post_query_builder_v2(parameters, page=0, page_size=1)\n query['query']['filtered']['filter']['has_parent']['filter'] = get_query_filter(False, True, False)\n\n query['aggs'] = {\n \"brand_domains_counts\": {\n \"terms\": {\n \"field\": \"brand_domains\",\n \"size\": 100\n }\n }\n }\n\n # getting number of influencers in total\n rq = make_es_get_request(\n es_url=url + endpoint,\n es_query_string=json.dumps(query)\n )\n\n resp = rq.json()\n posts_total = resp.get(\"hits\", {}).get(\"total\", 0)\n\n brands_distribution = []\n # getting results from aggregations\n for bucket in resp.get('aggregations', {}).get('brand_domains_counts', {}).get('buckets', []):\n brands_distribution.append( (bucket['key'], bucket['doc_count']) )\n\n # Part 3. 
Number of products\n index_name = ELASTICSEARCH_INDEX\n endpoint = \"/%s/product/_search\" % index_name\n url = ELASTICSEARCH_URL\n\n # building base query for influencer\n # TODO: need to be adapted for new flattened schema\n query = es_product_query_builder_v2(parameters, page=0, page_size=1)\n query['query']['filtered']['filter']['has_parent']['filter']['has_parent']['filter'] = get_query_filter(False, True, False)\n\n # getting number of influencers in total\n rq = make_es_get_request(\n es_url=url + endpoint,\n es_query_string=json.dumps(query)\n )\n\n resp = rq.json()\n products_total = resp.get(\"hits\", {}).get(\"total\", 0)\n\n # 4, 5\n # building base query for influencer\n # TODO: need to be adapted for new flattened schema\n query = es_product_query_builder_v2(parameters, page=0, page_size=products_total)\n query['query']['filtered']['filter']['has_parent']['filter']['has_parent']['filter'] = get_query_filter(False, True, False)\n\n query['fields'] = [\"create_date\", \"influencer_id\"]\n\n rq = make_es_get_request(\n es_url=url + endpoint,\n es_query_string=json.dumps(query)\n )\n\n resp = rq.json()\n # print(resp)\n for h in resp.get(\"hits\", {}).get(\"hits\", []):\n product_ids.append(h['_id'])\n product_inf_ids.append(h['fields']['influencer_id'][0])\n product_data.append((h['_id'], h['fields']['influencer_id'][0]))\n\n total_affiliate = 0\n total_not_affiliate = 0\n\n # 6, 7\n # checking site\n retailers = defaultdict(int)\n\n if site_domain:\n site_domain = site_domain.lower()\n\n for pd in product_data:\n prod = ProductModel.objects.get(id=pd[0])\n pmsm = prod.productmodelshelfmap_set.filter(influencer_id=pd[1]).order_by('-added_datetime')\n if pmsm.count() > 0:\n pmsm = pmsm[0]\n if pmsm.affiliate_prod_link is None:\n total_not_affiliate += 1\n else:\n total_affiliate += 1\n\n if site_domain:\n if site_domain in prod.prod_url.lower():\n retailers[site_domain] += 1\n else:\n retailers[urlparse(prod.prod_url).netloc] += 1\n\n print('%s Influencers found for %s' % (infs_total, brands_list))\n print('%s Posts found for %s' % (posts_total, brands_list))\n print('%s Products found for %s' % (products_total, brands_list))\n # print('Products ids: %s' % product_ids)\n\n print('%s Products from affiliates' % total_affiliate)\n print('%s Products not from affiliates' % total_not_affiliate)\n\n if site_domain:\n retailers_self_ctr = retailers.get(site_domain, 0)\n retailers_other_ctr = sum([v for k, v in retailers.items() if k != site_domain])\n print(' RETAILERS:')\n for k, v in sorted(retailers.items(), reverse=True):\n print(\" %s : %s ( %s percent)\" % (k, v, (v * 100 / (retailers_self_ctr + retailers_other_ctr))))\n\n print(\"Brands mentioned in conjunction:\")\n for bd in brands_distribution:\n print(\" %s : %s mentions\" % (bd[0], bd[1]))", "def ratPool(self,rar: int = 1) -> float:\r\n lPool = len(self.pool)\r\n rCount = 0\r\n for i in self.pool:\r\n if self.b[i][\"Rarity\"] == rar: rCount += 1\r\n return round(rCount/lPool,2)", "def cat_popular(self):\n list_of_tuple = []\n\n query = f\"SELECT cat_id FROM prodcat GROUP BY cat_id ORDER BY COUNT(*) DESC LIMIT 10;\"\n result = self.to_list_of_integer(self.query(query))\n\n for n in range(len(result)):\n query = (\n f\"SELECT cat_id, cat_nom FROM categories WHERE cat_id = '{result[n]}'\"\n )\n cat_popular = self.to_list_of_tuple(list_of_tuple, query, n)\n\n return cat_popular", "def most_popular_authors(query_2):", "def _calc_top_models_common(conn, subquery, subquery_params=None):\n if subquery_params is None:\n 
subquery_params = []\n\n with conn.cursor() as cursor, utils.CodeProfiler() as cp:\n cursor.execute(sql.SQL(\"\"\"SELECT manufacturer,\n model,\n total_imei_count,\n string_agg(technology_generation, '/') AS tech_generations\n FROM (SELECT manufacturer,\n model,\n total_imei_count,\n technology_generation\n FROM (SELECT model_name AS model,\n manufacturer::TEXT AS manufacturer,\n SUM(st.imei_count)::BIGINT AS total_imei_count,\n bit_or(rat_bitmask) AS model_rat\n FROM gsma_data\n JOIN ({0}) AS st\n USING (tac)\n GROUP BY manufacturer, model_name\n ORDER BY total_imei_count DESC\n LIMIT 10) top_10_models\n JOIN LATERAL (SELECT bitmask_to_set_bit_positions(top_10_models.model_rat)\n AS model_gsma_rank) model_ranks\n ON TRUE\n JOIN (SELECT DISTINCT gsma_rank, technology_generation\n FROM radio_access_technology_map) unique_map\n ON unique_map.gsma_rank = model_ranks.model_gsma_rank\n ) uniques\n GROUP BY manufacturer, model, total_imei_count\n ORDER BY total_imei_count DESC\n \"\"\").format(subquery), # noqa: Q447, Q449\n subquery_params)\n results = [{'model': result.model,\n 'manufacturer': result.manufacturer,\n 'tech_generations': result.tech_generations,\n 'imei_count': result.total_imei_count} for result in cursor]\n return results, cp.duration, [cp.duration]", "def recommend(user_id=None, business_id=None, city=None, n=10):\n lijst3 = []\n for j in lijst:\n for i in df_similarity_categories:\n if j['business_id'] == i:\n lijst3.append(j)\n\n if business_id == None:\n return random.sample(lijst3, 10)\n\n hallo = dict()\n for i in df_similarity_categories:\n if i == business_id:\n continue\n else:\n hallo[i] = df_similarity_categories[business_id][i]\n\n test = sorted(hallo, key=hallo.get, reverse=True)\n\n lijstje = []\n for i in test:\n for x in BUSINESSES['cleveland']:\n if i == x['business_id']:\n lijstje.append(x)\n\n for x in lijst:\n if x['business_id'] == business_id:\n for y, z in x['attributes'].items():\n if y == 'RestaurantsPriceRange2':\n prijs = int(z)\n\n final = []\n for x in lijstje:\n for y, z in x['attributes'].items():\n if y == 'RestaurantsPriceRange2':\n if int(z) == prijs or int(z) == (prijs + 1) or int(z) == (prijs - 1):\n final.append(x)\n\n final = final[0:10]\n return final" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append visits and errors from a different response into this response
def append(self, other): if not isinstance(other, SIRIResponse): raise TypeError("Expected a SIRIResponse object") self.errors += other.errors for stop_code, visits in other.visits.items(): if stop_code in self.visits: raise ValueError("Merging requests for the same stop is not supported") self.visits[stop_code] = visits
[ "def _adapt_response(self, response):\n errors, meta = super(ServerError, self)._adapt_response(response)\n return errors[0], meta # single error instead of array", "def add_error(self, error: any):\n if ERRORS_KEY in self.response:\n self.response[ERRORS_KEY].append(error)\n else:\n self.response[ERRORS_KEY] = [error]\n return self", "def merge(self: ResponseT, other: ResponseT) -> ResponseT:\n raise TypeError(self)", "def add_response(self, request, response):\n self.records.append(ResponseRecord(request, response))", "def _additional_response_starting(self, name):\n pass", "def error_details(self):\n\n # TODO There is no attempt so far to eliminate duplicates.\n # Duplicates could be eliminated based on exception type\n # and message or exception type and file name/line number\n # presuming the latter are available. Right now the file\n # name and line number aren't captured so can't rely on it.\n\n # TODO There are no constraints in place on what keys/values\n # can be in params dictionaries. Need to convert values to\n # strings at some point.\n\n if not self.errors:\n return\n\n for error in self.errors:\n params = {}\n params[\"stack_trace\"] = error.stack_trace\n\n intrinsics = {'spanId': error.span_id, 'error.expected': error.expected}\n intrinsics.update(self.trace_intrinsics)\n params['intrinsics'] = intrinsics\n\n params['agentAttributes'] = {}\n for attr in self.agent_attributes:\n if attr.destinations & DST_ERROR_COLLECTOR:\n params['agentAttributes'][attr.name] = attr.value\n\n params['userAttributes'] = {}\n for attr in self.user_attributes:\n if attr.destinations & DST_ERROR_COLLECTOR:\n params['userAttributes'][attr.name] = attr.value\n\n # add error specific custom params to this error's userAttributes\n\n err_attrs = create_user_attributes(error.custom_params,\n self.settings.attribute_filter)\n for attr in err_attrs:\n if attr.destinations & DST_ERROR_COLLECTOR:\n params['userAttributes'][attr.name] = attr.value\n\n yield newrelic.core.error_collector.TracedError(\n start_time=error.timestamp,\n path=self.path,\n message=error.message,\n type=error.type,\n parameters=params)", "def _additional_response_starting(self, name):\n sname = self._strip_ns(name)\n if sname != 'multistatus':\n raise errors.InvalidHttpResponse(\n self.url, msg='Unexpected %s element' % name)", "def add_response(self,iSurveyID,aResponseData):", "def _annotate_exceptions(errors, mapping=None):\n\n for e in errors:\n if not e.url:\n # Error may have been raised before request path\n # was determined; nothing to annotate.\n continue\n\n if not mapping:\n # Request is basename of path portion of URI.\n e.request = os.path.basename(urlparse.urlsplit(\n e.url)[2])\n continue\n\n # If caller specified a mapping object, use that\n # instead of trying to deduce the request's name.\n if e.url not in mapping:\n raise tx.TransportOperationError(\n \"No mapping found for URL {0}\".format(\n e.url))\n\n e.request = mapping[e.url]\n\n return errors", "def custom_error_response(self, response):\n\n self.set_status(400)\n self.finish(response)", "def error_responses(self, error_responses):\n\n self._error_responses = error_responses", "def extend_response(self, response):\n bases = [GenericResponseExtension]\n\n if self.skip_n_forms:\n bases.append(type(\n \"SkipNForms\",\n (SkipNFormsExtension, ),\n dict(n=self.skip_n_forms)\n ))\n\n if self.use_intercooler:\n bases.append(IntercoolerClickExtension)\n\n bases.append(response.__class__)\n response.__class__ = type('ExtendedResponse', tuple(bases), {})\n\n 
return response", "def update_error():\n requests[\"error\"] += 1", "def _batch_response(self, request_id, response, exception):\n\n if exception is not None:\n logging.error(exception)\n logging.error('API Request Error! ' + str(response))", "def toResponse(self, stanza):\n if stanza.getAttribute('to'):\n stanza.swapAttributeValues('to', 'from')\n stanza['type'] = 'error'\n stanza.addChild(self.getElement())\n return stanza", "def process_response(self, request, response, spider):\n\n if not isinstance(request, SeleniumRequest):\n return response\n\n timeout = time.time() + spider.wait_time\n max_timeout = time.time() + spider.max_wait_time\n structured_data = extruct.extract(response.text, uniform=True, syntaxes=spider.wait_sintaxes)\n name = spider.get_name(response)\n category = spider.get_category(response, spider.category_locator)\n properties = spider.get_specifications(response, spider.properties_locator, spider.positions)\n\n while True:\n body = str.encode(response.request.meta['driver'].page_source)\n new_response = HtmlResponse(response.url, body=body, encoding='utf-8', request=request)\n structured_data2 = extruct.extract(body, uniform=True, syntaxes=spider.wait_sintaxes)\n name2 = spider.get_name(new_response)\n category2 = spider.get_category(new_response, spider.category_locator)\n properties2 = spider.get_specifications(new_response, spider.properties_locator, spider.positions)\n\n if (structured_data != structured_data2) or (name != name2) or (category != category2) or \\\n properties != properties2:\n timeout = time.time() + spider.wait_time\n structured_data = structured_data2\n name = name2\n category = category2\n properties = properties2\n\n elif time.time() > timeout:\n break\n\n if time.time() > max_timeout:\n print('max timeout reached!')\n break\n\n time.sleep(1)\n\n request.meta['Name'] = name\n request.meta['Category'] = category\n request.meta['Properties'] = properties\n request.meta['StructuredData'] = extruct.extract(body, uniform=True)\n\n return new_response", "def tag_responses(self):\n module_logger.info('------ Tag student responses')\n for dutch_prompt in self.__student_responses__:\n for res in self.__student_responses__[dutch_prompt]:\n response_text = res['response']\n response_tokens = nltk_helper.tokenize_sentence(response_text)\n response_pos = nltk_helper.tag_tokens(response_tokens)\n res['tokens'] = response_tokens\n res['pos'] = response_pos", "def start_response_impl(self, state, res, hdr, exc_info):\n\t\tstate.result = res.split(\" \")[0]\n\n\t\t# Work out from the request environment what output format we\n\t\t# want to use, and select it\n\t\tstate.transformer = self.get_transformer(state.env)\n\n\t\t# Modify the existing headers: drop any content-type or\n\t\t# content-length headers\n\t\tnew_hdr = []\n\t\tfor name, value in hdr:\n\t\t\tlname = name.lower()\n\t\t\tif lname == \"content-type\":\n\t\t\t\tcontinue\n\t\t\tif lname == \"content-length\":\n\t\t\t\tcontinue\n\t\t\tnew_hdr.append((name, value))\n\n\t\t# Add in suitable headers for the transformed output\n\t\tstate.transformer.http_headers(new_hdr)\n\n\t\t# Continue with the original function call as if nothing has\n\t\t# happened\n\t\twrite = state.start_response(res, new_hdr)\n\t\tdef new_write(data):\n\t\t\tlog.error(\"Deprecated write function called! 
Data not written.\")\n\t\t\twrite(state.transformer.write(data))\n\n\t\treturn new_write", "def add_response_headers(self, response, metrics):\n def sanitize(string):\n return string.title().replace(' ', '-')\n\n for module, module_values in metrics.items():\n for key, value in module_values.items():\n response['X-Speedbar-%s-%s' % (sanitize(module), sanitize(key))] = value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
define_op is a callable that translates tokens into objects; bin_op and un_op provide functions for performing binary and unary operations
def evaluate(self, define_op, bin_op=_BINARY_OPERATOR_MAP, un_op=_UNARY_OPERATOR_MAP): if self.right: # binary or implicit operator op_text = self.operator[1] if self.operator else '' if op_text not in bin_op: raise DefinitionSyntaxError('missing binary operator "%s"' % op_text) left = self.left.evaluate(define_op, bin_op, un_op) return bin_op[op_text](left, self.right.evaluate(define_op, bin_op, un_op)) elif self.operator: # unary operator op_text = self.operator[1] if op_text not in un_op: raise DefinitionSyntaxError('missing unary operator "%s"' % op_text) return un_op[op_text](self.left.evaluate(define_op, bin_op, un_op)) else: # single value return define_op(self.left)
[ "def makeBinOp(opdict, next) :\n op = reduce(operator.or_, [\n a(Token('op', k)).expectsMsg(repr(k)) >> const(v)\n for k,v in opdict.iteritems()\n ])\n return (next + many((op + next) >> tuple)) \\\n >> unarg(eval)", "def op(operator):\n return a(Token('OP', operator)) >> tok_to_value", "def binary_operator(op):\n def _binary_operator(self, other):\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return return_type(\n \"({left} {op} {right})\".format(\n left=self_expr,\n op=op,\n right=other_expr\n ),\n binds=new_inputs\n )\n elif isinstance(other, NumExprFactor):\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n return commuted_method_getter(other)(self)\n elif isinstance(other, Term):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n binds=(self,)\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n binds=(self, other)\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant)\".format(op=op, constant=other),\n binds=(self,)\n )\n raise BadBinaryOperator(op, self, other)\n return _binary_operator", "def register_unary_op(op):\n to_lower_op = make_unary_op(op)\n cuda_lower(op, MaskedType)(to_lower_op)", "def register_arithmetic_op(op):\n to_lower_op = make_arithmetic_op(op)\n cuda_lower(op, MaskedType, MaskedType)(to_lower_op)", "def transform_binary_operator(self, node):\n # get all the tokens of assignment\n # and store it in the tokens list\n tokens = list(node.get_tokens())\n\n # supported operators list\n operators_list = ['+', '-', '*', '/', '%','=',\n '>', '>=', '<', '<=', '==', '!=', '&&', '||', '+=', '-=',\n '*=', '/=', '%=']\n\n # this stack will contain variable content\n # and type of variable in the rhs\n combined_variables_stack = []\n\n # this stack will contain operators\n # to be processed in the rhs\n operators_stack = []\n\n # iterate through every token\n for token in tokens:\n # token is either '(', ')' or\n # any of the supported operators from the operator list\n if token.kind == cin.TokenKind.PUNCTUATION:\n\n # push '(' to the operators stack\n if token.spelling == '(':\n operators_stack.append('(')\n\n elif token.spelling == ')':\n # keep adding the expression to the\n # combined variables stack unless\n # '(' is found\n while (operators_stack\n and operators_stack[-1] != '('):\n if len(combined_variables_stack) < 2:\n raise NotImplementedError(\n \"Unary operators as a part of \"\n \"binary operators is not \"\n \"supported yet!\")\n rhs = combined_variables_stack.pop()\n lhs = combined_variables_stack.pop()\n operator = operators_stack.pop()\n combined_variables_stack.append(\n self.perform_operation(\n lhs, rhs, operator))\n\n # pop '('\n operators_stack.pop()\n\n # token is an operator (supported)\n elif token.spelling in operators_list:\n while (operators_stack\n and self.priority_of(token.spelling)\n <= self.priority_of(\n operators_stack[-1])):\n if len(combined_variables_stack) < 2:\n raise NotImplementedError(\n \"Unary operators as a part of \"\n \"binary operators is not \"\n \"supported yet!\")\n rhs = combined_variables_stack.pop()\n lhs = combined_variables_stack.pop()\n operator = operators_stack.pop()\n combined_variables_stack.append(\n self.perform_operation(\n lhs, rhs, operator))\n\n # push current operator\n operators_stack.append(token.spelling)\n\n # token is a bitwise operator\n elif token.spelling in ['&', '|', '^', '<<', '>>']:\n raise 
NotImplementedError(\n \"Bitwise operator has not been \"\n \"implemented yet!\")\n\n # token is a shorthand bitwise operator\n elif token.spelling in ['&=', '|=', '^=', '<<=',\n '>>=']:\n raise NotImplementedError(\n \"Shorthand bitwise operator has not been \"\n \"implemented yet!\")\n else:\n raise NotImplementedError(\n \"Given token {} is not implemented yet!\"\n .format(token.spelling))\n\n # token is an identifier(variable)\n elif token.kind == cin.TokenKind.IDENTIFIER:\n combined_variables_stack.append(\n [token.spelling, 'identifier'])\n\n # token is a literal\n elif token.kind == cin.TokenKind.LITERAL:\n combined_variables_stack.append(\n [token.spelling, 'literal'])\n\n # token is a keyword, either true or false\n elif (token.kind == cin.TokenKind.KEYWORD\n and token.spelling in ['true', 'false']):\n combined_variables_stack.append(\n [token.spelling, 'boolean'])\n else:\n raise NotImplementedError(\n \"Given token {} is not implemented yet!\"\n .format(token.spelling))\n\n # process remaining operators\n while operators_stack:\n if len(combined_variables_stack) < 2:\n raise NotImplementedError(\n \"Unary operators as a part of \"\n \"binary operators is not \"\n \"supported yet!\")\n rhs = combined_variables_stack.pop()\n lhs = combined_variables_stack.pop()\n operator = operators_stack.pop()\n combined_variables_stack.append(\n self.perform_operation(lhs, rhs, operator))\n\n return combined_variables_stack[-1][0]", "def binary_op(cls, operator, a, b):\n return cls.binary_operators[operator](a, b)", "def visit_BinOp(self, node):\n # TODO: Integer Binary Operations\n op = node.op\n lhs = node.left\n rhs = node.right\n\n if (type(lhs) == asr.Variable):\n left_value = Symbol(lhs.name)\n elif(type(lhs) == asr.BinOp):\n l_exp_ast = call_visitor(lhs)\n for exp in l_exp_ast:\n left_value = exp\n else:\n raise NotImplementedError(\"Numbers Currently not supported\")\n\n if (type(rhs) == asr.Variable):\n right_value = Symbol(rhs.name)\n elif(type(rhs) == asr.BinOp):\n r_exp_ast = call_visitor(rhs)\n for exp in r_exp_ast:\n right_value = exp\n else:\n raise NotImplementedError(\"Numbers Currently not supported\")\n\n if isinstance(op, asr.Add):\n new_node = Add(left_value, right_value)\n elif isinstance(op, asr.Sub):\n new_node = Add(left_value, -right_value)\n elif isinstance(op, asr.Div):\n new_node = Mul(left_value, 1/right_value)\n elif isinstance(op, asr.Mul):\n new_node = Mul(left_value, right_value)\n\n self._py_ast.append(new_node)", "def _create_binary_operator(operator_func, description, list_kword=None):\n\n class _BinaryOperatorImpl(_BinaryOperator):\n \"\"\"Implements a binary operator specfication.\"\"\"\n\n def __init__(self, rvalue, key=None, description=description,\n default=DEFAULT_NOT_SET):\n _BinaryOperator.__init__(self, description, default, operator_func,\n rvalue, key, list_kword)\n return _BinaryOperatorImpl", "def visit_UnaryOp(self, node): #pylint: disable=invalid-name\n # Recursively remove Unary Ops\n if node.op == 'sizeof':\n type_node = get_type(node.expr, self.envr)\n #type_node = node.expr\n return get_size_ast(type_node, self.envr)\n\n node.expr = self.visit(node.expr)\n if node.op == '!':\n return AST.BinaryOp(\"==\", constant_zero(), node.expr, node.coord)\n elif node.op in ['++', '--', 'p--', 'p++']:\n return self.inc_and_dec(node)\n elif node.op in ['+', '-', '~']:\n if node.op == '-':\n return AST.BinaryOp('-', constant_zero(), node.expr, node.coord)\n node.show()\n raise NotImplementedError()\n elif node.op in ['&', '*']:\n return node\n else:\n 
raise NotImplementedError()", "def _binary_op(result_name, func_name, arg1_name, arg2_name):\n funcs = {'add': '+', 'sub': '-', 'mul': '*', 'div': '/'}\n return f\"{result_name} = {arg1_name} {funcs[func_name]} {arg2_name}\"", "def _bin_op(instance, opnode, op, other, context, reverse=False):\n if reverse:\n method_name = protocols.REFLECTED_BIN_OP_METHOD[op]\n else:\n method_name = protocols.BIN_OP_METHOD[op]\n return functools.partial(\n _invoke_binop_inference,\n instance=instance,\n op=op,\n opnode=opnode,\n other=other,\n context=context,\n method_name=method_name,\n )", "def test_compile_binary_operators(self):\n op_map = {\n operators.and_: ' AND ',\n operators.or_: ' OR ',\n operators.add: ' + ',\n operators.mul: ' * ',\n operators.sub: ' - ',\n operators.div: ' / ',\n operators.mod: ' MOD ',\n operators.truediv: ' / ',\n operators.lt: ' < ',\n operators.le: ' <= ',\n operators.ne: ' <> ',\n operators.gt: ' > ',\n operators.ge: ' >= ',\n operators.eq: ' = ',\n operators.concat_op: ' || ',\n operators.like_op: ' LIKE ',\n operators.is_: ' IS ',\n operators.isnot: ' IS NOT '\n }\n\n for op in op_map.keys():\n self.td_engine.execute(op(self.table.c.c1, text('arg')))\n\n assert(self.last_compiled == 't_test.c1' + op_map[op] + 'arg')", "def operatorConvert(op):\n operator = BitArray(3)\n if op == '+':\n return operator\n elif op == '-':\n operator.invert(2)\n return operator\n elif op == '*': \n operator.invert(1)\n return operator\n elif op == '/':\n operator.invert([1, 2])\n return operator\n elif op == 'mod':\n operator.invert(0)\n return operator\n elif op == '^':\n operator.invert([0, 2])\n return operator\n elif op == 'nck':\n operator.invert([0, 1])\n return operator\n elif op == '%':\n operator.invert([0, 1, 2])\n return operator", "def process_binary_operators(self, bin_op, left, right, disc_left, disc_right):\n return bin_op._binary_new_copy(disc_left, disc_right)", "def as_op(x):\n if isinstance(x, Op):\n return x\n\n return constant(x)", "def visit_BinaryOp(self, node):\n op = node.op\n\n if op == '+':\n return self.visit(node.left) + self.visit(node.right)\n elif op == '-':\n return self.visit(node.left) - self.visit(node.right)\n elif op == '/':\n return self.visit(node.left) / self.visit(node.right)\n elif op == '*':\n return self.visit(node.left) * self.visit(node.right)\n elif op == '%':\n return self.visit(node.left) % self.visit(node.right)\n elif op == '*':\n return self.visit(node.left) * self.visit(node.right)\n elif op == '<':\n return self.visit(node.left) < self.visit(node.right)\n elif op == '>':\n return self.visit(node.left) > self.visit(node.right)\n elif op == '>=':\n return self.visit(node.left) >= self.visit(node.right)\n elif op == '<=':\n return self.visit(node.left) <= self.visit(node.right)\n elif op == '&&':\n return self.visit(node.left) and self.visit(node.right)\n elif op == '||':\n return self.visit(node.left) or self.visit(node.right)\n elif op == '==':\n return self.visit(node.left) == self.visit(node.right)\n elif op == '!=':\n return self.visit(node.left) != self.visit(node.right)", "def _op(self, parse_args, op):\n parse_args = [_ensure_subexpression(arg) for arg in parse_args]\n arity = len(parse_args)\n indices = []\n for i in xrange(arity):\n if i == 0:\n indices = [-1]\n else:\n indices = [indices[0] - len(parse_args[-i].ops)] + indices\n new = _SubExpression([op], [indices])\n all_expressions = parse_args + [new]\n return _concat_subexpressions(*all_expressions)", "def register_custom_op(is_ortmodule=False):\n\n # Symbolic definition\n 
def inverse(g, self):\n return g.op(\"com.microsoft::Inverse\", self).setType(self.type())\n\n def gelu(g, self):\n return g.op(\"com.microsoft::Gelu\", self).setType(self.type())\n\n def triu(g, self, diagonal):\n return g.op(\"com.microsoft::Trilu\", self, diagonal, upper_i=1).setType(self.type())\n\n def tril(g, self, diagonal):\n return g.op(\"com.microsoft::Trilu\", self, diagonal, upper_i=0).setType(self.type())\n\n # Op Registration\n register_custom_op_symbolic('::inverse', inverse, _onnx_opset_version)\n register_custom_op_symbolic('::gelu', gelu, _onnx_opset_version)\n register_custom_op_symbolic('::triu', triu, _onnx_opset_version)\n register_custom_op_symbolic('::tril', tril, _onnx_opset_version)\n\n if is_ortmodule:\n @parse_args('v', 'v', 'i', 'b', 'b')\n def embedding(g, weight, indices, padding_idx, scale_grad_by_freq, sparse):\n custom_attributes_json = (\n '{'\n f'\"padding_idx\":{str(padding_idx)},'\n f'\"scale_grad_by_freq\":{str(scale_grad_by_freq).lower()},'\n f'\"sparse\":{str(sparse).lower()}'\n '}'\n )\n output = g.op(\"com.microsoft::ATenOp\", weight, indices, name_s='aten::embedding',\n custom_attributes_json_s=custom_attributes_json)\n indices_shape = _get_tensor_sizes(indices)\n if indices_shape is not None and hasattr(weight.type(), 'with_sizes'):\n output_type = weight.type().with_sizes(indices_shape + [_get_tensor_dim_size(weight, 1)])\n output.setType(output_type)\n return output\n\n register_custom_op_symbolic('::embedding', embedding, _onnx_opset_version)\n\n @parse_args('v', 'v', 'v', 'i', 'v')\n def cross_entropy_loss(g, self, target, weight, reduction, ignore_index):\n # reduction: 0->none, 1->mean, 2->sum\n reduction = sym_help._maybe_get_const(reduction, 'i')\n reduction_vals = ['none', 'mean', 'sum']\n reduction = reduction_vals[reduction]\n output, log_prob = g.op(\"com.microsoft::SoftmaxCrossEntropyLossInternal\",\n self, target, weight, ignore_index,\n reduction_s=reduction, outputs=2)\n output.setType(self.type())\n log_prob.setType(self.type())\n return output\n\n register_custom_op_symbolic('::cross_entropy_loss', cross_entropy_loss, _onnx_opset_version)\n\n @parse_args('v', 'v', 'v', 'i', 'v')\n def nll_loss(g, self, target, weight, reduction, ignore_index):\n # reduction: 0->none, 1->mean, 2->sum\n reduction = sym_help._maybe_get_const(reduction, 'i')\n reduction_vals = ['none', 'mean', 'sum']\n reduction = reduction_vals[reduction]\n output = g.op(\"com.microsoft::NegativeLogLikelihoodLossInternal\",\n self, target, weight, ignore_index, reduction_s=reduction)\n output.setType(self.type())\n return output\n\n register_custom_op_symbolic('::nll_loss', nll_loss, _onnx_opset_version)\n\n @parse_args('v', 'is', 'is', 'is', 'is', 'b')\n def max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode):\n custom_attributes_json = (\n '{'\n f'\"kernel_size\":{str(kernel_size)},'\n f'\"stride\":{str(stride)},'\n f'\"padding\":{str(padding)},'\n f'\"dilation\":{str(dilation)},'\n f'\"ceil_mode\":{str(ceil_mode).lower()}'\n '}'\n )\n return g.op(\"com.microsoft::ATenOp\", self, name_s='aten::max_pool2d_with_indices',\n custom_attributes_json_s=custom_attributes_json, outputs=2)[0]\n\n register_custom_op_symbolic('::max_pool2d', max_pool2d, _onnx_opset_version)\n\n @parse_args('v', 'i', 'i', 'i')\n def unfold(g, input, dimension, size, step):\n custom_attributes_json = (\n '{'\n f'\"dimension\":{str(dimension)},'\n f'\"size\":{str(size)},'\n f'\"step\":{str(step)}'\n '}'\n )\n return g.op(\"com.microsoft::ATenOp\", input, 
name_s='aten::unfold',\n custom_attributes_json_s=custom_attributes_json)\n\n register_custom_op_symbolic('::unfold', unfold, _onnx_opset_version)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates input_files and input_classes for testing. The same tif file is used for all samples.
def create_sample_data(num_files): filename = "/home/timhu/test_tif/l8_median_india_vis_500x500_402382.0.tif" possible_classes = list(range(16)) input_files = np.empty((num_files,), dtype=object) input_labels = np.zeros((num_files,), dtype=np.int64) for f in range(num_files): input_files[f] = filename input_labels[f] = random.choice(possible_classes) return input_files, input_labels
[ "def generate_testfiles(self):\n print(\"Opening files...\")\n data = self.open_test_files()\n print(\"Assemble and concat...\")\n testdata, labels = self.assemble_and_concat(**data)\n print(\"Removing nans and saving...\")\n self.remove_nans(testdata, labels)\n data = None\n labels = None", "def create_test_generators(self):\n \n test_datagen = ImageDataGenerator( \n preprocessing_function = self.preprocess) \n\n\n test_generator = test_datagen.flow_from_directory(\n self.test_path,\n target_size=(self.image_size,self.image_size),\n batch_size= self.batch_size,\n class_mode='categorical', \n shuffle=False) # keep data in same order as labels\n \n return test_generator", "def create_input(path):\n folder = path\n files = os.listdir(folder)\n x = []\n y = []\n image_paths = []\n scaler = MinMaxScaler(feature_range=(-0.1, 1.175))\n #noramlized as in LeCun, makes the mean input roughly 0 and the variance roughly 1.\n #This accelerates learning.\n for i, images in sorted(enumerate(files)):\n label = images[0:2] #class identifier is in these positions\n image_path = folder + '/' + images\n image_paths.append(image_path)\n image_read = cv2.imread(image_path, 0)\n resize = cv2.resize(image_read, (32, 32), interpolation=cv2.INTER_CUBIC)\n X_new = scaler.fit_transform(resize)\n x.append(X_new)\n y.append(int(label))\n X = np.array(x)\n n, m, p = X.shape\n x_aux = []\n for example in X:\n for row in example:\n for element in row:\n x_aux.append([element])\n x_aux = np.array(x_aux)\n x_aux = np.reshape(x_aux, (n, 32, 32, 1))\n return x_aux, y, image_paths", "def create_svm_input_files(matrix_cc, matrix_s2n, matrix_tt, labels,\n train_or_test):\n N = [1, 5, 10, 20, 50] + [n for n in range(100, 1000, 100)] + \\\n [n for n in range(1000, 21000, 1000)]\n for n in N:\n # max index in original Dexter data was 19999, thus n-1\n matrix_cc_chopped = matrix_cc[:, range(0, n-1)]\n matrix_s2n_chopped = matrix_s2n[:, range(0, n-1)]\n matrix_tt_chopped = matrix_tt[:, range(0, n-1)]\n svm_input_file(matrix_cc_chopped, labels, 'cc', n, train_or_test)\n svm_input_file(matrix_s2n_chopped, labels, 's2n', n, train_or_test)\n svm_input_file(matrix_tt_chopped, labels, 'tt', n, train_or_test)\n print('Created' , train_or_test, 'files for', n, 'features')", "def test_file_creation(Nfiles):\n command = ('python specFit/demo/demo_preprocess_tiff.py '\n '--processed_dir {} --raw_dir {} --Nfiles {} --Nx {} --Ny {} --spectra_type {}')\\\n .format(processed_dir(Nfiles), raw_dir(Nfiles), Nfiles, Nx, Ny, spectra_type)\n os.system(command)\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'dataCube.npy'))\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'timestamps.npy'))\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'exposures.npy'))\n assert os.path.exists(os.path.join(processed_dir(Nfiles), 'visual.npy'))", "def get_multipleInputFiles(self):\n \n # Attach whether we have a dummy input file \n self.multiple_input_files = self.simulation.multiple_input_files\n self.input_files = None\n self.dummy_input_file = None\n \n # If we have a multiple input files, attach the input files to the <path> object \n if self.multiple_input_files==True:\n \n # Get the input files corresponding to a similar simulation with different (kx,ky)\n self.input_files = self.simulation.input_files; self.paths = []\n self.input_files = [i for i in self.input_files if pathlib.Path(str(i).replace(\".in\", \"_kx0.0.in\")) not in self.input_files]\n\n # Create dummy path objects for each input file \n for input_file in 
self.input_files: \n self.paths.append(create_dummyPathObject(input_file, \"/not/used\"))\n \n # For each input file, remember the modes inside\n for path in self.paths: \n path.dummy_input_file = None\n nakx, naky = read_numberOfModesFromInputFile(path.input_file)\n kx, ky = read_modeFromInputFile(path.input_file)\n path.nakxnaky = nakx*naky\n path.kx = kx \n path.ky = ky\n if path.nakxnaky==1:\n path.dim_kx = 1\n path.dim_ky = 1\n path.vec_kx = [kx]\n path.vec_ky = [ky]\n if path.nakxnaky>1 or \"_dummy.in\" in str(path.input_file):\n with h5py.File(path.dimensions, 'r') as f: \n path.dim_kx = f[\"dim_kx\"][()] \n path.dim_ky = f[\"dim_ky\"][()] \n path.vec_kx = f[\"vec_kx\"][()] \n path.vec_ky = f[\"vec_ky\"][()] \n \n # For each input file, remember if it is part of a dummy input file\n for input_file in self.input_files: \n if \"_dummy.in\" in str(input_file):\n dummy_input_files = read_inputFilesInDummyInputFile(input_file) \n for path in self.paths: \n if path.input_file in dummy_input_files: path.dummy_input_file = input_file \n return", "def multiclass_dataset(train_files,test_files,\n label=0.9,bias=1.,\n scale_min=0., scale_max=1.,scale_prop=\"local\",\n feature_key=\"features\",target_key=\"target_midi\"):\n # read all features\n features = []\n targets = []\n feature = []\n target = []\n for file in train_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n feature = []\n target = []\n for file in test_files:\n data = shelve.open(file)\n print file,\"feature shape:\", data[feature_key].shape\n feature.append(data[feature_key])\n target.append(data[target_key])\n data.close()\n features.append(feature)\n targets.append(target)\n \n # make data preprocessing\n data_preprocessing(features,bias,scale_min,scale_max,0,scale_prop)\n\n # make targets\n \n # check how many pitch classes we have\n all_keys = []\n for el in targets[0]:\n all_keys += el.tolist()\n for el in targets[1]:\n all_keys += el.tolist()\n classes = list(set(all_keys))\n classes.sort()\n print \"classes:\", classes\n print \"nr classes:\",len(classes)\n \n # make (binary) target data\n cl_targets = []\n targ = []\n for piece in targets[0]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n targ.append(target)\n cl_targets.append(targ)\n targ = []\n for piece in targets[1]:\n target = np.ones((len(piece), len(classes))) * (-1)*label\n for n in range(len(piece)):\n ind = classes.index( piece[n] )\n target[n,ind] = label\n targ.append(target)\n cl_targets.append(targ)\n \n # make train and test data\n trainin = features[0]\n testin = features[1]\n trainout = cl_targets[0]\n testout = cl_targets[1]\n\n return trainin, trainout, testin, testout", "def load_animals(num_train_ex_per_class=300,\n num_test_ex_per_class=100,\n num_valid_ex_per_class=0,\n classes=None):\n\n num_channels = 3\n img_size = 299\n\n # The 2 if-else statements are just creating npz filename strings\n # that contains:\n # name of class\n # number of training instances\n # number of testing instances\n # number of validation instances\n if num_valid_ex_per_class == 0:\n valid_str = ''\n else:\n valid_str = '_valid-%s' % num_valid_ex_per_class\n\n if classes is None:\n classes = ['dog', 'cat', 'bird', 'fish', 'horse',\n 'monkey', 'zebra', 'panda', 'lemur', 'wombat']\n data_filename 
= os.path.join(BASE_DIR,\n 'dataset_train-%s_test-%s%s.npz'\n % (num_train_ex_per_class,\n num_test_ex_per_class,\n valid_str))\n else:\n data_filename = os.path.join(BASE_DIR,\n 'dataset_%s_train-%s_test-%s%s.npz'\n % ('-'.join(classes),\n num_train_ex_per_class,\n num_test_ex_per_class,\n valid_str))\n\n num_classes = len(classes)\n num_train_examples = num_train_ex_per_class * num_classes\n num_test_examples = num_test_ex_per_class * num_classes\n num_valid_examples = num_valid_ex_per_class * num_classes\n num_ex_per_class = num_train_ex_per_class+num_valid_ex_per_class+num_test_ex_per_class\n num_examples = num_train_examples + num_test_examples + num_valid_examples\n\n\n if os.path.exists(data_filename):\n print ('Loading data from pre-existed .npz file...')\n f = np.load(data_filename)\n X_train = f['X_train']\n X_test = f['X_test']\n Y_train = f['Y_train']\n Y_test = f['Y_test']\n X_valid = f['X_valid'] if 'X_valid' in f else None\n Y_valid = f['Y_valid'] if 'Y_valid' in f else None\n\n else:\n print('Creating .npz file from raw images...')\n # initialization\n X = np.zeros([num_examples, img_size, img_size, num_channels])\n Y = np.zeros([num_examples])\n\n for class_idx, class_string in enumerate(classes):\n print('class: %s' % class_string)\n i = 0\n num_filled = 0\n # no. of images that has been loaded in X_train\n while num_filled < num_ex_per_class:\n img_path = os.path.join(BASE_DIR,\n '%s/%s_%s.JPEG'%(class_string,\n class_string,\n i))\n if os.path.exists(img_path):\n fill(X,\n Y,\n num_filled+(num_ex_per_class*class_idx),\n class_idx,\n img_path,\n img_size)\n num_filled += 1\n i += 1\n\n X, Y = shuffle(X,Y)\n\n X_train = X[0:num_train_examples,...]\n Y_train = Y[0:num_train_examples, ...]\n X_test = X[num_train_examples:num_train_examples+num_test_examples,...]\n Y_test = Y[num_train_examples:num_train_examples+num_test_examples,...]\n X_valid = X[num_train_examples+num_test_examples:-1,...]\n Y_valid = Y[num_train_examples+num_test_examples:-1,...]\n\n # preprocess input with Inception V3 config\n X_train = preprocess_input(X_train)\n X_test = preprocess_input(X_test)\n X_valid = preprocess_input(X_valid)\n\n np.savez_compressed(data_filename,\n X_train=X_train,\n Y_train=Y_train,\n X_test=X_test,\n Y_test=Y_test,\n X_valid=X_valid,\n Y_valid=Y_valid)\n\n train = DataSet(X_train, Y_train) # see def of DataSet in influence/dataset\n test = DataSet(X_test, Y_test)\n validation = DataSet(X_valid, Y_valid) if X_valid and Y_valid else None\n\n # base: base utilities of tensorflow for loading datasets\n return base.Datasets(train=train, validation=validation, test=test)", "def create_point_cloud_dataset(data_dir, num_points_per_cloud=1024):\n\n train_pc = [] # array of training point clouds\n test_pc = [] # array of test point clouds\n\n train_labels = [] # array of corresponding training labels\n test_labels = [] # array of corresponding test labels\n\n class_ids = {} # list of class names\n\n # get all the folders except the readme file\n folders = glob.glob(os.path.join(data_dir, \"[!README]*\"))\n\n for class_id, folder in enumerate(folders):\n print(\"processing class: {}\".format(os.path.basename(folder)))\n\n # TODO: Fill this part, get the name of the folder (class) and save it\n class_ids[class_id] = os.path.basename(folder)\n\n # get the files in the train folder\n train_files = glob.glob(os.path.join(folder, \"train/*\"))\n for f in train_files:\n # TODO: Fill this part\n points = trimesh.sample.sample_surface(trimesh.load(f), num_points_per_cloud)[0]\n 
train_pc.append(points)\n train_labels.append(class_id)\n # get the files in the test folder\n test_files = glob.glob(os.path.join(folder, \"test/*\"))\n for f in test_files:\n # TODO: FIll this part\n points = trimesh.sample.sample_surface(trimesh.load(f), num_points_per_cloud)[0]\n test_pc.append(points)\n test_labels.append(class_id)\n\n encoded_train_labels = []\n for idx, label in enumerate(train_labels):\n one_hot = np.zeros(10)\n one_hot[label] = 1.\n encoded_train_labels.append(one_hot)\n encoded_train_labels = np.array(encoded_train_labels)\n\n encoded_test_labels = []\n for idx, label in enumerate(test_labels):\n one_hot = np.zeros(10)\n one_hot[label] = 1.\n encoded_test_labels.append(one_hot)\n encoded_test_labels = np.array(encoded_test_labels)\n\n return (np.array(train_pc), np.array(test_pc),\n np.array(encoded_train_labels), np.array(encoded_test_labels), class_ids)", "def process_test():\n\n test_entry = unpickle(test_file)\n test_dataset = test_entry[b'data']\n test_targets = test_entry[b'fine_labels']\n test_dataset = np.vstack(test_dataset).reshape(-1, 3, 32, 32)\n test_dataset = test_dataset.transpose((0, 2, 3, 1)) \n\n root_path = data_dir + '/cifar100/test/'\n for counter, item in enumerate(test_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = test_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"test_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)", "def testGenerator(test_path, num_image=30, target_size=(256, 256), flag_multi_class=False, as_gray=True):\n assert len(glob.glob(os.path.join(test_path,\"*.png\"))) <= num_image, \"num_image need to be smaller than test image in current test_path\"\n for i in range(num_image):\n img = io.imread(os.path.join(test_path, \"%d.png\" % i), as_gray=as_gray)\n img = img / 255\n img = trans.resize(img, target_size)\n img = np.reshape(img, img.shape + (1,)) if (not flag_multi_class) else img\n img = np.reshape(img, (1,) + img.shape)\n yield img", "def createDataFiles(self):\n\n a_patterns = self.readDataFile()\n self.randomizeAndWriteTrainAndTest(a_patterns)", "def get_keras_tiff_generator( X_folder, Y_folder, batch_size ):\r\n X_files = sorted(glob(os.path.join(X_folder,'*.tif'),recursive=True)) + sorted(glob(os.path.join(X_folder,'*.tiff'),recursive=True))\r\n Y_files = sorted(glob(os.path.join(Y_folder,'*.tif'),recursive=True)) + sorted(glob(os.path.join(Y_folder,'*.tiff'),recursive=True))\r\n\r\n print('keras tiff generator found {} files for X and {} files for Y'.format(len(X_files),len(Y_files)))\r\n\r\n return SimpleKerasGenerator( X_files, Y_files, batch_size )", "def create_train_test_dfs():\n\n labels = pd.read_csv('dataset/jc_input.txt')\n labels.head()\n logger.info(f\"Shape of the dataset: {labels.shape}\")\n\n features = np.loadtxt('dataset/selected_feature.txt', dtype=str, delimiter='\\n')\n logger.info(f\"Number of features in features.txt: {len(features)}\")\n\n # ### Change the column names in dataset. 
Repelace pipe with space.\n # This is done to match the column names specified in the selected_features.txt\n columns_with_pipe = list(filter(lambda x: re.match('.*\\|.*', x), labels.columns))\n len(columns_with_pipe)\n columns_replaced_pipe = set(map(lambda x: x.replace('|', ' '), labels.columns))\n\n len(columns_replaced_pipe.intersection(set(features)))\n\n # ### List of columns containing the labels\n # Sorted alphabetically\n label_columns = sorted(list(columns_replaced_pipe.intersection(set(features))))\n all_columns = [\"#Attr 266\", \"Name\", \"Price\"] + label_columns\n\n updated_labels_df = labels.copy()\n\n # ### Stores the mapping of renamed_cols to original_cols\n\n renamed_to_orig_cols_dict = dict([(col, col.replace('|', ' ')) for col in updated_labels_df.columns])\n\n updated_labels_df.rename(renamed_to_orig_cols_dict, axis=1, inplace=True)\n\n updated_labels_df = updated_labels_df[all_columns]\n\n updated_labels_df.shape\n\n # ### Update the column name \"#Attr 266\" to filename\n updated_labels_df.rename({'#Attr 266': \"filename\"}, axis=1, inplace=True)\n updated_labels_df[\"filename\"] = updated_labels_df[\"filename\"].apply(lambda x: str(x) + \".jpg\")\n\n training_list = [f.name for f in pathlib.Path(TRAIN_IMAGE_PATH).glob('*.jpg')]\n testing_list = [f.name for f in pathlib.Path(TEST_IMAGE_PATH).glob('*.jpg')]\n\n # In[29]:\n\n training_fname = list(set(updated_labels_df.filename.values).intersection(set(training_list)))\n testing_fname = list(set(updated_labels_df.filename.values).intersection(set(testing_list)))\n logger.info(f\"Number of training files with attributes: {len(training_fname)}\")\n logger.info(f\"Number of testing files with attributes: {len(testing_fname)}\")\n\n # ### Remove duplicates\n\n # In[30]:\n\n # Identify duplicate files\n updated_labels_df.filename.value_counts()\n\n # In[31]:\n\n # 4 files with name '& ress.jpg' are duplicated\n to_remove_index = updated_labels_df[(updated_labels_df.filename == '& ress.jpg')].index\n\n # In[32]:\n\n updated_labels_df.drop(to_remove_index, inplace=True)\n\n # ### Remove nans\n rows_with_nan = updated_labels_df[updated_labels_df[label_columns].isna().sum(axis=1) > 0].index\n logger.info(f\"Number of rows with NaN: {len(rows_with_nan)}\")\n updated_labels_df.drop(rows_with_nan, inplace=True)\n updated_labels_df.shape\n\n # Check for further nans\n sum(updated_labels_df[label_columns].isna().sum() > 0)\n # ### Update the Nan name attribute with blank_string\n updated_labels_df.Name.fillna(\"\", inplace=True)\n\n # ### Train test split\n # - update index to dataframe to file name\n # - perform a set intersection of df index and training_fname from the training images directory\n # - perform a set intersection of df index and testing_fname from the testing images directory\n # - create separate dfs - training_df and testing_df\n\n updated_labels_df.set_index(updated_labels_df.filename, inplace=True)\n\n train_index = set(updated_labels_df.index).intersection(set(training_fname))\n test_index = set(updated_labels_df.index).intersection(set(testing_fname))\n\n # ### Training and testing df\n training_df = updated_labels_df.loc[list(train_index)]\n testing_df = updated_labels_df.loc[list(test_index)]\n\n logger.info(f\"Training df: {training_df.shape}\")\n logger.info(f\"Testing df: {testing_df.shape}\")\n\n return training_df, testing_df, label_columns", "def create_samples(cls, n_images, ext='png', test=False, delete=True):\n _dataset_path = Path(_base_path + cls.name)\n if test:\n origin_path = _dataset_path / 
'test'\n destin_path = _dataset_path / 'test_sample'\n else:\n origin_path = _dataset_path / 'train'\n destin_path = _dataset_path / 'train_sample'\n\n # all_ims = list(origin_path.glob(f'*.{ext}'))\n all_ims = list(origin_path.glob('**/*.*'))\n\n if float(n_images).is_integer():\n assert (n_images < len(all_ims)) and (n_images > 0), f\"Can't take {n_images} samples from {len(all_ims)} train or test images\"\n else:\n assert (n_images < 1) and (n_images > 0), f\"Can't take a fraction of {n_images} images. Fraction must be >0 or <1\"\n n_images = int(len(all_ims) * n_images)\n sample_imgs = np.random.choice(all_ims, n_images, replace=False)\n\n if destin_path.exists():\n shutil.rmtree(str(destin_path))\n destin_path.mkdir()\n for f in sample_imgs:\n if f.parts[-2] == 'train': # MNIST\n shutil.copy(str(f), str(destin_path / f.name))\n else: # FaschionMNIST, imagenette2\n (destin_path / f.parts[-2]).mkdir(exist_ok=True)\n shutil.copy(str(f), str(destin_path / f.parts[-2] / f.name))\n\n print(f\"Created {n_images} images in {destin_path}\")", "def createTrainAndValFiles(self):\n\n a_patterns = self.readDataFile()\n self.randomizeAndWriteTrainAndVal(a_patterns)", "def generate_dataset_images(input_TxtPth, out_ImgsPth):\n # Check whether the input txt files path exists or not.\n if not os.path.exists(input_TxtPth):\n print (input_TxtPth + ' is not a valid path')\n exit(-1)\n # Count the total number of *.txt files in the input_TxtPth\n file_counter = total_files_counter(input_TxtPth, '.txt')\n # Check whether there are txt files avaialable in the input path\n if not file_counter:\n print (input_TxtPth + ' contains no txt files')\n exit(-1)\n # Output directories for the raw thermal, grayscale and heatmap images\n heatmapImgsDir = os.path.join(out_ImgsPth, 'heatmaps')\n thermalImgsDir = os.path.join(out_ImgsPth, 'thermals')\n grayImgsDir = os.path.join(out_ImgsPth, 'grays')\n # Check if heatmaps directory is created or not\n if not os.path.exists(heatmapImgsDir):\n os.makedirs(heatmapImgsDir)\n # Check if raw thermals directory is created or not\n if not os.path.exists(thermalImgsDir):\n os.makedirs(thermalImgsDir)\n # Check if gryascale images directory is created or not\n if not os.path.exists(grayImgsDir):\n os.makedirs(grayImgsDir)\n # Name index files iterator\n nameIdxIterator = 0\n # Loop over all the txt files to parse them with a prograss bar\n for txtFile in tqdm(walk_dir(input_TxtPth, '.txt'), total=file_counter, desc='Generating dataset images'):\n # Six digit incremental number for each image's name \n fileName = '%06d' % (nameIdxIterator)\n raw_thermal_img = parse_txt_dataValues(txtFile)\n grayscale_img = decode_as_grayscale(raw_thermal_img)\n heatmap_img = decode_as_heatmap(grayscale_img)\n # Write each image to its corresponding directory\n cv2.imwrite(os.path.join(thermalImgsDir, (fileName + '.png')), raw_thermal_img)\n cv2.imwrite(os.path.join(grayImgsDir, (fileName + '.png')), grayscale_img)\n cv2.imwrite(os.path.join(heatmapImgsDir, (fileName + '.png')), heatmap_img)\n nameIdxIterator += 1\n return thermalImgsDir, grayImgsDir, heatmapImgsDir", "def process_train():\n\n train_entry = unpickle(train_file)\n train_dataset = train_entry[b'data']\n train_targets = train_entry[b'fine_labels'] # will need to edit for coarse\n train_dataset = np.vstack(train_dataset).reshape(-1, 3, 32, 32)\n train_dataset = train_dataset.transpose((0, 2, 3, 1)) \n\n meta_entry = unpickle(meta_file)\n meta_entry[b'fine_label_names']\n\n root_path = data_dir + '/cifar100/train/'\n for counter, item in 
enumerate(train_targets):\n make_dir_if_no_exist(root_path+str(item))\n # write data\n img = train_dataset[counter]\n #bgr_image = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_RGB2BGR)\n file_path = root_path+str(item)+'/'+\"train_img_{0}.jpg\".format(str(counter))\n #print(file_path)\n # something breaks here\n #cv2.imwrite(file_path, bgr_image)\n imageio.imwrite(file_path, img)", "def inputFiles(self):\n pass", "def createDataSets(smilePath, nonSmilePath, dataSetSize, testingSplit):\n\n trainingLabels = []\n trainingSetFiles = []\n testingLabels = []\n testingSetFiles = []\n\n # transform all smiling pictures\n for root, dirs, files in os.walk(smilePath, True):\n i=0\n #static for loop\n for name in files:\n #all images\n #for name in files:\n if name.endswith(\".jpg\") and (i<(dataSetSize/2) or dataSetSize == -1):\n if random.randint(1, 100) > testingSplit:\n trainingSetFiles.append(os.path.join(root, name))\n trainingLabels.append(np.array([1,0], np.int32))\n else:\n testingSetFiles.append(os.path.join(root, name))\n testingLabels.append(np.array([1,0], np.int32))\n i=i+1\n\n # transform all non-smiling pictures\n #the non smiling pictures are added to a random position in the trainingSet and labels and the testingSet and labels\n #the sets and labelled where already created in the above for loop.\n for root, dirs, files in os.walk(nonSmilePath, True):\n k=0\n #all images\n #for name in files:\n #static for loop\n for name in files:\n if name.endswith(\".jpg\") and (k<(dataSetSize/2) or dataSetSize == -1):\n if random.randint(1, 100) > testingSplit:\n # insert to a random position to avoid overfitting\n insertPosition = random.randint(0, len(trainingLabels))\n trainingSetFiles.insert(insertPosition, os.path.join(root, name))\n trainingLabels.insert(insertPosition, np.array([0, 1], np.int32))\n else:\n # insert to a random position to avoid overfitting\n insertPosition = random.randint(0, len(trainingLabels))\n testingSetFiles.insert(insertPosition, os.path.join(root, name))\n testingLabels.insert(insertPosition, np.array([0, 1], np.int32))\n k=k+1\n\n return trainingSetFiles,trainingLabels,testingSetFiles,testingLabels\n #TODO: Needs to be explained better Side note: Only the file names of the training images are provided to reduce memory consumption." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the config based on values in 'config'
def set_config(self, config):
    for key in config.keys():
        self.config[key] = config[key]
[ "def set_config(**kwargs) -> None:\n _conf.update(kwargs)", "def apply_config(self, config):\n raise NotImplementedError", "def define_config(self, config: str) -> None:\n self.config = config", "def set_config():\n env = get_current_environment()\n app.config.from_object('server.config.{0}Config'.format(env))", "def update(self, config: dict):\n for key, value in config.items():\n setattr(self, key, value)", "def update_config():\n g.config = app.config", "def _setconf(self, directory, config):\n path = os.path.abspath(os.path.join(self.curdir, directory))\n self.configs[path] = config", "def set_config_strategy(self, config: IniConfiguration):\n self._config = config", "def set_config(self, value):\n try:\n self.validate(config=value)\n except (KeyError, ValueError) as e:\n print(\"Config not set, encountered error %s\" % e.msg)\n\n self.config = value", "def set_config(self, data: dict[str, str]) -> None:\n for key, value in data.items():\n if key not in self.config:\n raise CoreError(f\"unknown config: {key}\")\n self.custom_config[key] = value", "def set_config(self, config):\r\n todo = deque([self])\r\n while todo:\r\n node = todo.popleft()\r\n node.config = config\r\n todo.extend(node.iter_child_nodes())\r\n return self", "def _configure(self, *args, **kwargs):\n config_map = {\n \"chatport\" : self.config,\n \"protocol\" : self.config,\n \"invis\" : self.config,\n \"masterserver\" : self.__requester.config,\n \"basicserver\" : self.__requester.config,\n \"honver\" : self.__requester.config\n }\n \n for kwarg in kwargs:\n if kwarg in config_map:\n config_map[kwarg][kwarg] = kwargs[kwarg]", "def set(**kwargs): # pylint:disable=redefined-builtin\n try:\n _config = GlobalConfigManager.get_config_or_default()\n except Exception as e:\n Printer.print_error('Polyaxon load configuration.')\n Printer.print_error('Error message `{}`.'.format(e))\n Printer.print_header('You can reset your config by running: polyaxon config purge')\n sys.exit(1)\n\n for key, value in kwargs.items():\n if value is not None:\n setattr(_config, key, value)\n\n GlobalConfigManager.set_config(_config)\n Printer.print_success('Config was updated.')\n # Reset cli config\n CliConfigManager.purge()", "def load_config(self):\n for local_var, config_var in self.from_config.items():\n value = flask.current_app.config.get(config_var)\n if value:\n if \".\" in local_var:\n # this is a dotpath -- needs special handling\n body, tail = local_var.rsplit(\".\", 1)\n obj = getattrd(self, body)\n setattr(obj, tail, value)\n else:\n # just use a normal setattr call\n setattr(self, local_var, value)", "def set_config(self, key, value):\n self.update_config({key: value})", "def config():\n update_config_cli()", "def set_config(self, config):\n\n self._model_config = json_format.ParseDict(\n config, model_config_pb2.ModelConfig())", "def update_config(self):\n for key_name, entry in self.config.config.items():\n self.update_config_entry(key_name, entry)", "def set_config(self, *, configuration: NodeManagerConfig) -> None:" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downloads the blocklist specified by 'url' in the config
def download_list(self, url=None):
    def on_retrieve_data(data, current_length, total_length):
        if total_length:
            fp = float(current_length) / total_length
            if fp > 1.0:
                fp = 1.0
        else:
            fp = 0.0

        self.file_progress = fp

    import socket
    socket.setdefaulttimeout(self.config["timeout"])

    if not url:
        url = self.config["url"]

    headers = {}
    if self.config["last_update"] and not self.force_download:
        headers['If-Modified-Since'] = formatdate(self.config["last_update"], usegmt=True)

    log.debug("Attempting to download blocklist %s", url)
    log.debug("Sending headers: %s", headers)
    self.is_downloading = True
    return download_file(url, deluge.configmanager.get_config_dir("blocklist.download"), on_retrieve_data, headers)
[ "def downloadChunks(url):\n global download_list\n baseFile = os.path.basename(url)\n\n \n #move the file to a more uniq path\n\n os.umask(0002)\n\n temp_path = os.getcwd()\n\n try:\n\n file = baseFile\n if os.path.exists(file):\n print baseFile, \"already exists\"\n return file\n\n \n req = urllib2.urlopen(url)\n\n total_size = int(req.info().getheader('Content-Length').strip())\n\n downloaded = 0\n\n CHUNK = 256 * 10240\n\n with open(file, 'wb') as fp:\n\n while True:\n\n chunk = req.read(CHUNK)\n\n downloaded += len(chunk)\n\n print math.floor( (downloaded / total_size) * 100 )\n\n if not chunk: break\n\n fp.write(chunk)\n download_list.append(file)\n\n except urllib2.HTTPError, e:\n\n print \"HTTP Error:\",e.code , url\n\n return False\n\n except urllib2.URLError, e:\n\n print \"URL Error:\",e.reason , url\n\n return False\n\n print download_list \n return file", "def url(self, url=pythoncom.Empty):\r\n return _base._rsf.block_u_r_l(self._block._name, url)", "def downloadChunks(self,url,filename):\n\n baseFile = os.path.basename(url)\n\n #move the file to a more uniq path\n #os.umask(0002)\n #temp_path = \"/tmp/\"\n try:\n #file = os.path.join(temp_path,baseFile)\n file = \"./\"+filename\n req = urllib.urlopen(url)\n total_size = int(req.info().get('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print (math.floor((downloaded / total_size) * 100 ))\n if not chunk: break\n fp.write(chunk)\n except error.HTTPError:\n #print (\"HTTP Error:\", url)\n return False\n except error.URLError:\n #print (\"URL Error:\", url)\n return False\n\n return file", "def _thread_download(work_queue, result_blocks, url, timeout, disable_ssl_validation):\n\n # Initialise HTTP handle\n http_handle = httplib2.Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation)\n\n while True:\n work = work_queue.get()\n\n # Stop if receiving poison pill\n if work is None:\n return\n\n result_blocks[work[0]] = _get_block(http_handle, url, work[1], work[2])", "def test_cms_block_repository_v1_get_list_get(self):\n pass", "def download_show(self, url):", "def fetch():\n fetch_json() # fetch json files\n # format everything\n format_cot(COT)\n format_coinmetrics_data(ADDRESSES)\n format_coinmetrics_data(REALIZEDCAP)\n format_coinmetrics_data(PRICE)\n format_coinmetrics_data(MARKETCAP)\n format_coinmetrics_data(SUPPLY)\n format_fear_greed(FEAR_GREED)\n for info in DATA_INFO:\n if info['url'] == (BLOCKCHAIN_URL or COINMETRICS_URL):\n remove_zero_values(info['path'])", "def download_url():", "def downloadLinks(self):\n for item in self.linkList:\n self.currentDownloadProgress = 0\n sizeCompleted = 0\n \n if 'http' not in item:\n self.currentDownloadFile = self.urlName + item\n else:\n self.currentDownloadFile = item\n \n try:\n localFileName = self.downloadPath + '/' + urllib.unquote(item).split('/')[-1]\n \n urlInfo = self.getURLInfo(self.currentDownloadFile)\n if urlInfo['resumeSupport']: \n print(\"server file resume supported\")\n else:\n print(\"server file resume NOT supported\")\n \n if os.path.isfile(localFileName) and urlInfo['resumeSupport']:\n sizeCompleted = os.path.getsize(localFileName)\n if sizeCompleted >= int(urlInfo['fileSize']):\n self.downloadedFileCount += 1\n continue\n self.fd = open(localFileName, 'ab+')\n \n self.fd.seek(sizeCompleted)\n else:\n self.fd = open(localFileName, 'wb')\n \n request = urllib2.Request(self.currentDownloadFile)\n if 
urlInfo['resumeSupport']:\n request.headers['range'] = 'bytes=%s-' % (sizeCompleted)\n self.link = urllib2.urlopen(request)\n self.fileSize = int(urlInfo['fileSize'])\n self.currentDownloadProgress = int((sizeCompleted / float(self.fileSize)) * 100)\n self.currentDownloadSize = self.fileSize\n \n print('downloading %s [%d bytes]...' % (urllib.unquote(item), self.fileSize))\n \n while True:\n if self.task._flag_stop.is_set():\n self.fd.close()\n return ({'status':'success', 'response':{'error':'user stopped service'}})\n timeEnter = time.time()\n chunk = self.link.read(self.chunkSize)\n timeExit = time.time()\n self.currentDownloadSpeed = int((self.chunkSize) / ((timeExit - timeEnter) * 1024.0))\n if not chunk: \n break\n else:\n self.fd.write(chunk)\n sizeCompleted += self.chunkSize\n self.currentDownloadProgress = int((sizeCompleted / float(self.fileSize)) * 100)\n if self.currentDownloadProgress > 100: self.currentDownloadProgress = 100\n sys.stdout.write('\\r%3d%%' % (self.currentDownloadProgress))\n sys.stdout.flush()\n \n self.fd.close()\n self.downloadedFileCount += 1\n print(' (%d/%d) downloaded\\n' % (self.downloadedFileCount, self.totalFileCount))\n \n except Exception as e:\n continue\n #return ({'status':'error', 'response':{'error':'%s' % str(e)}})\n return ({'status':'success', 'response':{'file_count':'%d' % self.downloadedFileCount}})", "def downloadData(url):\n content = urllib2.urlopen(url)\n return content", "async def fetch_in_executor(self, url: str) -> Tuple[Any, List[Any]]:\n if sys.version_info < (3, 7):\n loop = asyncio.get_event_loop() # keep for Python 3.6 compatibility\n else:\n loop = asyncio.get_running_loop() # pylint: disable=no-member\n log.info(\"Crawl: %s\", url)\n return await loop.run_in_executor(None, htmllistparse.fetch_listing, url, 30)", "def downloader(url):\n\n try:\n chunk_size = 1024 #number of bytes to be read each time\n\n r = requests.get(url, stream=True)\n\n total_size = int(r.headers['content-length'])\n\n filename = url.split('/')[-1].split('?')[0]\n\n\n with open(filename, 'wb') as f:\n for data in tqdm(iterable=r.iter_content(chunk_size=chunk_size), total= total_size / chunk_size, unit='KB'):\n f.write(data)\n\n print(\"Download Completed\")\n\n f.close()\n\n except:\n\n print(\"Error, file can not be downloaded at: \" + url)", "def import_list(self, blocklist):\n def on_read_ip_range(start, end):\n \"\"\"Add ip range to blocklist\"\"\"\n self.blocklist.add_rule(start, end, BLOCK_RANGE)\n self.num_blocked += 1\n\n def on_finish_read(result):\n \"\"\"Add blocklist to session\"\"\"\n self.core.session.set_ip_filter(self.blocklist)\n return result\n\n # TODO: double check logic\n if self.up_to_date and self.has_imported:\n log.debug(\"Latest blocklist is already imported\")\n return defer.succeed(blocklist)\n\n self.is_importing = True\n self.num_blocked = 0\n self.blocklist = self.core.session.get_ip_filter()\n \n if not blocklist:\n blocklist = self.filename\n\n if not self.reader:\n self.auto_detect(blocklist)\n self.auto_detected = True\n\n log.debug(\"Importing using reader: %s\", self.reader)\n log.debug(\"Reader type: %s compression: %s\", self.config[\"list_type\"], self.config[\"list_compression\"])\n d = threads.deferToThread(self.reader(blocklist).read, on_read_ip_range)\n d.addCallback(on_finish_read)\n\n return d", "def load_blocks(self, verbose=True):\n nblocks = len(blocklist)\n # blocks = []\n if nblocks == 0:\n # add bdsim blocks folder\n blockpath = [Path(__file__).parent / 'blocks']\n\n # add RTB and MVTB if they exist\n 
try:\n import roboticstoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n try:\n import machinvevisiontoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n # blocklist = []\n\n # path = os.getenv('BDSIMPATH')\n # if path is not None:\n # for p in path.split(':'):\n # blockpath.append(Path(p)) \n \n if verbose:\n print('Loading blocks:')\n\n blocks = []\n for path in blockpath: # for each folder on the path\n if not path.exists():\n print(f\"WARNING: path does not exist: {path}\")\n continue\n for file in path.iterdir(): # for each file in the folder\n blocks_this_file = []\n\n # scan every file *.py to find block definitions\n # a block is a class that subclasses Source, Sink, Function, Transfer and\n # has an @block decorator.\n #\n # The decorator adds the classes to a global variable blocklist in the\n # component module's namespace.\n if not file.name.startswith('test_') and not file.name.startswith('__') and file.name.endswith('.py'):\n # valid python module, import it\n try:\n # module = importlib.import_module('.' + file.stem, package='bdsim.blocks')\n spec = importlib.util.spec_from_file_location(file.name, file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n # module = importlib.import_module('.' + file.stem, package='bdsim.blocks')\n except SyntaxError:\n print(f\"-- syntax error in block definition file: {file}\")\n\n for cls in module.__dict__.values():\n if not inspect.isclass(cls) or \\\n inspect.getmro(cls)[-2].__name__ != 'Block' or \\\n not cls.__module__.startswith(file.name):\n continue\n\n # we have a block class candidate\n if cls.blockclass in ('source', 'transfer', 'function'):\n # must have an output function\n valid = hasattr(cls, 'output') and \\\n callable(cls.output) and \\\n len(inspect.signature(cls.output).parameters) == 2\n if not valid:\n raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n if cls.blockclass == 'sink':\n # must have a step function with at least one\n # parameter: step(self [,state])\n valid = hasattr(cls, 'step') and \\\n callable(cls.step) and \\\n len(inspect.signature(cls.step).parameters) >= 1\n if not valid:\n raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n\n blocks_this_file.append(blockname(cls))\n blocks.append(block(blockname(cls), cls, file))\n\n if verbose and len(blocks_this_file) > 0:\n print(' loaded {:d} blocks from {:s}: {:s}'.format(\n len(blocks_this_file),\n str(file),\n ', '.join(b for b in blocks_this_file)))\n \n\n # # components.blocklist grows with every block import\n # if len(blocklist) > nblocks:\n # # we just loaded some more blocks\n # if verbose:\n # print(' loading blocks from {:s}: {:s}'.format(str(file), ', '.join([blockname(cls) for cls in blocklist[nblocks:]])))\n \n # # perform basic sanity checks on the blocks just read\n # for cls in blocklist[nblocks:]:\n # print(cls)\n # if cls.blockclass in ('source', 'transfer', 'function'):\n # # must have an output function\n # valid = hasattr(cls, 'output') and \\\n # callable(cls.output) and \\\n # len(inspect.signature(cls.output).parameters) == 2\n # if not valid:\n # raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n # if cls.blockclass == 'sink':\n # # must have a step function with at least one\n # # parameter: step(self [,state])\n # valid = hasattr(cls, 'step') and \\\n # callable(cls.step) and \\\n # 
len(inspect.signature(cls.step).parameters) >= 1\n # if not valid:\n # raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n \n # blocks.append(block(blockname(cls), cls, file))\n\n # nblocks = len(blocklist)\n\n return blocks", "def download_files(directory, url_list):\n\n for url in url_list:\n file = directory + url.split(\"/\", -1)[-1]\n try:\n urlreq.urlretrieve(url, file)\n except URLError as e:\n print(e)", "def download_multiple_file(*url):\r\n # from multiprocessing.pool import ThreadPool\r\n # NU MERGE\r\n path, url = url\r\n r = requests.get(url, stream=True)\r\n with open(path, 'wb') as f:\r\n for ch in r:\r\n f.write(ch)", "def fetch_blockchain():\n get_chain_address = f\"{CONNECTED_NODE_ADDRESS}/chain\"\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n chain_meta = json.loads(response.content)\n chain_content = []\n # TODO\n for block in chain_meta[\"chain\"]:\n chain_content.append(block)\n\n global blocks_to_show\n blocks_to_show = sorted(chain_content, key=lambda block: block['timestamp'],reverse=True)", "def fetch_config(url):\n return DocumentConfig.parse_obj(requests.get(url).json())", "def get_image_links(self, url):\n soup = self.get_page(url)\n blks = soup.find_all(\"ul\", class_='large-image-blocks')\n\n link_queue = Queue.Queue()\n err_queue = Queue.Queue()\n threads = []\n\n for blk in blks:\n for tag in blk.find_all(\"a\"):\n link = tag.attrs['href']\n if link.rfind(\"album\") != -1:\n break\n thread = LinkDownloadThread(str(link), link_queue, err_queue,\n self.logger)\n thread.start()\n\n threads.append(thread)\n if self.maximum is not None:\n self.maximum -= 1\n\n if self.maximum == 0:\n break\n\n if self.maximum == 0:\n break\n\n for thread in threads:\n thread.join()\n\n if not err_queue.empty():\n raise err_queue.get()\n\n return list(link_queue.queue)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Imports the downloaded blocklist into the session
def import_list(self, blocklist):
    def on_read_ip_range(start, end):
        """Add ip range to blocklist"""
        self.blocklist.add_rule(start, end, BLOCK_RANGE)
        self.num_blocked += 1

    def on_finish_read(result):
        """Add blocklist to session"""
        self.core.session.set_ip_filter(self.blocklist)
        return result

    # TODO: double check logic
    if self.up_to_date and self.has_imported:
        log.debug("Latest blocklist is already imported")
        return defer.succeed(blocklist)

    self.is_importing = True
    self.num_blocked = 0
    self.blocklist = self.core.session.get_ip_filter()

    if not blocklist:
        blocklist = self.filename

    if not self.reader:
        self.auto_detect(blocklist)
        self.auto_detected = True

    log.debug("Importing using reader: %s", self.reader)
    log.debug("Reader type: %s compression: %s", self.config["list_type"], self.config["list_compression"])
    d = threads.deferToThread(self.reader(blocklist).read, on_read_ip_range)
    d.addCallback(on_finish_read)

    return d
[ "def load_block_table():\n global UCDBlocks\n f = open(os.path.join(os.path.dirname(__file__), BLOCK_FILE), 'rb')\n UCDBlocks = load(f)\n f.close()", "def download_list(self, url=None):\n def on_retrieve_data(data, current_length, total_length):\n if total_length:\n fp = float(current_length) / total_length\n if fp > 1.0:\n fp = 1.0\n else:\n fp = 0.0\n\n self.file_progress = fp\n\n import socket\n socket.setdefaulttimeout(self.config[\"timeout\"])\n\n if not url:\n url = self.config[\"url\"]\n\n headers = {}\n if self.config[\"last_update\"] and not self.force_download:\n headers['If-Modified-Since'] = formatdate(self.config[\"last_update\"], usegmt=True)\n\n log.debug(\"Attempting to download blocklist %s\", url)\n log.debug(\"Sending headers: %s\", headers)\n self.is_downloading = True\n return download_file(url, deluge.configmanager.get_config_dir(\"blocklist.download\"), on_retrieve_data, headers)", "def _load_blocks(self, filename):\n contents = unpack(filename)\n blocks = contents['BlockTypesData']\n for block in blocks:\n if block is None:\n continue\n block_id = block['id']\n items = self._session.query(Item).filter_by(id=block_id).all()\n if len(items) == 0:\n print('no item matches block id \"{}\"'.format(block_id))\n try:\n item = items[0]\n if self._args.verbose:\n print('processing block \"{}\"'.format(item.name()))\n item.prestige = block['prestige']\n item.build_xp = block['buildXP']\n item.mine_xp = block['mineXP']\n except IndexError:\n if self._args.verbose:\n print('no item for block id={}'.format(block_id))\n\n self._session.commit()", "def load_blocks(self, verbose=True):\n nblocks = len(blocklist)\n # blocks = []\n if nblocks == 0:\n # add bdsim blocks folder\n blockpath = [Path(__file__).parent / 'blocks']\n\n # add RTB and MVTB if they exist\n try:\n import roboticstoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n try:\n import machinvevisiontoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n # blocklist = []\n\n # path = os.getenv('BDSIMPATH')\n # if path is not None:\n # for p in path.split(':'):\n # blockpath.append(Path(p)) \n \n if verbose:\n print('Loading blocks:')\n\n blocks = []\n for path in blockpath: # for each folder on the path\n if not path.exists():\n print(f\"WARNING: path does not exist: {path}\")\n continue\n for file in path.iterdir(): # for each file in the folder\n blocks_this_file = []\n\n # scan every file *.py to find block definitions\n # a block is a class that subclasses Source, Sink, Function, Transfer and\n # has an @block decorator.\n #\n # The decorator adds the classes to a global variable blocklist in the\n # component module's namespace.\n if not file.name.startswith('test_') and not file.name.startswith('__') and file.name.endswith('.py'):\n # valid python module, import it\n try:\n # module = importlib.import_module('.' + file.stem, package='bdsim.blocks')\n spec = importlib.util.spec_from_file_location(file.name, file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n # module = importlib.import_module('.' 
+ file.stem, package='bdsim.blocks')\n except SyntaxError:\n print(f\"-- syntax error in block definition file: {file}\")\n\n for cls in module.__dict__.values():\n if not inspect.isclass(cls) or \\\n inspect.getmro(cls)[-2].__name__ != 'Block' or \\\n not cls.__module__.startswith(file.name):\n continue\n\n # we have a block class candidate\n if cls.blockclass in ('source', 'transfer', 'function'):\n # must have an output function\n valid = hasattr(cls, 'output') and \\\n callable(cls.output) and \\\n len(inspect.signature(cls.output).parameters) == 2\n if not valid:\n raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n if cls.blockclass == 'sink':\n # must have a step function with at least one\n # parameter: step(self [,state])\n valid = hasattr(cls, 'step') and \\\n callable(cls.step) and \\\n len(inspect.signature(cls.step).parameters) >= 1\n if not valid:\n raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n\n blocks_this_file.append(blockname(cls))\n blocks.append(block(blockname(cls), cls, file))\n\n if verbose and len(blocks_this_file) > 0:\n print(' loaded {:d} blocks from {:s}: {:s}'.format(\n len(blocks_this_file),\n str(file),\n ', '.join(b for b in blocks_this_file)))\n \n\n # # components.blocklist grows with every block import\n # if len(blocklist) > nblocks:\n # # we just loaded some more blocks\n # if verbose:\n # print(' loading blocks from {:s}: {:s}'.format(str(file), ', '.join([blockname(cls) for cls in blocklist[nblocks:]])))\n \n # # perform basic sanity checks on the blocks just read\n # for cls in blocklist[nblocks:]:\n # print(cls)\n # if cls.blockclass in ('source', 'transfer', 'function'):\n # # must have an output function\n # valid = hasattr(cls, 'output') and \\\n # callable(cls.output) and \\\n # len(inspect.signature(cls.output).parameters) == 2\n # if not valid:\n # raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n # if cls.blockclass == 'sink':\n # # must have a step function with at least one\n # # parameter: step(self [,state])\n # valid = hasattr(cls, 'step') and \\\n # callable(cls.step) and \\\n # len(inspect.signature(cls.step).parameters) >= 1\n # if not valid:\n # raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n \n # blocks.append(block(blockname(cls), cls, file))\n\n # nblocks = len(blocklist)\n\n return blocks", "def load_data(self):\n try:\n with open('_blockchain.txt', mode='r') as f:\n #file_content = pickle.loads(f.read())\n file_content = f.readlines()\n print(file_content)\n #blockchain = file_content['chain']\n #open_transactions = file_content['ot']\n blockchain = json.loads(file_content[0][:-1])\n updated_blockchain = []\n for block in blockchain:\n converted_tx = [Transaction(tx['sender'], tx['recipient'], tx['amount']) for tx in block['transactions']]\n updated_block = Block(block['index'], block['previous_hash'], converted_tx, block['proof'], block['timestamp'])\n updated_blockchain.append(updated_block)\n self.chain = updated_blockchain\n open_transactions = json.loads(file_content[1])\n updated_transactions = []\n for tx in open_transactions:\n updated_transaction = Transaction(tx['sender'], tx['recipient'], tx['amount'])\n updated_transactions.append(updated_transaction)\n self.__open_transactions = updated_transactions\n except (IOError, IndexError):\n pass\n finally:\n print('Success!')", "def load_list(self):\n # Load List\n # This is a list that must be created of all the incident ids 
you want to update. Currently,\n # the target column to be updated is the 3rd column.\n self.driver.get(self.list_url)", "def import_blocks(ctx, file):\n app = EthApp(ctx.obj['config'])\n DBService.register_with_app(app)\n AccountsService.register_with_app(app)\n ChainService.register_with_app(app)\n chain = app.services.chain\n assert chain.block_queue.empty()\n\n data = file.read()\n app.start()\n\n def blocks():\n \"\"\"Generator for blocks encoded in `data`.\"\"\"\n i = 0\n while i < len(data):\n try:\n block_data, next_i = rlp.codec.consume_item(data, i)\n except rlp.DecodingError:\n log.fatal('invalid RLP encoding', byte_index=i)\n sys.exit(1) # have to abort as we don't know where to continue\n try:\n if not isinstance(block_data, list) or len(block_data) != 3:\n raise rlp.DeserializationError('', block_data)\n yield eth_protocol.TransientBlock.init_from_rlp(block_data)\n except (IndexError, rlp.DeserializationError):\n log.warning('not a valid block', byte_index=i) # we can still continue\n yield None\n i = next_i\n\n log.info('importing blocks')\n # check if it makes sense to go through all blocks\n first_block = next(blocks())\n if first_block is None:\n log.fatal('first block invalid')\n sys.exit(1)\n if not (chain.knows_block(first_block.header.hash) or\n chain.knows_block(first_block.header.prevhash)):\n log.fatal('unlinked chains', newest_known_block=chain.chain.head.number,\n first_unknown_block=first_block.header.number)\n sys.exit(1)\n\n # import all blocks\n for n, block in enumerate(blocks()):\n if block is None:\n log.warning('skipping block', number_in_file=n)\n continue\n log.debug('adding block to queue', number_in_file=n, number_in_chain=block.header.number)\n app.services.chain.add_block(block, None) # None for proto\n\n # let block processing finish\n while not app.services.chain.block_queue.empty():\n gevent.sleep()\n app.stop()\n log.info('import finished', head_number=app.services.chain.chain.head.number)", "def get_blocklists() -> Dict[str, List[str]]:\n bl1 = os.path.join(BASE_PATH, 'ta/t-s-blocklist.json')\n bl2 = os.path.join(BASE_PATH, 'hta/s-t-blocklist.json')\n with locked_file(bl1) as f:\n ts_bl = json.load(f)\n\n with locked_file(bl2) as f:\n st_bl = json.load(f)\n\n for ta in st_bl:\n if ta not in ts_bl:\n ts_bl[ta] = []\n\n ts_bl[ta].extend(st_bl[ta])\n\n for ta in ts_bl:\n ts_bl[ta] = list(set(ts_bl[ta]))\n\n return ts_bl", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if address in blocklist:\n return []\n if verbose:\n print(address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting at', startblock, 'with', len(transactions))\n # add new transactions\n transactions.extend(self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs))\n # dedupe\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "def test_add_blocklist(mocker):\n mocker.patch(\"TrendMicroVisionOneV3.Client.http_request\", add_remove_blocklist_mock_response)\n client = Client(\"https://apimock-dev.trendmicro.com\", api_key, proxy, verify)\n args = {\n \"valueType\": \"fileSha1\",\n \"targetValue\": 
\"2de5c1125d5f991842727ed8ea8b5fda0ffa249b\",\n \"description\": \"block info\",\n }\n result = add_or_remove_from_block_list(client, \"trendmicro-visionone-add-to-block-list\", args)\n assert result.outputs[\"status\"] == 202\n assert result.outputs[\"taskId\"] == \"00000001\"\n assert result.outputs_prefix == \"VisionOne.BlockList\"\n assert result.outputs_key_field == \"taskId\"", "def load_saved_list(self):\r\n saved_list = self.config.dict_config[\"data\"][\"saved_list\"].strip()\r\n list_from_config = []\r\n if saved_list:\r\n list_from_config = saved_list.split(\"\\n\")\r\n #\r\n self.ordered_set_selected = ordered_set.OrderedSet(list_from_config)\r\n #\r\n self.sync_lists()", "def import_content_blocks(block_ids, action, analyst):\n ret = {\n 'successes': 0,\n 'failures': [],\n 'status': False,\n 'msg': ''\n }\n tlos = {\n 'Certificate': [],\n 'Domain': [],\n 'Email': [],\n 'Event': [],\n 'Indicator': [],\n 'IP': [],\n 'PCAP': [],\n 'RawData': [],\n 'Sample': [],\n }\n\n if not block_ids:\n return {'status': False, 'msg': 'No content was selected for import'}\n\n method = \"TAXII Import\"\n blocks = taxii.TaxiiContent.objects(id__in=block_ids)\n\n tsvc = get_config('taxii_service')\n hdr_events = tsvc['header_events']\n tsrvs = tsvc.taxii_servers\n pids = {}\n\n for block in blocks:\n source = \"\"\n reference = block.taxii_msg_id\n timestamp = block.timestamp\n data = block.content\n\n for svr in tsrvs:\n if tsrvs[svr].get('hostname') == block.hostname:\n for feed in tsrvs[svr]['feeds']:\n if tsrvs[svr]['feeds'][feed]['feedname'] == block.feed:\n feed_cfg = tsrvs[svr]['feeds'][feed]\n source = feed_cfg['source']\n default_ci = (feed_cfg.get('def_conf', 'unknown'),\n feed_cfg.get('def_impact', 'unknown'))\n break\n if source:\n break\n else:\n try:\n source = block.hostname.split(' - Source: ')[1]\n except:\n source = block.hostname\n default_ci = ('unknown', 'unknown')\n\n objs = import_standards_doc(data, analyst, method, reference,\n hdr_events, default_ci, source)\n\n if not objs['success']:\n ret['failures'].append((objs['reason'],\n 'STIX Package'))\n block.import_failed = True\n block.errors.append('STIX Package: %s' % objs['reason'])\n\n for sid in objs['imported']:\n ret['successes'] += 1\n tlo_meta = objs['imported'][sid]\n tlos.setdefault(tlo_meta[0], []).append((tlo_meta[1],\n tlo_meta[2]))\n\n for k in objs['failed']:\n ret['failures'].append(k)\n block.import_failed = True\n block.errors.append('%s: %s' % (k[1], k[0]))\n\n if block.import_failed:\n block.save()\n else:\n block.delete()\n\n pids[block.poll_time] = 1 # save unique poll timestamps\n\n if action == \"import_delete\":\n taxii.TaxiiContent.objects(poll_time__in=pids.keys(), errors=[]).delete()\n\n ret.update(tlos) # add the TLO lists to the return dict\n\n ret['status'] = True\n\n return ret", "def get_lockdown_data_and_ingest_into_sentinel(self):\n self.state = StateManager(\n connection_string=self.connection_string, file_path=\"lockdown\"\n )\n hashed_events_list = self.get_checkpoint_snapshot()\n self.pull_and_push_the_snapshot_data(\n LOCKDOWN_ENDPOINT, self.lockdown_table_name, hashed_events_list, HASH_FIELD_LIST\n )", "def _loadlists (self) :\n trainlist = set ([])\n with open (self.trainlist) as ifp :\n for fid in ifp :\n trainlist.add (fid.strip ())\n self.trainlist = trainlist\n\n devlist = set ([])\n with open (self.devlist) as ifp :\n for fid in ifp :\n devlist.add (fid.strip ())\n self.devlist = devlist\n\n testlist = set ([])\n with open (self.testlist) as ifp :\n for fid in ifp :\n 
testlist.add (fid.strip ())\n self.testlist = testlist\n\n return", "def load_pull(self):\n file_path = os.path.join(self.script_dir,'pull list.json') \n if not os.path.isfile(file_path)or os.path.getsize(file_path) == 0 :\n with open(file_path,'w') as out:\n json.dump({},out)\n\n with open(file_path) as infile:\n self.pull_list = json.load(infile)", "def import_from_persistent_storage(cls, contract_id, state_hash, persistent_replica) :\n\n storage_service_client = StorageServiceClient(persistent_replica)\n block_manager = pblocks.local_block_manager()\n pulled_blocks = pblocks.sync_block_store(storage_service_client, block_manager, state_hash)\n logger.debug(\"imported %d new blocks from persistent storage service\", pulled_blocks)\n\n return cls.read_from_cache(contract_id, state_hash)", "def get_available_blocks(self, ip, port, filename):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n query = ZapTorrentProtocolResponse(response_type='inventory?',\n filename=filename)\n msg = query.as_response()\n sock.connect((ip, int(port)))\n self.send_to_socket(sock, msg)\n results = sock.recv(54000)\n if len(results) == 0:\n raise RuntimeError(\"socket closed remotely\")\n parser = ZapTorrentProtocolParser(results)\n parser.parse()\n sock.close()\n return [block['id'] for block in parser.get_blocks()]", "async def refresh(self):\n # TODO: to be taken care of, no refresh atm between blocks\n try:\n if self.refreshing:\n return\n self.refreshing = True\n await self.config.LatestBlock.block_checker()\n if self.block_factory:\n self.last_block_time = int(self.block_factory.time)\n self.block_factory = await self.create_block(\n await self.get_pending_transactions(),\n self.config.public_key,\n self.config.private_key,\n index=self.config.LatestBlock.block.index + 1,\n )\n self.block_factory.header = self.block_factory.generate_header()\n self.refreshing = False\n except Exception:\n self.refreshing = False\n from traceback import format_exc\n\n self.app_log.error(\"Exception {} mp.refresh\".format(format_exc()))\n raise", "def load_fishlist():\n\n print(\"Fish List\")\n\n FishList.query.delete()\n\n \"\"\"Load user's fish lists into database.\"\"\"\n\n for row in open(\"fishlist_data.txt\"):\n row = row.rstrip()\n user_id, fish_id = row.split(\",\")\n\n list_fish = FishList(user_id = user_id.strip(),\n fish_id = fish_id.strip())\n\n db.session.add(list_fish)\n \n db.session.commit()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add ip range to blocklist
def on_read_ip_range(start, end):
    self.blocklist.add_rule(start, end, BLOCK_RANGE)
    self.num_blocked += 1
[ "def iprange(start_ip, end_ip):\n queue = Queue.Queue()\n ip_range = []\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n tmp = start\n \n ip_range.append(start_ip)\n while tmp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if tmp[i] == 256:\n tmp[i] = 0\n tmp[i-1] += 1\n ip_range.append(\".\".join(map(str, tmp)))\n \n for add in ip_range:\n queue.put(add)\n return queue", "def AddFwAddressRange(self, name, start_ip, end_ip, associated_interface='', comment=''):\n name = str(name)\n start_ip = str(start_ip)\n end_ip = str(end_ip)\n associated_interface = str(associated_interface)\n payload = {'json':\n {\n 'name': name,\n 'type': 'iprange',\n 'start-ip': start_ip,\n 'end-ip': end_ip,\n 'associated-interface': associated_interface,\n 'comment': comment\n }\n }\n return self.ApiAdd('cmdb/firewall/address/', payload)", "def ip_range_set(self, range_name, ip_range_start, ip_range_end):\n if range_name in self.ip_ranges:\n raise error.DevopsError(\n \"Setting IP range '{0}' for address pool '{1}' failed: range \"\n \"already exists\".format(range_name, self.name))\n self.ip_ranges[range_name] = (ip_range_start, ip_range_end)\n self.save()", "def AddFwIPpool(self, name, startip, endip, type_pool='overload', internal_startip='0.0.0.0',\n internal_endip='0.0.0.0', arp_reply='enable', block_size='128', num_blocks_per_user='8',\n comment=''):\n name = str(name)\n startip = str(startip)\n endip = str(endip)\n payload = {'json':\n {\n 'name': name,\n 'startip': startip,\n 'endip': endip,\n 'type': type_pool,\n 'source-startip': internal_startip,\n 'source-endip': internal_endip,\n 'arp-reply': arp_reply,\n 'block-size': block_size,\n 'num-blocks-per-user': num_blocks_per_user,\n 'comments': comment\n }\n }\n return self.ApiAdd('cmdb/firewall/ippool/', payload)", "def gen_ip_range(start, end):\n while True:\n yield start\n if start != end:\n start = increment_ip(start)\n else:\n break", "async def blacklist_add(self, ctx: commands.Context, *, ip):\r\n async with self.config.blacklisted() as data:\r\n data.append(ip)\r\n await ctx.tick()", "def _config_ip_range(self, network, setting, start_offset=None,\n end_offset=None, count=None):\n ip_range = self.settings_obj[network].get(setting)\n interface = self.settings_obj[network].get('bridged_interface')\n\n if not ip_range:\n cidr = self.settings_obj[network].get('cidr')\n ip_range = ip_utils.get_ip_range(start_offset=start_offset,\n end_offset=end_offset,\n count=count,\n cidr=cidr,\n interface=interface)\n self.settings_obj[network][setting] = ip_range\n\n logging.info(\"{}_{}: {}\".format(network, setting, ip_range))", "def allocate_range(self):\n start = self._server._edges[0]\n finish = int(self._server._edges[0] + (self._cpu_cores ** 10) / 2)\n self._range_start_finish = [start, finish]\n self._server._edges[0] = finish", "def AddIpAndRangeArgsForCreate(parser, with_private_nat=False):\n if with_private_nat:\n ACTIVE_IPS_ARG_OPTIONAL.AddArgument(parser, cust_metavar='IP_ADDRESS')\n ACTIVE_RANGES_ARG.AddArgument(parser, cust_metavar='SUBNETWORK')\n else:\n ACTIVE_IPS_ARG_REQUIRED.AddArgument(parser, cust_metavar='IP_ADDRESS')", "def _add_custom_range(addrange, table_locator):\n s2l = ui_lib.get_s2l()\n ui_lib.wait_for_element_and_input_text(FusionSettingsPage.ID_INPUT_ADD_CUSTOM_RANGE_FROM, addrange.rangefrom)\n ui_lib.wait_for_element_and_input_text(FusionSettingsPage.ID_INPUT_ADD_CUSTOM_RANGE_TO, addrange.to)\n ui_lib.wait_for_element_and_input_text(FusionSettingsPage.ID_INPUT_ADD_CUSTOM_RANGE_COUNT, 
addrange.count)\n\n # check for errors, if any !! click cancel and proceed for the next add custom WWN\n actual_xpath_count = len(s2l._element_find(FusionSettingsPage.ID_LABEL_ERROR_ADD_CUSTOM, False, False))\n if int(actual_xpath_count) != 0:\n logger._warn(\" custom range add failed as there is error while filling info, Custom range from : '%s'\" % addrange.rangefrom)\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_BTN_CANCEL_MAC_CUSTOM_ADD)\n s2l.capture_page_screenshot()\n return False\n else:\n # No Error, Clicking on add\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_BTN_ADD_CUSTOM_RANGE)\n if ui_lib.wait_for_element_visible(FusionSettingsPage.ID_LABEL_CUSTOM_ADD_OVERLAPPING_ERROR):\n ui_lib.wait_for_element_and_click(FusionSettingsPage.ID_BTN_CANCEL_MAC_CUSTOM_ADD)\n logger._warn(\" Cannot create overlapping pool. Please select a different range, CURRENT RANGE PASSED - '%s'\" % addrange.rangefrom)\n s2l.capture_page_screenshot()\n return False\n else:\n # check for custom range in table after addition.\n from_element = s2l._table_element_finder.find_by_content(s2l._current_browser(), table_locator, addrange.rangefrom)\n to_element = s2l._table_element_finder.find_by_content(s2l._current_browser(), table_locator, addrange.to)\n\n if from_element is None or to_element is None:\n logger._warn(\" Custom Range not reflecting in table, range starting from %s\" % addrange.rangefrom)\n s2l.capture_page_screenshot()\n return False\n else:\n logger._log_to_console_and_log_file(\"Added CUSTOM RANGE starting from %s\" % addrange.rangefrom)\n return True", "def allocate_subnet(self, request):", "def block():\n\n class RouteExistsError(Exception):\n \"\"\"Exception for when trying to insert a route that already exists.\"\"\"\n pass\n\n routing_table = get_routingtable()\n blocklist = db_read(DB_FILE)\n\n # process the WHITELIST entries\n whitelisted = []\n for entry in WHITELIST:\n if '/' in entry:\n # assume it's a network\n whitelisted.append(ipaddress.ip_network(entry))\n else:\n # single IP address\n whitelisted.append(ipaddress.ip_address(entry))\n\n # add IPs from logfile to our blocklist\n for ip_addr, attempts in getfailed_logins(SOURCE_LOG).items():\n # ignore addresses configured in WHITELIST\n skip = False\n ip_obj = ipaddress.ip_address(ip_addr)\n for item in whitelisted:\n if isinstance(item, (ipaddress.IPv4Address,\n ipaddress.IPv6Address)):\n if ip_obj == item:\n print(\"IP from Logfile ({}) is whitelisted\".format(ip_obj))\n skip = True\n break\n\n elif isinstance(item, (ipaddress.IPv4Network,\n ipaddress.IPv6Network)):\n if ip_obj in item:\n print(\"IP from Logfile ({}) is whitelisted via network {}\"\n .format(ip_obj, item))\n skip = True\n break\n\n # we found a whitelisted address; skip processing it\n if skip:\n continue\n\n if ip_addr in blocklist:\n # ignore ip addresses from log file if already in our blockist\n continue\n\n if len(attempts) >= BAN_THRESHOLD:\n blocklist[ip_addr] = datetime.strftime(datetime.now(),\n \"%Y %b %d %H:%M:%S\")\n else:\n if VERBOSE:\n print(\"{} number of connection attempts below threshold\"\n .format(ip_addr),\n \"({}<{}). 
Not blocking.\"\n .format(len(attempts), BAN_THRESHOLD))\n\n # then iterate over the IPs in the resulting blocklist and create routes\n for ip_addr in blocklist:\n try:\n for route in routing_table:\n if ip_addr in route:\n raise RouteExistsError(ip_addr)\n if VERBOSE:\n print(\"Blocking IP (blocklist)\" + ip_addr)\n blackhole(\"add\", ip_addr)\n except RouteExistsError as err:\n if VERBOSE:\n print(str(err) + \" is already blackholed\")\n # finally save the block list in its current state\n db_store(blocklist)", "def add_range(self, start, end, company_type=CID):\n if start <= 0 or end < start:\n raise ValueError('not a valid range: {} - {}'.format(start, end))\n self._iters[company_type].append(range(start, end + 1))\n return self", "def range_usage(ip_start, ip_end, ip_type, get_objects=True):\n istart, iend, ipf_q = start_end_filter(ip_start, ip_end, ip_type)\n\n def get_ip(rec):\n return two_to_one(rec.ip_upper, rec.ip_lower)\n\n lists = [sorted(AddressRecord.objects.filter(ipf_q), key=get_ip),\n sorted(PTR.objects.filter(ipf_q), key=get_ip),\n sorted(StaticInterface.objects.filter(ipf_q), key=get_ip)]\n\n free_ranges = []\n\n def cmp_ip_upper_lower(a, b):\n if a.ip_upper > b.ip_upper:\n return a\n elif a.ip_upper < b.ip_upper:\n return b\n elif a.ip_lower > b.ip_lower:\n return a\n elif a.ip_lower < b.ip_lower:\n return b\n else:\n return a # redundant, maybe?\n\n unused = 0\n minimum_i = 0\n rel_start = int(istart)\n end = int(iend)\n\n # This is translated directly from a recursive implementation.\n while True:\n if rel_start > end:\n break\n lists = [l for l in lists if l]\n if not lists:\n free_ranges.append((rel_start, end))\n unused += end - rel_start + 1\n break\n\n min_list = min(lists, key=lambda x: two_to_one(x[0].ip_upper,\n x[0].ip_lower))\n\n minimum = min_list[0]\n minimum_i = two_to_one(minimum.ip_upper, minimum.ip_lower)\n unused += minimum_i - rel_start\n if minimum_i != rel_start:\n free_ranges.append((rel_start, minimum_i - 1))\n\n for l in lists:\n while (l and l[0].ip_upper == minimum.ip_upper and\n l[0].ip_lower == minimum.ip_lower):\n l.pop(0)\n\n rel_start = minimum_i + 1\n\n return {\n 'unused': unused,\n 'used': int(iend) - int(istart) - unused + 1,\n 'free_ranges': free_ranges,\n }", "def gen_ip_range_from_cidr(cidr):\n return gen_ip_range(*cidr_to_ip_range(cidr))", "def reserve_ipblock(self, ipblock):\n properties = {\n \"name\": ipblock.name\n }\n\n if ipblock.location:\n properties['location'] = ipblock.location\n\n if ipblock.size:\n properties['size'] = str(ipblock.size)\n\n raw = {\n \"properties\": properties,\n }\n\n data = self._underscore_to_camelcase(json.dumps(raw))\n\n response = self._perform_request(\n url='/ipblocks', method='POST', data=data)\n\n return response", "def ParseInterfaceRanges(self):\n ranges = Session.ExecCommand(\"show configuration interfaces | display set | match interface-range\")\n for line in [l.lower().strip() for l in ranges.splitlines()] :\n try:\n words = line.split(\" \")\n if \"interface-range\" in line :\n if \" member-range \" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE member-range ge-0/0/0 to ge-0/0/41\n # add ranges\n rangeName = words[3]\n fromInterfaceName = words[5]\n toInterfaceName = words[7]\n # find if already a defined range\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n foundRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n else:\n newRange = InterfaceRange(rangeName)\n 
newRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n self.InterfaceRanges.append(newRange) \n elif \" member \" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE member ge-0/0/0\n # add ranges\n rangeName = words[3]\n fromInterfaceName = words[5]\n toInterfaceName = words[5]\n # find if already a defined range\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n foundRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n else:\n newRange = InterfaceRange(rangeName)\n newRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n self.InterfaceRanges.append(newRange) \n else :\n rangeName = words[3]\n # find a defined range (should aready be in the list)\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n # set interface properties for ranges\n if \"interface-mode\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching interface-mode access\n foundRange.portMode = words[len(words) - 1] \n elif \"port-mode\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching interface-mode access\n foundRange.portMode = words[len(words) - 1] \n elif \"vlan members\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching vlan members Corp-Access\n foundRange.vlanMembers.append(words[len(words) - 1])\n else:\n raise Exception(\"Interface range name <{0}> definition is missing\".format(rangeName))\n \n except Exception as Ex:\n message = \"JunOS Router Module Error : could not parse an interface range for line <{0}>. Error is : {1} \".format(line, str(Ex))\n DebugEx.WriteLine(message) \n \n pass", "def test_ip_addresses_list(self):\n pass", "def add_range(self, a, z):\n # our implementation assumes that codepoint is used in\n # comparisons\n a = force_text(a)\n z = force_text(z)\n if z < a:\n x = z\n z = a\n a = x\n if self.ranges:\n match_a, index_a = self._bisection_search(a, 0,\n len(self.ranges) - 1)\n match_z, index_z = self._bisection_search(z, 0,\n len(self.ranges) - 1)\n if match_a:\n if match_z:\n # Both ends of the new range are already matched\n if index_a == index_z:\n # Nothing to do\n return\n else:\n # We need to join the ranges from index_a to and\n # including index_z\n self.ranges[index_a:index_z + 1] = [\n [self.ranges[index_a][0], self.ranges[index_z][1]]]\n else:\n # Note that at this point, index_z must be > index_a\n # We need to join the ranges from index_a up to but\n # *not* including index_z extending the last range to\n # include z\n self.ranges[\n index_a:index_z] = [[self.ranges[index_a][0], z]]\n elif match_z:\n # We need to join the ranges from index_a up to and\n # including index_z extending the first range to include\n # a (works even if index_a==index_z)\n self.ranges[\n index_a:index_z + 1] = [[a, self.ranges[index_z][1]]]\n else:\n # We need to join the ranges from index_a to index_z-1,\n # extending them to include a and z respectively. Note\n # that if index_a==index_z then no ranges are joined and\n # the slice assignment simply inserts a new range.\n self.ranges[index_a:index_z] = [[a, z]]\n self._merge(index_a)\n else:\n self.ranges = [[a, z]]\n self._clear_cache()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to autodetect the blocklist type
def auto_detect(self, blocklist):
    self.config["list_compression"] = detect_compression(blocklist)
    self.config["list_type"] = detect_format(blocklist, self.config["list_compression"])
    log.debug("Auto-detected type: %s compression: %s", self.config["list_type"], self.config["list_compression"])
    if not self.config["list_type"]:
        self.config["list_compression"] = ""
        raise UnknownFormatError
    else:
        self.reader = create_reader(self.config["list_type"], self.config["list_compression"])
[ "def get_block_types(self) -> list:\n \n block_types = list(self._config_per_block_type().keys())\n if len(block_types) > 1 and 'generic' in block_types:\n block_types.remove('generic')\n return block_types", "def inspectblocktype(self, block_type):\n try:\n # try via header\n return self.data.header.has_block_type(block_type)\n except ValueError:\n # header does not have the information because nif version is\n # too old\n return True", "def infer_block_types(self, shape, allow_plain=True):\n block_types = tuple(\n 'Macro' if size == self.block_size('Macro') else\n 'Meso' if size == self.block_size('Meso') else\n 'Micro' if size == self.block_size('Micro') else\n 'Plain'\n for size in shape )\n if not allow_plain and 'Plain' in block_types:\n error_str = (\"\\n\\nAt least one dimension is not compatible \"\n \"with the population sizes;\\n\"\n \"Shape: {};\\n\".format(shape))\n # TODO: Use dictionary interface to `block_size` when implemented\n # to avoid hard-coding block names\n block_size_str = ''.join([\" {}: {}\\n\".format(blockname, self.block_size(blockname))\n for blockname in ['Macro', 'Meso', 'Micro']])\n raise ShapeError(error_str + \"Block sizes:\\n\" + block_size_str)\n return block_types", "async def list_types():\n async with get_client() as client:\n block_types = await client.read_block_types()\n\n table = Table(\n title=\"Block Types\",\n show_lines=True,\n )\n\n table.add_column(\"Block Type Slug\", style=\"italic cyan\", no_wrap=True)\n table.add_column(\"Description\", style=\"blue\", no_wrap=False, justify=\"left\")\n table.add_column(\n \"Generate creation link\", style=\"italic cyan\", no_wrap=False, justify=\"left\"\n )\n\n for blocktype in sorted(block_types, key=lambda x: x.name):\n table.add_row(\n str(blocktype.slug),\n (\n str(blocktype.description.splitlines()[0].partition(\".\")[0])\n if blocktype.description is not None\n else \"\"\n ),\n f\"prefect block create {blocktype.slug}\",\n )\n\n app.console.print(table)", "def get_block(block_type, **kargs):\n if block_type == 'ca':\n return ClassBlock(**kargs)", "def get_block_type_names(self):\n res = []\n for blk in self.blocks:\n blk_id = blk.blk_id\n name = hunk_names[blk_id]\n res.append(name)\n return res", "def get_blocks(self) -> Dict[str, List[str]]:\n if self.block_type == \"s2\":\n return self.get_s2_blocks()\n elif self.block_type == \"original\":\n return self.get_original_blocks()\n else:\n raise Exception(f\"Unknown block type: {self.block_type}\")", "def get_list_type(param_type):\n if str(param_type).find('[str]') != -1:\n return str\n\n if str(param_type).find('[int]') != -1:\n return int\n\n if str(param_type).find('[float]') != -1:\n return float\n\n if str(param_type).find('[bool]') != -1:\n return bool\n\n return str", "async def update_block_type(\n block_type: schemas.actions.BlockTypeUpdate,\n block_type_id: UUID = Path(..., description=\"The block type ID\", alias=\"id\"),\n db: PrefectDBInterface = Depends(provide_database_interface),\n):\n async with db.session_context(begin_transaction=True) as session:\n db_block_type = await models.block_types.read_block_type(\n session=session, block_type_id=block_type_id\n )\n if db_block_type is None:\n raise HTTPException(\n status.HTTP_404_NOT_FOUND, detail=\"Block type not found\"\n )\n\n # Only update the block type if there is any meaningful changes.\n # This avoids deadlocks when creating multiple blocks of the same type.\n # This check happens client side, but we do it server side as well\n # to accommodate older clients.\n if 
_should_update_block_type(\n block_type, schemas.core.BlockType.from_orm(db_block_type)\n ):\n await models.block_types.update_block_type(\n session=session, block_type=block_type, block_type_id=block_type_id\n )", "async def read_block_type_by_id(\n block_type_id: UUID = Path(..., description=\"The block type ID\", alias=\"id\"),\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> schemas.core.BlockType:\n async with db.session_context() as session:\n block_type = await models.block_types.read_block_type(\n session=session, block_type_id=block_type_id\n )\n if not block_type:\n raise HTTPException(status.HTTP_404_NOT_FOUND, detail=\"Block type not found\")\n return block_type", "def test_tool_types_list(self):\n pass", "async def create_block_type(\n block_type: schemas.actions.BlockTypeCreate,\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> schemas.core.BlockType:\n # API-created blocks cannot start with the word \"Prefect\"\n # as it is reserved for system use\n if block_type.name.lower().startswith(\"prefect\"):\n raise HTTPException(\n status.HTTP_403_FORBIDDEN,\n detail=\"Block type names beginning with 'Prefect' are reserved.\",\n )\n try:\n async with db.session_context(begin_transaction=True) as session:\n created_block_type = await models.block_types.create_block_type(\n session, block_type=block_type\n )\n except sa.exc.IntegrityError:\n raise HTTPException(\n status.HTTP_409_CONFLICT,\n detail=f'Block type with name \"{block_type.name}\" already exists',\n )\n return created_block_type", "def loader_for_type(self, ctype):\n for loadee, mimes in Mimer.TYPES.iteritems():\n for mime in mimes:\n if ctype.startswith(mime):\n return loadee", "def load_blocks(self, verbose=True):\n nblocks = len(blocklist)\n # blocks = []\n if nblocks == 0:\n # add bdsim blocks folder\n blockpath = [Path(__file__).parent / 'blocks']\n\n # add RTB and MVTB if they exist\n try:\n import roboticstoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n try:\n import machinvevisiontoolbox.blocks as pkg\n blockpath.append(Path(pkg.__path__[0]))\n except ImportError:\n pass\n # blocklist = []\n\n # path = os.getenv('BDSIMPATH')\n # if path is not None:\n # for p in path.split(':'):\n # blockpath.append(Path(p)) \n \n if verbose:\n print('Loading blocks:')\n\n blocks = []\n for path in blockpath: # for each folder on the path\n if not path.exists():\n print(f\"WARNING: path does not exist: {path}\")\n continue\n for file in path.iterdir(): # for each file in the folder\n blocks_this_file = []\n\n # scan every file *.py to find block definitions\n # a block is a class that subclasses Source, Sink, Function, Transfer and\n # has an @block decorator.\n #\n # The decorator adds the classes to a global variable blocklist in the\n # component module's namespace.\n if not file.name.startswith('test_') and not file.name.startswith('__') and file.name.endswith('.py'):\n # valid python module, import it\n try:\n # module = importlib.import_module('.' + file.stem, package='bdsim.blocks')\n spec = importlib.util.spec_from_file_location(file.name, file)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n # module = importlib.import_module('.' 
+ file.stem, package='bdsim.blocks')\n except SyntaxError:\n print(f\"-- syntax error in block definition file: {file}\")\n\n for cls in module.__dict__.values():\n if not inspect.isclass(cls) or \\\n inspect.getmro(cls)[-2].__name__ != 'Block' or \\\n not cls.__module__.startswith(file.name):\n continue\n\n # we have a block class candidate\n if cls.blockclass in ('source', 'transfer', 'function'):\n # must have an output function\n valid = hasattr(cls, 'output') and \\\n callable(cls.output) and \\\n len(inspect.signature(cls.output).parameters) == 2\n if not valid:\n raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n if cls.blockclass == 'sink':\n # must have a step function with at least one\n # parameter: step(self [,state])\n valid = hasattr(cls, 'step') and \\\n callable(cls.step) and \\\n len(inspect.signature(cls.step).parameters) >= 1\n if not valid:\n raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n\n blocks_this_file.append(blockname(cls))\n blocks.append(block(blockname(cls), cls, file))\n\n if verbose and len(blocks_this_file) > 0:\n print(' loaded {:d} blocks from {:s}: {:s}'.format(\n len(blocks_this_file),\n str(file),\n ', '.join(b for b in blocks_this_file)))\n \n\n # # components.blocklist grows with every block import\n # if len(blocklist) > nblocks:\n # # we just loaded some more blocks\n # if verbose:\n # print(' loading blocks from {:s}: {:s}'.format(str(file), ', '.join([blockname(cls) for cls in blocklist[nblocks:]])))\n \n # # perform basic sanity checks on the blocks just read\n # for cls in blocklist[nblocks:]:\n # print(cls)\n # if cls.blockclass in ('source', 'transfer', 'function'):\n # # must have an output function\n # valid = hasattr(cls, 'output') and \\\n # callable(cls.output) and \\\n # len(inspect.signature(cls.output).parameters) == 2\n # if not valid:\n # raise ImportError('class {:s} has missing/improper output method'.format(str(cls)))\n \n # if cls.blockclass == 'sink':\n # # must have a step function with at least one\n # # parameter: step(self [,state])\n # valid = hasattr(cls, 'step') and \\\n # callable(cls.step) and \\\n # len(inspect.signature(cls.step).parameters) >= 1\n # if not valid:\n # raise ImportError('class {:s} has missing/improper step method'.format(str(cls)))\n \n # blocks.append(block(blockname(cls), cls, file))\n\n # nblocks = len(blocklist)\n\n return blocks", "def determine_type(self, value):\n for available_type, _ in self.SERIALIZERS:\n klass = getattr(types,'%sType' % available_type)\n if isinstance(value, klass):\n return available_type\n return None", "def create_block(self, blocktype, b_obj = None):\n try:\n block = getattr(NifFormat, blocktype)()\n except AttributeError:\n raise NifExportError(\n \"'%s': Unknown block type (this is probably a bug).\"\n % blocktype)\n return self.register_block(block, b_obj)", "async def read_block_types(\n block_types: Optional[schemas.filters.BlockTypeFilter] = None,\n block_schemas: Optional[schemas.filters.BlockSchemaFilter] = None,\n limit: int = dependencies.LimitBody(),\n offset: int = Body(0, ge=0),\n db: PrefectDBInterface = Depends(provide_database_interface),\n) -> List[schemas.core.BlockType]:\n async with db.session_context() as session:\n return await models.block_types.read_block_types(\n session=session,\n limit=limit,\n offset=offset,\n block_type_filter=block_types,\n block_schema_filter=block_schemas,\n )", "def select_packet_type():", "async def blocktype_inspect(\n slug: str = 
typer.Argument(..., help=\"A block type slug\"),\n):\n async with get_client() as client:\n try:\n block_type = await client.read_block_type_by_slug(slug)\n except ObjectNotFound:\n exit_with_error(f\"Block type {slug!r} not found!\")\n\n app.console.print(display_block_type(block_type))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the given ical_file, and create objects in Guidebook via the API
def parse(self, ical_file):
    cal = self.get_ical_object(ical_file)
    # Determine what timezone these events should be interpreted as.
    self.x_wr_timezone = self.get_ical_timezone_info(cal)

    # Determine the date range of events we care about
    limit_start = self.today - timedelta(days=self.days_before)
    limit_end = self.today + timedelta(days=self.days_after)

    if settings.DEBUG:
        print u'Limit start value: {}'.format(limit_start)
        print u'Limit end value: {}'.format(limit_end)
    # dictionaries to keep track of name and id mapping to minimize redundant API calls
    schedule_track_name_to_id_mapping = {}
    location_name_to_id_mapping = {}

    cal_components = self.cal_components(cal)
    session_ids = []  # maintain a list of all Session IDs. Return this upon completion.
    for component in cal_components:
        # get the raw ical representations of UID
        UID = component['UID']
        if settings.DEBUG:
            print u'Parsing iCal event: {}'.format(UID)
        session_start_time = component['DTSTART'].dt
        session_end_time = component['DTEND'].dt

        # confirm that this given event is within our import range
        session_within_limits = self.is_within_time_and_date_limits(session_start_time, session_end_time, limit_start, limit_end)
        if not session_within_limits:
            continue

        # For this integration, we are mapping the CATEGORIES field to the Schedule Track object in Guidebook
        schedule_track_name = u'{}'.format(component['CATEGORIES'])
        if schedule_track_name not in schedule_track_name_to_id_mapping:
            track_id = self.gb_api_client.get_or_create_schedule_track(name=schedule_track_name)
            schedule_track_name_to_id_mapping[schedule_track_name] = track_id
        else:
            track_id = schedule_track_name_to_id_mapping.get(schedule_track_name)

        location_name = u'{}'.format(component['LOCATION'])
        if location_name not in location_name_to_id_mapping:
            location_id = self.gb_api_client.get_or_create_location(name=location_name)
            location_name_to_id_mapping[location_name] = location_id
        else:
            location_id = location_name_to_id_mapping.get(location_name)

        # The SUMMARY field will map to the Session name in Guidebook
        session_name = u'{}'.format(component['SUMMARY'])
        description = u'{}'.format(component['DESCRIPTION'])
        session = self.gb_api_client.update_or_create_session(import_id=UID, name=session_name, start_time=session_start_time,
                                                              end_time=session_end_time, description_html=description,
                                                              schedule_tracks=[track_id], locations=[location_id])
        if settings.DEBUG:
            print session
        session_ids.append(session.get('id'))
    if settings.DEBUG:
        print schedule_track_name_to_id_mapping
        print location_name_to_id_mapping
        print session_ids
    return session_ids
[ "def parse_ics_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n return VCalendar.from_ical(file.read())", "def get_ical_object(self, ical_file):\n # get a string representation of the ical_file if we don't already have one\n if not isinstance(ical_file, basestring):\n ical_file.seek(0)\n ical_file_string = ical_file.read()\n else:\n ical_file_string = ical_file\n try:\n cal = Calendar.from_ical(ical_file_string)\n except Exception as error_on_string:\n raise InvalidiCalendarFile(\"Invalid Calendar file: {error}\".format(error=error_on_string))\n return cal", "def parseical(self, filename, dateRange=None):\n f = open(filename)\n ical = icalendar.cal.Component.from_ical(''.join([l for l in f]))\n vevent_list = ical.walk(name='VEVENT')\n\n # Take user input for dates\n if dateRange == None: # Use default dateRange\n visRange = (datetime.date(2016, 5, 1), datetime.date(2016, 5, 7))\n else:\n visRange = dateRange\n\n for vevent in vevent_list:\n e = event.Event(vevent)\n if e.date <= visRange[1] and e.date >= visRange[0]:\n self.events.append(e)", "def create_ical(events_dict):\n iCal = Calendar()\n\n for event in events_dict:\n e = Event()\n e.name = event['title']\n e.description = event['description']\n e.begin = event['begin']\n iCal.events.add(e)\n print('\\ne:', e)\n\n return iCal", "def test_clinicalimpression_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"clinicalimpression-example.json\"\n inst = clinicalimpression.ClinicalImpression.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"ClinicalImpression\" == inst.resource_type\n\n impl_clinicalimpression_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"ClinicalImpression\" == data[\"resourceType\"]\n\n inst2 = clinicalimpression.ClinicalImpression(**data)\n impl_clinicalimpression_1(inst2)", "def load_books(self):\n\n #logger.info('Loading books from file...')\n\n processor = BookDataPreprocessor()\n\n with open(self.filename, encoding = 'utf-8') as catalog:\n\n for entry in catalog:\n\n book_desc = processor.preprocess(entry)\n\n metadata = ' '.join(book_desc[self._BOOK_META_FIELD_INDEX:self._BOOK_META_ID_INDEX])\n\n #iid = book_desc[self._BOOK_META_ID_INDEX].strip()\n\n field = book_desc[self._BOOK_META_FIELD_INDEX].strip()\n \n f_entry = entry.replace('\\t', '|').strip()\n \n if not isinstance(f_entry, str):\n f_entry = unicodedata.normalize('NFD', (str(f_entry, 'utf-8'))).encode('ascii', 'ignore')\n \n f_entry = re.compile(r'\\s+', re.IGNORECASE).sub(' ', f_entry)\n\n f_entry_ = f_entry.split('|')\n \n FIELD = f_entry_[self._BOOK_META_FIELD_INDEX]\n\n subfield = book_desc[self._BOOK_META_SUBFIELD_INDEX].strip()\n \n SUBFIELD = f_entry_[self._BOOK_META_SUBFIELD_INDEX]\n \n iid = f_entry_[self._BOOK_META_ID_INDEX]\n\n book = Book(iid, field, subfield, FIELD, SUBFIELD, metadata) #collaborator\n\n self.engine.add_object(book)\n\n self.engine.start()", "def as_icalendar(self,\n ical,\n request,\n summary_attr='ical_summary',\n description_attr='ical_description',\n url_attr='get_absolute_url',\n location_attr='venue_description',\n latitude_attr='latitude',\n longitude_attr='longitude',\n cancelled_attr='is_cancelled',\n ):\n vevent = ical.add('vevent')\n\n start = localtime(self.start)\n end = localtime(self.end())\n\n if self.all_day():\n vevent.add('dtstart').value = start.date()\n vevent.add('dtend').value = end.date()\n else:\n # Add the timezone specified in the project 
settings to the event start\n # and end datetimes, if they don't have a timezone already\n if not start.tzinfo and not end.tzinfo \\\n and getattr(settings, 'TIME_ZONE', None):\n # Since Google Calendar (and probably others) can't handle timezone\n # declarations inside ICS files, convert to UTC before adding.\n start = start.astimezone(utc)\n end = end.astimezone(utc)\n vevent.add('dtstart').value = start\n vevent.add('dtend').value = end\n\n cancelled = self._resolve_attr(cancelled_attr)\n if cancelled:\n vevent.add('method').value = 'CANCEL'\n vevent.add('status').value = 'CANCELLED'\n\n summary = self._resolve_attr(summary_attr)\n if summary:\n vevent.add('summary').value = summary\n\n description = self._resolve_attr(description_attr)\n if description:\n vevent.add('description').value = description\n\n url = self._resolve_attr(url_attr)\n if url:\n domain = \"\".join(('http', ('', 's')[request.is_secure()], '://', request.get_host()))\n vevent.add('url').value = \"%s%s\" % (domain, url)\n\n location = self._resolve_attr(location_attr)\n if location:\n vevent.add('location').value = location\n\n lat = self._resolve_attr(latitude_attr)\n lon = self._resolve_attr(longitude_attr)\n if lat and lon:\n vevent.add('geo').value = \"%s;%s\" % (lon, lat)\n\n return ical", "def generate_ICS(input_filename, output_filename):\n\n TIMETABLE_DICT_RE = (\n \"([0-9]{1,2}):([0-9]{1,2}):([AP])M-([0-9]{1,2}):([0-9]{1,2}):([AP])M\"\n )\n timetable_dict_parser = re.compile(TIMETABLE_DICT_RE)\n cal = Calendar()\n cal.add(\"prodid\", \"-//Your Timetable generated by GYFT//mxm.dk//\")\n cal.add(\"version\", \"1.0\")\n # Get your timetable\n with open(input_filename) as data_file:\n data = json.load(data_file)\n\n found_missing_sub = False\n for day in data:\n startDates = [next_weekday(x[0], days[day]) for x in WORKING_DAYS]\n\n for time in data[day]:\n # parsing time from time_table dict\n # currently we only parse the starting time\n # duration of the event is rounded off to the closest hour\n # i.e 17:00 - 17:55 will be shown as 17:00 - 18:00\n\n parse_results = timetable_dict_parser.findall(time)[0]\n\n lectureBeginsStamps = [\n get_stamp(parse_results[:3], start) for start in startDates\n ]\n\n durationInHours = data[day][time][2]\n\n # Find the name of this course\n # Use subject name if available, else ask the user for the subject name and use that\n subject_code = data[day][time][0]\n dept = subject_code[:2]\n summary = subject_code\n description = subject_code\n\n # check if subject name is there in data.txt\n if data[day][time][3] is not None:\n summary = data[day][time][3].title()\n else:\n print(\n \"ERROR: Our subjects database does not have %s in it.\"\n % subject_code\n )\n summary = input(\n \"INPUT: Please input the name of the course %s: \" % subject_code\n )\n summary = summary.title()\n\n # Find location of this class\n location = data[day][time][1]\n\n for lectureBegin, [periodBegin, periodEnd] in zip(\n lectureBeginsStamps, WORKING_DAYS\n ):\n\n event = build_event.build_event_duration(\n summary,\n description,\n lectureBegin,\n durationInHours,\n location,\n \"weekly\",\n periodEnd,\n )\n\n cal.add_component(event)\n\n if DEBUG:\n print(event)\n\n\n with open(output_filename, \"wb\") as f:\n f.write(cal.to_ical())\n print(\"\\nYour timetable has been written to %s\" % output_filename)", "def _read_file(self):\n if not os.path.isfile(self.fname):\n return\n\n # The AGP file exists. 
Initialize everything.\n self._comment_lines = []\n self._objects = []\n self._current_obj = None\n self._seen_objs = set()\n\n line_number = 0\n in_body = False\n with open(self.fname, \"r\") as f:\n for line in f:\n line_number += 1\n line = line.rstrip(\"\\n\")\n if line.startswith(\"#\"):\n if not in_body:\n self._comment_lines.append(line)\n else:\n raise AGPError(self.fname, line_number, \"illegal comment in AGP body\")\n continue\n\n # In a valid AGP file, we should no longer see comment lines\n in_body = True\n fields = line.split(\"\\t\")\n\n # There should be exactly 9 tab delimited fields\n if not len(fields) == 9:\n raise AGPError(self.fname, line_number, \"detected more than 9 tab delimited fields\")\n\n # All fields should have a value\n if not all(fields):\n raise AGPError(self.fname, line_number, \"detected an empty field\")\n\n # Instantiate all the AGPLine objects. These will do line-specific validations.\n if fields[4] == \"N\" or fields[4] == \"U\":\n agp_line = AGPGapLine(self.fname, line_number, *fields)\n else:\n agp_line = AGPSeqLine(self.fname, line_number, *fields)\n\n self._add_line(agp_line)", "def instructor(self, path):\n try:\n ifile = open(path, 'r')\n except FileNotFoundError:\n logging.exception('There is an error with opening the file to analyze')\n else:\n if ifile.readlines() == ['\\n']:\n print('This file is an empty!')\n else:\n ifile.seek(0)\n for lines in ifile:\n instructorid,instructorname,instructordept = lines.strip().split('\\t')\n self.instructordict[instructorid] = Instructor(instructorid,instructorname,instructordept)", "def readins(self) -> None:\n path :str = os.path.join(self.directory_path,\"instructors.txt\")\n for cwid, name, department in file_reader(path, 3, sep='\\t',header=True): \n b: Instructor = Instructor(cwid,name,department)\n self.instdict[cwid]=b", "def parse_input():\n input_data = ''\n calendars = []\n\n for line in fileinput.input():\n if 'BEGIN:VCALENDAR' in line:\n calendars.append(input_data)\n input_data = line\n else:\n input_data += line\n calendars.append(input_data)\n\n return calendars[1:]", "def parse_input(loc, fname, fix_acronyms = True):\n\n papers = dict()\n counter = 0\n annotations = []\n relations = []\n for i, line in enumerate(read_file(fname, loc)):\n if not line:\n papers[pmid] = Paper(pmid, title, abstract, annotations,\n relations, fix_acronyms = fix_acronyms)\n\n counter = -1\n annotations = []\n relations = []\n elif counter < 2:\n vals = line.split('|')\n assert len(vals) == 3, \"Bad format for line {}\".format(i+1)\n assert vals[1] == [\"t\", \"a\"][counter]\n\n if counter == 0:\n pmid = int(vals[0])\n title = vals[2]\n else:\n assert pmid == int(vals[0])\n abstract = vals[2]\n else:\n vals = line.split('\\t')\n assert pmid == int(vals[0])\n if vals[1] == \"CID\":\n relations.append((OntologyID(vals[2]), OntologyID(vals[3])))\n else:\n # an annotation\n if len(vals) == 5: # no identifier was assigned\n vals.append(\"-1\")\n\n assert 6 <= len(vals) <= 7, \"Error on line {0}\".format(i+1)\n annotations.append(Annotation(vals[5], vals[4], vals[3], vals[1], vals[2]))\n\n counter += 1\n\n return papers", "def openfile(self):\n self.employees = []\n self.current = 0\n filename = askopenfilename()\n ifile = open(filename)\n line = ifile.readline()\n while line != \"\":\n number = line.strip()\n name = ifile.readline().strip()\n address = ifile.readline().strip()\n line = ifile.readline().strip().split()\n hourly = line[0]\n hours = line[1]\n line = ifile.readline().strip()\n employee = 
{\"number\":number,\n \"name\":name,\n \"address\":address,\n \"hourly\":hourly,\n \"hours\":hours}\n self.employees.append(employee)\n ifile.close()\n self.setfields(self.employees[self.current])", "def createobjects(files, ep_data):\n #__init__(self,season,episode,minor,name,filename):\n objects=[]\n for f in files:# only create for known files\n ed=ep_data[f]\n objects.append(episodes(ed['Season'],ed['Episode'],ed['Minor'],ed['Name'],f))\n return objects # use create_all method", "def create_test_definition_object(self):\n definition_location = (self.file_handler.read_config(\"test-definitions\"))\n definition_files = os.path.join(nuts.basedir, definition_location)\n for filename in os.listdir(definition_files):\n test_definition_yaml = self.file_handler.read_file(\n definition_files + filename\n )\n self.progress_bar.initiate_progress_bar(\n len(test_definition_yaml), \"Read objects from \" + filename\n )\n try:\n for test_definition in test_definition_yaml:\n self.test_definitions[test_definition[0]] = TestDefinition(\n test_definition[0],\n test_definition[1],\n test_definition[2],\n test_definition[3],\n test_definition[4],\n test_definition[5]\n )\n self.logger.info('Testdefinitio Object \"{}\" created'.format(test_definition[0]))\n self.progress_bar.update_progress_bar(1)\n except ValueError as ex:\n print(\"There are Values missing or in the wrong Format\")\n self.logger.exception(ex)\n else:\n self.progress_bar.clear_progress_bar()\n return self.test_definitions", "def parse_xml_to_CEIdoc(root):\n paths = {\n 'title': 'teiHeader/fileDesc/titleStmt/title',\n 'issuer': 'charter/chDesc/relevantPersonal/issuer/persName',\n 'date_range': 'charter/chDesc/head/issued/issueDate/p/dateRange',\n 'place_name': 'charter/chDesc/head/issued/issuePlace/placeName',\n 'place_loc': 'charter/chDesc/head/issued/issuePlace/location/geo',\n 'abstract': 'charter/chDesc/abstract/p',\n }\n\n title = root.findall(paths['title'])[0].text\n issuer = root.findall(paths['issuer'])[0].text\n date_range_start_str = root.findall(paths['date_range'])[0].attrib['from']\n date_range_end_str = root.findall(paths['date_range'])[0].attrib['to']\n date_range_start = datetime.strptime(date_range_start_str, '%Y-%m-%d')\n date_range_end = datetime.strptime(date_range_end_str, '%Y-%m-%d')\n location_dict = {}\n if root.findall(paths['place_name']):\n location_dict.update({\n 'name': root.findall(paths['place_name'])[0].text,\n })\n else:\n location_dict.update({\n 'name': 'No Data'\n })\n if root.findall(paths['place_loc']):\n location_dict.update({\n 'geo': root.findall(paths['place_loc'])[0].text,\n })\n else:\n location_dict.update({\n 'geo': 'No Data'\n })\n abstract_paragraphs = [\n paragraph.text for paragraph in root.findall(paths['abstract'])\n ]\n abstract = ''\n for paragraph in abstract_paragraphs:\n abstract += paragraph + '\\n'\n abstract = abstract[:-1]\n cei_doc = CEIdoc(\n title=title,\n issuer=issuer,\n date_range_start=date_range_start,\n date_range_end=date_range_end,\n location_dict=location_dict,\n abstract=abstract\n )\n return cei_doc", "def test_generate_ics(self):\n event_json = {\n \"summary\": \"Test Event\",\n \"location\": \"Washington, DC\",\n \"uid\": \"8DB71F484FA2ABC57F621CB7F1@2013-07-03 09:30:00\",\n \"dtstart\": \"2013-07-03T09:30:00Z\",\n \"dtend\": \"2013-07-03T10:30:00Z\",\n \"dtstamp\": \"2013-07-02T14:29:08Z\"\n }\n\n with mock.patch('flask_eventics.controllers.get_event_json') as mock_get_event_json:\n mock_get_event_json.return_value = event_json, 200\n ics, status, headers = 
generate_ics('foo')\n\n # Make sure the ics parses\n try:\n icalendar.Calendar.from_ical(ics)\n except ValueError:\n self.fail(\"generate_ics() did not return a valid iCalendar file\")", "def test_new_calendar_to_ical(self):\n # Arrange.\n cal = icalendar.Calendar()\n # Act.\n ical = cal.to_ical()\n # Assert.\n self.assertEqual(ical, b'BEGIN:VCALENDAR\\r\\nEND:VCALENDAR\\r\\n')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the timezone info of a calendar object parsed by Calendar.from_ical(). Return the 'X-WR-TIMEZONE' value if present, None otherwise.
def get_ical_timezone_info(self, cal):
    ical_xwr_timezone = cal.get('X-WR-TIMEZONE', None)
    if ical_xwr_timezone:
        ical_xwr_timezone = pytz.timezone(ical_xwr_timezone.rstrip('/'))  # remove trailing slashes
    return ical_xwr_timezone
[ "def get_timezone(self):\n try:\n return self.user_data['Bootstrap']['Timezone']\n except KeyError:\n return None", "def _get_adjtime_timezone():\n adjtime_file = \"/etc/adjtime\"\n if os.path.exists(adjtime_file):\n cmd = [\"tail\", \"-n\", \"1\", adjtime_file]\n return __salt__[\"cmd.run\"](cmd, python_shell=False)\n elif os.path.exists(\"/dev/rtc\"):\n raise CommandExecutionError(\n \"Unable to get hwclock timezone from \" + adjtime_file\n )\n else:\n # There is no RTC.\n return None", "def getTimezone(profile):\r\n try:\r\n return timezone(profile['timezone'])\r\n except:\r\n return None", "def get_timezone():\n ctx = _request_ctx_stack.top\n # tzinfo = getattr(ctx, 'babel_tzinfo', None)\n tzinfo = ctx and getattr(ctx, 'babel_tzinfo', None) or None\n if tzinfo is None:\n if ctx is None:\n tzinfo = timezone(constants.DEFAULT_TIMEZONE)\n else:\n babel = ctx.app.extensions['babel']\n if babel.timezone_selector_func is None:\n tzinfo = babel.default_timezone\n else:\n rv = babel.timezone_selector_func()\n if rv is None:\n tzinfo = babel.default_timezone\n else:\n if isinstance(rv, basestring):\n tzinfo = timezone(rv)\n else:\n tzinfo = rv\n ctx.babel_tzinfo = tzinfo\n return tzinfo", "def timezone(self):\n data = self.__fetch_dict({'smartlife.iot.common.timesetting': {'get_timezone': {}}})\n timezone = data['smartlife.iot.common.timesetting']['get_timezone']['index']\n return timezone", "def get_timezone_name():", "def get_timezone(tzname: str) -> tzinfo | None:\n try:\n # First, try with the provided name\n return zi.ZoneInfo(tzname)\n except zi.ZoneInfoNotFoundError:\n pass\n\n # No result: try with an alias, if there's one\n if alias := (_defs._TZ_ALIASES.get(tzname)):\n try:\n return zi.ZoneInfo(alias)\n except zi.ZoneInfoNotFoundError:\n pass\n\n # Still no result: fallback to a static timezone, or return None\n return _tz_map().get(tzname)", "def GetNATzinfo(tz='utc'):\r\n tzinfo = None\r\n tz = tz.lower()\r\n\r\n if tz == 'pst' or tz == 'pdt' or tz == 'pacific':\r\n tzinfo = NorthAmericanTzinfo(-8, 'PST', 'PDT')\r\n elif tz == 'mst' or tz == 'mdt' or tz == 'mountain':\r\n tzinfo = NorthAmericanTzinfo(-7, 'MST', 'MDT')\r\n elif tz == 'cst' or tz == 'cdt' or tz == 'central':\r\n tzinfo = NorthAmericanTzinfo(-6, 'CST', 'CDT')\r\n elif tz == 'est' or tz == 'edt' or tz == 'eastern':\r\n tzinfo = NorthAmericanTzinfo(-5, 'EST', 'EDT')\r\n elif tz == 'ast' or tz == 'adt' or tz == 'atlantic':\r\n tzinfo = NorthAmericanTzinfo(-4, 'AST', 'ADT')\r\n elif tz == 'utc':\r\n tzinfo = UtcTzinfo()\r\n\r\n return tzinfo", "def getTimezoneWindow(self):\n return AdjustTimezone(self.__screen)", "def ptimeznR(self):\n return self.patterns.tzinfo", "def test_getTimezoneInCalendar(self):\n\n data = (\n (\"America/New_York\", True),\n (\"America/Los_Angeles\", True),\n (\"America/Cupertino\", True),\n (\"America/FooBar\", False),\n )\n\n for tzid, result in data:\n cal = TimezoneDatabase.getTimezoneInCalendar(tzid)\n if result:\n self.assertTrue(cal is not None)\n self.assertEqual(cal.getComponents()[0].getID(), tzid)\n else:\n self.assertTrue(cal is None)", "def get_timezone(timezone_string):\n if timezone_string:\n return pytz.timezone(timezone_string)\n else:\n return None", "def time_zone_id(self) -> Optional[str]:\n return pulumi.get(self, \"time_zone_id\")", "def timezone(tzname):\n tz = get_timezone(tzname)\n if not tz:\n raise KeyError(tzname)\n return tz", "def get_timezone(self):\n server_date = p4gf_util.first_value_for_key(self.p4.run(\"info\"), 'serverDate')\n self.timezone = 
server_date.split(\" \")[2]", "def _get_timezones(self):\n return self.nitro.request(\"time_zones\")", "def timezones(self, timezone=None):\n method = \"getTimezones\"\n\n parameters = {\n }\n\n if timezone:\n if not isinstance(timezone, str):\n raise ValueError(\"Code for a specific Time Zone needs to be a str (Example: 'America/Buenos_Aires')\")\n else:\n parameters[\"timezone\"] = timezone\n\n return self._voipms_client._get(method, parameters)", "def get_zone(self):\n try:\n return self.meta_data['placement'][\n 'availability-zone'][-1].strip()\n except KeyError:\n raise IcsMetaException(\n \"Cannot find the 'availability zone' in meta-data.\")", "def test_get_zone(self):\n\n zone = \"America/Inuvik\" if not HAS_TZLOCAL else \"America/Denver\"\n\n # first set the zone\n assert self.run_function(\"timezone.set_zone\", [zone])\n\n # check it set the correct zone\n ret = self.run_function(\"timezone.get_zone\")\n assert zone in ret\n\n # compare zones\n assert self.run_function(\"timezone.zone_compare\", [zone])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return True if session_start_time and session_end_time are within limit_start and limit_end. False otherwise.
def is_within_time_and_date_limits(self, session_start_time, session_end_time, limit_start, limit_end):
    return session_start_time > limit_start and session_end_time < limit_end
[ "def is_current_time_between(start_hour, start_min, end_hour, end_min):\r\n now = datetime.now()\r\n start = datetime(year=now.year, month=now.month, day=now.day, hour=start_hour, minute=start_min, second=0)\r\n end = datetime(year=now.year, month=now.month, day=now.day, hour=end_hour, minute=end_min, second=59)\r\n\r\n return now >= start and now <= end", "def in_bounds(t0, t1):\n assert t0 <= t1\n ends_before_bounds = t1 < start_time\n starts_after_bounds = t0 >= end_time\n return not (ends_before_bounds or starts_after_bounds)", "def time_range(self, start, diff, curr):\n td = timedelta(minutes=diff)\n end = start + td\n \n start = self.timestamp(start)\n end = self.timestamp(end)\n\n return bool(start <= curr <= end)", "def is_valid(self, valid_start_time, valid_end_time):\n if self.start_meeting.time() < valid_start_time or self.end_meeting.time() > valid_end_time:\n return False\n\n return True", "def is_timed_out(self):\n if not self.config.competition_mode:\n return\n if self.end_time is None or self.start_time is None:\n return False\n return self.end_time - self.start_time > self.config.time_limits.game", "def is_valid_max_limit(self) -> bool:\n if (self._end_dt is not None) and (self._start_dt is None):\n return True", "def is_overlapping(record_to_check, start_time, end_time):\n if start_time > record_to_check['end_time'] or end_time < record_to_check['start_time']:\n return False\n else:\n return True", "def time_between(self, startTime, endTime):\n # convert time string to datetime\n now = datetime.now()\n startTime = startTime.split(\":\")\n startTime = now.replace(hour=int(startTime[0]), minute=int(startTime[1]), second=0, microsecond=0)\n endTime = endTime.split(\":\")\n endTime = now.replace(hour=int(endTime[0]), minute=int(endTime[1]), second=0, microsecond=0)\n\n if (now >= startTime) and (now < endTime) :\n return True\n else :\n return False", "def check_times(self):\r\n if self.in_time and self.out_time and not (self.in_time == self.out_time):\r\n return False\r\n return True", "def check_timer(self, limit=1):\n self.log.notset(__name__ + '::check_timer:')\n timer_now = dt.datetime.now()\n change = (timer_now - self.timer_start).total_seconds()\n if change > limit: # if time limit exceeded\n self.log.error(__name__ + '::check_timer: request_data failed after ' + str(limit) + ' seconds')\n self.log.error(__name__ + '::check_timer: notDone items in self.end_check_list')\n tp = self.end_check_list[self.end_check_list['status'] != 'Done']\n self.log.error(str(tp))\n return True\n else:\n return None", "def _contains_datetime(self, time):\n if self.start_inclusive:\n lower_cond = self.start <= time\n else:\n lower_cond = self.start < time\n\n if self.end_inclusive:\n upper_cond = time <= self.end\n else:\n upper_cond = time < self.end\n\n return (lower_cond and upper_cond)", "def dt_range_contains_time(start, end, time):\n if end <= start:\n return False\n\n # end is after target and start is before it\n if start.time() <= time < end.time():\n return True\n\n # at least 24 hours has passed\n if end - start >= datetime.timedelta(days=1):\n return True\n\n # the start is before the time and the end is on the next day\n if start.time() <= time and start.date() < end.date():\n return True\n return False", "def time_overlaps(self, other: \"DropletTrack\") -> bool:\n s0, s1 = self.start, self.end\n o0, o1 = other.start, other.end\n return s0 <= o1 and o0 <= s1", "def check_overlap(self, entry_b, **kwargs):\r\n consider_pause = kwargs.get('pause', True)\r\n entry_a = self\r\n 
#if entries are open, consider them to be closed right now\r\n if not entry_a.end_time or not entry_b.end_time:\r\n return False\r\n #Check the two entries against each other\r\n start_inside = entry_a.start_time > entry_b.start_time \\\r\n and entry_a.start_time < entry_b.end_time\r\n end_inside = entry_a.end_time > entry_b.start_time \\\r\n and entry_a.end_time < entry_b.end_time\r\n a_is_inside = entry_a.start_time > entry_b.start_time \\\r\n and entry_a.end_time < entry_b.end_time\r\n b_is_inside = entry_a.start_time < entry_b.start_time \\\r\n and entry_a.end_time > entry_b.end_time\r\n overlap = start_inside or end_inside or a_is_inside or b_is_inside\r\n if not consider_pause:\r\n return overlap\r\n else:\r\n if overlap:\r\n max_end = max(entry_a.end_time, entry_b.end_time)\r\n min_start = min(entry_a.start_time, entry_b.start_time)\r\n diff = max_end - min_start\r\n diff = diff.seconds + diff.days * 86400\r\n total = entry_a.get_total_seconds() + \\\r\n entry_b.get_total_seconds() - 1\r\n if total >= diff:\r\n return True\r\n return False", "def time_exceeded(self, tstart, tmax):\n def t_exceed(t=None):\n t_taken = int(time.time()) - tstart\n if t_taken > tmax:\n return True\n else:\n return False\n\n return t_exceed", "def check_in_time(self):\r\n if self.out_time and not self.in_time:\r\n return False\r\n return True", "def check_out_time(self):\r\n if self.in_time and self.out_time and (self.out_time <= self.in_time):\r\n return False\r\n return True", "def has_session_expired(self, expiration_time):", "def is_in_range(self, time):\n\n if isinstance(time, CompactHourMinute):\n working_time = time\n else:\n working_time = CompactHourMinute(time)\n\n return self.from_time <= working_time <= self.to_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a Calendar object from an ical_file. Return that parsed object. Raise InvalidiCalendarFile on bad iCal input.
def get_ical_object(self, ical_file):
    # get a string representation of the ical_file if we don't already have one
    if not isinstance(ical_file, basestring):
        ical_file.seek(0)
        ical_file_string = ical_file.read()
    else:
        ical_file_string = ical_file
    try:
        cal = Calendar.from_ical(ical_file_string)
    except Exception as error_on_string:
        raise InvalidiCalendarFile("Invalid Calendar file: {error}".format(error=error_on_string))
    return cal
[ "def parse_ics_file(filename):\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n return VCalendar.from_ical(file.read())", "def get_calendar(self, calendar_id):\n return # osid.calendaring.Calendar", "def load_cal(entry: dict) -> Calendar:\n\n if \"cache\" in entry and entry[\"cache\"]:\n print(\"Getting\", entry[\"name\"], \"from cache\")\n try:\n return get_from_cache(entry)\n except FileNotFoundError:\n return Calendar()\n\n else:\n print(\"Getting\", entry[\"name\"], \"from remote\")\n r = requests.get(entry[\"url\"], allow_redirects=True)\n if \"encoding\" in entry:\n cal = Calendar(imports=r.content.decode(encoding=entry[\"encoding\"]))\n else:\n cal = Calendar(imports=r.content.decode())\n\n cal = horodate(cal, 'Downloaded at')\n return cal", "def parse(self, ical_file):\n cal = self.get_ical_object(ical_file)\n # Determine what timezone these events should be interpreted as.\n self.x_wr_timezone = self.get_ical_timezone_info(cal)\n\n # Determine the date range of events we care about\n limit_start = self.today - timedelta(days=self.days_before)\n limit_end = self.today + timedelta(days=self.days_after)\n\n if settings.DEBUG:\n print u'Limit start value: {}'.format(limit_start)\n print u'Limit end value: {}'.format(limit_end)\n # dictionaries to keep track of name and id mapping to minimize redundant API calls\n schedule_track_name_to_id_mapping = {}\n location_name_to_id_mapping = {}\n\n cal_components = self.cal_components(cal)\n session_ids = [] # maintain a list of all Session IDs. Return this upon completion.\n for component in cal_components:\n # get the raw ical representations of UID\n UID = component['UID']\n if settings.DEBUG:\n print u'Parsing iCal event: {}'.format(UID)\n session_start_time = component['DTSTART'].dt\n session_end_time = component['DTEND'].dt\n\n # confirm that this given event is within our import range\n session_within_limits = self.is_within_time_and_date_limits(session_start_time, session_end_time, limit_start, limit_end)\n if not session_within_limits:\n continue\n\n # For this integration, we are mapping the CATEGORIES field to the Schedule Track object in Guidebook\n schedule_track_name = u'{}'.format(component['CATEGORIES'])\n if schedule_track_name not in schedule_track_name_to_id_mapping:\n track_id = self.gb_api_client.get_or_create_schedule_track(name=schedule_track_name)\n schedule_track_name_to_id_mapping[schedule_track_name] = track_id\n else:\n track_id = schedule_track_name_to_id_mapping.get(schedule_track_name)\n\n location_name = u'{}'.format(component['LOCATION'])\n if location_name not in location_name_to_id_mapping:\n location_id = self.gb_api_client.get_or_create_location(name=location_name)\n location_name_to_id_mapping[location_name] = location_id\n else:\n location_id = location_name_to_id_mapping.get(location_name)\n\n # The SUMMARY field will map to the Session name in Guidebook\n session_name = u'{}'.format(component['SUMMARY'])\n description = u'{}'.format(component['DESCRIPTION'])\n session = self.gb_api_client.update_or_create_session(import_id=UID, name=session_name, start_time=session_start_time,\n end_time=session_end_time, description_html=description,\n schedule_tracks=[track_id], locations=[location_id])\n if settings.DEBUG:\n print session\n session_ids.append(session.get('id'))\n if settings.DEBUG:\n print schedule_track_name_to_id_mapping\n print location_name_to_id_mapping\n print session_ids\n return session_ids", "def test_new_calendar_to_ical(self):\n # Arrange.\n cal = icalendar.Calendar()\n # 
Act.\n ical = cal.to_ical()\n # Assert.\n self.assertEqual(ical, b'BEGIN:VCALENDAR\\r\\nEND:VCALENDAR\\r\\n')", "def parse_input():\n input_data = ''\n calendars = []\n\n for line in fileinput.input():\n if 'BEGIN:VCALENDAR' in line:\n calendars.append(input_data)\n input_data = line\n else:\n input_data += line\n calendars.append(input_data)\n\n return calendars[1:]", "def load_calendar():\n try:\n calendar = pickle.load(open(\"calendar.p\", \"rb\"))\n except OSError:\n return False\n return calendar", "def test_calendar_ical_get(self):\n pass", "def parseical(self, filename, dateRange=None):\n f = open(filename)\n ical = icalendar.cal.Component.from_ical(''.join([l for l in f]))\n vevent_list = ical.walk(name='VEVENT')\n\n # Take user input for dates\n if dateRange == None: # Use default dateRange\n visRange = (datetime.date(2016, 5, 1), datetime.date(2016, 5, 7))\n else:\n visRange = dateRange\n\n for vevent in vevent_list:\n e = event.Event(vevent)\n if e.date <= visRange[1] and e.date >= visRange[0]:\n self.events.append(e)", "def get_ics_file(self, cr, uid, event_obj, context=None):\n res = None\n\n def ics_datetime(idate, allday=False):\n if idate:\n if allday:\n return datetime.strptime(idate.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT).replace(tzinfo=pytz.timezone('UTC'))\n else:\n return datetime.strptime(idate.split('.')[0], DEFAULT_SERVER_DATETIME_FORMAT).replace(tzinfo=pytz.timezone('UTC'))\n return False\n\n try:\n # FIXME: why isn't this in CalDAV?\n import vobject\n except ImportError:\n return res\n\n cal = vobject.iCalendar()\n event = cal.add('vevent')\n if not event_obj.start or not event_obj.stop:\n raise osv.except_osv(_('Warning!'), _(\"First you have to specify the date of the invitation.\"))\n event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))\n event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)\n event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)\n event.add('summary').value = event_obj.name\n if event_obj.description:\n event.add('description').value = event_obj.description\n if event_obj.location:\n event.add('location').value = event_obj.location\n if event_obj.rrule:\n event.add('rrule').value = event_obj.rrule\n\n if event_obj.alarm_ids:\n for alarm in event_obj.alarm_ids:\n valarm = event.add('valarm')\n interval = alarm.interval\n duration = alarm.duration\n trigger = valarm.add('TRIGGER')\n trigger.params['related'] = [\"START\"]\n if interval == 'days':\n delta = timedelta(days=duration)\n elif interval == 'hours':\n delta = timedelta(hours=duration)\n elif interval == 'minutes':\n delta = timedelta(minutes=duration)\n trigger.value = delta\n valarm.add('DESCRIPTION').value = alarm.name or 'OpenERP'\n for attendee in event_obj.attendee_ids:\n attendee_add = event.add('attendee')\n attendee_add.value = 'MAILTO:' + (attendee.email or '')\n res = cal.serialize()\n return res", "def test_issue_104__no_ignore_exceptions(self):\n ical_str = \"\"\"BEGIN:VCALENDAR\nVERSION:2.0\nMETHOD:PUBLISH\nBEGIN:VEVENT\nDTSTART:20140401T000000Z\nDTEND:20140401T010000Z\nDTSTAMP:20140401T000000Z\nSUMMARY:Broken Eevnt\nCLASS:PUBLIC\nSTATUS:CONFIRMED\nTRANSP:OPAQUE\nEND:VEVENT\nX\nEND:VCALENDAR\"\"\"\n with self.assertRaises(ValueError):\n icalendar.Calendar.from_ical(ical_str)", "def calendar_item(self, calendar_item_id):\n\n return self.account.calendar.get(\n ncr_calendar_item_id=calendar_item_id)", "def as_icalendar(self,\n ical,\n request,\n summary_attr='ical_summary',\n 
description_attr='ical_description',\n url_attr='get_absolute_url',\n location_attr='venue_description',\n latitude_attr='latitude',\n longitude_attr='longitude',\n cancelled_attr='is_cancelled',\n ):\n vevent = ical.add('vevent')\n\n start = localtime(self.start)\n end = localtime(self.end())\n\n if self.all_day():\n vevent.add('dtstart').value = start.date()\n vevent.add('dtend').value = end.date()\n else:\n # Add the timezone specified in the project settings to the event start\n # and end datetimes, if they don't have a timezone already\n if not start.tzinfo and not end.tzinfo \\\n and getattr(settings, 'TIME_ZONE', None):\n # Since Google Calendar (and probably others) can't handle timezone\n # declarations inside ICS files, convert to UTC before adding.\n start = start.astimezone(utc)\n end = end.astimezone(utc)\n vevent.add('dtstart').value = start\n vevent.add('dtend').value = end\n\n cancelled = self._resolve_attr(cancelled_attr)\n if cancelled:\n vevent.add('method').value = 'CANCEL'\n vevent.add('status').value = 'CANCELLED'\n\n summary = self._resolve_attr(summary_attr)\n if summary:\n vevent.add('summary').value = summary\n\n description = self._resolve_attr(description_attr)\n if description:\n vevent.add('description').value = description\n\n url = self._resolve_attr(url_attr)\n if url:\n domain = \"\".join(('http', ('', 's')[request.is_secure()], '://', request.get_host()))\n vevent.add('url').value = \"%s%s\" % (domain, url)\n\n location = self._resolve_attr(location_attr)\n if location:\n vevent.add('location').value = location\n\n lat = self._resolve_attr(latitude_attr)\n lon = self._resolve_attr(longitude_attr)\n if lat and lon:\n vevent.add('geo').value = \"%s;%s\" % (lon, lat)\n\n return ical", "def downloadIcs(url):\n response = urllib2.urlopen(url).read().splitlines()\n # checks\n if response[0]!=\"BEGIN:VCALENDAR\": raise BaseException(\"Bad ICS response\")\n if response[-1]!=\"END:VCALENDAR\": raise BaseException(\"Bad ICS response\")\n events = []\n event=[]\n for line in response:\n if \"BEGIN:VEVENT\" in line:\n event=[]\n elif \"END:VEVENT\" in line:\n events.append(parseEvent(event))\n else: event.append(line)\n return events", "def process(path: str, from_cache: bool = True) -> Calendar:\n print(\"app/cache/\" + sanitize_filename(path).rstrip(\".json\") + \".ics\")\n if from_cache and os.path.isfile(\"app/cache/\" + sanitize_filename(path).rstrip(\".json\") + \".ics\"):\n with open(\"app/cache/\" + sanitize_filename(path).rstrip(\".json\") + \".ics\") as file:\n data = file.read()\n print(\"Serving precomputed file\")\n return data #Calendar(imports=data)\n\n else:\n o = \"app/config/\" + sanitize_filename(path)\n print(\"Try to open \" + o)\n file = open(o, \"r\")\n config = json.loads(file.read())\n file.close()\n\n data = []\n\n for entry in config:\n\n cal = load_cal(entry)\n\n if \"filters\" in entry:\n cal = apply_filters(cal, entry[\"filters\"])\n\n if \"modify\" in entry:\n cal = apply_modify(cal, entry[\"modify\"])\n\n data.append(cal)\n\n return merge(data)", "def as_mapi(self, calendar_dto: CalendarDto) -> MapiCalendarDto:\n # verify the required parameter 'calendar_dto' is set\n if calendar_dto is None:\n raise ValueError(\"Missing the required parameter `calendar_dto` when calling `as_mapi`\")\n\n collection_formats = {}\n path = '/email/Calendar/as-mapi'\n\n header_params = {}\n # HTTP header `Accept`\n header_params['Accept'] = self._select_header_accept(\n ['application/json'])\n # HTTP header `Content-Type`\n header_params['Content-Type'] = 
self._select_header_content_type(\n ['application/json'])\n body_params = calendar_dto\n # Authentication setting\n auth_settings = ['JWT']\n\n http_request_object = HttpRequest(path, None, None, header_params, None, body_params, None, None, auth_settings)\n\n return self._make_request(http_request_object, 'PUT', 'MapiCalendarDto')", "def get_calendar(name: str) -> TradingCalendar:\n ...", "def get_as_file(self, request: CalendarGetAsFileRequest) -> str:\n # verify the required parameter 'file_name' is set\n if request.file_name is None:\n raise ValueError(\"Missing the required parameter `file_name` when calling `get_as_file`\")\n # verify the required parameter 'format' is set\n if request.format is None:\n raise ValueError(\"Missing the required parameter `format` when calling `get_as_file`\")\n\n collection_formats = {}\n path = '/email/Calendar/as-file'\n path_params = {}\n\n query_params = []\n path_parameter = '{' + self._lowercase_first_letter('fileName') + '}'\n if path_parameter in path:\n path = path.replace(path_parameter, request.file_name if request.file_name is not None else '')\n else:\n if request.file_name is not None:\n query_params.append((self._lowercase_first_letter('fileName'), request.file_name))\n path_parameter = '{' + self._lowercase_first_letter('format') + '}'\n if path_parameter in path:\n path = path.replace(path_parameter, request.format if request.format is not None else '')\n else:\n if request.format is not None:\n query_params.append((self._lowercase_first_letter('format'), request.format))\n path_parameter = '{' + self._lowercase_first_letter('storage') + '}'\n if path_parameter in path:\n path = path.replace(path_parameter, request.storage if request.storage is not None else '')\n else:\n if request.storage is not None:\n query_params.append((self._lowercase_first_letter('storage'), request.storage))\n path_parameter = '{' + self._lowercase_first_letter('folder') + '}'\n if path_parameter in path:\n path = path.replace(path_parameter, request.folder if request.folder is not None else '')\n else:\n if request.folder is not None:\n query_params.append((self._lowercase_first_letter('folder'), request.folder))\n\n form_params = []\n local_var_files = []\n\n header_params = {}\n # HTTP header `Accept`\n header_params['Accept'] = self._select_header_accept(\n ['multipart/form-data'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self._select_header_content_type(\n ['application/json'])\n\n # Authentication setting\n auth_settings = ['JWT']\n\n http_request_object = HttpRequest(path, path_params, query_params, header_params, form_params, None, local_var_files,\n collection_formats, auth_settings)\n\n return self._make_request(http_request_object, 'GET', 'file')", "def get_calendar_event(cal):\n for component in cal.walk():\n if component.name == 'VEVENT':\n summary = component.get('summary', ' ')\n description = component.get('description', ' ')\n location = component.get('location', ' ')\n startdt = component.get('dtstart').dt\n enddt = component.get('dtend').dt\n enddt = startdt if enddt.day > startdt.day else enddt\n return {\n 'summary': '{} - {}'.format(summary, location),\n 'location': '{} === {}'.format(location, description),\n 'description': description,\n 'start': {\n 'dateTime': parse_event_time(startdt),\n 'timeZone': 'America/Los_Angeles',\n },\n 'end': {\n 'dateTime': parse_event_time(enddt),\n 'timeZone': 'America/Los_Angeles',\n },\n }\n # there should always be a VEVENT in the icalendar event\n raise ValueError('No VEVENT 
component found in icalendar event.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Triggers the pedestrian_tracking process on an RTSP link using a thread.
def trigger_process(cfg, args):
    try:
        t = Thread(target=pedestrian_tracking, args=(cfg, args))
        t.start()
        return jsonify({"message": "Pedestrian detection started successfully"})
    except Exception:
        return jsonify({'message': "Unexpected exception occured in process"})
[ "def thread_handler():\n\n print(\"thread_handler_init\")\n while True:\n try:\n if listener.can_record and not listener.capture_frame:\n listener.recording = True\n time.sleep(.4)\n print(\"recording\")\n while listener.hand_vel < 250:\n pass\n\n listener.capture_frame = True\n\n elif not listener.can_record and \\\n listener.capture_frame:\n\n print(\"recording and recognizing captured\")\n listener.capture_frame = False # end of Leap capture\n points = listener.gesture[1] # this allows \"F\" to work with mouse and hand stroke\n if len(points) > 20:\n try:\n pc = Point_cloud(\"f1\", listener.gesture[1]) # pc containing our new gesture\n pc.draw_on_canvas() # drawing pc on canvas to see the shape\n except:\n print(\"some point_cloud error...\")\n\n try:\n result = recognize_stroke(points)\n gesture_match(result.name, conf)\n except:\n print(\"u have to redo the gesture\")\n\n listener.clear_variables()\n listener.recording = False\n\n except:\n pass", "def taken_by_processor(self, link):", "def run(self):\n prctl.set_name(\"NoxGateway\") # set thread name visible in htop\n logger.info('Init thread (delay %ss) %s' %(self.cycle_delay, str(current_thread().ident)))\n while (True):\n self.forward_command_from_web_to_alarm()\n self.forward_status_from_alarm_to_web()\n self.forward_request_status_from_web_to_alarm()\n\n sleep(self.cycle_delay)", "def main(session):\n\n # Set target to track.\n eventName = \"ALTracker/BlobDetected\"\n\n \n # Get the services ALTracker, ALMotion and ALRobotPosture.\n\n #motion_service = session.service(\"ALMotion\")\n motion_service = naoqi.ALProxy(\"ALMotion\", IP, PORT)\n posture_service = session.service(\"ALRobotPosture\")\n tracker_service = session.service(\"ALTracker\")\n memory_service = session.service(\"ALMemory\")\n\n memory_service.declareEvent(eventName)\n \n # First, wake up.\n motion_service.wakeUp()\n\n print\n print \"Head\"\n print motion_service.getPosition(\"Head\",2,False)\n print\n\n motion_service.post.moveToward(0.0,0.0,0.4,[])\n \n fractionMaxSpeed = 0.8\n\n # Go to posture stand\n posture_service.goToPosture(\"StandInit\", fractionMaxSpeed)\n\n\n # set mode\n mode = \"Head\"\n tracker_service.setMode(mode)\n \n # Then, start tracker.\n tracker_service.trackEvent(eventName)\n\n print \"ALTracker successfully started.\"\n print \"Use Ctrl+c to stop this script.\"\n\n position = [0.0 for _ in range(6)]\n position[0] = 2.0\n position[1] = 0.0\n position[2] = 1.0\n \n try:\n while True:\n\n #print memory_service.getEventHistory(eventName)[0]\n #print tracker_service.getAvailableModes()\n #print tracker_service.getActiveTarget()\n #print tracker_service.getRegisteredTargets()\n #print tracker_service.getTargetPosition(2)\n #print tracker_service.isActive()\n \n #position[1]=y\n #y = -1.0 * y\n \n time_stamp = qi.clockNow()\n \n al_value = [position,\n [time.time(),time_stamp],\n 0]\n\n print al_value\n\n memory_service.raiseEvent(eventName,al_value)\n\n motion_service.post.moveToward(0.0,0.0,0.6,[])\n \n time.sleep(2)\n \n except KeyboardInterrupt:\n print\n print \"Interrupted by user\"\n print \"Stopping...\"\n\n # Stop tracker, go to posture Sit.\n tracker_service.stopTracker()\n tracker_service.unregisterAllTargets()\n #posture_service.goToPosture(\"Sit\", fractionMaxSpeed)\n #motion_service.rest()\n\n motion_service.post.moveToward(0.0,0.0,0.0,[])\n \n print \"ALTracker stopped.\"", "def run_thread(self, usnap=.2):\n # self.stream_data() # Unless other changes are made this would limit to localhost only.\n gps3_data_thread = 
Thread(group=None, target=self.unpack_data, args={usnap: usnap})\n gps3_data_thread.start()", "def _traceroute(self, _):\r\n logger = LoggingMessageHandler(bool(), self._log_viewer)\r\n if self.ip.text() == '':\r\n logger.clear()\r\n logger.status_message(\"No IP to traceroute.\")\r\n return\r\n else:\r\n command = f'traceroute {self.ip.text()}'\r\n self.command_thread.command = command\r\n logger.status_message(\"Running....\")\r\n self.command_thread.start()", "def processRtspRequest(self, data):\n\t\t# Get the request type\n\t\trequest = data.split('\\n')\n\t\tline1 = request[0].split(' ')\n\t\trequestType = line1[0]\n\t\t\n\t\t# Get the media file name\n\t\tfilename = line1[1]\n\t\t\n\t\t# Get the RTSP sequence number \n\t\tseq = request[1].split(' ')\n\n\t\t# Process SETUP request\n\t\tprint(requestType)\n\t\tif requestType == self.SETUP:\n\t\t\tif self.state == self.INIT:\n\t\t\t\t# Update state\n\t\t\t\tprint(\"processing SETUP\\n\")\n\t\t\t\ttry:\n\t\t\t\t\t#print(filename)\n\t\t\t\t\tself.clientInfo['videoStream'] = VideoStream(filename)\n\t\t\t\t\tself.state = self.READY\n\t\t\t\t\tself.clientInfo['videoStream'].totalFrame(filename)\n\t\t\t\t\t\n\t\t\t\texcept IOError:\n\t\t\t\t\tself.replyRtsp(self.FILE_NOT_FOUND_404, seq[1])\n\t\t\t\t\n\t\t\t\t# Generate a randomized RTSP session ID\n\t\t\t\tself.clientInfo['session'] = randint(100000, 999999)\n\t\t\t\t# Send RTSP reply\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\ttotalTime = (\"tt\" + ' ' + str(self.clientInfo['videoStream'].totalTime()) + ' ' + str(self.clientInfo['videoStream'].getFPS())).encode()\n\t\t\t\tself.clientInfo['rtspSocket'][0].send(totalTime)\n\t\t\t\t# Get the RTP/UDP port from the last line\n\n\t\t\t\tself.clientInfo['rtpPort'] = request[2].split(' ')[3]\n\t\t\n\t\t# Process PLAY request \t\t\n\t\telif requestType == self.PLAY:\n\t\t\tif self.state == self.READY or self.state == self.SWITCH:\n\t\t\t\tprint(\"processing PLAY\\n\")\n\t\t\t\tself.state = self.PLAYING\n\t\t\t\t\n\t\t\t\t# Create a new socket for RTP/UDP\n\t\t\t\tself.clientInfo[\"rtpSocket\"] = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\n\t\t\t\t# Create a new thread and start sending RTP packets\n\t\t\t\tself.clientInfo['event'] = threading.Event()\n\t\t\t\tself.clientInfo['event'].clear()\n\t\t\t\tself.clientInfo['worker'] = threading.Thread(target=self.sendRtp) \n\t\t\t\tself.clientInfo['worker'].start()\n\t\t\n\t\t# Process PAUSE request\n\t\telif requestType == self.PAUSE:\n\t\t\tif self.state == self.PLAYING:\n\t\t\t\tprint(\"processing PAUSE\\n\")\n\t\t\t\tself.state = self.READY\n\t\t\t\t\n\t\t\t\tself.clientInfo['event'].set()\n\t\t\t\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\n\t\t# Process TEARDOWN request\n\t\telif requestType == self.TEARDOWN:\n\t\t\t\tprint(\"processing TEARDOWN\\n\")\n\t\t\t\ttry:\n\t\t\t\t\tself.clientInfo['event'].set()\n\t\t\t\texcept: pass\n\t\t\t\tself.state = self.INIT\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\t# Close the RTP socket\n\t\t\t\ttry:\n\t\t\t\t\tself.clientInfo['rtpSocket'].close()\n\t\t\t\texcept: pass\n\t\t\n\t\t# Process FORWARD request\n\t\telif requestType == self.FORWARD:\n\t\t\tif self.state == self.PLAYING or self.state == self.READY:\n\t\t\t\tprint(\"processing FORWARD\\n\")\n\t\t\t\tself.clientInfo['videoStream'].moveForward()\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\n\t\t# Process BACKWARDvrequest\n\t\telif requestType == self.BACKWARD:\n\t\t\tif self.state == self.PLAYING or self.state == 
self.READY:\n\t\t\t\tprint(\"processing BACKWARD\\n\")\n\t\t\t\tself.clientInfo['videoStream'].moveBackward()\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\telif requestType == self.DESCRIBE:\n\t\t\t\tprint(\"processing DESCRIBE\\n\")\n\t\t\t\tself.replyRtsp(self.OK_200, seq[1])\n\t\t\t\t\n\t\t\t\tv = 0 #protocol version\n\t\t\t\ts = 'Video streaming by using RTP and RTSP protocol'\n\t\t\t\tt = datetime.now()\n\t\t\t\tm = 'video ' + str(self.clientInfo['rtpPort']) + ' RTP/UDP'\n\t\t\t\ta = 'control:streamid=' + str(self.clientInfo['session']) + '\\na=mimetype:string;\\\"video/MJPEG\\\"'\n\t\t\t\tsdp1 ='\\n\\nv=' + str(v) + '\\ns=' + s + '\\nt=' + str(t) +'\\nm=' + m + '\\na=' + a\n\t\t\t\tsdp = 'cc' + 'Content-Base:' + filename + '\\nContent-Type:application/sdp' + '\\nContent-Length:' + str(len(sdp1)) + sdp1\n\t\t\t\tself.clientInfo['rtspSocket'][0].send(sdp.encode())\t\t\t\t \n\t\telif requestType == self.GETLIST:\n\t\t\t\ttry:\n\t\t\t\t\tself.clientInfo['event'].set()\n\t\t\t\texcept: pass\n\t\t\t\tself.state = self.SWITCH\n\t\t\t\tprint(\"processing GETLIST\\n\")\n\t\t\t\tjsonFile = open(\"videoList.txt\",\"r\")\n\t\t\t\toutput = ''\n\t\t\t\tfor line in jsonFile.readlines():\n\t\t\t\t\toutput += line\n\t\t\t\treply = 'RTSP/1.0 200 OK\\nCSeq: ' + seq[1] + '\\nSession: ' + str(self.clientInfo['session']) + '\\n' + output\n\t\t\t\tconnSocket = self.clientInfo['rtspSocket'][0] ## because this is RTSP/TCP, unlike the rpt sender,\n\t\t\t\tconnSocket.send(reply.encode())", "def startProxy(self, uid=False, pwd=False):\n try:\n #start live555 process for this device\n rtsp_url = self.device.rtspURL\n if (self.device.model.find('AXIS')>=0):\n bitrate, framerate = self.device.getcamdb()\n if (bitrate>0):\n rtsp_url = self.device.baseRTSPURL + \"&videomaxbitrate=\" + str(bitrate) \n \n if (uid):\n live555cmd=c.approot+\"live555 -u \" + uid + \" \" + pwd + \" -o \"+self.urlFilePath+\" -p \"+str(self.ports['rts'])+\" \"+rtsp_url\n else:\n live555cmd=c.approot+\"live555 -o \"+self.urlFilePath+\" -p \"+str(self.ports['rts'])+\" \"+rtsp_url\n \n dbg.prn(dbg.SRC,\"Adding livefeed for idx:{} FILE:{} PORT:{} IP:{}.{} {} {}\".format(self.idx, self.urlFilePath, self.ports['rts'], self.device.ip, self.device.vidquality, self.device.model, self.type))\n \n pname = \"livefeed_\" + self.device.vidquality\n pdevId = self.device.ip + \".\" + self.device.vidquality\n procMan.padd(name=pname, devID=pdevId, cmd=live555cmd, keepAlive=True, killIdle=True, forceKill=True, modelname=self.device.model, srcidx=self.idx)\n # record the time of the live555 server start - will be used in the next step to determine when live555 should be restarted\n self.device.liveStart = time.time()\n except Exception as e:\n dbg.prn(dbg.ERR|dbg.SRC,\"[---]source.startProxy\", e, sys.exc_info()[-1].tb_lineno,)", "def run(self):\r\n log.info(\"[%s] Started monitoring links for a router: %s\",\r\n self.threadName, self.routerName)\r\n\r\n # Create event handler\r\n eventHandler = RouterLinkEventHandler(patterns=[self.routerInformation],\r\n ignore_patterns=[],\r\n ignore_directories=True)\r\n eventHandler.routingTable = self.routingTable\r\n eventHandler.threadName = self.threadName\r\n\r\n # Create watchdog observer\r\n self.observer = Observer()\r\n self.observer.schedule(eventHandler,\r\n self.routerInformationPath,\r\n recursive=False)\r\n self.observer.start()\r\n\r\n # Keep monitoring links with neighbour routers,\r\n # until someone terminates the thread\r\n while not self._stopevent.isSet():\r\n time.sleep(1)\r\n else:\r\n 
self._stop()", "def __init__(self):\n\n threading.Thread.__init__(self)\n self.gpsd = gps.gps(\"localhost\", \"2947\")\n self.gpsd.stream(gps.WATCH_ENABLE | gps.WATCH_NEWSTYLE)\n self.current_value = None\n self.running = True", "def sendRtp(self):\n\t\twhile True:\n\t\t\tt = 1/self.clientInfo['videoStream'].getFPS()\n\t\t\tself.clientInfo['event'].wait(t) \n\t\t\tprint('1\\n2')\n\t\t\t# Stop sending if request is PAUSE or TEARDOWN\n\t\t\tif self.clientInfo['event'].isSet(): \n\t\t\t\tbreak \n\t\t\t\t\n\t\t\tdata = self.clientInfo['videoStream'].nextFrame()\n\t\t\tif data: \n\t\t\t\tframeNumber = self.clientInfo['videoStream'].frameNbr()\n\t\t\t\ttry:\n\t\t\t\t\taddress = self.clientInfo['rtspSocket'][1][0]\n\t\t\t\t\tport = int(self.clientInfo['rtpPort'])\n\t\t\t\t\tself.clientInfo['rtpSocket'].sendto(self.makeRtp(data, frameNumber),(address,port)) #UDP\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Connection Error\")", "def link_existing_particles(self, event=None):\n start_time = time.time()\n analysis_frame = self.gui.analyze_trial_frame\n tracking_seach_radius = int(\n analysis_frame.linking_radius_selector.get())\n last_timepoint = int(\n analysis_frame.last_time_selector.get())\n\n self.trial.link_mitos(tracking_seach_radius=tracking_seach_radius,\n last_timepoint=last_timepoint)\n\n link_done_time = time.time()\n print('Done linking trial file. Linking and filtering took ' +\n str(round(link_done_time - start_time)) + ' seconds.')", "def tracking(self, chosen_route):\n track = None\n try:\n track = Tracker(chosen_route)\n self.view.tracking_wrapper(track)\n except Exception as e:\n \"\"\"If the Tracker or GpsLocator encounters any unknown error,\n simply abort the tracking mode and restart from the main_menu()\n \"\"\"\n self.view.display_error(e)\n finally:\n del track", "def _start_handler_thread(self):\n self.update_thread = Thread(target=self.lyrics_receiver.start_loop)\n self.update_thread.daemon = True\n self.update_thread.start()", "def track(self, *args, **kwargs):\n actual_track(*args, **kwargs)", "def _pcap_thread(self, pcapfiles, tag=None, sdpidx=None, capidx=None):\n th = Thread(target=self._worker, \n args=(pcapfiles, tag, sdpidx, capidx))\n th.start()\n return th", "def _update_rtt(self, pid):\n # Get departure time of packet\n depart = self._tcp.departure(pid)\n if depart is None:\n return\n # Calcualte roundtrip time of packet\n rtt = self.env.now - depart\n # append packet to list of RTT's\n self._times.append(rtt)", "def added_to_queue(self, link):", "def run(self):\n self.pull()\n self.process()\n if self.has_delay_line:\n self.delay_line.sample()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a positive integer num into an 8bit bit vector
def bin_array(num): # source: https://stackoverflow.com/a/47521145/1103264 return np.array(list(np.binary_repr(num).zfill(8))).astype(np.int8)
[ "def bit_vec_transform(num):\n vec = [0]*10\n vec[int(num)] = 1\n return vec", "def eightbits(number):\n # useful only so far in context of a forwardmask or any bitmask.\n prefix, value = bin(number).split('b')\n return '0b%0.8i' % (int(value),)", "def __to_bits(data_byte):\n return [(int(data_byte) >> i) & 1 for i in range(0, 8)]", "def convert_16_to_8(value):\n return value >> 8", "def intToBin(i):\n return (i).to_bytes(2, byteorder=\"little\")", "def byte_to_bit_array(byte: int) -> [int]:\n return [int(i) for i in \"{0:08b}\".format(byte)]", "def number_to_uint8(number, out_len):\n if number > (256**out_len - 1):\n rospy.logwarn(\"assembling sonar message: Trying to send number larger than the bytes it needs to fit in!\")\n\n int_number = int(round(number))\n result = [0] * out_len\n while out_len > 0:\n out_len = out_len - 1\n result[out_len] = int(int_number / (256 ** out_len)) # starting with MSB, enforce division result stays int\n int_number = int_number % (256 ** out_len) # get remainder\n return result", "def toBinary( val, numBits ):\r\n if numBits == 0: return\r\n toBinary( val>>1 , numBits-1 )\r\n print((val & 0x01), end=' ') # print least significant bit\r", "def convert_to_bits(data):\n\tresult = []\n\tfor c in data:\n\t\tbits = bin(c)[2:]\n\t\tbits = '00000000'[len(bits):] + bits\n\t\tresult.extend(bits)\n\t\t# result.extend([int(b) for b in bits])\n\treturn ''.join([i for i in result])", "def byte_to_bits(byte):\n return \"\".join([str(get_bit(byte, bit_num)) for bit_num in range(7, -1, -1)])", "def decimal_to_binary(n: int):\n return int(\"{0:b}\".format(n))", "def encode_variable_int(value):\n if not isinstance(value, Integral) or value < 0:\n raise ValueError('variable int must be a non-negative integer')\n\n bytes = []\n while value:\n bytes.append(value & 0x7f)\n value >>= 7\n\n if bytes:\n bytes.reverse()\n\n # Set high bit in every byte but the last.\n for i in range(len(bytes) - 1):\n bytes[i] |= 0x80\n return bytes\n else:\n return [0]", "def __invert__(self):\n newInt = self\n bits = []\n for i in xrange(0, len(newInt.bits)):\n bits.append(~newInt.bits[i])\n return UInt8(bits=bits)", "def signed_nibble(x) -> int:\n return (x | ~7) if (x & 8) else (x & 7)", "def decimal_to_binary(num):\r\n bits = []\r\n i = 1\r\n while num and i <= 32:\r\n if num >= (1.0 / 2) ** i:\r\n bits.append(1)\r\n num -= (1.0 / 2) ** i\r\n else:\r\n bits.append(0)\r\n i += 1\r\n if num:\r\n return \"ERROR\"\r\n return \"0.\" + \"\".join(str(bit) for bit in bits)", "def int_to_bin(number, width=32):\r\n if number < 0:\r\n number += 1 << width\r\n i = width - 1\r\n bits = bytearray(width)\r\n while number and i >= 0:\r\n bits[i] = number & 1\r\n number >>= 1\r\n i -= 1\r\n return bytes(bits)", "def get_signed8(uint: int) -> int:\n if uint > 127:\n return uint - 256\n return uint", "def _2bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:\n for n, val in enumerate(in_):\n out[4 * n] = int((val >> 6) * scale)\n out[4 * n + 1] = int(((val >> 4) & 0b11) * scale)\n out[4 * n + 2] = int(((val >> 2) & 0b11) * scale)\n out[4 * n + 3] = int((val & 0b11) * scale)", "def dec_to_bin(self,num):\r\n BinStr = ''\r\n if num == 0: return '0'*8\r\n while num > 0:\r\n BinStr = str(num % 2) + BinStr\r\n num = num >> 1 # right-shift the num by 1 bit\r\n BinStr = BinStr.zfill(8) # make BinStr an 8-bit string\r\n return BinStr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The edge sizes are the number of vertices within each edge.
def edge_sizes(self) -> typing.Tuple[int]: return copy.deepcopy(self._edge_sizes)
[ "def number_of_edges(self) -> int:\n count = 0\n for vertice in self.__graph:\n count += len(self.__graph[vertice])\n return count // 2", "def getNumEdges(self): \n return self.__E", "def get_edge_list_len(self):\n return self.edge_list_len", "def edge_sum(self):\n return len(self.edges)", "def dim_edge_features(self) -> int:\n return self.num_edge_features", "def edge_width(self):\n if not self._edge_width:\n self.edge_width = self.default_edgewidth\n return self._edge_width", "def edge_lengths(self):\n points = list(self.base_piece.polygon.points())\n NUM = 4\n assert len(points) == NUM\n return [(points[i] - points[(i+1) % NUM]).norm() for i in range(NUM)]", "def get_total_edges_count(self) -> int:\n return self.edge_record_count", "def get_edge_lengths(vertices, edge_points):\n N, D = vertices.shape\n E = edge_points.shape[0]\n # E,2,D (OK to do this kind of indexing on the first dimension)\n edge_vertices = vertices[edge_points[:,:2]]\n\n edges = (edge_vertices[:,0,:]-edge_vertices[:,1,:])\n edges_sqrlen = torch.sum(edges * edges, dim=-1)\n return edges_sqrlen", "def sizes(self):\n return np.array([entry.data[\"size\"] for entry in self._entries])", "def _get_sizes(self) -> int:\n pass", "def edge_length(self, edge_id):\n raise NotImplementedError", "def edge_lengths_dimension(self, eqs_lambdas):\n return ideal(eqs_lambdas + [self.aux_var]).dimension()", "def get_total_edges(self):\n edges = 0\n for vertex in self.get_all_nodes():\n edges += len(self.graph[vertex][self._ADJACENT_NODES])\n\n return edges", "def numConnectedEdges(*args, **kwargs):\n \n pass", "def num_vertices(self):\n pass", "def test_the_number_of_edges(self):\n num_vertices = len(self.mst.vertices)\n num_edges = len(self.mst.graph_edges)\n\n self.assertEqual(num_vertices-1, num_edges)", "def calc_edge_length(edge, layout):\n\n Ax, Ay, Bx, By = edge_to_cartesian(edge,layout)\n\n edge_length = math.sqrt( (Bx - Ax)*(Bx - Ax) + (By - Ay)*(By - Ay) )\n\n #print edge, Ax, Ay, Bx, By\n\n return edge_length", "def number_edges(self):\n\n # The maximum number of edges is (the number of nodes*(number of nodes -1))/2, forming an upper bound.\n if self.a_edges == \"low\":\n n_edges = round(random.uniform(((self.n_nodes - 1)/(self.n_nodes**2)), 0.333) * ((self.n_nodes*(self.n_nodes - 1))/2))\n elif self.a_edges == \"middle\":\n n_edges = round(random.uniform(0.34, 0.666) * (self.n_nodes*(self.n_nodes - 1))/2)\n elif self.a_edges == \"high\":\n n_edges = round(random.uniform(0.67, 1.0) * (self.n_nodes*(self.n_nodes - 1))/2)\n else:\n raise ValueError(\"Amount of edges must be either 'low', 'middle' or 'high'\")\n\n return n_edges" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a nonrandom multihypergraph with the given degree sequence. This instantiates a bipartite graph, using the degree sequence and edge sequence as the bipartite degree sequence. The result is a nonrandom multihypergraph graph. To sample a multihypergraph approximately uniformly at random, the switch chain can be applied, which is often rapidly converging.
def from_degree_sequence( degree_sequence: typing.Sequence[int], edge_sequence: typing.Sequence[int] ) -> "SwitchMultiHypergraph": # argument checks if not random_graph.utils.bipartite_degree_sequence_graphical(degree_sequence, edge_sequence): raise ValueError("Degree sequence is not graphical.") # empty hypergraph returned immediately if len(degree_sequence) == 0: return SwitchMultiHypergraph(n=len(degree_sequence), edges=set()) # use bipartite construction to create the multi-hypergraph bipartite_graph = random_graph.graphs.SwitchBipartiteGraph.from_degree_sequence(degree_sequence, edge_sequence) hypergraph_edges = bipartite_graph.neighborhoods("y") # convert to graph graph = SwitchMultiHypergraph(n=len(degree_sequence), edges=hypergraph_edges) return graph
[ "def random(cls, maxA=20, maxB=40, prob=.05):\n assert prob >= 0 and prob <= 1\n G = nx.algorithms.bipartite.generators.random_graph(maxA, maxB, p=prob)\n G.remove_nodes_from([n for n in G if G.degree(n) == 0])\n G = max((G.subgraph(cc) for cc in nx.connected_components(G)), key=lambda cc: len(cc))\n return cls((a,b) for a in nx.bipartite.sets(G)[0] for b in G[a])", "def buildRandNetwork(nNodes,maxDegree=2,nLoops=0):\n assert maxDegree > 1, \"maxDegree must be greater than 1\"\n availableNodes = {LETTERS[nNodes-1]:0}\n pairs = set()\n # EXTEND THE TREE WITHOUT CYCLES\n for i in range(nNodes-1):\n this = LETTERS[i]\n # select an available node\n other = availableNodes.keys()[random.randint(0,len(availableNodes)-1)]\n otherConn = availableNodes.pop(other)\n otherConn +=1\n pairs.add((this,other))\n #select a \n availableNodes[this] = 1\n if otherConn < maxDegree:\n availableNodes[other] = otherConn\n print \"edges without cycles: %s\"%(len(pairs))\n # NOW ADD IN THE CYCLES\n attempts = 0\n maxAttempts = nLoops*3\n while (len(pairs) < nNodes + nLoops - 1) and attempts < maxAttempts:\n attempts += 1\n for i in range(nLoops):\n this = availableNodes.keys()[random.randint(0,len(availableNodes)-1)]\n thisConn = availableNodes.pop(this)\n other = availableNodes.keys()[random.randint(0,len(availableNodes)-1)]\n otherConn = availableNodes.pop(other)\n pairs.add((this,other))\n # incriment the number of edges per node\n thisConn +=1\n otherConn +=1\n if thisConn < maxDegree:\n availableNodes[this] = thisConn\n if otherConn < maxDegree:\n availableNodes[other] = otherConn\n return list(pairs)", "def generate_random_network(self):\r\n degrees = self.main_degree_distribution.sample_sequence(self.node_number)\r\n self.network = nx.expected_degree_graph(degrees, seed=None, selfloops=False)\r\n for n in self.network.nodes:\r\n self.network.nodes[n]['state'] = State(self.model)\r\n self.network.nodes[n]['event_prob'] = self.event_probability.sample()\r\n for e in self.network.edges:\r\n self.network[e[0]][e[1]]['p'] = self.infection_probability_distribution.sample()\r\n self.network[e[0]][e[1]]['mlogp'] = -np.log(self.network[e[0]][e[1]]['p'])", "def generate_graph(N, p):\n # generate lattice\n grid_graph = nx.Graph()\n for x,y in itertools.product(range(N), repeat=2):\n for dx,dy in itertools.product(range(-1,2), repeat=2):\n grid_graph.add_edge((x,y), ((x+dx)%N,(y+dy)%N))\n\n # generate scale-free graph\n ba_graph = nx.barabasi_albert_graph(N**2, 1)\n\n # combine all graphs\n grid_mat = nx.to_numpy_matrix(grid_graph)\n ba_mat = nx.to_numpy_matrix(ba_graph)\n assert grid_mat.shape == ba_mat.shape == (N**2, N**2), (grid_mat.shape, ba_mat.shape)\n\n res_mat = -np.ones((N**2, N**2))\n for i,j in np.ndindex(res_mat.shape):\n res_mat[i,j] = ba_mat[i,j] if np.random.random() < p else grid_mat[i,j]\n assert (res_mat != -1).all(), res_mat\n\n res_graph = nx.from_numpy_matrix(res_mat)\n\n # reset node names\n mapping = dict(zip(res_graph.nodes(), grid_graph.nodes()))\n nx.relabel_nodes(res_graph, mapping, copy=False)\n\n return res_graph", "def havel_hakimi_custom_graph(deg_sequence):\n\n if not (nx.is_valid_degree_sequence(deg_sequence) or nx.is_graphical(deg_sequence) or nx.is_valid_degree_sequence_erdos_gallai(deg_sequence)):\n raise nx.NetworkXError('Invalid degree sequence')\n\n p = len(deg_sequence)\n G=nx.empty_graph(p)\n num_degs = []\n for i in range(p):\n num_degs.append([])\n dmin, dmax, dsum, n = 10000000, 0, 0, 0\n for d in deg_sequence:\n # Process only the non-zero integers\n if d>0:\n 
num_degs[d].append(n)\n dmin, dmax, dsum, n = min(dmin,d), max(dmax,d), dsum+d, n+1\n # Return graph if no edges\n if n==0:\n return G\n\n modstubs = [(0,0)]*(dmax+1)\n # Successively reduce degree sequence by removing the maximum degree\n while n > 0:\n # Retrieve the maximum degree in the sequence\n while len(num_degs[dmax]) == 0:\n dmax -= 1;\n while len(num_degs[dmin]) == 0:\n dmax += 1;\n # If there are not enough stubs to connect to, then the sequence is\n # not graphical\n if dmax > n-1:\n raise nx.NetworkXError('Non-graphical integer sequence')\n \n # Remove most little stub in list\n source = num_degs[dmin].pop()\n n -= 1\n # Reduce the dmin largest stubs\n mslen = 0\n k = dmax\n for i in range(dmin):\n while len(num_degs[k]) == 0:\n k -= 1\n target = num_degs[k].pop()\n G.add_edge(source, target)\n n -= 1\n if k > 1:\n modstubs[mslen] = (k-1,target)\n mslen += 1\n # Add back to the list any nonzero stubs that were removed\n for i in range(mslen):\n (stubval, stubtarget) = modstubs[i]\n num_degs[stubval].append(stubtarget)\n n += 1\n\n G.name=\"havel_hakimi_graph %d nodes %d edges\"%(G.order(),G.size())\n return G", "def g05_graph(n, seed=0):\n\n graph = nx.gnp_random_graph(n, 0.5, seed)\n\n return graph", "def generate_random_graph(number_of_nodes, probability_of_edge):\n\n G = nx.fast_gnp_random_graph(number_of_nodes, probability_of_edge, seed=None, directed=False)\n edges = []\n for i in range(number_of_nodes):\n edges.append(list(G.adj[i].keys()))\n return G.number_of_edges(), edges", "def inet(n,alpha,tau):\n G= nx.MultiGraph()\n degree = {}\n full_nodes = []\n connected_nodes = []\n unconnected_nodes = range(n)\n sum_deg = 0\n \n for i in range(n):\n G.add_node(i)\n degree[i] = rand_pl(alpha,1)\n sum_deg += degree[i]\n\n \n deg_sort = sorted([(degree[i],i) for i in range(n)],reverse=True)\n top_tau = [deg_sort[i][1] for i in range(tau)]\n\n for i in range(tau):\n connected_nodes.append(top_tau[i])\n unconnected_nodes.remove(top_tau[i])\n degree[top_tau[i]] -= (tau-1)\n for j in range(i+1,tau):\n G.add_edge(top_tau[i],top_tau[j])\n sum_deg -= 2\n\n\n deg_two_nodes = [i for i in range(n) if degree[i] == 2]\n \n for t in top_tau:\n for j in range(int(degree[t]*0.25)):\n try:\n x = random.choice(deg_two_nodes)\n except:\n break\n G.add_edge(t,x)\n deg_two_nodes.remove(x)\n degree[t] -= 1\n degree[x] -= 1\n sum_deg -= 2\n connected_nodes.append(x)\n unconnected_nodes.remove(x)\n \n while not (unconnected_nodes == []):\n u = random.choice(unconnected_nodes)\n v = random.choice(connected_nodes)\n if not (degree[v]==0):\n G.add_edge(u,v)\n connected_nodes.append(u)\n unconnected_nodes.remove(u)\n degree[u] -= 1\n degree[v] -= 1\n sum_deg -= 2\n if degree[u] == 0:\n connected_nodes.remove(u)\n full_nodes.append(u)\n if degree[v] == 0:\n connected_nodes.remove(v)\n full_nodes.append(v)\n\n num_repeats = 0\n while not (connected_nodes == []):\n if len(connected_nodes) % 1 == 0:\n print(len(connected_nodes))\n u = random.choice(connected_nodes)\n v = random.choice(connected_nodes)\n #print(connected_nodes)\n #print(G.edges(connected_nodes))\n if not(u==v) and not G.has_edge(u,v):\n sum_deg -= 2\n G.add_edge(u,v)\n degree[v] -= 1\n degree[u] -= 1\n if degree[u] == 0:\n connected_nodes.remove(u)\n full_nodes.append(u)\n if degree[v] == 0:\n connected_nodes.remove(v)\n full_nodes.append(v)\n elif (u==v) and len(connected_nodes) ==1:\n G.add_edge(u,v)\n degree[u] -= 2\n connected_nodes.remove(u)\n full_nodes.append(u)\n sum_deg -= 2\n elif G.has_edge(u,v) and num_repeats < 10: # This is 
definitely a hack\n num_repeats += 1\n elif G.has_edge(u,v) and num_repeats >= 10:\n num_repeats = 0\n G.add_edge(u,v)\n degree[v] -= 1\n degree[u] -= 1\n sum_deg -= 2\n if degree[u] == 0:\n connected_nodes.remove(u)\n full_nodes.append(u)\n if degree[v] == 0:\n connected_nodes.remove(v)\n full_nodes.append(v)\n return G", "def generate_random_seeded_skipgraph(n, seed, constructor = SkipGraph):\n random.seed(seed)\n S = constructor()\n for i in range(n):\n node = LLNode(i)\n memvec = [random.choice([0,1]) for k in range(n)]\n node.set_memvec(memvec)\n S.insert(node)\n return S", "def mk_graph(n, k, seed=0, keep_zero_edges=True):\n return generate_weighted_random_regular_graph(n, k, lambda: one_or_minus_one(seed), seed, keep_zero_edges)", "def build_graph(sequence):\n graph = Graph()\n for item in sequence:\n if not graph.has_node(item):\n graph.add_node(item)\n return graph", "def gen(D, V):\n\tif(D * V > V * (V - 1)):\n\t\tprint \"No such graph: total degree should be less than of equal to 2 x maximal number of edge\"\n\t\treturn Graph([])\n\tsets = map(list, [[]] * (D + 1))\n\tsets[0] = map(Vertex, range(1, V + 1))\n\tmin_d = 0\n\tv1 = Vertex()\n\tv2 = Vertex()\n\twhile(len(sets[D]) < V - 1 ):\n\t\tv = random.randrange (V - len(sets[D]))\n\t\tif(v / len(sets[min_d]) == 0):\n\t\t\tv1 = sets[min_d].pop(v)\n\t\t\tif(len(sets[min_d]) == 0):\n\t\t\t\tmin_d = min_d + 1\n\t\telse:\n\t\t\tv = v - len(sets[min_d])\n\t\t\tv1 = sets[min_d + 1].pop(v)\n\t\tl = D - len(v1.list)\n\t\tpl = 0\n\t\twhile(l > 0 and min_d < D):\n\t\t\tuntouched_len = len(sets[min_d]) - pl \n\t\t\tif(l < untouched_len):\n\t\t\t\tfor i in range(l):\n\t\t\t\t\tv2 = sets[min_d].pop(random.randrange(len(sets[min_d]) - pl))\n\t\t\t\t\tweight = random.randint(1,MAX_EDGE_WEIGHT)\n\t\t\t\t\tv2.add_adjacency_vertex(v1.label, weight)\n\t\t\t\t\tv1.add_adjacency_vertex(v2.label, weight)\n\t\t\t\t\tsets[min_d + 1].append(v2)\n\t\t\t\tl = 0\n\t\t\telse:\n\t\t\t\tfor i in range(untouched_len):\n\t\t\t\t\tv2 = sets[min_d].pop(random.randrange(len(sets[min_d]) - pl))\n\t\t\t\t\tweight = random.randint(1,MAX_EDGE_WEIGHT)\n\t\t\t\t\tv2.add_adjacency_vertex(v1.label, weight)\n\t\t\t\t\tv1.add_adjacency_vertex(v2.label, weight)\n\t\t\t\t\tsets[min_d + 1].append(v2)\n\t\t\t\tl = l - untouched_len\n\t\t\t\tpl = untouched_len\n\t\t\t\tmin_d = min_d + 1\n\t\tsets[D].append(v1)\n\tif(len(sets[D]) == V):\n\t\treturn Graph(sets[D])\n\telse:\n\t\tprint \"No such graph: Returned graph has a vertex with degree less than D\"\n\t\tsets[D].append(sets[D - 1].pop())\n\t\treturn Graph(sets[D])", "def mkGraph(sum_n, linkRate, maxCPU, maxBw):\n nodes = []\n links = []\n for i in range(0 , sum_n):\n while True:\n tmpNode = Node(random.random()*maxCPU)\n if tmpNode != 0:\n break\n nodes.append(tmpNode)\n l = Link(0, 1, random.random()*maxCPU)\n links.append(l)\n for i in range(2 , len(nodes)):\n flag = True\n for j in range(0 , i):\n if random.random() <= linkRate:\n tmpL = Link(j , i, random.random()*maxBw)\n links.append(tmpL)\n flag = False\n if flag:\n target = math.floor(random.random()*i)\n if target == i:\n target -= 1\n tmpL = Link(target, i, random.random()*maxBw)\n links.append(tmpL)\n net = {\"nodes\": nodes,\"links\":links}\n return net", "def make_random(n: int, p: float) -> Graph:\n assert(n >= 0 and 0 <= p <= 1)\n\n g = Graph(n)\n\n for (u,v) in combinations(range(n), 2):\n if random() < p:\n g.add_edge(u,v)\n \n return g", "def create_random_graph(no_vertices, no_edges):\r\n if no_vertices < 0 or no_edges < 0:\r\n raise GraphException(\"Error! 
The number of edges and number of vertices must be non-negative.\")\r\n if no_edges > no_vertices * (no_vertices - 1):\r\n raise GraphException(\"Error! Too many edges given.\")\r\n random_graph = DirectedGraph()\r\n vertex = 0\r\n while vertex < no_vertices:\r\n duration = random.randrange(1, 11) # The random duration will be in the range [1, 10]\r\n random_graph.add_vertex(vertex, duration)\r\n vertex += 1\r\n while no_edges:\r\n _from = random.randrange(0, no_vertices)\r\n _to = random.randrange(0, no_vertices)\r\n if not random_graph.is_edge_in_graph(_from, _to):\r\n random_graph.add_edge(_from, _to)\r\n no_edges = no_edges - 1\r\n return random_graph", "def generate_random_skipgraph(n, constructor = SkipGraph):\n S = constructor()\n S.init_random(list(range(n)))\n return S", "def pm1d_graph(n, seed=0, keep_zero_edges=True):\n return generate_weighted_random_graph(n, 0.99, lambda: one_or_minus_one(seed), seed, keep_zero_edges)", "def generate_balanced_skipgraph(n, constructor = SkipGraph):\n\n l = int(math.log(n,2)) - 3\n mvecs = [[0,0,0],[1,0,0], [0,1,0], [1,1,0],[0,0,1], [0,1,1], [1,0,1], [1,1,1]]\n while l > 0:\n mvecs = [i + [0] for i in mvecs] + [i + [1] for i in mvecs]\n l -= 1\n S = constructor()\n n = list(range(n))\n random.shuffle(n)\n for i in n:\n node = LLNode(i)\n node.set_memvec(mvecs[i])\n S.insert(node)\n return S", "def construct_random_hypergraph(n, m, r):\n return construct_low_conductance_hypergraph(n, 0, m, r, 0, 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the current hypergraph object into a bipartite graph via canonical realisation. This chooses a labelling for the edges of the hypergraph, and uses this to create the associated bipartite graph. Note that because edges in the hypergraph are unlabelled (unlike the vertices in the bipartite graph), this can result in different outputs. Hence, we include an argument to shuffle the edges. If edges are shuffled, the resulting labelling is effectively random; if this is set to false, the labelling is nonrandom (which is useful if results are required to be identical between runs).
def to_bipartite_graph(self, shuffle_edges: bool = True) -> "SwitchBipartiteGraph": # get edges in desired order (this determines labelling) hyperedges = list(self.edges) if shuffle_edges: random.shuffle(hyperedges) else: hyperedges = sorted(tuple(sorted(edge)) for edge in hyperedges) bipartite_edges = [(x, y) for y, edge in enumerate(hyperedges) for x in edge] bipartite_graph = random_graph.graphs.SwitchBipartiteGraph(nx=self.n, ny=self.m, edges=bipartite_edges) return bipartite_graph
[ "def make_false_label_edges(self, dict_class_label_edge):\n data_path = self.args.data_name + '_false_edges_balanced_{}.pickle'.format(self.args.false_per_true)\n if os.path.exists(os.path.join(self.args.data_name, data_path)):\n with open(os.path.join(self.args.data_name, data_path), 'rb') as handle:\n dict_class_false_edges = pickle.load(handle)\n else:\n dict_class_false_edges = {}\n labels = list(dict_class_label_edge.keys())\n false_labels = []\n for label in labels:\n for edge in dict_class_label_edge[label]:\n if edge[0][0] == 'c':\n label = edge[0]\n movie = edge[1]\n else:\n label = edge[1]\n movie = edge[0]\n if len(false_labels) < self.args.false_per_true + 1:\n false_labels = list(set(labels) - set(label))\n else:\n false_labels = list(set(false_labels) - set(label))\n indexes = random.sample(range(1, len(false_labels)), self.args.false_per_true)\n for i, index in enumerate(indexes):\n if dict_class_false_edges.get(label) is None:\n dict_class_false_edges[label] = [[movie, false_labels[index]]]\n else:\n edges = dict_class_false_edges[label]\n edges.append([movie, false_labels[index]])\n dict_class_false_edges[label] = edges\n false_labels = list(np.delete(np.array(false_labels), indexes))\n try:\n with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:\n pickle.dump(dict_class_false_edges, handle, protocol=3)\n except:\n pass\n return dict_class_false_edges", "def randomize_edge_directions(g, p = .5):\n \"\"\"\n Hiroki's Algorithm:\n 1. Create a list of all node pairs that are connected by at least one way (or both ways).\n \n 2. For each pair in the list created above, independently decide whether you want to swap the directions of their edges (with, say, 50% probability).\n \n 3. For those node pairs for which the edge direction reversal was decided in 2, swap the directions of the edges between them (i.e., i –> j becomes j –> i, and j –> i becomes i –> j; you can just remove the old edges and create new ones).\n \"\"\"\n old_nodes = list(g.nodes)\n node_pairs = list(itertools.combinations(g.nodes, 2))\n connected_nodes = {pair: [] for pair in node_pairs}\n for pair in connected_nodes.keys():\n u = pair[0]\n v = pair[1]\n for a, b, c in g.edges(data = True):\n if (a, b) == (u, v) or (a, b) == (v, u):\n connected_nodes[pair].append((a, b, c))\n \n for pair in list(connected_nodes.keys()):\n if connected_nodes[pair] == []:\n del connected_nodes[pair]\n \n new_edges = []\n for pair in list(connected_nodes.keys()):\n if random.random() < p:\n for edge in connected_nodes[pair]:\n new = (edge[1], edge[0], edge[2])\n new_edges.append(new)\n else:\n for edge in connected_nodes[pair]:\n new_edges.append(edge)\n\n new_graph = nx.DiGraph()\n new_graph.add_nodes_from(old_nodes)\n new_graph.add_edges_from(new_edges)\n\n return new_graph", "def make_label_edges(self):\n data_path = self.args.data_name + '_true_edges.pickle'\n if os.path.exists(os.path.join(self.args.data_name, data_path)):\n with open(os.path.join(self.args.data_name, data_path), 'rb') as handle:\n label_edges = pickle.load(handle)\n else:\n nodes = list(self.graph.nodes)\n label_edges = []\n for node in nodes:\n info = self.graph._adj[node]\n neighs = list(info.keys())\n for neigh in neighs:\n if info[neigh][0]['key'] == 'labels_edges':\n label_edges.append([node, neigh])\n try:\n with open(os.path.join(self.args.data_name, data_path), 'wb') as handle:\n pickle.dump(label_edges, handle, protocol=3)\n except:\n pass\n return label_edges", "def permute(g: nx.Graph) -> nx.Graph:\n permutation = 
np.random.permutation(g.nodes)\n mapping = {k: v for (k, v) in zip(g.nodes, permutation)}\n\n h = nx.relabel_nodes(g, mapping=mapping)\n\n return h", "def get_sample(self):\n\t\tGraph = nx.Graph()\n\t\tdataset = open(self.dataset)\n\t\tfor line in dataset.readlines():\n\t\t\tline = line.strip()\n\t\t\tcol = line.split(';')\n\t\t\tiid = int(col[0])\n\t\t\tGraph.add_node(iid, {'bipartite': 0})\n\t\t\tpid = \"p%s\" %(int(col[1]))\n\t\t\tGraph.add_node(pid, {'bipartite': 1})\n\t\t\tGraph.add_edge(iid, pid)\n\t\t\n\t\tdataset.close()\n\t\t\n\t\tself.original_graph = Graph.copy()\n\n\t\tnodes = set(n for n,d in Graph.nodes(data=True) if d['bipartite'] == 1)\n\t\t\n\t\tGt = nx.Graph()\n\t\tfor node in nodes:\n\t\t\tfor intermediate_node in list(nx.all_neighbors(Graph, node)):\n\t\t\t\tfor second_node in list(nx.all_neighbors(Graph, intermediate_node)):\n\t\t\t\t\tGt.add_edge(node, second_node)\n\t\t\n\t\tself.graph_training = Gt.copy()\n\t\tself.graph_test = Gt.copy() if self.task == 1 else self.get_future_links_graph()\n\t\t\n\t\tself.get_positive_examples()\n\t\tself.get_negative_examples()", "def __flip_labels(self):\n amount_to_flip = int(self.mislabel_ratio * self.true_labels.shape[0])\n self.mislabel_indices = np.random.choice(np.arange(self.true_labels.shape[0]), amount_to_flip, replace=False)\n\n random_shift = np.zeros(self.true_labels.shape[0])\n random_shift[self.mislabel_indices] = 1\n\n # This operation is only good for IMDB as we assume that the number of classes is 2\n self.unknown_labels = (self.true_labels + random_shift).astype(int) % 2\n\n print('Labels have been flipped.')", "def shuffle_data(self):\n if self._has_rot is True:\n self._num_rot = np.amax(self._info[:, 2]) - np.amin(self._info[:, 2]) + 1\n else:\n self._num_rot = 1\n # Fisher-Yatest shuffle assuming that rotations of one obj are together\n for fy_i in range(self._labels.shape[0] - 1, 1 + self._num_rot, -1 * self._num_rot):\n fy_j = np.random.randint(1, int((fy_i + 1) / self._num_rot) + 1) * self._num_rot - 1\n if fy_j - self._num_rot < 0:\n self._features[fy_i:fy_i - self._num_rot:-1], self._features[fy_j::-1] =\\\n self._features[fy_j::-1], self._features[fy_i:fy_i - self._num_rot:-1].copy()\n self._labels[fy_i:fy_i - self._num_rot:-1], self._labels[fy_j::-1] =\\\n self._labels[fy_j::-1], self._labels[fy_i:fy_i - self._num_rot:-1].copy()\n self._info[fy_i:fy_i - self._num_rot:-1], self._info[fy_j::-1] =\\\n self._info[fy_j::-1], self._info[fy_i:fy_i - self._num_rot:-1].copy()\n else:\n self._features[fy_i:fy_i - self._num_rot:-1], self._features[fy_j:fy_j - self._num_rot:-1] =\\\n self._features[fy_j:fy_j - self._num_rot:-1], self._features[fy_i:fy_i - self._num_rot:-1].copy()\n self._labels[fy_i:fy_i - self._num_rot:-1], self._labels[fy_j:fy_j - self._num_rot:-1] =\\\n self._labels[fy_j:fy_j - self._num_rot:-1], self._labels[fy_i:fy_i - self._num_rot:-1].copy()\n self._info[fy_i:fy_i - self._num_rot:-1], self._info[fy_j:fy_j - self._num_rot:-1] =\\\n self._info[fy_j:fy_j - self._num_rot:-1], self._info[fy_i:fy_i - self._num_rot:-1].copy()", "def LabelPropagation(graph):\n nodes = list(graph.nodes())\n n = len(nodes)\n adjDict = adjacencyDict(graph)\n labels = {i: i for i in graph.nodes}\n frequencies = np.ones(n)\n dontStop = True\n while dontStop:\n nodes = FYshuffle(nodes)\n dontStop = False\n for node in nodes:\n nodeAdj = adjDict[node]\n if len(nodeAdj) != 0:\n popular_neighbor = nodeAdj[np.argmax(frequencies[nodeAdj])]\n if labels[node] != labels[popular_neighbor]:\n dontStop = True\n labels[node] = 
labels[popular_neighbor]\n frequencies[node] = max(frequencies[node]-1, 0)\n frequencies[popular_neighbor] += 1\n else:\n frequencies[node] = n+1\n\n return labels", "def remove_edges_from_graph(base_graph, edge_label_list, scale, min_num_edges):\n\n graph = base_graph.copy()\n\n for node in graph:\n # Select all the neighbors that share an edge of a particular label\n neighbors = [neighbor for neighbor in list(graph[node].keys())\n if graph.edges[node, neighbor][\"label\"] in edge_label_list]\n\n # If there are no neighbors that have a label from edge_label_list, we\n # continue\n if neighbors:\n # Randomly draw the number of edges to keep\n quarantine_edge_num = int(max(min(np.random.exponential(\n scale=scale, size=1), len(neighbors)), min_num_edges))\n\n if quarantine_edge_num <= len(neighbors):\n # Create the list of neighbors to keep\n quarantine_keep_neighbors = np.random.choice(\n neighbors, size=quarantine_edge_num, replace=False)\n\n # Remove edges that are not in te list of neighbors to keep\n for neighbor in neighbors:\n if neighbor not in quarantine_keep_neighbors:\n graph.remove_edge(node, neighbor)\n\n return graph", "def shuffle_nodes(self):\n for node in TreeIterator(self.tree, lambda n: not n.is_leaf()):\n for g in node.grouped_children:\n shuffle(g)\n node.swap(g)\n self._update_elements_()\n return self", "def shuffle_data(self):\r\n images = self.generate_empty_lst()\r\n\r\n for index, ch_input in enumerate(self.images):\r\n images[index] = copy(ch_input)\r\n\r\n labels = copy(self.labels)\r\n\r\n self.images = self.generate_empty_lst()\r\n self.labels = []\r\n\r\n # create list of permutated index and shuffle data accoding to list\r\n idx = np.random.permutation(len(labels))\r\n\r\n for i in idx:\r\n for index in range(self.num_ch):\r\n self.images[index].append(images[index][i])\r\n\r\n self.labels.append(labels[i])", "def _replace_edge_labels_with_nodes(G, next_id, interpreter, training_data):\n\n message_generator = UserMessageGenerator(training_data) if training_data else None\n\n for s, e, k, d in G.edges(keys=True, data=True):\n if k != EDGE_NONE_LABEL:\n if message_generator and d.get(\"label\", k) is not None:\n parsed_info = interpreter.parse(d.get(\"label\", k))\n label = message_generator.message_for_data(parsed_info)\n else:\n label = d.get(\"label\", k)\n next_id += 1\n G.remove_edge(s, e, k)\n G.add_node(next_id, label=label, style=\"filled\", fillcolor=\"lightblue\", shape=\"box\")\n G.add_edge(s, next_id)\n G.add_edge(next_id, e)", "def reshuffle_connections(self):\n old_connections = self.connections\n\n self.connections = []\n for i in range(len(old_connections)):\n from_neuron, to_neuron = old_connections[i]\n from_layer = self.find_layer(from_neuron)\n to_layer = self.find_layer(to_neuron)\n from_neuron = random.choice(self.neurons_in_layer[from_layer])\n to_neuron = random.choice(self.neurons_in_layer[to_layer])\n self.connections.append((from_neuron, to_neuron))", "def __BipartiteToSingle(graph):\n single_graph = graph.dot(graph.T)\n single_graph[single_graph != 0] = 1\n single_graph -= sp.identity(graph.shape[0])\n return single_graph", "def to_bipartite(G):\n if not G.is_directed():\n raise nx.NetworkXError(\"G must be a directed Graph\")\n if G.is_multigraph():\n H = nx.MultiGraph()\n else:\n H = nx.Graph()\n for n in G:\n H.add_node((n,'+'))\n H.add_node((n,'-'))\n for (u,v) in G.edges_iter():\n H.add_edge((u,'+'),(v,'-'))\n\n return H", "def shuffle(self, norestart=False):\n rand_order = np.random.permutation(self.N)\n self._labels = 
self._labels[rand_order]\n self._boxfiles = [self._boxfiles[i] for i in rand_order]\n\n if not norestart:\n self._restart_worker()", "def isBipartite(self, graph):\n if not graph: return True\n colors = {}\n for node, edges in enumerate(graph):\n node_color = colors.get(node)\n if not node_color:\n colors[node] = 'blue'\n for edge in edges:\n if colors.get(edge) == colors[node]:\n return False\n else:\n colors[edge] = ['blue', 'red'][colors[node] == 'blue']\n return True", "def flip_graph(self):\n from collections import defaultdict\n if not self.is_pure():\n return None\n d = self.dimension()\n Fs = self.facets()\n flipG = Graph()\n flipG.add_vertices(Fs)\n edges = defaultdict(list)\n # go through all codim 1 faces to build the edge\n for F in Fs:\n F_tuple = sorted(F._Simplex__set)\n for i in range(d+1):\n coF = tuple(F_tuple[:i]+F_tuple[i+1:])\n if coF in edges:\n for G in edges[coF]:\n flipG.add_edge((F, G))\n edges[coF].append(F)\n return flipG", "def shuffle_bricks(bricks):\n\n random.shuffle(bricks)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether the multihypergraph graph is simple. A multihypergraph is simple if no two edges are the same.
def simple(self) -> bool: return random_graph.utils.all_unique(tuple(sorted(neighbourhood)) for neighbourhood in self.edges)
[ "def is_multigraph(self):\n s = set()\n for (a1, s1), (a2, s2) in self.bonds:\n if (a1, a2) in s:\n return True\n else:\n s.add((a1, a2))\n return False", "def is_pseudomanifold(self):\n if not self.is_pure():\n return False\n d = self.dimension()\n if d == 0:\n return len(self.facets()) == 2\n F = self.facets()\n X = self.faces()[d-1]\n # is each (d-1)-simplex is the face of exactly two facets?\n for s in X:\n if len([a for a in [s.is_face(f) for f in F] if a]) != 2:\n return False\n # construct a graph with one vertex for each facet, one edge\n # when two facets intersect in a (d-1)-simplex, and see\n # whether that graph is connected.\n V = [f.set() for f in self.facets()]\n E = (lambda a, b: len(a.intersection(b)) == d)\n g = Graph([V, E])\n return g.is_connected()", "def _is_strongly_connected(g: nx.MultiDiGraph) -> Tuple[str, bool]:\n _, number_of_nodes = _number_of_nodes(g)\n ret = False if number_of_nodes == 0 else nx.is_strongly_connected(g)\n return \"Is strongly connected\", ret", "def check(\n self, first_graph: Optional[ConjunctiveGraph], graph: ConjunctiveGraph\n ) -> None:\n if self.quad_count is not None:\n assert self.quad_count == len(list(graph.quads()))\n if first_graph is not None and self.exact_match:\n GraphHelper.assert_quad_sets_equals(first_graph, graph)\n if first_graph is None and self.has_subject_iris is not None:\n subjects_iris = {\n f\"{subject}\"\n for subject in graph.subjects()\n if isinstance(subject, URIRef)\n }\n assert set(self.has_subject_iris) == subjects_iris", "def regular(self):\n degs = {len([e for e in self._edges if x in e])\n for x in self._vertices}\n return len(degs) == 1", "def is_subgraph_isomorphic(self, other, initial_map=None, generate_initial_map=False, save_order=False):\n cython.declare(group=Group)\n cython.declare(mult1=cython.short, mult2=cython.short)\n cython.declare(a=GroupAtom, L=list)\n # It only makes sense to compare a Group to a Group for subgraph\n # isomorphism, so raise an exception if this is not what was requested\n if not isinstance(other, Group):\n raise TypeError(\n 'Got a {0} object for parameter \"other\", when a Group object is required.'.format(other.__class__))\n\n group = other\n\n if generate_initial_map:\n keys = []\n atms = []\n initial_map = dict()\n for atom in self.atoms:\n if atom.label and atom.label != '':\n L = [a for a in other.atoms if a.label == atom.label]\n if L == []:\n return False\n elif len(L) == 1:\n initial_map[atom] = L[0]\n else:\n keys.append(atom)\n atms.append(L)\n if atms:\n for atmlist in itertools.product(*atms):\n if len(set(atmlist)) != len(atmlist):\n # skip entries that map multiple graph atoms to the same subgraph atom\n continue\n for i, key in enumerate(keys):\n initial_map[key] = atmlist[i]\n if (self.is_mapping_valid(other, initial_map, equivalent=False) and\n Graph.is_subgraph_isomorphic(self, other, initial_map, save_order=save_order)):\n return True\n else:\n return False\n else:\n if not self.is_mapping_valid(other, initial_map, equivalent=False):\n return False\n\n if self.multiplicity:\n for mult1 in self.multiplicity:\n if group.multiplicity:\n for mult2 in group.multiplicity:\n if mult1 == mult2: break\n else:\n return False\n else:\n if group.multiplicity: return False\n # Do the isomorphism comparison\n return Graph.is_subgraph_isomorphic(self, other, initial_map, save_order=save_order)", "def is_single_node_partition(partition):\n for com in partition:\n if len(com) > 1:\n return False\n return True", "def __has_multiple_edges(self):\n return \\\n len(\n 
list(\n [\n tuple((edge.get_source_node().get_name(), edge.get_terminal_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_source_node().get_name(), edge.get_terminal_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise", "def hasParallelEdges(self): \n if self.__diGraph:\n raise \n for key in self.__Graph:\n temp = set()\n adjLst = self.__Graph[key]\n for item in adjLst:\n if item in temp:\n return True\n return False", "def _is_weakly_connected(g: nx.MultiDiGraph) -> Tuple[str, bool]:\n _, number_of_nodes = _number_of_nodes(g)\n ret = False if number_of_nodes == 0 else nx.is_weakly_connected(g)\n return \"Is weakly connected\", ret", "def simple_material(mat):\n return (mat is not None) and (not mat.use_nodes)", "def is_Bipartite(graph):\r\n if len(get_nodes(graph)) < 2:\r\n return False\r\n return True if paint(graph, 2) else False", "def check_graph():\n return None", "def is_graph(graph: Graph) -> bool:\r\n (n_vertices, edges) = graph\r\n for edge in edges:\r\n for vertex in edge:\r\n if not 1 <= vertex <= n_vertices:\r\n return False\r\n if edge[0] == edge[1]:\r\n return False\r\n return True", "def is_vertex_singular(self, vkey):\n\n\t\tif (self.is_vertex_on_boundary(vkey) and self.vertex_degree(vkey) != 3) or (not self.is_vertex_on_boundary(vkey) and self.vertex_degree(vkey) != 4):\n\t\t\treturn True\n\n\t\telse:\n\t\t\treturn False", "def check_collapse_manifold(mesh: Mesh, edge_key: int):\n\n # If the edge doesn't exist anymore, it can't be collapsed.\n if mesh.edge_mask[edge_key] == False:\n return False\n\n vertex_a, vertex_b = mesh.edges[edge_key]\n\n # Get the one-ring neighbors around the edge's vertices.\n def get_neighbor_vertices(vertex):\n neighbor_has_degree_3 = False\n neighbor_vertices = set()\n for edge_connection in mesh.vertex_to_edges[vertex]:\n other_vertex = mesh.edges[\n edge_connection.edge_index, 1 - edge_connection.index_in_edge\n ]\n if len(mesh.vertex_to_edges[other_vertex]) == 3:\n neighbor_has_degree_3 = True\n break\n\n neighbor_vertices.add(other_vertex)\n return neighbor_vertices, neighbor_has_degree_3\n\n # The mesh will remain manifold as long as there are exactly two shared\n # neighbors.\n vertex_a_neighbors, a_neighbor_invalid = get_neighbor_vertices(vertex_a)\n vertex_b_neighbors, b_neighbor_invalid = get_neighbor_vertices(vertex_b)\n return (\n len(vertex_a_neighbors & vertex_b_neighbors) == 2\n and not a_neighbor_invalid\n and not b_neighbor_invalid\n )", "def is_trivial_graph(g, node, args):\n nodes = [node\n for node in dfs(g.output,\n succ_incoming,\n freevars_boundary(g, False))\n if is_apply(node)]\n\n if len(nodes) == 0:\n return True\n elif len(nodes) == 1:\n app, = nodes\n return all(not is_constant_graph(inp) for inp in app.inputs[1:])\n else:\n return False", "def if_conn(graph):\n\n nodes = graph.nodes()\n first_node = nodes[0]\n last_node = nodes[-1]\n return nx.has_path(graph, first_node, last_node)", "def is_tied(mg, c1, c2): \n return not mg.has_edge(c1, c2) and not mg.has_edge(c2, c1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scan for existing Harmontown episodes, find the latest one by file name, not file date and return it
def last_episode(): highest_episode = 125 # The one before the first regular video episode available online highest_date = datetime.date(2014, 11, 3) for filename in os.listdir(HARMONTOWN_DIRECTORY): matches = re.match('Harmontown - S01E(\d+) - (\d+)-(\d+)-(\d+)\.mp4', filename) if matches and int(matches.group(1)) > highest_episode: highest_episode = int(matches.group(1)) highest_date = datetime.date( int(matches.group(2)), int(matches.group(3)), int(matches.group(4)) ) return highest_episode, highest_date
[ "def determine_next_episode(\n\t\tself):\n\n\t\tresult = dict()\n\n\t\tsjmanager.log.log('Trying to determine next show to watch')\n\n\t\t# First up, check which season and which episode is in the watch cache.\n\t\trow = self.sql.execute(\"\"\"SELECT \n\t\t\tseason_title,\n\t\t\tepisode_title,\n\t\t\tfinished \n\t\t\tFROM last_watched \n\t\t\tWHERE show_url = ?\"\"\",\n\t\t\t(self.url,)).fetchone()\n\n\t\tsjmanager.log.log(\"Fetched the following row: {}, {}, {}, {}\".format(row['season_title'],row['episode_title'],row['finished'],row['finished'] == str(0)))\n\n\t\t# If it's not finished, this means there's a cache file lying around, so\n\t\t# return episode and season title so we can find it.\n\t\tif str(row['finished']) == '0':\n\t\t\tsjmanager.log.log(\"Previous show isn't finished, so taking that as new show\")\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = row['episode_title']\n\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t'Ok, season title is {}, episode title is {}'.format(\n\t\t\t\trow['season_title'],\n\t\t\t\trow['episode_title']))\n\n\t\t# Otherwise, if the episode title isn't numeric, there's no chance to know\n\t\t# which episode (or even season) is next. So we return nothing\n\t\tif not row['episode_title'].isnumeric():\n\t\t\tsjmanager.log.log('The episode title is not numeric, so returning nothing')\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\t# If the episode title _is_ numeric, there's two cases that can happen:\n\t\t#\n\t\t# 1. There's an episode in the current season with a number one higher than\n\t\t# the current episode\n\t\t# 2. No episode with a higher number exists. In that case, maybe we have\n\t\t# another season to continue to\n\t\tsjmanager.log.log('Cool, the episode title is numeric')\n\n\t\tseasons = self.seasons(\n\t\t\trow['season_title'])\n\n\t\t# Get all the mangled episode titles in the season\n\t\tepisode_titles = set()\n\t\tfor season in seasons:\n\t\t\tepisode_titles = episode_titles.union(\n\t\t\t\tseason.episode_titles())\n\n\t\tif str(int(row['episode_title']) + 1) in episode_titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Cool, we've got an episode called {}, continuing with that\".format(\n\t\t\t\t\tint(row['episode_title']) + 1))\n\n\t\t\tresult['season_title'] = row['season_title']\n\t\t\tresult['episode_title'] = str(int(row['episode_title']) + 1)\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"No higher episode found, checking if season is numeric\")\n\n\t\tif not row['season_title'].isnumeric():\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"Season is not numeric, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"Season is numeric, checking if a higher season exists\")\n\n\t\ttitles = self.season_titles()\n\n\t\tif not str(int(row['season_title'])+1) in titles:\n\t\t\tsjmanager.log.log(\n\t\t\t\t\"No higher season exists, returning nothing\")\n\t\t\tresult['season_title'] = None\n\t\t\tresult['episode_title'] = None\n\t\t\treturn result\n\n\t\tsjmanager.log.log(\n\t\t\t\"A higher season exists, returning this season but no episode\")\n\t\tresult['season_title'] = str(int(row['season_title'])+1)\n\t\tresult['episode_title'] = None\n\t\treturn result", "def _find_next_episode(self, episodes):\n today = date.today()\n rw = None\n timespan = None\n\n # Search for the episode which airs next (air date is the closest to now)\n for episode in 
episodes:\n try:\n airdate = datetime.strptime(episode['firstAired'], '%Y-%m-%d')\n airdate = airdate.date()\n if airdate >= today:\n ctimespan = airdate - today\n if timespan is None or ctimespan < timespan:\n rw = episode\n timespan = ctimespan\n except:\n continue\n return rw", "def findLatestMetaFile(name): \n\n directory = \"./savedModels/\"+name\n if not(os.path.isdir(directory)):\n print(\"Meta file not found (directory not found)\")\n return -1, \"\"\n\n onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]\n biggest_step=-1\n file_with_biggest_step=\"\"\n for file in onlyfiles:\n filename, file_extension = os.path.splitext(file)\n beginning = \"state_at_step-\"\n if file_extension==\".meta\" and filename.startswith(beginning):\n rest=filename[len(beginning):]\n try:\n int_value = int(rest)\n if int_value > biggest_step:\n biggest_step=int_value\n file_with_biggest_step=filename+file_extension\n except ValueError:\n pass\n if biggest_step!=-1:\n print(\"Biggest step found is \", biggest_step)\n print(\"Meta file is \" + file_with_biggest_step)\n else:\n print(\"Meta file not found\")\n return biggest_step, file_with_biggest_step", "def episode(self,title):\n\t\tassert isinstance(title,str)\n\n\t\tfor episode in self.episodes:\n\t\t\tsjmanager.log.log('Episode title is {}'.format(episode.title))\n\t\t\tif episode.title == title:\n\t\t\t\treturn episode\n\n\t\treturn None", "def find_episode_pattern(filename): \n print(\"A:\", filename)\n patterns = []\n patterns.append(\"\\ss\\d+\\se\\d+\") \n patterns.append(\"\\ss\\d+e\\d+\")\n patterns.append(\"\\sS\\d+\\sE\\d+\") \n patterns.append(\"\\sS\\d+E\\d+\") \n patterns.append(\"\\sS\\d+\\se\\d+\") \n patterns.append(\"\\sS\\d+e\\d+\")\n patterns.append(\"\\ss\\d+\\sE\\d+\")\n patterns.append(\"\\ss\\d+E\\d+\")\n\n found = None\n for pattern in patterns:\n found = re.search(pattern, filename)\n if found is not None:\n found = found.group(0).strip()\n break\n print(\"B:\", found)\n if found is None:\n patterns = []\n patterns.append(\"\\sseason\\d+episode\\d+\")\n patterns.append(\"\\sSeason\\d+Episode\\d+\")\n patterns.append(\"\\sseason\\s\\d+episode\\s\\d+\")\n patterns.append(\"\\sSeason\\s\\d+Episode\\s\\d+\")\n\n for pattern in patterns:\n found = re.search(pattern, filename)\n print(\"C:\", found)\n if found is not None:\n found = found.group(0).split()\n print(found)\n break\n\n return found", "def get_episode_info(filename): \n episode_tag, season, episode = None, None, None\n episode_tag = find_episode_pattern(filename)\n if episode_tag is not None:\n pattern = episode_tag.lower().replace(\"s\",\" \").replace(\"e\",\" \")\n pattern_array = pattern.split()\n season = int(pattern_array[0])\n episode = int(pattern_array[1])\n season = \"{:0>2}\".format(season)\n episode = \"{:0>2}\".format(episode)\n return episode_tag, season, episode", "def _find_latest_file(path):\n files = glob.glob(path)\n if files:\n return max(files, key=os.path.getctime)\n return None", "def download_episode(self):\n\n logger.info(\"Starting download...\")\n for shot_code, shot_data in self.shots.copy().items():\n sg_shot = sg.find_one(\n \"Shot\",\n [\n self.project_filter,\n [\"code\", \"is\", shot_code],\n ],\n )\n filters = self.get_filters(sg_shot)\n\n # Get version shotgun data\n versions = self.get_versions(filters)\n if versions:\n version = versions[0]\n else:\n problem = \"No versions found for shot directory {}\".format(shot_code)\n logger.error(problem)\n self.problems.append(problem)\n continue\n\n shot_data[\"version\"] = 
version\n self.shots[shot_code] = shot_data\n\n # Download version\n success = False\n if not self.dry_run:\n success = self.download_version(\n shot_data[\"version\"],\n shot_data[\"movie_file\"][\"path\"],\n )\n\n if not success:\n del self.shots[shot_code]\n\n if self.process_single:\n break\n\n logger.info(\"Finished {} download.\\n\".format(self.episode))", "def parse_episode_filename(dasc, ovrx={}, single=False, longep=False):\n fparse = {'series': None, 'season': None, 'episode': None, 'special': None}\n tdex_id = None\n dval = dasc['fpath']['base']\n\n # Regex matching rounds\n for rgx in fregex:\n logthis(\"Trying regex:\", suffix=rgx, loglevel=LL.DEBUG2)\n mm = re.search(rgx, dval, re.I)\n if mm:\n mm = mm.groupdict()\n # determine series name\n if mm.has_key('series'):\n if not single:\n ldist = distance.nlevenshtein(dasc['dpath']['base'].lower(), mm['series'].lower())\n if ldist < 0.26:\n sser = filter_fname(dasc['dpath']['base'])\n logthis(\"Using directory name for series name (ldist = %0.3f)\" % (ldist), loglevel=LL.DEBUG)\n else:\n sser = filter_fname(mm['series'])\n logthis(\"Using series name extracted from filename (ldist = %0.3f)\" % (ldist), loglevel=LL.DEBUG)\n else:\n sser = filter_fname(mm['series'])\n logthis(\"Using series name extracted from filename\", loglevel=LL.DEBUG)\n else:\n # Check base directory name; if it has the season number or season name\n sspc = re.match(r'(season|s)\\s*(?P<season>[0-9]{1,2})', dasc['dpath']['base'], re.I)\n if sspc:\n # Grab series name from the parent directory; tuck the season number away for later\n sspc = sspc.groupdict()\n mm['season'] = sspc['season']\n sser = dasc['dpath']['parent']\n else:\n # Directory name should be series name (if you name your directories properly!)\n sser = dasc['dpath']['base']\n\n # Parse out the fansub group name\n if mm.get('fansub', None) is not None:\n fansub = mm['fansub'].strip()\n else:\n fansub = None\n\n # Grab season name from parsed filename; if it doesn't exist, assume Season 1\n snum = mm.get('season', '1')\n if snum is None: snum = '1'\n\n # Get episode number\n epnum = mm.get('epnum', '0')\n if epnum is None: epnum = '0'\n\n # Fix episode number, if necessary\n if not longep:\n if int(epnum) > 100:\n # For numbers over 100, assume SSEE encoding\n # (ex: 103 = Season 1, Episode 3)\n epnum = int(mm['epnum'][-2:])\n snum = int(mm['epnum'][:(len(mm['epnum']) -2)])\n\n # Get special episode type\n special = mm.get('special', \"\")\n if special: special = special.strip()\n\n # Set overrides\n if ovrx:\n if ovrx.has_key('season'):\n snum = int(ovrx['season'])\n logthis(\"Season set by override. Season:\", suffix=snum, loglevel=LL.VERBOSE)\n\n if ovrx.has_key('series_name'):\n sser = ovrx['series_name']\n logthis(\"Series name set by override. Series:\", suffix=sser, loglevel=LL.VERBOSE)\n\n if ovrx.has_key('fansub'):\n fansub = ovrx['fansub']\n logthis(\"Fansub group set by override. 
Fansub:\", suffix=fansub, loglevel=LL.VERBOSE)\n\n logthis(\"Matched [%s] with regex:\" % (dval), suffix=rgx, loglevel=LL.DEBUG)\n logthis(\"> Ser[%s] Se#[%s] Ep#[%s] Special[%s] Fansub[%s]\" % (sser, snum, epnum, special, fansub), loglevel=LL.DEBUG)\n\n # Build output fparse array\n fparse = {'series': sser, 'season': int(snum), 'episode': int(epnum), 'special': special, 'fansub': fansub}\n\n # Add series to tdex\n tdex_id = mdb.series_add(sser, ovrx)\n\n break\n\n return (fparse, tdex_id)", "def best_fileref(self, date):\n candidates = list(self.list_forecasts(date).values())[0]\n for fileref in candidates:\n if fileref.status() == 'downloaded':\n return fileref\n return None # Not found", "def find_latest(filepath):\n\n search_pattern = re_version_take_user.sub('_v*', filepath)\n\n results = glob.glob(search_pattern)\n # print results\n if not results:\n return None\n\n path_by_version = {}\n for r in results:\n path_by_version[extract_version(r)] = r\n\n # sort by version and then by take\n sorted_versions = sorted(path_by_version.iterkeys(), key=attrgetter('version', 'take'))\n return path_by_version[sorted_versions[-1]]", "def lookup_episode(self, seriesName, episodeName, id = 0):\n\n episodeName = episodeName.strip()\n\n if seriesName is None or episodeName is None:\n return (None,None)\n\n if seriesName != self.last_series_name:\n logging.info(\" - Searching thetvdb for:{}\".format(seriesName))\n search = tvdb.Search()\n response = search.series(seriesName)\n s = search.series\n\n if id > 0:\n for s in response:\n if s['id'] == id:\n show = tvdb.Series(id)\n break\n elif id == 0:\n show = tvdb.Series(s[0]['id'])\n else:\n logging.info(' - Unable to find series id:{} - terminating'.format(id))\n return (None,None)\n\n self.last_series_name = seriesName\n self.last_series_obj = show\n\n show = self.last_series_obj\n\n episodes = show.Episodes.all()\n logging.info(\" - Found {} episodes\".format(len(episodes)))\n if (len(episodes)==0):\n return (\"error\", \"no episodes found\")\n\n if episodes == []:\n return (None, None)\n\n for i, e in enumerate(episodes[::-1]):\n ep_name = e['episodeName']\n\n if ep_name is not None:\n n = Levenshtein.distance(episodeName, ep_name)\n\n if n <= MATCH_TOLERANCE:\n e_id = e['airedEpisodeNumber']\n s_id = e['airedSeason']\n logging.info(\" - Matched [{0}] to episode name: [{1}]\".format(episodeName, ep_name))\n return (str(s_id), str(e_id))\n\n logging.info(\" - UNABLE TO MATCH: {}\".format(episodeName))\n return (\"error\", \"expected series not found\")", "def find_episodes(self):\n foundany = False\n if os.path.isdir(self._library):\n # Grab all the files\n for (dirpath, dirnames, filenames) in os.walk(self._library):\n for filename in filenames:\n item, title, start, end = self.resolve(filename)\n if item is not None:\n # Mark episode(s) as available\n abspath = os.path.abspath(os.path.join(dirpath, filename))\n for episode in range(start, end + 1):\n mappeditem, mappedepi = self.map_episode(item, episode)\n if mappeditem.add_episode(mappedepi, abspath):\n foundany = True\n return foundany", "def latest_file(dpath,pattern):\n # match pattern and find greatest file date\n filtered = fnmatch.filter(os.listdir(dpath),pattern)\n\n if filtered:\n return(max(filtered))\n else:\n raise IOError('No File Matched')", "def extract_latest_file(self, list_blobs):\n last_recent_file = None\n possible_recent_date_collision = False\n recent_date = datetime.strptime('01-01-1900', '%d-%m-%Y')\n for filename in list_blobs:\n date_file = 
extract_date_from_file(filename)\n if date_file:\n if date_file == recent_date:\n if not self.is_a_spark_directory(filename):\n possible_recent_date_collision = True\n else:\n # it is spark dir. Check if it is the same dir\n if os.path.dirname(filename) != os.path.dirname(last_recent_file):\n logger.debug(\"'{}' vs '{}'\".format(filename, last_recent_file))\n possible_recent_date_collision = True\n if date_file > recent_date:\n possible_recent_date_collision = False\n recent_date = date_file\n last_recent_file = filename\n if possible_recent_date_collision:\n # Raise an error. No filename is unique in the recent date selected.\n msg = \"Error TWO files with the same date: '{}' and '{}'\".format(last_recent_file,\n recent_date.strftime('%d-%m-%Y'))\n logger.error(msg)\n raise ValueError(msg)\n logger.info(\"Latest file: %s %s\", last_recent_file, recent_date.strftime('%d-%m-%Y'))\n return {\"latest_filename\": last_recent_file,\n \"suffix\": recent_date.strftime('%Y-%m-%d'), \"spark\": self.is_a_spark_directory(last_recent_file)}", "def latest_file_by_suffix(abspath_basename, extension):\n i = 1\n file_candidate = None\n while i < MAX_TRIES:\n suffix = '%05d' % i\n inc_file_name = \"%s_%s.%s\" % (abspath_basename, suffix, extension)\n if not os.path.exists(inc_file_name):\n return file_candidate\n file_candidate = inc_file_name\n i += 1", "def get_episode_info(p):\n season, episode = None, None\n\n _, name = os.path.split(p)\n\n for fmt in EPISODE_FMTS:\n match = re.search(fmt, name)\n\n if match:\n season = int(match.group(1))\n episode = int(match.group(2))\n break\n\n if not episode:\n raise ValueError(f'could not parse episode: {p}')\n\n return season, episode", "def tv(count, all_files):\n _latest_files('tvshows', count, all_files)", "def get_episodes():\r\n tvshow = \"\"\r\n tvshows = {\r\n \"game of thrones\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=jUJfW_j2DISOvQTrmZHwBA&q=game+of+thrones+episodes&oq=game+o+episodes&gs_l=psy-ab.1.0.0i7i30k1l10.52520.53781.0.55237.6.6.0.0.0.0.362.529.0j1j0j1.2.0....0...1.1.64.psy-ab..4.2.523....0.07UT2XT-nX4\", # noqa\r\n \"castle rock\": \"https://www.google.co.in/search?q=castle+rock+episodes&stick=H4sIAAAAAAAAAONgFuLVT9c3NEw2K8pKL042VkLlakllJ1vpl5QBUXxBUX56UWKuVWpBZnF-SmoxALHeYSM8AAAA&sa=X&ved=2ahUKEwj715fQpMfcAhWGro8KHSK3BIUQMTA5egQIDRBD&biw=1366&bih=662\", # noqa\r\n \"orange is the new black\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=eUNfW5nCEYjlvAS1ja6IDg&q=orange+is+the+new+black+episodes&oq=+oraepisodes&gs_l=psy-ab.3.0.0i7i30k1l3.73181.75732.0.77105.10.10.0.0.0.0.197.1249.0j7.7.0....0...1.1.64.psy-ab..3.6.1070...0i7i10i30k1j0i8i10i30k1j0i67k1.0.KKD0uo55zFc\", # noqa\r\n \"suits\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=1UNfW6mcGcXnvASp-45Y&q=suits+episodes&oq=Sulits+episodes&gs_l=psy-ab.3.0.0i13k1l10.100383.103892.0.105529.8.8.0.0.0.0.294.1276.0j3j3.6.0....0...1.1.64.psy-ab..2.6.1261...0i7i30k1j0i67k1.0.z7eTUNw7kI0\", # noqa\r\n \"the flash\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=RURfW5uVBcfivASXobjAAw&q=the+flash+episodes&oq=theflas+episodes&gs_l=psy-ab.3.0.0i13k1l10.121800.125333.0.127277.9.8.1.0.0.0.246.661.0j1j2.3.0....0...1.1.64.psy-ab..5.4.673...0i7i30k1j0i10k1.0.rNJJNmiWmeI\", # noqa\r\n \"jessica jones\": \"https://www.google.co.in/search?biw=1366&bih=662&ei=0ERfW7u6IY7EvwSa-r-4Dw&q=jessica+jones+episodes&oq=Jess+episodes&gs_l=psy-ab.3.2.0i7i30k1l10.429044.431792.0.433171.4.4.0.0.0.0.285.915.0j2j2.4.0....0...1.1.64.psy-ab..0.4.906....0.bt0PY6CGPJs\", # noqa\r\n \"sherlock\": 
\"https://www.google.co.in/search?biw=1366&bih=662&ei=ikZfW_B4xeG-BK7Pm7AP&q=sherlock+episodes&oq=sher+episodes&gs_l=psy-ab.3.0.0i7i30k1l10.115543.116200.0.117240.4.4.0.0.0.0.204.759.0j3j1.4.0....0...1.1.64.psy-ab..0.4.746....0.CGkqZHrozHk\", # noqa\r\n \"the fall\": \"https://www.google.co.in/search?ei=rqRgW4ajF4O5rQHXt5jQDA&btnG=Search&q=the+fall+episodes\", # noqa\r\n \"13 reasons why\": \"https://www.google.co.in/search?ei=3qRgW4CLBYX7rQHRvJKYDA&q=13+reasons+why+episodes&oq=13+reasons+why+episodes&gs_l=psy-ab.3...35.7078.0.7552.18.18.0.0.0.0.0.0..0.0....0...1c.1.64.psy-ab..18.0.0....0.VHfUUA_T0WQ\"} # noqa\r\n while tvshow.lower() not in tvshows.keys():\r\n tvshow = input(\"Which tv show you want to know about.\\n\"\r\n \"Please provide the name\\n [The Names are:\"\r\n \"\\nGame of thrones,\\nCastle Rock,\\nOrange Is the\"\r\n \" New Black,\\nSuits,\\nThe Flash,\\nJessica Jones,\"\r\n \"\\nSherlock,\\nThe Fall,\\n13 Reasons Why]\\n\")\r\n if tvshow.lower() not in tvshows.keys():\r\n print(\"Please provide the correct name of the Show\")\r\n else:\r\n tv = tvshows[tvshow.lower()]\r\n print('-'*80)\r\n return tv" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write invitation record to DynamoDB.
def write_inv_record(uid_1, uid_2): d = {} d2 = {} now = datetime.datetime.now().isoformat() d['uid'] = uid_1 d['timestamp'] = now d['partner'] = uid_2 d2['uid'] = uid_2 d2['timestamp'] = now d2['partner'] = uid_1 mytable.put_item(d, overwrite=True) mytable.put_item(d2, overwrite=True) return None
[ "def dynamo_put(data):\n logger.info('dynamo_put: uploading event data to DynamoDb...')\n\n # dynamodb\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(settings.dynamodb_table)\n # create a new item (row)\n # source: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html#creating-a-new-item\n\n try:\n table.put_item(\n Item={\n # generate a ramdom/unique ID for each DB Entry\n 'id': str(uuid.uuid4()),\n 'name': data['name'],\n 'phone': data['phone'],\n 'email': data['email'],\n 'message': data['message'],\n 'create-date': str(datetime.datetime.now()),\n }\n )\n except Exception as e:\n logger.error(f'dynamo_put: dynamodb.table.put_item: {e}')\n return False\n else:\n logger.info('dynamo_put: dynamodb.table.put_item: Success!!!')\n return True", "def Write(dynamodb,obj_as_dict: dict,tablename: str):\n\t\t\treturn dynamodb.resource.Table(tablename).put_item(Item=obj_as_dict)", "def save_user_info(email, query=''):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('UserEmail')\n table.put_item(\n Item={\n 'email': email,\n 'timestamp': int(time.time()),\n 'query': query,\n 'time': time.ctime(),\n 'from_app': 'CampusCrime'\n }\n )", "def send_event(table, data_stream, pk, sk, item, debug=0):\n \n if debug == 1:\n logging.info(f'table: {table}')\n logging.info(f'data_stream: {data_stream}')\n logging.info(f'pk: {pk}')\n logging.info(f'sk: {sk}')\n \n item_decimal = json.loads(json.dumps(item), parse_float=Decimal)\n\n def default_json(t):\n \"\"\"\n \"convert json elements to string in order to handle Float attributes which DynamoDB is not supported\n \"\"\"\n return f'{t}'\n \n \n raw_data = json.dumps(item_decimal, default=default_json)\n # raw_data = json.dumps(item)\n encoded_data = bytes(raw_data, 'utf-8')\n kinesis_record = {\n 'Data': encoded_data,\n 'PartitionKey': item_decimal[pk]\n }\n \n \n kinesis_record_list = []\n kinesis_record_list.append(kinesis_record)\n\n request = {\n 'Records': kinesis_record_list,\n 'StreamName': data_stream \n }\n if debug == 1:\n logging.info(f'request sent to data_stream: {request}')\n\n \n kinesis_client.put_records(**request)", "def write_meeting(user_id, slot_data):\n start_at = slot_data['start_at']\n\n try:\n meeting = Meetings(user_id, start_at, \"\", \"\", False)\n db.session.add(meeting)\n db.session.commit()\n except db_exception.SQLAlchemyError as e:\n app.logger.error(\"DB exception: {}\".format(e))\n db.session.rollback()\n raise exceptions.DBError()", "def put_record(self):\n timestamp = datetime.datetime.utcnow()\n part_key = self.ipAddr\n data = random_alphanumeric(10)\n print( \"put {} to kinesisStrem {}\".format( data, self.streamName ) )\n self.kinesisClient.put_record(\n StreamName=self.streamName, \n Data=data, \n PartitionKey=part_key\n )", "def write(self, conf, conn):\n db = conn['warehouse']\n if self.table not in db.tables:\n if conf['ENV'] in ['development', 'testing']:\n table = db.create_table(\n self.table,\n primary_id=self.__primary_key,\n primary_type=fields.Text.column_type)\n # work around a bug whereby the table is not persisted\n table.table\n table = db[self.table]\n action = 'created'\n if table.find_one(**{self.__primary_key: self[self.__primary_key]}):\n action = 'updated'\n del self['meta_id']\n\n ensure_fields = False\n if conf['ENV'] in ['development', 'testing']:\n ensure_fields = True\n\n table.upsert(self, [self.__primary_key], ensure=ensure_fields, types=self.__column_types)\n\n logger.debug('Record - %s: %s - %s fields', action, self, len(self))", "def 
write(self, entity_data):", "def post_invitation(self, invitation):\n if not invitation.signatures: invitation.signatures = [self.signature]\n if not invitation.writers: invitation.writers = [self.signature]\n response = requests.post(self.invitations_url, json = invitation.to_json(), headers = self.headers)\n response = self.__handle_response(response)\n\n return Invitation.from_json(response.json())", "def create_record(row):\n\n # create the boto3 object.\n dynamodb = boto3.resource('dynamodb',\n region_name=config.AWS_REGION)\n\n # set the table from the cfg file.\n table = dynamodb.Table(config.DB_TABLE)\n\n # create a new id since we can't necessarily trust the one\n # coming from the api.\n if row['id'] == 'from-test-dyndb':\n the_id = 'from-test-dyndb'\n else:\n the_id = str(uuid.uuid1())\n\n # the field snippet appears to have a lot of html so let us\n # strip out the tags before saving.\n the_desc = re.sub('<[^<]+?>', '', row['snippet'])\n\n # add the item.\n response = table.put_item(\n Item={\n # create an id rather than using ones from result.\n 'id': the_id,\n 'title': row['title'] if row['title'] is not None else '',\n 'company': row['company'] if row['company'] is not None else '',\n 'location': row['location'] if row['location'] is not None else '',\n 'snippit': the_desc,\n 'salary': row['salary'] if row['salary'] is not None else '',\n 'source': row['source'] if row['source'] is not None else '',\n 'the_type': row['type'] if row['type'] is not None else '',\n 'the_link': row['link'] if row['link'] is not None else '',\n 'updated': row['updated'] if row['updated'] is not None else ''\n }\n )\n\n # make sure it succeeded.\n the_status = response['ResponseMetadata']['HTTPStatusCode']\n bool_rtn = True if the_status == 200 else False\n\n # return success here.\n return bool_rtn", "def post_candidate(event, context):\n client = boto3.resource('dynamodb')\n table = client.Table(os.environ.get('CANDIDATE_TABLE'))\n\n body = json.loads(event.get('body'))\n if body:\n body['id'] = str(uuid.uuid4())\n table.put_item(Item=body)\n\n resp = {\n \"statusCode\": 200,\n \"body\": json.dumps(body)\n }\n\n logger.warning(f\"resp: {resp}\")\n\n return resp", "def save(data, count):\n\n # connect to AWS DB service\n db = boto.dynamodb.connect_to_region(\"eu-west-1\",\n aws_access_key_id=conf.ACESS_KEY,\n aws_secret_access_key=conf.SECRET_KEY\n )\n db.use_decimals()\n\n for obj in data:\n name = obj[\"name\"]\n address = obj[\"address\"]\n lat = decimal.Decimal(str(obj[\"position\"][\"lat\"]))\n time_stamp = obj['last_update']\n lng = decimal.Decimal(str(obj[\"position\"][\"lng\"]))\n free = obj['available_bikes']\n number = obj[\"number\"]\n bike_stands = obj[\"bike_stands\"]\n available_bike_stands = obj['available_bike_stands']\n count += 1\n\n item_data = {\n \"name\": name,\n \"address\": address,\n \"lat\": lat,\n \"lna\": lng,\n \"time_stamp\": time_stamp,\n \"free\": free,\n \"number\": number,\n \"bike_stands\": bike_stands,\n \"available_bike_stands\": available_bike_stands,\n \"count\": count\n }\n table = db.get_table('DublinBikes')\n item = table.new_item(\n # primary key\n hash_key=name,\n # range key\n range_key=time_stamp,\n # attributes\n attrs=item_data\n )\n item.put()\n print(\"Adding bike occupancy data from:\", name,\n \"free bikes at the moment: \", free, \"from\", number)\n\n print(\"Put Items succeeded. 
Last update at:\", datetime.fromtimestamp(int(\n data[0][\"last_update\"]) / 1000).strftime('%Y.%m.%d %H:%M:%S'))", "def _write_row(self, row):\n return", "def write(data):\n print(\"Writing data to the database:\" + data)\n time.sleep(2) # Add a delay to simulate persisting to a slow database.\n r.rpush(list_name, data)", "def append_transaction_details(data):\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n table = dynamodb.Table(DYNAMO_DB_NAME)\n print(\"data to append\", data)\n result = table.update_item(\n Key={'username': str(data['username'])},\n UpdateExpression=\"SET statements = list_append(if_not_exists(statements, :empty_list), :i)\",\n ExpressionAttributeValues={\n \":i\":[data], \n \":empty_list\":{\"statements\":[]},\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n print(result)", "def write(self):\n\t\timport revitron\n\n\t\tself.data.IsTransmitted = True\n\t\trevitron.DB.TransmissionData.WriteTransmissionData(self.hostPath, self.data)", "def post_to_dynamo(glucose_data):\n dynamodb = boto3.resource('dynamodb', region_name=REGION)\n table = dynamodb.Table('Glucose')\n response = table.put_item(\n Item=glucose_data\n )\n logger.info(\"Put Item succeeded:\")\n logger.info(json.dumps(response, indent=4))", "def _write(self, data):\n return self._driver_instance.write(data, *self._args, **self._kwargs)", "def mark_as_write(response):\r\n response._db_write = True\r\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Append invitation records to DynamoDB.
def append_inv_records(uids): # Get current timestamp, in "YYYY-MM-DD HH:MM:SS" format. today = datetime.datetime.today() timestamp = str(today.year) + '-' + str(today.month) + '-' + str(today.day) \ + ' ' + str(today.hour) + ':' + str(today.minute) + \ str(today.second) # Append records to Dynamo for uid in uids: d = {} d['uid'] = uid d['timestamp'] = timestamp d['partners'] = [v for v in uids if v != uid] write_dynamo(d) return None
[ "def append_transaction_details(data):\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n table = dynamodb.Table(DYNAMO_DB_NAME)\n print(\"data to append\", data)\n result = table.update_item(\n Key={'username': str(data['username'])},\n UpdateExpression=\"SET statements = list_append(if_not_exists(statements, :empty_list), :i)\",\n ExpressionAttributeValues={\n \":i\":[data], \n \":empty_list\":{\"statements\":[]},\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n print(result)", "def write_inv_record(uid_1, uid_2):\n d = {}\n d2 = {}\n now = datetime.datetime.now().isoformat()\n\n d['uid'] = uid_1\n d['timestamp'] = now\n d['partner'] = uid_2\n\n d2['uid'] = uid_2\n d2['timestamp'] = now\n d2['partner'] = uid_1\n\n mytable.put_item(d, overwrite=True)\n mytable.put_item(d2, overwrite=True)\n return None", "def test_append(self):\r\n records = self._get_records(5, keyspace=\"eggs\", column_family=\"bacon\")\r\n for record in records:\r\n self.object.append(record)\r\n self.assert_(record.key.key in self.object)\r\n self.assert_(self.object[record.key.key] is record)\r\n\r\n self.assert_(self.object.values() == records)", "def insert_record(self,list_holding_record):", "def add_records(self, records):\r\n return self.manager.add_records(self, records)", "def add_record_context(records: List[dict], context: RecordContext) -> None:\n for record in records:\n record.update(\n {\n \"trial\": context.trial,\n \"assay\": context.assay,\n \"record_id\": context.record,\n }\n )", "def append_records(a_records, prefix, network_interface, dns_name,\n dns_internal_name):\n internal_ip = network_interface['networkIP']\n a_records.append((prefix + dns_internal_name, [internal_ip]))\n external_ips = []\n for access_config in network_interface.get('accessConfigs', []):\n external_ip = access_config.get('natIP', None)\n if external_ip:\n external_ips.append(external_ip)\n if external_ips:\n a_records.append((prefix + dns_name, external_ips))", "def add_records(self, domain, records):\r\n return domain.add_records(records)", "def create_invite(event_id):\n \n json_req_body = request.get_json()\n\n # print(req_body)\n\n user_ids = json_req_body['userIds']\n\n # print(user_ids)\n\n for user_id in user_ids:\n req_body = {\n 'user_id': user_id,\n 'event_id': event_id\n }\n invite = Invitation(**req_body)\n db.session.add(invite)\n \n db.session.commit()\n\n return jsonify(event_id)", "def add_record(self, transaction):\n raise NotImplementedError(\"Please Implement this method\")", "def add(self, record):\n return self._append_record(record, 'additions')", "def storeParticipantInfoInDB():\n collection.update({ \"type\": \"proposal\", \"_id\":flask.session['proposal_id'] }, {'$push': {'responders':flask.session['name']}})\n collection.update({ \"type\": \"proposal\", \"_id\":flask.session['proposal_id'] }, {'$push': {'free_times':flask.session['revised_free']}})", "def append(self, record):\r\n return self.__setitem__(record.key.key, record)", "def add_new_records(data):\n \n try:\n conn = sqlite3.connect(DB_PATH) \n data.to_sql('global_data', conn, if_exists='append', index=True)\n conn.commit()\n print(\"Successfully added new records\")\n except sqlite3.Error as error:\n print(\"Error while adding new records in global_data \", error)\n finally:\n if (conn): conn.close()", "def dynamo_put(data):\n logger.info('dynamo_put: uploading event data to DynamoDb...')\n\n # dynamodb\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(settings.dynamodb_table)\n # create a new item (row)\n # 
source: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html#creating-a-new-item\n\n try:\n table.put_item(\n Item={\n # generate a ramdom/unique ID for each DB Entry\n 'id': str(uuid.uuid4()),\n 'name': data['name'],\n 'phone': data['phone'],\n 'email': data['email'],\n 'message': data['message'],\n 'create-date': str(datetime.datetime.now()),\n }\n )\n except Exception as e:\n logger.error(f'dynamo_put: dynamodb.table.put_item: {e}')\n return False\n else:\n logger.info('dynamo_put: dynamodb.table.put_item: Success!!!')\n return True", "def save_to_database_append(data_in_rows, table_name=\"Movie_Info\"):\n print(\"Saving data to database...\")\n conn = sqlite3.connect(DATABASE_NAME)\n data_in_rows.to_sql(table_name, conn, index=False, if_exists='append')\n conn.close()\n print(\"Data has been successfully saved.\")", "def update_records():\n flir_ = Flir()\n for record in flir_.record_file_list:\n with session_scope() as session:\n try:\n record_obj = Record(record)\n session.merge(record_obj)\n session.commit()\n print('Record added: {}'.format(record_obj))\n except Exception as e:\n logging.error('Failed Record update: {}'.format(record))\n logging.error(e)\n print(Exception('error for : {}'.format(record)))\n print(e)\n print(record)", "def append(self, interaction):\n matches = [i for i in self.interactions if i.hash == interaction.hash]\n if not matches:\n self.interactions.append(interaction)\n else:\n console.logdebug(\"Interactions Table : interaction already stored, not appending to the table [%s]\" % interaction.hash)", "def append(self, record):\r\n return self._append_view(record).append(record)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if the Broadcast is addressed to this node in any way (group or direct).
def broadcast_is_to_this_node(self, b:Broadcast): return( b.is_to_all() or \ b.to == self.network_addr or \ b.to_secure_group() in self.joined_secure_groups or \ b.to_gen_group() in self.joined_groups)
[ "def _is_broadcast(self):\n pass", "def _bcastIsOwn(self, host):\n netinfo = NetworkInfo()\n local_addresses = netinfo.get_local_addresses()\n return host in local_addresses", "def test_broadcast(self):\n if _debug: TestVLAN._debug(\"test_broadcast\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # make a broadcast PDU\n pdu = PDU(b'data', source=1, destination=0)\n if _debug: TestVLAN._debug(\" - pdu: %r\", pdu)\n\n # node 1 sends the pdu, node 2 and 3 each get it\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduSource=1).success()\n\n # run the group\n tnet.run()", "def is_unicast(self):\n return (self.integer & 1 << 40) == 0", "def is_directed(self):\n return self._incoming is not self._outgoing # directed if maps are distinct", "def covers(self, other):\n self.check_right_instance(other)\n if self.network_address <= other.network_address and self.broadcast_address >= other.broadcast_address:\n return True\n else:\n return False", "def _is_broadcastable(self, shape):\n shape0 = getattr(self, \"shape\", None)\n if shape is None:\n return False\n\n shape1 = shape\n\n if tuple(shape1) == tuple(shape0):\n # Same shape\n return True\n\n ndim0 = len(shape0)\n ndim1 = len(shape1)\n if not ndim0 or not ndim1:\n # Either or both is scalar\n return True\n\n for setN in set(shape0), set(shape1):\n if setN == {1}:\n return True\n\n if ndim1 > ndim0:\n return False\n\n for n, m in zip(shape1[::-1], shape0[::-1]):\n if n != m and n != 1:\n return False\n\n return True", "def nodeIsSubscribed(self, node):\n return self.hasNodeParams(node)", "def _check_that_node_from_body(node):\n n_ports = len(node.out_edges())\n internal_port_in_out_ports = ['internal_port_id' in edge for edge in node.out_edges()]\n return np.all(internal_port_in_out_ports) and n_ports", "def is_root_worker():\n return get_rank() == ROOT_RANK", "def is_node_a_group(node: dict) -> bool:\n if 'y:ProxyAutoBoundsNode' in node.keys():\n return True\n return False", "def __nonzero__(self):\n\n return bool(self.nodes)", "def _global_routing(self):\n return self.global_vlan.vid and self.routers and len(self.routers) == 1", "def is_local_root_worker():\n return get_local_rank() == ROOT_RANK", "def scalable(self, node: Node) -> bool:\n if len(node.ingoing) > 0 and len(node.outgoing) > 0:\n return True\n\n return False", "def do_not_disturb(self, group_id, now):\n group = self.groups.get(group_id) or self.groups.get(0)\n return HourCallManager.is_in_period(group.do_not_disturb, now)", "def is_node_with_weight(node: NNCFNode) -> bool:", "def isPhysical(self,uid):\n return( self.id2node[uid].group==\"Physical\" )", "def v_is_group(self):\n return not self._is_leaf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a payload, the to, and the from; returns the decrypted and b64-decoded payload. Used to decrypt a payload addressed to this node or to a secure group it may be a part of.
def payload_decryptor(self, payload:bytes, to, frm): if to.startswith(b'*'): # no extra encryption, just b64 decode return base64_decode(payload) if len(to) <= 1: # at this point len(to) > 1 raise ExceptionWithResponse(RespCode.PRSER, "Invalid 'to' address.", back_to=frm) if to == self.network_addr: # to == public address from_public_key = self.cached_nodes[frm].node_info['kPublic'] raw_payload = base64_decode(payload) return self.crypto.decrypt_from_public_key(raw_payload, from_public_key) if to.startswith(b'#'): group_name = to if group_name in self.joined_secure_groups: group_key = self.joined_secure_groups[group_name] plain_payload = Crypto.decrypt_symmetrically(base64_decode(payload), group_key) return plain_payload else: return base64_decode(payload) return base64_decode(payload) # if cant decrypt, just give it back?? TODO
[ "def decode_payload(cls, payload: bytes) -> MsgGenericPayload:\n pass", "def crds_decode(msg):\n if isinstance(msg, dict) and \"crds_encoded\" in msg:\n ascii = msg[\"crds_payload\"]\n b64 = ascii.encode(\"ascii\")\n compressed = base64.b64decode(b64)\n utf8 = gzip.decompress(compressed)\n json_str = utf8.decode()\n obj = json.loads(json_str)\n return obj\n else:\n return msg", "def _decrypt(self, data):\n key = self.kernel_id[0:16]\n cipher = AES.new(key.encode('utf-8'), AES.MODE_ECB)\n payload = cipher.decrypt(base64.b64decode(data))\n payload = \"\".join([payload.decode(\"utf-8\").rsplit(\"}\", 1)[0], \"}\"]) # Get rid of padding after the '}'.\n return payload", "def _decode_base64(payload):\n data = payload.copy()\n if \"RequestBody\" in data:\n if isinstance(data[\"RequestBody\"], dict):\n return data\n\n data[\"RequestBody\"] = json.loads(\n base64.b64decode(data[\"RequestBody\"])\n )\n return data", "def decode_payload(message, enc=\"utf-8\"):\n return MessageUtils.decode(message.payload, enc=enc)", "def decrypt(payload, private_key):\n cipher = PKCS1_v1_5.new(private_key)\n aes_key_str = cipher.decrypt(b64decode(payload.get(\"aes_key\")), sentinel=None)\n aes_key = json.loads(aes_key_str.decode(\"utf-8\"))\n key = b64decode(aes_key.get(\"key\"))\n iv = b64decode(aes_key.get(\"iv\"))\n encrypted_magic_envelope = b64decode(payload.get(\"encrypted_magic_envelope\"))\n encrypter = AES.new(key, AES.MODE_CBC, iv)\n content = encrypter.decrypt(encrypted_magic_envelope)\n return etree.fromstring(pkcs7_unpad(content))", "def base64Decode(self):\n self.b64 = str(self.b64).strip()\n try:\n return base64.b64decode(self.b64)\n except TypeError:\n padding = len(self.b64) % 4\n if padding == 1:\n return ''\n elif padding == 2:\n self.b64 += b'=='\n elif padding == 3:\n self.b64 += b'='\n return base64.b64decode(self.b64)", "def decode_base64_packet(msg):\n packet_type = PACKET_MAP.get(try_convert(msg[0], int))\n data = bytearray(b64decode(msg[1:]), 'base64')\n\n return {'type': packet_type, 'data': data}", "def reconstruct_payload(payload):\n if payload is None: # nothing to do\n return None\n \n into = ['payload = ']\n if isinstance(payload, str):\n reconstruct_json_into(payload, into, 0)\n \n elif isinstance(payload, Formdata):\n reconstruct_formdata_into(payload, into)\n \n elif isinstance(payload, bytes):\n reconstruct_binary_into(payload, into)\n \n else:\n reconstruct_unexpected_into(payload, into)\n \n return ''.join(into)", "def get_payload(raw_msg):\n payload=str(raw_msg[14:42])\n print payload\n padding=int(raw_msg[45])\n return payload,padding", "def _b64decode(self, string):\n \n return b64decode(string)", "def decode_base64(_base64):\n return binascii.a2b_base64(_base64)", "def decode_base64(data):\n missing_padding = 4 - len(data) % 4\n if missing_padding:\n data += b'=' * missing_padding\n return base64.decodestring(data)", "def decode_y64(b64_data):\n return str(base64.b64decode(b64_data.replace('-', '=').replace('.', '+').replace('_', '/')))", "def _decode_payload(self, decoded: dict[str, Any]) -> Any:\n try:\n payload = json.loads(decoded[\"payload\"])\n except ValueError as e:\n raise DecodeError(f\"Invalid payload string: {e}\")\n if not isinstance(payload, dict):\n raise DecodeError(\"Invalid payload string: must be a json object\")\n return payload", "def decode_blob_string(msg):\n if msg[:len(CRYPTO_MIRROR_HEADER)] != CRYPTO_MIRROR_HEADER:\n raise Exception(\"Mismatched header\")\n if msg[-len(CRYPTO_MIRROR_TRAILER):] != CRYPTO_MIRROR_TRAILER:\n raise 
Exception(\"Mismatched trailer\")\n inner = msg[len(CRYPTO_MIRROR_HEADER):-len(CRYPTO_MIRROR_TRAILER)]\n return inner.decode(\"base64\")", "def clean_payload(payload: typing.Dict[str, typing.Any]) -> typing.Dict[str, typing.Any]:\n return Serializer._clean_item(payload)", "def base64Decode(self, data):\n # type: (Union[str, bytearray]) -> Union[str, bytearray]", "def decrypt_and_decode(self, data, **kwargs):\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Encrypts and b64 encodes the constructed payload (pre_payload) given the broadcast information.
def payload_encryptor(self, b:Broadcast, pre_payload:bytes): if b.to_gen_group(): #includes 'all' (*) return base64_encode(pre_payload) if b.to_secure_group(): group_name = b.to_secure_group() if group_name in self.joined_secure_groups: group_key = self.joined_secure_groups[group_name] payload_encrypted = Crypto.encrypt_symmetrically(pre_payload, group_key) return base64_encode(payload_encrypted) else: raise NotInSecureGroupException(group_name) if b.to in self.cached_nodes: to_public_key = self.cached_nodes[b.to].node_info['kPublic'] raw_encrypted = self.crypto.encrypt_to_public_key(pre_payload, to_public_key) return base64_encode(raw_encrypted) else: # unkown node, cant encypt, check if part of marco-polo TODO pass #raise UnknownNodeException() raise Exception('Could not determine how to encrypt/encode the broadcast \ from the node. This (idealy) should never happen, \ another Exception should come first if any.')
[ "def encrypt_payload(self, payload):\n encrypter = AES.new(\n self.key,\n AES.MODE_CBC,\n self.encryption_meta.payload\n )\n payload = self._pad_payload(payload)\n return encrypter.encrypt(payload)", "def encrypt(self, payload):\n encrypted_data = encrypt(payload, key_store=self.key_store, key_purpose=KEY_PURPOSE)\n return encrypted_data", "def b64_encrypt(key, raw, **kwargs):\n return urlsafe_b64encode(encrypt(key, raw, **kwargs)).rstrip('=')", "def encrypt(self, raw):\n raw = pad(raw)\n cipher = AES.new(self.key, AES.MODE_ECB)\n # return cipher.encrypt(raw).encode(\"hex\") # Python 2\n return cipher.encrypt(raw.encode()).hex()", "def encrypt_and_add_payload(self, payload):\n key, ciphertext = symmetric_encrypt(bytes(payload))\n hash_key = crypto.double_sha256(ciphertext)\n self._add(hash_key, key, ciphertext, payload)\n return ciphertext, hash_key", "def encrypt(self, data):\n aes_key, hmac_key = self.keys\n pad = AES_BLOCK_SIZE - len(data) % AES_BLOCK_SIZE\n data = data + pad * chr(pad)\n iv_bytes = RandomPool(512).get_bytes(AES_BLOCK_SIZE)\n cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)\n data = iv_bytes + cypher.encrypt(data)\n sig = hmac.new(hmac_key, data, hashlib.sha256).digest()\n return data + sig", "def _encrypt_payload(self, payload, key_ids):\n global legacy_gpg\n payload = encode_string(payload)\n\n self.gpg.armor = True\n\n recipient = [self.gpg.get_key(key_id) for key_id in key_ids]\n\n for key in recipient:\n if key.expired:\n if legacy_gpg:\n raise gpgme.GpgmeError(\n \"Key with user email %s \"\n \"is expired!\".format(key.uids[0].email))\n else:\n raise gpg.errors.GPGMEError(\n \"Key with user email %s \"\n \"is expired!\".format(key.uids[0].email))\n\n if legacy_gpg:\n plaintext = BytesIO(payload)\n ciphertext = BytesIO()\n\n self.gpg.encrypt(recipient, gpgme.ENCRYPT_ALWAYS_TRUST,\n plaintext, ciphertext)\n\n return ciphertext.getvalue()\n else:\n (ciphertext, encresult, signresult) = self.gpg.encrypt(\n gpg.Data(string=payload),\n recipients=recipient,\n sign=False,\n always_trust=True\n )\n\n return ciphertext", "def make_transmittable_broadcast(self, broadcast:Broadcast) -> TransmittableBroadcast:\n\n encrypted = self.crypto.sign_and_encrypt_with_network_key(\n broadcast.encode('0.1', self.payload_encryptor))\n\n # x01x01 means: version 1, normal broadcast\n return TransmittableBroadcast(b'\\x01\\x01' + struct.pack('!H', len(encrypted)) + encrypted,\n broadcast)", "def encrypt(self, unencrypted, secret, tenant):", "def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, b'=')", "def secret_data_encode_bytes(data: bytes) -> bytes:\n return base64.b64encode(data)", "def encrypt(self, bytes):\n paddedBytes = self._addPKCS1Padding(bytes, 2)\n m = bytesToNumber(paddedBytes)\n if m >= self.n:\n raise ValueError()\n c = self._rawPublicKeyOp(m)\n encBytes = numberToBytes(c)\n return encBytes", "def encrypt(self, value):\n value = self._add_pad(value)\n init_vector = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, init_vector)\n return base64.b64encode(init_vector + cipher.encrypt(value))", "def encrypt(self, public_key, message):", "def encrypt(plaintext):\n return blake2b(salt=DropboxClient.SALT).update(plaintext).hexdigest()", "def encrypt_and_sign(self, data):\n\t\treturn self.cipher.encrypt_and_digest(data)", "def encrypt(key, plaintext):\n\n aes, iv = _get_aes(key)\n ciphertext = aes.encrypt(plaintext.encode())\n return iv + ciphertext", "def encryptor(text: bytes, IV: bytes, key: bytes) -> bytes:\n 
\n # Given\n prepend_string = \"comment1=cooking%20MCs;userdata=\"\n append_string = \";comment2=%20like%20a%20pound%20of%20bacon\"\n\n plaintext = (prepend_string.encode() + text + append_string.encode()).replace(b';', b'\";\"').replace(b'=', b'\"=\"')\n ciphertext = AES_CBC_encrypt(PKCS7_pad(plaintext, len(key)), IV, key)\n return ciphertext", "def encrypt_bytes(self, plaintext):\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a Broadcast object and makes a TransmittableBroadcast object, which includes the broadcast encoded, encrypted, and ready to transmit.
def make_transmittable_broadcast(self, broadcast:Broadcast) -> TransmittableBroadcast: encrypted = self.crypto.sign_and_encrypt_with_network_key( broadcast.encode('0.1', self.payload_encryptor)) # x01x01 means: version 1, normal broadcast return TransmittableBroadcast(b'\x01\x01' + struct.pack('!H', len(encrypted)) + encrypted, broadcast)
[ "def _make_broadcast_socket(self):\n self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.broadcast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1)", "def send_broadcasts(self):\n\n @self.conflict_resolver.managed_transaction\n def get_ready_broadcasts(session):\n wallet = self.get_wallet(session)\n NetworkTransaction = wallet.coin_description.NetworkTransaction\n return session.query(NetworkTransaction).filter(NetworkTransaction.transaction_type == \"broadcast\", NetworkTransaction.opened_at == None, NetworkTransaction.closed_at == None) # noqa\n\n @self.conflict_resolver.managed_non_retryable_transaction\n def mark_for_sending(session, broadcast_id):\n \"\"\"Mark we are going to send this broadcast and get backend data needed for to build the network transaction.\n\n :yield: (address, amount) tuples how much to send to each address\n \"\"\"\n b = self.get_broadcast(session, broadcast_id)\n assert b.opened_at is None\n b.opened_at = _now()\n session.add(b)\n\n outputs = Counter()\n\n for tx in b.transactions:\n assert tx.state == \"pending\"\n assert tx.receiving_account is None\n assert tx.amount > 0\n assert tx.address\n assert tx.address.address\n outputs[tx.address.address] += tx.amount\n\n return outputs\n\n @self.conflict_resolver.managed_non_retryable_transaction\n def mark_sending_done(session, broadcast_id, txid):\n b = self.get_broadcast(session, broadcast_id)\n assert b.closed_at is None\n b.txid = txid\n b.closed_at = _now()\n b.state = \"broadcasted\"\n session.add(b)\n\n # TODO: See if we can write update() more neatly\n tx_ids = [tx.id for tx in b.transactions]\n Transaction = b.coin_description.Transaction\n session.query(Transaction).filter(Transaction.id.in_(tx_ids)).update(dict(state=\"broadcasted\", processed_at=_now()), synchronize_session=False)\n\n @self.conflict_resolver.managed_transaction\n def charge_fees(session, broadcast_id, fee):\n wallet = self.get_wallet(session)\n broadcast = self.get_broadcast(session, broadcast_id)\n return wallet.charge_network_fees(broadcast, fee)\n\n ready_broadcasts = get_ready_broadcasts()\n count = ready_broadcasts.count()\n if count == 0:\n logger.debug(\"No broadcasts ready for sending to network\")\n else:\n logger.info(\"%d broadcasts prepared for sending\", count)\n\n broadcasted_count = 0\n total_fees = 0\n\n for b in ready_broadcasts:\n # Note: This is something we must NOT attempt to retry\n logger.info(\"Opening broadcast %d for sending\", b.id)\n outgoing = mark_for_sending(b.id)\n\n try:\n txid, fee = self.backend.send(outgoing, \"Outgoing broadcast {}\".format(b.id))\n assert txid\n broadcasted_count += 1\n except Exception as e:\n # Transaction broadcast died and we don't know why. 
We are pretty much dead in this situation, as we don't know if it is safe to try to re-broadcast the transaction or not.\n logger.error(\"Failed to broadcast external transaction %s\", e)\n logger.exception(e)\n\n #: TODO: Throw emergency event here?\n continue\n\n logger.info(\"Closing broadcast %d as done, it got txid %s\", b.id, txid)\n mark_sending_done(b.id, txid)\n\n if fee:\n charge_fees(b.id, fee)\n total_fees += fee\n\n return broadcasted_count, total_fees", "def send_broadcast_packet(self, broadcast_packet):\n print(\"Send broadcast message: \" + str(broadcast_packet.get_buf()))\n message = broadcast_packet.get_buf()\n self.stream.broadcast_to_none_registers(message, self.stream.get_server_address())", "def test_broadcast(self):\n if _debug: TestVLAN._debug(\"test_broadcast\")\n\n # three element network\n tnet = TNetwork(3)\n tnode1, tnode2, tnode3 = tnet.state_machines\n\n # make a broadcast PDU\n pdu = PDU(b'data', source=1, destination=0)\n if _debug: TestVLAN._debug(\" - pdu: %r\", pdu)\n\n # node 1 sends the pdu, node 2 and 3 each get it\n tnode1.start_state.send(pdu).success()\n tnode2.start_state.receive(PDU, pduSource=1).success()\n tnode3.start_state.receive(PDU, pduSource=1).success()\n\n # run the group\n tnet.run()", "def broadcast():\n message = request.form['broadcast_form_message']\n return sockets.broadcast(message)", "def broadcast(self, tx):\n tx = Transaction(tx)\n return self.network.broadcast_transaction(tx)", "def test_broadcast_call():\n print('\\n', \"testing broadcast call\")\n call.nspv_logout()\n call.nspv_login(wif_real)\n rpc_call = call.nspv_spend(addr_send, 0.1)\n rep = call.type_convert(rpc_call)\n hex_res = rep.get(\"hex\")\n hex = [False, \"norealhexhere\", hex_res]\n retcode_failed = [-1, -2, -3]\n\n # Cae 1 - No hex given\n rpc_call = call.nspv_broadcast(hex[0])\n call.assert_error(rpc_call)\n\n # Case 2 - Non-valid hex, failed broadcast should contain appropriate retcode\n rpc_call = call.nspv_broadcast(hex[1])\n call.assert_in(rpc_call, \"retcode\", retcode_failed)\n\n # Case 3 - Hex of previous transaction\n rpc_call = call.nspv_broadcast(hex[2])\n call.assert_success(rpc_call)\n rep = call.type_convert(rpc_call)\n broadcast_res = rep.get(\"broadcast\")\n expected = rep.get(\"expected\")\n if broadcast_res == expected:\n pass\n else:\n raise AssertionError(\"Aseert equal braodcast: \", broadcast_res, expected)", "def broadcast(self, bitcoin_transaction):\n raise NotImplementedError", "def _split_ast_on_broadcast(bb):\n before, after = transformations.force_align_and_split_by_intrinsics(\n bb, [building_block_factory.create_null_federated_broadcast()]\n )\n return _untuple_broadcast_only_before_after(before, after)", "def _set_broadcast_filter(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"broadcast-filter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"broadcast_filter must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"broadcast-filter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', 
defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__broadcast_filter = t\n if hasattr(self, '_set'):\n self._set()", "def _set_broadcast_filter(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"broadcast-filter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"broadcast_filter must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"broadcast-filter\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/wifi/mac', defining_module='openconfig-wifi-mac', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__broadcast_filter = t\n if hasattr(self, '_set'):\n self._set()", "def test_bluefog_broadcast(self):\n rank = bf.rank()\n size = bf.size()\n\n # This test does not apply if there is only one worker.\n if size == 1:\n return\n\n dtypes = [tf.int32, tf.int64, tf.float32,\n tf.float64, tf.bool]\n dims = [1, 2, 3]\n root_ranks = list(range(size))\n for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):\n tensor = tf.ones([17] * dim) * rank\n root_tensor = tf.ones([17] * dim) * root_rank\n if dtype == tf.bool:\n tensor = tensor % 2\n root_tensor = root_tensor % 2\n tensor = tf.cast(tensor, dtype=dtype)\n root_tensor = tf.cast(root_tensor, dtype=dtype)\n broadcasted_tensor = bf.broadcast(tensor, root_rank)\n self.assertTrue(\n self.evaluate(tf.reduce_all(tf.equal(\n tf.cast(root_tensor, tf.int32), tf.cast(broadcasted_tensor, tf.int32)))),\n \"bf.broadcast produces incorrect broadcasted tensor\")", "def broadcast():\n socketio.emit('s_broadcast', game_state.tanks_json())", "def mkBroadcastAddress(netbytes,maskbits=None):\n def makeadrbyte(m, a): return (~m | a) & 0xFF\n if not maskbits: (netbytes,maskbits) = netbytes\n netmask = mkNetMask(netbytes,maskbits)\n return map(makeadrbyte, netmask, netbytes)", "def BroadcastR(self):\n\t\tdata = {\"Ri\" : self.Ri, \"identity\" : self.identity}\n\t\tmsg = {\"data\" : data , \"type\" : \"RandomStringBroadcast\"}\n\t\tself.state = ELASTICO_STATES[\"BroadcastedR\"]\n\t\tBroadcastTo_Network(data, \"RandomStringBroadcast\")", "def payload_encryptor(self, b:Broadcast, pre_payload:bytes):\n\n if b.to_gen_group(): #includes 'all' (*)\n return base64_encode(pre_payload)\n\n if b.to_secure_group():\n\n group_name = b.to_secure_group()\n if group_name in self.joined_secure_groups:\n\n group_key = self.joined_secure_groups[group_name]\n\n payload_encrypted = Crypto.encrypt_symmetrically(pre_payload, group_key)\n\n return base64_encode(payload_encrypted)\n else:\n raise NotInSecureGroupException(group_name)\n\n\n if b.to in self.cached_nodes:\n\n to_public_key = self.cached_nodes[b.to].node_info['kPublic']\n\n raw_encrypted = self.crypto.encrypt_to_public_key(pre_payload, to_public_key)\n\n return base64_encode(raw_encrypted)\n else:\n # unkown node, cant encypt, check if part of marco-polo TODO\n pass\n #raise UnknownNodeException()\n\n raise Exception('Could not determine how to encrypt/encode the broadcast \\\n from the node. 
This (idealy) should never happen, \\\n another Exception should come first if any.')", "def BroadcastTo_Committee(committee_id, data , type_):\n\tmsg = {\"type\" : type_ , \"data\" : data}\n\n\tpass", "def get_computation_for_broadcast_form(\n bf: forms.BroadcastForm,\n) -> computation_base.Computation:\n py_typecheck.check_type(bf, forms.BroadcastForm)\n server_data_type = bf.compute_server_context.type_signature.parameter\n client_data_type = bf.client_processing.type_signature.parameter[1]\n comp_parameter_type = computation_types.StructType([\n (bf.server_data_label, computation_types.at_server(server_data_type)),\n (bf.client_data_label, computation_types.at_clients(client_data_type)),\n ])\n\n @federated_computation.federated_computation(comp_parameter_type)\n def computation(arg):\n server_data, client_data = arg\n context_at_server = intrinsics.federated_map(\n bf.compute_server_context, server_data\n )\n context_at_clients = intrinsics.federated_broadcast(context_at_server)\n client_processing_arg = intrinsics.federated_zip(\n (context_at_clients, client_data)\n )\n return intrinsics.federated_map(bf.client_processing, client_processing_arg)\n\n return computation", "def broadcast(self, message):\r\n print \"OK Broadcast!\", message" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A decorator that makes a class inherit documentation from its parents.
def inherit_doc(cls): for name, func in vars(cls).items(): if name.startswith("_"): continue if not func.__doc__: for parent in cls.__bases__: parent_func = getattr(parent, name, None) if parent_func and getattr(parent_func, "__doc__", None): func.__doc__ = parent_func.__doc__ break return cls
[ "def inherit_function_doc(parent):\n def doc_wrapper(method):\n func_name = method.__name__\n assert (func_name in dir(\n parent)), '%s.%s is not a method! Cannot inherit documentation' % (\n parent.__name__, func_name)\n\n # Set the documentation. This only ever happens at the time of class\n # definition, and not every time the method is called.\n method.__doc__ = getattr(parent, func_name).__doc__\n\n # We don't need another wrapper, we can just return the method as its\n # own method\n return method\n return doc_wrapper", "def inherit_annotations(cls) -> Callable[[type], Callable[[type], type]]:\n def decorator(func: type) -> Callable[[type], type]:\n cls_func = getattr(cls, func.__name__)\n sub_cls_name = func.__qualname__.split('.')[0]\n\n # Update annotations\n if not func.__annotations__:\n func.__annotations__ = dct = getattr(cls_func, '__annotations__', {}).copy()\n if 'return' in dct and dct['return'] in (cls, cls.__name__):\n dct['return'] = sub_cls_name\n\n # Update docstring\n if func.__doc__ is None:\n func.__doc__ = cls_func.__doc__.replace(cls.__name__, sub_cls_name)\n\n return func\n return decorator", "def get_base_docstring(self):", "def copy_baseclass_docs(classname, bases, dict, metaclass=type):\n for (name, member) in dict.iteritems():\n if getattr(member, \"__doc__\", None):\n continue\n for base in bases: # look only in direct ancestors\n basemember = getattr(base, name, None)\n if not basemember:\n continue\n basememberdoc = getattr(basemember, \"__doc__\", None)\n if basememberdoc:\n member.__doc__ = basememberdoc\n return metaclass(classname, bases, dict)", "def docclass(self, object, name=None, mod=None, funcs={}, classes={},\r\n *ignored):\r\n realname = object.__name__\r\n name = name or realname\r\n bases = object.__bases__\r\n\r\n contents = []\r\n push = contents.append\r\n\r\n # Cute little class to pump out a horizontal rule between sections.\r\n class HorizontalRule:\r\n def __init__(self):\r\n self.needone = 0\r\n def maybe(self):\r\n if self.needone:\r\n push('<hr>\\n')\r\n self.needone = 1\r\n hr = HorizontalRule()\r\n\r\n # List the mro, if non-trivial.\r\n mro = deque(inspect.getmro(object))\r\n if len(mro) > 2:\r\n hr.maybe()\r\n push('<dl><dt>Method resolution order:</dt>\\n')\r\n for base in mro:\r\n push('<dd>%s</dd>\\n' % self.classlink(base,\r\n object.__module__))\r\n push('</dl>\\n')\r\n\r\n def spill(msg, attrs, predicate):\r\n ok, attrs = _split_list(attrs, predicate)\r\n if ok:\r\n hr.maybe()\r\n push(msg)\r\n for name, kind, homecls, value in ok:\r\n push(self.document(getattr(object, name), name, mod,\r\n funcs, classes, mdict, object))\r\n push('\\n')\r\n return attrs\r\n\r\n def spilldescriptors(msg, attrs, predicate):\r\n ok, attrs = _split_list(attrs, predicate)\r\n if ok:\r\n hr.maybe()\r\n push(msg)\r\n for name, kind, homecls, value in ok:\r\n push(self._docdescriptor(name, value, mod))\r\n return attrs\r\n\r\n def spilldata(msg, attrs, predicate):\r\n ok, attrs = _split_list(attrs, predicate)\r\n if ok:\r\n hr.maybe()\r\n push(msg)\r\n for name, kind, homecls, value in ok:\r\n base = self.docother(getattr(object, name), name, mod)\r\n if callable(value) or inspect.isdatadescriptor(value):\r\n doc = getattr(value, \"__doc__\", None)\r\n else:\r\n doc = None\r\n if doc is None:\r\n push('<dl><dt>%s</dl>\\n' % base)\r\n else:\r\n doc = self.markup(getdoc(value), self.preformat,\r\n funcs, classes, mdict)\r\n doc = '<dd><tt>%s</tt>' % doc\r\n push('<dl><dt>%s%s</dl>\\n' % (base, doc))\r\n push('\\n')\r\n return attrs\r\n\r\n 
attrs = filter(lambda (name, kind, cls, value): visiblename(name),\r\n classify_class_attrs(object))\r\n mdict = {}\r\n for key, kind, homecls, value in attrs:\r\n mdict[key] = anchor = '#' + name + '-' + key\r\n value = getattr(object, key)\r\n try:\r\n # The value may not be hashable (e.g., a data attr with\r\n # a dict or list value).\r\n mdict[value] = anchor\r\n except TypeError:\r\n pass\r\n\r\n while attrs:\r\n if mro:\r\n thisclass = mro.popleft()\r\n else:\r\n thisclass = attrs[0][2]\r\n attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)\r\n\r\n if thisclass is __builtin__.object:\r\n attrs = inherited\r\n continue\r\n elif thisclass is object:\r\n tag = 'defined here'\r\n else:\r\n tag = 'inherited from %s' % self.classlink(thisclass,\r\n object.__module__)\r\n tag += ':<br>\\n'\r\n\r\n # Sort attrs by name.\r\n try:\r\n attrs.sort(key=lambda t: t[0])\r\n except TypeError:\r\n attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat\r\n\r\n # Pump out the attrs, segregated by kind.\r\n attrs = spill('Methods %s' % tag, attrs,\r\n lambda t: t[1] == 'method')\r\n attrs = spill('Class methods %s' % tag, attrs,\r\n lambda t: t[1] == 'class method')\r\n attrs = spill('Static methods %s' % tag, attrs,\r\n lambda t: t[1] == 'static method')\r\n attrs = spilldescriptors('Data descriptors %s' % tag, attrs,\r\n lambda t: t[1] == 'data descriptor')\r\n attrs = spilldata('Data and other attributes %s' % tag, attrs,\r\n lambda t: t[1] == 'data')\r\n assert attrs == []\r\n attrs = inherited\r\n\r\n contents = ''.join(contents)\r\n\r\n if name == realname:\r\n title = '<a name=\"%s\">class <strong>%s</strong></a>' % (\r\n name, realname)\r\n else:\r\n title = '<strong>%s</strong> = <a name=\"%s\">class %s</a>' % (\r\n name, name, realname)\r\n if bases:\r\n parents = []\r\n for base in bases:\r\n parents.append(self.classlink(base, object.__module__))\r\n title = title + '(%s)' % join(parents, ', ')\r\n doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)\r\n doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc\r\n\r\n return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)", "def docclass(self, object, name=None, mod=None):\r\n realname = object.__name__\r\n name = name or realname\r\n bases = object.__bases__\r\n\r\n def makename(c, m=object.__module__):\r\n return classname(c, m)\r\n\r\n if name == realname:\r\n title = 'class ' + self.bold(realname)\r\n else:\r\n title = self.bold(name) + ' = class ' + realname\r\n if bases:\r\n parents = map(makename, bases)\r\n title = title + '(%s)' % join(parents, ', ')\r\n\r\n doc = getdoc(object)\r\n contents = doc and [doc + '\\n'] or []\r\n push = contents.append\r\n\r\n # List the mro, if non-trivial.\r\n mro = deque(inspect.getmro(object))\r\n if len(mro) > 2:\r\n push(\"Method resolution order:\")\r\n for base in mro:\r\n push(' ' + makename(base))\r\n push('')\r\n\r\n # Cute little class to pump out a horizontal rule between sections.\r\n class HorizontalRule:\r\n def __init__(self):\r\n self.needone = 0\r\n def maybe(self):\r\n if self.needone:\r\n push('-' * 70)\r\n self.needone = 1\r\n hr = HorizontalRule()\r\n\r\n def spill(msg, attrs, predicate):\r\n ok, attrs = _split_list(attrs, predicate)\r\n if ok:\r\n hr.maybe()\r\n push(msg)\r\n for name, kind, homecls, value in ok:\r\n push(self.document(getattr(object, name),\r\n name, mod, object))\r\n return attrs\r\n\r\n def spilldescriptors(msg, attrs, predicate):\r\n ok, attrs = _split_list(attrs, predicate)\r\n if ok:\r\n hr.maybe()\r\n push(msg)\r\n for 
name, kind, homecls, value in ok:\r\n push(self._docdescriptor(name, value, mod))\r\n return attrs\r\n\r\n def spilldata(msg, attrs, predicate):\r\n ok, attrs = _split_list(attrs, predicate)\r\n if ok:\r\n hr.maybe()\r\n push(msg)\r\n for name, kind, homecls, value in ok:\r\n if callable(value) or inspect.isdatadescriptor(value):\r\n doc = getdoc(value)\r\n else:\r\n doc = None\r\n push(self.docother(getattr(object, name),\r\n name, mod, maxlen=70, doc=doc) + '\\n')\r\n return attrs\r\n\r\n attrs = filter(lambda (name, kind, cls, value): visiblename(name),\r\n classify_class_attrs(object))\r\n while attrs:\r\n if mro:\r\n thisclass = mro.popleft()\r\n else:\r\n thisclass = attrs[0][2]\r\n attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)\r\n\r\n if thisclass is __builtin__.object:\r\n attrs = inherited\r\n continue\r\n elif thisclass is object:\r\n tag = \"defined here\"\r\n else:\r\n tag = \"inherited from %s\" % classname(thisclass,\r\n object.__module__)\r\n filter(lambda t: not t[0].startswith('_'), attrs)\r\n\r\n # Sort attrs by name.\r\n attrs.sort()\r\n\r\n # Pump out the attrs, segregated by kind.\r\n attrs = spill(\"Methods %s:\\n\" % tag, attrs,\r\n lambda t: t[1] == 'method')\r\n attrs = spill(\"Class methods %s:\\n\" % tag, attrs,\r\n lambda t: t[1] == 'class method')\r\n attrs = spill(\"Static methods %s:\\n\" % tag, attrs,\r\n lambda t: t[1] == 'static method')\r\n attrs = spilldescriptors(\"Data descriptors %s:\\n\" % tag, attrs,\r\n lambda t: t[1] == 'data descriptor')\r\n attrs = spilldata(\"Data and other attributes %s:\\n\" % tag, attrs,\r\n lambda t: t[1] == 'data')\r\n assert attrs == []\r\n attrs = inherited\r\n\r\n contents = '\\n'.join(contents)\r\n if not contents:\r\n return title + '\\n'\r\n return title + '\\n' + self.indent(rstrip(contents), ' | ') + '\\n'", "def make_doc(klass):\n if hasattr(klass, '__doc__'):\n doc = dedent(klass.__doc__) + \"\\n\"\n else:\n doc = \"\"\n doc += dedent(\"\"\"**Keyword arguments:**\"\"\")\n traits = klass.class_traits().copy()\n traits.pop('trait_added')\n traits.pop('trait_modified')\n doc += traits_doc(traits)\n return doc", "def add_docs(cls, clsdict: Dict, klass: \"Implementer\") -> None:\n abstract_docs, abstract_methods = cls.implementation_info(clsdict)\n if not klass.__doc__:\n klass.__doc__ = \"\\n\".join(f\"Implements: {k}\\n{v}\\n\" for k, v in abstract_docs.items())\n for abstract_method, abstract_klass in abstract_methods.items():\n method = getattr(klass, abstract_method, None)\n if not method:\n # this will not instantiate, so bail now\n return\n # Only set the doc for the method if its not already set.\n # `@classmethod` `__doc__`s are immutable, so skip them.\n if not method.__doc__ and not hasattr(method, \"__self__\"):\n method.__doc__ = getattr(abstract_klass, abstract_method).__doc__", "def DocInheritMeta(style=\"parent\", abstract_base_class=False):\n\n if style not in store:\n raise NotImplementedError(\"The available inheritance styles are: \" + \", \".join(store))\n metaclass = store[style]\n return metaclass if not abstract_base_class else type(\"abc\" + metaclass.__name__, (ABCMeta, metaclass), {})", "def expand_doc(klass: ModelMetaclass) -> ModelMetaclass:\n docs = ['', '', 'Keyword Args:']\n for name, field in klass.__fields__.items(): # type: ignore\n default_str = ''\n #\n if field.default:\n default_str = ''\n if field.default:\n if SecretStr not in field.type_.__mro__:\n default = field.default\n if Path in field.type_.__mro__:\n default = 
str(Path(default).relative_to(Path(default).parents[2]))\n if field.name == 'user_klass':\n default_str = f' [default: :class:`{default.replace(\"`\", \"\").replace(\":\", \".\")}`]'\n else:\n default_str = f' [default: ``{default}``]'\n else:\n default_str = ' [default: ``uuid.uuid4()``]'\n module = field.outer_type_.__module__\n if module != 'builtins':\n if hasattr(field.outer_type_, '__origin__'):\n type_ = f' ({field.outer_type_.__origin__.__name__}) '\n elif not hasattr(field.outer_type_, '__name__'):\n type_ = ''\n else:\n type_ = f' ({module}.{field.outer_type_.__name__}) '\n else:\n type_ = f' ({field.outer_type_.__name__}) '\n env_var = ''\n if 'env' in field.field_info.extra:\n env_var = f' (Can be set by ``{field.field_info.extra[\"env\"]}`` environment variable)'\n docs.append(f' {name}{type_}: {field.field_info.description}{default_str}{env_var}')\n if klass.__doc__ is None:\n klass.__doc__ = ''\n klass.__doc__ += '\\n'.join(docs)\n return klass", "def update_docs(self, t, module): \n #We need to look in the parent module docstrings for this types decorating tags.\n key = \"{}.{}\".format(module.name, t.name)\n if key in module.predocs:\n t.docstring = self.docparser.to_doc(module.predocs[key][0], t.name)\n t.docstart, t.docend = (module.predocs[key][1], module.predocs[key][2])", "def DecoratorMixin(decorator):\n\n class Mixin(object):\n __doc__ = decorator.__doc__\n\n @classmethod\n def as_view(cls, *args, **kwargs):\n view = super(Mixin, cls).as_view(*args, **kwargs)\n return decorator(view)\n\n Mixin.__name__ = 'DecoratorMixin(%s)' % decorator.__name__\n return Mixin", "def add_api_docs(*docs, hidden_methods=None):\n\n def decorator(cls):\n # Find the section doc and all method docs\n section_doc = None\n method_docs = []\n for doc in docs:\n if isinstance(doc, SectionDocs):\n if section_doc is not None:\n raise RuntimeError(\"Only a single SectionDocs instance can be present\")\n section_doc = doc\n elif isinstance(doc, MethodDocs):\n method_docs.append(doc)\n else:\n raise RuntimeError(\"add_api_docs() parameters must be SectionDocs or MethodDocs instances\")\n\n # Create empty section if it wasn't specified\n if section_doc is None:\n section_doc = SectionDocs()\n\n # Add all found methods to the section and attach the section to the viewset\n section_doc.methods = method_docs\n\n # Remember hidden methods\n section_doc.hidden_methods = hidden_methods or []\n\n cls.api_core_docs = section_doc\n return cls\n\n return decorator", "def __new__(cls, clsname: str, bases: Tuple[Type, ...], clsdict: Dict) -> \"Implementer\":\n if \"__implements__\" not in clsdict:\n return super().__new__(cls, clsname, bases, clsdict)\n klass = super().__new__(cls, clsname, cls.get_bases(bases, clsdict), clsdict)\n cls.add_docs(clsdict, klass)\n return klass", "def _get_doc(cls):\n return cls.__doc__", "def doc(self):\r\n for sub in self.subscopes:\r\n if sub.name.names[-1] == '__init__':\r\n return '%s\\n\\n%s' % (\r\n sub.get_call_signature(funcname=self.name.names[-1]),\r\n self.docstr)\r\n return self.docstr", "def print_doc(self=None):\n print(self.__doc__)", "def test_review_class_docstring(self):\n self.assertIsNot(Review.__doc__, None,\n \"Review class needs a docstring\")\n self.assertTrue(len(Review.__doc__) >= 1,\n \"Review class needs a docstring\")", "def test_any_docstring(self):\n self.assertTrue(len(Base.__doc__) >= 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a start time in string or datetime.datetime format, use the spacepy library to download the solar wind data for the time range [t-lag_hr, t], where lag_hr is the integer or float hour lag. The dbase kwarg is passed directly into spacepy.omni.get_omni to get hourly solar wind values. to_df converts the solar wind data to a pandas DataFrame after throwing away some of the derived products.
def get_solar_wind_data(t, lag_hr, dbase='QDhourly', to_df=True):
    if isinstance(t, str):
        t = dateutil.parser.parse(t)
    omni_times = pd.date_range(t-timedelta(hours=lag_hr), t, freq='h')
    try:
        data = spacepy.omni.get_omni(omni_times.to_pydatetime(), dbase=dbase)
    except ValueError as err:
        if str(err) == 'Requested dates are outside data range':
            print(f"For time {t} spacepy thinks it's out of range. If you "
                  "don't have the data, run these two commands:\n"
                  "import spacepy.toolbox as tb\n"
                  "tb.update(omni=True)")
        raise

    if to_df:
        # Return a DataFrame
        cast_data_dict = {}
        # Lots of finesse going on here. We need to treat the ticks key
        # carefully because it is a spacepy.time.Ticktock object.
        # Also, to make it compatible with a DataFrame, I removed the
        # Tsyganenko-derived inputs G and W.
        for key, val in data.items():
            # if key == 'ticks':
            #     cast_data_dict[key] = np.array(val.UTC)
            if key in ['G', 'W', 'Qbits', 'ticks']:
                continue
            else:
                cast_data_dict[key] = np.array(val)
        df = pd.DataFrame(data=cast_data_dict)
        df.index = df.UTC
        df = df.drop(['UTC'], axis=1)
        return df
    else:
        # Return a dictionary of spacepy dmarrays
        return data
[ "def import_forecast(t_start,t_end,hours = \"all\",info = (\"GHI\",),\\\n grid_list = \"all\",sub_h_freq = 'all',\\\n sub_D_freq = 'all'):\n root = return_to_root()\n #Sanitycheck for different input \n if \"Fortrolig_data\" not in os.listdir(root):\n raise(OSError(\"Root is noot the svn shared folder\"))\n \n if type(t_start) != pd._libs.tslib.Timestamp or type(t_end) != pd._libs.tslib.Timestamp:\n raise(TypeError(\"t_start and t_end should be pandas timestamp\"))\n \n t_max = pd.Timestamp(2018,1,1,0)\n if t_start > t_max or t_end > t_max:\n raise(ValueError(\"Select a daterange within 2017\"))\n \n if t_start.time() != d_time(0,0) or t_end.time() != d_time(0,0):\n raise(ValueError(\"t_start and t_end should be whole dates only, \\n\"\n \"i.e hours = 0 and minutes = 0. \\n\"\n \"Use the hours argument to get less hours on a day\")) \n \n if not isinstance(info,(list,tuple,np.ndarray)) and info != \"all\":\n raise(TypeError(\"info argument should be tuple, list or numpy array\"))\n \n if (hours[0][-2:] != \"00\" or hours[1][-2:] != \"00\") and \\\n isinstance(hours,(list,tuple,np.ndarray)):\n raise(ValueError(\"Hours should be whole \\\"hh\\:00\\\", e.g. \\\"08:00\\\"\"))\n\n if not isinstance(sub_h_freq,str):\n raise(ValueError(\"Frequency hour argument must be string.\\ne.g. \\\"2H\\\"\"))\n \n if sub_h_freq[-1] != 'H' and sub_h_freq != 'all':\n raise(NotImplementedError(\"Currenly only hour sub sampling is allowed\"))\n \n if sub_D_freq[-1] != 'D' and sub_D_freq != 'all':\n raise(NotImplementedError(\"Currenly only day sub sampling is allowed\"))\n\n #Fetch stem data (grid) - used for sanity check but also later on\n grid_path = \"Fortrolig_data/stem_data/forecast_grid\" #load grid numbers from file\n grid = sio.loadmat(root + grid_path + \".mat\")['forecast_grid'].T[0]\n \n if not set(grid_list).issubset(set(grid)) and grid_list != 'all':\n raise(ValueError(\"One or more elements in grid_list is invalid:\\n\"\n \"forecast for that grid point is not known\"))\n \n #Import more sanity check in neccesary later\n \n #handle timerange\n if sub_h_freq == 'all':\n sub_h_freq = \"H\"\n \n if sub_D_freq == 'all':\n sub_D_freq = \"D\"\n \n t_end = t_end.replace(hour = 23)\n rng = pd.date_range(t_start,t_end,freq = sub_h_freq) #daterange for forecast\n h_int = rng.freq.delta.components.hours #get hours as int\n if 24%h_int != 0:\n raise(ValueError(\"Freqency in hours must be a multible of 24, e.g. 2,6,12\"))\n \n if hours == \"all\":\n hours = (\"00:00\",\"23:00\")\n day_rng = pd.date_range(t_start,t_end,freq = sub_D_freq)\n rng = choose_days_from_timerange(rng,day_rng) #subsample days\n rng = rng[rng.indexer_between_time(hours[0],hours[1])] #remove unwanted hours\n spd = int(len(rng)/len(day_rng)) #samples pr. 
day\n s_day0 = rng[0].hour\n s_day1 = -((24 - rng[-1].hour) - 1)\n \n #Avoid empty matrix when indexing\n if s_day0 == 0: \n s_day0 = None\n if s_day1 == 0:\n s_day1 = None \n \n \n if grid_list == \"all\":\n grid_index = range(len(grid)) #All indicies\n grid_list = grid\n else:\n grid_index = np.in1d(grid, grid_list).nonzero()[0]\n #List with indicies of chosen grid numbers\n\n #Create data structures\n if info == \"all\":\n info = (\"GHI\",\"WD\",\"WS\")\n data = dict.fromkeys(info) #Big ass data matrix \n N,M = len(rng),len(grid_index)\n #Create datamatrix for forecast types\n for key in data.keys():\n data[key] = np.zeros((N,M))\n \n folder_path = \"Fortrolig_data/2017_forecast/\"\n idx_count = 0\n for t in day_rng: #Runs thorugh every 6th element in timerange excluding the last\n data_path = \"%d/%d/\" %(t.month,t.day) #Specific day and hour\n for key in data.keys(): #load from file and write to matrix\n data[key][idx_count*spd:idx_count*spd + spd] = \\\n np.matrix(pd.read_pickle(root + folder_path + data_path +\\\n key + 'day.p'))\\\n [s_day0:s_day1][:,grid_index][::h_int]\n # [s_day0:s_day1] picks out the relevant times\n # [:,muni_index] picks out the relevant munipicilaties\n idx_count += 1\n \n #Convert to dataframe, overwrites matricies\n dataframes = dict.fromkeys([\"GHI\",\"WD\",\"WS\"]) #dictionary for dataframes\n for key in data.keys(): \n dataframes[key] = pd.DataFrame(data[key],index = rng,columns = grid_list)\n dataframes[key].columns.name = 'GRIDNR'\n #Return as forecast object with specified information\n return(forecast(GHI = dataframes[\"GHI\"],WD = dataframes[\"WD\"],\\\n WS = dataframes[\"WS\"],h_freq=sub_h_freq,D_freq = sub_D_freq))", "def route_trips_with_stops_to_dataframe(self, gtfs_day='19700101'):\n df = None\n _df = DataFrame({\n 'departure_time':\n [use_schedule.get_offset(self.departure_offsets[i]) for i in range(len(self.ordered_stops) - 1)],\n 'arrival_time':\n [use_schedule.get_offset(self.arrival_offsets[i]) for i in range(1, len(self.ordered_stops))],\n 'from_stop': self.ordered_stops[:-1],\n 'to_stop': self.ordered_stops[1:]\n })\n for trip_id, trip_dep_time, veh_id in zip(self.trips['trip_id'], self.trips['trip_departure_time'],\n self.trips['vehicle_id']):\n trip_df = _df.copy()\n trip_df['trip'] = trip_id\n trip_df['vehicle_id'] = veh_id\n trip_dep_time = use_schedule.sanitise_time(trip_dep_time, gtfs_day=gtfs_day)\n trip_df['departure_time'] = trip_dep_time + trip_df['departure_time']\n trip_df['arrival_time'] = trip_dep_time + trip_df['arrival_time']\n if df is None:\n df = trip_df\n else:\n df = df.append(trip_df)\n df['route'] = self.id\n df['route_name'] = self.route_short_name.replace(\"\\\\\", \"_\").replace(\"/\", \"_\")\n df['mode'] = self.mode\n df['from_stop_name'] = df['from_stop'].apply(lambda x: self.stop(x).name.replace(\"\\\\\", \"_\").replace(\"/\", \"_\"))\n df['to_stop_name'] = df['to_stop'].apply(lambda x: self.stop(x).name.replace(\"\\\\\", \"_\").replace(\"/\", \"_\"))\n df = df.reset_index(drop=True)\n return df", "def load_trend(query_api, measurements, trend_window=3, bucket=\"sdd\"):\n print(f\"load_trend... 
(trend_window={trend_window})\")\n logging.debug(f\"Influx DB query for load_trend() with trend_window={trend_window}\")\n filterstring = \" or \".join([f'r[\"_field\"] == \"{helpers.fieldnames[x]}\"' for x in measurements])\n query = f'''\n from(bucket: \"{bucket}\")\n |> range(start: -{trend_window + 2}d)\n |> filter(fn: (r) => {filterstring})\n |> filter(fn: (r) => r[\"unverified\"] != \"True\")\n '''\n tables = query_api.query_data_frame(query)\n print(\"query executed\")\n if isinstance(tables, list):\n df = pd.concat(tables)\n else:\n df = tables\n df[\"c_id\"] = compound_index(df)\n\n output = {\n \"model\": {},\n \"trend\": {},\n \"last_value\": {},\n \"last_time\": {}\n }\n df[\"_time\"] = df[\"_time\"].apply(helpers.utc_to_local, 1)\n df[\"unixtime\"] = df[\"_time\"].apply(lambda x: int(x.timestamp()), 1) # unixtime in s\n for cid in set(df[\"c_id\"]):\n # get sub-dataframe for this id\n\n tmpdf = df[df[\"c_id\"] == cid].sort_values(by=[\"unixtime\"])\n output[\"last_value\"][cid] = tmpdf[\"_value\"].iloc[-1]\n output[\"last_time\"][cid] = tmpdf[\"_time\"].iloc[-1]\n\n lastday = max(tmpdf[\"_time\"])\n firstday = min(tmpdf[\"_time\"])\n\n if (lastday - firstday).days < trend_window - 1:\n # not enough data for this station, trend window not covered\n output[\"model\"][cid] = (np.nan, np.nan)\n output[\"trend\"][cid] = np.nan\n continue\n\n day0 = lastday - timedelta(days=trend_window - 1)\n tmpdf = tmpdf[tmpdf[\"_time\"] >= day0]\n tmpdf = tmpdf.reset_index(drop=True)\n\n values = pd.to_numeric(tmpdf[\"_value\"])\n\n COUNT_LOW_THRESHOLD = 3\n PERCENT_NONZEROS_THRESHOLD = 0.75\n # perform linear regression only when the mean is above COUNT_LOW_THRESHOLD\n # or if the fraction of non-zero numbers exceeds PERCENT_NONZEROS_THRESHOLD.\n # This is to suppress unhelpful fits for low-value data sources\n if np.mean(values) > COUNT_LOW_THRESHOLD or \\\n np.count_nonzero(values) / len(values) > PERCENT_NONZEROS_THRESHOLD:\n # linear regression y = a*x +b\n model = np.polyfit(tmpdf[\"unixtime\"], values, 1)\n output[\"model\"][cid] = model\n\n # calculate trend\n a, b = model[:2]\n t1 = day0.timestamp()\n t2 = lastday.timestamp()\n y1 = (a * t1 + b)\n y2 = (a * t2 + b)\n if y1 > 0:\n output[\"trend\"][cid] = y2 / y1 - 1\n else:\n output[\"trend\"][cid] = np.nan\n else:\n # counts too low for reliable regression\n output[\"model\"][cid] = (np.nan, np.nan)\n output[\"trend\"][cid] = np.nan\n\n return output # dicts", "def fetch(api, site, start, end, *, nrel_pvdaq_api_key):\n try:\n site_extra_params = common.decode_extra_parameters(site)\n except ValueError:\n return pd.DataFrame()\n try:\n years = list(range(start.year, end.year + 1))\n obs_df = pvdaq.get_pvdaq_data(\n site_extra_params['network_api_id'], years,\n api_key=nrel_pvdaq_api_key)\n except Exception:\n # Not yet sure what kind of errors we might hit in production\n logger.warning(f'Could not retrieve data for site {site.name}'\n f' between {start} and {end}.')\n return pd.DataFrame()\n obs_df = _watts_to_mw(obs_df)\n try:\n obs_df = obs_df.tz_localize(site.timezone)\n except NonExistentTimeError as e:\n logger.warning(f'Could not localize data for site {site.name} '\n f'due to DST issue: {e}')\n return pd.DataFrame()\n return obs_df", "def extract_between_times(\n t_start: AcceptableTimeStamp,\n t_end: AcceptableTimeStamp,\n keys: Sequence[str] = None,\n names: Dict[str, str] = None,\n) -> tfs.TfsDataFrame:\n with suppress(TypeError):\n t_start: CERNDatetime = CERNDatetime.from_timestamp(t_start)\n\n with suppress(TypeError):\n 
t_end: CERNDatetime = CERNDatetime.from_timestamp(t_end)\n\n db = pytimber.LoggingDB(source=\"nxcals\")\n if keys is None:\n keys = get_tune_and_coupling_variables(db)\n\n # Attempt getting data from NXCALS, which can sometimes need a few retries (yay NXCALS)\n # If Java gives a feign.RetryableException, retry up to MAX_RETRIES times.\n extract_dict = {}\n for tries in range(MAX_RETRIES + 1):\n try:\n # We use timestamps to avoid any confusion with local time\n extract_dict = db.get(keys, t_start.timestamp(), t_end.timestamp())\n except jpype.java.lang.IllegalStateException as java_state_error:\n raise IOError(\n \"Could not get data from Timber, user probably has no access to NXCALS\"\n ) from java_state_error\n except jpype.JException as java_exception: # Might be a case for retries\n if \"RetryableException\" in str(java_exception) and (tries + 1) < MAX_RETRIES:\n LOG.warning(f\"Could not get data from Timber! Trial no {tries + 1} / {MAX_RETRIES}\")\n continue # will go to the next iteratoin of the loop, so retry\n raise IOError(\"Could not get data from timber!\") from java_exception\n else:\n break\n\n if (not len(extract_dict) # dict is empty\n or all(not len(v) for v in extract_dict.values()) # values are empty\n or all(len(v) == 2 and not len(v[0]) for v in extract_dict.values()) # arrays are empty (size 2 for time/data)\n ):\n raise IOError(f\"Variables {keys} found but no data extracted in time {t_start.utc_string} - {t_end.utc_string} (UTC).\\n\"\n f\"Possible reasons:\\n\"\n f\" - Too small time window.\\n\"\n f\" - Old pytimber version.\\n\"\n f\" - Variable outdated (i.e. no longer logged).\")\n\n out_df = tfs.TfsDataFrame()\n for key in keys:\n if extract_dict[key][1][0].size > 1:\n raise NotImplementedError(\"Multidimensional variables are not implemented yet\")\n\n data = np.asarray(extract_dict[key]).transpose()\n column = key if names is None else names.get(key, key)\n key_df = tfs.TfsDataFrame(data, columns=[TIME_COL, column]).set_index(TIME_COL)\n out_df = out_df.merge(key_df, how=\"outer\", left_index=True, right_index=True)\n\n out_df.index = [CERNDatetime.from_timestamp(i) for i in out_df.index]\n out_df.headers[START_TIME] = t_start.cern_utc_string()\n out_df.headers[END_TIME] = t_end.cern_utc_string()\n return out_df", "def gtfs_data(stop_id):\n my_date = date.today()\n day = calendar.day_name[my_date.weekday()].lower()\n current_date = datetime.now().strftime('%d/%m/%Y ')\n current_time = datetime.now().strftime('%H:%M:%S')\n time_object = datetime.strptime(current_time, '%H:%M:%S').time()\n service_ids_from_calendar_df = calendar_df.loc[calendar_df[day] == 1]\n list_of_service_ids = service_ids_from_calendar_df['service_id'].to_list()\n stop_id_timetable_df = timetable_df.loc[timetable_df['stop_id'] == stop_id]\n list_of_trips = stop_id_timetable_df['trip_id'].to_list()\n trips_df_with_select_trip_ids = trips_df[trips_df['trip_id'].isin(\n list_of_trips)]\n trips_df_with_select_trip_ids_and_service_ids = trips_df_with_select_trip_ids[\n trips_df_with_select_trip_ids['service_id'].isin(list_of_service_ids)]\n list_of_route_ids = trips_df_with_select_trip_ids_and_service_ids['route_id'].to_list(\n )\n route_df_with_select_route_ids = route_df[route_df['route_id'].isin(\n list_of_route_ids)]\n merged_route_and_trips_df = trips_df_with_select_trip_ids_and_service_ids.merge(\n route_df_with_select_route_ids, on=\"route_id\", how='inner')\n merged_df = stop_id_timetable_df.merge(\n merged_route_and_trips_df, on=\"trip_id\", how='inner')\n sub_final_df = 
merged_df.drop_duplicates(\n subset='arrival_time', keep=\"first\").copy()\n sub_final_df['arrival_time'] = sub_final_df['arrival_time'].apply(\n parse_midnight)\n sub_final_df['trip_headsign'] = sub_final_df['trip_headsign'].str.split(\n '-').str[1]\n final_df = sub_final_df[['arrival_time',\n 'route_short_name', 'trip_headsign']].copy()\n final_df.iloc[:, 0] = current_date + final_df.iloc[:, 0]\n final_df.rename(columns={'arrival_time': 'idA',\n 'route_short_name': 'idB', 'trip_headsign': 'targetA'}, inplace=True)\n arrivaltime = sub_final_df[\"arrival_time\"]\n final_df = final_df.join(arrivaltime)\n final_df['arrival_time'] = pd.to_datetime(\n final_df['arrival_time'], format='%H:%M:%S').dt.time\n final_df = final_df.sort_values(by='arrival_time')\n final_df.rename(columns={'arrival_time': 'targetB'},\n inplace=True)\n if final_df.empty:\n # no information returned from GTFS dataset\n # return placeholder object to avoid error\n return placeholder\n elif len(final_df.index) > 10:\n # where the dataframe is larger than 10\n # return only the timetable information that occurs after the current time.\n df = final_df[final_df['targetB'] > time_object]\n if df.empty:\n # no information returned from GTFS dataset within the selected timeframe.\n # return placeholder object to avoid error\n return placeholder\n else:\n df = df.sort_values(by='targetB')\n df = df.head(10)\n result = df.to_json(orient=\"records\")\n parsed = json.loads(result)\n return {'results': parsed}\n else:\n df = final_df\n result = df.to_json(orient=\"records\")\n parsed = json.loads(result)\n return {'results': parsed}", "def _extract_data_to_dataframe_at_time(t):\n print(\"Publishing data for day {} (index {})\".format(t[1], t[0]))\n itime = t[0]\n nb_cells = nc.dimensions['n_cells'].size\n npst = np.ma.column_stack((\n np.arange(start=1, stop=nb_cells + 1, dtype='i4'),\n vfunc_jd_to_dt(np.full((nb_cells), nc.variables['time'][itime])),\n nc.variables['water_elevation_catchment_mean'][itime, :],\n nc.variables['water_elevation_catchment_median'][itime, :],\n nc.variables['water_elevation_catchment_std'][itime, :],\n nc.variables['water_elevation_catchment_mad'][itime, :],\n nc.variables['streamflow_catchment_mean'][itime, :],\n nc.variables['streamflow_catchment_median'][itime, :],\n nc.variables['streamflow_catchment_std'][itime, :],\n nc.variables['streamflow_catchment_mad'][itime, :],\n vfunc_jd_to_dt(np.full((nb_cells), nc.variables['time_added_to_hydb'][itime])),\n np.full((nb_cells), nc.variables['is_analysis'][itime])\n ))\n\n df = pd.DataFrame(npst,\n index=np.arange(start=1, stop=nb_cells + 1, dtype='i4'),\n columns=['cell_id', 'date', 'elevation_mean', 'elevation_median', 'elevation_stddev', 'elevation_mad',\n 'flow_mean', 'flow_median', 'flow_stddev', 'flow_mad', 'update_time', 'is_analysis']\n )\n\n # force cell_id type to smallint\n df = df.astype({\n 'cell_id': 'int16',\n 'is_analysis': 'boolean'\n })\n print(df)\n return df", "def get_ltv_df(start_date, days=7):\n dates = generate_dates(start_date, days)\n file_paths = []\n for date in dates:\n for network in const.MONET_NETWORKS:\n path = const.PATH_REPORTS +'/'+ date + '/' + network + '/'\n file_path = 's3://' + const.ORION_BUCKET + '/' + get_xday_ltv(const.s3_client, const.ORION_BUCKET, path, xday=const.XDAY)\n file_paths.append(file_path)\n #print(file_path)\n\n # fetch ltv data to build dataframe\n ltv_df = pd.DataFrame()\n for file_path in file_paths:\n tmp_df = pd.read_csv(file_path, compression='gzip')\n tmp_df = tmp_df[tmp_df['xday']==2] # use 
configuration\n ltv_df = pd.concat([ltv_df,tmp_df],ignore_index=True)\n ltv_df['date'] = pd.to_datetime(ltv_df['date'])\n return ltv_df", "def get_datasets_in_time_range(c_ftp, product, t_start, t_end, in_memory=False,\n product_kws={}, debug=False):\n product_path_format = get_remote_path_format(product, **product_kws)\n remote_paths = []\n for qr in gen_queries(product_path_format, t_start, t_end):\n if debug:\n print(\"query\", qr)\n try:\n remote_paths += c_ftp.list(qr)\n except:\n pass\n ds_ = []\n for remote_path in remote_paths:\n if debug:\n print(\"download\", remote_path)\n ds = get_dataset(c_ftp, remote_path, in_memory=in_memory)\n ds_.append(ds)\n if len(ds_) > 0:\n return xr.concat(ds_, dim='time')\n else:\n raise Exception(\"No data found\")", "def import_muni_forecast_simu(t_start,t_end,info = (\"GHI\",),muni_list = \"all\",\\\n res = \"H\"):\n t_max = pd.Timestamp(2018,1,1,0)\n if t_start > t_max or t_end > t_max:\n raise(ValueError(\"Select a daterange within 2017\"))\n \n if t_start.time() != d_time(0,0) or t_end.time() != d_time(0,0):\n raise(ValueError(\"t_start and t_end should be whole dates only, \\n\"\n \"i.e hours = 0 and minutes = 0. \\n\"\n \"Use the hours argument to get less hours on a day\"))\n\n if not isinstance(info,(list,tuple,np.ndarray)) and info != \"all\":\n raise(TypeError(\"info argument should be tuple, list or numpy array\"))\n \n if not isinstance(muni_list,(list,tuple)) and muni_list != 'all':\n raise(ValueError(\"muni_list should be list or tuple\"))\n \n grid_list,conv_sheet = muni_list_to_grid_list(muni_list)\n if muni_list == 'all': #transform grid list into all because faster later\n grid_list = 'all'\n muni_list = conv_sheet.index\n \n #structure for forecasts\n days = pd.date_range(t_start,t_end,freq = \"D\")\n h_dic = dict.fromkeys(['00','06','12','18'])\n fc_dic = dict.fromkeys(days.date)\n for day in days:\n #Load data\n h_dic_day = copy.deepcopy(h_dic) #used for storing forecasts\n fc_grid = import_single_forecast_from_mat(day,info = info,\\\n grid_list = grid_list,\\\n res = res)\n for h in fc_grid.keys():\n fc_muni = _average_grid_to_muni(fc_grid[h],info,conv_sheet,muni_list)\n h_dic_day[h] = forecast(GHI = fc_muni[\"GHI\"],WD = fc_muni[\"WD\"],\\\n WS = fc_muni[\"WS\"],mode = \"simu\",h_freq=res,hours = 'all')\n fc_dic[day.date()] = h_dic_day\n\n return(forecast_simu(fc_dic,info,h_freq=res))", "def load_isd_hourly_temp_data(\n self,\n start,\n end,\n read_from_cache=True,\n write_to_cache=True,\n fetch_from_web=True,\n error_on_missing_years=True,\n ):\n return load_isd_hourly_temp_data(\n self.usaf_id,\n start,\n end,\n read_from_cache=read_from_cache,\n write_to_cache=write_to_cache,\n fetch_from_web=fetch_from_web,\n error_on_missing_years=error_on_missing_years,\n )", "def load_gsod_daily_temp_data(\n self, start, end, read_from_cache=True, write_to_cache=True, fetch_from_web=True\n ):\n return load_gsod_daily_temp_data(\n self.usaf_id,\n start,\n end,\n read_from_cache=read_from_cache,\n write_to_cache=write_to_cache,\n fetch_from_web=fetch_from_web,\n )", "def weather_data_to_df(file, period_start, period_end, timestep):\n folder = 'profiles'\n subfolder = 'weather'\n df = open_csv(file, os.path.join(folder, subfolder), ',')\n for t in ['Temperature', 'Irradiance']:\n df[t] = pd.to_numeric(df[t], errors='coerce')\n \n to_date_time(df, 'Date')\n \n df = df.truncate(before = period_start, after = period_end)\n \n # Sum over Irradiance values: units of Irradiance are now kWh/m^2/h = kW/m^2\n df = 
df.resample(time_delta(timestep)).agg({'Irradiance': np.sum, 'Temperature': np.mean})\n df['Irradiance'] /= 1000 \n return df", "def open_url(mill, start_timestamp):\n url = 'http://ds.windstream-inc.com/WSData/api/performancedata.json'\n params = { 'installid': mill.install_id,\n 'timezone': 'utc',\n 'start': start_timestamp.strftime(\"%Y-%m-%d %H:%M\"),\n 'span': \"{}hours\".format(HOURS_PER_QUERY)\n\n }\n full_url = \"{}?{}\".format(url, urlencode(params))\n return urlopen(full_url)", "def get_stock_data(stock_id: str, begin_date: str, end_date: str) -> pd.DataFrame:\r\n # minute bar\r\n target_column = ['szWindCode', 'nActionDay', 'nTime', 'nOpenIndex', 'nHighIndex',\r\n 'nLowIndex', 'nLastIndex', 'iTotalVolume', 'iTurnover', 'nPreCloseIndex']\r\n\r\n \"\"\" get data\"\"\"\r\n db = pymysql.connect(host='192.168.10.68',\r\n port=3306,\r\n user='nas',\r\n password='Prism@123456')\r\n\r\n sql = \"\"\"SELECT symbol.szWindCode, snap_min_index.* FROM marketdata.snap_min_index join marketdata.symbol where snap_min_index.codeID = symbol.codeID \r\n and symbol.szWindCode = '%s' and snap_min_index.nActionDay >= %s and snap_min_index.nActionDay <= %s;\"\"\" \\\r\n % (stock_id, begin_date, end_date)\r\n\r\n df = pd.read_sql(sql, db)\r\n df = df[target_column]\r\n return df", "def get_data_by_time(path, columns, start_date, start_time=\"00:00\", end_date=None, end_time=\"23:59\"):\n\n # Locate and read data file(s)\n if path[-1] != '/':\n path += '/'\n paths = [path + \"datalog \" + start_date + '.xls']\n data = [remove_notes(pd.read_csv(paths[0], delimiter='\\t'))]\n\n if end_date is not None:\n paths.append(path + \"datalog \" + end_date + \".xls\")\n data.append(remove_notes(pd.read_csv(paths[1], delimiter='\\t')))\n\n # Calculate start index\n time_column = pd.to_numeric(data[0].iloc[:, 0])\n interval = time_column[1]-time_column[0]\n start_idx = int(round((day_fraction(start_time) - time_column[0])/interval + .5)) #round up\n\n # Calculate end index\n time_column = pd.to_numeric(data[-1].iloc[:, 0])\n end_idx = int(round((day_fraction(end_time) - time_column[0])/interval + .5)) + 1 #round up\n\n # Get columns of interest\n if len(paths) == 1:\n if isinstance(columns, int):\n result = list(pd.to_numeric(data[0].iloc[start_idx:end_idx, columns]))\n else:\n result = []\n for c in columns:\n result.append(list(pd.to_numeric(data[0].iloc[start_idx:end_idx, c])))\n else:\n data[1].iloc[0, 0] = 0\n if isinstance(columns, int):\n result = list(pd.to_numeric(data[0].iloc[start_idx:, columns])) + \\\n list(pd.to_numeric(data[1].iloc[:end_idx, columns]) + (1 if columns == 0 else 0))\n else:\n result = []\n for c in columns:\n result.append(list(pd.to_numeric(data[0].iloc[start_idx:, c])) +\n list(pd.to_numeric(data[1].iloc[:end_idx, c])+(1 if c == 0 else 0)))\n\n return result", "def load_data(symbol):\n end = datetime.today()\n start = end - pd.Timedelta('5y')\n # get start & endtime for ohlc data\n start_time = convert_to_unix(start)\n end_time = convert_to_unix(end)\n # set resolution for query to 'Daily'\n resolution = 'D'\n \n # get OHLC data for defined symbol\n res = finnhub_client.stock_candles(symbol, resolution, start_time, end_time)\n\n data = pd.DataFrame(res)\n data = data.set_index(convert_to_timestamp(data.t))\n\n return data", "def get_fundamentals(self,\n start: dt.date = DateLimit.LOW_LIMIT.value,\n end: dt.date = dt.date.today(),\n period: DataMeasure = DataMeasure.ONE_YEAR.value,\n direction: DataMeasure = DataMeasure.FORWARD.value,\n metrics: List[DataMeasure] = 
DataMeasure.list_fundamentals()) -> pd.DataFrame:\n where = dict(assetId=self.id, period=period, periodDirection=direction, metric=metrics)\n query = DataQuery(where=where, start_date=start, end_date=end)\n response = GsDataApi.query_data(query=query, dataset_id=IndicesDatasets.BASKET_FUNDAMENTALS.value)\n return pd.DataFrame(response)", "def route_trips_with_stops_to_dataframe(self, gtfs_day='19700101'):\n df = None\n for route in self.routes():\n _df = route.route_trips_with_stops_to_dataframe(gtfs_day=gtfs_day)\n if df is None:\n df = _df\n else:\n df = df.append(_df)\n df['service'] = self.id\n df['service_name'] = self.name.replace(\"\\\\\", \"_\").replace(\"/\", \"_\")\n df = df.reset_index(drop=True)\n return df" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves a system of linear equations
def solve_linear_equations(*args: List[RealNumber]) -> List[RealNumber]:
    # Check to see if solution is underdetermined (num_eq < num_var)
    if len(args) < len(args[0]) - 1:  # -1 because the RH side is not a variable
        raise UnderDeterminedError
    m = Matrix(list(args))

    # Put Matrix in Reduced-Row Echelon Form
    m.rref()

    # Check matrix for num_solutions
    inf_sol = [0 for _ in range(m.num_columns)]
    no_sol = inf_sol[:-1] + [1]
    for row in m.array:
        if row == inf_sol:
            raise InfiniteSolutionsWaring
        elif row == no_sol:
            raise InconsistentWarning

    # Convert matrix to solution dict
    solution = []
    for row in m.array:
        solution.append(row[-1])

    # Return solution
    return solution
[ "def _solve_system(self):\n result, residual = optimize.nnls(self._lgs_A.toarray(), np.asarray(self._lgs_b))\n\n self._lgs_sol = result", "def solve(self):\r\n\r\n # A pre-allocation for the matrix used to solve the system\r\n matrix = []\r\n\r\n # Each unknown must be put into a list so sympy can solve it\r\n unknowns_list = list(self.dict_of_variables.keys())\r\n\r\n # Each equation (except for the 'Total') will be appended to the matrix. This is done to allow for the user\r\n # or the code (when this feature is added) to easily double check the variables for accuracy\r\n for key, equation in self.equations_dict.items():\r\n if key != 'Total':\r\n matrix.append(equation)\r\n\r\n # sympy does it's thing and returns a dict in the form of {symbol: solution}\r\n solutions = sp.solve(matrix, unknowns_list, dict=True)\r\n\r\n # This loop updates the dict_of_variables with the newly solved values for each\r\n for solutions_set in solutions:\r\n\r\n # This is done because the solutions are given in a list containing a dictionary: [{}], which is weird\r\n for count in range(len(solutions_set)):\r\n\r\n # The newly solved variables can be used to solve other ControlVolumes\r\n self.dict_of_variables[unknowns_list[count]] = solutions_set[unknowns_list[count]]", "def solve(self, solver):\r\n solver.solve()", "def test_exam_lsolve3S(self):\n result = 0\n b = symbol('b')\n c = symbol('c')\n x = symbol('x')\n y = symbol('y')\n z = symbol('z')\n\n # Create the linear system [y+z==b, -y+z==b] with an additional row\n eqns = [numeric(0) == numeric(0), b == z + y, -y+z == c]\n # Solve it for [x,y,z]\n solution = lsolve(eqns, [x,y,z])\n solx = solution[0].rhs()\n soly = solution[1].rhs()\n solz = solution[2].rhs()\n\n if(solx != x or soly != (b-c)/2 or solz != (b+c)/2):\n result = 1\n print \"solution of the system \", [str(item) for item in eqns], \" for [x,y,z]\"\n print \"erroneously returned \", [str(item) for item in solution]\n self.assertEqual(result,0)", "def solve_equations(self, period):\n ___SOLVE_EQUATIONS___", "def Resolve(splu,RHS):\n\t# array 2D -> array 1D\n\tf2 = RHS.ravel()\n\n\t# Solving the linear system\n\tx = lg.lsqr(splu.tocsc(),f2)\n\n\treturn x[0].reshape(RHS.shape)", "def solve_linear(self, prob, sett, RHS, x_init):\n # r1 is dense row1 of rhs vector from update_xz_tilde,\n # i.e. 
RHS = xz_tilde, the 3x1 vector [x_tilde; v_tilde]\n ## Solve linear system\n r1 = RHS[:prob.n_val]\n r2 = RHS[prob.n_val:]\n # Set up rhs\n rhs = r1 + sett.rho * tf.sparse_tensor_dense_matmul(prob.AT, r2)\n # Solve to obtain x first using conjugate gradient method solving Mx = b\n cg = ConjugateGradient(prob, sett, rhs, x_init, sett.cg_tol, sett.cg_max_iter)\n state = cg.solve()\n x = state.x\n cg_iter = state.k\n # Now find y = (A*x - r2) * rho\n y = tf.sparse_tensor_dense_matmul(prob.A, x) - r2\n y = y * sett.rho\n # Finally form the output [x; y] as the new xz_tilde\n RHS = tf.concat([x, y], 0)\n return RHS, cg_iter", "def test_exam_lsolve2b(self):\n result = 0\n x = symbol('x')\n y = symbol('y')\n eqns = [3*x+y==7, 2*x-5*y==8]\n solution = lsolve(eqns, [x,y])\n solx = solution[0].rhs()\n soly = solution[1].rhs()\n # It should have returned x==43/17 and y==-10/17\n if(solx != numeric(43,17) or soly != numeric(-10,17)):\n result = 1\n print \"solution of the system \", [str(item) for item in eqns], \" for [x,y] \"\n print \"erronously returned \", [str(item) for item in solution]\n self.assertEqual(result,0)", "def test_exam_lsolve1(self):\n \n x = symbol('x'); \n eq = (3*x+5 == numeric(8));\n return [str(item) for item in lsolve([eq], [x])];", "def test_exam_lsolve2a(self):\n\n a = symbol('a');\n b = symbol('b');\n x = symbol('x');\n y = symbol('y');\n eqns = [a*x + b*y == 3, x-y==b];\n solution = lsolve(eqns, [x,y]);\n solx = solution[0].rhs();\n soly = solution[1].rhs();\n realx = (3+pow(b,2))/(a+b);\n realy = (3-a*b)/(a+b);\n result = (solx-realx).normal().is_zero() and (soly-realy).normal().is_zero() \n self.assertEqual(result,1)", "def test_exam_lsolve2S(self):\n result = 0\n x = symbol('x')\n y = symbol('y')\n t = symbol('t')\n eqns = [0*x + 0*y == 0, 0*x + 1*y == t]\n solution = lsolve(eqns, [x,y])\n solx = solution[0].rhs()\n soly = solution[1].rhs()\n # It should have returned x==x, y==t\n if(solx != x or soly != t):\n result = 1\n print \"solution of the system \", [str(item) for item in eqns], \" for [x,y]\"\n print \"erroneously returned \", [str(item) for item in solution]\n self.assertEqual(result,0)", "def solve_graph(self):\n self.solver.solve()", "def solve_lin_sys(eqs, ring, _raw=True):\n as_expr = not _raw\n\n assert ring.domain.is_Field\n\n # transform from equations to matrix form\n matrix = eqs_to_matrix(eqs, ring)\n\n # solve by row-reduction\n echelon, pivots = matrix.rref(iszerofunc=lambda x: not x, simplify=lambda x: x)\n\n # construct the returnable form of the solutions\n keys = ring.symbols if as_expr else ring.gens\n\n if pivots[-1] == len(keys):\n return None\n\n if len(pivots) == len(keys):\n sol = []\n for s in echelon[:, -1]:\n a = ring.ground_new(s)\n if as_expr:\n a = a.as_expr()\n sol.append(a)\n sols = dict(zip(keys, sol))\n else:\n sols = {}\n g = ring.gens\n _g = [[-i] for i in g]\n for i, p in enumerate(pivots):\n vect = RawMatrix(_g[p + 1:] + [[ring.one]])\n v = (echelon[i, p + 1:]*vect)[0]\n if as_expr:\n v = v.as_expr()\n sols[keys[p]] = v\n\n return sols", "def _solve_v(Theta, U, V, L, lambda2, S, pi):\n pass", "def solve_linear(A, x, b, var):\n idx = x.index(Calculus(var)) # position of desired variable in vars\n _A = A * 1 # coppy of A\n _A[:, idx] = b # put the rhs vector into the idx-column of the lhs matrix\n numerator = _A.det().expand()\n denominator = A.det().expand()\n return numerator, denominator", "def solve_algorithm(self):\n self.algorithm.solve()", "def test_exam_lsolve2c(self):\n result = 0\n x = symbol('x')\n y = 
symbol('y')\n eqns = [I*x+y == 1, I*x-y == 2]\n solution = lsolve(eqns, [x,y])\n solx = solution[0].rhs()\n soly = solution[1].rhs()\n # It should have returned x == -3/2*I and y == -1/2\n if(solx != numeric(-3,2)*I or soly != numeric(-1,2)):\n result = 1\n print \"solution of the system \", [str(item) for item in eqns], \" for [x,y] \"\n print \"erroneously returned \", [str (item) for item in solution]\n self.assertEqual(result,0)", "def solve(equation):\r\n\r\n if not validator.is_valid(equation):\r\n raise Invalid(\"not valid\")\r\n #make haircut for the minuses\r\n equation = solver_helper.minuses_haircut(equation)\r\n #strip the expression from it's brackets if necessary\r\n # if an expression needs to be striped twice then it's\r\n # invalid equation\r\n if solver_helper.needs_to_be_bracket_striped(equation):\r\n equation = solver_helper.strip_outer_brackets(equation)\r\n if solver_helper.needs_to_be_bracket_striped(equation):\r\n raise Exception(\"unnecessary brackets on an expression: (\" +\r\n str(equation) + \")\")\r\n #make a list\r\n lst = solver_helper.make_a_list(equation)\r\n #(on the list)\r\n\r\n # while there are expressions, solve them\r\n # (expression is an equation in between brackets)\r\n\r\n i = finder.find_expression(lst)\r\n while i != -1:\r\n res = solve(lst[i])\r\n lst[i] = res\r\n i = finder.find_expression(lst)\r\n\r\n if solver_helper.list_is_valid(lst):\r\n pass\r\n #while len(lst) > 1 or lst[0] is not an expression\r\n #find the strongest operator and operate\r\n lst = clear_from_operators(lst)\r\n if solver_helper.list_is_valid(lst):\r\n pass\r\n if len(lst) > 1:\r\n raise Exception(\"an operator is missing between two expressions\")\r\n return lst[0]", "def lu_solve(self, LU_data, LU_pivots): # real signature unknown; restored from __doc__\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapper doesn't affect all-qubit quantum errors.
def test_remap_all_qubit_quantum_errors(self):
    model = NoiseModel()
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    model.add_all_qubit_quantum_error(error1, ['u3'], False)
    model.add_all_qubit_quantum_error(error2, ['cx'], False)

    remapped_model = remap_noise_model(model, [[0, 1], [1, 0]], warnings=False)
    self.assertEqual(model, remapped_model)
[ "def test_remap_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_quantum_error(error1, ['u3'], [0], False)\n model.add_quantum_error(error2, ['cx'], [1, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_quantum_error(error1, ['u3'], [1], False)\n target.add_quantum_error(error2, ['cx'], [2, 0], False)\n self.assertEqual(remapped_model, target)", "def test_remap_all_qubit_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n model.add_all_qubit_readout_error(error1, False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n self.assertEqual(remapped_model, model)", "def test_remap_nonlocal_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_nonlocal_quantum_error(error1, ['u3'], [0], [1], False)\n model.add_nonlocal_quantum_error(error2, ['cx'], [1, 2], [3, 0], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_nonlocal_quantum_error(error1, ['u3'], [1], [2], False)\n target.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)\n self.assertEqual(remapped_model, target)", "def test_map_failures(self):\n # any failed build must throw a HDFMappingError\n #\n # make a default/clean 'N5700_PS' module\n self.mod.knobs.reset()\n\n # expected dataset does not exist\n # - rename 'Run time list' dataset\n self.mod.move(\"Run time list\", \"N5700 data\")\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.move(\"N5700 data\", \"Run time list\")\n\n # 'N5700 power supply command list' attribute does not exist\n #\n config_name = self.mod.config_names[0]\n cl = self.mod[config_name].attrs[\"N5700 power supply command list\"]\n self.mod[config_name].attrs[\"Wrong command list\"] = cl\n del self.mod[config_name].attrs[\"N5700 power supply command list\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod[config_name].attrs[\"N5700 power supply command list\"] = cl\n del self.mod[config_name].attrs[\"Wrong command list\"]\n\n # there are no configuration groups to map\n del self.f[\"Raw data + config/N5700_PS/config01\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.knobs.reset()", "def test_already_mapped(self):\n coupling = CouplingMap(\n [[1, 0], [1, 2], [2, 3], [3, 4], [3, 14], [5, 4], [6, 5],\n [6, 7], [6, 11], [7, 10], [8, 7], [9, 8], [9, 10],\n [11, 10], [12, 5], [12, 11], [12, 13], [13, 4], [13, 14],\n [15, 0], [15, 0], [15, 2], [15, 14]])\n qr = QuantumRegister(16, 'q')\n cr = ClassicalRegister(16, 'c')\n circ = QuantumCircuit(qr, cr)\n circ.cx(qr[3], qr[14])\n circ.cx(qr[5], qr[4])\n circ.h(qr[9])\n circ.cx(qr[9], qr[8])\n circ.x(qr[11])\n circ.cx(qr[3], qr[4])\n circ.cx(qr[12], qr[11])\n circ.cx(qr[13], qr[4])\n for j in range(16):\n circ.measure(qr[j], cr[j])\n\n dag = circuit_to_dag(circ)\n\n pass_ = StochasticSwap(coupling, None, 20, 13)\n after = pass_.run(dag)\n self.assertEqual(circuit_to_dag(circ), after)", "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, 
[[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_readout_error(error1, [2], False)\n target.add_readout_error(error2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def test_mapnode_crash(tmpdir):\n cwd = os.getcwd()\n node = pe.MapNode(\n niu.Function(\n input_names=[\"WRONG\"], output_names=[\"newstring\"], function=dummy_func\n ),\n iterfield=[\"WRONG\"],\n name=\"myfunc\",\n )\n node.inputs.WRONG = [f\"string{i}\" for i in range(3)]\n node.config = deepcopy(config._sections)\n node.config[\"execution\"][\"stop_on_first_crash\"] = True\n node.base_dir = tmpdir.strpath\n with pytest.raises(pe.nodes.NodeExecutionError):\n node.run()\n os.chdir(cwd)", "def test_invalid_qasmname_qr(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name='Qr')", "def test_mapnode_crash3(tmpdir):\n tmpdir.chdir()\n node = pe.MapNode(\n niu.Function(\n input_names=[\"WRONG\"], output_names=[\"newstring\"], function=dummy_func\n ),\n iterfield=[\"WRONG\"],\n name=\"myfunc\",\n )\n node.inputs.WRONG = [f\"string{i}\" for i in range(3)]\n wf = pe.Workflow(\"testmapnodecrash\")\n wf.add_nodes([node])\n wf.base_dir = tmpdir.strpath\n # changing crashdump dir to current working directory (to avoid problems with read-only systems)\n wf.config[\"execution\"][\"crashdump_dir\"] = os.getcwd()\n with pytest.raises(RuntimeError):\n wf.run(plugin=\"Linear\")", "def test_copy_residue_within_molecule_fail2(self):\n\n # Create a few residues.\n self.residue_fns.create(1, 'Ala')\n self.residue_fns.create(-1, 'His')\n\n # Copy a residue to a number which already exists.\n self.assertRaises(RelaxError, self.residue_fns.copy, res_from=':1', res_to=':-1,Gly')", "def test_mapQtoR(self):\n At = ds.mapQtoR(self.A)\n self.assertTrue(np.allclose(At, self.Ares))", "def test_optimize_single_reset_in_diff_qubits(self):\n qr = QuantumRegister(2, 'qr')\n circuit = QuantumCircuit(qr)\n circuit.reset(qr)\n dag = circuit_to_dag(circuit)\n\n expected = QuantumCircuit(qr)\n\n pass_ = RemoveResetInZeroState()\n after = pass_.run(dag)\n\n self.assertEqual(circuit_to_dag(expected), after)", "def test_copy_residue_within_molecule_fail1(self):\n\n # Create a few residues.\n self.residue_fns.create(1, 'Ala')\n self.residue_fns.create(-1, 'His')\n\n # Copy a non-existent residue (1 Met).\n self.assertRaises(RelaxError, self.residue_fns.copy, res_from=':Met', res_to=':2,Gly')", "def test_invalid_type_qr_name(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name=1)", "def test_reduce_remapped_noise_model(self):\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n roerror1 = [[0.9, 0.1], [0.5, 0.5]]\n roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n\n model = NoiseModel()\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_quantum_error(error1, ['u3'], [1], False)\n model.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)\n model.add_all_qubit_readout_error(roerror1, False)\n model.add_readout_error(roerror2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [2, 0, 1], discard_qubits=True, warnings=False)\n target = NoiseModel()\n target.add_all_qubit_quantum_error(error1, ['u3'], False)\n target.add_quantum_error(error1, ['u3'], [2], False)\n target.add_all_qubit_readout_error(roerror1, False)\n target.add_readout_error(roerror2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def test_chromosome_not_mapped(self):\n\n coordinate = {\"chromosome\": \"12\", \"position\": 
150, \"reference\": \"A\"}\n align_tuples = [\n (100,\"1\",100,300,\"2\"),\n (300,\"2\",200,20,\"7\") \n ]\n new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])\n self.assertEqual(new_mapping, None)", "def test_reprocess_ifc(self):\n pass", "def test(result, map1):\n if len(result.index) == len(map1.index):\n #print(f\"Internal test SUCCESSFUL, {map} mapped on {key}\")\n pass\n else:\n print(f\"Internal test FAILED. Attention! Total rows of the result does not match total rows of the map {map}. Check if {map} is a perfect subset of {key}.\")", "def test_remap_position_outside_expected_range(self):\n coordinate = {\"chromosome\": \"1\", \"position\": 35, \"reference\": \"A\"}\n align_tuples = [\n (100,\"1\",100,300,\"2\"),\n (300,\"2\",200,20,\"7\") \n ]\n new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])\n self.assertEqual(new_mapping, None)\n coordinate = {\"chromosome\": \"1\", \"position\": 201, \"reference\": \"A\"}\n align_tuples = [\n (100,\"1\",100,300,\"2\"),\n (300,\"2\",200,20,\"7\") \n ]\n new_mapping = grapper.remap_genome_coordinate(coordinate, align_tuples, [tup[0] for tup in align_tuples])\n self.assertEqual(new_mapping, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of quantum errors.
def test_remap_quantum_errors(self):
    model = NoiseModel()
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    model.add_quantum_error(error1, ['u3'], [0], False)
    model.add_quantum_error(error2, ['cx'], [1, 2], False)

    remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)
    target = NoiseModel()
    target.add_quantum_error(error1, ['u3'], [1], False)
    target.add_quantum_error(error2, ['cx'], [2, 0], False)
    self.assertEqual(remapped_model, target)
[ "def test_remap_all_qubit_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_all_qubit_quantum_error(error2, ['cx'], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 0]], warnings=False)\n self.assertEqual(model, remapped_model)", "def test_remap_nonlocal_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_nonlocal_quantum_error(error1, ['u3'], [0], [1], False)\n model.add_nonlocal_quantum_error(error2, ['cx'], [1, 2], [3, 0], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_nonlocal_quantum_error(error1, ['u3'], [1], [2], False)\n target.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)\n self.assertEqual(remapped_model, target)", "def test_remap_all_qubit_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n model.add_all_qubit_readout_error(error1, False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n self.assertEqual(remapped_model, model)", "def test_invalid_qasmname_qr(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name='Qr')", "def test_validate_invalid_transition_result_symbol(self):\n with nose.assert_raises(exceptions.InvalidSymbolError):\n self.ntm1.transitions['q0']['.'] = {('q3', '3', 'R')}\n self.ntm1.validate()", "def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])", "def test_61_spectral_index_probable_errors_filtering():\n\tcasalog.origin(\"test_61_spectral_index_probable_errors_filtering\")\n\tcasalog.post(\"starting\")\n\n\timmath(imagename=['imgG192_6s_spw0-63_mfs2.image.alpha.error', \n\t 'imgG192_6s_spw0-63_mfs2.image.tt0'],\n\t mode='evalexpr',\n\t expr='IM0[IM1>2E-4]',\n\t outfile='imgG192_6s_spw0-63_mfs2.image.alpha.error.filtered')", "def test_invalid_type_qr_name(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name=1)", "def test_invalid_rgb_magick(self):\n assert not poly.isInMap((40, 40), \"magick\") and not poly.isInMap((40, 40, 40, 40), \"magick\") and not poly.isInMap(('a', 40, 40), \"magick\") and not poly.isInMap((40, 40, 400), \"magick\") \n\n # Testing that the correct maps are supported", "def test_plot_error_map_over_100_qubit_backend_v2(self):\n backend = FakeWashingtonV2()\n img_ref = path_to_diagram_reference(\"washington_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)", "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_readout_error(error1, [2], False)\n target.add_readout_error(error2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def test_plot_error_map_backend_v1(self):\n backend = FakeKolkata()\n img_ref = 
path_to_diagram_reference(\"kolkata_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)", "def test_bad_units(self):\r\n self.assertRaises(ValueError, convert_temperature, 0, 'C', 'R')\r\n self.assertRaises(ValueError, convert_temperature, 0, 'N', 'K')", "def test_map_failures(self):\n # any failed build must throw a HDFMappingError\n #\n # make a default/clean 'N5700_PS' module\n self.mod.knobs.reset()\n\n # expected dataset does not exist\n # - rename 'Run time list' dataset\n self.mod.move(\"Run time list\", \"N5700 data\")\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.move(\"N5700 data\", \"Run time list\")\n\n # 'N5700 power supply command list' attribute does not exist\n #\n config_name = self.mod.config_names[0]\n cl = self.mod[config_name].attrs[\"N5700 power supply command list\"]\n self.mod[config_name].attrs[\"Wrong command list\"] = cl\n del self.mod[config_name].attrs[\"N5700 power supply command list\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod[config_name].attrs[\"N5700 power supply command list\"] = cl\n del self.mod[config_name].attrs[\"Wrong command list\"]\n\n # there are no configuration groups to map\n del self.f[\"Raw data + config/N5700_PS/config01\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.knobs.reset()", "def test_validate_invalid_transition_symbol(self):\n with nose.assert_raises(exceptions.InvalidSymbolError):\n self.ntm1.transitions['q0']['3'] = {('q0', '0' 'R')}\n self.ntm1.validate()", "def test_add_ifc_errors(self):\n pass", "def test_error(self):\n simple = SillyResolverSimple()\n complex = SimpleResolverComplexifier(simple)\n receiver = ResultHolder(self)\n self.assertEqual(receiver._started, False)\n complex.resolveHostName(receiver, u\"example.com\")\n self.assertEqual(receiver._started, True)\n self.assertEqual(receiver._ended, False)\n self.assertEqual(receiver._addresses, [])\n simple._requests[0].errback(ZeroDivisionError(\"zow\"))\n self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)\n self.assertEqual(receiver._ended, True)\n self.assertEqual(receiver._addresses, [])", "def test_init_error_2(n, eta, omega, error, br, charge, vectors):\n with pytest.raises(ValueError, match=\"lattice vectors and the unit cell volume should not be\"):\n qml.resource.FirstQuantization(n, eta, omega, error, charge, br, vectors)", "def test_qual_escape(self):\n self.check_fails(\"Quality/error_qual_escape.fastq\", 4)\n self.check_general_passes(\"Quality/error_qual_escape.fastq\", 5)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of nonlocal quantum errors.
def test_remap_nonlocal_quantum_errors(self):
    model = NoiseModel()
    error1 = depolarizing_error(0.5, 1)
    error2 = depolarizing_error(0.5, 2)
    model.add_nonlocal_quantum_error(error1, ['u3'], [0], [1], False)
    model.add_nonlocal_quantum_error(error2, ['cx'], [1, 2], [3, 0], False)

    remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)
    target = NoiseModel()
    target.add_nonlocal_quantum_error(error1, ['u3'], [1], [2], False)
    target.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)
    self.assertEqual(remapped_model, target)
[ "def test_remap_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_quantum_error(error1, ['u3'], [0], False)\n model.add_quantum_error(error2, ['cx'], [1, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_quantum_error(error1, ['u3'], [1], False)\n target.add_quantum_error(error2, ['cx'], [2, 0], False)\n self.assertEqual(remapped_model, target)", "def test_remap_all_qubit_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_all_qubit_quantum_error(error2, ['cx'], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 0]], warnings=False)\n self.assertEqual(model, remapped_model)", "def test_remap_all_qubit_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n model.add_all_qubit_readout_error(error1, False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n self.assertEqual(remapped_model, model)", "def test_validate_invalid_transition_result_symbol(self):\n with nose.assert_raises(exceptions.InvalidSymbolError):\n self.ntm1.transitions['q0']['.'] = {('q3', '3', 'R')}\n self.ntm1.validate()", "def test_validate_invalid_transition_symbol(self):\n with nose.assert_raises(exceptions.InvalidSymbolError):\n self.ntm1.transitions['q0']['3'] = {('q0', '0' 'R')}\n self.ntm1.validate()", "def test_error(self):\n simple = SillyResolverSimple()\n complex = SimpleResolverComplexifier(simple)\n receiver = ResultHolder(self)\n self.assertEqual(receiver._started, False)\n complex.resolveHostName(receiver, u\"example.com\")\n self.assertEqual(receiver._started, True)\n self.assertEqual(receiver._ended, False)\n self.assertEqual(receiver._addresses, [])\n simple._requests[0].errback(ZeroDivisionError(\"zow\"))\n self.assertEqual(len(self.flushLoggedErrors(ZeroDivisionError)), 1)\n self.assertEqual(receiver._ended, True)\n self.assertEqual(receiver._addresses, [])", "def test_map_failures(self):\n # any failed build must throw a HDFMappingError\n #\n # make a default/clean 'N5700_PS' module\n self.mod.knobs.reset()\n\n # expected dataset does not exist\n # - rename 'Run time list' dataset\n self.mod.move(\"Run time list\", \"N5700 data\")\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.move(\"N5700 data\", \"Run time list\")\n\n # 'N5700 power supply command list' attribute does not exist\n #\n config_name = self.mod.config_names[0]\n cl = self.mod[config_name].attrs[\"N5700 power supply command list\"]\n self.mod[config_name].attrs[\"Wrong command list\"] = cl\n del self.mod[config_name].attrs[\"N5700 power supply command list\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod[config_name].attrs[\"N5700 power supply command list\"] = cl\n del self.mod[config_name].attrs[\"Wrong command list\"]\n\n # there are no configuration groups to map\n del self.f[\"Raw data + config/N5700_PS/config01\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.knobs.reset()", "def test_invalid_qasmname_qr(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name='Qr')", "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n 
model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_readout_error(error1, [2], False)\n target.add_readout_error(error2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def test_invalid_type_qr_name(self):\n self.assertRaises(QiskitError, QuantumRegister, size=3, name=1)", "def test_validate_invalid_transition_result_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.transitions['q0']['.'] = {('q4', '.', 'R')}\n self.ntm1.validate()", "def test_bad_units(self):\r\n self.assertRaises(ValueError, convert_temperature, 0, 'C', 'R')\r\n self.assertRaises(ValueError, convert_temperature, 0, 'N', 'K')", "def test_same_wires(self):\n\n with pytest.raises(qml.QuantumFunctionError, match=\"The target wires and estimation wires\"):\n QuantumPhaseEstimation(np.eye(2), target_wires=[0, 1], estimation_wires=[1, 2])", "def test_invalid_rgb_magick(self):\n assert not poly.isInMap((40, 40), \"magick\") and not poly.isInMap((40, 40, 40, 40), \"magick\") and not poly.isInMap(('a', 40, 40), \"magick\") and not poly.isInMap((40, 40, 400), \"magick\") \n\n # Testing that the correct maps are supported", "def test_plot_error_map_over_100_qubit_backend_v2(self):\n backend = FakeWashingtonV2()\n img_ref = path_to_diagram_reference(\"washington_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)", "def testBadInputColocation(self):\n with self.assertRaisesRegexp(ValueError, \"(?s)input.*colocate.*loc:@u\"):\n _ = hub.create_module_spec(bad_input_colocation_module_fn)", "def test_unmatched_cube_error(self):\n self.neighbour_cube.attributes['model_grid_hash'] = '123'\n plugin = SpotExtraction()\n msg = (\"Cubes do not share or originate from the same grid, so cannot \"\n \"be used together.\")\n with self.assertRaisesRegex(ValueError, msg):\n plugin.process(self.neighbour_cube, self.diagnostic_cube_xy)", "def test_add_ifc_errors(self):\n pass", "def _is_name_resolution_error(error_message):\n return 'Temporary failure in name resolution' in error_message" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of all-qubit readout errors.
def test_remap_all_qubit_readout_errors(self): model = NoiseModel() error1 = [[0.9, 0.1], [0.5, 0.5]] model.add_all_qubit_readout_error(error1, False) remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False) self.assertEqual(remapped_model, model)
[ "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_readout_error(error1, [2], False)\n target.add_readout_error(error2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def test_remap_all_qubit_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_all_qubit_quantum_error(error2, ['cx'], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 0]], warnings=False)\n self.assertEqual(model, remapped_model)", "def test_map_failures(self):\n # any failed build must throw a HDFMappingError\n #\n # make a default/clean 'N5700_PS' module\n self.mod.knobs.reset()\n\n # expected dataset does not exist\n # - rename 'Run time list' dataset\n self.mod.move(\"Run time list\", \"N5700 data\")\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.move(\"N5700 data\", \"Run time list\")\n\n # 'N5700 power supply command list' attribute does not exist\n #\n config_name = self.mod.config_names[0]\n cl = self.mod[config_name].attrs[\"N5700 power supply command list\"]\n self.mod[config_name].attrs[\"Wrong command list\"] = cl\n del self.mod[config_name].attrs[\"N5700 power supply command list\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod[config_name].attrs[\"N5700 power supply command list\"] = cl\n del self.mod[config_name].attrs[\"Wrong command list\"]\n\n # there are no configuration groups to map\n del self.f[\"Raw data + config/N5700_PS/config01\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.knobs.reset()", "def test_bus_error(self):\n rbcp = Rbcp(\"127.0.0.1\")\n self.assertRaises(RbcpBusError, rbcp.read, 0xfe, 4)", "def test_rirs_read(self):\n pass", "def test_plot_error_map_over_100_qubit_backend_v2(self):\n backend = FakeWashingtonV2()\n img_ref = path_to_diagram_reference(\"washington_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)", "def test_fail_demux_n(self):\n # Here I need a dummy bcl2fastq that will print a version but fail otherwise\n self.bm.add_mock( 'bcl2fastq',\n side_effect = 'if [ $1 = --version ] ; then echo 123 ; exit 0 ; else exit 1 ; fi' )\n\n res = self.bm_rundemux( [ 'dummy_seqdata',\n self.tmp_dir.sandbox,\n self.get_example('N'),\n '1' ],\n expected_retval = 1 )\n\n # No .opts file written on failure. 
No mismatch1.log because the failure is not a\n # collision failure.\n self.assertEqual( self.tmp_dir.lsdir('.'), ['bcl2fastq.log', 'bcl2fastq.version'] )\n\n self.assertEqual(self.bm.last_calls, dict( bcl2fastq = [\n ['--version'],\n ['-R', 'dummy_seqdata', '-o', self.tmp_dir.sandbox,\n '--fastq-compression-level', '6',\n '--use-bases-mask', '1:Y100n,I8,I8,Y100n',\n '--tiles=s_[1]',\n '--sample-sheet', self.get_example('N'),\n '-p', '10',\n '--barcode-mismatches', '1' ] ]))\n\n with open(os.path.join(self.tmp_dir.sandbox, 'bcl2fastq.version')) as fh:\n self.assertEqual(list(fh), ['123\\n'])", "def test_add_ifc_errors(self):\n pass", "def test_remap_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_quantum_error(error1, ['u3'], [0], False)\n model.add_quantum_error(error2, ['cx'], [1, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_quantum_error(error1, ['u3'], [1], False)\n target.add_quantum_error(error2, ['cx'], [2, 0], False)\n self.assertEqual(remapped_model, target)", "def test_failure_iofailure():\n assert IOFailure(5).failure() == IO(5)", "def test_read(self):\n # check against a correct input file\n renamer = bioformats.seqname.BaseSeqRenamer()\n renamer.read_renaming_dict(self.__correct)\n # check against an incorrect input file\n with self.assertRaises(IncorrectDictError):\n renamer.read_renaming_dict(self.__incorrect)", "def test_bad_bcl2fastq(self):\n self.bm.add_mock( 'bcl2fastq', fail=True )\n\n res = self.bm_rundemux( [ 'dummy_seqdata',\n self.tmp_dir.sandbox,\n self.get_example('1'),\n '1' ],\n expected_retval = 1 )\n\n self.assertEqual(self.bm.last_calls, dict( bcl2fastq = [ ['--version'] ] ))", "def test_func_memmap_fail():\n with TempFileName('memmap_fail') as fname:\n with pytest.raises(ValueError):\n memmap(fname, shape=(16, 16), dtype='float32', byteorder='>')", "def test_bigquery_export_to_partition_mapping_error(self):\n downloader = self.downloader\n err_msg = \"GCP Error\"\n expected_error_msg = \"could not query table for partition date information\"\n with patch(\"masu.external.downloader.gcp.gcp_report_downloader.bigquery\") as bigquery:\n bigquery.Client.side_effect = GoogleCloudError(err_msg)\n with self.assertRaisesRegex(GCPReportDownloaderError, expected_error_msg):\n downloader.bigquery_export_to_partition_mapping()", "def _mem_exception_check():\n if_imem_misalign.next = if_misalign\n if_imem_fault.next = imem_m.err_i\n mem_ld_misalign.next = (io.dmem_pipeline.valid and\n io.dmem_pipeline.fcn == Consts.M_RD and\n mem_misalign)\n mem_ld_fault.next = dmem_m.err_i\n mem_st_misalign.next = (io.dmem_pipeline.valid and\n io.dmem_pipeline.fcn == Consts.M_WR and\n mem_misalign)\n mem_st_fault.next = dmem_m.err_i", "def assertReadData(self, addr):\n\t\tu, b = self.uut[addr], self.basis[addr]\n\t\tif u != b:\n\t\t\tself.fail(\"0x{0:08X} != 0x{1:08X} @ address {2}\".format(u, b, addr))", "def test_translate_header_fails(self):\n with io.StringIO() as out:\n with io.StringIO() as err:\n okay, failed = process_files(\n [TESTDATA], r\"^.*yaml$\", 0, False, outstream=out, errstream=err, output_mode=\"none\"\n )\n\n lines = self._readlines(out)\n self.assertEqual(len(lines), len(failed))\n self.assertTrue(lines[0].startswith(\"Failure processing\"), f\"Line: '{lines[0]}'\")\n self.assertIn(\"not a mapping\", lines[0], f\"Line: '{lines[0]}'\")\n\n lines = self._readlines(err)\n 
self.assertEqual(len(lines), 13)\n self.assertTrue(lines[0].startswith(\"Analyzing\"), f\"Line: '{lines[0]}'\")\n\n self.assertEqual(len(okay), 10)\n self.assertEqual(len(failed), 3)", "def test_reading_ucsc_mm9_chr10_bad(self):\n path = \"MAF/ucsc_mm9_chr10_bad.maf\"\n alignments = Align.parse(path, \"maf\")\n self.assertEqual(alignments.metadata[\"MAF Version\"], \"1\")\n self.assertEqual(alignments.metadata[\"Scoring\"], \"autoMZ.v1\")\n next(alignments)\n next(alignments)\n next(alignments)\n next(alignments)\n next(alignments)\n next(alignments)\n with self.assertRaises(ValueError) as cm:\n next(alignments)\n self.assertEqual(\n str(cm.exception), \"sequence size is incorrect (found 219, expected 319)\"\n )", "def test_read_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionException):\n self.ntm1.read_input('02')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test remapping of readout errors.
def test_remap_readout_errors(self): model = NoiseModel() error1 = [[0.9, 0.1], [0.5, 0.5]] error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]] model.add_readout_error(error1, [1], False) model.add_readout_error(error2, [0, 2], False) remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False) target = NoiseModel() target.add_readout_error(error1, [2], False) target.add_readout_error(error2, [1, 0], False) self.assertEqual(remapped_model, target)
[ "def test_remap_all_qubit_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n model.add_all_qubit_readout_error(error1, False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n self.assertEqual(remapped_model, model)", "def test_error_redirect(self):\n filep = six.moves.StringIO('w')\n with elcaminoreal.errors_to(filep):\n some_plugins.COMMANDS.run(['no-such-command'])\n error_message = filep.getvalue().splitlines()\n self.assertEquals(error_message.pop(0), 'Usage:')", "def test_read(self):\n # check against a correct input file\n renamer = bioformats.seqname.BaseSeqRenamer()\n renamer.read_renaming_dict(self.__correct)\n # check against an incorrect input file\n with self.assertRaises(IncorrectDictError):\n renamer.read_renaming_dict(self.__incorrect)", "def test_map_failures(self):\n # any failed build must throw a HDFMappingError\n #\n # make a default/clean 'N5700_PS' module\n self.mod.knobs.reset()\n\n # expected dataset does not exist\n # - rename 'Run time list' dataset\n self.mod.move(\"Run time list\", \"N5700 data\")\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.move(\"N5700 data\", \"Run time list\")\n\n # 'N5700 power supply command list' attribute does not exist\n #\n config_name = self.mod.config_names[0]\n cl = self.mod[config_name].attrs[\"N5700 power supply command list\"]\n self.mod[config_name].attrs[\"Wrong command list\"] = cl\n del self.mod[config_name].attrs[\"N5700 power supply command list\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod[config_name].attrs[\"N5700 power supply command list\"] = cl\n del self.mod[config_name].attrs[\"Wrong command list\"]\n\n # there are no configuration groups to map\n del self.f[\"Raw data + config/N5700_PS/config01\"]\n with self.assertRaises(HDFMappingError):\n _map = self.map\n self.mod.knobs.reset()", "def test_translate_header_fails(self):\n with io.StringIO() as out:\n with io.StringIO() as err:\n okay, failed = process_files(\n [TESTDATA], r\"^.*yaml$\", 0, False, outstream=out, errstream=err, output_mode=\"none\"\n )\n\n lines = self._readlines(out)\n self.assertEqual(len(lines), len(failed))\n self.assertTrue(lines[0].startswith(\"Failure processing\"), f\"Line: '{lines[0]}'\")\n self.assertIn(\"not a mapping\", lines[0], f\"Line: '{lines[0]}'\")\n\n lines = self._readlines(err)\n self.assertEqual(len(lines), 13)\n self.assertTrue(lines[0].startswith(\"Analyzing\"), f\"Line: '{lines[0]}'\")\n\n self.assertEqual(len(okay), 10)\n self.assertEqual(len(failed), 3)", "def _read_error(err, name): # pragma: no cover\n\n err.info((\"testcases_content\",\n \"_read_error\",\n \"read_error\"),\n \"File could not be read: %s\" % name,\n \"\"\"A File in the archive could not be read. 
This may be\n due to corruption or because the path name is too\n long.\"\"\",\n name)", "def test_failure_iofailure():\n assert IOFailure(5).failure() == IO(5)", "def test_func_memmap_fail():\n with TempFileName('memmap_fail') as fname:\n with pytest.raises(ValueError):\n memmap(fname, shape=(16, 16), dtype='float32', byteorder='>')", "def trap_err(self):\n if sys.stderr is self.err:\n raise OutputTrapError('You are already trapping stderr.')\n if not self.debug:\n self._err_save = sys.stderr\n sys.stderr = self.err", "def write_err(*args, **unused_kwargs):\n mock_err.write(args[0])", "def test_translate_header_traceback(self):\n with io.StringIO() as out:\n with io.StringIO() as err:\n okay, failed = process_files(\n [TESTDATA], r\"^.*yaml$\", 0, True, outstream=out, errstream=err, output_mode=\"none\"\n )\n\n lines = self._readlines(out)\n self.assertGreaterEqual(len(lines), 22, \"\\n\".join(lines))\n self.assertTrue(lines[0].startswith(\"Traceback\"), f\"Line '{lines[0]}'\")\n\n lines = self._readlines(err)\n self.assertGreaterEqual(len(lines), 13, \"\\n\".join(lines))\n self.assertTrue(lines[0].startswith(\"Analyzing\"), f\"Line: '{lines[0]}'\")\n\n self.assertEqual(len(okay), 10)\n self.assertEqual(len(failed), 3)", "def _mem_exception_check():\n if_imem_misalign.next = if_misalign\n if_imem_fault.next = imem_m.err_i\n mem_ld_misalign.next = (io.dmem_pipeline.valid and\n io.dmem_pipeline.fcn == Consts.M_RD and\n mem_misalign)\n mem_ld_fault.next = dmem_m.err_i\n mem_st_misalign.next = (io.dmem_pipeline.valid and\n io.dmem_pipeline.fcn == Consts.M_WR and\n mem_misalign)\n mem_st_fault.next = dmem_m.err_i", "def test_read_missing(self):\n self.remove_file(self.FILENAME)\n try:\n r = fileio.readline(self.FILENAME, 1)\n self.fail(\"Did not get expected exception\")\n except fileio.FileIOException:\n pass # expected", "def test_stderrFileMatchDifferent(self):\n proc = self.process([self.helloworld, \"--lower\", \"--stderr\"])\n self.assert_stderr_matches_file(proc, \"tests/helloworld.out\")", "def test_invalid_test_mapping_wrong_preferred_targets_value(self):\n with open(self.test_mapping_file, 'w') as f:\n f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_NONE_LIST)\n with open(self.test_mapping_file, 'r') as f:\n self.assertRaises(\n android_test_mapping_format.InvalidTestMappingError,\n android_test_mapping_format.process_file,\n f.read())\n with open(self.test_mapping_file, 'w') as f:\n f.write(BAD_TEST_WRONG_PREFERRED_TARGETS_VALUE_WRONG_TYPE)\n with open(self.test_mapping_file, 'r') as f:\n self.assertRaises(\n android_test_mapping_format.InvalidTestMappingError,\n android_test_mapping_format.process_file,\n f.read())", "def test_invalid_test_mapping_file_patterns_value(self):\n with open(self.test_mapping_file, 'w') as f:\n f.write(BAD_FILE_PATTERNS)\n with open(self.test_mapping_file, 'r') as f:\n self.assertRaises(\n android_test_mapping_format.InvalidTestMappingError,\n android_test_mapping_format.process_file,\n f.read())", "def test_invalid_operations_for_output(self):\n\n self.assertRaises(InvalidCmd,\n self.do, 'output')", "def test_read_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionException):\n self.ntm1.read_input('02')", "def test_other_01(self):\n self.file_in.write(b'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08')\n self.file_in.flush()\n self.file_cmd.write(b'NOP 0x3\\nNOP 0x5')\n self.file_cmd.flush()\n faults_inject.main([\"-i\", self.file_in.name, \"-o\", self.file_out.name, \"-a\", \"x86\", \"-f\", self.file_cmd.name])\n 
self.assertEqual(b'\\x01\\x02\\x03\\x90\\x05\\x90\\x07\\x08', self.file_out.read())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test reduction mapping of noise model.
def test_reduce_noise_model(self): error1 = depolarizing_error(0.5, 1) error2 = depolarizing_error(0.5, 2) roerror1 = [[0.9, 0.1], [0.5, 0.5]] roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]] model = NoiseModel() model.add_all_qubit_quantum_error(error1, ['u3'], False) model.add_quantum_error(error1, ['u3'], [1], False) model.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False) model.add_all_qubit_readout_error(roerror1, False) model.add_readout_error(roerror2, [0, 2], False) remapped_model = remap_noise_model(model, [0, 1, 2], discard_qubits=True, warnings=False) target = NoiseModel() target.add_all_qubit_quantum_error(error1, ['u3'], False) target.add_quantum_error(error1, ['u3'], [1], False) target.add_all_qubit_readout_error(roerror1, False) target.add_readout_error(roerror2, [0, 2], False) self.assertEqual(remapped_model, target)
[ "def test_reduce_remapped_noise_model(self):\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n roerror1 = [[0.9, 0.1], [0.5, 0.5]]\n roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n\n model = NoiseModel()\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_quantum_error(error1, ['u3'], [1], False)\n model.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)\n model.add_all_qubit_readout_error(roerror1, False)\n model.add_readout_error(roerror2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [2, 0, 1], discard_qubits=True, warnings=False)\n target = NoiseModel()\n target.add_all_qubit_quantum_error(error1, ['u3'], False)\n target.add_quantum_error(error1, ['u3'], [2], False)\n target.add_all_qubit_readout_error(roerror1, False)\n target.add_readout_error(roerror2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def noise_floor_test(ensemble):\n\n return ADCP_FLAGS['no_test']", "def test_noise_no_trend(self):\n self.assertFalse(self.data_item.is_noise(20))\n self.assertFalse(self.data_item.is_noise(20.1))\n self.assertFalse(self.data_item.is_noise(10))", "def random_noise():\n tree = bpy.context.scene.node_tree\n for node in tree.nodes:\n if node.type == \"BRIGHTCONTRAST\":\n node.inputs['Bright'].default_value = (random.random() - .5) * 10\n node.inputs['Contrast'].default_value = (random.random() - .5) * 10\n elif node.type == \"LENSDIST\":\n node.inputs['Dispersion'].default_value = random.random()/10.0", "def test_multiscale_zero(self):\n self.assertEqual(0, metrics.multiscale_spectral_loss(self.x, self.x))", "def _make_noise_test_image():\n global _TEST_IMAGE\n if _TEST_IMAGE is None:\n rng = np.random.RandomState(12345)\n noise = rng.normal(size=(256, 256, 3))\n noise = gaussian_filter(noise, (4, 4, 0))\n noise -= noise.min(axis=(0,1), keepdims=True)\n noise /= noise.max(axis=(0,1), keepdims=True)\n noise = (noise * 255).astype(np.uint8)\n _TEST_IMAGE = noise\n return _TEST_IMAGE", "def test_topic_noise():\n noise_param_file = path.join(path.dirname(__file__),\n 'param_files',\n 'topic_noise_test.param')\n topic_noise_task.main(noise_param_file)\n articles = pickle.load(open(path.join(out_dir, 'articles.pickle')))\n for key in articles.keys():\n assert_true(path.exists(path.join(out_dir, key + '.png')))", "def pred_noise(self, signal, timestep, mel):\n return self.wavenet(signal, timestep, mel)", "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_readout_error(error1, [2], False)\n target.add_readout_error(error2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def init_noise_model(self):\n\n self.noise = galsim.PoissonNoise(self.rng)\n self.logger.info('Poisson noise model created.')\n \n return", "def _is_noise(self, _):\n return False", "def noise_reduction(dirname, raw_fname, denoised_fname, nr_cfg, state_space_fname):\n refnotch = nr_cfg['refnotch']\n reflp = nr_cfg['reflp']\n refhp = nr_cfg['refhp']\n noiseref_hp = nr_cfg['noiseref_hp']\n\n from jumeg.jumeg_noise_reducer import noise_reducer, plot_denoising\n\n subj = op.basename(raw_fname).split('_')[0]\n ss_dict_fname = op.join(op.dirname(raw_fname), subj + 
state_space_fname)\n\n # read the raw file\n raw = mne.io.Raw(op.join(dirname, raw_fname), preload=True)\n\n # apply noise reducer thrice to reference channels with different freq parameters\n # the nr-raw.fif are rewritten\n # low pass filter for freq below 5 hz\n raw_nr = noise_reducer(raw_fname, raw=raw, reflp=reflp, return_raw=True)\n\n raw.close()\n\n raw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=refhp, noiseref=noiseref_hp, return_raw=True)\n\n # notch filter to remove power line noise\n raw_nr = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch,\n fnout=op.join(dirname, denoised_fname),\n return_raw=True)\n\n raw_nr.close()\n\n # plot final plotting\n plot_name = denoised_fname.rsplit('-raw.fif')[0] + '-plot'\n plot_denoising([op.join(dirname, raw_fname), op.join(dirname, denoised_fname)],\n n_jobs=1, fnout=op.join(dirname, plot_name), show=False)\n\n # save config file\n nr_dict = nr_cfg.copy()\n nr_dict['input_file'] = op.join(dirname, raw_fname)\n nr_dict['process'] = 'noise_reducer'\n nr_dict['output_file'] = op.join(dirname, denoised_fname)\n\n save_state_space_file(ss_dict_fname, process_config_dict=nr_dict)", "def test_61_spectral_index_probable_errors_filtering():\n\tcasalog.origin(\"test_61_spectral_index_probable_errors_filtering\")\n\tcasalog.post(\"starting\")\n\n\timmath(imagename=['imgG192_6s_spw0-63_mfs2.image.alpha.error', \n\t 'imgG192_6s_spw0-63_mfs2.image.tt0'],\n\t mode='evalexpr',\n\t expr='IM0[IM1>2E-4]',\n\t outfile='imgG192_6s_spw0-63_mfs2.image.alpha.error.filtered')", "def global_noise(self, seed=42, **kwargs):\n # Random perturbations, initialized globally for same results in parallel\n gshape = self.domain.dist.grid_layout.global_shape(scales=self.domain.dealias)\n slices = self.domain.dist.grid_layout.slices(scales=self.domain.dealias)\n rand = np.random.RandomState(seed=seed)\n noise = rand.standard_normal(gshape)[slices]\n\n # filter in k-space\n noise_field = self._new_field()\n noise_field.set_scales(self.domain.dealias, keep_data=False)\n noise_field['g'] = noise\n self.filter_field(noise_field, **kwargs)\n\n return noise_field", "def dummy_noise_model(self):\n noise_model = NoiseModel()\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model.add_all_qubit_quantum_error(error, 'x')\n return noise_model", "def add_noise(self):\n for i in range(self.num_neurons):\n spike_train = deepcopy(self.spike_trains[i, :])\n\n # Get indices without spikes.\n indices = [j for j, dt in enumerate(spike_train) if dt == 0]\n\n # Add spikes to indices randomly with given probability.\n p = self.noise * self.dt\n for index in indices:\n if np.random.uniform(0, 1) < p:\n spike_train[index] = 1\n\n self.spike_trains[i, :] = spike_train", "def ComputeNoiseForSingleData():\n\n # Generate noisy data\n NumPointsAlongAxis = 50\n NoiseMagnitude = 0.2\n GridOfPoints = True\n x, y, z = Data.GenerateData(NumPointsAlongAxis, NoiseMagnitude, GridOfPoints)\n\n # Generate Linear Model\n DecorrelationScale = 0.1\n UseSparse = False\n nu = 0.5\n K = Data.GenerateCorrelationMatrix(x, y, z, DecorrelationScale, nu, UseSparse)\n\n # BasisFunctionsType = 'Polynomial-0'\n # BasisFunctionsType = 'Polynomial-1'\n BasisFunctionsType = 'Polynomial-2'\n # BasisFunctionsType = 'Polynomial-3'\n # BasisFunctionsType = 'Polynomial-4'\n # BasisFunctionsType = 'Polynomial-5'\n # BasisFunctionsType = 'Polynomial-2-Trigonometric-1'\n X = Data.GenerateLinearModelBasisFunctions(x, y, BasisFunctionsType)\n\n # Trace estimation weights\n UseEigenvaluesMethod = False # If set to True, it 
overrides the interpolation estimation methods\n # TraceEstimationMethod = 'NonOrthogonalFunctionsMethod' # highest condtion number\n # TraceEstimationMethod = 'OrthogonalFunctionsMethod' # still high condition number\n TraceEstimationMethod = 'OrthogonalFunctionsMethod2' # best (lowest) condition number\n # TraceEstimationMethod = 'RBFMethod'\n\n # Precompute trace interpolation function\n TraceEstimationUtilities = TraceEstimation.ComputeTraceEstimationUtilities(K, UseEigenvaluesMethod, TraceEstimationMethod, None, [1e-4, 4e-4, 1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3])\n\n # Finding optimal parameters with maximum likelihood using parameters (sigma, sigma0)\n # Results = LikelihoodEstimation.MaximizeLogLikelihoodWithSigmaSigma0(z, X, K, TraceEstimationUtilities)\n # print(Results)\n\n # Finding optimal parameters with maximum likelihood using parameters (sigma, eta)\n # Results = LikelihoodEstimation.MaximizeLogLikelihoodWithSigmaEta(z, X, K, TraceEstimationUtilities)\n # print(Results)\n\n # Finding optimal parameters with derivative of likelihood\n Interval_eta = [1e-4, 1e+3] # Note: make sure the interval is exactly the end points of eta_i, not less or more.\n Results = LikelihoodEstimation.FindZeroOfLogLikelihoodFirstDerivative(z, X, K, TraceEstimationUtilities, Interval_eta)\n print(Results)\n\n # Plot likelihood and its derivative\n # LikelihoodEstimation.PlotLogLikelihood(z, X, K, TraceEstimationUtilities)\n LikelihoodEstimation.PlotLogLikelihoodFirstDerivative(z, X, K, TraceEstimationUtilities, Results['eta'])", "def setup_test():\n warnings.simplefilter('default')\n\n from scipy import signal, ndimage, special, optimize, linalg\n from scipy.io import loadmat\n from skimage import viewer, filter\n\n np.random.seed(0)\n\n warnings.simplefilter('error')", "def noiseimg(objdata, flatdata, texp, gain=5.0, rdnoise=10.8, darkcur=0.67 ): \n # read out noise should be squared\n ronoise=np.square(rdnoise/gain) # readout noise variance in ADU \n dcnoise=darkcur*texp/gain # dark current noise in ADU\n \n # multiply by gain to get image in electrons\n var=np.abs( objdata - np.sqrt(2.)*np.sqrt(ronoise)) + ronoise + dcnoise # in electron^2\n \n # divide by normalized flat squared\n var/=np.square(flatdata)\n \n # RETURN VARIANCE IMAGE\n return var" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test reduction and remapping of noise model.
def test_reduce_remapped_noise_model(self): error1 = depolarizing_error(0.5, 1) error2 = depolarizing_error(0.5, 2) roerror1 = [[0.9, 0.1], [0.5, 0.5]] roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]] model = NoiseModel() model.add_all_qubit_quantum_error(error1, ['u3'], False) model.add_quantum_error(error1, ['u3'], [1], False) model.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False) model.add_all_qubit_readout_error(roerror1, False) model.add_readout_error(roerror2, [0, 2], False) remapped_model = remap_noise_model(model, [2, 0, 1], discard_qubits=True, warnings=False) target = NoiseModel() target.add_all_qubit_quantum_error(error1, ['u3'], False) target.add_quantum_error(error1, ['u3'], [2], False) target.add_all_qubit_readout_error(roerror1, False) target.add_readout_error(roerror2, [1, 0], False) self.assertEqual(remapped_model, target)
[ "def test_reduce_noise_model(self):\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n roerror1 = [[0.9, 0.1], [0.5, 0.5]]\n roerror2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n\n model = NoiseModel()\n model.add_all_qubit_quantum_error(error1, ['u3'], False)\n model.add_quantum_error(error1, ['u3'], [1], False)\n model.add_nonlocal_quantum_error(error2, ['cx'], [2, 0], [3, 1], False)\n model.add_all_qubit_readout_error(roerror1, False)\n model.add_readout_error(roerror2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [0, 1, 2], discard_qubits=True, warnings=False)\n target = NoiseModel()\n target.add_all_qubit_quantum_error(error1, ['u3'], False)\n target.add_quantum_error(error1, ['u3'], [1], False)\n target.add_all_qubit_readout_error(roerror1, False)\n target.add_readout_error(roerror2, [0, 2], False)\n self.assertEqual(remapped_model, target)", "def test_remap_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n error2 = [[0.8, 0.2, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0.1, 0.9]]\n model.add_readout_error(error1, [1], False)\n model.add_readout_error(error2, [0, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_readout_error(error1, [2], False)\n target.add_readout_error(error2, [1, 0], False)\n self.assertEqual(remapped_model, target)", "def init_noise_model(self):\n\n self.noise = galsim.PoissonNoise(self.rng)\n self.logger.info('Poisson noise model created.')\n \n return", "def _make_noise_test_image():\n global _TEST_IMAGE\n if _TEST_IMAGE is None:\n rng = np.random.RandomState(12345)\n noise = rng.normal(size=(256, 256, 3))\n noise = gaussian_filter(noise, (4, 4, 0))\n noise -= noise.min(axis=(0,1), keepdims=True)\n noise /= noise.max(axis=(0,1), keepdims=True)\n noise = (noise * 255).astype(np.uint8)\n _TEST_IMAGE = noise\n return _TEST_IMAGE", "def noise_reduction(dirname, raw_fname, denoised_fname, nr_cfg, state_space_fname):\n refnotch = nr_cfg['refnotch']\n reflp = nr_cfg['reflp']\n refhp = nr_cfg['refhp']\n noiseref_hp = nr_cfg['noiseref_hp']\n\n from jumeg.jumeg_noise_reducer import noise_reducer, plot_denoising\n\n subj = op.basename(raw_fname).split('_')[0]\n ss_dict_fname = op.join(op.dirname(raw_fname), subj + state_space_fname)\n\n # read the raw file\n raw = mne.io.Raw(op.join(dirname, raw_fname), preload=True)\n\n # apply noise reducer thrice to reference channels with different freq parameters\n # the nr-raw.fif are rewritten\n # low pass filter for freq below 5 hz\n raw_nr = noise_reducer(raw_fname, raw=raw, reflp=reflp, return_raw=True)\n\n raw.close()\n\n raw_nr = noise_reducer(raw_fname, raw=raw_nr, refhp=refhp, noiseref=noiseref_hp, return_raw=True)\n\n # notch filter to remove power line noise\n raw_nr = noise_reducer(raw_fname, raw=raw_nr, refnotch=refnotch,\n fnout=op.join(dirname, denoised_fname),\n return_raw=True)\n\n raw_nr.close()\n\n # plot final plotting\n plot_name = denoised_fname.rsplit('-raw.fif')[0] + '-plot'\n plot_denoising([op.join(dirname, raw_fname), op.join(dirname, denoised_fname)],\n n_jobs=1, fnout=op.join(dirname, plot_name), show=False)\n\n # save config file\n nr_dict = nr_cfg.copy()\n nr_dict['input_file'] = op.join(dirname, raw_fname)\n nr_dict['process'] = 'noise_reducer'\n nr_dict['output_file'] = op.join(dirname, denoised_fname)\n\n save_state_space_file(ss_dict_fname, process_config_dict=nr_dict)", "def dummy_noise_model(self):\n noise_model 
= NoiseModel()\n error = pauli_error([('X', 0.25), ('I', 0.75)])\n noise_model.add_all_qubit_quantum_error(error, 'x')\n return noise_model", "def test_remap_quantum_errors(self):\n model = NoiseModel()\n error1 = depolarizing_error(0.5, 1)\n error2 = depolarizing_error(0.5, 2)\n model.add_quantum_error(error1, ['u3'], [0], False)\n model.add_quantum_error(error2, ['cx'], [1, 2], False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n target = NoiseModel()\n target.add_quantum_error(error1, ['u3'], [1], False)\n target.add_quantum_error(error2, ['cx'], [2, 0], False)\n self.assertEqual(remapped_model, target)", "def test_remap_all_qubit_readout_errors(self):\n model = NoiseModel()\n error1 = [[0.9, 0.1], [0.5, 0.5]]\n model.add_all_qubit_readout_error(error1, False)\n\n remapped_model = remap_noise_model(model, [[0, 1], [1, 2], [2, 0]], warnings=False)\n self.assertEqual(remapped_model, model)", "def test_noise_no_trend(self):\n self.assertFalse(self.data_item.is_noise(20))\n self.assertFalse(self.data_item.is_noise(20.1))\n self.assertFalse(self.data_item.is_noise(10))", "def add_noise(self):\n for i in range(self.num_neurons):\n spike_train = deepcopy(self.spike_trains[i, :])\n\n # Get indices without spikes.\n indices = [j for j, dt in enumerate(spike_train) if dt == 0]\n\n # Add spikes to indices randomly with given probability.\n p = self.noise * self.dt\n for index in indices:\n if np.random.uniform(0, 1) < p:\n spike_train[index] = 1\n\n self.spike_trains[i, :] = spike_train", "def modelOnNoise(file = None, out = None, n_samples = None, multiplier = 0.01, reps = 5, n_levels = 15,\n alg = None, set_search = 'GSV', set_kernel = 'RBF', set_normy = False, gp_opt = None, use_alpha = False):\n # Read in dataframe from dataset\n df_1 = pd.read_csv(file, index_col = 0)\n\n # Drop columns with errors in descriptor column values\n cols = [x for x in df_1.columns if df_1[x].dtype == 'object']\n df_2 = df_1.drop(columns = cols)\n\n\n # Randomly sample dataframe and assign sigma from y range and multiplier\n if isinstance(n_samples, int):\n df_2 = df_2.sample(n=n_samples).copy(deep=True)\n sig = (df_2.iloc[:,-1].max() - df_2.iloc[:,-1].min())*multiplier\n\n # Make dictionary of column dictionaries\n noise_dict = {}\n for i in range(reps):\n title = 'col_dict_{}'.format(i)\n noise_dict[title] = sampleNoise(df = df_2, base_sigma= sig, num_levels = n_levels)\n\n # Iterate over each col_dict in noise_dict\n for coldict in noise_dict.keys():\n\n # Iterate over each column in col_dict\n for i,col in enumerate(noise_dict[coldict].keys()):\n\n # Copy dataframe and add column to it\n df = df_2.iloc[:,:-1].copy(deep=True)\n df[col] = noise_dict[coldict][col]\n\n # Extract the sigma term from the added gaussian error for each noise level, to input into GP, if GP is used\n if alg == 'gp':\n y_max = df.iloc[:, -1].max()\n y_min = df.iloc[:, -1].min()\n y_range = y_max - y_min\n if use_alpha is True:\n gp_alpha = y_range*multiplier*i\n else:\n gp_alpha = 1E-10\n\n # Make model and extract values from dictionary\n est = MakeModels(df = df, algs = alg, set_search= set_search, set_kernel= set_kernel,\n set_normy= set_normy, svr_opt_kern= False, save_fig = False, gp_alpha= gp_alpha,\n gp_opt = gp_opt)\n\n # Extract X_test, y_pred, y_true, and y_noise\n xtest = est['X_test']\n ind = xtest.index\n y_pred = est['y_pred']\n y_true_df = df_2.loc[ind]\n y_true = y_true_df.iloc[:,-1]\n y_noise = est['y_test']\n\n # Calculate RMSEs with and without noise\n rmse_nonoise = 
mean_squared_error(y_true= y_true, y_pred= y_pred, squared= False)\n rmse = mean_squared_error(y_true = y_noise, y_pred = y_pred, squared = False)\n noise_dict[coldict][col]['rmsenonoise'] = rmse_nonoise\n noise_dict[coldict][col]['rmse'] = rmse\n\n # Calculate R2s with and without noise\n r2_nonoise = r2_score(y_true= y_true, y_pred = y_pred)\n r2 = r2_score(y_true = y_noise, y_pred = y_pred)\n noise_dict[coldict][col]['r2nonoise'] = r2_nonoise\n noise_dict[coldict][col]['r2'] = r2\n\n # Save estimator dict\n noise_dict[coldict][col]['est'] = est\n\n # Save dictionary to pkl\n with open(out, 'wb') as fp:\n pickle.dump(noise_dict, fp)", "def del_noisemodel(self):\n if self.noisemodel is None:\n warn(\"No noisemodel is present in this model.\")\n else:\n self.nparam -= self.noisemodel.nparam\n self.parameters = self.parameters.ix[self.parameters.name !=\n self.noisemodel.name]\n self.noisemodel = None", "def add_noisemodel(self, noisemodel):\n self.noisemodel = noisemodel\n self.parameters = self.get_init_parameters()\n self.nparam += noisemodel.nparam", "def _sample(self, model_output: torch.Tensor) -> torch.Tensor:\n pass", "def ComputeNoiseForSingleData():\n\n # Generate noisy data\n NumPointsAlongAxis = 50\n NoiseMagnitude = 0.2\n GridOfPoints = True\n x, y, z = Data.GenerateData(NumPointsAlongAxis, NoiseMagnitude, GridOfPoints)\n\n # Generate Linear Model\n DecorrelationScale = 0.1\n UseSparse = False\n nu = 0.5\n K = Data.GenerateCorrelationMatrix(x, y, z, DecorrelationScale, nu, UseSparse)\n\n # BasisFunctionsType = 'Polynomial-0'\n # BasisFunctionsType = 'Polynomial-1'\n BasisFunctionsType = 'Polynomial-2'\n # BasisFunctionsType = 'Polynomial-3'\n # BasisFunctionsType = 'Polynomial-4'\n # BasisFunctionsType = 'Polynomial-5'\n # BasisFunctionsType = 'Polynomial-2-Trigonometric-1'\n X = Data.GenerateLinearModelBasisFunctions(x, y, BasisFunctionsType)\n\n # Trace estimation weights\n UseEigenvaluesMethod = False # If set to True, it overrides the interpolation estimation methods\n # TraceEstimationMethod = 'NonOrthogonalFunctionsMethod' # highest condtion number\n # TraceEstimationMethod = 'OrthogonalFunctionsMethod' # still high condition number\n TraceEstimationMethod = 'OrthogonalFunctionsMethod2' # best (lowest) condition number\n # TraceEstimationMethod = 'RBFMethod'\n\n # Precompute trace interpolation function\n TraceEstimationUtilities = TraceEstimation.ComputeTraceEstimationUtilities(K, UseEigenvaluesMethod, TraceEstimationMethod, None, [1e-4, 4e-4, 1e-3, 1e-2, 1e-1, 1, 1e+1, 1e+2, 1e+3])\n\n # Finding optimal parameters with maximum likelihood using parameters (sigma, sigma0)\n # Results = LikelihoodEstimation.MaximizeLogLikelihoodWithSigmaSigma0(z, X, K, TraceEstimationUtilities)\n # print(Results)\n\n # Finding optimal parameters with maximum likelihood using parameters (sigma, eta)\n # Results = LikelihoodEstimation.MaximizeLogLikelihoodWithSigmaEta(z, X, K, TraceEstimationUtilities)\n # print(Results)\n\n # Finding optimal parameters with derivative of likelihood\n Interval_eta = [1e-4, 1e+3] # Note: make sure the interval is exactly the end points of eta_i, not less or more.\n Results = LikelihoodEstimation.FindZeroOfLogLikelihoodFirstDerivative(z, X, K, TraceEstimationUtilities, Interval_eta)\n print(Results)\n\n # Plot likelihood and its derivative\n # LikelihoodEstimation.PlotLogLikelihood(z, X, K, TraceEstimationUtilities)\n LikelihoodEstimation.PlotLogLikelihoodFirstDerivative(z, X, K, TraceEstimationUtilities, Results['eta'])", "def pred_noise(self, signal, 
timestep, mel):\n return self.wavenet(signal, timestep, mel)", "def noise_floor_test(ensemble):\n\n return ADCP_FLAGS['no_test']", "def fullModelOnNoise(file=None, outfile=None, algs=['ridge', 'knn', 'svr', 'rf', 'gp'],\n n_samples=None, multiplier=0.01, reps=5, n_levels=15, set_search='GSV',\n set_kernel='RBF', set_normy=False):\n\n # Define outlist, tups\n outlist = [outfile.format(x) for x in algs]\n tups = list(zip(algs, outlist))\n\n # Define model loop\n for alg, out in tups:\n if alg == 'rf':\n modelOnNoise(file=file, out=out, alg=alg, set_search='RSV', n_samples=n_samples,\n multiplier=multiplier, reps=reps, n_levels=n_levels)\n elif alg == 'gp':\n modelOnNoise(file=file, out=out, alg=alg, set_normy=True, n_samples=n_samples,\n multiplier=multiplier, reps=reps, n_levels=n_levels, set_kernel=set_kernel)\n else:\n modelOnNoise(file=file, out=out, alg=alg, n_samples=n_samples,\n multiplier=multiplier, reps=reps, n_levels=n_levels, set_search=set_search)\n\n return None", "def random_noise():\n tree = bpy.context.scene.node_tree\n for node in tree.nodes:\n if node.type == \"BRIGHTCONTRAST\":\n node.inputs['Bright'].default_value = (random.random() - .5) * 10\n node.inputs['Contrast'].default_value = (random.random() - .5) * 10\n elif node.type == \"LENSDIST\":\n node.inputs['Dispersion'].default_value = random.random()/10.0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return whether a name is valid as an entry name. Checks a name against an assortment of DOS-like filename rules.
def is_valid_entry_name(filename): allowed = string.ascii_letters + string.digits + "_^$~!#%&-{}@`'()" reserved = ['CON', 'PRN', 'AUX', 'CLOCK$', 'NUL', 'COM0', 'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9', 'LPT0', 'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9', 'LST', 'KEYBD$', 'SCREEN$', '$IDLE$', 'CONFIG$'] # Cannot be empty or None. if not filename: return False # Separator cannot appear more than once. if filename.count('.') > 1: return False # Split into name and extension. s = filename.partition('.') name = s[0] separator = s[1] extension = s[2] # Check name length. name_len_ok = (0 < len(name) <= 8) # Check name characters. name_char_ok = all(c in allowed for c in name) # Check name reservation. name_reserved_ok = (name.upper() not in reserved) # Default to valid extension checks. ext_len_ok = True ext_char_ok = True # Check extension if a separator is present. # Must have a valid extension if separator is present. if separator: # Check extension length. ext_len_ok = (0 < len(extension) <= 3) # Check extension characters. ext_char_ok = all(c in allowed for c in name) # Reserved names do not apply to extensions. return ((name_len_ok and name_char_ok and name_reserved_ok) and (ext_len_ok and ext_char_ok))
[ "def isValidName(self, filename):\n if filename in RESERVED_WORDS:\n return False\n tnam = filename[:].lower()\n return NAME_MATCH(tnam) is not None", "def is_valid_file_name_linux(name:str) -> bool:\r\n return not any( c in invalid_linux_char for c in name )", "def is_valid_file_name_win(name:str, has_ext:bool=False) -> bool:\r\n return not ( any( c in invalid_windows_char for c in name ) \r\n or ( ( _splitext(name)[0] if has_ext else name ).upper() in reserved_win_names) )", "def is_valid_name(name):\n return isinstance(name, str) and bool(Command._name_pattern.fullmatch(name))", "def is_valid_rule_name(self, entry):\n # Check if entry is blank\n if not entry:\n return False\n\n # Check length\n if len(entry) > 128:\n return False\n\n # Ensure doesn't start with a digit\n if entry[0].isdigit():\n return False\n\n # Accept only alphanumeric and underscores\n if not re.match(r'\\w+$', entry):\n return False\n\n # Verify not in keywords\n if entry in self.keywords:\n return False\n\n return True", "def _check_name(self, filename: str) -> bool:\n pattern = r'[\\/\\\\\\:\\<\\>]'\n if re.search(pattern, filename):\n return False\n return True", "def is_valid_file_name(name:str, has_ext:bool=False) -> bool:\r\n return is_valid_file_name_linux(name) and is_valid_file_name_win(name, has_ext=has_ext)", "def _is_valid_filename(self, filename: str) -> bool:\n return all(c in FILENAME_VALID_CHARS for c in filename)", "def is_valid_name(name):\n return isinstance(name, str) and bool(Option._name_pattern.fullmatch(name))", "def storage_name_valid(name: str) -> bool:\n if re.match('[a-z0-9]{3,24}$', name):\n return True\n return False", "def __is_valid_char_name(char):\n return char.isalpha() or char.isnumeric() or char in Project.VALID_NAME_SPECIAL_CHARS", "def name_filter(filename: str) -> bool:\n form = r\"\\Alog[0-9]{6}_[0-9A-Z]+[0-9]{6}_[0-9A-Z]+_Dyad_([0-9]+|(Morning)).*\\Z\"\n filename = normalize_name(filename)\n return re.fullmatch(form, filename) is not None", "def is_crds_name(name):\n name = os.path.basename(name).lower()\n return bool(CRDS_NAME_RE.match(name))", "def check_libname(name):\n # name = <str>\n # ch = <str>\n # return <int>|<bool>\n name = str(name)\n if not name:\n return 0\n return (name[0] in _FIRST_LETTERS and\n all(ch in _OTHER_LETTERS for ch in name[1:]))", "def IsNameChar(c):\n # This is called a lot, so we take a few short cuts for commone cases\n if c:\n if c <= u\"z\":\n if c >= u\"a\":\n return True\n elif c <= u\"Z\":\n if c >= u\"A\":\n return True\n elif c <= u\"9\":\n if c >= u\"0\":\n return True\n else:\n return c in \"-.\"\n else:\n return c == u\":\"\n else:\n return c == u\"_\"\n else:\n return NameCharClass.test(c)\n return False", "def is_valid_index_name(name):\n\n if name.startswith(('_', '.')):\n return False\n\n if _invalid_index_characters.search(name):\n return False\n\n if '*' in name:\n return False\n\n return True", "def is_valid_reference_name(filename):\n name = os.path.basename(filename)\n return is_reference(name) and (is_crds_name(name) or is_cdbs_name(name))", "def is_valid_workflow_name(name):\n return bool(re.match('(?s)^[a-zA-Z][a-zA-Z0-9_]*$',name))", "def _verify_contact_name(name):\n\tif re.fullmatch('[a-zA-Z ]*', name) is None:\n\t\treturn False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple of size information given a list of entries. Projects the metadata size and raw data size of a GOB if it were created with the given list of entries.
def get_gob_size(entries): # Header + Catalog Offset + Catalog meta_size = GOB_HEADER_SIZE + GOB_CATALOG_OFFSET_SIZE + (GOB_CATALOG_ENTRY_SIZE * len(entries)) # Raw Data data_size = sum([len(entry[1]) for entry in entries]) return (meta_size, data_size)
[ "def _get_sizes(self) -> int:\n pass", "def list_sizes(location=None):", "def _parse_sizes(self):\n with open(self._data_set, 'r') as f:\n # First line\n return tuple([int(v) for v in f.readline().split()])", "def sizes(self):\n return np.array([entry.data[\"size\"] for entry in self._entries])", "def get_sizes(args):\n if args.out is None:\n args.out = '.'.join(os.path.basename(args.bam).split('.')[0:-1])\n sizes = FragmentSizes(lower = args.lower, upper = args.upper, atac = args.atac)\n if args.bed:\n chunks = ChunkList.read(args.bed)\n chunks.merge()\n sizes.calculateSizes(args.bam, chunks)\n else:\n sizes.calculateSizes(args.bam)\n sizes.save(args.out+'.fragmentsizes.txt')\n if not args.no_plot:\n #make figure\n fig = plt.figure()\n plt.plot(range(sizes.lower,sizes.upper),sizes.get(sizes.lower,sizes.upper),label = args.out)\n plt.xlabel(\"Fragment Size\")\n plt.ylabel(\"Frequency\")\n fig.savefig(args.out+'.fragmentsizes.eps')\n plt.close(fig)", "def get_bnd_entry_header_size(self) -> int:\n size = 16 # Base size.\n if self.has_ids:\n size += 4\n if self.has_names_1 or self.has_names_2:\n size += 4\n if self.has_compression:\n size += 8\n size += 8 if self.has_long_offsets else 4\n return size", "def dimension_list_size(item_list):\n if item_list.count == 0:\n return 1\n elif item_list.count == 1:\n return item_list[0].size\n else:\n return reduce(lambda x, y: x * y, [i.size for i in item_list])", "def extract(args):\n p = OptionParser(extract.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n idsfile, sizesfile = args\n sizes = Sizes(sizesfile).mapping\n fp = open(idsfile)\n for row in fp:\n name = row.strip()\n size = sizes[name]\n print(\"\\t\".join(str(x) for x in (name, size)))", "def get_image_sizes(metadata):\n image_sizes = {}\n with open(metadata.image_sizes) as f:\n for line in f.readlines():\n image_id, ws, hs = line.strip('\\n').split(',')\n w, h = int(ws), int(hs)\n image_sizes[image_id] = (w, h)\n return image_sizes", "def _do_get_size(self, size_key):\n stream = self.fetch_stream(size_key)\n meta = self.fetch_meta(size_key)\n\n return (meta, stream)", "def size_dblist(self,db_list):\n db_strs = str(db_list)[1:-1]\n sql = \"select datname, pg_database_size(datname) from pg_database where datname in ( %s );\" % db_strs\n dbsize_list = {}\n rows = self.execute_sql(sql)\n for row in rows:\n db = row[0]\n size = format_disk_size(row[1])\n dbsize_list[db] = size\n\n return dbsize_list", "def get_size(bucket=\"czb-seqbot\", prefix=None):\n yield from prefix_gen(bucket, prefix, lambda r: (r[\"Key\"], r[\"Size\"]))", "def get_sizes(events, discrete_width, prob, pred=None, num_classes=2):", "def get_cache_sizes():\r\n global _ATTR_CACHE, _PROP_CACHE\r\n attr_n = len(_ATTR_CACHE)\r\n attr_mb = sum(getsizeof(obj) for obj in _ATTR_CACHE) / 1024.0\r\n prop_n = sum(len(dic) for dic in _PROP_CACHE.values())\r\n prop_mb = sum(sum([getsizeof(obj) for obj in dic.values()]) for dic in _PROP_CACHE.values()) / 1024.0\r\n return (attr_n, attr_mb), (prop_n, prop_mb)", "def quantity_size():", "def get_size(self):\n units = (\"B\", \"KB\", \"MB\", \"GB\", \"TB\")\n for i, unit in enumerate(units):\n high = 10**(i*3)\n if self.size < high*1000:\n return f\"{round(self.size/high, 3)} {unit}\"", "def entSize(ent):\n box = ent['box']\n width, height = max(box[1]), min(box[1])\n return width, height", "def get_size(self, sizes):\n if len(sizes) == 1:\n return sizes[0]\n else: \n return random.randint(*sizes[:2])", "def read_sizes(info_path):\r\n with 
open(info_path + '/sizes/sizes.txt', 'r') as sizes_file:\r\n sizes_read = sizes_file.read()\r\n sizes_line = str(sizes_read).split(' ')\r\n sizes_line.pop(-1)\r\n sizes_list = list(map(int, sizes_line))\r\n sizes_list = torch.Tensor(sizes_list).int()\r\n return sizes_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Writes a GOB container given a path and a list of GOB entries.
def write(filename, entries): meta_size, data_size = get_gob_size(entries) if (meta_size + data_size) > GOB_MAX_SIZE: raise GOBException('Cannot create GOB because it would exceed maximum size.') for entry in entries: if not is_valid_entry_name(entry[0]): raise GOBException('"' + entry[0] + '" is an invalid entry name.') with open(filename, 'wb') as file: file.write(b'GOB\n') file.write(struct.pack('<i', GOB_HEADER_SIZE + data_size)) for entry in entries: file.write(entry[1]) file.write(struct.pack('<i', len(entries))) offset = GOB_HEADER_SIZE for entry in entries: file.write(struct.pack('<i', offset)) file.write(struct.pack('<i', len(entry[1]))) file.write(struct.pack('13s', entry[0].encode('ascii'))) offset += len(entry[1])
[ "def test_write_gro():\n B = Bead(1,'PHE','BB',1,np.array([0.,0.,0.]),np.array([0.,0.,0.]),'SC5')\n G = Gro('testing gro construction',1,[B],[30.3,30.3,30.3])\n G.write('test.gro')", "def node_json_making(path, output=\"sp_nodes.txt\"):\n print(\"making nodes file...\")\n with open(output, 'w') as filout:\n for i in path:\n i = str(i)\n filout.write(f'{{ data: {{ id: \\'{i}\\', size: 200, name: \\'{i}\\' }}, classes: [] }},\\n')", "def writePlist(root_object, path_or_file, binary=False):\n did_open = False\n if isinstance(path_or_file, (str, unicode)):\n path_or_file = open(path_or_file, \"w\")\n did_open = True\n dump(root_object, path_or_file, binary)\n if did_open:\n path_or_file.close()", "def write_hgrps(self, hgrp_list, dirname):\n self.host_group_manager.write_objects(hgrp_list, dirname)", "def write_by_name(fpath, *objs_list, **kwargs):\n length = len(objs_list)\n group_size = kwargs.pop('group_size', length)\n with stream.open(fpath, 'wb', **kwargs) as ostream:\n cursor = 0\n while cursor < length:\n ostream.write(*objs_list[cursor:cursor+group_size])\n cursor += group_size", "def write_event_gtracks_json(egtrk : List[GTracks], path : str):\n devt = {}\n for ievt, gtrks in enumerate(egtrk): # loop over events\n # Create a dictionary of json objects (from networkx objects)\n dgtrk = {j:nx.node_link_data(gtrks[j].gt)\\\n for j, _ in enumerate(gtrks)}\n #print(f'ievt = {ievt}', trk0 = {dgtrk[0]}')\n\n # create dictionaries for all other fields\n devid = {j : int(gtrks[j].event_id) for j, _ in enumerate(gtrks)}\n dvbin = {j : float(gtrks[j].voxel_bin) for j, _ in enumerate(gtrks)}\n dcont = {j : float(gtrks[j].contiguity) for j, _ in enumerate(gtrks)}\n\n # print(f'ievt = {ievt}', devid0 = {devid[0]}')\n # print(f'ievt = {ievt}', dvbin = {dvbin[0]}')\n # print(f'ievt = {ievt}', dcont = {dcont[0]}')\n #\n\n # create a dict of json objects corresponding to the GTrack\n dGtrk = {\"gtrk\" : dgtrk, \"event_id\" : devid,\n \"voxel_bin\" : dvbin,\n \"contiguity\" : dcont}\n\n #print(f'ievt = {ievt}', dGtrk = {dGtrk}')\n\n # and add to the dictionary of events\n devt[ievt] = dGtrk\n\n # write to disk\n with open(path, 'w') as fp:\n json.dump(devt, fp)", "def write_queues(self, queue_list, dirname):\n self.cluster_queue_manager.write_objects(queue_list, dirname)", "def _write_mol_block_to_file(save_to_path, mol_block_list):\n\n def __mol_block_writer(mode):\n with tf.gfile.Open(save_to_path, mode) as writer:\n for line in mol_block:\n writer.write('%s\\n' % line)\n\n for index, mol_block in enumerate(mol_block_list):\n if index == 0:\n __mol_block_writer(mode='w')\n else:\n __mol_block_writer(mode='a')", "def write(objs, name, path=None, fmt=\"msa\", writeMask=0xFF):\n\tpath = (path if path else defaultPath)\n\tname = name.replace(\":\",\"_\").replace(\"\\\\\",\"_\").replace(\".\",\"_\")\n\tif isinstance(objs, javaarray.array) or isinstance(objs, tuple) or isinstance(objs, list):\n\t\tfor i, obj in enumerate(objs):\n\t\t\tif isinstance(obj, ept.ScaledImage):\n\t\t\t\tdi = (int(obj.getDetectorIndex()) if obj.getDetectorIndex() else i)\n\t\t\t\tif (writeMask & (1<<di)) != 0:\n\t\t\t\t\twrite(obj, \"%s[%d]\" % (name, di, ), path, fmt=fmt)\n\t\t\telse:\n\t\t\t\twrite(obj, \"%s[%d]\" % (name, i), path, fmt=fmt)\n\telif isinstance(objs, semss.DataItems.ImageDatum):\n\t\tfor i in range(0, objs.getCount()):\n\t\t\twrite(objs.getImage(i), \"%s[%d][%s]\" % (name, objs.getFrameId(), objs.getName(i)), path, fmt=fmt)\n\telif isinstance(objs, semss.DataItems.SpectrumDatum):\n\t\tfor i in range(0, 
objs.getCount()):\n\t\t\twrite(objs.getSpectrum(i), \"%s[%d]\" % (name, i), path, fmt=fmt)\n\telif isinstance(objs, ept.ScaledImage) or isinstance(objs, jawtimg.BufferedImage):\n\t\tif fmt=='tif':\n\t\t\t_saverize.addAsTIFF(jio.File(path, \"%s.tif\" % name), objs)\n\t\telse:\n\t\t\t_saverize.addAsPNG(jio.File(path, \"%s.png\" % name), objs)\n\telif isinstance(objs, epq.ISpectrumData):\n\t\tif fmt=='tif':\n\t\t\t_saverize.addAsTIFF(jio.File(path, name+\".tif\"), objs)\n\t\telse:\n\t\t\temsa = ept.WriteSpectrumAsEMSA1_0()\n\t\t\tos = jio.FileOutputStream(\"%s/%s.msa\" % (path, name))\n\t\t\ttry:\n\t\t\t\temsa.write(objs, os, emsa.Mode.COMPATIBLE)\n\t\t\t\t# report(\"<p>Spectrum <i>%s</i> written to <i>%s\\\\%s.msa</i></p>\" % (objs, path, name))\n\t\t\tfinally:\n\t\t\t\tos.close()", "def saveMap(filename, paths, images, faces, years, places):\n f = open(filename, 'w+')\n nodes = list(set(cbook.flatten(paths)))\n pathInd = {} #easier form to work with here\n for i in range(len(paths)):\n for j in paths[i]:\n if j in pathInd.keys():\n pathInd[j].append(i+1)\n else:\n pathInd[j] = [i+1]\n strs = []\n\n # Write nodes\n f.write('{ \"nodes\": [\\n')\n for node in nodes:\n imgPath = 'images/' + str(node) + '.png'\n #misc.imsave(websitePath + imgPath, images[node]) #XXX suspect don't need this anymore\n s = '{\"id\": ' + str(node) + ', \"line\": ' + str(pathInd[node])\n s += ', \"faces\": [' + ','.join([str(x) for x in np.nonzero(faces[node])[0]]) + ']'\n p = np.nonzero(places[node])[0]\n s += ', \"time\": ' + str(years[node]) + ', \"place\": ' + str(p[0] if len(p) > 0 else -1)\n s += '}'\n strs.append(s)\n f.write(',\\n'.join(strs) + '],\\n\"links\": [\\n')\n strs = []\n\n # Write links\n for i in range(len(paths)):\n p = paths[i]\n for j in range(0, len(p)-1):\n strs.append('{\"source\": ' + str(nodes.index(p[j])) + ', \"target\": ' + str(nodes.index(p[j+1])) + ', \"line\": ' + str(i+1) + '}')\n f.write(',\\n'.join(strs) + ']}')\n f.close()", "def write_list(path_out, image_list):\n filename = os.path.join(args.root, path_out)\n print('filename=', filename)\n with open(filename, 'w') as fout:\n for i, item in enumerate(image_list):\n line = '%s\\t' % item[1]\n line += '%f\\n' % item[2]\n fout.write(line)", "def write_gsd(\n top,\n filename,\n base_units=None,\n rigid_bodies=None,\n shift_coords=True,\n write_special_pairs=True,\n):\n gsd_snapshot = to_gsd_snapshot(\n top=top,\n base_units=base_units,\n rigid_bodies=rigid_bodies,\n shift_coords=shift_coords,\n parse_special_pairs=write_special_pairs,\n )[0]\n with gsd.hoomd.open(filename, mode=\"w\") as gsd_file:\n gsd_file.append(gsd_snapshot)", "def write_by_file(fpath, *objs_list, **kwargs):\n group_size = kwargs.pop('group_size', len(objs_list))\n mode = 'wb'\n if kwargs.get('gzip', True):\n ofs = gzip.open(fpath, mode)\n else:\n ofs = open(fpath, mode)\n with stream.open(fileobj=ofs, mode=mode, buffer_size=group_size,\n **kwargs) as ostream:\n ostream.write(*objs_list)\n ofs.close()", "def write(self, igseq):\n \n if (not isinstance(igseq, list)) and (not isinstance(igseq, tuple)):\n igseq_list = [igseq]\n else:\n igseq_list = igseq\n \n for igseq_single in igseq_list:\n self.__fh.write(self.__convert_to_pydair(igseq_single))", "def write_build_info(build_info, paths, project=None):\n gcs_client = None\n\n contents = yaml.dump(build_info)\n\n for p in paths:\n logging.info(\"Writing build information to %s\", p)\n if p.startswith(\"gs://\"):\n if not gcs_client:\n gcs_client = storage.Client(project=project)\n bucket_name, path = 
util.split_gcs_uri(p)\n bucket = gcs_client.get_bucket(bucket_name)\n blob = bucket.blob(path)\n blob.upload_from_string(contents)\n\n else:\n with open(p, mode='w') as hf:\n hf.write(contents)", "def save_objects(path, frame, objects):\n full_path = path + str(frame) + '/'\n create_folder(full_path)\n cnt = 0\n for obj in objects:\n img = Image.fromarray(obj.cutout)\n img.save(full_path + 'obj-' + str(cnt) + '.png')\n cnt += 1", "def write_smiles(smiles, file_path):\n if (not os.path.exists(os.path.dirname(file_path))) and (os.path.dirname(file_path) != ''):\n os.makedirs(os.path.dirname(file_path))\n if any(['gz' in ext for ext in os.path.basename(file_path).split('.')[1:]]):\n with gzip.open(file_path, 'wb') as f:\n _ = [f.write((smi+'\\n').encode('utf-8')) for smi in smiles]\n else:\n with open(file_path, 'wt') as f:\n _ = [f.write(smi+'\\n') for smi in smiles]\n return", "def WriteToFile(file_path, boxes, scores, class_indices):\n serialized_data = SerializeToString(boxes, scores, class_indices)\n with tf.io.gfile.GFile(file_path, 'w') as f:\n f.write(serialized_data)", "def _writeContainer(self, filelike, compress):\n aux.writeJsonZipfile(filelike, self.container, compress=compress)\n zipcomp = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED\n with zipfile.ZipFile(filelike, 'a', allowZip64=True) as containerFile:\n infodata = self.info\n infodata['_matrixTemplate'] = self._matrixTemplate\n containerFile.writestr('info', json.dumps(infodata, zipcomp))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function creates the GenBank file from the GFF, protein, and nucleotide sequences.
def writeGbkFile(output_file_name, contig_dict, sample_name, nucleotide_seq_dict, protein_seq_dict):
    date = genbankDate()
    output_file_name_tmp = output_file_name + ".tmp"
    outputfile = open(output_file_name_tmp, 'w')

    count = 0
    outputStr = ""
    for key in contig_dict:
        first = True
        if count % 10000 == 0:
            outputfile.write(outputStr)
            outputStr = ""
        count+=1

        for attrib in contig_dict[key]:
            id = attrib['id']
            try:
                protein_seq = protein_seq_dict[id]
            except:
                protein_seq = ""

            definition = sample_name
            accession = '.'
            version = '.' + " "*10 + "GI:."
            dblink = sample_name
            keywords = '.'
            source = sample_name
            organism = sample_name

            dna_seq_formatted = ""
            if first:
                first = False
                try:
                    dna_seq = nucleotide_seq_dict[key]
                    dna_seq_formatted = formatSequenceOrigin(dna_seq)
                    dna_length = len(dna_seq)
                    sourceStr = "1.." + str(dna_length)
                except:
                    dna_seq = ""
                    dna_seq_formatted = ""
                    dna_length = 0
                    sourceStr = "0..0"

                outputStr+=("LOCUS %-18s %4d bp DNA BCT %-11s\n" % (key, dna_length, date))
                outputStr+=(wrap("DEFINITION ",12,74, definition)+'\n')
                outputStr+=(wrap("ACCESSION ", 12, 74, accession)+'\n')
                outputStr+=(wrap("VERSION ", 12, 74, version)+'\n')
                outputStr+=(wrap("DBLINK ", 12, 74, dblink)+'\n')
                outputStr+=(wrap("KEYWORDS ", 12, 74,keywords)+'\n')
                outputStr+=(wrap("SOURCE ", 12, 74, keywords)+'\n')
                outputStr+=(wrap(" ORGANISM ",12, 74, organism)+'\n')
                outputStr+=(wrap("", 12, 74, "Metagenome")+'\n')
                outputStr+=( wrap("REFERENCE ",12,74, "1 (bases 1 to XXXXX)")+'\n')
                outputStr+=( wrap(" AUTHORS ",12,74, "YYYYYY,X.")+'\n')
                outputStr+=( wrap(" CONSRTM ",12,74, "XXXXX")+'\n')
                outputStr+=( wrap(" TITLE ",12,74, "XXXXX")+'\n')
                outputStr+=( wrap(" JOURNAL ",12,74,"XXXXX")+'\n')
                outputStr+=( wrap(" PUBMED ",12,74,"XXXXX")+'\n')
                outputStr+=( wrap(" REMARK ",12,74, "XXXXX")+'\n')
                outputStr+=( wrap("COMMENT ", 12, 74,"PROVISIONAL REFSEQ: This record has not yet been subject to final NCBI review COMPLETENESS: XXXXX")+'\n')
                outputStr+=( wrap("FEATURES ",21,74,"Location/Qualifiers") +'\n')
                outputStr+=( wrap(" source",21,74,sourceStr) +'\n')
                outputStr+=( wrap("",21,74,"/organism=\"" + sourceStr +"\"") +'\n')
                outputStr+=( wrap("",21,74,"/strain=\"1\"")+'\n')
                outputStr+=( wrap("",21,74,"/chromosome=\"1\"") +'\n')

            if 'start' in attrib and 'end' in attrib:
                geneLoc = str(attrib['start']) +".." + str(attrib['end'])
            else:
                geneLoc = "0..0"
            if 'strand' in attrib:
                if attrib['strand']=='-':
                    geneLoc='complement' + '(' + geneLoc +')'

            outputStr+=( wrap(" gene",21,74,geneLoc) +'\n')
            if 'locus_tag' in attrib:
                locus_tag = "/locus_tag=" + "\"" + attrib['locus_tag'] + "\""
            else:
                locus_tag = "/locus_tag" + "\"\""
            outputStr+=( wrap("",21,74,locus_tag) +'\n')

            outputStr+=( wrap(" CDS",21,74,geneLoc) +'\n')
            if 'product' in attrib:
                product="/product=" + attrib['product']
            else:
                product="/product=\"\""
            outputStr+=( wrap("",21,74,product) +'\n')
            outputStr+=( wrap("",21,74,locus_tag) +'\n')

            codon_start="/codon_start=1"
            translation_table="/transl_table=11"
            outputStr+=( wrap("",21,74,codon_start) +'\n')
            outputStr+=( wrap("",21,74,translation_table) +'\n')

            translation= "/translation="+ protein_seq
            outputStr+=( wrap("",21,74,translation) +'\n')

            outputStr+=(wrap("ORIGIN", 21, 74, "")+'\n')
            outputStr+=(dna_seq_formatted +'\n')
            outputStr+=("//\n")

    outputfile.write(outputStr)
    outputfile.close()
    rename(output_file_name_tmp, output_file_name)
[ "def create_genbank(fasta_file, UPLOAD_FOLDER, phage_id, payload):\n headers = payload.get_json()\n gb_file = os.path.join(UPLOAD_FOLDER, phage_id + \".gb\")\n genome = SeqIO.read(fasta_file, \"fasta\").seq\n genome = Seq(str(genome), IUPAC.unambiguous_dna)\n record = SeqRecord(genome, id='', name=headers[\"phageName\"], description=headers[\"source\"])\n ##FIXME\n record.annotations[\"AUTHORS\"] = \"Becker, L.W.\"\n record.annotations[\"Reference\"] = \"whole thing\"\n\n qualifiers = {}\n qualifiers[\"organism\"] = headers[\"source\"]\n qualifiers[\"mol_type\"] = headers[\"molType\"]\n qualifiers[\"isolation_source\"] = headers[\"isolationSource\"]\n qualifiers[\"lab_host\"] = headers[\"labHost\"]\n qualifiers[\"country\"] = headers[\"country\"]\n qualifiers[\"identified_by\"] = headers[\"identifiedBy\"]\n qualifiers[\"note\"] = headers[\"notes\"]\n feature = SeqFeature(FeatureLocation(start=0, end=len(genome)), type='source', qualifiers=qualifiers)\n record.features.append(feature)\n\n idNumber = 0\n for cds in Annotations.query.filter_by(phage_id=phage_id).order_by(Annotations.left).all():\n if (cds.function == \"@DELETED\" or cds.status == \"trnaDELETED\"):\n continue\n idNumber += 1\n if cds.strand == '-':\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = str(idNumber)\n if headers[\"includeNotes\"]:\n qualifiers[\"note\"] = cds.notes\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right, strand=-1), id=cds.id[0:-1] + str(idNumber), type='gene', qualifiers=qualifiers)\n record.features.append(feature)\n if cds.status == \"tRNA\":\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"note\"] = cds.function\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right, strand=-1), id=cds.id[0:-1] + str(idNumber), type='tRNA', qualifiers=qualifiers)\n record.features.append(feature)\n else:\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"codon_left\"] = [1]\n qualifiers[\"transl_table\"] = [11]\n pattern = re.compile(\"@(.*)##(.*)\")\n matches = pattern.search(cds.function)\n if matches:\n qualifiers[\"product\"] = matches.group(1)\n qualifiers[\"protein_id\"] = matches.group(2)\n else:\n qualifiers[\"product\"] = \"Hypothetical Protein\"\n qualifiers[\"protein_id\"] = \"unknown:\" + qualifiers[\"locus_tag\"]\n left = len(genome) - cds.right\n right = len(genome) - cds.left + 1\n qualifiers[\"translation\"] = Seq.translate(helper.get_sequence(genome, cds.strand, left, right), table=11)[0:-1]\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right, strand=-1), id=cds.id[0:-1] + str(idNumber), type='CDS', qualifiers=qualifiers)\n record.features.append(feature)\n else:\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n if headers[\"includeNotes\"]:\n qualifiers[\"note\"] = cds.notes\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right), id=cds.id[0:-1] + str(idNumber), type='gene', qualifiers=qualifiers)\n record.features.append(feature)\n if cds.status == \"tRNA\":\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"note\"] = cds.function\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right), 
id=cds.id[0:-1] + str(idNumber), type='tRNA', qualifiers=qualifiers)\n record.features.append(feature)\n else:\n qualifiers = {}\n qualifiers[\"gene\"] = str(idNumber)\n qualifiers[\"locus_tag\"] = headers[\"phageName\"] + '_' + str(idNumber)\n qualifiers[\"codon_left\"] = [1]\n qualifiers[\"transl_table\"] = [11]\n pattern = re.compile(\"@(.*)##(.*)\")\n matches = pattern.search(cds.function)\n if matches:\n qualifiers[\"product\"] = matches.group(1)\n qualifiers[\"protein_id\"] = matches.group(2)\n else:\n qualifiers[\"product\"] = \"Hypothetical Protein\"\n qualifiers[\"protein_id\"] = \"unknown:\" + qualifiers[\"locus_tag\"]\n qualifiers[\"translation\"] = Seq.translate(helper.get_sequence(genome, cds.strand, cds.left - 1, cds.right), table=11)[0:-1]\n feature = SeqFeature(FeatureLocation(start=cds.left - 1, end=cds.right), id=cds.id[0:-1] + str(idNumber), type='CDS', qualifiers=qualifiers)\n record.features.append(feature)\n with open(gb_file, 'w') as genbank:\n SeqIO.write(record, genbank, 'genbank')\n new_lines = []\n with open (gb_file, 'r') as genbank:\n lines = genbank.readlines()\n for index, line in enumerate(lines):\n if index is 0:\n new_lines.append(line[0:-28] + \" linear \" + datetime.now().strftime('%d-%b-%Y').upper() + '\\n')\n elif index is 5 or index is 6:\n if headers[\"source\"] != \"\":\n new_lines.append(line[0:-2] + headers[\"source\"] + '\\n')\n elif index is 7:\n if headers[\"organism\"] != \"\":\n new_lines.append(line[0:-2] + headers[\"organism\"] + '\\n')\n new_lines.append(\"REFERENCE 1 (bases 1 to \" + str(len(genome)) + \")\\n\")\n long_line = \" AUTHORS \" + headers[\"authors\"]\n while len(long_line) > 81:\n new_lines.append(long_line[0:80] + '\\n')\n long_line = long_line[81:]\n new_lines.append(long_line + '\\n')\n long_line = \" TITLE \" + headers[\"title\"]\n while len(long_line) > 81:\n new_lines.append(long_line[0:80] + '\\n')\n long_line = long_line[81:]\n new_lines.append(long_line + '\\n')\n long_line = \" JOURNAL \" + headers[\"journal\"]\n while len(long_line) > 81:\n new_lines.append(long_line[0:80] + '\\n')\n long_line = long_line[81:]\n new_lines.append(long_line + '\\n')\n else:\n new_lines.append(line)\n with open (gb_file, 'w') as genbank:\n genbank.writelines(new_lines)\n return gb_file", "def gbk2pasaNEW(input, gff, trnaout, fastaout, spliceout, exonout, proteinsout):\n LocusTags = []\n multiExon = {}\n genes = {}\n with open(fastaout, \"w\") as fasta:\n with open(input, \"r\") as gbk:\n for record in SeqIO.parse(gbk, \"genbank\"):\n fasta.write(\">%s\\n%s\\n\" % (record.id, record.seq))\n for f in record.features:\n lib.gb_feature_add2dict(f, record, genes)\n # out of order mRNA/CDS in genbank files can break this... so try to validate those with multiple transcripts\n warn = False\n for k, v in natsorted(list(genes.items())):\n if v[\"type\"] == \"mRNA\" and len(v[\"ids\"]) > 1:\n confirmedCDS, confirmedExons, warning = validateCDSmRNAPairs(\n k, v[\"CDS\"], v[\"mRNA\"], v[\"strand\"]\n )\n if warning:\n warn = True\n genes[k][\"CDS\"] = confirmedCDS\n genes[k][\"mRNA\"] = confirmedExons\n if warn:\n lib.log.info(\n \"GenBank file has multiple transcripts per locus, I tried my hardest to match them up but can't gaurantee there aren't errors. You can blame NCBI. 
You may want to try to pass a GFF3 + FASTA files instead of GBK.\"\n )\n with open(gff, \"w\") as gffout:\n gffout.write(\"##gff-version 3\\n\")\n with open(trnaout, \"w\") as trna:\n with open(proteinsout, \"w\") as protout:\n for k, v in natsorted(list(genes.items())):\n if k not in LocusTags:\n LocusTags.append(k)\n if v[\"type\"] == \"mRNA\":\n # write GFF gene feature\n if v[\"name\"]:\n gffout.write(\n \"{:}\\tGenBank\\tgene\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:};Name={:};\\n\".format(\n v[\"contig\"],\n v[\"location\"][0],\n v[\"location\"][1],\n v[\"strand\"],\n k,\n v[\"name\"],\n )\n )\n else:\n gffout.write(\n \"{:}\\tGenBank\\tgene\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:};\\n\".format(\n v[\"contig\"],\n v[\"location\"][0],\n v[\"location\"][1],\n v[\"strand\"],\n k,\n )\n )\n for i in range(0, len(v[\"ids\"])):\n # now write mRNA feature\n gffout.write(\n \"{:}\\tGenBank\\t{:}\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:};Parent={:};product={:};\\n\".format(\n v[\"contig\"],\n v[\"type\"],\n v[\"location\"][0],\n v[\"location\"][1],\n v[\"strand\"],\n v[\"ids\"][i],\n k,\n v[\"product\"][i],\n )\n )\n protout.write(\n \">%s %s\\n%s\\n\" % (v[\"ids\"][i], k, v[\"protein\"][i])\n )\n # write the exons and CDS features\n num_exons = len(v[\"mRNA\"][i])\n for x in range(0, num_exons):\n ex_num = x + 1\n gffout.write(\n \"{:}\\tGenBank\\texon\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:}.exon{:};Parent={:};\\n\".format(\n v[\"contig\"],\n v[\"mRNA\"][i][x][0],\n v[\"mRNA\"][i][x][1],\n v[\"strand\"],\n v[\"ids\"][i],\n ex_num,\n v[\"ids\"][i],\n )\n )\n if num_exons > 1:\n # ss and exons are 0-based position, so 1 less than GFF\n exons_start = int(v[\"mRNA\"][i][x][0]) - 1\n exons_end = int(v[\"mRNA\"][i][x][1]) - 1\n # add to exon dictionary\n if not v[\"ids\"][i] in multiExon:\n multiExon[v[\"ids\"][i]] = [\n v[\"contig\"],\n v[\"strand\"],\n [(exons_start, exons_end)],\n ]\n else:\n multiExon[v[\"ids\"][i]][2].append(\n (exons_start, exons_end)\n )\n num_cds = len(v[\"CDS\"][i])\n # GFF3 phase is 1 less than flat file\n current_phase = v[\"codon_start\"][i] - 1\n for y in range(0, num_cds):\n gffout.write(\n \"{:}\\tGenBank\\tCDS\\t{:}\\t{:}\\t.\\t{:}\\t{:}\\tID={:}.cds;Parent={:};\\n\".format(\n v[\"contig\"],\n v[\"CDS\"][i][y][0],\n v[\"CDS\"][i][y][1],\n v[\"strand\"],\n current_phase,\n v[\"ids\"][i],\n v[\"ids\"][i],\n )\n )\n current_phase = (\n current_phase\n - (\n int(v[\"CDS\"][i][y][1])\n - int(v[\"CDS\"][i][y][0])\n + 1\n )\n ) % 3\n if current_phase == 3:\n current_phase = 0\n elif v[\"type\"] in [\"tRNA\", \"rRNA\", \"ncRNA\"]:\n # check length of tRNA gene should be between 50 and 150\n if v[\"type\"] == \"tRNA\":\n if v[\"strand\"] == \"+\":\n length = abs(\n int(v[\"location\"][1]) - int(v[\"location\"][0])\n )\n else:\n length = abs(\n int(v[\"location\"][0]) - int(v[\"location\"][1])\n )\n else:\n length = 100 # just a placeholder for rRNA features --> not sure if they have length requirements?\n if length < 50 or length > 150:\n continue\n trna.write(\n \"{:}\\tGenBank\\tgene\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:};\\n\".format(\n v[\"contig\"],\n v[\"location\"][0],\n v[\"location\"][1],\n v[\"strand\"],\n k,\n )\n )\n for i in range(0, len(v[\"ids\"])):\n trna.write(\n \"{:}\\tGenBank\\t{:}\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:};Parent={:};product={:};\\n\".format(\n v[\"contig\"],\n v[\"type\"],\n v[\"location\"][0],\n v[\"location\"][1],\n v[\"strand\"],\n v[\"ids\"][i],\n k,\n v[\"product\"][i],\n )\n )\n if v[\"type\"] == \"tRNA\":\n num_exons = len(v[\"mRNA\"][i])\n for x in range(0, 
num_exons):\n ex_num = x + 1\n trna.write(\n \"{:}\\tGenBank\\texon\\t{:}\\t{:}\\t.\\t{:}\\t.\\tID={:}.exon{:};Parent={:};\\n\".format(\n v[\"contig\"],\n v[\"mRNA\"][i][x][0],\n v[\"mRNA\"][i][x][1],\n v[\"strand\"],\n v[\"ids\"][i],\n ex_num,\n v[\"ids\"][i],\n )\n )\n\n # parse splice sites and write to file\n with open(exonout, \"w\") as exon:\n with open(spliceout, \"w\") as splicer:\n for k, v in natsorted(list(multiExon.items())):\n sortedList = sorted(v[2], key=lambda tup: tup[0])\n for y in sortedList:\n exon.write(\"%s\\t%i\\t%i\\t%s\\n\" % (v[0], y[0], y[1], v[1]))\n splices = []\n for i in range(1, len(sortedList)):\n splices.append((sortedList[i - 1][1], sortedList[i][0]))\n for x in splices:\n splicer.write(\"%s\\t%i\\t%i\\t%s\\n\" % (v[0], x[0], x[1], v[1]))\n # finally lets return the base locus tag name and the last number\n lastTag = natsorted(LocusTags)[-1]\n if \"_\" in lastTag:\n tagsplit = lastTag.split(\"_\")\n if len(tagsplit) > 2:\n lib.log.error(\n \"Too many '_' in the locus name, NCBI expects only 1 underscore\"\n )\n sys.exit(1)\n\n tag, count = tagsplit\n tag = tag + \"_\"\n else:\n tag, count = (None,) * 2\n for i, c in enumerate(lastTag):\n if c.isdigit():\n tag = lastTag[:i]\n count = lastTag[i:]\n break\n # if we cannot determine, then just set it to something\n if not tag or not count:\n count = len(LocusTags)\n tag = \"FUN_\"\n # if it is numerical great, otherwise count total gene tags\n try:\n count = int(count)\n except ValueError:\n count = len(LocusTags)\n justify = len(str(count))\n return tag, count, justify", "def makedbGenbank(seqFile, outputPath):\n # prepare file path and names\n seqPathName = os.path.splitext(seqFile)[0]\n seqName = os.path.split(seqPathName)[1]\n dbPath = outputPath\n if not os.path.isdir(dbPath):\n os.makedirs(dbPath)\n # makedirs support both simple folder and nested folder\n # mkdir only support simple folder\n seqName = steriliseName(seqName)\n nuclFile = f'{seqName}_nucl.fasta'\n protFile = f'{seqName}_prot.fasta'\n nuclPath = os.path.join(dbPath, nuclFile)\n protPath = os.path.join(dbPath, protFile)\n outNuclDb = os.path.join(dbPath, os.path.splitext(nuclFile)[0])\n outProtDb = os.path.join(dbPath, os.path.splitext(protFile)[0])\n\n # convert to nt fasta\n ntseqs = SeqIO.parse(seqFile, 'genbank')\n SeqIO.write(ntseqs, nuclPath, 'fasta')\n\n # parse CDS and write translation to prot fasta\n prots = getProteins(seqFile)\n SeqIO.write(prots, protPath, 'fasta')\n\n # make nt database\n runMakedbNucl = NcbimakeblastdbCommandline(\n dbtype='nucl',\n input_file=nuclPath,\n out=outNuclDb\n )\n stdout, stderr = runMakedbNucl()\n if stderr == '':\n print('\\nSuccessfully made nucleotide database:')\n print(f'{outNuclDb}')\n else:\n print('Make nucleotide database failed')\n print('[stdout, stderr, seqPathName, seqName, dbPath, outNuclDb]')\n print('\\n'.join([stdout, stderr, seqPathName,\n seqName, dbPath, outNuclDb]))\n raise Exception()\n\n # make protein database\n runMakedbProt = NcbimakeblastdbCommandline(\n dbtype='prot',\n input_file=protPath,\n out=outProtDb\n )\n stdout, stderr = runMakedbProt()\n if stderr == '':\n print(\n f'\\nSuccessfully made protein database\\n{outProtDb}')\n else:\n print('Make protein database failed')\n print('[stdout, stderr, seqPathName, seqPath, seqName, dbPath, outProtDb]')\n print('\\n'.join([stdout, stderr, seqPathName,\n seqPath, seqName, dbPath, outProtDb]))\n raise Exception()\n\n return outNuclDb, outProtDb", "def convert_genbank(recs, seqid=None):\n global COUNTER\n\n # The outer 
dictionary containing multiple records.\n data = []\n\n # Add each record separately.\n for rec in recs:\n\n # Each individual SeqRecords is a dictionary.\n item = dict()\n\n # Fill the standard SeqRecord fields.\n item[const.SEQID] = seqid or rec.id\n item[const.DEFINITION] = rec.description\n item[const.DBLINK] = rec.dbxrefs\n item[const.LOCUS] = rec.name\n item[const.FEATURE_COUNT] = len(rec.features)\n item[const.ORIGIN_SIZE] = len(rec.seq)\n\n # Fill in all annotations.\n for key, value in rec.annotations.items():\n item[key] = json_ready(value)\n\n # Fill in the features.\n feats = []\n for feat in rec.features:\n\n # Feature type.\n ftype = feat.type\n\n # Remap GenBank terms to Sequence Ontology terms.\n ftype = const.SEQUENCE_ONTOLOGY.get(ftype, ftype)\n\n # Feature strand.\n strand = feat.strand\n\n # Feature coordinates are 1 based.\n start = int(feat.location.start) + 1\n end = int(feat.location.end)\n\n # Location operator.\n oper = feat.location_operator\n\n # Location coordinates are 1 based.\n location = [(loc.start + 1, loc.end, loc.strand) for loc in feat.location.parts]\n\n # Feature attributes\n attrs = dict(id='', name='', start=start, end=end, type=ftype, strand=strand, location=location,\n operator=oper)\n\n # Fills in the additional qualifiers.\n for (k, v) in feat.qualifiers.items():\n attrs[k] = json_ready(v)\n\n # Correct uid, parent and name\n attrs = fill_name(attrs)\n\n # Append the attributes as a record.\n feats.append(attrs)\n\n # Add the features\n item[const.FEATURES] = feats\n\n # Save the sequence as well\n item[const.ORIGIN] = str(rec.seq)\n\n # Add the item to the list.\n data.append(item)\n\n logger.info(\"genbank conversion completed\")\n return data", "def convert_file(in_file, out_file):\n sequences = SeqIO.parse(in_file, \"genbank\")\n g = open(out_file, \"w\")\n SeqIO.write(sequences, out_file, \"fasta\")", "def GFFParse(gff_file):\n\n genes, transcripts, exons, utr3, utr5, cds = {}, {}, {}, {}, {}, {}\n gff_handle = open(gff_file, \"rU\")\n for gff_line in gff_handle:\n gff_line = gff_line.strip('\\n\\r').split('\\t')\n if not gff_line:continue\n if re.match(r'#', gff_line[0]) or re.match(r'>', gff_line[0]):continue\n if len(gff_line) == 1:continue ## GFF files with genome sequence in FASTA at the end \n assert (len(gff_line)==9), '\\t'.join(gff_line)\n if gff_line[3] == '' or gff_line[4] == '' or gff_line[-1] == '':sys.stdout.write('Warning: invalid GFF line\\t' + '\\t'.join(gff_line) + '\\n');continue\n if gff_line[2] == 'gene' or gff_line[2] == 'pseudogene':\n gid, gene_info = None, dict()\n gene_info['start'] = int(gff_line[3])\n gene_info['stop'] = int(gff_line[4])\n gene_info['chr'] = gff_line[0]\n gene_info['source'] = gff_line[1]\n gene_info['strand'] = gff_line[6]\n for attr in gff_line[-1].split(';'):\n if attr == '':continue\n attr = attr.split('=')\n if attr[0] == 'ID':gid=attr[1];continue \n gene_info[attr[0]] = attr[1]\n genes[(gff_line[0], gid)] = gene_info\n elif gff_line[2] == 'mRNA' or gff_line[2] == 'transcript' or gff_line[2] == 'ncRNA' or gff_line[2] == 'miRNA' or gff_line[2] == 'pseudogenic_transcript' or gff_line[2] == 'rRNA' or gff_line[2] == 'snoRNA' or gff_line[2] == 'snRNA' or gff_line[2] == 'tRNA' or gff_line[2] == 'scRNA': # TODO Include non coding transcripts \n gid, mrna_info = None, dict() \n mrna_info['start'] = int(gff_line[3])\n mrna_info['stop'] = int(gff_line[4])\n mrna_info['chr'] = gff_line[0]\n mrna_info['strand'] = gff_line[6]\n for attr in gff_line[-1].split(';'):\n if attr == '':continue\n attr = 
attr.split('=')\n if attr[0] == 'Parent':gid=attr[1];continue\n mrna_info[attr[0]] = attr[1]\n if (gff_line[0], gid) in transcripts:\n transcripts[(gff_line[0], gid)].append(mrna_info)\n else:\n transcripts[(gff_line[0], gid)] = [mrna_info]\n elif gff_line[2] == 'exon':\n tids, exon_info = None, dict()\n exon_info['start'] = int(gff_line[3])\n exon_info['stop'] = int(gff_line[4])\n exon_info['chr'] = gff_line[0]\n exon_info['strand'] = gff_line[6]\n for attr in gff_line[-1].split(';'):\n if attr == '':continue\n attr = attr.split('=')\n if attr[0] == 'Parent':tids=attr[1];continue\n exon_info[attr[0]] = attr[1]\n for tid in tids.split(','):\n if (gff_line[0], tid) in exons:\n exons[(gff_line[0], tid)].append(exon_info)\n else:\n exons[(gff_line[0], tid)] = [exon_info]\n elif gff_line[2] == 'five_prime_UTR':\n utr5_info, tids = dict(), None\n utr5_info['start'] = int(gff_line[3])\n utr5_info['stop'] = int(gff_line[4])\n utr5_info['chr'] = gff_line[0]\n utr5_info['strand'] = gff_line[6]\n for attr in gff_line[-1].split(';'):\n if attr == '':continue\n attr = attr.split('=')\n if attr[0] == 'Parent':tids=attr[1];continue\n utr5_info[attr[0]] = attr[1]\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr5:\n utr5[(gff_line[0], tid)].append(utr5_info)\n else:\n utr5[(gff_line[0], tid)] = [utr5_info]\n elif gff_line[2] == 'CDS':\n cds_info, tids = dict(), None\n cds_info['start'] = int(gff_line[3])\n cds_info['stop'] = int(gff_line[4])\n cds_info['chr'] = gff_line[0]\n cds_info['strand'] = gff_line[6]\n for attr in gff_line[-1].split(';'):\n if attr == '':continue\n attr = attr.split('=')\n if attr[0] == 'Parent':tids=attr[1];continue\n cds_info[attr[0]] = attr[1]\n for tid in tids.split(','):\n if (gff_line[0], tid) in cds:\n cds[(gff_line[0], tid)].append(cds_info)\n else:\n cds[(gff_line[0], tid)] = [cds_info]\n elif gff_line[2] == 'three_prime_UTR':\n utr3_info, tids = dict(), None\n utr3_info['start'] = int(gff_line[3])\n utr3_info['stop'] = int(gff_line[4])\n utr3_info['chr'] = gff_line[0]\n utr3_info['strand'] = gff_line[6]\n for attr in gff_line[-1].split(';'):\n if attr == '':continue\n attr = attr.split('=')\n if attr[0] == 'Parent':tids=attr[1];continue\n utr3_info[attr[0]] = attr[1]\n for tid in tids.split(','):\n if (gff_line[0], tid) in utr3:\n utr3[(gff_line[0], tid)].append(utr3_info)\n else:\n utr3[(gff_line[0], tid)] = [utr3_info]\n gff_handle.close()\n return genes, transcripts, exons, utr3, utr5, cds", "def get_genbank(UPLOAD_FOLDER, phage_id, payload):\n gb_file = helper.get_file_path(\"genbank\", UPLOAD_FOLDER)\n fasta_file = helper.get_file_path(\"fasta\", UPLOAD_FOLDER)\n gb_file = create_genbank(fasta_file, UPLOAD_FOLDER, phage_id, payload)\n f = open(gb_file, \"r\")\n return f.read()", "def build(sequences, output, force):\n if output is None:\n output = os.path.splitext(sequences)[0] + '.db'\n if os.path.exists(output):\n msg = u\"Are you sure you want to overwrite {}?\".format(output)\n if force or click.confirm(msg):\n if os.path.isdir(output):\n shutil.rmtree(output)\n else:\n os.unlink(output)\n os.makedirs(output)\n dest_seqs = os.path.join(output, 'sequences.fa')\n shutil.copyfile(sequences, dest_seqs)\n db = SeqDB(dest_seqs)\n seqs = list(db)\n click.echo(u'Building for {} sequences...'.format(len(seqs)), nl=False)\n tree = VPTree.build(seqs)\n click.echo(u'done.')\n click.echo(u'Linearizing...', nl=False)\n linear_tree = LinearVPTree.fromtree(db, tree)\n click.echo(u'done.')\n fn = os.path.join(output, 'indices.npz')\n linear_tree.save(fn)\n 
click.echo(u'Database created in {}'.format(output))", "def mk_file(genes):\n if not os.path.isdir(\"data\"):\n os.mkdir(\"data\")\n if not os.path.isfile(out_file):\n subprocess.run([\"touch\", out_file]) # make sure the alignment file exists\n print(\"making unaligned fasta...\")\n with open(in_file, \"w\") as FILE:\n for gene in genes:\n print(f\">{gene}\\n{genes[gene].replace('-', '')}\", file=FILE) # create the unaligned file", "def makeBLASTDatabase(dbfilename,makeblastdb = \"\",outfilename=\"outdb\"):\r\n if(makeblastdb==\"\"):\r\n makeblastdb = \"makeblastdb -dbtype {} -out \\\"{}\\\" -title {} -in \\\"{}\\\"\"\r\n os.system(makeblastdb.format('nucl',gbU.guttDNA+\"temp\\\\\"+outfilename,outfilename,dbfilename))\r\n print \"made db!\"", "def make_genome(genome_record, strain_tax):\n gc = bioutils.GC(genome_record.seq) # GC content\n p = create_taxonomy(genome_record) # create all taxonomy\n assert genome_record.features[0].type=='source'\n q = genome_record.features[0].qualifiers\n chromosome = q['chromosome'][0] if 'chromosome' in q else ''\n # create genome sequence object\n genome_sequence = GenomeSequence.objects.create(sequence=genome_record.seq.tostring())\n # create genome object\n g = Genome(genome_accession=genome_record.id,\n genome_sequence = genome_sequence,\n GC_content=gc,\n gi=genome_record.annotations['gi'],\n organism=genome_record.annotations['organism'],\n chromosome=chromosome,\n taxonomy=p)\n g.save()", "def parse_cntg(seq, number, map_file, folder, seq_index):\n\n # Save the input arguments\n current_nucleotides = seq\n i = number\n map = map_file\n current_dir = folder\n index = seq_index\n # If the file is empty, this means the sequence started with Ns (this may occur during scaffold parsing).\n # In this case, do not write a contig file (it would be empty)\n if(len(current_nucleotides) == 0):\n pass\n # If the length of the contig is smaller than 250.000 nt, write a single file\n elif (len(current_nucleotides) < 250000):\n # Write the file\n output_file = open(\"{}/Sequence{}\".format(current_dir, str(i)), \"w\")\n output_file.write(current_nucleotides)\n # output_file.close()\n # Write in the map file\n map.write(\"{}/Sequence{}\\t{} {}\\n\".format(current_dir, str(i), str(index),\n str(index + len(current_nucleotides) - 1)))\n index = index + len(current_nucleotides)\n # If this contig is bigger than 250.000 nt, split it into files of 100.000 nt each\n else:\n # Divide the current contig into chunks of 100.000 nt\n chunks = [current_nucleotides[y:y + 100000]\n for y in range(0, len(current_nucleotides), 100000)]\n # Create a new folder to store those chunks\n current_dir = \"{}/Sequence{}\".format(current_dir, str(i))\n os.mkdir(current_dir)\n # Create chunk_count\n chunk_count = 0\n # Write each chunk in a separate file\n for chunk in chunks:\n chunk_count = chunk_count + 1\n # Write the file\n output_file = open(\n \"{}/Sequence{}\".format(current_dir, str(chunk_count)), \"w\")\n output_file.write(chunk)\n # output_file.close()\n # Write in the map file\n map.write(\"{}/Sequence{}\\t{} {}\\n\".format(current_dir, str(chunk_count),\n str(index), str(index + len(chunk) - 1)))\n index = index + len(chunk)\n # Return the updated index\n return index", "def combine_gffs(gff_file_list, filename):\n # sort gff files, just in case\n gff_file_list = sorted(gff_file_list)\n\n with open(filename, 'w') as outfile:\n file_num = 1\n for f in gff_file_list:\n print('get the good stuff from {}'.format(f))\n group = get_group_name(f)\n with open(f) as f1:\n for line_num, 
line in enumerate(f1): #keep the header from file1\n\n # The first line is `##gff-version 3`. Keep that for the first file.\n if (file_num == 1) and (line_num == 0):\n outfile.write(line)\n\n # The end of the file has the entire FASTA sequence glued on. Remove it.\n elif '##FASTA' in line:\n # We aren't keeping the nucleotide sequences!\n break\n # Delete subsequent lines like `##sequence-region k141_461591 1 2140`\n elif (line_num > 0) and line.startswith('##'):\n print('skip line: {}'.format(line))\n continue\n else:\n # Need to give each file unique ID's. If not, each file has an ID like\n # ID=contigs_longer_than_1500bp_00001\n # and duplicates are a problem.\n m = re.search('(contigs_[_a-z0-9]+bp)', line)\n if m:\n general_name = m.group(1)\n new_name = general_name + '_group_' + group\n line_edited = re.sub(general_name, new_name, line)\n outfile.write(line_edited)\n else:\n print(\"failed to find re.search for '(contigs_[_a-z0-9]+bp)' in '{}'\".format(line))\n outfile.write(line)\n file_num += 1", "def addFeatures(self):\n\t\tfrom Bio import Entrez\n\t\tfrom Bio import SeqIO\n\t\timport json\n\t\timport ast\n\t\tEntrez.email = 'pedro_araujo97@hotmail.com'\n\t\tprint('Working...')\n\t\tfor phageID in self.data.index:\n\t\t\twith Entrez.efetch(db=\"nucleotide\", rettype=\"gb\", retmode=\"text\", id=phageID) as handle:\n\t\t\t\tgenomePhage = SeqIO.read(handle, \"gb\")\n\t\t\tprotsPhage = {}\n\t\t\tfor feat in genomePhage.features:\n\t\t\t\tif feat.type == 'CDS':\n\t\t\t\t\ttry: protsPhage[feat.qualifiers['protein_id'][0]] = [feat.qualifiers['product'][0], feat.qualifiers['translation'][0]]\n\t\t\t\t\texcept: pass\n\t\t\tself.phagesProteins[phageID] = protsPhage\n\n\t\tfor bact in self.all_bact:\n\t\t\tprotsBac = {}\n\t\t\twith Entrez.efetch(db=\"nucleotide\", rettype=\"gb\", retmode=\"text\", id=bact) as handle:\n\t\t\t\tgenomeBac = SeqIO.read(handle, \"gb\")\n\t\t\tfor feat in genomeBac.features:\n\t\t\t\tif feat.type == 'CDS':\n\t\t\t\t\ttry: protsBac[feat.qualifiers['protein_id'][0]] = [feat.qualifiers['product'][0], feat.qualifiers['translation'][0]]\n\t\t\t\t\texcept: pass\n\t\t\tself.bactProteins[bact] = protsBac\n\n\t\twith open('files/phagesProteins.json', 'w') as f:\n\t\t\tjson.dump(self.phagesProteins, f)\n\t\tself.__createFasta(self.phagesProteins, 'phagesProteins')\n\t\twith open('files/bactProteins.json', 'w') as f:\n\t\t\tjson.dump(self.bactProteins, f)\n\t\tself.__createFasta(self.bactProteins, 'bactProteins')\n\t\tprint('Done')", "def gv_generate(t):\r\n manual_foot = '}'\r\n\r\n for element in t:\r\n genre_name = element[0]\r\n genre_list = element[1]\r\n filename = genre_name + '.gv'\r\n\r\n header = (\r\n 'graph \"' + genre_name + '\" {\\n',\r\n '\\tpage=\"8.5,11\";\\n',\r\n '\\tratio=fill;\\n',\r\n '\\toverlap=false;\\n',\r\n '\\t\"' + genre_name + '\"[shape=box];\\n'\r\n )\r\n\r\n with codecs.open(filename, 'w', 'utf-8-sig') as p:\r\n for x in header:\r\n p.write(x)\r\n\r\n for tup in genre_list:\r\n if tup[0] != tup[1]:\r\n line = '\\t\"%s\" -- \"%s\";\\n' % (tup[0], tup[1])\r\n p.write(line)\r\n\r\n p.write(manual_foot)", "def createPedonFGDB():\n\n try:\n AddMsgAndPrint(\"\\nCreating New Pedon File Geodatabase\",0)\n arcpy.SetProgressorLabel(\"Creating New Pedon File Geodatabase\")\n\n # pedon xml template that contains empty pedon Tables and relationships\n # schema and will be copied over to the output location\n pedonXML = os.path.dirname(sys.argv[0]) + os.sep + \"Extract_Pedons_from_NASIS_XMLWorkspace.xml\"\n localPedonGDB = os.path.dirname(sys.argv[0]) + 
os.sep + \"NasisPedonsTemplate.gdb\"\n\n # Return false if xml file is not found\n if not arcpy.Exists(pedonXML):\n AddMsgAndPrint(\"\\t\" + os.path.basename(pedonXML) + \" Workspace document was not found!\",2)\n return \"\"\n\n # Return false if pedon fGDB template is not found\n if not arcpy.Exists(localPedonGDB):\n AddMsgAndPrint(\"\\t\" + os.path.basename(localPedonGDB) + \" FGDB template was not found!\",2)\n return \"\"\n\n newPedonFGDB = os.path.join(outputFolder,GDBname + \".gdb\")\n\n if arcpy.Exists(newPedonFGDB):\n try:\n arcpy.Delete_management(newPedonFGDB)\n AddMsgAndPrint(\"\\t\" + GDBname + \".gdb already exists. Deleting and re-creating FGDB\\n\",1)\n except:\n AddMsgAndPrint(\"\\t\" + GDBname + \".gdb already exists. Failed to delete\\n\",2)\n return \"\"\n\n # copy template over to new location\n AddMsgAndPrint(\"\\tCreating \" + GDBname + \".gdb with NCSS Pedon Schema 7.3\")\n arcpy.Copy_management(localPedonGDB,newPedonFGDB)\n\n## # Create empty temp File Geodatabae\n## arcpy.CreateFileGDB_management(outputFolder,os.path.splitext(os.path.basename(newPedonFGDB))[0])\n##\n## # set the pedon schema on the newly created temp Pedon FGDB\n## AddMsgAndPrint(\"\\tImporting NCSS Pedon Schema 7.3 into \" + GDBname + \".gdb\")\n## arcpy.ImportXMLWorkspaceDocument_management(newPedonFGDB, pedonXML, \"DATA\", \"DEFAULTS\")\n\n arcpy.UncompressFileGeodatabaseData_management(newPedonFGDB)\n\n arcpy.RefreshCatalog(outputFolder)\n\n AddMsgAndPrint(\"\\tSuccessfully created: \" + GDBname + \".gdb\")\n\n return newPedonFGDB\n\n except arcpy.ExecuteError:\n AddMsgAndPrint(arcpy.GetMessages(2),2)\n return \"\"\n\n except:\n AddMsgAndPrint(\"Unhandled exception (createFGDB)\", 2)\n errorMsg()\n return \"\"", "def create_genome_annotation(self, filename):\n ann = Data.objects.create(\n name='Annotation_{}'.format(filename.split('.')[0]),\n process=get_process('upload-gtf'),\n contributor=get_superuser(),\n status=Data.STATUS_PROCESSING,\n input={'src': {'file': filename}, 'source': 'UCSC'})\n\n os.mkdir(os.path.join(self.data_dir, str(ann.id)))\n\n with gzip.open(os.path.join(self.test_files_path, filename), 'rb') as gzfile:\n with open(os.path.join(self.data_dir, str(ann.id), filename[:-3]), 'wb') as outfile:\n shutil.copyfileobj(gzfile, outfile)\n\n ann.output = {\n 'gtf': {'file': filename[:-3]},\n 'source': 'UCSC'\n }\n ann.status = Data.STATUS_DONE\n ann.save()\n\n with open(os.path.join(self.data_dir, str(ann.id), 'stdout.txt'), 'w') as stdout:\n stdout.write('Upload genome annotation with the '\n 'generate_diffexpr_cuffdiff django-admin command.')\n\n logger.info(__('Genome annotation created: {} (id={})', filename, ann.id))\n\n return ann", "def test_get_file_success(coordination_args, gb_file_dir):\n accession = \"GCA_test####_genomic\"\n result = gb_file_dir / \"GCA_test####_genomic.gbff.gz\"\n\n assert result == get_genbank_annotations.get_genbank_file(accession, coordination_args[\"args\"])", "def create_basis_file():\n\n energy_cutoffs = [20] # [21., 40., 60., 80., 100]\n max_matching_order = 1\n\n for energy_cutoff in energy_cutoffs:\n print(\"ZR. Trial energy cutoff: \" + str(energy_cutoff) + \" max_matching_order:\" + str(max_matching_order))\n print(add_to_groundstate_basis(zr_groundstate_basis, zr_trial_energies, energy_cutoff, max_matching_order))\n\n print(\"OXYGEN. 
Trial energy cutoff: \" + str(energy_cutoff) + \" max_matching_order:\" + str(max_matching_order))\n print(add_to_groundstate_basis(oxygen_groundstate_basis, o_trial_energies, energy_cutoff, max_matching_order))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls Impala HS2 API's GetExecSummary method on the given query handle
def get_exec_summary(self, operation_handle, session_handle):
    req = ImpalaHiveServer2Service.TGetExecSummaryReq(operationHandle=operation_handle, sessionHandle=session_handle)

    # GetExecSummary() only works for closed queries
    try:
        self.close_operation(operation_handle)
    except QueryServerException, e:
        LOG.warn('Failed to close operation for query handle, query may be invalid or already closed.')

    resp = self.call(self._client.GetExecSummary, req)
    return self._serialize_exec_summary(resp.summary)
[ "def test_execute_sum_query(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, AzureInstanceTypeView)\n handler = AzureReportQueryHandler(query_params)\n\n filters = self.ten_day_filter\n for filt in handler._mapper.report_type_map.get(\"filter\"):\n qf = QueryFilter(**filt)\n filters.update({qf.composed_query_string(): qf.parameter})\n current_totals = self.get_totals_costs_by_time_scope(handler, filters)\n expected_cost_total = current_totals.get(\"cost_total\")\n self.assertIsNotNone(expected_cost_total)\n query_output = handler.execute_query()\n\n self.assertIsNotNone(query_output.get(\"data\"))\n self.assertIsNotNone(query_output.get(\"total\"))\n total = query_output.get(\"total\")\n self.assertIsNotNone(total.get(\"usage\", {}).get(\"value\"))\n self.assertEqual(total.get(\"usage\", {}).get(\"value\"), current_totals.get(\"usage\"))\n result_cost_total = total.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertIsNotNone(result_cost_total)\n self.assertEqual(result_cost_total, expected_cost_total)", "def _perform_query(self):\n query_params = {}\n if self._vcenter_uuid:\n url = \"/vcenters/{}/vulnerabilities/summary\".format(self._vcenter_uuid)\n else:\n url = \"/vulnerabilities/summary\"\n\n if self._severity:\n query_params[\"severity\"] = self._severity\n\n req_url = Vulnerability.OrgSummary.urlobject.format(self._cb.credentials.org_key) + url\n\n return self._doc_class(self._cb, initial_data=self._cb.get_object(req_url, query_params))", "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def _query_alerts(self, stat):\n info_statement = stat.sql_info_statement\n LOGGER.debug('Querying alert info for rule \\'%s\\': %s', stat.rule_name, info_statement)\n\n response = self._athena_client.run_async_query(info_statement)\n\n return response['QueryExecutionId']", "def executeShellStats(self):\n return subprocess.check_output([self.SHELL_PATH + '/stats.sh',\n self.TOKEN,\n self.GITLAB_GROUP,\n self.PLAIN_PROJECT,\n self.ROOT_PATH])", "def run_query(self, query, output_format='json'):\n self.cursor.execute(query)\n if self.cursor.rowcount != -1:\n res = [dict(line) for line in\n [zip([column[0] for column in self.cursor.description], row) for row in self.cursor.fetchall()]]\n self.close()\n if output_format == 'df':\n return pd.read_json(json.dumps(res, default=datetime_convert))\n else:\n return json.dumps(res)", "def read_query_result(self, *args, **kwargs): # real signature unknown\n pass", "def execution_stats():\n execution_id = validate_integer('executionId', only_positive=True, required=False)\n if execution_id is None:\n try:\n execution_id = _stats_collection.get_collection().find_one({'_id': {'subsystem_id': _user.get('scope'),\n 'type': 'aggregated'}})['last_execution_id']\n except TypeError:\n return app.response_class(response=_dumps({'stats': None, 'reason': 'The Data Gathering Subsystem '\n 'has not yet been executed.', 'last_execution_id': None}), status=404, mimetype='application/json')\n result = _stats_collection.get_collection().find_one(filter={'_id': {'subsystem_id': _user.get('scope'),\n 'execution_id': execution_id, 'type': 'last_execution'}})\n if result:\n return app.response_class(response=_dumps(result), status=200, mimetype='application/json')\n else:\n try:\n execution_id = _stats_collection.get_collection().find_one({'_id': {'subsystem_id': _user.get('scope'),\n 'type': 'aggregated'}})['last_execution_id']\n except TypeError:\n return app.response_class(response=_dumps({'stats': None, 
'reason': 'The Data Gathering Subsystem '\n 'has not yet been executed.'}), status=404, mimetype='application/json')\n return app.response_class(response=_dumps({'stats': None, 'reason': 'Unable to find stats for the given'\n ' execution ID.', 'last_execution_id': execution_id}), status=404, mimetype='application/json')", "def get_log_summary():\n clean_expired_sessions()\n\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n logging.info(\"get_log_summary start session=\" + str(session) + \" process=\" + str(process))\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n dictio = lh.get_handler_for_process_and_session(process, session).get_log_summary_dictio()\n\n logging.info(\n \"get_log_summary complete session=\" + str(session) + \" process=\" + str(process) + \" user=\" + str(user))\n\n ret = jsonify(dictio)\n return ret", "def query(self, query, check_return_code=True, use_tempfile=False):\n\n if use_tempfile:\n with tempfile.NamedTemporaryFile(mode='w', prefix='tmp-hive-query-', suffix='.hiveql') as f:\n logger.debug('Writing Hive query to tempfile {0}.'.format(f.name))\n f.write(query)\n f.flush()\n out = self.script(f.name, check_return_code)\n # NamedTemporaryFile will be deleted on close().\n return out\n else:\n return self._command(['-e', query], check_return_code)", "def execute(self):\n return SIA2Results(self.execute_votable(), url=self.queryurl, session=self._session)", "def executeQueryFetch(self, query = None):\n\t\tif query == None:\n\t\t\tquery = self.query\n\t\tself.cursor.execute(query)\n\t\tself.result = self.cursor.fetchall()\n\t\treturn self.result", "def __ExecuteSummarize(self):\n\n # If no results file is specified, use a default value.\n if len(self.__arguments) == 0:\n results_path = \"results.qmr\"\n else:\n results_path = self.__arguments[0]\n\n # The remaining arguments, if any, are test and suite IDs.\n id_arguments = self.__arguments[1:]\n # Are there any?\n if len(id_arguments) > 0:\n filter = 1\n # Expand arguments into test IDs.\n try:\n test_ids, suite_ids \\\n = self.GetDatabase().ExpandIds(id_arguments)\n except (qm.test.database.NoSuchTestError,\n qm.test.database.NoSuchSuiteError), exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n except ValueError, exception:\n raise qm.cmdline.CommandError, \\\n qm.error(\"no such ID\", id=str(exception))\n else:\n # No IDs specified. Show all test and resource results.\n # Don't show any results by test suite though.\n filter = 0\n suite_ids = []\n\n # Get an iterator over the results.\n try:\n results = base.load_results(open(results_path, \"rb\"),\n self.GetDatabase())\n except (IOError, xml.sax.SAXException), exception:\n raise QMException, \\\n qm.error(\"invalid results file\",\n path=results_path,\n problem=str(exception))\n\n any_unexpected_outcomes = 0\n\n # Compute the list of result streams to which output should be\n # written. Results path only used for HTML/NexTest\n streams = self.__GetResultStreams(results_path)\n \n # Send the annotations through.\n for s in streams:\n s.WriteAllAnnotations(results.GetAnnotations())\n\n # Get the expected outcomes.\n outcomes = self.__GetExpectedOutcomes()\n\n # Our filtering function. 
Should use itertools.ifilter, once\n # we can depend on having Python 2.3.\n def good(r):\n return r.GetKind() == Result.TEST \\\n and r.GetId() in test_ids\n\n # Simulate the events that would have occurred during an\n # actual test run.\n for r in results:\n if not filter or good(r):\n for s in streams:\n s.WriteResult(r)\n if (r.GetOutcome()\n != outcomes.get(r.GetId(), Result.PASS)):\n any_unexpected_outcomes = 1\n for s in streams:\n s.Summarize()\n\n if any_unexpected_outcomes:\n return 1\n \n return 0", "def _summarise(self, pid: int) -> ProcedureSummary:\n with self._state_updating:\n state = self.states[pid]\n script = self.scripts[pid]\n script_args = self.script_args[pid]\n history = self.history[pid]\n return ProcedureSummary(\n id=pid,\n script=script,\n script_args=script_args,\n history=history,\n state=state,\n )", "def execute_query(query):\n conn, cursor = db_connect()\n cursor.execute(query)\n results = cursor.fetchall()\n conn.close()\n return results", "def __execute_query(self, query):\n self.logger.debug(\"Executing query:\\n{0}\\n\".format(query))\n\n try:\n self.cursor.execute(query)\n except psycopg2.ProgrammingError, err:\n self.logger.error(\"Problem with query\\n{0}\\n{1}\".format(query, err))\n sys.exit(-1)\n except psycopg2.DataError, err:\n self.logger.error(\"{0}; query was\\n{1}\".format(err, query))\n sys.exit(-1)\n\n #if no error occured then return the results\n row = self.cursor.fetchone()\n while not row is None:\n yield row\n\n row = self.cursor.fetchone()", "def executeQuery(conn, query):\n cur = conn.cursor()\n cur.execute(query)\n return cur", "def execute(self):\n return SSAResults(self.execute_votable(), url=self.queryurl, session=self._session)", "def execute_aggregate_query(aggregate_keyword, query_tables, query_column):\n\n sum = 0\n number_of_records = 0\n maximum = -99999999\n minimum = 99999999\n for table in query_tables:\n number_of_records += len(RECORDS_DICT[table])\n for record in RECORDS_DICT[table]:\n for i, col in enumerate(COLUMNS_DICT[table]):\n if col == query_column:\n sum += int(record[i])\n maximum = max(maximum, int(record[i]))\n minimum = min(minimum, int(record[i]))\n break\n\n if aggregate_keyword == \"SUM\":\n print \"SUM(\" + query_column + \")\"\n print sum\n elif aggregate_keyword == \"AVG\":\n print \"AVG(\" + query_column + \")\"\n print \"%.2f\" % (float(sum)/number_of_records)\n elif aggregate_keyword == \"COUNT\":\n print \"COUNT(\" + query_column + \")\"\n print number_of_records\n elif aggregate_keyword == \"MAX\":\n print \"MAX(\" + query_column + \")\"\n print maximum\n elif aggregate_keyword == \"MIN\":\n print \"MIN(\" + query_column + \")\"\n print minimum" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calls Impala HS2 API's GetRuntimeProfile method on the given query handle
def get_runtime_profile(self, operation_handle, session_handle):
    req = ImpalaHiveServer2Service.TGetRuntimeProfileReq(operationHandle=operation_handle, sessionHandle=session_handle)

    # TGetRuntimeProfileReq() only works for closed queries
    try:
        self.close_operation(operation_handle)
    except QueryServerException, e:
        LOG.warn('Failed to close operation for query handle, query may be invalid or already closed.')

    resp = self.call(self._client.GetRuntimeProfile, req)
    return resp.profile
[ "def advapi32_GetCurrentHwProfile(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpHwProfileInfo\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "async def on_profile_query(self, args: JsonDict) -> JsonDict:\n\n if not self.hs.config.federation.allow_profile_lookup_over_federation:\n raise SynapseError(\n 403,\n \"Profile lookup over federation is disabled on this homeserver\",\n Codes.FORBIDDEN,\n )\n\n user = UserID.from_string(args[\"user_id\"])\n if not self.hs.is_mine(user):\n raise SynapseError(400, \"User is not hosted on this homeserver\")\n\n just_field = args.get(\"field\", None)\n\n response = {}\n try:\n if just_field is None or just_field == \"displayname\":\n response[\"displayname\"] = await self.store.get_profile_displayname(user)\n\n if just_field is None or just_field == \"avatar_url\":\n response[\"avatar_url\"] = await self.store.get_profile_avatar_url(user)\n except StoreError as e:\n if e.code == 404:\n raise SynapseError(404, \"Profile was not found\", Codes.NOT_FOUND)\n raise\n\n return response", "def test_query_profile_encoded_unknown_query_id(self):\n cluster = ImpalaCluster.get_e2e_test_cluster()\n impalad = cluster.get_any_impalad()\n result = impalad.service.read_debug_webpage(\"query_profile_encoded?query_id=123\")\n assert result.startswith(\"Could not obtain runtime profile: Query id\")", "def _run_query (self, query):\n self._login()\n return self.api_obj.query(query)", "def advapi32_AuditQueryPerUserPolicy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pSid\", \"pSubCategoryGuids\", \"PolicyCount\", \"ppAuditPolicy\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "async def get_task_cpu_profile(self, req) -> aiohttp.web.Response:\n if \"task_id\" not in req.query:\n raise ValueError(\"task_id is required\")\n if \"attempt_number\" not in req.query:\n raise ValueError(\"task's attempt number is required\")\n if \"node_id\" not in req.query:\n raise ValueError(\"node_id is required\")\n\n task_id = req.query.get(\"task_id\")\n attempt_number = req.query.get(\"attempt_number\")\n node_id = req.query.get(\"node_id\")\n\n ip = DataSource.node_id_to_ip[node_id]\n\n duration = int(req.query.get(\"duration\", 5))\n if duration > 60:\n raise ValueError(f\"The max duration allowed is 60: {duration}.\")\n format = req.query.get(\"format\", \"flamegraph\")\n\n # Default not using `--native` for profiling\n native = req.query.get(\"native\", False) == \"1\"\n reporter_stub = self._stubs[ip]\n\n try:\n (pid, _) = await self.get_worker_details_for_running_task(\n task_id, attempt_number\n )\n except ValueError as e:\n raise aiohttp.web.HTTPInternalServerError(text=str(e))\n\n logger.info(\n \"Sending CPU profiling request to {}:{} for {} with native={}\".format(\n ip, pid, task_id, native\n )\n )\n\n reply = await reporter_stub.CpuProfiling(\n reporter_pb2.CpuProfilingRequest(\n pid=pid, duration=duration, format=format, native=native\n )\n )\n\n \"\"\"\n In order to truly confirm whether there are any other tasks\n running during the profiling, we need to retrieve all tasks\n that are currently running or have finished, and then parse\n the task events (i.e., their start and finish times) to check\n for any potential overlap. However, this process can be quite\n extensive, so here we will make our best efforts to check\n for any overlapping tasks. 
Therefore, we will check if\n the task is still running\n \"\"\"\n try:\n (_, worker_id) = await self.get_worker_details_for_running_task(\n task_id, attempt_number\n )\n except ValueError as e:\n raise aiohttp.web.HTTPInternalServerError(text=str(e))\n\n if not reply.success:\n return aiohttp.web.HTTPInternalServerError(text=reply.output)\n logger.info(\"Returning profiling response, size {}\".format(len(reply.output)))\n\n task_ids_in_a_worker = await self.get_task_ids_running_in_a_worker(worker_id)\n return aiohttp.web.Response(\n body='<p style=\"color: #E37400;\">{} {} </br> </p> </br>'.format(\n EMOJI_WARNING,\n WARNING_FOR_MULTI_TASK_IN_A_WORKER + str(task_ids_in_a_worker),\n )\n + SVG_STYLE\n + (reply.output)\n if len(task_ids_in_a_worker) > 1\n else SVG_STYLE + reply.output,\n headers={\"Content-Type\": \"text/html\"},\n )", "def advapi32_AuditQuerySystemPolicy(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"pSubCategoryGuids\", \"PolicyCount\", \"ppAuditPolicy\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def system_image_performance_query(self, namelist):\n smt_userid = zvmutils.get_smt_userid()\n rd = ' '.join((\n \"SMAPI %s API System_Image_Performance_Query\" % smt_userid,\n \"--operands -T %s\" % namelist))\n action = \"get performance info of namelist '%s'\" % namelist\n with zvmutils.log_and_reraise_smt_request_failed(action):\n results = self._request(rd)\n\n ipq_kws = {\n 'userid': \"Guest name:\",\n 'guest_cpus': \"Guest CPUs:\",\n 'used_cpu_time': \"Used CPU time:\",\n 'elapsed_cpu_time': \"Elapsed time:\",\n 'min_cpu_count': \"Minimum CPU count:\",\n 'max_cpu_limit': \"Max CPU limit:\",\n 'samples_cpu_in_use': \"Samples CPU in use:\",\n 'samples_cpu_delay': \"Samples CPU delay:\",\n 'used_memory': \"Used memory:\",\n 'max_memory': \"Max memory:\",\n 'min_memory': \"Minimum memory:\",\n 'shared_memory': \"Shared memory:\",\n }\n\n pi_dict = {}\n pi = {}\n rpi_list = ('\\n'.join(results['response'])).split(\"\\n\\n\")\n for rpi in rpi_list:\n try:\n pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)\n except exception.SDKInternalError as err:\n emsg = err.format_message()\n # when there is only one userid queried and this userid is\n # in 'off'state, the smcli will only returns the queried\n # userid number, no valid performance info returned.\n if(emsg.__contains__(\"No value matched with keywords.\")):\n continue\n else:\n raise err\n for k, v in pi.items():\n pi[k] = v.strip('\" ')\n if pi.get('userid') is not None:\n pi_dict[pi['userid']] = pi\n\n return pi_dict", "def RunWMIQuery(query, baseobj=r\"winmgmts:\\root\\cimv2\"):\n pythoncom.CoInitialize() # Needs to be called if using com from a thread.\n wmi_obj = win32com.client.GetObject(baseobj)\n # This allows our WMI to do some extra things, in particular\n # it gives it access to find the executable path for all processes.\n wmi_obj.Security_.Privileges.AddAsString(\"SeDebugPrivilege\")\n\n # Run query\n try:\n query_results = wmi_obj.ExecQuery(query)\n except pythoncom.com_error as e:\n raise RuntimeError(\"Failed to run WMI query \\'%s\\' err was %s\" % (query, e))\n\n # Extract results from the returned COMObject and return dicts.\n try:\n for result in query_results:\n response = rdf_protodict.Dict()\n properties = (\n list(result.Properties_) +\n list(getattr(result, \"SystemProperties_\", [])))\n\n for prop in properties:\n if prop.Name not in IGNORE_PROPS:\n # Protodict can handle most of the types we care about, but we may\n # get some objects 
that we don't know how to serialize, so we tell the\n # dict to set the value to an error message and keep going\n response.SetItem(prop.Name, prop.Value, raise_on_error=False)\n yield response\n\n except pythoncom.com_error as e:\n raise RuntimeError(\"WMI query data error on query \\'%s\\' err was %s\" %\n (e, query))", "def webapp_performance():\n return profile", "def _profile(self):\n if self.args.profile == \"console\":\n self._console_profile(self._main)\n else:\n cProfile.runctx(\"self._main()\", locals(), locals(), self.args.profile)", "def get(self, architecture, profile):\n return self._get(\"/archs/%s/profiles/%s\" % (base.getid(architecture),\n profile),\n \"profile\")", "def gather_profiling_info(self, extrainfo=None):\n # Add metadata\n info_dict = {}\n info_dict[\"time\"] = self.runtime\n info_dict[\"created-at\"] = time.time()\n if extrainfo:\n info_dict.update(extrainfo)\n\n # get the information from the timing script\n with self.runner_temp.in_dir():\n if os.path.exists(cf.STDERR_FILE):\n with open(cf.STDERR_FILE) as stderr_file:\n stderr = stderr_file.read()\n time_str = stderr.splitlines()[-1]\n time_dat = json.loads(time_str)\n info_dict.update(time_dat)\n\n logger.debug(\"Caching profile information\")\n self.info_dict = info_dict", "def advapi32_PerfQueryInstance(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"hProvider\", \"CounterSetGuid\", \"szInstance\", \"dwInstance\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def advapi32_TraceQueryInformation(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"SessionHandle\", \"InformationClass\", \"TraceInformation\", \"InformationLength\", \"ReturnLength\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def _common_get_processing(self, device, cmd_keyword, kwargs):\n options = {}\n options[\"more_options\"] = kwargs.pop('more_options', None)\n options[\"return_mode\"] = str(kwargs.pop(\"return_mode\", None)).strip().upper()\n options[\"timeout\"] = int(kwargs.pop(\"timeout\", self.default[\"CLI_COMMIT_TIMEOUT\"]))\n\n cmd_element = []\n cmd_element.append(\"show system security-profile {}\".format(cmd_keyword))\n\n if options[\"more_options\"]:\n cmd_element.append(options[\"more_options\"])\n\n if options[\"return_mode\"] == \"TEXT\":\n response_format = \"text\"\n else:\n response_format = \"xml\"\n\n response = dev.execute_cli_command_on_device(\n device=device,\n command=\" \".join(cmd_element),\n channel=\"pyez\",\n format=response_format,\n timeout=options[\"timeout\"],\n )\n\n if options[\"return_mode\"] == \"TEXT\":\n return response\n\n response = self.xml.strip_xml_response(self.xml.xml_to_pure_dict(response), return_list=True)\n if cmd_keyword == \"nat-interface-port-ol\":\n cmd_keyword = \"nat-interface-po\"\n\n main_path_keyword = \"security-profile-{}-information\".format(cmd_keyword)\n all_entry_list = []\n for item in response:\n info = {}\n for keyword in item:\n if keyword != main_path_keyword:\n info[self.tool.underscore_and_lowercase_transit(keyword)] = str(item[keyword])\n\n for entry in self.tool.set_element_list(item[main_path_keyword][\"security-profile-information\"]):\n for keyword in entry:\n info_keyword = self.tool.underscore_and_lowercase_transit(keyword)\n info[info_keyword] = str(entry[keyword])\n\n all_entry_list.append(copy.deepcopy(info))\n\n return all_entry_list", "def fetch_appfw_profile(self, device, **kwargs):\n all_entry_list = self._common_get_processing(device=device, 
cmd_keyword=\"appfw-profile\", kwargs=kwargs)\n device.log(message=\"{} return value:\\n{}\".format(self.tool.get_current_function_name(), self.tool.pprint(all_entry_list)))\n return all_entry_list", "def execute_json_prom_query(json_query):\n query_string = create_prom_query(json_query)\n query_res = make_prom_query(query_string)\n if check_json_result(query_res):\n return get_data_json_result(query_res)\n else:\n print('Prometheus query failed:', query_string)", "def test_search_appfw_profile(self, mock_execute_cli_command_on_device):\n print(\"HA HE setup with summary response\")\n mock_execute_cli_command_on_device.return_value = self.xml.xml_string_to_dict(self.response[\"SA_HE_APPFW_PROFILE\"])\n response = self.ins.search_appfw_profile(\n device=self.mock_device_ins,\n logical_system_name=[\"root\", \"in\"],\n resources_used=0,\n resources_reserved=0,\n resources_maximum=\"0-57344 in\",\n security_profile_name=\"SP in\",\n )\n self.assertTrue(response)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for the existence of a piece with the given type and name.
def exists_piece(conn, piecetype, piecename): return bool( conn.execute( """SELECT * FROM pieces WHERE piecetype=? AND piecename=?;""", (piecetype, piecename), ).fetchall() )
[ "def CheckType(player, type):\n print \"Checking\", player.name + \"'s hand:\"\n for elem in type:\n print \"Testing\", elem.tag\n if not elem.tag in TYPEELEMS.keys():\n print elem.tag, \"type subelement not recognized.\"\n continue\n elif not TYPEELEMS[elem.tag](player, elem):\n print elem.tag, \"type subelement violated.\"\n return False\n else:\n print elem.tag, \"OK.\"\n print player.name +\"'s hand is OK\"\n return True", "def check(self, partnames):\n\n for i in partnames:\n if i not in shader_part:\n return False\n\n return True", "def are_valid_pieces(pieces):\n return all([p in PIECES for p in pieces])", "def has_cooling(cool_type):\n if cool_type is None:\n return False\n if cool_type == \"none\":\n return False\n if cool_type == \"other\":\n return False\n if cool_type in COOL_TYPE:\n return True\n return False", "async def unit_exists(self, unit: str) -> bool:", "def check_component_type(component_type, component):\n\n # TODO stub\n return True", "def is_the_d_type_exists(self, the_d_type):\n conn = sqlite3.connect(\"dampers.db\")\n conn.execute(\"PRAGMA foreign_keys=1\") # enable cascade deleting and updating.\n cur = conn.cursor()\n try:\n cur.execute(\"SELECT d_types.d_type FROM d_types WHERE d_types.d_type=:d_type\", {\"d_type\": the_d_type})\n # cur.execute(\"SELECT d_types.d_type FROM d_types WHERE d_types.d_type='{}';\".format(the_d_type))\n except sqlite3.DatabaseError as err:\n raise sqlite3.DatabaseError(err)\n else:\n if cur.fetchone() is None:\n return False # d_type not exists.\n else:\n return True # d_type exists.\n # try:\n # cur.__next__()\n # except StopIteration:\n # return False # d_type not exists.\n # else:\n # return True # d_type exists.\n finally:\n cur.close()\n conn.close()", "def is_valid_type(typename):\n return typename in MCType.instances", "def has_heating(heat_type):\n if heat_type is None:\n return False\n if heat_type == \"none\":\n return False\n if heat_type == \"other\":\n return False\n if heat_type == \"heat_pump_dual_fuel\":\n return False\n if heat_type in HEAT_TYPE:\n return True\n return False", "def check_lookup_exists(feature, lookup_name):\n for x in feature:\n try:\n if x.name == lookup_name:\n return True\n except AttributeError:\n continue\n\n return False", "def test_is_usable(self):\n self.assertFalse(isinstance(self.cup, item.Usable))\n self.assertFalse(isinstance(self.gold_piece, item.Usable))\n self.assertFalse(isinstance(self.silver_piece, item.Usable))\n self.assertFalse(isinstance(self.shilling, item.Usable))\n self.assertTrue(isinstance(self.usable_money, item.Usable))\n self.assertTrue(isinstance(self.potion, item.Usable))\n self.assertTrue(isinstance(self.fancy_potion, item.Usable))\n self.assertFalse(isinstance(self.sword, item.Usable))", "def does_match_exist(self, inst):\n\n answer = True\n if self.type is not None:\n answer &= isinstance(inst, self.type)\n if self.name is not None:\n answer &= inst.name == self.name\n if self.parent is not None:\n answer &= self.parent is inst.parent\n if self.fullname is not None:\n if inst.name:\n answer &= self.fullname == full_name(inst)\n else:\n answer = False\n return answer", "def is_ext_pack_usable(self, name):\n if not isinstance(name, basestring):\n raise TypeError(\"name can only be an instance of type basestring\")\n usable = self._call(\"isExtPackUsable\",\n in_p=[name])\n return usable", "def exists(self, symbol: str) -> bool:\n pass", "def check_known_types():\n has_error = False\n for entry in KNOWN_TYPEINFO_NAMES:\n typeinfo, known_typeid, known_type_string = 
entry[:3]\n typeid = get_typeid(typeinfo)\n if typeid != known_typeid:\n print(f\"Unexpected typeid for {typeinfo!r}: {typeid:#018x} != {known_typeid:#018x}\")\n has_error = True\n\n try:\n type_string = decode_typeinfo(typeinfo)\n except ValueError:\n # The type might be unknown to old versions of c++filt (like 2.32), from a known-list\n if None in entry[3:]:\n type_string = None\n else:\n raise\n\n if type_string is not None and type_string != known_type_string:\n # The typeid might be an alternate one (from old binutils), so check the known ones\n if type_string not in entry[3:]:\n print(f\"Unexpected typeid for {typeinfo!r}: {type_string!r} != {known_type_string!r}\")\n has_error = True\n\n print(f\"{typeinfo!r} ({typeid:#018x}): {type_string!r}\")\n assert not has_error", "def check_cue_exist(cue_name):\n sl = SettingLoader()\n settings = sl.settings\n package_name = \"odie.cues\" + \".\" + cue_name.lower() + \".\" + cue_name.lower()\n if settings.resources.cue_folder is not None:\n neuron_resource_path = settings.resources.neuron_folder + \\\n os.sep + cue_name.lower() + os.sep + \\\n cue_name.lower() + \".py\"\n if os.path.exists(neuron_resource_path):\n imp.load_source(cue_name.capitalize(), neuron_resource_path)\n package_name = cue_name.capitalize()\n\n try:\n mod = __import__(package_name, fromlist=[cue_name.capitalize()])\n getattr(mod, cue_name.capitalize())\n except AttributeError:\n raise ModuleNotFoundError(\n \"[AttributeError] The module %s does not exist in the package %s \" % (cue_name.capitalize(),\n package_name))\n except ImportError:\n raise ModuleNotFoundError(\n \"[ImportError] The module %s does not exist in the package %s \" % (cue_name.capitalize(),\n package_name))\n return True", "def check_type_name(type_name: str, type_per_name: Dict[str, Type[object]]) -> None:\n # Remove any subarray stuff here.\n type_name = type_name.split(\"[\")[0]\n if type_name not in type_per_name:\n close_matches = get_close_matches(\n type_name, type_per_name.keys(), 3, cutoff=0.4\n )\n close_matches_str = \", \".join(f\"'{match}'\" for match in close_matches)\n extra_help = \"\"\n if len(close_matches) > 1:\n extra_help = f\" Did you mean one of {close_matches_str}?\"\n elif close_matches:\n extra_help = f\" Did you mean {close_matches_str}?\"\n raise InvalidStructureError( # pylint: disable=raise-missing-from\n f\"Type '{type_name}' is not valid in this context.{extra_help}\"\n )", "def add_pieces(self, piece_list):\n success = True\n # iterate through each piece, adding the piece if valid\n for piece in piece_list:\n if not self.add_piece(piece):\n # warning for invalid piece location - does not kill engine\n print(f'Warning: piece at {piece[0]} not added - space occupied')\n success = False\n return success", "def _check_partno(self, ctx=None):\n if hasattr(self, \"Item\") and self.teilenummer and not self.Item:\n raise ue.Exception(\"part_number\", self.teilenummer, self.t_index)", "def has_weapon(self, wpnname):\n return wpnname in map(lambda w: w.__class__.__name__, self.weapons)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a piece entry of the given type in the conn db. conn takes a sqlite3.connect object.
def create_piece(conn, piecetype, piecename, content, catchall=""): if not exists_piece(conn, piecetype, piecename): conn.execute( """INSERT INTO pieces VALUES (?,?,?,?)""", (piecetype, piecename, content, catchall), ) conn.commit()
[ "def sqlite_tab(sqlite_conn):\n def create_tab(tab_name):\n cursor = sqlite_conn.cursor()\n cursor.execute('''CREATE TABLE aapl_1_day( \n time TEXT, \n open REAL, \n high REAL, \n low REAL, \n close REAL, \n volume REAL)''')\n return sqlite_conn\n return create_tab", "def create_connection(\n conn_id, conn_type, host, login, password, port, extra, schema\n):\n conn = Connection(\n conn_id=conn_id,\n conn_type=conn_type,\n host=host,\n login=login,\n password=password,\n port=port,\n extra=extra,\n schema=schema\n )\n session = settings.Session()\n session.add(conn)\n session.commit()", "def make_table(self):\n curs = self.conn.cursor()\n curs.execute(\"CREATE TABLE IF NOT EXISTS facts(id INTEGER PRIMARY KEY AUTOINCREMENT, txt VARCHAR(255) NOT NULL)\")\n curs.close()\n self.conn.commit()", "def add_record(title, description):\n connection = sqlite3.connect('blog.sqlite3')\n cursor = connection.cursor()\n sql = f'INSERT INTO records (Title, Description) VALUES (\"{title}\", \"{description}\")'\n cursor.execute(sql)\n connection.commit()\n connection.close()\n return None", "def add_type(self, d_type):\n conn = sqlite3.connect(\"dampers.db\")\n conn.execute(\"PRAGMA foreign_keys=1\") # enable cascade deleting and updating.\n cur = conn.cursor()\n try:\n cur.execute(\"INSERT INTO d_types(d_type) VALUES(:d_type)\", {\"d_type\": d_type})\n except sqlite3.DatabaseError as err:\n raise sqlite3.DatabaseError(err)\n else:\n conn.commit()\n finally:\n cur.close()\n conn.close()", "def create_item(cls, item):\n\n connection = sqlite3.connect(\"data.db\")\n cursor = connection.cursor()\n\n cursor.execute(\"INSERT INTO items VALUES (?, ?)\", (item[\"name\"], item[\"price\"]))\n\n connection.commit()\n connection.close()", "def write(self, conf, conn):\n db = conn['warehouse']\n if self.table not in db.tables:\n if conf['ENV'] in ['development', 'testing']:\n table = db.create_table(\n self.table,\n primary_id=self.__primary_key,\n primary_type=fields.Text.column_type)\n # work around a bug whereby the table is not persisted\n table.table\n table = db[self.table]\n action = 'created'\n if table.find_one(**{self.__primary_key: self[self.__primary_key]}):\n action = 'updated'\n del self['meta_id']\n\n ensure_fields = False\n if conf['ENV'] in ['development', 'testing']:\n ensure_fields = True\n\n table.upsert(self, [self.__primary_key], ensure=ensure_fields, types=self.__column_types)\n\n logger.debug('Record - %s: %s - %s fields', action, self, len(self))", "def init_sqlite_table(conn, create_sqlite_table_sql):\n # http://www.sqlitetutorial.net/sqlite-python/create-tables/\n try:\n cursor = conn.cursor()\n cursor.execute(create_sqlite_table_sql)\n except Error as e:\n print(e)", "def insert_into_journal(journal_type, sqlite_journal_path, table_name, filename, state):\n conn = sqlite3.connect(sqlite_journal_path)\n cursor = conn.cursor()\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n query = \"insert into %s values('%s','%s', %s)\" %(table_name, timestamp, filename, state)\n cursor.execute(query)\n conn.commit()\n cursor.close()\n conn.close()\n return True", "def handleAddRecord(self, inputWindow, database):\n for (key, value) in self.entries.items():\n if \"tableID\" in key or \"tableName\" in key:\n continue\n types = next(\n x for x in self.dataTypes if x[\"tableID\"] == database[\"tableID\"])\n try:\n types[key](value.get())\n except ValueError:\n self.entries = {}\n self.createEntries(inputWindow, database)\n Label(inputWindow, text=f\"{value.get()} is not the correct 
type\").grid(\n row=len(database.items()) + 2, column=0)\n return\n for (key, value) in self.entries.items():\n if \"tableID\" in key or \"tableName\" in key:\n continue\n result = types[key](value.get())\n database[key].append(result)\n inputWindow.destroy()\n self.entries = {}\n self.createMainInterface()", "def createLinks():\n conn = sqlite3.connect(conf['path'])\n cur = conn.cursor()\n # Add dp as string later\n # name varchar(32) not null,\\\n # desc varchar(250) unique not null,\\\n cur.execute('''Create Table If Not Exists Link(\\\n pid integer Primary Key,\\\n link varchar(50),\\\n utc timestamp not null\\\n );''')\n conn.commit()\n conn.close()", "def createKindleDatabase(db_name):\n array = [\n {\n 'word_id': \"en:even\", 'lookups_id': 'CE',\n 'word': \"even\", 'stem': \"even\",\n 'category': 0, 'usage': \"The integer is even\"\n },\n {\n 'word_id': \"en:watched\", 'lookups_id': 'WA',\n 'word': \"watch\", 'stem': \"watch\",\n 'category': 0, 'usage': \"I've watched this show\"\n },\n {\n 'word_id': \"en:nervous\", 'lookups_id': 'NE',\n 'word': \"nervous\", 'stem': \"nervous\",\n 'category': 0, 'usage': \"I'm nervous\"\n },\n {\n 'word_id': \"en:guests\", 'lookups_id': 'GU',\n 'word': \"guests\", 'stem': \"guest\",\n 'category': 100, 'usage': \"I had a lot of guests there\"\n },\n {\n 'word_id': \"en:doing\", 'lookups_id': 'DO',\n 'word': \"doing\", 'stem': \"do\",\n 'category': 100, 'usage': \"He enjoyed doing this\"\n }\n ]\n words_create_command = \"\"\"\n CREATE TABLE WORDS\n (id TEXT PRIMARY KEY NOT NULL,\n word TEXT,\n stem TEXT,\n lang TEXT,\n category INTEGER DEFAULT 0,\n timestamp INTEGER DEFAULT 0,\n profileid TEXT);\n \"\"\"\n lookups_create_command = \"\"\"\n CREATE TABLE LOOKUPS\n (id TEXT PRIMARY KEY NOT NULL,\n word_key TEXT,\n book_key TEXT,\n dict_key TEXT,\n pos TEXT,\n usage TEXT,\n timestamp INTEGER DEFAULT 0);\n \"\"\"\n words_insert_command = \"\"\"\n INSERT INTO \"WORDS\" VALUES\n (:id,\n :word,\n :stem,\n 'en',\n :category,\n 0,\n '')\n \"\"\"\n lookups_insert_command = \"\"\"\n INSERT INTO \"LOOKUPS\" VALUES\n (:id,\n :word_key,\n 'book_key',\n '',\n 'pos',\n :usage,\n 0)\n \"\"\"\n with sqlite3.connect(db_name) as conn:\n conn.execute(words_create_command)\n conn.execute(lookups_create_command)\n for row in array:\n conn.execute(words_insert_command,\n {\n 'id': row['word_id'],\n 'word': row['word'],\n 'stem': row['stem'],\n 'category': row['category']\n })\n conn.execute(lookups_insert_command,\n {\n 'id': row['lookups_id'],\n 'word_key': row['word_id'],\n 'usage': row['usage']\n })\n return array", "def _create_collection_table(self):\n self.c.execute(\"\"\"CREATE TABLE collection (\n id INTEGER,\n datetime NUMERIC,\n seed INTEGER,\n act TEXT,\n world TEXT,\n scene TEXT,\n quest TEXT,\n quest_step INTEGER,\n class TEXT\n )\"\"\")", "def create_db_and_table(input_csv_file, dbname):\n\n connection = sqlite3.connect(dbname)\n cursor = connection.cursor()\n cursor.execute('CREATE TABLE IF NOT EXISTS conn_info (id INTEGER PRIMARY KEY AUTOINCREMENT, first_name TEXT, last_name TEXT, company TEXT, position TEXT, notes TEXT)')\n\n df = pd.read_csv(input_csv_file)\n num_records = df.shape[0]\n res = [insert_record(connection, cursor, a,b,c,d) for a,b,c,d in zip(df['First Name'],df['Last Name'],df['Company'],df['Position'])]\n\n connection.commit()\n connection.close()\n print('successfully created database and table. 
Inserted records: ',num_records)", "def __init__(self, table):\n import sqlite3\n self.table = table\n try:\n self.conn = sqlite3.connect(\"backend/database/recommendations.db\")\n except Exception as e:\n print(e)\n self.c = self.conn.cursor()", "def addRow(self, conn, info, dryrun=False, create=False, table=None):\n if table is None:\n table = self.config.table\n sql = \"INSERT\"\n if self.config.ignore:\n sql += \" OR IGNORE\"\n sql += \" INTO %s VALUES (NULL\" % table\n sql += \", ?\" * len(self.config.columns)\n sql += \")\"\n values = [info[col] for col in self.config.columns]\n if dryrun:\n print \"Would execute: '%s' with %s\" % (sql, \",\".join([str(value) for value in values]))\n else:\n conn.execute(sql, values)", "def add_database(self,\r\n name,\r\n conn_str,\r\n client_conn_str=None,\r\n conn_type=\"shared\"):\r\n\r\n item = {\r\n \"type\" : \"egdb\",\r\n \"path\" : \"/enterpriseDatabases/\" + name,\r\n \"info\" : {\r\n \"connectionString\" : conn_str,\r\n \"dataStoreConnectionType\" : conn_type\r\n }\r\n }\r\n\r\n if client_conn_str is not None:\r\n item['info']['clientConnectionString'] = client_conn_str\r\n\r\n is_managed = False\r\n if conn_type == \"serverOnly\":\r\n is_managed = True\r\n\r\n item['info']['isManaged'] = is_managed\r\n\r\n params = {\r\n \"f\" : \"json\",\r\n \"item\" : item\r\n }\r\n status, msg = self._validate_item(item=params['item'])\r\n if status == False:\r\n raise Exception(msg)\r\n path = self._admin_url + \"/data/registerItem\"\r\n res = self._portal.con.post(path, params, verify_cert=False)\r\n if res['status'] == 'success' or res['status'] == 'exists':\r\n return Datastore(self, \"/enterpriseDatabases/\" + name)\r\n else:\r\n print(str(res))\r\n return None", "def ledger_entry(conn, accountid, ledgertype, amount):\n with conn.cursor() as curs:\n sql = (\n 'INSERT INTO ledger (account_id, type, amount)'\n ' VALUES (%s, %s, %s)'\n )\n res = curs.execute(sql, (accountid, ledgertype, amount))\n if res is not None:\n print('\\nledger_entry...', res)", "def insert(sql, clue):\n # clue is [game, airdate, round, category, value, clue, answer]\n # note that at this point, clue[4] is False if round is 3\n if \"\\\\\\'\" in clue[6]:\n clue[6] = clue[6].replace(\"\\\\\\'\", \"'\")\n if \"\\\\\\\"\" in clue[6]:\n clue[6] = clue[6].replace(\"\\\\\\\"\", \"\\\"\")\n if not sql:\n print(clue)\n return\n sql.execute(\"INSERT OR IGNORE INTO airdates VALUES(?, ?);\", (clue[0], clue[1], ))\n sql.execute(\"INSERT OR IGNORE INTO categories(category) VALUES(?);\", (clue[3], ))\n category_id = sql.execute(\"SELECT id FROM categories WHERE category = ?;\", (clue[3], )).fetchone()[0]\n clue_id = sql.execute(\"INSERT INTO documents(clue, answer) VALUES(?, ?);\", (clue[5], clue[6], )).lastrowid\n sql.execute(\"INSERT INTO clues(game, round, value, daily_double) VALUES(?, ?, ?, ?);\", (clue[0], clue[2], clue[4], clue[7], ))\n sql.execute(\"INSERT INTO classifications VALUES(?, ?)\", (clue_id, category_id, ))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
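Taken together, the two records above document a small sqlite3-backed piece store: create_piece only inserts when exists_piece reports no match on (piecetype, piecename). Below is a minimal usage sketch, assuming both helpers are in scope; the four-column pieces schema is an assumption inferred from the INSERT statement, since neither helper creates the table.

import sqlite3

# Assumed schema: a four-column "pieces" table matching the INSERT in create_piece.
conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE pieces (piecetype TEXT, piecename TEXT, content TEXT, catchall TEXT)"
)

create_piece(conn, "header", "site-banner", "<h1>Hello</h1>")
print(exists_piece(conn, "header", "site-banner"))   # True
print(exists_piece(conn, "footer", "site-banner"))   # False

# A second call with the same type/name is a no-op thanks to the exists check.
create_piece(conn, "header", "site-banner", "<h1>Changed</h1>")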
Make sure the superuser can access the API
def test_superuser_access(self): self.client.force_authenticate(self.superuser) response = self.client.get("/api/robot/") assert response.status_code == 200
[ "def check_authorization(self):\n pass", "def test_get_as_suporte(self):\n self.client.force_login(self.user_suporte)\n self.response = self.client.get(r('category:sub_create'))\n self.assertEqual(403, self.response.status_code)", "def admin_require_permission():\n if not current_user.is_authenticated or not current_user.has_permission('admin'):\n abort(404)", "def test_has_access_not_admin(self):\n self.api_get(\n '/rest/u/test-admin/profile/has_access?user=test-admin&perm=admin',\n user='test-user',\n status=403)", "def test_admin_only_with_valid_user_and_invalid_token(self):\n response = self.app.post(\n \"/api/1.0/releases/\",\n data=None, # For this test, we don't need a real payload\n headers={\n 'User': self.admin_id,\n 'Authorization': \"wrong\"\n }\n )\n self.assertEqual(401, response.status_code)", "def authorize(self):\n return True", "def protect_superuser(sender, instance, **kwargs):\n if instance.is_superuser:\n raise PermissionDenied", "def test_authenticated_and_not_superuser_view_subscribers(self):\n\n self.client.login(\n email=\"regularuser@testing.com\", password=\"regularusertestingpassword\"\n )\n response = self.client.get(path=\"/api/subscribers/\")\n subscribers = Subscriber.objects.all()\n serializer = SubscriberSerializer(subscribers, many=True)\n self.client.logout()\n self.assertNotEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_plain_user_cannot_access_users_list(self):\n NO_SUPERUSER = 1\n self.client.login(\n username=self.users[NO_SUPERUSER].get('username'),\n password=self.users[NO_SUPERUSER].get('password')\n )\n response = self.client.get('/1.0/users/')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def handle_missing_authorization(self, *args, **kwargs):\n return False", "def _check_permission_ha(self, request):\n if request[REQUEST_FROM] != self.sys_homeassistant:\n raise APIForbidden(\"Only HomeAssistant can use this API!\")", "def test_authenticated_user_access(self):\n self.client.force_authenticate(self.user)\n response = self.client.get(\"/api/robot.json/\")\n assert response.status_code == 200", "def test_auth_permission(self):\n unauth_client = APIClient()\n\n response = unauth_client.put(self.url, {'answer': 'update answer'}, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = unauth_client.delete(self.url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_allow(self):\n self.validate_test(self.v1_controller.allow() == 'GET')", "def admin_requests():\n if g.role == \"Admin\":\n res = requestObj.view_all()\n return jsonify(res), 200\n else:\n return jsonify(response=\"Sorry you don't have enough \\\n rights to view this resource\"), 401", "def test_api_disable_user__err_403__cant_disable_myself_user(self):\n dbsession = get_tm_session(self.session_factory, transaction.manager)\n admin = dbsession.query(User).filter(User.email == \"admin@admin.admin\").one()\n uapi = UserApi(current_user=admin, session=dbsession, config=self.app_config)\n gapi = GroupApi(current_user=admin, session=dbsession, config=self.app_config)\n groups = [gapi.get_one_with_name(\"users\")]\n test_user = uapi.create_user(\n email=\"test@test.test\",\n password=\"password\",\n name=\"bob\",\n groups=groups,\n timezone=\"Europe/Paris\",\n lang=\"fr\",\n do_save=True,\n do_notify=False,\n )\n uapi.enable(test_user, do_save=True)\n uapi.save(test_user)\n transaction.commit()\n user_id = 
int(test_user.user_id)\n\n self.testapp.authorization = (\"Basic\", (\"test@test.test\", \"password\"))\n # check before\n res = self.testapp.get(\"/api/v2/users/{}\".format(user_id), status=200)\n res = res.json_body\n assert res[\"user_id\"] == user_id\n assert res[\"is_active\"] is True\n res = self.testapp.put_json(\"/api/v2/users/{}/disabled\".format(user_id), status=403)\n assert res.json_body\n assert \"code\" in res.json_body\n assert res.json_body[\"code\"] == ErrorCode.INSUFFICIENT_USER_PROFILE\n # Check After\n res = self.testapp.get(\"/api/v2/users/{}\".format(user_id), status=200)\n res = res.json_body\n assert res[\"user_id\"] == user_id\n assert res[\"is_active\"] is True", "def test_otoroshi_controllers_adminapi_users_controller_web_authn_admins(self):\n pass", "def pre_authorization(self):\n pass", "def test_get_without_auth(self):\n response = self.client.get('/api/v1/restock/')\n\n # Verify access denied\n self.assertEqual(response.status_code, 401)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make sure an authenticated user can access the API
def test_authenticated_user_access(self): self.client.force_authenticate(self.user) response = self.client.get("/api/robot.json/") assert response.status_code == 200
[ "def check_authorization(self):\n pass", "def authorize(self):\n return True", "def check_auth(self):\n\n if not self.authenticated:\n raise UserNotAuthenticatedError()", "def handle_missing_authorization(self, *args, **kwargs):\n return False", "def test_get_with_auth(self):\n response = self.client.get(\n '/api/v1/restock/',\n HTTP_AUTHORIZATION='Token {}'.format(self.token)\n )\n\n # Access allow\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(response.json(), self.data)", "def test_superuser_access(self):\n self.client.force_authenticate(self.superuser)\n response = self.client.get(\"/api/robot/\")\n assert response.status_code == 200", "def test_api_authorization(self):\n response = self.get('/api/v1/run/')\n self.assertEqual(200, response.status_code)\n\n json_data = json.loads(response.content)\n self.assertEqual(1, len(json_data['objects']))\n self.assertEqual(1, json_data['objects'][0]['id'])", "def check_authorization(self):\n self.token", "def test_unauthenticated_user_authenticated_route(self, app):\n res = app.test_client().get('/vision', follow_redirects=True)\n assert b'Please login first.' in res.data", "def check_auth(self, **kwargs):\n #Test for auth token\n if 'lgAuthToken' in cherrypy.serving.request.params:\n # REST request, disable session.\n cherrypy.serving.session = None\n\n # Get token and authenticate\n token = cherrypy.serving.request.params['lgAuthToken']\n del cherrypy.serving.request.params['lgAuthToken']\n user = config.auth.get_user_from_token(token)\n if user is not None:\n d = user.todict()\n d['__name__'] = d['name']\n d['__id__'] = user.id\n else:\n d = None\n config.auth.serve_user_from_dict(d)\n else:\n #Make the user into an object for conveniences\n user = cherrypy.session.get('auth', None)\n config.auth.serve_user_from_dict(user)\n\n #Now validate static permissions, if any\n access_groups = kwargs['groups']\n if access_groups is not None:\n if len(access_groups) > 0 and access_groups[0] == 'all:':\n access_groups = access_groups[1:]\n require_groups_all(*access_groups)\n else:\n require_groups(*access_groups)", "def valid_credentials(self):\n add_headers = self.auth_method(\"requests\")\n r = self._http_request('GET', self.cfg.api_url, '/', headers=add_headers)\n if r.status_code in [200]:\n return True\n LOG.info('API response to token = {} {}'.format(r.status_code, r.reason))\n return False", "def test_restricted(self):\n self.login(self.email)\n response = self.client.get('/restricted')\n self.assert_200(response)\n\n self.client.get('/logout')\n response = self.client.get('/restricted')", "def test_unauthorized_view_fails(self):\n response = self.api_client.get('/account/', format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_authenticated_view_subscribers(self):\n\n response = self.client.get(path=\"/api/subscribers/\")\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def pre_authorization(self):\n pass", "def check_request(request):\n if not request.user.is_authenticated():\n raise Http404\n\n # if not request.is_ajax():\n # raise Http404", "def can_request_assistance(user):\n return _is_in_acl(user, 'authorized')", "def test_check_authorization(self):\n self.instance.set_client_id(\"client-id\", \"client-secret\")\n self.instance.check_authorization(\"super-fake-access-token\")\n\n self.session.get.assert_called_once_with(\n url_for(\"applications/client-id/tokens/super-fake-access-token\"),\n params={\"client_id\": None, \"client_secret\": 
None},\n auth=(\"client-id\", \"client-secret\"),\n )", "def test_api_key_access_granted(populate_users):\n user = populate_users[0]\n rv = client.get('/api/auth/me',\n headers={'X-Yeti-API': user.api_key},\n content_type='application/json')\n assert rv.status_code == 200\n response = json.loads(rv.data)\n assert response['authenticated']\n assert response['user'] == 'admin@email.com'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
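The two access tests above rely on self.user, self.superuser, and DRF's force_authenticate. A minimal setUp that would support them is sketched below; the class name and credentials are placeholders, and the real suite may build its users in a shared base class rather than like this.

from django.contrib.auth import get_user_model
from rest_framework.test import APITestCase


class RobotApiAccessTests(APITestCase):
    def setUp(self):
        User = get_user_model()
        # Placeholder users; only the is_superuser distinction matters to the tests.
        self.user = User.objects.create_user(username="user", password="pw")
        self.superuser = User.objects.create_superuser(
            username="admin", email="admin@example.com", password="pw"
        )

    def test_authenticated_user_access(self):
        self.client.force_authenticate(self.user)
        response = self.client.get("/api/robot.json/")
        assert response.status_code == 200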
Verify we can get back csv results
def test_result_csv_format(self): self.client.force_authenticate(self.superuser) response = self.client.get("/api/robot.csv") expected = [ "id,outcome,date,duration,repo_name,branch_name,source_file,test_name,robot_tags,robot_keyword,message", f"2,Pass,{self.today},0.1,repo1,main,file1.robot,Passing 1,,,", f"3,Pass,{self.today},0.1,repo1,main,file1.robot,Passing 2,,,", f"4,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 1,,KW1,epic fail", f"5,Fail,{self.today},0.1,repo2,feature/robot,file2.robot,Failing 2,,KW1,epic fail", f"6,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 3,,KW2,epic fail", f'7,Fail,{self.today},0.1,repo2,feature/robot,file3.robot,Failing 4,"t1,t2",KW3,ʃıɐɟ ɔıdǝ', ] actual = response.content.decode().splitlines() self.assertCountEqual(expected, actual)
[ "def test_export_csv_to_file(self):\n pass", "def test_export_csv_in_job(self):\n pass", "def test_remote_csv(self):\n with hxl.data(URL_CSV, InputOptions(timeout=10)) as source:\n self.compare_input(source)", "def test_CSV(self):\n CSVReporter(self.filename).write_report(self.results)\n self.assertTrue(self.filename)\n with self.assertRaises(ValueError) as err:\n CSVReporter(self.filename, \"r\").write_report(self.results)\n self.assertEqual(str(err.exception),\n \"Filemode must allow write options.\")", "def test_csvdata(db, specialization, slu1, slu2, student, grade_slu1, grade_slu2):\n\n specialization.unit_count = 2\n spc_list = [specialization]\n unit_list = [slu1, slu2]\n object_list = [\n {\n \"user\": student,\n \"grades\": [grade_slu1, grade_slu2],\n \"submission_date\": datetime(year=2021, month=8, day=15),\n \"total_score\": 38,\n }\n ]\n text = csvdata(spc_list, unit_list, object_list)\n assert (\n text == \"username,slack_id,submission_date,total_score,S01-SLU01,S01-SLU02\\r\\n\"\n \"test_student,U12J14XV12Z,2021-08-15 00:00:00,38,18,20\\r\\n\"\n )", "def finishedCSV():\n exceptionRaised = False\n try:\n # Tries to open the file\n open('weatherDataNSW.csv')\n except Exception:\n exceptionRaised = True\n\n # Returns True if present and False if not\n return not exceptionRaised", "def test_read_csv(self):\n customer_list = []\n with open(os.path.join(TEST_DIRECTORY, TEST_CUSTOMERS_FILE), newline='') as customer_file:\n for row in reader(customer_file):\n customer_list.append(dict(zip(self.customer_fields, row)))\n\n result_list, error_count = read_csv(TEST_DIRECTORY, TEST_CUSTOMERS_FILE,\n self.customer_fields)\n self.assertListEqual(result_list, customer_list)\n self.assertEqual(error_count, 0)", "def HasCSV(self):\n return self.__has('CSV')", "def verify_output_csv(output_str, key=None, value=None, expected_rows=None):\n dataframe = pandas.read_csv(io.StringIO(output_str))\n if key is not None:\n assert key in dataframe.columns\n\n if value is not None:\n assert dataframe[key][0] == value\n\n if expected_rows is not None:\n assert len(dataframe) == expected_rows\n\n return True", "def test_top_users_csv_filtered(self):\n factory = RequestFactory()\n\n csv_url = reverse(\"programs:csv_user_totals\", kwargs={\"pk\": self.program1.pk})\n\n data = {\"start_date\": \"2019-01-01\", \"end_date\": \"2019-02-01\"}\n request = factory.get(csv_url, data)\n response = CSVUserTotals.as_view()(request, pk=self.program1.pk)\n csv_content = response.content.decode(\"utf-8\")\n\n expected_output = (\n \"Username,Links added,Links removed,Net Change\\r\\n\" \"Jim,2,0,2\\r\\n\"\n )\n self.assertEqual(csv_content, expected_output)", "def test_iter_csv_rows_ok():\n rows = query_csv.iter_csv_rows(_PATH, delim=' ')\n assert list(rows) == [\n {'s': 'a', 'i': 1, 'f': 1.0},\n {'s': 'b', 'i': 2, 'f': 2.0},\n {'s': 'c', 'i': 3, 'f': 3.0},\n ]", "def test_top_users_csv(self):\n factory = RequestFactory()\n\n csv_url = reverse(\"programs:csv_user_totals\", kwargs={\"pk\": self.program1.pk})\n\n request = factory.get(csv_url)\n response = CSVUserTotals.as_view()(request, pk=self.program1.pk)\n csv_content = response.content.decode(\"utf-8\")\n\n expected_output = (\n \"Username,Links added,Links removed,Net Change\\r\\n\"\n \"Jim,2,0,2\\r\\n\"\n \"Mary,1,0,1\\r\\n\"\n \"Bob,0,1,-1\\r\\n\"\n )\n self.assertEqual(csv_content, expected_output)", "def test_csv_report(class_testsuite, instances_fixture, tmpdir):\n class_testsuite.instances = instances_fixture\n filename = 
tmpdir.mkdir(\"test_csv\").join(\"twister_csv_report.csv\")\n class_testsuite.csv_report(filename)\n assert os.path.exists(filename)\n assert os.stat(filename).st_size != 0\n\n mydict = {'test': [], 'arch' : [], 'platform' : [], 'status': [],\n 'extra_args': [], 'handler': [], 'handler_time': [],\n 'ram_size': [], 'rom_size': []}\n\n with open(filename, \"r\") as file:\n csv_reader = csv.reader(file)\n assert set(mydict.keys()) == set(list(csv_reader)[0])\n\n for instance in class_testsuite.instances.values():\n mydict[\"test\"].append(instance.testcase.name)\n mydict[\"arch\"].append(instance.platform.arch)\n mydict[\"platform\"].append(instance.platform.name)\n instance_status = instance.status if instance.status is not None else \"\"\n mydict[\"status\"].append(instance_status)\n args = \" \".join(instance.testcase.extra_args)\n mydict[\"extra_args\"].append(args)\n mydict[\"handler\"].append(instance.platform.simulation)\n mydict[\"handler_time\"].append(instance.metrics.get(\"handler_time\", \"\"))\n mydict[\"ram_size\"].append(instance.metrics.get(\"ram_size\", '0'))\n mydict[\"rom_size\"].append(instance.metrics.get(\"rom_size\", '0'))\n\n dict_file = open(filename, \"r\")\n dict_reader = csv.DictReader(dict_file)\n columns = defaultdict(list)\n for row in dict_reader:\n for (key, value) in row.items():\n columns[key].append(value)\n for _, value in enumerate(mydict):\n assert columns[value] == mydict[value]\n dict_file.close()", "def assert_csv(self, path: Path) -> bool:\r\n return str.lower(path.suffix) == '.csv'", "def testSqlToCSV1(self, path=\"C:\\\\Users\\\\Nitin\\\\Downloads\\\\studentOutputTC.csv\", tableName=\"error\"):\n self.assertIsNone((self.msc.sqlToCSV(path, tableName)), \"Invalid SQL Table to Convert to CSV\")", "def test_nba_scraper_output_csv(tmp_path):\n # tmp_path is a pathlib object.\n d = tmp_path / \"sub\"\n d.mkdir()\n # Create a csv file object for storing scraped data to be assigned later on\n p = d / \"nba_2001.csv\"\n\n # Note that p is a pathlib object. To get path in string format, you have\n # to use str(p.resolve())\n nba_2001 = nba_scraper.nba_scraper(\n 2001, \"regular\", csv_path=str(p.resolve()))\n nba_2001_csv = pd.read_csv(str(p.resolve()))\n\n assert nba_2001.shape == nba_2001_csv.shape", "def test_execute_export_9(self):\n include_columns = [\"phage.Cluster\"]\n export_db.execute_export(self.alchemist, self.test_dir,\n self.export_test_dir.name, \"csv\", table=\"gene\",\n include_columns=include_columns)\n\n csv_path = self.export_test_dir.joinpath(\n f\"gene.csv\")\n\n with open(csv_path) as csv_handle:\n reader = csv.reader(csv_handle)\n headers = next(reader)\n\n self.assertTrue(\"Cluster\" in headers)\n self.assertEqual(\"GeneID\", headers[0])\n self.assertFalse(\"Translation\" in headers)", "def _saveCSV( self ):", "def test_execute_export_11(self):\n export_db.execute_export(self.alchemist, self.test_dir,\n self.export_test_dir.name, \"csv\",\n sequence_columns=True)\n\n csv_path = self.export_test_dir.joinpath(\n f\"phage.csv\")\n\n with open(csv_path) as csv_handle:\n reader = csv.reader(csv_handle)\n headers = next(reader)\n\n self.assertTrue(\"Cluster\" in headers)\n self.assertEqual(\"PhageID\", headers[0])\n self.assertTrue(\"Sequence\" in headers)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
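The CSV test above requests /api/robot.csv, which implies a CSV renderer plus format-suffix routing on the API. One plausible wiring is sketched below; it assumes the third-party djangorestframework-csv package, and RobotTestResultViewSet is a hypothetical viewset name, not something shown in the records.

# settings.py fragment -- assumes djangorestframework-csv is installed
REST_FRAMEWORK = {
    "DEFAULT_RENDERER_CLASSES": [
        "rest_framework.renderers.JSONRenderer",
        "rest_framework_csv.renderers.CSVRenderer",
    ],
}

# urls.py fragment -- DefaultRouter includes ".json"/".csv" format-suffix patterns
from rest_framework import routers

router = routers.DefaultRouter()
router.register(r"robot", RobotTestResultViewSet, basename="robot")  # hypothetical viewset
urlpatterns = router.urls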
Verify superuser sees all results
def test_testresult_filter__as_superuser(self): self.client.force_authenticate(self.superuser) response = self.client.get("/api/robot.json") data = response.json() assert data["count"] == 3
[ "def test_superuser_can_see_any_profile(self):\n SUPERUSER = 0\n self.client.login(\n username=self.users[SUPERUSER].get('username'),\n password=self.users[SUPERUSER].get('password')\n )\n for user in User.objects.all():\n response = self.client.get('/1.0/users/{0}/'.format(user.pk))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_authorized_results(self):\n student = self.create_student_with_performances(count_perfs=1)\n self.login(student.person.user.username)\n self.__go_to_first_exam_marks_page(student)\n student_perf = student_performance.search(registration_id=student.registration_id)[0]\n message = self.get_localized_message('performance_result_note_not_autorized', student.person.language). \\\n format(self.get_localized_message(student_perf.session_locked, student.person.language))\n self.check_page_not_contains_string(message)", "def test_non_superuser(self):\r\n\r\n # add some articles as the superuser\r\n for i in range(5):\r\n self.new_article('This is a test', 'with some content')\r\n\r\n # now add some as a non-superuser\r\n joe = User.objects.create_user('joe', 'joe@bob.com', 'bob')\r\n joe.is_staff = True\r\n joe.user_permissions = Permission.objects.filter(codename__endswith='_article')\r\n joe.save()\r\n\r\n self.client.login(username='joe', password='bob')\r\n for i in range(5):\r\n self.new_article('I am not a super user', 'har har', author=joe)\r\n\r\n # display all articles that the non-superuser can see\r\n res = self.client.get(reverse('admin:articles_article_changelist'))\r\n self.assertEqual(res.content.count('_selected_action'), 5)\r\n\r\n # make sure the superuser can see all of them\r\n self.client.login(username='admin', password='admin')\r\n res = self.client.get(reverse('admin:articles_article_changelist'))\r\n self.assertEqual(res.content.count('_selected_action'), 10)", "def show_to_superuser(self, item_dict):\n yep = True\n if self.is_true(item_dict, 'render_for_superuser') and not self.is_superuser:\n yep = False\n return yep", "def test_superuser_can_view_result_buttons(self):\n self._become_superuser()\n response = self.client.get(\n reverse(self.reverse_view_name, args=self.reverse_view_args)\n )\n self.assertContains(response, \"div-proposal-result-buttons\")", "def test_indexed(self):\n self.assertTrue(Secret.user.indexed)", "def test_user_can_see_its_profile_and_cannot_other_profiles(self):\n NO_SUPERUSER = 1\n self.client.login(\n username=self.users[NO_SUPERUSER].get('username'),\n password=self.users[NO_SUPERUSER].get('password')\n )\n for user in User.objects.all():\n response = self.client.get('/1.0/users/{0}/'.format(user.pk))\n if user.username == self.users[NO_SUPERUSER].get('username'):\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n else:\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def test_users_listed(self) -> None:\n url = reverse(\"admin:core_user_changelist\")\n response_ = self.client.get(url)\n\n self.assertContains(response_, self.user.name)\n self.assertContains(response_, self.user.email)", "def test_superuser_can_access_users_list(self):\n SUPERUSER = 0\n self.client.login(\n username=self.users[SUPERUSER].get('username'),\n password=self.users[SUPERUSER].get('password')\n )\n response = self.client.get('/1.0/users/')\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n for user in response.data.get('results'):\n db_user = User.objects.get(pk=user.get('id'))\n self.assertIsNotNone(db_user)", "def test_process_list_user_tenants(self):\n error, out = self.process_list_user_tenants()\n for err in error: assert err == 0", "def test_user_list_starred(self):\n pass", "def test_authenticated_and_not_superuser_view_subscribers(self):\n\n self.client.login(\n email=\"regularuser@testing.com\", password=\"regularusertestingpassword\"\n )\n response = self.client.get(path=\"/api/subscribers/\")\n subscribers = Subscriber.objects.all()\n serializer = SubscriberSerializer(subscribers, many=True)\n self.client.logout()\n self.assertNotEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_list_all_users(self):\n pass", "def test_admin_stats(self):\n self.instance.admin_stats(option=\"all\")\n self.session.get.assert_called_once_with(\n enterprise_url_for(\"enterprise/stats/all\")\n )", "def test_school_list_view_with_superuser_logged_in(self):\n session = self.session\n session['user_type'] = 'SuperUser'\n session.save()\n response = self.client.get(reverse('school_list'))\n self.assertContains(response, \"School List\")\n self.assertContains(response, \"Test School\")", "def test_with_superuser(self):\n self.user.is_superuser = True\n self.user.save()\n\n self.basicAuth()\n\n response = self.get(reverse('api-user-roles'))\n\n roles = response.data['roles']\n\n for rule in RuleSet.RULESET_NAMES:\n self.assertIn(rule, roles.keys())\n\n for perm in ['view', 'add', 'change', 'delete']:\n self.assertIn(perm, roles[rule])", "def test_user_current_check_starring(self):\n pass", "def test_not_authorized_results(self):\n student = self.create_student_with_performances(count_perfs=1, authorized_results=False)\n self.login(student.person.user.username)\n self.__go_to_first_exam_marks_page(student)\n student_perf = student_performance.search(registration_id=student.registration_id)[0]\n message = self.get_localized_message('performance_result_note_not_autorized', student.person.language).\\\n format(self.get_localized_message(student_perf.session_locked, student.person.language))\n self.check_page_contains_string(message)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
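The filter test above only pins down the superuser side (all three results come back); the non-superuser behaviour is not visible in this excerpt. A common way such per-role filtering is implemented is a get_queryset override on the viewset — the sketch below is purely illustrative, and the model, serializer, and restriction logic are all placeholders.

from rest_framework import viewsets


class RobotTestResultViewSet(viewsets.ReadOnlyModelViewSet):
    serializer_class = RobotTestResultSerializer  # placeholder name

    def get_queryset(self):
        qs = RobotTestResult.objects.all()  # placeholder model
        if self.request.user.is_superuser:
            # Superusers see every result, matching the filter test above.
            return qs
        # The non-superuser restriction is not shown in this excerpt.
        return qs.none()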
Load a .fam file (PLINK sample information file) into a DataFrame
def load_sample_info(fam_file, categorical_phenotype): df = pd.read_table(fam_file, header=None, sep=" ") df.columns = ["FID", "IID", "IID_father", "IID_mother", "sex", "phenotype"] # Update 'sex' df["sex"] = df["sex"].astype("category") df["sex"] = df["sex"].cat.rename_categories({1: "male", 2: "female", 0: "unknown"}) # Encode the phenotype DEFAULT_CAT_MAP = {1: "Control", 2: "Case"} if categorical_phenotype: df["phenotype"] = df["phenotype"].astype("category") df["phenotype"].cat.rename_categories(DEFAULT_CAT_MAP, inplace=True) df.loc[~df["phenotype"].isin(DEFAULT_CAT_MAP.values()), "phenotype"] = None print(f"\tLoaded information for {len(df)} samples from '{fam_file.name}'") return df
[ "def read_feat(file):\n df = pd.read_csv(file, sep=\" \", names=[\"node_id\"] + list(range(0, 1364)))\n return df", "def load_hep_data(self,variables2plot=[]):\n file = uproot.open(self.hep_data)\n data = file[self.treename]\n self.df = data.pandas.df( self.features+['target']+variables2plot )\n #self.df = df.sample(frac=0.2)\n print self.df.dtypes\n\n self.metadata = file['metadata'] # names of samples, target values, etc.\n\n return", "def read_metaphlan(self):\n df = pd.read_hdf(self.hdf5_fp, \"/abund/metaphlan/table\")\n self.metaphlan_df = df", "def fits_to_df(fname):\n\n d = fits.open(\"FoF\\\\processing\\\\datasets\\\\\" + fname)\n print(d.info())\n col_num = int(input(\"Choose the table to import: \"))\n t = Table(d[col_num].data)\n df = t.to_pandas()\n d.close()\n print(\"Dataframe of table \" + str(col_num) + \" initialized.\")\n print(df.head())\n return df", "def get_data(fn):\n rows = []\n dbf = dbflib.open(fn)\n for i in range(dbf.record_count()):\n rows.append(dbf.read_record(i))\n\n return pd.DataFrame(rows)", "def abstract_dataframe(filename):\n pmid_ab_dict = medline_parser(filename)\n df = pd.DataFrame.from_dict(pmid_ab_dict, orient='index').reset_index()\n df.columns = ['pmid', 'title']\n df.to_csv('../data/pmid_titles_metabolism_5years.csv', index=False, index_label=False)", "def load_vcf_as_df(vcf_file):\n vcf_as_df = pd.read_csv(vcf_file, header='infer', comment='#', sep='\\t')\n vcf_as_df.columns = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'SAMPLE']\n return vcf_as_df", "def load_dataframe(fileName):\n\tpath = dataPath + fileName\n\treturn pd.read_csv(path, header=None, names=featuresNames)", "def load_local_lipidmaps_db(db_fname):\n df = pd.read_table(db_fname, index_col=0, dtype={'CHEBI_ID':str})\n return df", "def getGenesFromMaf(maffile):\n\n maf_head = pd.read_csv(gzip.open(maffile),sep='\\t',comment='#')\n ##get hugo_symbol, and case_id\n return maf_head[['Hugo_Symbol', 'case_id', 'HGVSc', 'One_Consequence', 'SIFT', 'PolyPhen']]", "def read_featnames(file):\n df = pd.read_csv(file, sep=\" \", names=[\"featname\"], index_col=0)\n return df", "def read_visa(path: str) -> pd.DataFrame:\n return read_aux_table(\n path,\n 'visa'\n )", "def PAT_load_data(directory, search_format):\n \n file_list = [os.path.join(directory, f) for f in os.listdir(directory) if f.endswith('.lvm')]\n data = []\n\n for i in range(len(file_list)):\n\n result = re.search(search_format, file_list[i])\n E = float(result.group(1).replace('_','.'))\n df = pd.read_csv(file_list[i],skiprows=21,header=1) #12 is the minimum number of rows to skip\n df = df.drop(['X_Value'],axis=1)\n df = df.drop(['Comment'],axis=1)\n df = df.rename(columns = {'Untitled':'Time'})\n df['Ion Mass'] = df['Ion Mass'][0]\n df['Distance to Target'] = df['Distance to Target'][0]\n df['Bias Voltage'] = df['Bias Voltage'][0]\n df['Laser Energy'] = E\n data.append(df)\n #This line sorts the table in the list according to laser energy\n data.sort(key = lambda x: x['Laser Energy'][0])\n\n return data", "def importPLINKDATA(self, bfile):\n filename = bfile + '.bim'\n self.SNPs = pd.read_table(\n bfile+'.bim', sep=None, names=['CHR', 'RSID', 'Cm', 'POS', 'ALT', 'REF'], engine='python')\n self.Samples = pd.read_table(bfile+'.fam', sep=None,\n names=['FID', 'IID', 'PID', 'MID', 'Sex', 'Pheno'], engine='python')\n self.nSNPs = self.SNPs.shape[0]\n self.nSamples = self.Samples.shape[0]\n filename = bfile + '.bed'\n num_bytes = math.ceil(self.nSamples / 4.0)\n GENO = np.fromfile(filename, 
dtype=np.uint8, count=-1)\n GENO = GENO[3:]\n self.GENO = np.reshape(GENO, (num_bytes, - 1), order='F')", "def load_data(name: str, location: str = SAVE_LOCATION) -> pd.DataFrame:\n df = pd.read_feather(location + name + '.feather')\n if 'date' in df.columns.values:\n df = df.set_index('date')\n return df", "def sample_dataframe():\n return read_sample_dataframe()", "def _load_reft_data(reft_file, index_name=\"btl_fire_num\"):\n reft_data = pd.read_csv(reft_file, usecols=[\"btl_fire_num\", \"T90\", \"REFTMP_FLAG_W\"])\n reft_data.set_index(index_name)\n reft_data['SSSCC_TEMP'] = Path(reft_file).stem.split(\"_\")[0]\n reft_data['REFTMP'] = reft_data['T90']\n\n return reft_data", "def read_training_data(fname, features=None):\n file = open(fname)\n params = [\"radius\", \"texture\", \"perimeter\",\"area\",\"smoothness\",\"compactness\",\"concavity\",\"concave points\",\"symmetry\",\"fractal dimension\"];\n stats = [\"(mean)\", \"(stderr)\", \"(worst)\"]\n feature_labels = set([y+x for x in stats for y in params])\n feature_map = {params[i]+stats[j]:j*len(params)+i for i in range(len(params)) for j in range(len(stats))}\n if features is None: features = feature_labels\n feature_vectors = {}\n patient_diagnoses = {}\n for line in file:\n row = line.split(\",\")\n patient_ID = int(row[0])\n patient_diagnoses[patient_ID] = -1 if row[1]=='B' else +1\n feature_vectors[patient_ID] = Vec(features, {f:float(row[feature_map[f]+2]) for f in features})\n return rowdict2mat(feature_vectors), Vec(set(patient_diagnoses.keys()), patient_diagnoses)", "def read_fsa_db(db,fp,org_id) :\n\n cdsseq=\"\"\n tag=\"\"\n for line in fp :\n if line[0] == '>' :\n loaddb(cdsseq,org_id,tag,db)\n\n tag = line[1:].strip().split()[0]\n tag=tag.replace(\"ORFN:\",\"ORFP_\")\n cdsseq = \"\"\n else :\n cdsseq += line.strip()\n \n loaddb(cdsseq,org_id,tag,db)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
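A self-contained sketch of the .fam loader above in action: it writes a tiny space-separated sample file (FID, IID, father, mother, sex, phenotype) and loads it with the categorical phenotype encoding. The two sample rows are fabricated, and the sketch assumes load_sample_info is in scope and a pandas version that still accepts the loader's inplace=True on rename_categories.

from pathlib import Path
from tempfile import TemporaryDirectory

with TemporaryDirectory() as tmp:
    fam_file = Path(tmp) / "study.fam"
    # Two fabricated samples: a male case (sex=1, phenotype=2)
    # and a female control (sex=2, phenotype=1).
    fam_file.write_text("FAM1 IID1 0 0 1 2\nFAM1 IID2 0 0 2 1\n")

    samples = load_sample_info(fam_file, categorical_phenotype=True)
    print(samples[["IID", "sex", "phenotype"]])
    # IID1 -> male / Case, IID2 -> female / Control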
Load a .bim file (PLINK extended MAP file) into a list of variants
def load_variant_info(bim_file, max_variants): variant_info = pd.read_table(bim_file, header=None, sep="\t") # Note 'position' is in centimorgans, 'coordinate' is what pandas-genomics refers to as 'position' (in base-pairs) variant_info.columns = [ "chromosome", "variant_id", "position", "coordinate", "allele1", "allele2", ] # chromosome is a category variant_info["chromosome"] = variant_info["chromosome"].astype("category") # Limit num_variants if max_variants is not None: if max_variants < 1: raise ValueError(f"'max_variants' set to an invalid value: {max_variants}") else: variant_info = variant_info.iloc[:max_variants] variant_list = [create_variant(row) for idx, row in variant_info.iterrows()] print( f"\tLoaded information for {len(variant_list)} variants from '{bim_file.name}'" ) return variant_list
[ "def load_all_variants( vcf_file, variants ):\n\t\n\twith open( vcf_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] != '#':\n\t\t\t\tparts = line.strip().split('\\t')\n\t\t\t\ttry:\n\t\t\t\t\tvariants[ parts[0] + '_%_' + parts[1].zfill( 9 ) ]\n\t\t\t\texcept KeyError:\n\t\t\t\t\tvariants.update( { parts[0] + '_%_' + parts[1].zfill( 9 ): line } )\n\t\t\tline = f.readline()\n\treturn variants", "def load_beam(self, filepath: str) -> Iterable[Brick]:", "def importPLINKDATA(self, bfile):\n filename = bfile + '.bim'\n self.SNPs = pd.read_table(\n bfile+'.bim', sep=None, names=['CHR', 'RSID', 'Cm', 'POS', 'ALT', 'REF'], engine='python')\n self.Samples = pd.read_table(bfile+'.fam', sep=None,\n names=['FID', 'IID', 'PID', 'MID', 'Sex', 'Pheno'], engine='python')\n self.nSNPs = self.SNPs.shape[0]\n self.nSamples = self.Samples.shape[0]\n filename = bfile + '.bed'\n num_bytes = math.ceil(self.nSamples / 4.0)\n GENO = np.fromfile(filename, dtype=np.uint8, count=-1)\n GENO = GENO[3:]\n self.GENO = np.reshape(GENO, (num_bytes, - 1), order='F')", "def _load_probes(self):\n self.probe_num = 2057\n probes = []\n roi = np.zeros([self.probe_num, 4], dtype = np.int32)\n probetxt = open(osp.join(self._root_dir, 'query_info.txt'), 'r')\n try:\n i = 0\n for line in probetxt:\n line = line.strip('\\r\\n')\n pid, x, y, w, h, img_name = line.split(' ')\n roi[i][0] = float(x)\n roi[i][1] = float(y)\n roi[i][2] = float(w)\n roi[i][3] = float(h)\n roi[:, 2:] += roi[:, :2]\n i += 1\n im_name = osp.join(self._data_path, str(img_name)+'.jpg')\n probes.append((im_name, roi, pid))\n finally:\n probetxt.close()\n\n return probes", "def read_smiles(file_path):\n if any(['gz' in ext for ext in os.path.basename(file_path).split('.')[1:]]):\n with gzip.open(file_path) as f:\n smiles = f.read().splitlines()\n smiles = [smi.decode('utf-8') for smi in smiles]\n else:\n with open(file_path, 'rt') as f:\n smiles = f.read().splitlines()\n return smiles", "def __load(self, ifile):\n # load map entries from file\n output = {}\n optmatch = None\n finput = AltFileInput(ifile, encoding = self.encd)\n for line in finput:\n if line:\n optmatch = RE_OPTIONS.match(line)\n if optmatch:\n if self.flags:\n raise RuleFormatError( \\\n msg = \"Multiple flag lines are not supported\", \\\n efile = finput)\n else:\n self.flags = optmatch.group(1)\n self.ignorecase = RegExp(self.flags, \"\").re.flags & re.IGNORECASE\n continue\n # find map entries\n line = skip_comments(line)\n m = MAP_DELIMITER.search(line)\n if m:\n src, trg = self.__normalize_quotes(line[0:m.start()], \\\n line[m.end():])\n if not (src and trg):\n print src.encode('utf-8')\n print trg.encode('utf-8')\n raise RuleFormatError(efile = finput)\n src = re.escape(src)\n if self.ignorecase:\n output[src.lower()] = trg\n else:\n output[src] = trg\n elif line:\n raise RuleFormatError(efile = finput)\n return output", "def load(self):\n\n with open(\"map.txt\", \"r\") as map: #read file\n map_frame = [] #list for rows \n for line in map:\n map_line = [] #list for characters\n for char in line:\n if char != '\\n': #check for line break in text file\n map_line.append(char) #add character to list\n map_frame.append(map_line) #add line to list\n self.frame = map_frame", "def parse_bsp( array ):\n array = array.view( 'c' )\n magic = array[:4].tostring()\n if magic.startswith(b'PK'):\n raise RuntimeError(\"You are likely attempting to load a .pk3 file, Magic Number mismatch: %r\"%(\n magic,\n ))\n assert magic == b'IBSP', magic \n alen = array.shape[-1]\n iarray 
= array[:alen-(alen%4)].view( i4 )\n version = iarray[1]\n assert version == 0x2e, version\n direntries = iarray[2:2+17*2]\n direntries = numpy.reshape( direntries, (17,2))\n model = {}\n for (lump,dtype),(offset,length) in zip( LUMP_ORDER, direntries ):\n data = array[offset:offset+length]\n loader = globals().get( 'load_%s'%(lump,))\n if loader:\n model[lump] = data = loader( data )\n else:\n dtype = numpy.dtype( dtype )\n extra = len(data) % dtype.itemsize\n if extra:\n log.warn( 'Extra data in lump %s: %s bytes', lump, extra )\n data = data[:-extra]\n model[lump] = data = data.view( dtype )\n log.debug( 'Loaded %s %s', data.shape[0], lump )\n return model", "def load_genome(path):\n with open(path, \"rb\") as f:\n return pickle.load(f)", "def _read_maps(self):\n try:\n f = open(self._procpath('%d/maps' % self.pid))\n except IOError as e:\n # ignore killed process\n if e.errno != errno.ENOENT:\n raise\n return\n for line in f:\n try:\n so = line.split()[5].strip()\n self.mapped.append(so)\n except IndexError:\n pass", "def load_bioms(self):\n bioms = {}\n bioms_file = Path(\"resources/data/bioms.yml\")\n if bioms_file.is_file():\n with open(bioms_file, 'r') as stream:\n try:\n bioms_dict = yaml.safe_load(stream)\n if bioms_dict is not None:\n for biom_name, biom_values in bioms_dict.items():\n bioms[biom_name] = Biom(biom_name,\n float(biom_values['atmospheric_diffusion']),\n float(biom_values['atmospheric_absorption']),\n float(biom_values['cloud_reflection']),\n float(biom_values['avg_rainfall_per_day']),\n float(biom_values['groundwater']))\n except yaml.YAMLError as exc:\n print(exc)\n self.bioms = bioms", "def importRBFs(filePath):\n data = __importData(filePath)\n if data is None:\n return\n for k, v in data.iteritems():\n rbfModule = RBF_MODULES[v[\"rbfType\"]]\n rbfModule.createRBFFromInfo({k: v})", "def load_vcf(self,vcf_file):\n\n # setup the dictionaries of expected SNPs for each lineage\n self._reset_lineage_snps()\n\n # open the VCF file for reading\n vcf_reader = vcf.Reader(open(vcf_file, 'r'))\n\n # read the VCF file line-by-line\n for record in vcf_reader:\n\n # consider each lineage in turn\n for lineage_name in self.lineages:\n\n # only proceed if the genome position occurs in the list of identifiable positions\n if record.POS in self.reference_snps[lineage_name].keys():\n\n # parse the record\n for sample in record.samples:\n geno = sample['GT'][0]\n\n # if there is a null call, record a hyphen which won't match, regardless of the reference\n if geno == '.':\n\n self.sample_snps[lineage_name][int(record.POS)]=\"-\"\n\n # otherwise replace the H37Rv base with the actual base from the VCF file\n elif geno != 0:\n self.sample_snps[lineage_name][int(record.POS)]=record.ALT[int(geno)-1]", "def parse_battleground_path(self, path=\"\"):\n if path == \"\":\n path = self.battleground_path\n\n with open(path, 'r') as battleground_file:\n parsed = [line.strip() for line in battleground_file.readlines()]\n self.bg_map = [list(i) for i in zip(*parsed)]", "def load_file(self, filename):\n image = pygame.image.load(filename)\n virtual_texture = self.load_image(image)\n self.__filename_map[filename] = virtual_texture\n return virtual_texture", "def load_ing_list(path=\"data/douguo.recipe\"):\n\n recipes = load_corrected_recipe_from_file(path)\n ing_list = extract_recipe_ingredient_list(recipes)\n return ing_list", "def load_data(gmp, neut_file, subs_file):\n # load all neutral sites from zipped 012 mask array\n assert neut_file.endswith('.npz')\n neut_pos, = 
np.where(np.load(neut_file)['neutmask'] != 0)\n\n # mask neutral sites beyond genetic mask and get their gpos\n neut_pos = gmp.gmap_mask(neut_pos)\n neut_gpos = gmp.interp_gpos(neut_pos)\n\n # gmap_mask substitution positions, convert to genetic map positions\n if subs_file.endswith('.npy'):\n sub_pos = np.load(subs_file)\n else:\n sub_pos = np.loadtxt(subs_file)\n\n # mask substitution sites beyond genetic mask and get their gpos\n sub_pos = gmp.gmap_mask(sub_pos)\n sub_gpos = gmp.interp_gpos(sub_pos)\n\n return neut_pos, neut_gpos, sub_gpos", "def load_VCF(vcf_file, biallelic_only=False, load_sample=True, sparse=True):\n if vcf_file[-3:] == \".gz\" or vcf_file[-4:] == \".bgz\":\n infile = gzip.open(vcf_file, \"rb\")\n is_gzip = True\n else:\n infile = open(vcf_file, \"r\")\n is_gzip = False\n \n FixedINFO = {}\n contig_lines = []\n comment_lines = []\n var_ids, obs_ids, obs_dat = [], [], []\n \n for line in infile:\n if is_gzip:\n line = line.decode('utf-8')\n if line.startswith(\"#\"):\n if line.startswith(\"##contig=\"):\n contig_lines.append(line.rstrip())\n if line.startswith(\"#CHROM\"):\n obs_ids = line.rstrip().split(\"\\t\")[9:]\n key_ids = line[1:].rstrip().split(\"\\t\")[:8]\n for _key in key_ids:\n FixedINFO[_key] = []\n else:\n comment_lines.append(line.rstrip())\n else:\n list_val = line.rstrip().split(\"\\t\") #[:5] #:8\n if biallelic_only:\n if len(list_val[3]) > 1 or len(list_val[4]) > 1:\n continue\n if load_sample:\n obs_dat.append(list_val[8:])\n for i in range(len(key_ids)):\n FixedINFO[key_ids[i]].append(list_val[i])\n var_ids.append(\"_\".join([list_val[x] for x in [0, 1, 3, 4]]))\n infile.close()\n\n RV = {}\n RV[\"variants\"] = var_ids\n RV[\"FixedINFO\"] = FixedINFO\n RV[\"samples\"] = obs_ids\n RV[\"GenoINFO\"] = parse_sample_info(obs_dat, sparse=sparse)\n RV[\"contigs\"] = contig_lines\n RV[\"comments\"] = comment_lines\n return RV", "def load_auction_p(fname):\n return pickle.load(open(fname, \"rb\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
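The .bim loader pairs with the .fam loader from the earlier record; the sketch below reads both halves of a PLINK fileset. The data/study prefix is a placeholder, and the snippet assumes both loaders plus the create_variant helper (referenced but not shown in the record above) are importable, so it is illustrative rather than directly runnable.

from pathlib import Path

plink_prefix = Path("data/study")  # placeholder: study.bed / study.bim / study.fam

samples = load_sample_info(plink_prefix.with_suffix(".fam"), categorical_phenotype=True)
variants = load_variant_info(plink_prefix.with_suffix(".bim"), max_variants=1000)
print(f"{len(samples)} samples x {len(variants)} variants")

# max_variants=None keeps every variant; values < 1 raise a ValueError.
all_variants = load_variant_info(plink_prefix.with_suffix(".bim"), max_variants=None)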