query (string, 9–9.05k chars) | document (string, 10–222k chars) | negatives (sequence, 19–20 items) | metadata (dict) |
---|---|---|---|
Identifies genes that are significantly enriched for insertions (CTGs). This function takes insertions from multiple samples and identifies whether any genes are affected by insertions more frequently than would be expected by chance. Such genes are called Commonly Targeted Genes (CTGs). CTGs are selected by comparing the number of insertions within each gene to the number expected from the background insertion rate, which is modeled using a Poisson distribution. | def test_ctgs(
    insertions,  # type: List[Insertion]
    reference,  # type: Reference
    gene_ids=None,  # type: Set[str]
    chromosomes=None,  # type: Set[str]
    pattern=None,  # type: str
    per_sample=True,  # type: bool
    window=None  # type: Tuple[int, int]
):
    # Default to shared chromosome sequences (typically drops some
    # of the more esoteric extra scaffold/patch sequences).
    if chromosomes is None:
        reference_seq = pyfaidx.Fasta(str(reference.fasta_path))
        reference_gtf = GtfIterator(reference.indexed_gtf_path)
        chromosomes = list(
            set(reference_seq.keys()) & set(reference_gtf.contigs))
        if len(chromosomes) == 0:
            raise ValueError('No chromosomes are shared between the reference '
                             'sequence and reference gtf files')
    if len(chromosomes) == 0:
        raise ValueError('At least one chromosome must be given')
    # Determine gene windows using GTF.
    logging.info('Generating gene windows')
    gene_windows = _build_gene_windows(
        reference.indexed_gtf_path, window=window, chromosomes=chromosomes)
    # Subset insertions to gene intervals.
    insertions = _subset_to_windows(insertions, gene_windows)
    if gene_ids is None:
        gene_ids = set(ins.metadata['gene_id'] for ins in insertions)
    # Collapse insertions per gene/sample (recommended).
    # Corrects for hopping/multiple detection issues.
    if per_sample:
        logging.info('Collapsing insertions')
        insertions = list(_collapse_per_sample(insertions))
    # Calculate total number of pattern occurrences within intervals.
    logging.info('Counting pattern occurrences')
    reference_seq = pyfaidx.Fasta(str(reference.fasta_path))
    total = count_total(
        reference_seq, pattern=pattern, intervals=gene_windows.values())
    # Calculate p-values for each gene.
    logging.info('Calculating significance for genes')
    insertion_trees = GenomicIntervalTree.from_objects_position(
        insertions, chrom_attr='seqname')
    p_values = {
        gene_id: test_region(
            insertions=insertions,
            reference_seq=reference_seq,
            region=gene_windows[gene_id],
            total=total,
            pattern=pattern,
            filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],
            insertion_trees=insertion_trees)
        for gene_id in gene_ids
    }
    # Build result frame.
    result = pd.DataFrame.from_records(
        iter(p_values.items()), columns=['gene_id', 'p_value'])
    # Calculate corrected p-value using bonferroni correction.
    result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)
    # Sort by q-value and p-value.
    result.sort_values(by=['q_value', 'p_value'], inplace=True)
    if len(insertions) > 0:
        # Annotate with gene_name if possible.
        if 'gene_name' in insertions[0].metadata:
            name_map = {
                ins.metadata['gene_id']: ins.metadata['gene_name']
                for ins in insertions
            }
            result.insert(1, 'gene_name', result['gene_id'].map(name_map))
        else:
            result['gene_name'] = np.nan
        # Annotate with frequency.
        frequency = (Insertion.to_frame(insertions)
                     .groupby('gene_id')['sample'].nunique()
                     .reset_index(name='n_samples'))
        result = pd.merge(result, frequency, on='gene_id', how='left')
    else:
        result['gene_name'] = np.nan
        result['n_samples'] = np.nan
    return result | [
"def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))",
"def joint_genotypes(variant_df, all_have_gt_samples=None, any_has_gt_samples=None, min_count=1):\n\n # Check (a) criterion.\n all_have_gt = True\n if all_have_gt_samples != None:\n all_have_gt = len(variant_df[variant_df[\"sample\"].isin(all_have_gt_samples)]) == len(all_have_gt_samples)\n\n # Check (b) criterion.\n any_has_gt = True\n if any_has_gt_samples != None:\n any_has_gt = len(variant_df[variant_df[\"sample\"].isin(any_has_gt_samples)]) >= min_count\n\n return all_have_gt and any_has_gt",
"def check_HGT_AT_vs_global_AT(gene_AT_cont_dic, AI, the_mean, standard_dev,\n gene_of_interest, comment, sd_numbers,\n gene_to_expression,\n gene_to_exon_count,\n gene_to_HGTspeces_discription_dict,\n genomic_cov_from_mean,\n gene_to_HGT_percent_identity):\n \n # user defined number of standard deviations away from\n # the mean for the stats\n sd_numbers = float(sd_numbers)\n # print \"gene_of_interest = \", gene_of_interest\n current_gene_AT = gene_AT_cont_dic[gene_of_interest]\n\n AI = float(AI)\n\n num_sd_from_mean = how_many_sd_from_mean(the_mean, standard_dev,\n current_gene_AT)\n assert how_many_sd_from_mean(10, 2, 2) ==4\n HGTspecies_description = gene_to_HGTspeces_discription_dict[gene_of_interest]\n HGTspecies = HGTspecies_description.split(\"\\t\")[0]\n description = HGTspecies_description.split(\"\\t\")[1]\n # description = description.split(\"[\")[0]\n lower_threshold = float(the_mean) - (sd_numbers * float(standard_dev))\n upper_threshold = float(the_mean) + (sd_numbers * float(standard_dev))\n # print lower_threshold, upper_threshold\n if current_gene_AT < lower_threshold or current_gene_AT > upper_threshold:\n # if calling with RNAseq assembly\n transcript = gene_of_interest.split(\"|\")[0]\n # call dict to get expression\n try:\n TPM = gene_to_expression[gene_of_interest]\n except:\n ValueError #Rnaseq assebmly not genome\n TPM = gene_to_expression[transcript]\n try:\n exons = gene_to_exon_count[gene_of_interest]\n except:\n ValueError\n # note if this crashes you may need to specifcy introns,\n # not exons to count.\n # exons = gene_to_exon_count[transcript]\n exons = 1\n try:\n per_ident = gene_to_HGT_percent_identity[gene_of_interest]\n except:\n ValueError\n per_ident = gene_to_HGT_percent_identity[transcript]\n #print \"gene: %s\\tAT_cont: %d\\tcomment: ...%s... \\texpression: %s\\texons: %d\\t%s\\t%s\"\\\n #%(gene_of_interest, current_gene_AT, comment, TPM, exons, HGTspecies, description)\n print (\"gene: %s\\tAI = %s\\tAT_cont: %d\\tAT_cont_numSD_fromMean: %.2f\\tgenomic_cov_from_mean: %.2f\\t\\texpression: %s\\texons: %d\\tcomment: ...%s...\\t%s\\t%s\" %(gene_of_interest,\n AI, current_gene_AT,\\\n num_sd_from_mean, genomic_cov_from_mean, \\\n TPM, exons, comment, HGTspecies, description))\n HGT_info_dataformatted = \"%s\\t%s\\t%.1f\\t%d\\t%.2f\\t%.2f\\t%s\\t%d\\t%s\\t%s\\t%s\\n\" %(gene_of_interest,\n per_ident,\n AI,\n current_gene_AT,\n num_sd_from_mean,\n genomic_cov_from_mean,\n TPM,\n exons,\n comment,\n HGTspecies,\n description)\n return HGT_info_dataformatted",
"def gene_features():\n cancer_type_dict = get_cancer_data_files(mutation_data_name=\"data_mutations_extended.txt\")\n gene_set = []\n #Pull genes for each cancer type\n for cancer in cancer_type_dict:\n cancer_files = cancer_type_dict[cancer]\n genes = get_genes_for_cancer_type(cancer_files)\n for gene in genes:\n if gene not in gene_set: #make sure gene hasn't already been selected in another type\n gene_set.append(gene)\n return gene_set",
"def _count_genus_hits(self):\n for comp_list in self.super_results_list:\n # Get the taxonomic annotations for the sequence\n try:\n genus_level, family_level, order_level = self._get_taxa_designation_from_staxid(\n staxid=comp_list[6])\n except:\n # there are some tax_ids that we don't seem to be able to find\n # for the time being we will just continue over this seqeunce\n continue\n if False in [genus_level, family_level, order_level]:\n # Then we were unable to find one or more of the annotations\n continue\n \n # Log the match\n annotation_tup = (genus_level, family_level, order_level)\n if family_level == 'Symbiodiniaceae':\n self.symbiodiniaceae_genus_count_dict[annotation_tup] += 1\n elif order_level == 'Scleractinia' or order_level == 'Anthoathecata':\n self.coral_genus_count_dict[annotation_tup] += 1\n else:\n self.other_genus_count_dict[annotation_tup] += 1",
"def check_gene_coverage(sequence_records, check_for_overlap=True):\n length_total = 0\n gene_length_total = 0\n total_length_by_feature = defaultdict(lambda: 0)\n for sequence_record in sequence_records:\n length_total += len(sequence_record.seq)\n for gene in sequence_record.features:\n gene_length_total += gene.location.end.position - gene.location.start.position\n # this section tries to keep track of subfeature types\n for feature in gene.sub_features:\n total_length_by_feature[feature.type] += len(feature)\n for subfeature in feature.sub_features:\n total_length_by_feature[subfeature.type] += len(subfeature)\n gene_coverage_fraction = float(gene_length_total)/length_total\n feature_coverage_fractions = [(feature,float(length)/gene_length_total) for feature,length \n in total_length_by_feature.items()]\n\n # TODO the by-feature coverage doesn't work because I'm only parsing the file for genes, not features!!! If I want to parse for features, I need to split things up into multiple passes etc again...\n #print total_length_by_feature\n\n # Check for overlapping genes and print a warning, since overlapping genes will make the measurement inaccurate\n if check_for_overlap:\n if check_for_overlapping_genes(sequence_record):\n print \"WARNING: There are overlapping genes! %% of length covered by genes may not be accurate.\"\n # MAYBE-TODO actually adjust the measurement for overlapping genes? Nah, too much work, not enough need for now.\n\n return gene_coverage_fraction, feature_coverage_fractions",
"def findInteractions( targetGenes, geneTable ):\n pass",
"def categorize_exons(gff, gene, exonID):\n exon = exonID.replace('E', '')\n gene_df = gff_data.loc[gff_data['gene_id'].str.contains(gene)]\n # list of unique transcripts per gene\n tscripts = gene_df.loc[gene_df['feature'] == 'exonic_part']['transcripts'].tolist()\n uniq_tscipts = sorted(set([i for sublist in [x.split('+') for x in tscripts] for i in sublist]))\n # rebuild transcripts\n tscript_sets = {}\n for t in uniq_tscipts:\n t_list = gene_df.loc[gene_df['transcripts'].str.contains(t)]['exonic_part_number'].tolist()\n tscript_sets[t] = t_list\n # subset to include entries with exon\n exon_set = {k:v for k,v in tscript_sets.items() if exon in v}\n # categorize exons\n TSS,TTS,KES,NES = (0,0,0,0)\n # 1: TSS [the exon is first in at least one transcript]\n TSS = max([1 if exon == v[0] else 0 for k,v in exon_set.items()])\n # 1: TTS [the exon is last in at least one transcript]\n TTS = max([1 if exon == v[len(v)-1] else 0 for k,v in exon_set.items()])\n if TSS == 0 and TTS == 0:\n # 3: Known exon skipping [this exon absent in at least one transcript]\n if len(exon_set) < len(tscript_sets):\n KES = 1\n # 4: Novel exon skipping [this exon is present in all transcripts]\n elif len(exon_set) == len(tscript_sets):\n NES = 1\n # create summary df\n final_df = pd.DataFrame.from_dict({0: {\"Exon\":f'{gene}:{exonID}', \"TSS\":TSS, \"TTS\":TTS, \"KES\":KES, \"NES\":NES}}, orient='index')\n return(final_df)",
"def per_gene_coverage(genes,df):\n\n sub_genes =[]\n\n #For every gene in the list, check the average coverage, if less than 100 add it to the final list.\n for gene in genes:\n coverage = average(df[df['GeneSymbol;Accession'] == gene]['percentage30'])\n\n if coverage < 100:\n sub_genes.append([gene.split(';')[0],round(coverage,2)])\n \n return sub_genes",
"def check_scaffolds_for_only_HGT_genes(genome, gff, LTG, dna_file, sd_numbers, rnaseq,\n bam_file, out_file):\n bad_scaffold_out = \"bad_scaffold.\"+out_file\n bad_scaff_title = \"#%s\\n#scaffold\\tcomment\\tGenes_on_scaffold\\tAI_of_these_gene\\tdescription\\n\" %(datetime.date.today())\n out = open(bad_scaffold_out, \"w\")\n out.write(bad_scaff_title)\n\n HGT_gene_info = \"HGT.info.\"+out_file\n f_out = open(HGT_gene_info, \"w\")\n f_out.write(\"#%s\\n#gene\\tPerc_identity_to_HGT_hit\\tAI\\tAT_cont\\tAT_cont_numSD_fromMean\\tgenomic_cov_from_mean\\texpression_TMM\\tnum_RNAseq_reads\\texons\\tcomment\\tKingdom\\tHGT_closest_species_hit\\tBLAST_description\\n\" %(datetime.date.today()))\n\n #call function to get the scaffold to gene dict\n scaffold_to_gene_dict, gene_to_exon_count, gene_start_stop_dict = parse_gff(gff) \n #call function to get gene_set, gene_to_comment_dict\n HGT_predicted_gene_set, gene_to_comment_dict,\\\n gene_to_HGTspeces_discription_dict, gene_to_AI, \\\n gene_to_HGT_percent_identity = LTG_file(LTG)\n #get_scaffold_coverage from import coverage.py\n if bam_file:\n overall_coverage = \"overall_coverage.txt\"\n overall_expression_dic = get_total_coverage(bam_file, overall_coverage)\n HGT_gene_to_genic_cov_dic, scaffold_mean_SD_cov_dict, \\\n mean_genomic_cov, standard_dev_genomic_cov = get_scaffold_coverage(genome, \\\n scaffold_to_gene_dict, gene_start_stop_dict,\\\n bam_file, overall_expression_dic,\\\n HGT_predicted_gene_set)\n\n #print gene_to_exon_count\n #call function to get rna seq mapping TPM\n gene_to_expression = parse_rnaseq(rnaseq)\n\n #call function with DNA file\n gene_AT_cont_dic, the_mean, standard_dev = get_stats_on_AT_content(dna_file)\n print \"the AVR AT = %f with SD %f \" %(the_mean, standard_dev)\n \n for gene, comment in gene_to_comment_dict.items():\n if bam_file:\n genomic_cov_from_mean = how_many_sd_from_mean(mean_genomic_cov,\n standard_dev_genomic_cov,\n HGT_gene_to_genic_cov_dic[gene])\n else:\n mean_genomic_cov = 0\n standard_dev_genomic_cov=0\n genomic_cov_from_mean=0\n try:\n AI = gene_to_AI[gene]\n except:\n ValueError\n AI = \"NA\"\n \n HGT_info_dataformatted = check_HGT_AT_vs_global_AT(gene_AT_cont_dic, AI, the_mean,\n standard_dev, gene, comment,\n sd_numbers, gene_to_expression,\n gene_to_exon_count,\n gene_to_HGTspeces_discription_dict,\n genomic_cov_from_mean,\n gene_to_HGT_percent_identity)\n if \"Unclassified\" not in HGT_info_dataformatted:\n f_out.write(HGT_info_dataformatted)\n for scaffold, genes in scaffold_to_gene_dict.items():\n descrption_to_add = \"\"\n genes_string = \"\"\n AI_values = \"\"\n bad_contig = True\n for gene in genes:\n #print gene\n if gene not in HGT_predicted_gene_set:\n bad_contig = False\n\n if bad_contig == True:\n #genes_string = \"\"\n genes_on_scaffold = scaffold_to_gene_dict[scaffold]\n #print \"genes_on_scaffold\", genes_on_scaffold\n for member in genes_on_scaffold:\n genes_string = genes_string+\" \"+member\n try:\n AI = gene_to_AI[gene]\n # HGT looswe threshold of 20 set here.. for further examination\n if int(AI) < 15:\n bad_contig = False\n continue\n except:\n ValueError\n bad_contig = False\n AI = \"NA\"\n continue\n AI_values = AI_values+\" \"+AI\n\n if bad_contig == True:\n print (\"Bad scaffold = %s\" %(scaffold))\n descrpt = gene_to_HGTspeces_discription_dict[gene]\n descrption_to_add = descrption_to_add + \" \" + descrpt\n data_formatted = \"%s\\tBad_scaffold\\t%s\\t%s\\t%s\\n\" %(scaffold,\n genes_string,\n AI_values,\n descrption_to_add)\n out.write(data_formatted)\n out.close()",
"def matched_gc_bedfile(bedfile, matchfile, genome, number, size=None, min_bin_size=100):\n g = Genome(genome)\n genome_fa = g.filename\n try:\n fa = Fasta(matchfile)\n gc = [\n (seq.upper().count(\"C\") + seq.upper().count(\"G\")) / len(seq)\n for seq in fa.seqs\n ]\n sizes = [len(seq) for seq in fa.seqs]\n except Exception:\n try:\n # pylint: disable=unexpected-keyword-arg\n fields = pd.read_csv(matchfile, comment=\"#\", nrows=10, sep=\"\\t\").shape[1]\n tmp = (\n pybedtools.BedTool(matchfile).filter(lambda x: len(x) >= 10).saveas().fn\n )\n bed = pybedtools.BedTool(tmp)\n gc = np.array(\n [float(x[fields + 1]) for x in bed.nucleotide_content(fi=genome_fa)]\n )\n sizes = np.array([x.length for x in bed])\n gc = [round(x, 2) for x in gc]\n except Exception:\n logger.error(\"Please provide input file in BED or FASTA format\")\n raise\n\n # Get the median size of the sequences\n if size is None or size == 0:\n size = int(np.median(sizes))\n if np.std(sizes) > size * 0.05:\n logger.info(\"Sequences do not seem to be of equal size.\")\n logger.info(\n f\"GC% matched sequences of the median size ({size}) will be created\"\n )\n\n bins = [(0.0, 0.2), (0.8, 1)]\n for b in np.arange(0.2, 0.799, 0.05):\n bins.append((b, b + 0.05))\n\n fraction = number / len(gc)\n gc = np.array(gc)\n # print(\"GC\", gc)\n bin_count = []\n for b_start, b_end in bins:\n bin_count.append(\n int(np.sum((gc > round(b_start, 2)) & (gc <= round(b_end, 2))) * fraction)\n )\n\n # To make te requested number, divide remaining over\n # all bins that have counts\n rest = number - sum(bin_count)\n i = 0\n for _ in range(rest):\n while bin_count[i % len(bins)] == 0:\n i += 1\n bin_count[i % len(bins)] += 1\n i += 1\n\n nseqs = max(bin_count) * len(bins)\n\n with NamedTemporaryFile(delete=False) as tmp:\n gc_bin_bedfile(\n tmp.name,\n genome,\n nseqs,\n length=size,\n bins=bins,\n random_state=None,\n min_bin_size=min_bin_size,\n )\n df = pd.read_csv(tmp.name, sep=\"\\t\", names=[\"chrom\", \"start\", \"end\", \"bin\"])\n # print(tmp.name)\n with open(bedfile, \"w\") as f:\n pass\n with open(bedfile, \"a\") as f:\n for (b_start, b_end), n in zip(bins, bin_count):\n if n == 0:\n continue\n # print(b_start, b_end, n)\n b = f\"{b_start:.2f}-{b_end:.2f}\"\n df.loc[df[\"bin\"] == b, [\"chrom\", \"start\", \"end\"]].sample(n).to_csv(\n f, sep=\"\\t\", header=False, index=False\n )",
"def get_GO_presence_labels(genes_of_interest, min_GO_size=200, max_GO_size=300):\n genes = pd.Series(genes_of_interest)\n go_group_presence = {}\n\n for GO in go2geneIDs:\n gene_ids = go2geneIDs[GO]\n\n # boolean vector (length is num of genes in embedding)\n in_go_group_vector = genes.isin(gene_ids)\n\n if (in_go_group_vector.sum() > min_GO_size) & (in_go_group_vector.sum() < max_GO_size):\n go_group_presence[GO] = in_go_group_vector\n\n result = pd.DataFrame(go_group_presence)\n result.index = genes\n result.index.name = 'entrezgene'\n return result",
"def check_multi_exon(tr_nc_index_dict, ncdf):\n\n\tfor gene in tr_nc_index_dict:\n\t\n\t\ttempdf = ncdf.iloc[tr_nc_index_dict[gene][0]:tr_nc_index_dict[gene][1]]\n\t\texon_count = 0\n\t\t\n\t\tfor i in tempdf.index:\n\t\t\tif tempdf.loc[i,'feature'] == 'exon':\n\t\t\t\texon_count += 1\n\t# print exon_count\n\t\tif exon_count >1 :\n\t\t\tprint \" more than one exon for %s\" % gene\n\t\t\tsys.exit()\t# prevent writing fasta if there is multi exon transcript",
"def find_entropy(less_than_threshold,more_than_threshold):\n\n ''' Storing total number of records '''\n total_records = len(less_than_threshold) + len(more_than_threshold)\n\n ''' Calculating the probability '''\n less_than_probability = len(less_than_threshold) / total_records\n more_than_probability = len(more_than_threshold) / total_records\n\n ''' Converting the dataframe to numpy arrays '''\n less_than_threshold_values = less_than_threshold.values\n more_than_threshold_values = more_than_threshold.values\n\n ''' Storing the target attribute values (Muffin or Cupcake) for threshold values '''\n target_for_less_than = less_than_threshold_values[:, -1]\n target_for_more_than = more_than_threshold_values[:, -1]\n\n ''' Finding the counts of muffin and cupcake for values lower than and greater than threshold value '''\n recipe_type, less_than_cupcake_muffin_count = np.unique(target_for_less_than, return_counts=True)\n recipe_type, more_than_cupcake_muffin_count = np.unique(target_for_more_than, return_counts=True)\n\n # print(recipe_type, more_than_cupcake_muffin_count, len(more_than_cupcake_muffin_count))\n ''' To ensure there are at least 5 records in each node '''\n if less_than_cupcake_muffin_count.sum() < 5 or more_than_cupcake_muffin_count.sum() < 5:\n ''' Return horrible badness '''\n return math.inf\n else:\n ''' Find the entropies for less than threshold values and more than threshold values '''\n less_than_entropy = sum((less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()) * - np.log2(\n less_than_cupcake_muffin_count / less_than_cupcake_muffin_count.sum()))\n more_than_entropy = sum((more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()) * - np.log2(\n more_than_cupcake_muffin_count / more_than_cupcake_muffin_count.sum()))\n\n ''' Calculate the total weighted entropy '''\n total_weighted_entropy = less_than_probability * less_than_entropy + more_than_probability * more_than_entropy\n\n return total_weighted_entropy",
"def calculateGC(seq):\n return(((seq.count(\"G\") + seq.count(\"C\"))/float(len(seq)))*100)\n # Count the occurrences of a given item in the list",
"def test_query_expressed_genes_with_inclusive_target_genes(self):\n # Need to test when we have 100% overlap, 0% overlap and something in between\n\n self.populate_target_data()\n\n # Test 1 overlap, 4 sample and 2 target genes\n included_genes = get_genes_by_ranked_expr(self.session, 'sample 1',\n include_targets='target 1')\n self.assertTrue(len(included_genes) == 1)\n self.assertTrue(included_genes[0].ensembl_id == 'PLUS-1')\n\n # Test 4 overlap, 4 sample and 4 target genes\n included_genes = get_genes_by_ranked_expr(self.session, 'sample 1',\n include_targets='target 2')\n self.assertTrue(len(included_genes) == 4)\n self.assertTrue(included_genes[0].ensembl_id == 'MINUS-3')\n self.assertTrue(included_genes[1].ensembl_id == 'MINUS-1')\n self.assertTrue(included_genes[2].ensembl_id == 'PLUS-3')\n self.assertTrue(included_genes[3].ensembl_id == 'PLUS-1')\n\n # Test 0 overlap, 4 sample and 1 non-matching target gene\n remaining_genes = get_genes_by_ranked_expr(self.session, 'sample 1',\n include_targets='target 3')\n self.assertTrue(len(remaining_genes) == 0)",
"def gcCheck(self, seq3):\n return float(self.gcPercent) <= self.numGC * 100.0 / len(seq3) \\\n <= float(self.GCPercent)",
"def find_common_genes(input_fp):\n trait_genes = {}\n all_genes = []\n common_genes = []\n snp_count = {}\n traits = {}\n matrix = []\n print('Extracting genes from eQTL interactions for...')\n _,_,t_files = next(os.walk(input_fp), (None, None, []))\n for trait_file in t_files:\n trait = trait_file[:len(trait_file)-4]\n print('\\t' + trait)\n tfile = open(os.path.join(input_fp, trait_file), 'r')\n eqtls= csv.reader(tfile, delimiter = '\\t') \n next(tfile, None)\n for line in eqtls:\n genes = []\n if trait in trait_genes.keys():\n genes = trait_genes[trait]\n genes.append(line[3])\n trait_genes[trait] = genes\n all_genes.append(line[3])\n tfile.close()\n \n for trait in trait_genes:\n trait_genes[trait] = list(set(trait_genes[trait]))\n all_genes = list(set(all_genes))\n print(len(all_genes))\n\n done_genes = []\n \"\"\"\n for snp in all_snps:\n occur = all_snps.count(snp)\n if occur > 1 and snp not in done_snps:\n done_snps.append(snp)\n for record in trait_snps:\n if snp == record[1] and record not in common_snps:\n common_snps.append(record)\n snp_count[snp] = occur\n to_dict = []\n if record[0] not in traits.keys():\n to_dict.append(snp)\n traits[record[0]] = to_dict\n else:\n to_dict = traits[record[0]]\n to_dict.append(snp)\n traits[record[0]] = to_dict\n \"\"\"\n for trait in trait_genes.keys():\n gene_count = {}\n genes_total = len(trait_genes[trait])\n compare_traits = trait_genes.keys()\n if genes_total > 3:\n for trait_gene in trait_genes[trait]:\n for compare in compare_traits:\n if trait_gene in trait_genes[compare]:\n if compare not in gene_count.keys():\n gene_count[compare] = 1\n else:\n gene_count[compare] += 1\n #else:\n # gene_count[compare] = 0\n row = []\n row.append(trait)\n for t in gene_count:\n ratio = round(gene_count[t]/float(genes_total), 7)\n matrix.append([trait, t, genes_total, gene_count[t], ratio])\n\n \"\"\"\n with open (output_fp + '/' + 'common_snps_count.txt', 'wb') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['snp', 'count'])\n for snp in snp_count:\n writer.writerow([snp,snp_count[snp]])\n \"\"\"\n\n with open ('gene_matrix.txt', 'w') as cluster_file:\n writer = csv.writer(cluster_file, delimiter = '\\t')\n writer.writerow(['trait_x', 'trait_y', '#total_genes', '#common_snps', \\\n 'ratio'])\n writer.writerows(matrix)",
"def compute_coverage_statistics( self ):\n import pandas\n \n def compute_coverages( genes, exons, cds, sequences ):\n # the .reset_index() is used here to turn the analysis column back into a normal column.\n # (otherwise it is an 'index' and behaves differently)\n result = {\n \"genes\": compute_genome_bases_covered( genes, sequences ).reset_index(),\n \"exons\": compute_genome_bases_covered( exons, sequences ).reset_index(),\n \"cds\": compute_genome_bases_covered( cds, sequences ).reset_index()\n }\n return result\n \n def build_single_table( coverages ):\n # Now build a single table\n result = coverages['genes'][['analysis', 'sequence_length']]\n for what in [ 'genes', 'exons', 'cds' ]:\n result = pandas.merge(\n result,\n coverages[what][['analysis', 'bases_covered', 'proportion' ]],\n left_on = 'analysis',\n right_on = 'analysis'\n )\n result.rename(\n columns = {\n \"bases_covered\": \"%s:bases_covered\" % what,\n \"proportion\": \"%s:proportion_covered\" % what\n },\n inplace = True\n )\n return result\n\n coverages = compute_coverages( self.m_genes, self.m_exons, self.m_cds, self.m_sequences )\n return build_single_table( coverages )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
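The CTG test above reduces to a one-sided Poisson test per gene: the expected number of insertions in a gene window is the genome-wide insertion count scaled by the fraction of background sites (pattern occurrences) that fall inside that window, and the p-value is P(X >= observed). A minimal sketch of that calculation, assuming scipy; the function name, counts, and the 20k-gene Bonferroni factor are illustrative, not taken from the dataset:

```python
from scipy.stats import poisson

def ctg_p_value(obs_insertions, total_insertions, window_sites, total_sites):
    """One-sided Poisson enrichment test: P(X >= obs_insertions).

    The expected count (mu) is the genome-wide insertion total scaled by
    the fraction of background sites that lie inside the gene window.
    """
    mu = total_insertions * (window_sites / total_sites)
    # sf(x - 1) == P(X >= x); equivalent to poisson.sf(x, mu, loc=1).
    return poisson.sf(obs_insertions - 1, mu=mu)

# Hypothetical gene: 12 of 400 insertions fall in a window holding 1% of sites.
p = ctg_p_value(12, 400, window_sites=1_000, total_sites=100_000)
q = min(p * 20_000, 1.0)  # Bonferroni over an assumed ~20k tested genes
print(p, q)
```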
Subsets insertions for given gene windows. | def _subset_to_windows(
    insertions,  # type: List[Insertion]
    gene_windows  # type: Dict[str, Tuple[str, int, int]]
):  # type: (...) -> List[Insertion]
    # Create lookup trees.
    trees = {
        chrom: IntervalTree.from_tuples((i[1:]) for i in chrom_int)
        for chrom, chrom_int in itertools.groupby(
            sorted(gene_windows.values()), operator.itemgetter(0))
    }
    # Determine which insertions overlap tree intervals and
    # correspond to genes with known gene window.
    def _in_windows(ins, trees):
        try:
            return trees[ins.seqname].overlaps(ins.position)
        except KeyError:
            return False
    return [
        ins for ins in insertions
        if ins.metadata['gene_id'] in gene_windows and _in_windows(ins, trees)
    ] | [
"def make_windows(annotations, window_size, step):\n annotations_bin_keys = {\"gene_name\", \"gene_chrom\", \"gene_start\", \"gene_end\", \"gene_strand\", \"gene_region_end\", \"gene_region_start\"}\n annotations_bin = {k: [] for k in annotations_bin_keys}\n annotations_bin[\"bin_start\"] = []\n annotations_bin[\"bin_end\"] = []\n annotations_bin[\"bin_strand\"] = []\n for r, row in annotations.iterrows():\n numOfChunks = int((row[\"gene_region_end\"] - row[\"gene_region_start\"] - window_size) / step) + 1\n bins = list(range(0, numOfChunks * step, step))\n\n # Original strand\n annotations_bin[\"bin_start\"].extend([int(i+row[\"gene_region_start\"]) for i in bins])\n annotations_bin[\"bin_end\"].extend([int(i+window_size+row[\"gene_region_start\"]) for i in bins])\n annotations_bin[\"bin_strand\"].extend([row[\"gene_strand\"]]*len(bins))\n for k in annotations_bin_keys:\n annotations_bin[k].extend([row[k]]*len(bins))\n\n # Reverse strand\n annotations_bin[\"bin_start\"].extend([int(i + row[\"gene_region_start\"]) for i in bins])\n annotations_bin[\"bin_end\"].extend([int(i + window_size + row[\"gene_region_start\"]) for i in bins])\n annotations_bin[\"bin_strand\"].extend([\"-\" if row[\"gene_strand\"] == \"+\" else \"+\"]*len(bins))\n for k in annotations_bin_keys:\n annotations_bin[k].extend([row[k]]*len(bins))\n\n return pd.DataFrame.from_dict(annotations_bin)",
"def filter_windows(sliding_windows_file, genes_file, output_file):\n\n\t# Read sliding windows file and create a list in the form\n\t# genes = [('gene1', 1000, 2000), ('gene2', 4000, 45000)]\n\tgenes = []\t\t# this could be a dictionary but I prefer not\n\tfor line in genes_file:\n\t\tline = line.strip()\n\n\t\tif line and not line.startswith('#'):\t\t# if line is not empty and not a comment\n#\t\tif line and re.match('\\d+', line):\n\t\t\tlogging.debug((\"line: %s\" %line))\n\t\t\tfields = line.split()\t\t# it is better to use the default splitting algorithm here.\n\t\t\t\t\t\t\t\t\t\t# read help(''.split)\t\n\n\t\t\tgene_name = fields[0]\n\t\t\tlogging.debug((\"fields: %s\" %fields))\n\t\t\tstart = int(fields[2])\n\t\t\tend = int(fields[3].strip())\t\t# remove \\n\\r, like chomp\n\t\t\tgenes.append((gene_name, start, end))\n\t\t\t\n#\tlogging.debug((\"genes :\", genes))\t\t# print the contents of genes, if level=loggin.DEBUG\n\n\t# read sliding windows file, and select windows that fall in genes\n\toutput = '#gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score\\n'\n\toutputlineskeleton = \"%s\\t%d\\t%d\\t%d\\t%d\\t%d\\t%s\\t%s\\t%s\\n\"\t# %(gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\n\tfor line in sliding_windows_file:\n\t\tline = line.strip()\t\t# remove trailing characters (like chomp)\n\t\tif line and not line.startswith('#'):\n\t\t\twindow_fields = line.split()\n\n#\t\t\tlogging.debug(window_fields)\n\t\t\twindow_start = int(window_fields[0])\n\t\t\twindow_middle = int(window_fields[2])\n\t\t\twindow_end = int(window_fields[1])\n#\t\t\tgene = window_fields[3]\n\t\t\tpopulation = window_fields[4]\n\t\t\tnumber = window_fields[5]\n\t\t\tscore = window_fields[6]\n\n\t\t\tfor gene in genes:\n\t\t\t\tgene_start = int(gene[1])\n\t\t\t\tgene_end = int(gene[2])\n\t\t\t\tgene_name = gene[0]\n\t\t\t\t# if window_start is comprised between gene_end and gene_start\n\t\t\t\tif gene_end > window_start >= gene_start:\n\t\t\t\t\tlogging.debug(\"This window starts inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\t\t\telif gene_end >= window_end > gene_start:\n\t\t\t\t\tlogging.debug(\"This window ends inside gene %s (%s, %s)\" %(gene[0], gene_start, gene_end))\n\t\t\t\t\tlogging.debug(line)\n\t\t\t\t\toutput += outputlineskeleton % (gene_name, gene_start, gene_end, window_start, window_middle, window_end, population, number, score)\n\t\n\tlogging.debug(output)\n\toutput_file.write(output)\n\toutput_file.seek(0)\n\treturn output_file",
"def cut_into_windows(\n self,\n duration: Seconds,\n hop: Optional[Seconds] = None,\n keep_excessive_supervisions: bool = True,\n num_jobs: int = 1,\n ) -> \"CutSet\":\n if not hop:\n hop = duration\n if num_jobs == 1:\n from lhotse.lazy import LazyFlattener, LazyMapper\n\n return CutSet(\n LazyFlattener(\n LazyMapper(\n self,\n partial(\n _cut_into_windows_single,\n duration=duration,\n hop=hop,\n keep_excessive_supervisions=keep_excessive_supervisions,\n ),\n )\n )\n )\n\n from lhotse.manipulation import split_parallelize_combine\n\n result = split_parallelize_combine(\n num_jobs,\n self,\n _cut_into_windows_single,\n duration=duration,\n hop=hop,\n keep_excessive_supervisions=keep_excessive_supervisions,\n )\n return result",
"def select_windows(start, stop, num_windows,\n window_width=1, window_units=\"D\",\n sampling=1, sampling_units=\"T\",\n no_overlaps=True, verbose=True):\n\n # Create all sample candidates\n dt_range = pd.date_range(start, stop-pd.Timedelta(window_width),\n freq=\"%i%s\" % (sampling, sampling_units))\n\n # Sample candidate windows\n selected_windows = np.random.choice(dt_range, num_windows, replace=False)\n selected_windows = pd.DataFrame(selected_windows, columns=[\"start\"])\n\n # Calculate window end\n end_delta = (pd.Timedelta(window_width, unit=window_units)\n - pd.Timedelta(sampling,\n unit=\"m\" if sampling_units==\"T\" else sampling_units))\n selected_windows[\"end\"] = (selected_windows[\"start\"] + end_delta)\n\n # Filter overlaps\n if not no_overlaps:\n return selected_windows\n else:\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n\n while selected_windows.shape[0]<num_windows:\n if verbose:\n print(\"Got %i windows...\" % selected_windows.shape[0])\n\n selected_windows = pd.concat([selected_windows,\n select_windows(start, stop, num_windows,\n window_width, window_units,\n sampling, sampling_units,\n no_overlaps=False)],\n ignore_index=True)\n selected_windows = filter_overlaps(selected_windows,\n pd.Timedelta(window_width,\n unit=window_units))\n return selected_windows.iloc[:num_windows]",
"def create_subsets(self, start_ids):\n subsets = list()\n df = self.all_df.copy()\n for sid in start_ids:\n df2 = df.loc[sid:, :]\n subsets.append(df.drop(df2.index, axis=0))\n df = df2.copy()\n subsets.append(df)\n return subsets",
"def create_selection_sets(obj: ArmatureObject, _metarig: ArmatureObject):\n # Check if selection sets addon is installed\n if 'bone_selection_groups' not in bpy.context.preferences.addons \\\n and 'bone_selection_sets' not in bpy.context.preferences.addons:\n return\n\n obj.selection_sets.clear() # noqa\n\n for coll in obj.data.collections:\n if not coll.rigify_sel_set:\n continue\n\n create_selection_set_for_rig_layer(obj, coll.name, coll)",
"def test_getIntronExonWindowCoords(self):\n\n add_all_gene_exons(self.session, self.genes)\n plus1 = self.session.query(Gene).filter_by(ensembl_id='PLUS-1').one()\n plus3 = self.session.query(Gene).filter_by(ensembl_id='PLUS-3').one()\n minus1 = self.session.query(Gene).filter_by(ensembl_id='MINUS-1').one()\n minus3 = self.session.query(Gene).filter_by(ensembl_id='MINUS-3').one()\n\n # symmetric window first\n upstream = 100\n downstream = 100\n\n expect = {'PLUS-1': [], 'PLUS-3': [(c-upstream, c+downstream)\n for c in [1050, 1600, 1800]],\n 'MINUS-1': [], 'MINUS-3': [(c-downstream, c+upstream)\n for c in [1900, 1700, 1400]]}\n\n for gene in (plus1, plus3, minus1, minus3):\n got = gene.getIntronExonWindowCoords(upstream, downstream, no_overlap=False)\n self.assertEqual(got, expect[gene.ensembl_id])\n\n # asymmetric window: 200 upstream, 100 downstream\n upstream = 200\n downstream = 100\n expect = {'PLUS-1': [], 'PLUS-3': [(c-upstream, c+downstream)\n for c in [1050, 1600, 1800]],\n 'MINUS-1': [], 'MINUS-3': [(c-downstream, c+upstream)\n for c in [1900, 1700, 1400]]}\n for gene in (plus1, plus3, minus1, minus3):\n got = gene.getIntronExonWindowCoords(upstream, downstream, no_overlap=False)\n self.assertEqual(got, expect[gene.ensembl_id])\n\n # proximity check - wipes out all intron starts close to UTR\n\n expect = {'PLUS-1': [], 'PLUS-3': [\n (None, None),\n (1600-upstream, 1600+downstream),\n (None, None)\n ], 'MINUS-1': [], 'MINUS-3': [\n (None, None),\n (1700-downstream, 1700+upstream),\n (1400-downstream, 1400+upstream)\n ]}\n\n for gene in (plus1, plus3, minus1, minus3):\n got = gene.getIntronExonWindowCoords(upstream, downstream, no_overlap=True)\n self.assertEqual(got, expect[gene.ensembl_id])",
"def create_subset_list(self):\n\n row = 0\n for time_step in self.min_increments:\n subset = SubsetClass(time_step=time_step, query_df=self.query_df, model_df=self.model_df, row=row)\n self.subset_list.append(subset)\n row += 1",
"def window(\n ds: Dataset,\n size: int,\n step: Optional[int] = None,\n merge: bool = True,\n) -> Dataset:\n step = step or size\n n_variants = ds.dims[\"variants\"]\n n_contigs = len(ds.attrs[\"contigs\"])\n contig_ids = np.arange(n_contigs)\n variant_contig = ds[\"variant_contig\"]\n contig_starts = np.searchsorted(variant_contig.values, contig_ids)\n contig_bounds = np.append(contig_starts, [n_variants], axis=0)\n\n contig_window_contigs = []\n contig_window_starts = []\n contig_window_stops = []\n for i in range(n_contigs):\n starts, stops = _get_windows(contig_bounds[i], contig_bounds[i + 1], size, step)\n contig_window_starts.append(starts)\n contig_window_stops.append(stops)\n contig_window_contigs.append(np.full_like(starts, i))\n\n window_contigs = np.concatenate(contig_window_contigs)\n window_starts = np.concatenate(contig_window_starts)\n window_stops = np.concatenate(contig_window_stops)\n\n new_ds = Dataset(\n {\n window_contig: (\n \"windows\",\n window_contigs,\n ),\n window_start: (\n \"windows\",\n window_starts,\n ),\n window_stop: (\n \"windows\",\n window_stops,\n ),\n }\n )\n return conditional_merge_datasets(ds, new_ds, merge)",
"def subsets(self):\n \n # note subsets have an unusual encoding\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT DISTINCT ?s WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s \n }}\n }}\n \"\"\".format(g=self.graph_name)\n bindings = run_sparql(query)\n return [r['s']['value'] for r in bindings]",
"def test_ctgs(\n insertions, # type: List[Insertion]\n reference, # type: Reference\n gene_ids=None, # type: Set[str]\n chromosomes=None, # type: Set[str]\n pattern=None, # type: str\n per_sample=True, # type: bool\n window=None #type: Tuple[int, int]\n):\n\n # Default to shared chromosome sequences (typically drops some\n # of the more esoteric extra scaffold/patch sequences).\n if chromosomes is None:\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n reference_gtf = GtfIterator(reference.indexed_gtf_path)\n\n chromosomes = list(\n set(reference_seq.keys()) & set(reference_gtf.contigs))\n\n if len(chromosomes) == 0:\n ValueError('No chromosomes are shared between the reference '\n 'sequence and reference gtf files')\n\n if len(chromosomes) == 0:\n raise ValueError('At least one chromosome must be given')\n\n # Determine gene windows using GTF.\n logging.info('Generating gene windows')\n gene_windows = _build_gene_windows(\n reference.indexed_gtf_path, window=window, chromosomes=chromosomes)\n\n # Subset insertions to gene intervals.\n insertions = _subset_to_windows(insertions, gene_windows)\n\n if gene_ids is None:\n gene_ids = set(ins.metadata['gene_id'] for ins in insertions)\n\n # Collapse insertions per gene/sample (recommended).\n # Corrects for hopping/multiple detection issues.\n if per_sample:\n logging.info('Collapsing insertions')\n insertions = list(_collapse_per_sample(insertions))\n\n # Calculate total number of pattern occurrences within intervals.\n logging.info('Counting pattern occurrences')\n reference_seq = pyfaidx.Fasta(str(reference.fasta_path))\n\n total = count_total(\n reference_seq, pattern=pattern, intervals=gene_windows.values())\n\n # Calculate p-values for each gene.\n logging.info('Calculating significance for genes')\n insertion_trees = GenomicIntervalTree.from_objects_position(\n insertions, chrom_attr='seqname')\n\n p_values = {\n gene_id: test_region(\n insertions=insertions,\n reference_seq=reference_seq,\n region=gene_windows[gene_id],\n total=total,\n pattern=pattern,\n filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid],\n insertion_trees=insertion_trees)\n for gene_id in gene_ids\n }\n\n # Build result frame.\n result = pd.DataFrame.from_records(\n iter(p_values.items()), columns=['gene_id', 'p_value'])\n\n # Calculate corrected p-value using bonferroni correction.\n result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0)\n\n # Sort by q-value and p-value.\n result.sort_values(by=['q_value', 'p_value'], inplace=True)\n\n if len(insertions) > 0:\n # Annotate with gene_name if possible.\n if 'gene_name' in insertions[0].metadata:\n name_map = {\n ins.metadata['gene_id']: ins.metadata['gene_name']\n for ins in insertions\n }\n result.insert(1, 'gene_name', result['gene_id'].map(name_map))\n else:\n result['gene_name'] = np.nan\n\n # Annotate with frequency.\n frequency = (Insertion.to_frame(insertions)\n .groupby('gene_id')['sample'].nunique()\n .reset_index(name='n_samples'))\n result = pd.merge(result, frequency, on='gene_id', how='left')\n else:\n result['gene_name'] = np.nan\n result['n_samples'] = np.nan\n\n return result",
"def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)",
"def annotate_insertions(insertions, window, genes, select_closest=False):\n\n annotated = _annotate_insertions(insertions, window, genes)\n\n if select_closest:\n annotated = _select_closest_gene(annotated)\n\n yield from annotated",
"def test_getExonIntronWindowCoords(self):\n add_all_gene_exons(self.session, self.genes)\n plus1 = self.session.query(Gene).filter_by(ensembl_id='PLUS-1').one()\n plus3 = self.session.query(Gene).filter_by(ensembl_id='PLUS-3').one()\n minus1 = self.session.query(Gene).filter_by(ensembl_id='MINUS-1').one()\n minus3 = self.session.query(Gene).filter_by(ensembl_id='MINUS-3').one()\n\n # symmetric window first\n upstream = 100\n downstream = 100\n\n expect = {'PLUS-1': [], 'PLUS-3': [(c-upstream, c+downstream)\n for c in [1400, 1700]],\n 'MINUS-1': [], 'MINUS-3': [(c-downstream, c+upstream)\n for c in [1800, 1600]]}\n\n for gene in (plus1, plus3, minus1, minus3):\n got = gene.getExonIntronWindowCoords(upstream, downstream, no_overlap=False)\n self.assertEqual(got, expect[gene.ensembl_id])\n\n # asymmetric window: 200 upstream, 100 downstream\n upstream = 200\n downstream = 100\n expect = {'PLUS-1': [], 'PLUS-3': [(c-upstream, c+downstream)\n for c in [1400, 1700]],\n 'MINUS-1': [], 'MINUS-3': [(c-downstream, c+upstream)\n for c in [1800, 1600]]}\n\n for gene in (plus1, plus3, minus1, minus3):\n got = gene.getExonIntronWindowCoords(upstream, downstream, no_overlap=False)\n self.assertEqual(got, expect[gene.ensembl_id])\n\n # proximity check - wipes out all intron starts close to UTR\n\n expect = {'PLUS-1': [], 'PLUS-3': [(c-upstream, c+downstream)\n for c in [1400, 1700]],\n 'MINUS-1': [], 'MINUS-3': [(c-downstream, c+upstream)\n for c in [1800, 1600]]}\n\n for gene in (plus1, plus3, minus1, minus3):\n got = gene.getExonIntronWindowCoords(upstream, downstream, no_overlap=True)\n self.assertEqual(got, expect[gene.ensembl_id])",
"def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])",
"def subwindowcoor(*coor, **kargs):\n nwx = kargs.get('NWX', 10) # set number of windows in x direction\n nwy = kargs.get('NWY', 10) # set number of windows in y direction\n\n x1, x2, y1, y2 = coor\n\n wx = (x2-x1)//nwx # size of every subwindow in x\n wy = (y2-y1)//nwy # size of every subwindow in y\n\n for i in range(nwx):\n for j in range(nwy):\n if i == 0:\n xi = x1 + i * wx\n\n else:\n xi = x1 + i * wx + 1\n\n if j == 0:\n yi = y1 + j * wy\n else:\n yi = y1 + j * wy + 1\n\n xf = x1 + (i + 1) * wx # - 1\n yf = y1 + (j+1) * wy # - 1\n yield(i, j, xi, xf, yi, yf)",
"def filter_gene_sets(matrx, genesets, args):\n logger = logging.getLogger('root')\n for gs, genes in genesets.items():\n logger.debug(gs)\n start = len(genes)\n res = filter_gene_set(list(genes),\n matrx,\n CPU=args.CPU,\n gene_mean_filter=args.min_mean_filter,\n min_prob_filter=args.min_prob_filter,\n output_dir=args.output_dir,\n sensitive=args.sensitive,\n mm_path=args.mm_path,\n overwrite=args.overwrite)\n res = list(set(res))\n end = len(res)\n\n logger.info(\"Filtering: {gs} went from {x} to {y} genes\".format(gs=gs,\n x=start,\n y=end))\n if end < args.min_num_filter:\n logger.info(\"Skipping {gs} because there \"\n \"are not enough genes to cluster\".format(gs=gs))\n continue\n\n yield gs, res",
"def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, min_len: int = 1, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n start_idxs = np.full(len(index), 0)\n end_idxs = np.arange(len(index))\n\n # Filter out short ranges\n window_lens = end_idxs - start_idxs + 1\n min_len_mask = window_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet window_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of windows {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)",
"def __create_windows(self, dat_in, dat_out, sequential):\n print(\"Creating windows...\")\n num_pkts = dat_in.shape[0]\n num_wins = math.ceil(num_pkts / self.win)\n fets = [(name, typ) for name, typ in dat_in.dtype.descr if name != \"\"]\n # Select random intervals from this simulation to create the\n # new input data. Do not pick indices between 0 and self.win\n # to make sure that all windows ending on the chosen index fit\n # within the simulation.\n pkt_idxs = random.choices(range(self.win, num_pkts), k=num_wins)\n # The new data format consists of self.win copies of the\n # existing input features. All copies of a particular feature\n # share the same scaling group.\n scl_grps, dtype = zip(\n *[(scl_grp, (f\"{name}_{idx}\", typ))\n for idx in range(self.win)\n for scl_grp, (name, typ) in enumerate(fets)])\n scl_grps = np.array(scl_grps)\n dat_in_new = np.zeros((num_wins,), dtype=list(dtype))\n\n for win_idx, end_idx in enumerate(pkt_idxs):\n # This could be done on a single line with a range select\n # and a generator, but this version is preferable because\n # it removes intermediate data copies and guarantees that\n # the resulting row is properly ordered.\n for fet_idx, pkt_idx in enumerate(\n range(end_idx - self.win + 1, end_idx + 1)):\n for name, _ in fets:\n dat_in_new[f\"{name}_{fet_idx}\"][win_idx] = (\n dat_in[pkt_idx][name])\n\n # Verify that we selected at least as many windows as we intended to.\n num_selected_wins = len(dat_in_new)\n assert num_selected_wins >= num_wins, \\\n f\"Insufficient windows: {num_selected_wins} < {num_wins}\"\n\n # As an output feature, select only the final ground truth\n # value. I.e., the final ground truth value for this window\n # becomes the ground truth for the entire window.\n return dat_in_new, np.take(dat_out, pkt_idxs), scl_grps"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
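The subsetting step builds one interval tree per chromosome so that each insertion reduces to a point-overlap query. A small self-contained sketch of the same idea, assuming the `intervaltree` package (the document above does not show which IntervalTree implementation it imports); gene IDs and coordinates are made up:

```python
from intervaltree import IntervalTree

# Hypothetical gene windows: gene_id -> (chrom, start, end).
gene_windows = {
    "GeneA": ("1", 10_000, 30_000),
    "GeneB": ("1", 50_000, 70_000),
    "GeneC": ("2", 5_000, 25_000),
}

# One tree per chromosome, keyed on chromosome name.
trees = {}
for chrom, start, end in gene_windows.values():
    trees.setdefault(chrom, IntervalTree()).addi(start, end)

def in_windows(chrom, position):
    # An unknown chromosome means no window, mirroring the KeyError branch above.
    tree = trees.get(chrom)
    return tree is not None and tree.overlaps(position)

print(in_windows("1", 12_345))  # True
print(in_windows("2", 40_000))  # False
```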
Tests a given genomic region for enrichment in insertions. | def test_region(
    insertions,  # type: List[Insertion]
    reference_seq,  # type: pyfaidx.Fasta
    region,  # type: Tuple[str, int, int]
    pattern=None,  # type: Optional[str]
    intervals=None,  # type: Optional[Iterable[Tuple[str, int, int]]]
    total=None,  # type: Optional[int]
    filters=None,  # type: Optional[List[Callable]]
    insertion_trees=None  # type: GenomicIntervalTree
):  # type: (...) -> float
    if total is None:
        total = count_total(
            reference_seq, pattern=pattern, intervals=intervals)
    # Count pattern in region.
    region_count = count_region(reference_seq, region=region, pattern=pattern)
    # Sub-select insertions for region.
    if insertion_trees is None:
        insertion_trees = GenomicIntervalTree.from_objects_position(
            insertions, chrom_attr='seqname')
    region_ins = set(interval[2]
                     for interval in insertion_trees.search(*region))
    # Apply additional filter functions to insertions if given
    # (such as filtering on gene name/id for example).
    if filters is not None:
        for filter_func in filters:
            region_ins = set(ins for ins in region_ins if filter_func(ins))
    # Calculate p-value.
    x = len(list(region_ins))
    mu = len(insertions) * (region_count / total)
    # Note that we use loc=1 here because we are interested in
    # calculating P(X >= x), not P(X > x) (the default
    # survival function).
    p_val = poisson.sf(x, mu=mu, loc=1)  # type: float
    return p_val | [
"def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR3 = pybedtools.BedTool(\"\"\"chr1\\t8500\\t9000\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n proxintron = pybedtools.BedTool(\"\"\"chr1\\t100\\t300\\tfoo\\t0\\t+\\n\n chr1\\t798\\t998\\tfoo\\t0\\t+\\n\n chr1\\t2000\\t2200\\tfoo\\t0\\t+\\n\n chr1\\t2798\\t2998\\tfoo\\t0\\t+\\n\n chr1\\t6000\\t6200\\tfoo\\t0\\t+\\n\n chr1\\t6798\\t6998\\tfoo\\t0\\t+\\n\n chr1\\t7900\\t7998\\tfoo\\t0\\t+\\n\"\"\", from_string = True\n )\n distintron = pybedtools.BedTool(\"\"\"chr1\\t301\\t797\\tfoo\\t0\\t+\\n\n chr1\\t2201\\t2797\\tfoo\\t0\\t+\\n\n chr1\\t6201\\t6797\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n \n regions = build_genomic_regions(pybedtools.BedTool(clipper.test_file(\"test.gtf\")), prox_distance=200) \n \n #print UTR3\n\n #print regions['UTR3']\n print proxintron\n print regions['proxintron']\n #print regions['distintron']\n \n self.assertEqual(len(CDS.intersect(regions['CDS'], f= 1.0, r = True)), 2)\n self.assertEqual(len(UTR5.intersect(regions['UTR5'], f= 1.0, r = True)), 1)\n self.assertEqual(len(UTR3.intersect(regions['UTR3'], f= 1.0, r = True)), 1)\n self.assertEqual(len(proxintron.intersect(regions['proxintron'], f= 1.0, r = True)), 7)\n self.assertEqual(len(distintron.intersect(regions['distintron'], f= 1.0, r = True)), 3)",
"def test_postregions(self):\n pass",
"def test_region_extraction(self):\n image, regions = self.create_image_and_regions()\n for region in regions:\n extracted = visual.safe_extract_with_region(image, region)\n implanted = visual.safe_implant_with_region(image, extracted, region)\n self.validate_image(extracted)\n self.validate_image(implanted)",
"def test_signal_regions(i07_nexus: I07Nexus, regions):\n # Note: this should probably always be a for loop with just 1 iteration.\n for i, _ in enumerate(regions):\n assert i07_nexus.signal_regions[i] == regions[i]",
"async def test_genomic_insertion(test_handler, genomic_insertion,\n grch38_genomic_insertion):\n resp = await test_handler.normalize(\"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\") # noqa: E501\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"NC_000017.10:g.37880993_37880994insGCTTACGTGATG\")\n\n fixture_id = \\\n \"normalize.variation:NC_000017.10%3Ag.37880993_37880994insGCTTACGTGATG\"\n resp = await test_handler.normalize(\"17-37880993-G-GGCTTACGTGATG\")\n assert resp.variation_descriptor.id == \\\n \"normalize.variation:17-37880993-G-GGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, grch38_genomic_insertion,\n \"17-37880993-G-GGCTTACGTGATG\")\n\n resp = await test_handler.normalize(\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")\n assert resp.variation_descriptor.id ==\\\n \"normalize.variation:ERBB2%20g.37880993_37880994insGCTTACGTGATG\"\n resp.variation_descriptor.id = fixture_id\n assertion_checks(resp.variation_descriptor, genomic_insertion,\n \"ERBB2 g.37880993_37880994insGCTTACGTGATG\")",
"def test_getregions(self):\n pass",
"def test_putregions_item(self):\n pass",
"def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')",
"def test_search_region_of_birth(self):\n pass",
"def test_regions_get(self):\n pass",
"def generateNewRegion(self):\n \n # regions related with \"near/within\" preposition\n for (regionName,dist) in self.regionNear:\n for region in self.proj.rfi.regions:\n if region.name == regionName:\n oldRegion = region\n newRegion = oldRegion.findRegionNear(dist,mode=\"overEstimate\",name='near$'+regionName+'$'+str(dist))\n self.proj.rfi.regions.append(newRegion)\n \n \n # regions related with \"between\" preposition\n for (regionNameA,regionNameB) in self.regionBetween:\n\n for region in self.proj.rfi.regions:\n if region.name == regionNameA:\n regionA = region\n elif region.name == regionNameB:\n regionB = region\n \n newRegion = findRegionBetween(regionA,regionB,name='between$'+regionNameA+'$and$'+regionNameB+\"$\")\n self.proj.rfi.regions.append(newRegion)",
"def test_update_region_of_birth(self):\n pass",
"def _process_region(self, region, writer):",
"def test_avalanche_warning_by_region_simple(self):\n pass",
"def test_query_expressed_genes_with_inclusive_target_genes(self):\n # Need to test when we have 100% overlap, 0% overlap and something in between\n\n self.populate_target_data()\n\n # Test 1 overlap, 4 sample and 2 target genes\n included_genes = get_genes_by_ranked_expr(self.session, 'sample 1',\n include_targets='target 1')\n self.assertTrue(len(included_genes) == 1)\n self.assertTrue(included_genes[0].ensembl_id == 'PLUS-1')\n\n # Test 4 overlap, 4 sample and 4 target genes\n included_genes = get_genes_by_ranked_expr(self.session, 'sample 1',\n include_targets='target 2')\n self.assertTrue(len(included_genes) == 4)\n self.assertTrue(included_genes[0].ensembl_id == 'MINUS-3')\n self.assertTrue(included_genes[1].ensembl_id == 'MINUS-1')\n self.assertTrue(included_genes[2].ensembl_id == 'PLUS-3')\n self.assertTrue(included_genes[3].ensembl_id == 'PLUS-1')\n\n # Test 0 overlap, 4 sample and 1 non-matching target gene\n remaining_genes = get_genes_by_ranked_expr(self.session, 'sample 1',\n include_targets='target 3')\n self.assertTrue(len(remaining_genes) == 0)",
"def test_ith_region_nxs_01(i07_nexus_object_01: I07Nexus,\n i, ith_region):\n assert i07_nexus_object_01._get_ith_region(i) == ith_region",
"def test_patchregions_item(self):\n pass",
"def test_coordinates2region():\n assert coordinates2region({'x':0,'y':0})==[1],'something failed'\n\tassert coordinates2region({'x':0,'y':350})==[2]'Something failed'\n assert coordinates2region({'x':350,'y':0})==[3],'Something failed.'\n assert coordinates2region({'x':350,'y':350})==[4],'Something failed.'",
"def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
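The `loc=1` argument in `test_region` is the easy-to-miss detail: `poisson.sf(x, mu)` gives P(X > x), so the distribution is shifted by one to obtain P(X >= x). A quick numeric check, with arbitrary counts, showing three equivalent ways to compute it:

```python
from scipy.stats import poisson

x, mu = 7, 2.5  # observed insertions, expected insertions
p1 = poisson.sf(x, mu=mu, loc=1)      # shift the distribution, as in test_region
p2 = poisson.sf(x - 1, mu=mu)         # shift the argument instead
p3 = 1.0 - poisson.cdf(x - 1, mu=mu)  # complement of P(X <= x - 1)
assert abs(p1 - p2) < 1e-12 and abs(p1 - p3) < 1e-12
print(p1)  # ~0.014: probability of seeing 7 or more when 2.5 are expected
```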
Counts occurrences of pattern within given genomic region. | def count_region(
    reference_seq,  # type: pyfaidx.Fasta
    region,  # type: Tuple[str, int, int]
    pattern=None  # type: Optional[str]
):  # type: (...) -> int
    chrom, start, end = region
    seq = reference_seq[chrom][int(start):int(end)]
    return _count_sequence(seq, regex=_build_regex(pattern)) | [
"def approx_pattern_count(pattern: str, genome: str, d: int) -> int:\n return len(approx_pattern_matching(pattern, genome, d))",
"def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0",
"def PatternCount(text, pattern):\n l_p = len(pattern)\n l_t = len(text)\n tot = 0\n for i in range(l_t - l_p + 1):\n if text[i:i+l_p] == pattern: tot += 1\n return tot",
"def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))",
"def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter",
"def pattern_count(text, pattern):\n count = 0\n for i in range(0, (len(text) - len(pattern))):\n if text[i:i + len(pattern)] == pattern:\n count += 1\n return count",
"def count_hits_region(location, region):\n l=len(region)\n c=0\n for i in range(0,l-1):\n if hits_border(location,region[i],region[i+1])==True:\n c=c+1\n return c",
"def pattern_count(text: str, pattern: str) -> int:\n count = 0\n pattern_size = len(pattern)\n\n for i in range(len(text) - pattern_size + 1):\n if text[i:i+pattern_size] == pattern:\n count += 1\n \n return count",
"def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])",
"def CountOccurrences(pattern, bwt, starts, occ_counts_before):\r\n # Implement this function yourself\r\n top = 0\r\n bottom = len(bwt) -1\r\n while top <= bottom:\r\n if pattern:\r\n symbol = pattern[-1]\r\n pattern = pattern[:-1]\r\n if symbol in bwt[top:bottom+1]:\r\n top = starts[symbol] + occ_counts_before[symbol][top]\r\n bottom = starts[symbol] + occ_counts_before[symbol][bottom+1] -1\r\n else:\r\n return 0\r\n else:\r\n return bottom - top +1",
"def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count",
"def region_count(segmented_img, region_number):\n count_region = np.sum(segmented_img == region_number)\n return count_region",
"def countmatches(self, pattern):\n assert_is_type(pattern, str, [str])\n fr = H2OFrame._expr(expr=ExprNode(\"countmatches\", self, pattern))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncols = self.ncol\n return fr",
"def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index",
"def count_occurrences(lines, substrings):\n for substring in substrings:\n pattern = get_pattern(substring)\n count = sum(len(pattern.findall(line)) for line in lines)\n print(count)",
"def parse_file_count(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n n_found = 0\n pattern = args.pattern\n for line in fisier:\n if args.ignore_case:\n line = line.lower()\n pattern = pattern.lower()\n n_found += line.count(pattern)\n\n fisier.close()\n return n_found",
"def count_regexp_occ(regexp=\"\", text=None):\n return len(re.findall(regexp, text))",
"def contig_count(contig):\n return sum([1 for line in open(contig, 'rU').readlines() if line.startswith('>')])",
"def annotation_count(content_object):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
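A minimal usage sketch for count_region, assuming a pyfaidx-indexed FASTA file is available and that count_region (together with its _build_regex helper) is importable from the surrounding module; the file name and coordinates below are hypothetical.

import pyfaidx

reference_seq = pyfaidx.Fasta("reference.fa")  # hypothetical FASTA path

# Count occurrences of the pattern "TA" within chr1:1-10000.
n_sites = count_region(reference_seq, region=("chr1", 1, 10000), pattern="TA")

# With pattern=None the call simply returns the length of the region.
region_len = count_region(reference_seq, region=("chr1", 1, 10000))
print(n_sites, region_len)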
Counts occurrences of pattern in sequence. | def _count_sequence(sequence, regex=None):
# type: (pyfaidx.Sequence, Pattern[str]) -> int
if regex is None:
count = len(sequence)
else:
count = sum((1 for _ in regex.finditer(str(sequence))))
return count | [
"def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return counter",
"def PatternCount(text, pattern):\n l_p = len(pattern)\n l_t = len(text)\n tot = 0\n for i in range(l_t - l_p + 1):\n if text[i:i+l_p] == pattern: tot += 1\n return tot",
"def count(seq):\n\treturn sum(1 for x in seq)",
"def pattern_count(text, pattern):\n return len([i\n for i in range(0, len(text) - len(pattern) + 1)\n if text[i:i + len(pattern)] == pattern])",
"def pattern_count(text: str, pattern: str) -> int:\n count = 0\n pattern_size = len(pattern)\n\n for i in range(len(text) - pattern_size + 1):\n if text[i:i+pattern_size] == pattern:\n count += 1\n \n return count",
"def pattern_count(text, pattern):\n count = 0\n for i in range(0, (len(text) - len(pattern))):\n if text[i:i + len(pattern)] == pattern:\n count += 1\n return count",
"def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0",
"def pattern_count(text, pattern):\n\n count = 0\n len_text = len(text)\n len_pattern = len(pattern)\n for i in range(len_text - len_pattern):\n if pattern in text[i:i + len_pattern]:\n count = count + 1\n else:\n continue\n return count",
"def approx_pattern_count(pattern: str, genome: str, d: int) -> int:\n return len(approx_pattern_matching(pattern, genome, d))",
"def count_patterns(pattern, file):\n count = 0\n with open(file, 'r') as f:\n for line in f:\n if re.search(pattern, line):\n count += 1\n print(\"The pattern '{}' appears {} times.\".format(pattern, count))",
"def CountOccurrences(pattern, bwt, starts, occ_counts_before):\r\n # Implement this function yourself\r\n top = 0\r\n bottom = len(bwt) -1\r\n while top <= bottom:\r\n if pattern:\r\n symbol = pattern[-1]\r\n pattern = pattern[:-1]\r\n if symbol in bwt[top:bottom+1]:\r\n top = starts[symbol] + occ_counts_before[symbol][top]\r\n bottom = starts[symbol] + occ_counts_before[symbol][bottom+1] -1\r\n else:\r\n return 0\r\n else:\r\n return bottom - top +1",
"def count_pattern(pattern, lst):\n count = 0\n if type(pattern) != type(lst):\n raise TypeError(\"count_pattern() : arguments must be of the same type\")\n elif not pattern or not lst:\n return count\n else:\n\n # size of the pattern and the lst\n patternlength = len(pattern)\n lstlength = len(lst)\n\n # if the pattern is longer than the lst, quit out\n if patternlength > lstlength:\n return count\n\n # otherwise look for matches\n else:\n\n # for the maximum total possible matches\n for ii in range(lstlength - patternlength + 1):\n\n # step the pattern through the lst\n candidatematch = lst[ii:(ii + patternlength)]\n\n # if it's a match, increment the count of the matches\n if pattern == candidatematch:\n count += 1\n return count",
"def count_type_changes(pattern: str):\n count = 0\n for left, right in zip(pattern[:-1], pattern[1:]):\n if left != right:\n count += 1\n return count",
"def get_count(self):\n\n return len(self._pattern)",
"def b_count(pattern):\n\n num_b = 0\n\n for char in pattern:\n if char == \"B\":\n num_b += 1\n\n return num_b",
"def find_repeats(sequence, pattern):\r\n\r\n # List containing repeats\r\n repeat_list = []\r\n\r\n # Find repeats ussing regular expressions and pattern\r\n hit_list = re.finditer(pattern, sequence)\r\n\r\n for repeat in hit_list:\r\n\r\n rep_start, rep_end = repeat.span()\r\n\r\n rep_num = (rep_end - rep_start)/2\r\n\r\n repeat_list.append((rep_start+1, rep_end, rep_num))\r\n\r\n return repeat_list",
"def CountOccurrences(pattern, bwt, starts, occ_counts_before, suffix_array):\r\n # 0$ 1A 2T 3G 4C\r\n letters = {'$':0, 'A':1, 'T':2, 'G':3, 'C':4}\r\n top=0\r\n bottom = len(bwt)-1\r\n matches_index = []\r\n while True:\r\n if len(pattern)!=0:\r\n char = pattern[-1]\r\n j = letters[char]\r\n pattern = pattern[:-1]\r\n found=False\r\n for i in range(top,bottom+1):\r\n if bwt[i] == char:\r\n top = occ_counts_before[i][j] + starts[char] -1\r\n bottom = occ_counts_before[bottom][j] + starts[char] -1\r\n found = True\r\n break\r\n\r\n if found==False:\r\n return matches_index\r\n # when pattern is finished proccessing\r\n else:\r\n for i in range(top, bottom+1):\r\n matches_index.append(suffix_array[i])\r\n return matches_index\r\n\r\n return matches_index",
"def count_occurrences(lines, substrings):\n for substring in substrings:\n pattern = get_pattern(substring)\n count = sum(len(pattern.findall(line)) for line in lines)\n print(count)",
"def count_intervals(sequence):\n return collections.Counter(intervals(sequence))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
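A small stand-alone sketch of _count_sequence, assuming it can be imported from the same module; a plain string stands in for a pyfaidx.Sequence here, since the function only needs len() and str().

import re

seq = "ATTATTGGTA"
print(_count_sequence(seq, regex=re.compile("TA")))  # 2 matches of "TA"
print(_count_sequence(seq))                          # no regex: falls back to len(seq) == 10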
Merges overlapping genomic intervals. | def merge_genomic_intervals(intervals):
# type: (Iterable[Tuple[str, int, int]]) -> Iterable[Tuple[str, int, int]]
# Group intervals by chromosome.
grouped_intervals = itertools.groupby(
sorted(intervals), operator.itemgetter(0))
# Now yield merged intervals per chromosome.
for chrom, grp in grouped_intervals:
chrom_intervals = [interval[1:] for interval in grp]
for low, high in merge_intervals(chrom_intervals, is_sorted=True):
yield chrom, low, high | [
"def merge_ranges():",
"def mergeOverlapping(intervals) :\n ## We sort the interval to allow for easy merging:\n slist = sorted(intervals,key = lambda val : val.lowerBound)\n retlist = []\n curr = None\n for i in range(len(slist)) :\n if curr is None :\n curr = slist[i]\n else :\n try :\n curr = interval.mergeIntervals(curr,slist[i])\n except IntervalError:\n retlist.append(curr)\n curr = slist[i]\n if curr is not None :\n retlist.append(curr)\n return retlist",
"def merge_overlapping_ranges(ranges):\n ranges = iter(sorted(ranges))\n try:\n current_start, current_stop = next(ranges)\n except StopIteration:\n return None\n for start, stop in ranges:\n if start > current_stop:\n # Gap between segments: output current segment and start a new\n # one.\n yield current_start, current_stop\n current_start, current_stop = start, stop\n else:\n # Segments adjacent or overlapping: merge.\n current_stop = max(current_stop, stop)\n yield current_start, current_stop",
"def test_merge_intervals():\n\n a = pybedtools.example_bedtool(\"a.bed\") # path to test file a\n # This file looks like this:\n # chr1\t1\t100\tfeature1\t0\t+\n # chr1\t100\t200\tfeature2\t0\t+\n # chr1\t150\t500\tfeature3\t0\t-\n # chr1 900\t950\tfeature4\t0\t+\n\n assert len(a) == 4\n\n b = pybedtools.example_bedtool(\"b.bed\") # path to test file b\n # This file looks like this:\n # chr1\t155\t200\tfeature5\t0\t-\n # chr1\t800\t901\tfeature6\t0\t+\n\n assert len(b) == 2\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == 2\n # Merged file looks like this:\n # chr1\t1\t500\n # chr1\t800\t950",
"def merge_intervals(interval1, interval2):\n \n return min(interval1[0], interval2[0]), max(interval1[1], interval2[1])",
"def merge_overlapping_intervals(stream):\n\n s = stream[0]\n\n for interval in stream:\n if is_overlapping(s, interval):\n s = merge_intervals(s, interval)\n else:\n yield s\n s = interval\n\n yield s",
"def CombineOverlaps( old_gff, method = \"combine\" ):\n\n old_gff.sort( lambda x,y: cmp( (x.contig, x.strand, x.start, x.end),\n (y.contig, y.strand, y.start, y.end) ) )\n \n new_gff = []\n\n last_e = old_gff[0]\n\n for e in old_gff[1:]:\n if not Overlap( last_e, e):\n new_gff.append( last_e )\n last_e = e\n else:\n if method[0] == \"c\":\n last_e.start = min( last_e.start, e.start )\n last_e.end = max( last_e.end, e.end )\n last_e.mInfo += \" ; \" + e.mInfo\n\n new_gff.append( last_e )\n \n return new_gff",
"def mergeIntervals(int1, int2):\n if int2.lowerBound < int1.lowerBound:\n int1,int2 = int2,int1 \n if int1.upperBound >= int2.upperBound:\n return int1\n ## Merges adjacent intervals:\n elif int1.upperBound >= int2.lowerBound-1:\n return interval('['+str(int1.lowerBound)+','+str(int2.upperBound)+']')\n else :\n raise IntervalError('Cannot merge intervals')",
"def merge_overlapping_regions(regions):\n sorted_regions = sorted(regions, key=lambda r: (r.chromosome, r.start))\n\n merged_regions = []\n current_regions = []\n last_end = None\n for region in sorted_regions:\n if len(current_regions) == 0:\n current_regions.append(region)\n last_end = region.end\n elif region.chromosome == current_regions[0].chromosome and region.start < last_end:\n current_regions.append(region)\n last_end = max(last_end, region.end)\n else:\n merged_region = GenomicRegion(chromosome=current_regions[0].chromosome,\n start=current_regions[0].start, end=last_end,\n strand=current_regions[0].strand)\n merged_regions.append(merged_region)\n current_regions = [region]\n last_end = region.end\n\n merged_region = GenomicRegion(chromosome=current_regions[0].chromosome,\n start=current_regions[0].start, end=last_end,\n strand=current_regions[0].strand)\n merged_regions.append(merged_region)\n\n return merged_regions",
"def _merge_ints(row, overlapping, strats=['cut_at_lower'], excepts=[]):\n # todo check if every strategy in the list is possible\n assert isinstance(overlapping, pd.Series)\n assert isinstance(row, pd.Series)\n\n if row.activity == overlapping.activity:\n return _merge_int_same(row, overlapping)\n \n int1 = pd.Interval(row.start_time, row.end_time)\n int2 = pd.Interval(overlapping.start_time, overlapping.end_time)\n \n if excepts != []:\n # find out which is the dominant and the less dominant interval\n if row.activity in excepts and overlapping.activity in excepts:\n # assign row and ov such as to replace the one with lower priority below\n print('priority mismatch!'*20)\n idx_row = excepts.index(row.activity)\n idx_ov = excepts.index(overlapping.activity)\n if idx_row < idx_ov:\n dom = overlapping\n less_dom = row\n\n if row.activity in excepts:\n dom = row\n less_dom = overlapping\n else:\n dom = overlapping \n less_dom = row\n\n\n # apply merging strategies\n int_dom = pd.Interval(dom.start_time, dom.end_time)\n int_ldom = pd.Interval(less_dom.start_time, less_dom.end_time)\n\n # dominant interval encloses less dominant => keep dominant, drop less dominant\n if (int_dom.left < int_ldom.left) & (int_ldom.right < int_dom.right):\n df_res = _create_activity_df()\n df_res.loc[0] = dom\n return df_res\n\n # less dominant interval encloses dominant => normal inclusive merge\n elif (int_ldom.left < int_dom.left) & (int_dom.right < int_ldom.right):\n return _merge_int_inclusive(less_dom, dom)\n\n # intervals overlap => keep dominant\n else:\n return _merge_int_first_persists(dom, less_dom)\n \n\n if (int1.left < int2.left) & (int2.right < int1.right):\n # int1 |~~~~~~| \n # int2 |----| \n df_res = _merge_int_inclusive(row, overlapping)\n \n elif (int1.left <= int2.left) & (int1.right < int2.right):\n # int1 |~~~~| \n # int2 |----| \n df_res = _merge_int_right_partial(row, overlapping)\n \n else:\n raise ValueError # this should never happen\n return df_res",
"def merge_df_intervals(df, iv_func=lambda iv: iv.merge_hull()):\n if not \"strand\" in df.columns:\n df = df.assign(strand=1)\n strand_added = True\n else:\n strand_added = False\n joined = _df_to_tup(df)\n\n out = []\n for chr_strand, sub_group in itertools.groupby(joined, lambda tup: tup[0]):\n args = [x[1:] for x in sub_group]\n iv = IntervalSet.from_tuples_with_id(args)\n new_order = iv_func(iv).to_tuples_last_id()\n new_df = df.iloc[[x[2] for x in new_order]].copy()\n new_df.loc[:, \"start\"] = [x[0] for x in new_order]\n new_df.loc[:, \"stop\"] = [x[1] for x in new_order]\n out.append(new_df)\n res = pd.concat(out)\n if strand_added:\n res = res.drop(\"strand\", axis=1)\n return res.sort_values([\"chr\", \"start\"])",
"def test_merge_demo_intervals():\n a = pybedtools.BedTool(panel1_path)\n assert len(a) == 4\n b = pybedtools.BedTool(panel2_path)\n assert len(b) == 3\n\n merged_bed = merge_intervals([a, b])\n assert len(merged_bed) == len(a) + len(b) - 1 # a and b have a shared interval",
"def test_rangesMerged(self):\n\n mergeAfter = MessageSet(1, 3)\n mergeBefore = MessageSet(6, 8)\n\n mergeBetweenSequence = mergeAfter + mergeBefore\n mergeBetweenNumber = mergeAfter + MessageSet(5, 7)\n\n self.assertEqual(list(mergeAfter + (2, 4)), [1, 2, 3, 4])\n self.assertEqual(list(mergeAfter + (3, 5)), [1, 2, 3, 4, 5])\n\n self.assertEqual(list(mergeBefore + (5, 7)), [5, 6, 7, 8])\n self.assertEqual(list(mergeBefore + (4, 6)), [4, 5, 6, 7, 8])\n\n self.assertEqual(list(mergeBetweenSequence + (3, 5)),\n [1, 2, 3, 4, 5, 6, 7, 8])\n self.assertEqual(list(mergeBetweenNumber + MessageSet(4)),\n [1, 2, 3, 4, 5, 6, 7])",
"def test_mergeOverlapping(self):\n\n # test expected behavior for correctly formatted inputs\n int1 = interval('[1,2)')\n int2 = interval('(1,2]')\n int12 = interval('[1,2]')\n merged12 = mergeOverlapping([int1, int2])\n self.assertEqual([int12], merged12)\n int3 = interval('[3,3]')\n int13 = interval('[1,3]')\n intneg1 = interval('[-1,0)')\n int0 = interval('[0,1)')\n intneg13 = interval('[-1,3]')\n self.assertEqual([intneg13], mergeOverlapping([intneg1, int0, int13]))\n self.assertEqual([intneg1, int3], mergeOverlapping([intneg1, int3]))\n self.assertEqual([int13], mergeOverlapping([int12, int3]))\n int4 = interval('(3,4]')\n int58 = interval('[5,8]')\n intnothing = mergeOverlapping([])\n self.assertEqual([], intnothing)\n self.assertEqual([int13, int58], mergeOverlapping([int12, int3, int58]))\n self.assertEqual([int13, int58], mergeOverlapping([int58, int13]))\n self.assertEqual([int13], mergeOverlapping([int1, int2, int3]))\n self.assertEqual([int13], mergeOverlapping([int1, int2, int2, int3, int12]))\n self.assertEqual([int1], mergeOverlapping([int1]))\n\n # test expected behavior for incorrectly formatted inputs\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([int1, 4])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, int1])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, \"not an interval\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([3, \"[1,3]\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[], \"\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[12, \"hi\"], \"interval\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([int1, \"\"])\n with self.assertRaises(ValueError):\n int1 = mergeOverlapping([[], int2])\n print(\"merge overlapping list test complete\")",
"def merge_close(events, min_interval, merge_to_longer=False):\n half_iv = min_interval / 2\n merged = []\n\n for higher in events:\n\n if not merged:\n merged.append(higher)\n\n else:\n lower = merged[-1]\n\n if higher['start'] - half_iv <= lower['end'] + half_iv:\n\n if merge_to_longer and (higher['end'] - higher['start'] >\n lower['end'] - lower['start']):\n start = min(lower['start'], higher['start'])\n higher.update({'start': start})\n merged[-1] = higher\n\n else:\n end = max(lower['end'], higher['end'])\n merged[-1].update({'end': end})\n\n else:\n merged.append(higher)\n\n return merged",
"def merge(self, itspaces, within=None):\n itspaces = self._convert_to_mode0(itspaces)\n within = within or 1\n\n itspaces = sorted(tuple(set(itspaces)))\n merged_itspaces = []\n current_start, current_stop = itspaces[0]\n for start, stop in itspaces:\n if start - within > current_stop:\n merged_itspaces.append((current_start, current_stop))\n current_start, current_stop = start, stop\n else:\n # Ranges adjacent or overlapping: merge.\n current_stop = max(current_stop, stop)\n merged_itspaces.append((current_start, current_stop))\n\n itspaces = self._convert_from_mode0(merged_itspaces)\n return itspaces",
"def interval_union(intervals):\n intervals.sort(key=lambda x: x[0])\n union = [intervals[0]]\n for i in intervals[1:]:\n if i[0] <= union[-1][1]: # overlap w/ previous\n if i[1] > union[-1][1]: # only extend if larger\n union[-1] = (union[-1][0], i[1])\n else:\n union.append(i)\n return union",
"def merge_peaks(peaks, peak_size, merge_overlap, chrom_len):\n max_overlap = merge_overlap\n while len(peaks) > 1 and max_overlap >= merge_overlap:\n # find largest overlap\n max_i = 0\n max_overlap = peaks[0].end - peaks[1].start\n for i in range(1, len(peaks) - 1):\n peaks_overlap = peaks[i].end - peaks[i + 1].start\n if peaks_overlap > max_overlap:\n max_i = i\n max_overlap = peaks_overlap\n\n if max_overlap >= merge_overlap:\n # merge peaks\n peaks[max_i].merge(peaks[max_i + 1], peak_size, chrom_len)\n\n # remove merged peak\n peaks = peaks[: max_i + 1] + peaks[max_i + 2 :]\n\n return peaks",
"def mergeSequences(self):\n\t\tseqs=self.__sequences\n\t\tslist=sorted(seqs)\n\t\toutseq={}\n\t\tendpos=0\n\t\tfor seq in slist:\n\n\t\t\tif not seqs[seq][0]: continue\n\n\t\t\tif endpos<seqs[seq][1]: endpos=seqs[seq][1]\n\n\t\t\tturn=0\n\t\t\t# handling repeat command; when not used == 1\n\t\t\tfor repeat in range(seqs[seq][3]):\n\t\t\t\tts={}\n\n\t\t\t\tfor t in sorted(seqs[seq][2]):\n\t\t\t\t\ts=list(seqs[seq][2][t])\n\t\t\t\t\tif len(s)>=4: s[3]+=turn\n\t\t\t\t\tts[(t[0]+turn,t[1],len(ts))]=tuple(s)\n\n\t\t\t\toutseq.update(ts)\n\t\t\t\tturn+=seqs[seq][1]\n\t\t\t\t\t\n\t\t# far far away\n\t\toutseq[(endpos,0,100000000)]=(0xff,0x2f,0x00)\n\n\t\tself.__merged_sequence=outseq"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
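A usage sketch for merge_genomic_intervals, assuming the merge_intervals helper it calls is available in the same module; the coordinates are made up.

intervals = [
    ("chr1", 100, 250),
    ("chr1", 200, 400),   # overlaps the interval above
    ("chr2", 50, 75),
]
merged = list(merge_genomic_intervals(intervals))
# Expected under the above assumption: [("chr1", 100, 400), ("chr2", 50, 75)]
print(merged)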
Reads a CSV from the "general" folder of the database. Also used in setup.py. | def open_general(file, setup=False):
try:
if setup is False:
p = datapath(True, 'general', file)
df = _pd.read_csv(p + '.csv')
elif setup is True:
p = datapath(True, 'general', file)
df = _pd.read_csv(p + '.py')
else:
df = None # not tested here
return df
except FileNotFoundError as e:
print("There is no record of {} in your database. Go to your chosen setup path to check, if not there go to "
"Github and download the missing sheet".format(file))
return None | [
"def read_csv_file(self):\n pass",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def read_csv(filepath, collection):\n articles = []\n with open(filepath) as csvfile:\n reader = csv.reader(csvfile)\n # skip headers\n next(reader, None)\n for row in reader:\n pro = row[0]\n con = row[2]\n if pro != \"\":\n articles.append([pro, \"pro\"])\n if con != \"\":\n articles.append([con, \"con\"])\n\n collection.add_data(articles)",
"def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)",
"def read_csv():\n global csvdata\n global CONFIG\n if type(csvdata) == type(None):\n if not os.path.exists(CONFIG[\"csvfile\"]):\n csvdata = pandas.read_csv(CONFIG[\"csvrepo\"],\n na_values=[\"-999999\",\"NOT AVAILABLE\"])\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n csvdata.to_csv(CONFIG[\"csvfile\"])\n else:\n csvdata = pandas.read_csv(CONFIG[\"csvfile\"])\n return csvdata",
"def _read_csv(csv_file, discrete=None):\n if csv_file == 'demo':\n return load_demo()\n discrete_cols = discrete if discrete is not None else discrete.split(',')\n return pd.read_csv(csv_file), discrete_cols",
"def read_database():\n d_database={}\n filenames=glob.glob('*.csv')\n for filename in filenames:\n try:\n f_in=open(filename,'r') \n except:\n print(\"fille open error\")\n return False\n table_name=os.path.splitext(filename)[0]\n d_table=read_table(f_in)\n d_database[table_name]=d_table\n return d_database",
"def openCsv():\n csvFile = 'BDO_app/modules/crafting/alchemyRecipes.csv'\n return csvFile",
"def reader(path, *, aggregate=False):\n\n for root, dirs, files in os.walk(path):\n if {\"new\", \"base\"} <= set(dirs):\n # We are now inside a test directory where we want to read new/raw.csv\n yield from _build_results(\n Path(root) / \"new\" / \"raw.csv\", aggregate=aggregate\n )\n else: # pragma: nocover (bug in coverage)\n continue",
"def read_csv_file(dir_name, csv_file, collection, error_list):\n count = 0\n try:\n filename = os.path.join(dir_name, csv_file)\n with open(filename, 'r') as file:\n csv_reader = csv.DictReader(file)\n # create the document for products collection\n for row in csv_reader:\n collection.insert_one(row)\n except FileNotFoundError:\n LOGGER.info('FileNotFoundError')\n count += 1\n except Exception as error:\n count += 1\n LOGGER.info('Exception:')\n LOGGER.info(error)\n error_list.append(count)",
"def get_all_csv(base_dir, verbose=False):\n data = []\n delimiter = ','\n for dir_name in get_dirs(base_dir):\n for data_file_name in os.listdir(dir_name):\n if data_file_name.endswith(\".csv\"):\n full_path = os.path.join(dir_name, data_file_name)\n if verbose:\n print(\"Reading {}\".format(full_path))\n data.append(np.genfromtxt(\n full_path, delimiter=delimiter, dtype=None, names=True\n ))\n return data",
"def loadCSV(input_file):",
"def _process_directory(self, directory):\n\n if directory[-1] != os.sep:\n directory += os.sep\n\n for filename in glob.glob('%s*.sql' % directory):\n self._read(filename)",
"def read(self, database ='project'):\n\t\tfile = open(self.file_name, \"r\")\n\n\t\ti = 1\n\t\tseptics = []\n\t\tfor line in file:\n\t\t\tif i > 2:\n\t\t\t\tval = line.split()\n\t\t\t\tself.check_cols(val, 13, 'septic')\n\n\t\t\t\tsep = {\n\t\t\t\t\t'name': val[0].lower(),\n\t\t\t\t\t'q_rate': val[1],\n\t\t\t\t\t'bod': val[2],\n\t\t\t\t\t'tss': val[3],\n\t\t\t\t\t'nh4_n': val[4],\n\t\t\t\t\t'no3_n': val[5],\n\t\t\t\t\t'no2_n': val[6],\n\t\t\t\t\t'org_n': val[7],\n\t\t\t\t\t'min_p': val[8],\n\t\t\t\t\t'org_p': val[9],\n\t\t\t\t\t'fcoli': val[10],\n\t\t\t\t\t'description': val[12] if val[12] != 'null' else None # 12 index because extra column\n\t\t\t\t}\n\t\t\t\tseptics.append(sep)\n\t\t\ti += 1\n\n\t\tif database == 'project':\n\t\t\tdb_lib.bulk_insert(project_base.db, project_parmdb.Septic_sep, septics)\n\t\telse:\n\t\t\tdb_lib.bulk_insert(datasets_base.db, datasets_parmdb.Septic_sep, septics)",
"def csv_lines(base_name):\n path = os.path.join('data', base_name)\n with open(path, 'r') as csvfile:\n csvreader = csv.reader(csvfile)\n # Skip header line.\n next(csvreader)\n for line in csvreader:\n yield line",
"def get_raw_data():\n data_files = []\n for i, f in enumerate(os.listdir(config.RAW_DATA_DIR)):\n data_files.append(f)\n print i, \": \", f\n while True:\n try:\n index = int(raw_input(\"Type the index of the data file you'd like to import: \"))\n fn_raw_data = data_files[int(index)]\n break\n except ValueError:\n print(\"Not a valid index. Try again.\")\n except IndexError:\n print(\"Not a valid index. Try again.\")\n print \"Importing %s...\" % fn_raw_data\n with open(config.RAW_DATA_DIR + fn_raw_data) as infile:\n next(infile)\n raw_data = list(csv.DictReader(infile))\n return (fn_raw_data, raw_data)",
"def importAll():\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items",
"def from_csv(self, folder, sep=','):\n os.chdir(folder)\n for f in glob.glob(\"*.csv\"):\n name = f[:-4]\n with open(f) as ps:\n for line in ps:\n args = tuple(line.replace(' ', '').replace('\\n', '').split(sep))\n self.add_predicate(name, args)",
"def import_data(directory_name, product_file, customer_file, rentals_file):\n with MONGO:\n try:\n db = MONGO.connection.db\n products_col, customers_col, rentals_col = (db['products'],\n db['customers'],\n db['rentals'])\n csvs = [product_file, customer_file, rentals_file]\n data_dir = os.listdir(os.path.abspath(directory_name))\n records = []\n errors = []\n for csv in csvs:\n if csv in data_dir:\n if csv == product_file:\n try:\n LOGGER.info(\"CSV file is a {csv}\")\n errors_count = 0\n csv_list = []\n csv_dict = pd.read_csv(\n os.path.abspath(\n directory_name + '/' + csv)).to_dict(\n orient='records')\n for row in csv_dict:\n id = row.pop('product_id')\n row['_id'] = id\n csv_list.append(row)\n result = db.products_col.insert_many(\n csv_list, ordered=True)\n records.append(len(result.inserted_ids))\n LOGGER.info(\"Total records from %s are: %s\",\n csv, len(result.inserted_ids))\n except BulkWriteError:\n errors_count += 1\n errors.append(errors_count)\n elif csv == customer_file:\n try:\n LOGGER.info(\"CSV file is a {csv}\")\n errors_count = 0\n csv_list = []\n csv_dict = pd.read_csv(\n os.path.abspath(\n directory_name + '/' + csv)).to_dict(\n orient='records')\n for row in csv_dict:\n id = row.pop('user_id')\n row['_id'] = id\n csv_list.append(row)\n result = db.customers_col.insert_many(\n csv_list, ordered=True)\n records.append(len(result.inserted_ids))\n LOGGER.info(\"Total records from %s are: %s\",\n csv, len(result.inserted_ids))\n except BulkWriteError:\n errors_count += 1\n errors.append(errors_count)\n\n elif csv == rentals_file:\n try:\n LOGGER.info(\"CSV file is a {csv}\")\n errors_count = 0\n csv_list = []\n csv_dict = pd.read_csv(\n os.path.abspath(\n directory_name + '/' + csv)).to_dict(\n orient='records')\n result = db.rentals_col.insert_many(\n csv_dict, ordered=True)\n records.append(len(result.inserted_ids))\n LOGGER.info(\"Total records from %s are: %s\",\n csv, len(result.inserted_ids))\n except BulkWriteError:\n errors_count += 1\n errors.append(errors_count)\n except Exception as error:\n LOGGER.error(\"Error: %s\", error)\n finally:\n return tuple(records), tuple(errors)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
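A hedged usage sketch for open_general, assuming the module's datapath() helper resolves to a directory containing general/<file>.csv; the sheet name is hypothetical.

df = open_general("currencies")          # would read general/currencies.csv; hypothetical name
if df is not None:
    print(df.head())

# During package setup the same sheet can be requested via the setup flag.
df_setup = open_general("currencies", setup=True)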
Determines whether the discrepancy has been sufficiently resolved; used as return value for fix_discrepancy. | def discrepancy_resolved(self):
# If there's a discrepancy and distance change matches the existing data, we're good.
if self.distance_change == self.existing_data:
return True
# If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good
elif self.recommend_updates:
return True
else:
return False | [
"def checkIfRecovered(self, person) -> bool:\n # Days that the person showing symptom is around 17.8 days ~= 18 days \n if (person.getInfectedLength() >= 23):\n return True\n return False",
"def is_solved(value):\n return len(value) == 1",
"def check_initial_confidence(self): # pragma: no cover\n if self.test_type != 'perf':\n return True\n\n if self.required_initial_confidence is None:\n return True # pragma: no cover\n\n # TODO(robertocn): Remove all uses of \"confidence\".\n if self.dummy_initial_confidence is not None:\n self.initial_confidence = float(\n self.dummy_initial_confidence)\n if (float(self.initial_confidence) <\n float(self.required_initial_confidence)):\n self._set_insufficient_confidence_warning()\n return False\n return True\n\n if self.dummy_builds:\n dummy_result = self.good_rev.values != self.bad_rev.values\n if not dummy_result:\n self._set_insufficient_confidence_warning()\n return dummy_result\n\n with self.api.m.step.nest('Re-testing reference range'):\n expiration_time = time.time() + REGRESSION_CHECK_TIMEOUT\n while time.time() < expiration_time:\n if len(self.good_rev.values) >= 5 and len(self.bad_rev.values) >= 5:\n if self.significantly_different(self.good_rev.values,\n self.bad_rev.values):\n return True\n if len(self.good_rev.values) == len(self.bad_rev.values):\n revision_to_retest = self.last_tested_revision\n else:\n revision_to_retest = min(self.good_rev, self.bad_rev,\n key=lambda x: len(x.values))\n if len(revision_to_retest.values) < MAX_REQUIRED_SAMPLES:\n revision_to_retest.retest()\n else:\n break\n self._set_insufficient_confidence_warning()\n return False",
"def is_definitely_calculated(self):\n return self.__potential_numbers_to_play_with.__len__() == 0 and self.__calculated_solution_numbers.__len__() != 0",
"def solved(self):\n return not any(len(self.domain[key])>1 for key in self.domain)",
"def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False",
"def is_equivalence(self) -> bool:",
"def isUnresolved(self):\n \n return self.id == UNRESOLVED",
"def discrepancy(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result -= value * math.log(self.betP(focal), 2)\n return round(result, 6)",
"def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True",
"def is_absolutely_convergent(self):\n return Sum(abs(self.function), self.limits).is_convergent()",
"def check_convergence(self):\n return False",
"def is_solved(self):\n return self._start == self._target",
"def hasConflict(self):\n return len(self.coordinates) != len(set(self.coordinates))",
"def fully_charged(self):\n return not (self.remaining_demand > 1e-3)",
"def _has_needs_correcting(self, dframe):\n return (dframe.loc[dframe.sync_status == int(ConsentSyncStatus.NEEDS_CORRECTING)].shape[0] > 0)",
"def possible(x: float, p: Product) -> bool:\n defects_present = defects_counts(x, p.length)\n return contains(Counter(p.max_defects), defects_present)",
"def is_equimolar(self):\n return max(self.proportions) == min(self.proportions)",
"def is_solved(self):\n return not self.grid"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
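A toy illustration of the three outcomes, assuming the method can be called as a plain function (for example pulled off its class) and that recommend_updates behaves like a simple attribute here; the stand-in objects are hypothetical.

from types import SimpleNamespace

obj = SimpleNamespace(distance_change=5, existing_data=5, recommend_updates=False)
print(discrepancy_resolved(obj))   # True: the change matches the existing data

obj = SimpleNamespace(distance_change=5, existing_data=3, recommend_updates=True)
print(discrepancy_resolved(obj))   # True: an update is recommended

obj = SimpleNamespace(distance_change=5, existing_data=3, recommend_updates=False)
print(discrepancy_resolved(obj))   # False: the discrepancy remains unresolved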
Run when the palette is closed | def on_palette_close(self):
pass | [
"def _on_close(self):\n self.shell_obj.closed()",
"def panel_close_callback(self, panel, data):\n # We better make sure the presets are stored and saved\n self.store_preset()\n\n # Calling destroy() here, crashes LightWave (v11.0), so I have it\n # commented out, and relies on only setting the variables to None.\n # self._ui.destroy(self._panel)\n self._panel = None\n self._ui = None\n self._controls = None\n\n # Perhaps it would be better to remove the plugin completely when\n # closing the window? I keep that line here, commented out, during dev\n # until I've decided. If I keep it, I need to add a method to find the\n # actual index in the Master Plugins list.\n lwsdk.command('RemoveServer MasterHandler 1')",
"def end(self, event):\n plt.close()",
"def on_close(self, event):\n # Save pos and size\n x, y = self.GetPosition()\n width, height = self.GetSize()\n self.__config.set('window.x', x)\n self.__config.set('window.y', y)\n self.__config.set('window.width', width)\n self.__config.set('window.height', height)\n\n # Style\n style = self.GetWindowStyle()\n self.__config.set('window.style', style)\n\n self.__config.save()\n\n # Stop monitoring\n self.__cor.stop_monitor()\n\n # Kill graph as it seems to be stopping script from ending\n self.__graph = None\n\n # End\n event.Skip()",
"def on_palette_execute(self, palette: adsk.core.Palette):\r\n pass",
"def close(self, event, data):\n try:\n with open(self.save_file, \"w+\") as save_file:\n try:\n data = json.load(save_file)\n except ValueError:\n data = dict()\n data[\"color\"] = rgb_to_hex(self.rgb_color)\n json.dump(data, save_file)\n except (OSError, json.JSONDecodeError):\n print(\"Error when trying to set save file.\")\n Gtk.main_quit()",
"def close_preferences(self,event):\n self.Destroy()\n event.Skip()",
"def _shutdown(): \n for GD in GlobalDictionary._instances:\n print(\"\\nCleaning up:\", GD.name)\n GD._handler.close()\n del GD\n\n print(\"Shutting down\")\n \n sys.exit(0)",
"def on_close():\n save_window_dimensions()\n root.destroy()",
"def crb_exit(self):\n self.close()",
"def on_mouse_exit(self, event):\n pass",
"def __del__(self):\n self.window.close()",
"def on_close(self, *args):\n self.caller.show()\n self.destroy()",
"def onApplicationClose(self):\n self.movieDisplay.clearImageCache()\n self.quit()",
"def shutdown(self):\n self.window.close()",
"def cleanup(self):\n pygame.quit()",
"def windowClosed(self, renderWindow):\n global _needExit\n _needExit = True",
"def state_finish_exit(cfg, app, win):",
"def deinit(self):\n self._font.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Run when the palette is executed. Useful for gathering initial data and sending it to the HTML page. | def on_palette_execute(self, palette: adsk.core.Palette):
pass | [
"def __init__(self,palette_to_use='default'):\n palettes = {'default':self.initialize_default_palette,'gmd_paper':self.initialize_gmd_paper_palette}\n palettes[palette_to_use]()",
"def _on_palette_change(self, palette_data: dict) -> None:\n # set the color from the metadata\n color = self._label_to_rgb[palette_data['label']]\n # if the selected color is different, queue a cursor update\n if not np.array_equal(self._color, color):\n self.is_cursor_change = True\n # store the color with the new value\n self._color[:] = color\n # set the is brush flag\n self.is_brush = palette_data['paint'] == 'brush'\n # store the brush size with the new value\n self.brush_size = palette_data['brush_size']\n # if the palette is in super pixel mode, get that data\n if palette_data['paint'] == 'super_pixel':\n # get the algorithm from the dictionary\n algorithm = palette_data['super_pixel']\n # get the arguments for the specific algorithm\n arguments = palette_data[algorithm]\n # get the segments using the given algorithm and arguments\n segs = segment(self._image, algorithm, **arguments)\n # apply the segmented image pixels and segments to local structures\n self._super_pixel_segments[:], self._super_pixel[:] = segs\n # otherwise set the super pixel data back to 0\n else:\n self._super_pixel_segments[:] = 0\n self._super_pixel[:] = 0",
"def run(self):\n self.print_welcome()\n self.handle_inputs()",
"def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()",
"def start_displayhook(self):\n self.msg = self.session.msg(\n \"execute_result\",\n {\n \"data\": {},\n \"metadata\": {},\n },\n parent=self.parent_header,\n )",
"def injectThem(self):\r\n @self.app.context_processor\r\n def inject_vars():\r\n return dict(colorpicker=self)",
"def initialize_graphics():\n\n print \"###################################################################\"\n print \"### ####\"\n print \"### Demonstrate Through Python! ####\"\n print \"### ####\"\n print \"###################################################################\"",
"def on_reading(self, *args):\n self.set_colors()",
"def __init__(self, *args, **kwargs):\n _core_.QueryNewPaletteEvent_swiginit(self,_core_.new_QueryNewPaletteEvent(*args, **kwargs))",
"def start_displayhook(self):\n pass",
"def run_palette_editor(menu):\n\n pos = QtGui.QCursor().pos()\n palette_editor.run(pos)",
"def _on_pebble_ready(self, _):\n self._common_exit_hook()",
"def on_palette_close(self):\r\n pass",
"def start_preview(self):\n pass",
"def _populate_output(self):\n pass",
"def state_preview_do(cfg, app, win, events):",
"def populating_popup(self, *args):\n return _ida_hexrays.Hexrays_Hooks_populating_popup(self, *args)",
"def on_palette_color_set(self, btn):\n palette = []\n for i in range(16):\n palette.append(hexify_color(\n self.get_widget('palette_%d' % i).get_color()))\n palette = ':'.join(palette)\n self.client.set_string(KEY('/style/font/palette'), palette)\n self.set_palette_name(palette)",
"def on_startup(self) -> None:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
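A hedged sketch of how a subclass might fill in this hook, assuming the surrounding framework constructs the class and passes the palette in; DemoPaletteCommand is a hypothetical name, and palette.sendInfoToHTML is assumed to be the Fusion 360 method for pushing data to the page's JavaScript.

import json
import adsk.core  # only available inside the Fusion 360 Python environment

class DemoPaletteCommand:  # stands in for whatever base class defines the hook
    def on_palette_execute(self, palette: adsk.core.Palette):
        # Gather some initial data and send it to the palette's HTML page;
        # "initialData" is an arbitrary action name the page's JS would handle.
        payload = json.dumps({"status": "ready", "items": [1, 2, 3]})
        palette.sendInfoToHTML("initialData", payload)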
Builds the selection spec. | def build_selection_spec(client_factory, name):
sel_spec = client_factory.create('ns0:SelectionSpec')
sel_spec.name = name
return sel_spec | [
"def build_selection_spec(name):\n sel_spec = vmodl.query.PropertyCollector.SelectionSpec()\n sel_spec.name = name\n return sel_spec",
"def _make_select(self):\n conditions = []\n values = []\n picklist = None\n if self.selection_dict:\n select_d = self.selection_dict\n if 'ksize' in select_d and select_d['ksize']:\n conditions.append(\"sourmash_sketches.ksize = ?\")\n values.append(select_d['ksize'])\n if 'num' in select_d and select_d['num'] > 0:\n conditions.append(\"sourmash_sketches.num > 0\")\n if 'scaled' in select_d and select_d['scaled'] > 0:\n conditions.append(\"sourmash_sketches.scaled > 0\")\n if 'containment' in select_d and select_d['containment']:\n conditions.append(\"sourmash_sketches.scaled > 0\")\n if 'moltype' in select_d and select_d['moltype'] is not None:\n moltype = select_d['moltype']\n assert moltype in ('DNA', 'protein', 'dayhoff', 'hp'), moltype\n conditions.append(f\"sourmash_sketches.moltype = '{moltype}'\")\n\n picklist = select_d.get('picklist')\n\n return conditions, values, picklist",
"def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )",
"def create_selection():\n operation = Forward()\n nested = Group(Suppress(\"(\") + operation + Suppress(\")\")).setResultsName(\"nested\")\n select_expr = Forward()\n functions = select_functions(select_expr)\n maybe_nested = functions | nested | Group(var_val)\n operation <<= maybe_nested + OneOrMore(oneOf(\"+ - * /\") + maybe_nested)\n select_expr <<= operation | maybe_nested\n alias = Group(Suppress(upkey(\"as\")) + var).setResultsName(\"alias\")\n full_select = Group(\n Group(select_expr).setResultsName(\"selection\") + Optional(alias)\n )\n return Group(\n Keyword(\"*\") | upkey(\"count(*)\") | delimitedList(full_select)\n ).setResultsName(\"attrs\")",
"def build_query(self):\n\n # TODO: Translate the query parameters into a QueryBuild.Selectors object\n if(self.date is not None):\n date_search = Query.DateSearch(type = 'single_date', values = self.date)\n else:\n date_search = Query.DateSearch(type = 'interval', values = [self.start_date, self.end_date])\n\n Selector = Query.Selectors(date_search = date_search, number = self.number, filters = self.filter, return_object = None)\n return Selector",
"def generateSpec(self):\r\n if self.targetlang == 'python':\r\n self._genSpecPy()\r\n elif self.targetlang == 'c':\r\n self._genSpecC()\r\n elif self.targetlang == 'matlab':\r\n self._genSpecMatlab()\r\n elif self.targetlang == 'odetools':\r\n raise NotImplementedError\r\n elif self.targetlang == 'xpp':\r\n raise NotImplementedError\r\n else:\r\n raise ValueError('targetlang attribute must be in '+str(targetLangs))",
"def build_select(self, tree):\n assert isinstance(tree, sql_dialects.ast.Select)\n raise NotImplementedError()",
"def create_select(qualifier, lines, select_id=None):\n options = {} #{ option : [Label]}\n for label in lines.keys():\n option = qualifier(label)\n if (option not in options):\n options[option] = []\n options[option].append(label)\n option_list = list(options.keys())\n option_list.sort()\n print '<select class=\"lines\"',\n if select_id is not None:\n print 'id=%s' % qa(select_id)\n print 'multiple=\"true\" size=\"10\" onchange=\"updateSvg();\">'\n for option in option_list:\n print '<option value=' + qa('[' + \n reduce(lambda x,y:x+json.dumps(str(y))+',',options[option],\"\")[0:-1]\n + ']') + '>'+qe(option)+'</option>'\n print '</select>'",
"def build(self):\n return SuiteExpr(party=self.party_name, programs=list(self.programs))",
"def layout_selection(self):\n select_txt = wx.StaticText(self, -1, 'Selection Options')\n select_txt.SetForegroundColour('blue')\n self.selection_cbox = wx.ComboBox(self, -1, style=wx.CB_READONLY)\n list_of_options = ['Select all Data',\n 'Unselect all Data',\n 'Select all Data 1D',\n 'Unselect all Data 1D',\n 'Select all Data 2D',\n 'Unselect all Data 2D']\n for option in list_of_options:\n self.selection_cbox.Append(str(option))\n self.selection_cbox.SetValue('Select all Data')\n wx.EVT_COMBOBOX(self.selection_cbox, -1, self._on_selection_type)\n self.sizer5.AddMany([(select_txt, 0, wx.ALL, 5),\n (self.selection_cbox, 0, wx.ALL, 5)])\n self.enable_selection()",
"def test_generate_choice_input(self):\n \n\n label_to_use=str(self.test_option_choice).split('/')[-1].strip('--')\n obs=generate_choice_input('make_3d_plots','Test choice',\n 'Test choice',self.test_option_choice,\n label_to_use)\n \n exp='<tr><th>Test choice Test choice</th>' +\\\n '<td><select id=\"make_3d_plots:background_color\">\\n'+\\\n '<option selected>black\\n<option>white\\n</select></td></tr>\\n'\n \n self.assertEqual(obs,exp)",
"def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)",
"def buildRigFromSelection():\n # Get Selection\n sel = cmds.ls(sl=1)\n iso = cmds.filterExpand(sel, sm=45)\n if not iso: iso = []\n # Adjust Selection\n sel = list(set(sel) - set(iso))\n\n # Build Surface Rigs\n for surface in sel:\n\n # Check Surface\n if glTools.utils.surface.isSurface(surface):\n minU = cmds.getAttr(surface + '.minValueU')\n maxU = cmds.getAttr(surface + '.maxValueU')\n midU = minU + ((maxU - minU) * 0.5)\n buildRig(surface, uValue=midU)\n\n # Build Isoparm Rigs\n for crv in iso:\n surface = cmds.ls(crv, o=True)[0]\n uValue = float(crv.split('[')[-1].split(']')[0])\n buildRig(surface, uValue)",
"def _build(self):\n raise NotImplementedError",
"def build_schema(self, spec, **kwargs):\n pass",
"def add_selector(self, listing):\n # We will be able to select X-frames and its boundaries\n # will be stored in the given list\n\n def onselect(xmin, xmax):\n# indmin, indmax = np.searchsorted(x, (xmin, xmax))\n# indmax = min(len(x)-1, indmax)\n indmin = xmin\n indmax = xmax\n onselect.listing.append([indmin, indmax])\n print (onselect.listing)\n \n onselect.listing = listing\n \n # set useblit True on gtkagg for enhanced performance\n ax = self.axes\n span = SpanSelector(ax, onselect, 'horizontal', useblit=True,\n rectprops=dict(alpha=0.5, facecolor='red') )\n \n self.widget_list.append(span)",
"def test_toolselection_factory():\n stmt = ToolSelectionStmt.from_excellon('T01')\n assert_equal(stmt.tool, 1)\n assert_equal(stmt.compensation_index, None)\n stmt = ToolSelectionStmt.from_excellon('T0223')\n assert_equal(stmt.tool, 2)\n assert_equal(stmt.compensation_index, 23)\n stmt = ToolSelectionStmt.from_excellon('T042')\n assert_equal(stmt.tool, 42)\n assert_equal(stmt.compensation_index, None)",
"def _create_features_dropdown(self, name=_features_dropdown):\n fts = sorted(self.features)\n d = Select(options=fts, css_classes=[self._features_dropdown], name=name)\n return d",
"def build_traversal_spec(client_factory, name, spec_type, path, skip,\r\n select_set):\r\n traversal_spec = client_factory.create('ns0:TraversalSpec')\r\n traversal_spec.name = name\r\n traversal_spec.type = spec_type\r\n traversal_spec.path = path\r\n traversal_spec.skip = skip\r\n traversal_spec.selectSet = select_set\r\n return traversal_spec"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
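A hedged sketch of how the selection spec is typically paired with a traversal spec (the builder shown in the next record), assuming client_factory is a suds-style factory exposing create('ns0:...') as used above; a Mock stands in for a live vSphere SOAP session so the call shapes can be shown.

from unittest import mock

client_factory = mock.Mock()  # stands in for a real suds client factory
visit_folders = build_selection_spec(client_factory, "visitFolders")
dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
                                "hostFolder", False, [visit_folders])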
Builds the traversal spec object. | def build_traversal_spec(client_factory, name, spec_type, path, skip,
select_set):
traversal_spec = client_factory.create('ns0:TraversalSpec')
traversal_spec.name = name
traversal_spec.type = spec_type
traversal_spec.path = path
traversal_spec.skip = skip
traversal_spec.selectSet = select_set
return traversal_spec | [
"def build_traversal_spec(name, type_, path, skip, select_set):\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = name\n traversal_spec.type = type_\n traversal_spec.path = path\n traversal_spec.skip = skip\n traversal_spec.selectSet = select_set\n return traversal_spec",
"def build_recursive_traversal_spec():\n visit_folders_select_spec = build_selection_spec('visitFolders')\n # Next hop from Datacenter\n dc_to_hf = build_traversal_spec('dc_to_hf',\n vim.Datacenter,\n 'hostFolder',\n False,\n [visit_folders_select_spec])\n dc_to_vmf = build_traversal_spec('dc_to_vmf',\n vim.Datacenter,\n 'vmFolder',\n False,\n [visit_folders_select_spec])\n dc_to_netf = build_traversal_spec('dc_to_netf',\n vim.Datacenter,\n 'networkFolder',\n False,\n [visit_folders_select_spec])\n\n # Next hop from HostSystem\n h_to_vm = build_traversal_spec('h_to_vm',\n vim.HostSystem,\n 'vm',\n False,\n [visit_folders_select_spec])\n\n # Next hop from ComputeResource\n cr_to_h = build_traversal_spec('cr_to_h',\n vim.ComputeResource,\n 'host',\n False,\n [])\n cr_to_ds = build_traversal_spec('cr_to_ds',\n vim.ComputeResource,\n 'datastore',\n False,\n [])\n\n rp_to_rp_select_spec = build_selection_spec('rp_to_rp')\n rp_to_vm_select_spec = build_selection_spec('rp_to_vm')\n\n cr_to_rp = build_traversal_spec('cr_to_rp',\n vim.ComputeResource,\n 'resourcePool',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n\n # Next hop from ClusterComputeResource\n ccr_to_h = build_traversal_spec('ccr_to_h',\n vim.ClusterComputeResource,\n 'host',\n False,\n [])\n ccr_to_ds = build_traversal_spec('ccr_to_ds',\n vim.ClusterComputeResource,\n 'datastore',\n False,\n [])\n ccr_to_rp = build_traversal_spec('ccr_to_rp',\n vim.ClusterComputeResource,\n 'resourcePool',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n # Next hop from ResourcePool\n rp_to_rp = build_traversal_spec('rp_to_rp',\n vim.ResourcePool,\n 'resourcePool',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n rp_to_vm = build_traversal_spec('rp_to_vm',\n vim.ResourcePool,\n 'vm',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n\n # Get the assorted traversal spec which takes care of the objects to\n # be searched for from the rootFolder\n traversal_spec = build_traversal_spec('visitFolders',\n vim.Folder,\n 'childEntity',\n False,\n [visit_folders_select_spec,\n h_to_vm,\n dc_to_hf,\n dc_to_vmf,\n dc_to_netf,\n cr_to_ds,\n cr_to_h,\n cr_to_rp,\n ccr_to_h,\n ccr_to_ds,\n ccr_to_rp,\n rp_to_rp,\n rp_to_vm])\n return traversal_spec",
"def build_recursive_traversal_spec(client_factory):\r\n visit_folders_select_spec = build_selection_spec(client_factory,\r\n \"visitFolders\")\r\n # For getting to hostFolder from datacenter\r\n dc_to_hf = build_traversal_spec(client_factory, \"dc_to_hf\", \"Datacenter\",\r\n \"hostFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting to vmFolder from datacenter\r\n dc_to_vmf = build_traversal_spec(client_factory, \"dc_to_vmf\", \"Datacenter\",\r\n \"vmFolder\", False,\r\n [visit_folders_select_spec])\r\n # For getting Host System to virtual machine\r\n h_to_vm = build_traversal_spec(client_factory, \"h_to_vm\", \"HostSystem\",\r\n \"vm\", False,\r\n [visit_folders_select_spec])\r\n\r\n # For getting to Host System from Compute Resource\r\n cr_to_h = build_traversal_spec(client_factory, \"cr_to_h\",\r\n \"ComputeResource\", \"host\", False, [])\r\n\r\n # For getting to datastore from Compute Resource\r\n cr_to_ds = build_traversal_spec(client_factory, \"cr_to_ds\",\r\n \"ComputeResource\", \"datastore\", False, [])\r\n\r\n rp_to_rp_select_spec = build_selection_spec(client_factory, \"rp_to_rp\")\r\n rp_to_vm_select_spec = build_selection_spec(client_factory, \"rp_to_vm\")\r\n # For getting to resource pool from Compute Resource\r\n cr_to_rp = build_traversal_spec(client_factory, \"cr_to_rp\",\r\n \"ComputeResource\", \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to child res pool from the parent res pool\r\n rp_to_rp = build_traversal_spec(client_factory, \"rp_to_rp\", \"ResourcePool\",\r\n \"resourcePool\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # For getting to Virtual Machine from the Resource Pool\r\n rp_to_vm = build_traversal_spec(client_factory, \"rp_to_vm\", \"ResourcePool\",\r\n \"vm\", False,\r\n [rp_to_rp_select_spec, rp_to_vm_select_spec])\r\n\r\n # Get the assorted traversal spec which takes care of the objects to\r\n # be searched for from the root folder\r\n traversal_spec = build_traversal_spec(client_factory, \"visitFolders\",\r\n \"Folder\", \"childEntity\", False,\r\n [visit_folders_select_spec, dc_to_hf,\r\n dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,\r\n rp_to_rp, h_to_vm, rp_to_vm])\r\n return traversal_spec",
"def build_object_spec(root_folder, traversal_specs):\n object_spec = vmodl.query.PropertyCollector.ObjectSpec()\n object_spec.obj = root_folder\n object_spec.skip = False\n object_spec.selectSet = traversal_specs\n return object_spec",
"def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()",
"def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n rspec = RSpec(version=rspec_version)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n top_auth = resource_hrn.split('.')[0]\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],top_auth)\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n #print \"sfa_leases\", sfa_leases\n if sfa_leases:\n # SFAWRAP BUG ???\n # rspec.version.add_leases bugs with an empty set of leases\n # slice_id = leases[0]['slice_id']\n # TypeError: list indices must be integers, not str\n rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n \n return rspec.toxml()",
"def _build(self):\n raise NotImplementedError",
"def _BuildGenericTestSpec(self):\n device_files = []\n if self._args.obb_files:\n for obb_file in self._args.obb_files:\n device_files.append(self._messages.DeviceFile(\n obbFile=self._messages.ObbFile(\n obbFileName=os.path.basename(obb_file),\n obb=self._BuildFileReference(obb_file))))\n\n environment_variables = []\n if self._args.environment_variables:\n for key, value in self._args.environment_variables.iteritems():\n environment_variables.append(\n self._messages.EnvironmentVariable(\n key=key, value=value))\n\n directories_to_pull = self._args.directories_to_pull or []\n\n account = None\n if self._args.auto_google_login:\n account = self._messages.Account(googleAuto=self._messages.GoogleAuto())\n\n setup = self._messages.TestSetup(\n filesToPush=device_files,\n account=account,\n environmentVariables=environment_variables,\n directoriesToPull=directories_to_pull)\n\n return self._messages.TestSpecification(\n testTimeout=matrix_ops.ReformatDuration(self._args.timeout),\n testSetup=setup)",
"def _build(self) -> None:\n\n self.vocab = sorted(set(self.tokens).union({c.UNK}))\n self.vocab_size = len(self.vocab)\n\n self.vocab_index = {t: i for i, t in enumerate(self.vocab)}\n self.index_vocab = {i: t for i, t in enumerate(self.vocab)}",
"def build_rspec(cls, slice_urn, resources, leases, flowspace, vms, rspec_version='GENI 3 request'):\n import time\n start_time = None\n end_time = None\n\n # Default duration for WiLab is 2 hours\n duration_default = 120\n for lease in leases:\n if 'end_time' in lease:\n end_time = lease['end_time']\n start_time = lease['start_time']\n break\n\n if start_time is None:\n # start_time = Now\n start_time = time.time()\n\n if end_time is None:\n end_time = int(start_time + duration_default*60)\n #raise Exception, \"end_time is mandatory in leases\"\n\n # duration in seconds from now till end_time\n duration = end_time - start_time\n # duration in minutes\n duration = duration / 60\n duration = int(duration)\n if duration < duration_default:\n duration = duration_default\n Log.tmp(\"start_time = \",start_time)\n Log.tmp(\"end_time = \",end_time)\n Log.tmp(\"duration = \",duration)\n # RSpec will have expires date = now + duration\n rspec = RSpec(version=rspec_version, ttl=duration, expires=end_time)\n\n nodes = []\n channels = []\n links = []\n\n # XXX Here it is only about mappings and hooks between ontologies\n i = 0\n for urn in resources:\n # XXX TO BE CORRECTED, this handles None values\n if not urn:\n continue\n resource = dict()\n # TODO: take into account the case where we send a dict of URNs without keys\n #resource['component_id'] = resource.pop('urn')\n resource['component_id'] = urn\n resource_hrn, resource_type = urn_to_hrn(urn) # resource['component_id'])\n # build component_manager_id\n\n # The only change for WiLab compared to Generic SFAWrapParser\n cm = urn.split(\"+\")\n resource['component_manager_id'] = \"%s+%s+authority+cm\" % (cm[0],cm[1])\n\n #print \"resource_type\", resource_type\n if resource_type == 'node':\n #print \"NODE\", resource, cls\n resource['client_id'] = \"PC\" + str(i)\n resource = cls.on_build_resource_hook(resource)\n nodes.append(resource)\n elif resource_type == 'link':\n links.append(resource)\n elif resource_type == 'channel':\n channels.append(resource)\n else:\n raise Exception, \"Not supported type of resource\" \n\n i = i + 1\n #for node in nodes:\n # print \"NODE:\", node\n\n rspec.version.add_nodes(nodes, rspec_content_type=\"request\")\n #rspec.version.add_links(links)\n #rspec.version.add_channels(channels)\n\n #sfa_leases = cls.manifold_to_sfa_leases(leases, slice_urn)\n ##print \"sfa_leases\", sfa_leases\n #if sfa_leases:\n # # SFAWRAP BUG ???\n # # rspec.version.add_leases bugs with an empty set of leases\n # # slice_id = leases[0]['slice_id']\n # # TypeError: list indices must be integers, not str\n # rspec.version.add_leases(sfa_leases, []) # XXX Empty channels for now\n return rspec.toxml()",
"def create_behavior_tree():\n\n sequence = Selection(\"Root\")\n sequence.append(take_all())\n sequence.append(follow_home())\n\n return sequence",
"def _build_octree(self):\n\n # cleanup old tree\n self._nodes_positions = []\n self._nodes_mass = []\n self._nodes_sizes = []\n self._nodes_children_types = []\n self._nodes_children_ids = []\n\n min_pos = np.min(self._positions)\n max_pos = np.max(self._positions)\n\n self._build_octree_branch(\n bodies=list(range(self.bodies)),\n coords_min=np.array([min_pos] * 3),\n coords_max=np.array([max_pos] * 3)\n )",
"def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()",
"def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up",
"def build_schema(self, spec, **kwargs):\n pass",
"def ConstructTree(self):\n step = 0\n totalNodes = 0\n while step <= self.__steps:\n self.__nodes[step] = {}\n nUps = 0\n while nUps <= totalNodes:\n combins = BinomialOptionModel.__nCr(totalNodes, nUps)\n self.__nodes[step][nUps] = BinomNode(self.__underlyingStart, nUps, totalNodes - nUps, step, combins)\n nUps += 1\n totalNodes += 1\n step += 1\n # Price the option at each node:\n self.__CalcOptionPrices()\n # Determine asset prices at each node:\n self.__CalcAssetPrices()\n # Compute all the hedge ratios at each node:\n self.__ComputeSCHRs()\n # Compute all stock + bond replicating portfolio hedge ratios at each node:\n self.__ComputeSBHRs()",
"def tree_construct(self, *args, **kwargs):\n l_files = []\n d_constructCallback = {}\n fn_constructCallback = None\n d_probe = {}\n l_range = []\n\n for k, v in kwargs.items():\n if k == 'l_files': l_files = v\n if k == 'constructCallback': fn_constructCallback = v\n if k == 'd_probe': d_probe = v\n\n if d_probe: l_files = d_probe['l_files']\n index = 0\n total = len(l_files)\n if int(self.verbosityLevel) and self.toConsole():\n l_range = tqdm(l_files, desc = ' Constructing tree')\n else:\n l_range = l_files\n for l_series in l_range:\n if len(l_series):\n str_path = os.path.dirname(l_series[0])\n l_series = [ os.path.basename(i) for i in l_series]\n # self.simpleProgress_show(index, total)\n self.d_inputTree[str_path] = l_series\n if fn_constructCallback:\n kwargs['path'] = str_path\n d_constructCallback = fn_constructCallback(l_series, **kwargs)\n self.d_inputTreeCallback[str_path] = d_constructCallback\n self.d_outputTree[str_path] = \"\"\n index += 1\n return {\n 'status': True,\n 'd_constructCallback': d_constructCallback,\n 'totalNumberOfAllSeries': index,\n 'd_probe': d_probe\n }",
"def _build(self):\n output_types = tuple()\n output_shapes = tuple()\n\n # get types and shapes of EgoGraphs.\n src_ego_types, src_ego_shapes = \\\n self._ego_types_and_shapes(self._src_ego_spec)\n self._src_ego_size = len(src_ego_types)\n\n dst_ego_types, dst_ego_shapes = \\\n self._ego_types_and_shapes(self._dst_ego_spec)\n self._dst_ego_size = len(dst_ego_types)\n\n edge_ego_types, edge_ego_shapes = \\\n self._ego_types_and_shapes(self._edge_ego_spec)\n self._edge_ego_size = len(edge_ego_types)\n\n # pos src\n output_types += src_ego_types\n output_shapes += src_ego_shapes\n if self._dst_ego_spec is not None:\n # pos dst\n output_types += dst_ego_types\n output_shapes += dst_ego_shapes\n if self._negative_sample is not None:\n if self._edge_ego_spec is not None:\n # negative_sample return edges.\n # neg src\n output_types += src_ego_types\n output_shapes += src_ego_shapes\n # neg dst\n output_types += dst_ego_types\n output_shapes += dst_ego_shapes\n # pos edge\n if self._edge_ego_spec is not None:\n output_types += edge_ego_types\n output_shapes += edge_ego_shapes\n if self._negative_sample is not None:\n # neg edge\n output_types += edge_ego_types\n output_shapes += edge_ego_shapes\n\n # wrap dataset.\n if self._full_graph_mode: # constant tensors\n value = next(self._sample_generator())\n value = tuple([tf.convert_to_tensor(i) for i in value])\n else:\n dataset = tf.data.Dataset.from_generator(self._sample_generator,\n output_types,\n output_shapes)\n self._iterator = dataset.make_initializable_iterator()\n value = self._iterator.get_next()\n\n return self._construct_ego_tensors(value)",
"def recursive_swagger_spec(minimal_swagger_dict, node_spec):\n minimal_swagger_dict['definitions']['Node'] = node_spec\n return Spec(minimal_swagger_dict)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Builds the Recursive Traversal Spec to traverse the managed object hierarchy. | def build_recursive_traversal_spec(client_factory):
visit_folders_select_spec = build_selection_spec(client_factory,
"visitFolders")
# For getting to hostFolder from datacenter
dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
"hostFolder", False,
[visit_folders_select_spec])
# For getting to vmFolder from datacenter
dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
"vmFolder", False,
[visit_folders_select_spec])
    # For getting to virtual machines from the Host System
h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
"vm", False,
[visit_folders_select_spec])
# For getting to Host System from Compute Resource
cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
"ComputeResource", "host", False, [])
# For getting to datastore from Compute Resource
cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
"ComputeResource", "datastore", False, [])
rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
# For getting to resource pool from Compute Resource
cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
"ComputeResource", "resourcePool", False,
[rp_to_rp_select_spec, rp_to_vm_select_spec])
# For getting to child res pool from the parent res pool
rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
"resourcePool", False,
[rp_to_rp_select_spec, rp_to_vm_select_spec])
# For getting to Virtual Machine from the Resource Pool
rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
"vm", False,
[rp_to_rp_select_spec, rp_to_vm_select_spec])
# Get the assorted traversal spec which takes care of the objects to
# be searched for from the root folder
traversal_spec = build_traversal_spec(client_factory, "visitFolders",
"Folder", "childEntity", False,
[visit_folders_select_spec, dc_to_hf,
dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
rp_to_rp, h_to_vm, rp_to_vm])
return traversal_spec | [
"def build_recursive_traversal_spec():\n visit_folders_select_spec = build_selection_spec('visitFolders')\n # Next hop from Datacenter\n dc_to_hf = build_traversal_spec('dc_to_hf',\n vim.Datacenter,\n 'hostFolder',\n False,\n [visit_folders_select_spec])\n dc_to_vmf = build_traversal_spec('dc_to_vmf',\n vim.Datacenter,\n 'vmFolder',\n False,\n [visit_folders_select_spec])\n dc_to_netf = build_traversal_spec('dc_to_netf',\n vim.Datacenter,\n 'networkFolder',\n False,\n [visit_folders_select_spec])\n\n # Next hop from HostSystem\n h_to_vm = build_traversal_spec('h_to_vm',\n vim.HostSystem,\n 'vm',\n False,\n [visit_folders_select_spec])\n\n # Next hop from ComputeResource\n cr_to_h = build_traversal_spec('cr_to_h',\n vim.ComputeResource,\n 'host',\n False,\n [])\n cr_to_ds = build_traversal_spec('cr_to_ds',\n vim.ComputeResource,\n 'datastore',\n False,\n [])\n\n rp_to_rp_select_spec = build_selection_spec('rp_to_rp')\n rp_to_vm_select_spec = build_selection_spec('rp_to_vm')\n\n cr_to_rp = build_traversal_spec('cr_to_rp',\n vim.ComputeResource,\n 'resourcePool',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n\n # Next hop from ClusterComputeResource\n ccr_to_h = build_traversal_spec('ccr_to_h',\n vim.ClusterComputeResource,\n 'host',\n False,\n [])\n ccr_to_ds = build_traversal_spec('ccr_to_ds',\n vim.ClusterComputeResource,\n 'datastore',\n False,\n [])\n ccr_to_rp = build_traversal_spec('ccr_to_rp',\n vim.ClusterComputeResource,\n 'resourcePool',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n # Next hop from ResourcePool\n rp_to_rp = build_traversal_spec('rp_to_rp',\n vim.ResourcePool,\n 'resourcePool',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n rp_to_vm = build_traversal_spec('rp_to_vm',\n vim.ResourcePool,\n 'vm',\n False,\n [rp_to_rp_select_spec,\n rp_to_vm_select_spec])\n\n # Get the assorted traversal spec which takes care of the objects to\n # be searched for from the rootFolder\n traversal_spec = build_traversal_spec('visitFolders',\n vim.Folder,\n 'childEntity',\n False,\n [visit_folders_select_spec,\n h_to_vm,\n dc_to_hf,\n dc_to_vmf,\n dc_to_netf,\n cr_to_ds,\n cr_to_h,\n cr_to_rp,\n ccr_to_h,\n ccr_to_ds,\n ccr_to_rp,\n rp_to_rp,\n rp_to_vm])\n return traversal_spec",
"def build_object_spec(root_folder, traversal_specs):\n object_spec = vmodl.query.PropertyCollector.ObjectSpec()\n object_spec.obj = root_folder\n object_spec.skip = False\n object_spec.selectSet = traversal_specs\n return object_spec",
"def _traverse(self, **kwargs):\r\n filterlist=[]\r\n if kwargs.has_key('ref'):\r\n filterlist.append(lambda x: re.match(kwargs.get('ref'), x.ref))\r\n if kwargs.has_key('name'):\r\n filterlist.append(lambda x: re.match(kwargs.get('name'), x._name))\r\n if kwargs.has_key('path'):\r\n filterlist.append(lambda x: re.match(kwargs.get('path'), x._path()))\r\n if kwargs.has_key('type'):\r\n filterlist.append(lambda x: isinstance(x, kwargs.get('type')))\r\n if kwargs.has_key('filters'):\r\n filterlist += kwargs.get('filters')\r\n\r\n ret = []\r\n for child in self._objects():\r\n subchildren = child._tail_recurse(_apply_filter,filters=filterlist,depth=kwargs.get('depth',-1))\r\n ret += subchildren\r\n return ret",
"def test__build_sub_resoruces_hierarchy(self):\n root = mock.MagicMock()\n sub_resource = mock.MagicMock(relative_address=\"path1/path2\")\n attrs = mock.MagicMock()\n res_dict = defaultdict(list)\n res_dict[2] = [(\"path1\", sub_resource)]\n\n self.utils._LegacyUtils__set_models_hierarchy_recursively = mock.MagicMock()\n # act\n self.utils._LegacyUtils__build_sub_resoruces_hierarchy(root=root,\n sub_resources=[sub_resource],\n attributes=attrs)\n # verify\n self.utils._LegacyUtils__set_models_hierarchy_recursively.assert_called_once_with(res_dict,\n 1,\n root,\n '',\n attrs)",
"def build_traversal_spec(name, type_, path, skip, select_set):\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = name\n traversal_spec.type = type_\n traversal_spec.path = path\n traversal_spec.skip = skip\n traversal_spec.selectSet = select_set\n return traversal_spec",
"def recursive_swagger_spec(minimal_swagger_dict, node_spec):\n minimal_swagger_dict['definitions']['Node'] = node_spec\n return Spec(minimal_swagger_dict)",
"def object_build(self, node, obj):\n if obj in self._done:\n return self._done[obj]\n self._done[obj] = node\n for name in dir(obj):\n try:\n member = getattr(obj, name)\n except AttributeError:\n # damned ExtensionClass.Base, I know you're there !\n attach_dummy_node(node, name)\n continue\n if inspect.ismethod(member):\n member = member.__func__\n if inspect.isfunction(member):\n _build_from_function(node, name, member, self._module)\n elif inspect.isbuiltin(member):\n if not _io_discrepancy(member) and self.imported_member(\n node, member, name\n ):\n continue\n object_build_methoddescriptor(node, member, name)\n elif inspect.isclass(member):\n if self.imported_member(node, member, name):\n continue\n if member in self._done:\n class_node = self._done[member]\n if class_node not in node.locals.get(name, ()):\n node.add_local_node(class_node, name)\n else:\n class_node = object_build_class(node, member, name)\n # recursion\n self.object_build(class_node, member)\n if name == \"__class__\" and class_node.parent is None:\n class_node.parent = self._done[self._module]\n elif inspect.ismethoddescriptor(member):\n assert isinstance(member, object)\n object_build_methoddescriptor(node, member, name)\n elif inspect.isdatadescriptor(member):\n assert isinstance(member, object)\n object_build_datadescriptor(node, member, name)\n elif isinstance(member, _CONSTANTS):\n attach_const_node(node, name, member)\n elif inspect.isroutine(member):\n # This should be called for Jython, where some builtin\n # methods aren't caught by isbuiltin branch.\n _build_from_function(node, name, member, self._module)\n elif _safe_has_attribute(member, \"__all__\"):\n module = build_module(name)\n _attach_local_node(node, module, name)\n # recursion\n self.object_build(module, member)\n else:\n # create an empty node so that the name is actually defined\n attach_dummy_node(node, name, member)\n return None",
"def test_recursive(self):\n parent = Thing('parent')\n child = Thing('child')\n child.sibling = Thing('sibling')\n\n parent.self = parent\n parent.child = child\n parent.child.twin = child\n parent.child.parent = parent\n parent.child.sibling.parent = parent\n\n cloned = jsonpickle.decode(jsonpickle.encode(parent))\n\n self.assertEqual(parent.name,\n cloned.name)\n self.assertEqual(parent.child.name,\n cloned.child.name)\n self.assertEqual(parent.child.sibling.name,\n cloned.child.sibling.name)\n self.assertEqual(cloned,\n cloned.child.parent)\n self.assertEqual(cloned,\n cloned.child.sibling.parent)\n self.assertEqual(cloned,\n cloned.child.twin.parent)\n self.assertEqual(cloned.child,\n cloned.child.twin)",
"def test_get_folder_deep_and_complex_path(self):\n '#arrange'\n pv_service = pyVmomiService(None, None, Mock())\n\n def find_child_mock(*args):\n root = args[0]\n if hasattr(root, pv_service.ChildEntity):\n for folder in root.childEntity:\n if folder.name == args[1]:\n return folder\n else:\n for folder in root:\n if folder.name == args[1]:\n return folder\n return None\n\n si = create_autospec(spec=vim.ServiceInstance)\n si.RetrieveContent = Mock()\n si.content = create_autospec(spec=vim.ServiceInstanceContent())\n si.content.searchIndex = Mock()\n si.content.searchIndex.FindChild = MagicMock(side_effect=find_child_mock)\n\n first_folder = Mock(spec=[], name='first')\n first_folder.name = 'first'\n\n second_folder = Mock(spec=[], name='second')\n second_folder.name = 'second'\n\n third_folder = Mock(spec=[], name='third')\n third_folder.name = 'third'\n\n fourth_folder = Mock(spec=[], name='fourth')\n fourth_folder.name = 'fourth'\n\n fifth_folder = Mock(spec=[], name='fifth')\n fifth_folder.name = 'fifth'\n\n sixth_folder = Mock(spec=[], name='sixth')\n sixth_folder.name = 'sixth'\n\n si.content.rootFolder = Mock()\n si.content.rootFolder.name = 'rootFolder'\n si.content.rootFolder.childEntity = [first_folder, second_folder]\n first_folder.vmFolder = [second_folder, sixth_folder]\n second_folder.networkFolder = [fourth_folder, third_folder]\n third_folder.hostFolder = [third_folder, fourth_folder]\n fourth_folder.datacenterFolder = [fifth_folder]\n fifth_folder.datastoreFolder = [sixth_folder]\n\n '#act'\n result = pv_service.get_folder(si, 'first/second/third/fourth/fifth/sixth')\n\n '#assert'\n self.assertEqual(result, sixth_folder)",
"def __build_test_model_children_tree_1(self) -> Model:\n self.model_builder.clear()\n\n r_a = SystemFile(\"a\", 1024, True)\n r_aa = SystemFile(\"aa\", 512, False)\n r_a.add_child(r_aa)\n r_ab = SystemFile(\"ab\", 512, False)\n r_a.add_child(r_ab)\n r_b = SystemFile(\"b\", 3090, True)\n r_ba = SystemFile(\"ba\", 2048, True)\n r_b.add_child(r_ba)\n r_baa = SystemFile(\"baa\", 2048, False)\n r_ba.add_child(r_baa)\n r_bb = SystemFile(\"bb\", 42, True) # only in remote\n r_b.add_child(r_bb)\n r_bba = SystemFile(\"bba\", 42, False) # only in remote\n r_bb.add_child(r_bba)\n r_bd = SystemFile(\"bd\", 1000, False)\n r_b.add_child(r_bd)\n r_c = SystemFile(\"c\", 1234, False) # only in remote\n r_d = SystemFile(\"d\", 5678, True) # only in remote\n r_da = SystemFile(\"da\", 5678, False) # only in remote\n r_d.add_child(r_da)\n\n l_a = SystemFile(\"a\", 1024, True)\n l_aa = SystemFile(\"aa\", 512, False)\n l_a.add_child(l_aa)\n l_ab = SystemFile(\"ab\", 512, False)\n l_a.add_child(l_ab)\n l_b = SystemFile(\"b\", 1611, True)\n l_ba = SystemFile(\"ba\", 512, True)\n l_b.add_child(l_ba)\n l_baa = SystemFile(\"baa\", 512, False)\n l_ba.add_child(l_baa)\n l_bc = SystemFile(\"bc\", 99, True) # only in local\n l_b.add_child(l_bc)\n l_bca = SystemFile(\"bca\", 99, False) # only in local\n l_bc.add_child(l_bca)\n l_bd = SystemFile(\"bd\", 1000, False)\n l_b.add_child(l_bd)\n\n s_b = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.RUNNING, \"b\", \"\")\n s_b.total_transfer_state = LftpJobStatus.TransferState(1611, 3090, 52, 10, 1000)\n s_b.add_active_file_transfer_state(\"ba/baa\", LftpJobStatus.TransferState(512, 2048, 25, 5, 500))\n s_c = LftpJobStatus(0, LftpJobStatus.Type.PGET, LftpJobStatus.State.QUEUED, \"c\", \"\")\n s_d = LftpJobStatus(0, LftpJobStatus.Type.MIRROR, LftpJobStatus.State.QUEUED, \"d\", \"\")\n\n self.model_builder.set_remote_files([r_a, r_b, r_c, r_d])\n self.model_builder.set_local_files([l_a, l_b])\n self.model_builder.set_lftp_statuses([s_b, s_c, s_d])\n return self.model_builder.build_model()",
"def dfs(obj):\n log.inc()\n log.p(\"DFS \"+str(obj))\n traversal=[]\n if obj in visited:\n traversal=visited[obj]\n else: \n visited[obj] = [] # combat recusive loop\n constraints=map(lambda r: relsDict[r], obj.rel)\n allExtendedPhrases=[]\n for r in sorted(constraints):\n phrases=dfs(r)\n traversal.extend(phrases) # carry forward\n\n def isDirectConstraint(ph): \n isConstraint = ph[-1] in obj.rel\n return isConstraint\n\n extendedPhrases=[ ph + [obj.oid] for ph in phrases if isDirectConstraint(ph) ]\n allExtendedPhrases.append(extendedPhrases)\n\n traversal.append([obj.oid])\n for phrase in allExtendedPhrases: traversal.extend(phrase)\n visited[obj] = traversal\n\n log.p(\"result=\"+str(traversal))\n log.dec()\n return traversal",
"def _fetchObjectChildren(self, obj, obj_path):\n obj_children = []\n path_strings = []\n tree_items = []\n\n is_attr_list = [False] * len(obj_children)\n\n # Object attributes\n # Needed to handle errors while getting object's attributes\n # Related with spyder-ide/spyder#6728 and spyder-ide/spyder#9959\n for attr_name in dir(obj):\n try:\n attr_value = getattr(obj, attr_name)\n obj_children.append((attr_name, attr_value))\n path_strings.append('{}.{}'.format(obj_path, attr_name)\n if obj_path else attr_name)\n is_attr_list.append(True)\n except Exception:\n # Attribute could not be get\n pass\n assert len(obj_children) == len(path_strings), \"sanity check\"\n\n for item, path_str, is_attr in zip(obj_children, path_strings,\n is_attr_list):\n name, child_obj = item\n tree_items.append(TreeItem(child_obj, name, path_str, is_attr))\n\n return tree_items",
"def make_drs_tree(self):\n pass",
"def describeObj(obj, depth=4, path=None, ignore=None):\n if path is None:\n path = [obj]\n if ignore is None:\n ignore = {} ## holds IDs of objects used within the function.\n ignore[id(sys._getframe())] = None\n ignore[id(path)] = None\n gc.collect()\n refs = gc.get_referrers(obj)\n ignore[id(refs)] = None\n printed=False\n for ref in refs:\n if id(ref) in ignore:\n continue\n if id(ref) in list(map(id, path)):\n print(\"Cyclic reference: \" + refPathString([ref]+path))\n printed = True\n continue\n newPath = [ref]+path\n if len(newPath) >= depth:\n refStr = refPathString(newPath)\n if '[_]' not in refStr: ## ignore '_' references generated by the interactive shell\n print(refStr)\n printed = True\n else:\n describeObj(ref, depth, newPath, ignore)\n printed = True\n if not printed:\n print(\"Dead end: \" + refPathString(path))",
"def _list_traverse(self,**kwargs):\r\n return [child._path(self) for child in self._traverse(**kwargs)]",
"def objectTree(self, obj=None):\n\t\tif obj is None:\n\t\t\tobj = self\n\t\trw = self._rw\n\n\t\trdc.showObjectTree(bringToTop=True, refresh=True)",
"def test_help_on_objects(hlwm, path='', depth=8):\n help_txt = hlwm.call(['help', path]).stdout\n assert f\"Object '{path}'\" in help_txt\n\n if depth < 0:\n return\n\n for child in hlwm.list_children(path):\n newpath = (path + '.' + child).lstrip('.')\n test_help_on_objects(hlwm, path=newpath, depth=depth - 1)",
"def test_expanding_rmg_objects(self):\n self.assertEqual(expand_to_dict(self.highly_nested_object), self.highly_nest_dictionary)\n self.assertEqual(self.highly_nested_object.as_dict(), self.highly_nest_dictionary)",
"def LoadObjectRecursively(*args, **kwargs):\n return _xrc.XmlResource_LoadObjectRecursively(*args, **kwargs)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
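A hedged extension sketch for the traversal spec above: the pyVmomi-based negative in this row also follows the Datacenter-to-networkFolder hop, and the same hop could be added to the suds-style builder with one more build_traversal_spec call. The dc_to_netf name mirrors that negative; client_factory and visit_folders_select_spec are assumed to be in scope as inside the function above.

    # Assumed sketch: one extra hop from Datacenter to its network folder,
    # built the same way as dc_to_hf and dc_to_vmf in the function above.
    dc_to_netf = build_traversal_spec(client_factory, "dc_to_netf",
                                      "Datacenter", "networkFolder", False,
                                      [visit_folders_select_spec])

The new spec would then simply be appended to the select set passed to the final "visitFolders" traversal spec.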
Builds the Property Spec. | def build_property_spec(client_factory, type="VirtualMachine",
properties_to_collect=["name"],
all_properties=False):
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = all_properties
property_spec.pathSet = properties_to_collect
property_spec.type = type
return property_spec | [
"def build_property_spec(type_=vim.VirtualMachine,\n properties_to_collect=None, all_properties=False):\n if not properties_to_collect:\n properties_to_collect = ['name']\n\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.all = all_properties\n property_spec.pathSet = properties_to_collect\n property_spec.type = type_\n return property_spec",
"def build_property_filter_spec(property_specs, object_specs):\n property_filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n property_filter_spec.propSet = property_specs\n property_filter_spec.objectSet = object_specs\n return property_filter_spec",
"def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec",
"def as_podspec(self):\n print('\\n')\n print('Building Podspec for %s' % self.name)\n print('-----------------------------------------------------------')\n\n podspec = \"Pod::Spec.new do |s|\\n\\n\"\n podspec += \" s.name = '%s'\\n\" % self.name\n podspec += \" s.version = '%s'\\n\" % self.version\n podspec += \" s.summary = 'REPLACEME.'\\n\"\n podspec += \" s.homepage = 'REPLACEME'\\n\"\n podspec += \" s.license = 'REPLACEME'\\n\"\n podspec += \" s.author = 'REPLACEME'\\n\\n\"\n\n podspec += \" s.documentation_url = 'REPLACEME'\\n\\n\"\n\n podspec += \" s.swift_version = '5.0'\\n\"\n podspec += \" s.ios.deployment_target = '11.0'\\n\"\n\n podspec += \"\\n\" if len(self.dependencies) > 0 else \"\"\n\n for dep in self.dependencies:\n podspec += dep.as_podspec()\n\n podspec += \"\\n\"\n podspec += \" s.source = { :git => 'REPLACEME', :tag => s.version.to_s }\\n\"\n podspec += \" s.source_files = 'REPLACEME'\\n\" % (self.category, self.name)\n\n podspec += \"\\nend\"\n return podspec",
"def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template",
"def create_properties_files(self):\n\n print('Saving build parameters...')\n properties = {\n 'PRODUCT': self.product,\n 'RELEASE': self.release,\n 'PRODUCT_BRANCH': self.product_branch,\n 'VERSION': self.version,\n 'BLD_NUM': self.build_num,\n 'PROD_NAME': self.prod_name,\n 'PRODUCT_PATH': self.product_path,\n 'MANIFEST': str(self.manifest),\n 'PARENT': self.parent,\n 'BUILD_JOB': self.build_job,\n 'PLATFORMS': self.platforms,\n 'GO_VERSION': self.go_version,\n 'FORCE': self.force\n }\n\n with open(self.output_files['build-properties.json'], 'w') as fh:\n json.dump(properties, fh, indent=2, separators=(',', ': '))\n\n with open(self.output_files['build.properties'], 'w') as fh:\n plats = ' '.join(self.platforms)\n fh.write(f'PRODUCT={self.product}\\n'\n f'RELEASE={self.release}\\n'\n f'PRODUCT_BRANCH={self.product_branch}\\n'\n f'VERSION={self.version}\\n'\n f'BLD_NUM={self.build_num}\\n'\n f'PROD_NAME={self.prod_name}\\n'\n f'PRODUCT_PATH={self.product_path}\\n'\n f'MANIFEST={self.manifest}\\n'\n f'PARENT={self.parent}\\n'\n f'BUILD_JOB={self.build_job}\\n'\n f'PLATFORMS={plats}\\n'\n f'GO_VERSION={self.go_version}\\n'\n f'FORCE={self.force}\\n')",
"def build_object_spec(root_folder, traversal_specs):\n object_spec = vmodl.query.PropertyCollector.ObjectSpec()\n object_spec.obj = root_folder\n object_spec.skip = False\n object_spec.selectSet = traversal_specs\n return object_spec",
"def _create_property_field(property_, alias_dictionary):\n name_for_methods = property_['name_for_methods']\n\n assert property_['default_value'] is not None, \\\n ('MakeComputedStyleBase requires an default value for all fields, none specified '\n 'for property ' + property_['name'])\n\n if property_['field_template'] in alias_dictionary:\n alias_template = property_['field_template']\n for field in alias_dictionary[alias_template]:\n if field != 'name':\n property_[field] = alias_dictionary[alias_template][field]\n\n if property_['field_template'] == 'keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n assert property_['field_size'] is None, \\\n (\"'\" + property_['name'] + \"' is a keyword field, \"\n \"so it should not specify a field_size\")\n size = int(math.ceil(math.log(len(property_['keywords']), 2)))\n elif property_['field_template'] == 'multi_keyword':\n type_name = property_['type_name']\n default_value = type_name + '::' + enum_value_name(property_['default_value'])\n size = len(property_['keywords']) - 1 # Subtract 1 for 'none' keyword\n elif property_['field_template'] == 'external':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n elif property_['field_template'] == 'primitive':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = 1 if type_name == 'bool' else property_[\"field_size\"] # pack bools with 1 bit.\n elif property_['field_template'] == 'pointer':\n type_name = property_['type_name']\n default_value = property_['default_value']\n size = None\n else:\n assert property_['field_template'] == 'monotonic_flag', \"Please put a valid value for field_template\"\n type_name = 'bool'\n default_value = 'false'\n size = 1\n\n if property_['wrapper_pointer_name']:\n assert property_['field_template'] in ['pointer', 'external']\n if property_['field_template'] == 'external':\n type_name = '{}<{}>'.format(property_['wrapper_pointer_name'], type_name)\n\n return Field(\n 'property',\n name_for_methods,\n property_name=property_['name'],\n inherited=property_['inherited'],\n independent=property_['independent'],\n type_name=type_name,\n wrapper_pointer_name=property_['wrapper_pointer_name'],\n field_template=property_['field_template'],\n size=size,\n default_value=default_value,\n custom_copy=property_['custom_copy'],\n custom_compare=property_['custom_compare'],\n mutable=property_['mutable'],\n getter_method_name=property_['getter'],\n setter_method_name=property_['setter'],\n initial_method_name=property_['initial'],\n computed_style_custom_functions=property_['computed_style_custom_functions'],\n )",
"def _BuildGenericTestSpec(self):\n device_files = []\n if self._args.obb_files:\n for obb_file in self._args.obb_files:\n device_files.append(self._messages.DeviceFile(\n obbFile=self._messages.ObbFile(\n obbFileName=os.path.basename(obb_file),\n obb=self._BuildFileReference(obb_file))))\n\n environment_variables = []\n if self._args.environment_variables:\n for key, value in self._args.environment_variables.iteritems():\n environment_variables.append(\n self._messages.EnvironmentVariable(\n key=key, value=value))\n\n directories_to_pull = self._args.directories_to_pull or []\n\n account = None\n if self._args.auto_google_login:\n account = self._messages.Account(googleAuto=self._messages.GoogleAuto())\n\n setup = self._messages.TestSetup(\n filesToPush=device_files,\n account=account,\n environmentVariables=environment_variables,\n directoriesToPull=directories_to_pull)\n\n return self._messages.TestSpecification(\n testTimeout=matrix_ops.ReformatDuration(self._args.timeout),\n testSetup=setup)",
"def render_specification_properties(spec, newline='\\n', ignore_props=None, prepend_items=None, append_items=None):\n\n spec_prop_list = []\n if prepend_items is not None:\n spec_prop_list += prepend_items\n ignore_keys = [] if ignore_props is None else ignore_props\n # Add link properties\n if isinstance(spec, LinkSpec):\n spec_prop_list.append('**Target Type** %s' %\n RSTDocument.get_reference(RSTSectionLabelHelper.get_section_label(\n spec['target_type']),\n spec['target_type']))\n # Add dataset properties\n if isinstance(spec, DatasetSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n spec_prop_list.append('**Neurodata Type:** %s' % str(spec.data_type_def))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('linkable', None) is not None and 'linnkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add group properties\n if isinstance(spec, GroupSpec):\n if spec.data_type_def is not None and spec.def_key() not in ignore_keys:\n ntype = str(spec.data_type_def)\n spec_prop_list.append('**Neurodata Type:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(ntype),\n ntype))\n if spec.data_type_inc is not None and spec.inc_key() not in ignore_keys:\n extend_type = str(spec.data_type_inc)\n spec_prop_list.append('**Extends:** %s' %\n RSTDocument.get_reference(\n RSTSectionLabelHelper.get_section_label(extend_type),\n extend_type))\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('quantity', None) is not None and 'quantity' not in ignore_keys:\n spec_prop_list.append('**Quantity:** %s' % SpecToRST.quantity_to_string(spec['quantity']))\n if spec.get('linkable', None) is not None and 'linkable' not in ignore_keys:\n spec_prop_list.append('**Linkable:** %s' % str(spec['linkable']))\n # Add attribute spec properites\n if isinstance(spec, AttributeSpec):\n if 'primitive_type' not in ignore_keys:\n spec_prop_list.append('**Primitive Type:** %s' % SpecToRST.spec_basetype_name(spec))\n if spec.get('dtype', None) is not None and 'dtype' not in ignore_keys:\n spec_prop_list.append('**Data Type:** %s' % SpecToRST.render_data_type(spec['dtype']))\n if spec.get('dims', None) is not None and 'dims' not in ignore_keys:\n spec_prop_list.append('**Dimensions:** %s' % str(spec['dims']))\n if spec.get('shape', None) is not None and 'shape' not in ignore_keys:\n spec_prop_list.append('**Shape:** %s' % str(spec['shape']))\n if spec.get('required', None) is not None and 
'required' not in ignore_keys:\n spec_prop_list.append('**Required:** %s' % str(spec['required']))\n if spec.get('value', None) is not None and 'value' not in ignore_keys:\n spec_prop_list.append('**Value:** %s' % str(spec['value']))\n if spec.get('default_value', None) is not None and 'default_value' not in ignore_keys:\n spec_prop_list.append('**Default Value:** %s' % str(spec['default_value']))\n\n # Add common properties\n if spec.get('default_name', None) is not None:\n spec_prop_list.append('**Default Name:** %s' % str(spec['default_name']))\n if spec.get('name', None) is not None:\n spec_prop_list.append('**Name:** %s' % str(spec['name']))\n\n # Add custom items if necessary\n if append_items is not None:\n spec_prop_list += append_items\n\n # Render the specification properties list\n spec_doc = ''\n if len(spec_prop_list) > 0:\n spec_doc += newline\n for dp in spec_prop_list:\n spec_doc += newline + '- ' + dp\n spec_doc += newline\n # Return the rendered list\n return spec_doc",
"def generateSpec(self):\r\n if self.targetlang == 'python':\r\n self._genSpecPy()\r\n elif self.targetlang == 'c':\r\n self._genSpecC()\r\n elif self.targetlang == 'matlab':\r\n self._genSpecMatlab()\r\n elif self.targetlang == 'odetools':\r\n raise NotImplementedError\r\n elif self.targetlang == 'xpp':\r\n raise NotImplementedError\r\n else:\r\n raise ValueError('targetlang attribute must be in '+str(targetLangs))",
"def build_schema(self, spec, **kwargs):\n pass",
"def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()",
"def create_property(self, key, prop):\n\n setting = self.new_property(key, prop)\n setting.create()\n return setting",
"def build_traversal_spec(name, type_, path, skip, select_set):\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = name\n traversal_spec.type = type_\n traversal_spec.path = path\n traversal_spec.skip = skip\n traversal_spec.selectSet = select_set\n return traversal_spec",
"def build_property(value_token: ValueToken) -> property:\n def caller(_: Any) -> Any:\n return value_token.get_value()\n return property(caller)",
"def _makeProperty( key, value ):\n property = PropertyValue()\n property.Name = key\n property.Value = value\n return property",
"def test_build_property(self):\n v1 = versions.Version(version='1.2.3.4', name='foo')\n expected = 4\n\n self.assertEqual(v1.build, expected)",
"def model_spec(self):\n model_spec = dict(name=self.name,\n comps=[],\n dt=self.dt,\n datestart=self.datestart,\n datestop=self.datestop,\n tlm_code=None,\n mval_names=[])\n\n model_spec['pars'] = [dict(par) for par in self.pars]\n\n stringfy = lambda x: (str(x) if isinstance(x, component.ModelComponent)\n else x)\n for comp in self.comps:\n init_args = [stringfy(x) for x in comp.init_args]\n init_kwargs = dict((k, stringfy(v))\n for k, v in comp.init_kwargs.items())\n model_spec['comps'].append(dict(class_name=comp.__class__.__name__,\n name=comp.name,\n init_args=init_args,\n init_kwargs=init_kwargs))\n return model_spec"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
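A small usage sketch for build_property_spec as defined in this row; the vm_prop_spec name and the runtime.powerState property path are illustrative, not taken from the source.

    # Sketch: collect two named properties per VirtualMachine instead of
    # relying on the default ["name"].
    vm_prop_spec = build_property_spec(
        client_factory,
        type="VirtualMachine",
        properties_to_collect=["name", "runtime.powerState"])

Passing an explicit list also avoids relying on the shared mutable default argument in the signature above.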
Builds the Property Filter Spec. | def build_property_filter_spec(client_factory, property_specs, object_specs):
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_filter_spec.propSet = property_specs
property_filter_spec.objectSet = object_specs
return property_filter_spec | [
"def build_property_filter_spec(property_specs, object_specs):\n property_filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n property_filter_spec.propSet = property_specs\n property_filter_spec.objectSet = object_specs\n return property_filter_spec",
"def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\r\n prop_filter_spec = \\\r\n client_factory.create('ns0:PropertyFilterSpec')\r\n prop_filter_spec.propSet = prop_spec\r\n prop_filter_spec.objectSet = obj_spec\r\n return prop_filter_spec",
"def populate_filter_properties(self, request_spec, filter_properties):\n vol = request_spec['volume_properties']\n filter_properties['size'] = vol['size']\n filter_properties['availability_zone'] = vol.get('availability_zone')\n filter_properties['user_id'] = vol.get('user_id')\n filter_properties['metadata'] = vol.get('metadata')\n filter_properties['qos_specs'] = vol.get('qos_specs')",
"def build_property_spec(type_=vim.VirtualMachine,\n properties_to_collect=None, all_properties=False):\n if not properties_to_collect:\n properties_to_collect = ['name']\n\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.all = all_properties\n property_spec.pathSet = properties_to_collect\n property_spec.type = type_\n return property_spec",
"def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]",
"def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters",
"def __init__(self,filter_config:dict, property_list:list=None) -> None:\n self._filter_config = filter_config\n self._filter_dict = None\n self._filter_sets = None\n self._property_list = property_list\n\n if not filter_config:\n logger.warning(\"No Filter Configuration\")\n\n # parse all filters\n self._parse_config_dict(self._filter_config)",
"def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec",
"def create_filters(self):",
"def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters",
"def build_filtering(cls, model, spec, bases):\n ## Get the meta specification from the base models.\n base_filters = [e for e in [cls.get_filtering(base) for base in bases] if e is not None]\n\n ## Get the attributes of interest from the spec:\n attrs = dict([field for field in inspect.getmembers(spec) if not field[0].startswith(\"__\")])\n\n ## Extend bases and return:\n return type(\"Filtering\", tuple(base_filters), attrs)",
"def build_filter_props(buttons):\n ui = UILayout(\"PyxleyChart\")\n\n for b in buttons:\n ui.add_filter(b)\n\n return ui.build_props()",
"def build_filter(self, title, name, values):\n return {\n 'title': title,\n 'id': name,\n 'values': values\n }",
"def _generateFilterOptions(self, filterDataInput=None):\n\n filterName = \"\"\n filterFunction = None\n filterFunctionKwargs = {}\n callback = None\n callbackKwargs = {}\n filterKind = None\n filterValue = None\n postFilterFunction = None\n postFilterFunctionKwargs = {}\n filters = []\n\n filterReturnData = {\n \"filterName\":filterName,\n \"filterFunction\":filterFunction,\n \"filterFunctionKwargs\":filterFunctionKwargs,\n \"postFilterFunction\":postFilterFunction,\n \"postFilterFunctionKwargs\":postFilterFunctionKwargs,\n \"callback\":callback,\n \"callbackKwargs\":callbackKwargs,\n \"filterValue\":filterValue,\n }\n\n if filterDataInput == None:\n filters.append(filterReturnData)\n return filters\n\n if filterDataInput and isinstance(filterDataInput, str):\n filterDataInput = filterDataInput.split(\" \")\n for filterData in filterDataInput:\n if filterData.startswith(\"t#\"):\n filterKind = \"type\"\n filterName = filterData[2:]\n filterValue = filterName\n elif filterData.startswith(\"n#\"):\n filterKind = \"name\"\n filterName = filterData[2:]\n filterValue = filterName\n elif filterData.startswith(\"[\") and filterData.endswith(\"]\"):\n filterKind = self._parseAttributeFilterSyntax(filterData)\n attrFilterData = self._parseAttributeFilterData(filterData, filterKind)\n filterName = attrFilterData.get(\"filterParmName\")\n filterValue = attrFilterData.get(\"filterParmValue\")\n else: # will be considered a plain name\n filterKind = \"name\"\n filterName = filterData\n filterValue = filterName\n\n if filterKind:\n if filterKind == \"type\":\n filterFunction = self._getAttr\n filterFunctionKwargs = {\"methods\":[\"type\", \"name\"]}\n elif filterKind == \"name\":\n filterFunctionKwargs = {\"methods\":[\"name\"]}\n filterFunction = self._getAttr\n #attribute related filters\n elif filterKind == \"attr\":\n filterFunction = self._getAttr\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}]}\n elif filterKind == \"attrValue\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrIs\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrContains\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrContains\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrStarts\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrStarts\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrEnds\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrEnds\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrNot\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrNot\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n\n if filterKind ==\"name\" or filterKind ==\"type\":\n if filterName.find(\"*\") != -1:\n postFilterFunction = self._fnMatch\n postFilterFunctionKwargs = 
{\"pattern\":filterName}\n filterValue = True\n\n filterReturnData = {\n \"filterName\":filterName,\n \"filterFunction\":filterFunction,\n \"filterFunctionKwargs\":filterFunctionKwargs,\n \"postFilterFunction\":postFilterFunction,\n \"postFilterFunctionKwargs\":postFilterFunctionKwargs,\n \"callback\":callback,\n \"callbackKwargs\":callbackKwargs,\n \"filterValue\":filterValue,\n }\n\n filters.append(filterReturnData)\n\n return filters",
"def BuildFilterParser():\n field_name = pyp.Word(pyp.alphas, pyp.alphanums + \"_/.\")\n\n # Integer\n num_sign = pyp.Word(\"-+\", exact=1)\n number = pyp.Combine(pyp.Optional(num_sign) + pyp.Word(pyp.nums))\n number.setParseAction(lambda toks: int(toks[0]))\n\n quoted_string = pyp.quotedString.copy().setParseAction(pyp.removeQuotes)\n\n # Right-hand-side value\n rval = (number | quoted_string)\n\n # Boolean condition\n bool_cond = field_name.copy()\n bool_cond.setParseAction(lambda toks: [[OP_TRUE, toks[0]]])\n\n # Simple binary conditions\n binopstbl = {\n \"==\": OP_EQUAL,\n \"=\": OP_EQUAL, # legacy support\n \"!=\": OP_NOT_EQUAL, # legacy support\n \"<\": OP_LT,\n \"<=\": OP_LE,\n \">\": OP_GT,\n \">=\": OP_GE,\n }\n\n binary_cond = (field_name + pyp.oneOf(list(binopstbl)) + rval)\n binary_cond.setParseAction(lambda lhs_op_rhs: [[binopstbl[lhs_op_rhs[1]],\n lhs_op_rhs[0],\n lhs_op_rhs[2]]])\n\n # \"in\" condition\n in_cond = (rval + pyp.Suppress(\"in\") + field_name)\n in_cond.setParseAction(lambda value_field: [[OP_CONTAINS,\n value_field[1],\n value_field[0]]])\n\n # \"not in\" condition\n not_in_cond = (rval + pyp.Suppress(\"not\") + pyp.Suppress(\"in\") + field_name)\n not_in_cond.setParseAction(lambda value_field: [[OP_NOT, [OP_CONTAINS,\n value_field[1],\n value_field[0]]]])\n\n # Regular expression, e.g. m/foobar/i\n regexp_val = pyp.Group(pyp.Optional(\"m\").suppress() +\n pyp.MatchFirst([pyp.QuotedString(i, escChar=\"\\\\\")\n for i in _KNOWN_REGEXP_DELIM]) +\n pyp.Optional(pyp.Word(pyp.alphas), default=\"\"))\n regexp_val.setParseAction(_ConvertRegexpValue)\n regexp_cond = (field_name + pyp.Suppress(\"=~\") + regexp_val)\n regexp_cond.setParseAction(lambda field_value: [[OP_REGEXP, field_value[0],\n field_value[1]]])\n\n not_regexp_cond = (field_name + pyp.Suppress(\"!~\") + regexp_val)\n not_regexp_cond.setParseAction(lambda field_value:\n [[OP_NOT, [OP_REGEXP, field_value[0],\n field_value[1]]]])\n\n # Globbing, e.g. name =* \"*.site\"\n glob_cond = (field_name + pyp.Suppress(\"=*\") + quoted_string)\n glob_cond.setParseAction(lambda field_value:\n [[OP_REGEXP, field_value[0],\n utils.DnsNameGlobPattern(field_value[1])]])\n\n not_glob_cond = (field_name + pyp.Suppress(\"!*\") + quoted_string)\n not_glob_cond.setParseAction(lambda field_value:\n [[OP_NOT,\n [OP_REGEXP, field_value[0],\n utils.DnsNameGlobPattern(field_value[1])]]])\n\n # All possible conditions\n condition = (binary_cond ^ bool_cond ^\n in_cond ^ not_in_cond ^\n regexp_cond ^ not_regexp_cond ^\n glob_cond ^ not_glob_cond)\n\n # Associativity operators\n filter_expr = pyp.infixNotation(condition, [\n (pyp.Keyword(\"not\").suppress(), 1, pyp.opAssoc.RIGHT,\n lambda toks: [[OP_NOT, toks[0][0]]]),\n (pyp.Keyword(\"and\").suppress(), 2, pyp.opAssoc.LEFT,\n _ConvertLogicOp(OP_AND)),\n (pyp.Keyword(\"or\").suppress(), 2, pyp.opAssoc.LEFT,\n _ConvertLogicOp(OP_OR)),\n ])\n\n parser = pyp.StringStart() + filter_expr + pyp.StringEnd()\n parser.parseWithTabs()\n\n # Originally C{parser.validate} was called here, but there seems to be some\n # issue causing it to fail whenever the \"not\" operator is included above.\n\n return parser",
"def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters",
"def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter",
"def test_filter_spec(filter_args, expected_filter_spec):\n filter = Filter(**filter_args)\n _check_equal_filter_spec(filter.filter_spec, expected_filter_spec)\n # TODO: check other properties of filter_spec?",
"def _propertyFilter(self, entity, params):\n\n if 'property_conditions' not in params:\n raise ProtocolError()\n\n conditions = params['property_conditions']\n\n for field, allowed_values in conditions.iteritems():\n if entity.__getattribute__(field) not in allowed_values:\n return False\n\n return True"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
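Putting the three builders from the rows above together, a minimal retrieval sketch, assuming a vim handle like the one used in the next row and a service content that exposes a rootFolder attribute; the retrieve_vm_names name is illustrative.

def retrieve_vm_names(vim):
    # Sketch: traverse from the root folder and fetch the 'name' property of
    # every VirtualMachine reachable through the recursive traversal spec.
    client_factory = vim.client.factory
    traversal_spec = build_recursive_traversal_spec(client_factory)
    property_spec = build_property_spec(client_factory,
                                        type="VirtualMachine",
                                        properties_to_collect=["name"])
    object_spec = client_factory.create('ns0:ObjectSpec')
    object_spec.obj = vim.get_service_content().rootFolder
    object_spec.skip = False
    object_spec.selectSet = [traversal_spec]
    filter_spec = build_property_filter_spec(client_factory,
                                             [property_spec], [object_spec])
    collector = vim.get_service_content().propertyCollector
    return vim.RetrieveProperties(collector, specSet=[filter_spec])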
Gets the properties of the Managed object specified. | def get_object_properties(vim, collector, mobj, type, properties):
client_factory = vim.client.factory
if mobj is None:
return None
usecoll = collector
if usecoll is None:
usecoll = vim.get_service_content().propertyCollector
property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
property_spec = client_factory.create('ns0:PropertySpec')
property_spec.all = (properties is None or len(properties) == 0)
property_spec.pathSet = properties
property_spec.type = type
object_spec = client_factory.create('ns0:ObjectSpec')
object_spec.obj = mobj
object_spec.skip = False
property_filter_spec.propSet = [property_spec]
property_filter_spec.objectSet = [object_spec]
return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec]) | [
"def getProperties( cls ):\n\t\timport inspect\n\t\tfrom basicproperty.basic import BasicProperty\n\t\tdef isABasicProperty( object ):\n\t\t\t\"\"\"Predicate which checks to see if an object is a property\"\"\"\n\t\t\treturn isinstance( object, BasicProperty )\n\t\treturn dict(getmembers( cls, isABasicProperty)).values()",
"def get_object_properties(vim, collector, mobj, type, properties):\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\n property_spec = client_factory.create('ns0:PropertySpec')\n property_spec.all = (properties is None or len(properties) == 0)\n property_spec.pathSet = properties\n property_spec.type = type\n object_spec = client_factory.create('ns0:ObjectSpec')\n object_spec.obj = mobj\n object_spec.skip = False\n property_filter_spec.propSet = [property_spec]\n property_filter_spec.objectSet = [object_spec]\n return retrieve_properties_ex(vim,\n usecoll,\n [property_filter_spec])",
"def getProperties(self):\n return self.metadataByProperty.keys()",
"def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ...",
"def get_properties(self, model: Union[Model, ModelId, str]) -> List[ModelProperty]:\n with self.session() as s:\n return self.get_properties_tx(tx=s, model=model)",
"def _get_properties(cls):\n return get_class_properties(cls)",
"def properties(self) -> dict:\n return self._entity_properties",
"def _collectSimpleProperties(self, obj):\n if IChildren.providedBy(obj):\n return {}\n props = {}\n for key, value in obj._props.iteritems():\n if IProperty.providedBy(value):\n continue\n assert not isinstance(value, Persistent), (\"Persistent \"\n \"value %r in property %r\" % (value, key))\n props[key] = value\n return props",
"def get_properties_for_a_collection_of_objects(vim, type,\r\n obj_list, properties):\r\n client_factory = vim.client.factory\r\n if len(obj_list) == 0:\r\n return []\r\n prop_spec = get_prop_spec(client_factory, type, properties)\r\n lst_obj_specs = []\r\n for obj in obj_list:\r\n lst_obj_specs.append(get_obj_spec(client_factory, obj))\r\n prop_filter_spec = get_prop_filter_spec(client_factory,\r\n lst_obj_specs, [prop_spec])\r\n return vim.RetrieveProperties(vim.get_service_content().propertyCollector,\r\n specSet=[prop_filter_spec])",
"def object_attributes(obj):\n return obj.__dict__.items()",
"def getObjectProperty(self, owner: unicode, propertyName: unicode, saveableObjectClass: java.lang.Class, create: bool) -> ghidra.program.model.util.ObjectPropertyMap:\n ...",
"def getPropertiesAll():",
"def get_object_properties_dict(si, moref, properties_to_collect):\n obj_contents = get_object_properties(si, moref, properties_to_collect)\n if obj_contents is None:\n return {}\n property_dict = {}\n if hasattr(obj_contents[0], 'propSet'):\n dynamic_properties = obj_contents[0].propSet\n if dynamic_properties:\n for prop in dynamic_properties:\n property_dict[prop.name] = prop.val\n # The object may have information useful for logging\n if hasattr(obj_contents[0], 'missingSet'):\n for m in obj_contents[0].missingSet:\n LOG.warning(\"Unable to retrieve value for %(path)s \"\n \"Reason: %(reason)s\",\n {'path': m.path,\n 'reason': m.faultCause.localizedMessage})\n return property_dict",
"def getObjectPropertyMap(self, propertyName: unicode) -> ghidra.program.model.util.ObjectPropertyMap:\n ...",
"def properties(self):\n return OrgProperty.get_by_org(self.id)",
"def properties(self):\n return PropertyManager(session=self._session)",
"def getObjectProperties(self, target_iri: str) -> Query:\n\n logging.debug('Preparing query to get object properties for {0}'.format(\n target_iri))\n\n return prepareQuery(\"\"\"\n SELECT DISTINCT ?p ?name ?orgname ?parentOrgName\n WHERE {{\n <{target_iri}> ?p ?o .\n ?p rdf:type owl:ObjectProperty .\n {{\n {{\n ?o precis:hasName ?name .\n }}\n OPTIONAL\n {{\n {{\n ?o precis:employedAt ?org .\n ?org precis:hasName ?orgname .\n }}\n UNION\n {{\n ?o precis:degreeUniversity ?org .\n ?org precis:hasName ?orgname .\n }}\n UNION\n {{\n {{\n ?o precis:hasParentOrganization ?org .\n ?org precis:hasName ?orgname .\n }}\n OPTIONAL\n {{\n ?org precis:hasParentOrganization ?parentOrg .\n ?parentOrg precis:hasName ?parentOrgName .\n }}\n }}\n }}\n }}\n }}\n \"\"\".format(target_iri=target_iri),\n initNs=self.initN)",
"def model_properties(self):\n return self._model_props",
"def getProperties(groupId, contractId):\n\tprint \"Getting properties for group %s and contract %s\" % (groupId, contractId)\n\tproperty_parameters = { \"contractId\":contractId, \"groupId\":groupId }\n\tproperty_result = getResult('/papi/v0/properties', property_parameters)\n\t\n\tif \"properties\" in property_result:\n\t\tproperty_items = property_result['properties']['items']\n\telse:\n\t\tproperty_items = []\n\n\treturn (property_items)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
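Several of the retrieval candidates above (get_object_properties, get_object_properties_dict) revolve around flattening a vSphere PropertyCollector result, an ObjectContent whose propSet is a list of name/value DynamicProperty pairs, into a plain dict. A minimal standard-library sketch of that flattening step is shown below; the namedtuple stand-ins and the helper name propset_to_dict are illustrative only and not part of any real vSphere API.

    from collections import namedtuple

    # Stand-ins (illustrative only) for the DynamicProperty / ObjectContent shapes
    # a PropertyCollector call returns; the real types live in pyVmomi.
    DynamicProperty = namedtuple("DynamicProperty", ["name", "val"])
    ObjectContent = namedtuple("ObjectContent", ["propSet"])

    def propset_to_dict(obj_content):
        # Flatten one ObjectContent's propSet into a plain {name: value} dict.
        return {prop.name: prop.val for prop in (getattr(obj_content, "propSet", None) or [])}

    if __name__ == "__main__":
        oc = ObjectContent(propSet=[DynamicProperty("name", "vm-01"),
                                    DynamicProperty("runtime.powerState", "poweredOn")])
        print(propset_to_dict(oc))  # {'name': 'vm-01', 'runtime.powerState': 'poweredOn'}
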
Builds the Property Filter Spec Object. | def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
prop_filter_spec = \
client_factory.create('ns0:PropertyFilterSpec')
prop_filter_spec.propSet = prop_spec
prop_filter_spec.objectSet = obj_spec
return prop_filter_spec | [
"def build_property_filter_spec(property_specs, object_specs):\n property_filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n property_filter_spec.propSet = property_specs\n property_filter_spec.objectSet = object_specs\n return property_filter_spec",
"def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec",
"def populate_filter_properties(self, request_spec, filter_properties):\n vol = request_spec['volume_properties']\n filter_properties['size'] = vol['size']\n filter_properties['availability_zone'] = vol.get('availability_zone')\n filter_properties['user_id'] = vol.get('user_id')\n filter_properties['metadata'] = vol.get('metadata')\n filter_properties['qos_specs'] = vol.get('qos_specs')",
"def __init__(self,filter_config:dict, property_list:list=None) -> None:\n self._filter_config = filter_config\n self._filter_dict = None\n self._filter_sets = None\n self._property_list = property_list\n\n if not filter_config:\n logger.warning(\"No Filter Configuration\")\n\n # parse all filters\n self._parse_config_dict(self._filter_config)",
"def _createSpecificProperty(self, filter_name):\n import uno\n from com.sun.star.beans import PropertyValue\n if filter_name == \"impress_html_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('IsExportNotes', 0, True, 0),\n PropertyValue('PublishMode', 0, 0, 0),\n PropertyValue('Width', 0, 640, 0),\n PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"impress_pdf_Export\":\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('ExportNotesPages', 0, True, 0),\n PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif \"pdf_Export\" in filter_name :\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('SelectPdfVersion', 0, 1, 0),),), 0)\n elif filter_name in (\"draw_html_Export\", \"HTML (StarCalc)\"):\n property = PropertyValue('FilterData', 0,\n uno.Any('[]com.sun.star.beans.PropertyValue',\n (PropertyValue('Format', 0, 2, 0),),), 0)\n elif filter_name == \"Text (encoded)\":\n property = PropertyValue('FilterFlags', 0, 'UTF8,LF', 0)\n else:\n return []\n\n return [property, ]",
"def build_property_spec(type_=vim.VirtualMachine,\n properties_to_collect=None, all_properties=False):\n if not properties_to_collect:\n properties_to_collect = ['name']\n\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.all = all_properties\n property_spec.pathSet = properties_to_collect\n property_spec.type = type_\n return property_spec",
"def create_filters(self):",
"def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.str_params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.str_params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.str_params.get(param)\n\n if len(properties) > 0:\n filters['properties'] = properties\n\n return filters",
"def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = properties_to_collect\r\n property_spec.type = type\r\n return property_spec",
"def build_filtering(cls, model, spec, bases):\n ## Get the meta specification from the base models.\n base_filters = [e for e in [cls.get_filtering(base) for base in bases] if e is not None]\n\n ## Get the attributes of interest from the spec:\n attrs = dict([field for field in inspect.getmembers(spec) if not field[0].startswith(\"__\")])\n\n ## Extend bases and return:\n return type(\"Filtering\", tuple(base_filters), attrs)",
"def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters",
"def build_filter_props(buttons):\n ui = UILayout(\"PyxleyChart\")\n\n for b in buttons:\n ui.add_filter(b)\n\n return ui.build_props()",
"def _generateFilterOptions(self, filterDataInput=None):\n\n filterName = \"\"\n filterFunction = None\n filterFunctionKwargs = {}\n callback = None\n callbackKwargs = {}\n filterKind = None\n filterValue = None\n postFilterFunction = None\n postFilterFunctionKwargs = {}\n filters = []\n\n filterReturnData = {\n \"filterName\":filterName,\n \"filterFunction\":filterFunction,\n \"filterFunctionKwargs\":filterFunctionKwargs,\n \"postFilterFunction\":postFilterFunction,\n \"postFilterFunctionKwargs\":postFilterFunctionKwargs,\n \"callback\":callback,\n \"callbackKwargs\":callbackKwargs,\n \"filterValue\":filterValue,\n }\n\n if filterDataInput == None:\n filters.append(filterReturnData)\n return filters\n\n if filterDataInput and isinstance(filterDataInput, str):\n filterDataInput = filterDataInput.split(\" \")\n for filterData in filterDataInput:\n if filterData.startswith(\"t#\"):\n filterKind = \"type\"\n filterName = filterData[2:]\n filterValue = filterName\n elif filterData.startswith(\"n#\"):\n filterKind = \"name\"\n filterName = filterData[2:]\n filterValue = filterName\n elif filterData.startswith(\"[\") and filterData.endswith(\"]\"):\n filterKind = self._parseAttributeFilterSyntax(filterData)\n attrFilterData = self._parseAttributeFilterData(filterData, filterKind)\n filterName = attrFilterData.get(\"filterParmName\")\n filterValue = attrFilterData.get(\"filterParmValue\")\n else: # will be considered a plain name\n filterKind = \"name\"\n filterName = filterData\n filterValue = filterName\n\n if filterKind:\n if filterKind == \"type\":\n filterFunction = self._getAttr\n filterFunctionKwargs = {\"methods\":[\"type\", \"name\"]}\n elif filterKind == \"name\":\n filterFunctionKwargs = {\"methods\":[\"name\"]}\n filterFunction = self._getAttr\n #attribute related filters\n elif filterKind == \"attr\":\n filterFunction = self._getAttr\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}]}\n elif filterKind == \"attrValue\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrIs\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrContains\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrContains\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrStarts\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrStarts\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrEnds\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrEnds\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n elif filterKind == \"attrNot\":\n targetValue = filterValue\n filterValue = None\n filterFunction = self._attrNot\n filterFunctionKwargs = {\"methods\":[{\"name\":\"parm\", \"args\":filterName}, {\"name\":\"evalAsString\"}], \"targetValue\":targetValue, \"targetParm\":filterName}\n\n if filterKind ==\"name\" or filterKind ==\"type\":\n if filterName.find(\"*\") != -1:\n postFilterFunction = self._fnMatch\n postFilterFunctionKwargs = 
{\"pattern\":filterName}\n filterValue = True\n\n filterReturnData = {\n \"filterName\":filterName,\n \"filterFunction\":filterFunction,\n \"filterFunctionKwargs\":filterFunctionKwargs,\n \"postFilterFunction\":postFilterFunction,\n \"postFilterFunctionKwargs\":postFilterFunctionKwargs,\n \"callback\":callback,\n \"callbackKwargs\":callbackKwargs,\n \"filterValue\":filterValue,\n }\n\n filters.append(filterReturnData)\n\n return filters",
"def build_filter(self, title, name, values):\n return {\n 'title': title,\n 'id': name,\n 'values': values\n }",
"def _get_filters(self, req):\n filters = {}\n properties = {}\n\n for param in req.params:\n if param in SUPPORTED_FILTERS:\n filters[param] = req.params.get(param)\n if param.startswith('property-'):\n _param = param[9:]\n properties[_param] = req.params.get(param)\n\n if 'changes-since' in filters:\n isotime = filters['changes-since']\n try:\n filters['changes-since'] = timeutils.parse_isotime(isotime)\n except ValueError:\n raise exc.HTTPBadRequest(_(\"Unrecognized changes-since value\"))\n\n if 'protected' in filters:\n value = self._get_bool(filters['protected'])\n if value is None:\n raise exc.HTTPBadRequest(_(\"protected must be True, or \"\n \"False\"))\n\n filters['protected'] = value\n\n # only allow admins to filter on 'deleted'\n if req.context.is_admin:\n deleted_filter = self._parse_deleted_filter(req)\n if deleted_filter is not None:\n filters['deleted'] = deleted_filter\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n elif 'changes-since' not in filters:\n filters['deleted'] = False\n\n if properties:\n filters['properties'] = properties\n\n return filters",
"def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter",
"def BuildFilterParser():\n field_name = pyp.Word(pyp.alphas, pyp.alphanums + \"_/.\")\n\n # Integer\n num_sign = pyp.Word(\"-+\", exact=1)\n number = pyp.Combine(pyp.Optional(num_sign) + pyp.Word(pyp.nums))\n number.setParseAction(lambda toks: int(toks[0]))\n\n quoted_string = pyp.quotedString.copy().setParseAction(pyp.removeQuotes)\n\n # Right-hand-side value\n rval = (number | quoted_string)\n\n # Boolean condition\n bool_cond = field_name.copy()\n bool_cond.setParseAction(lambda toks: [[OP_TRUE, toks[0]]])\n\n # Simple binary conditions\n binopstbl = {\n \"==\": OP_EQUAL,\n \"=\": OP_EQUAL, # legacy support\n \"!=\": OP_NOT_EQUAL, # legacy support\n \"<\": OP_LT,\n \"<=\": OP_LE,\n \">\": OP_GT,\n \">=\": OP_GE,\n }\n\n binary_cond = (field_name + pyp.oneOf(list(binopstbl)) + rval)\n binary_cond.setParseAction(lambda lhs_op_rhs: [[binopstbl[lhs_op_rhs[1]],\n lhs_op_rhs[0],\n lhs_op_rhs[2]]])\n\n # \"in\" condition\n in_cond = (rval + pyp.Suppress(\"in\") + field_name)\n in_cond.setParseAction(lambda value_field: [[OP_CONTAINS,\n value_field[1],\n value_field[0]]])\n\n # \"not in\" condition\n not_in_cond = (rval + pyp.Suppress(\"not\") + pyp.Suppress(\"in\") + field_name)\n not_in_cond.setParseAction(lambda value_field: [[OP_NOT, [OP_CONTAINS,\n value_field[1],\n value_field[0]]]])\n\n # Regular expression, e.g. m/foobar/i\n regexp_val = pyp.Group(pyp.Optional(\"m\").suppress() +\n pyp.MatchFirst([pyp.QuotedString(i, escChar=\"\\\\\")\n for i in _KNOWN_REGEXP_DELIM]) +\n pyp.Optional(pyp.Word(pyp.alphas), default=\"\"))\n regexp_val.setParseAction(_ConvertRegexpValue)\n regexp_cond = (field_name + pyp.Suppress(\"=~\") + regexp_val)\n regexp_cond.setParseAction(lambda field_value: [[OP_REGEXP, field_value[0],\n field_value[1]]])\n\n not_regexp_cond = (field_name + pyp.Suppress(\"!~\") + regexp_val)\n not_regexp_cond.setParseAction(lambda field_value:\n [[OP_NOT, [OP_REGEXP, field_value[0],\n field_value[1]]]])\n\n # Globbing, e.g. name =* \"*.site\"\n glob_cond = (field_name + pyp.Suppress(\"=*\") + quoted_string)\n glob_cond.setParseAction(lambda field_value:\n [[OP_REGEXP, field_value[0],\n utils.DnsNameGlobPattern(field_value[1])]])\n\n not_glob_cond = (field_name + pyp.Suppress(\"!*\") + quoted_string)\n not_glob_cond.setParseAction(lambda field_value:\n [[OP_NOT,\n [OP_REGEXP, field_value[0],\n utils.DnsNameGlobPattern(field_value[1])]]])\n\n # All possible conditions\n condition = (binary_cond ^ bool_cond ^\n in_cond ^ not_in_cond ^\n regexp_cond ^ not_regexp_cond ^\n glob_cond ^ not_glob_cond)\n\n # Associativity operators\n filter_expr = pyp.infixNotation(condition, [\n (pyp.Keyword(\"not\").suppress(), 1, pyp.opAssoc.RIGHT,\n lambda toks: [[OP_NOT, toks[0][0]]]),\n (pyp.Keyword(\"and\").suppress(), 2, pyp.opAssoc.LEFT,\n _ConvertLogicOp(OP_AND)),\n (pyp.Keyword(\"or\").suppress(), 2, pyp.opAssoc.LEFT,\n _ConvertLogicOp(OP_OR)),\n ])\n\n parser = pyp.StringStart() + filter_expr + pyp.StringEnd()\n parser.parseWithTabs()\n\n # Originally C{parser.validate} was called here, but there seems to be some\n # issue causing it to fail whenever the \"not\" operator is included above.\n\n return parser",
"def get_filters(self):",
"def buildRegFilterList(self, outFH, filterList):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
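The record above builds a PropertyFilterSpec through a suds-style client_factory, while candidates such as build_property_filter_spec and build_property_spec do the same with pyVmomi's vmodl.query.PropertyCollector classes. A rough sketch of that construction with pyVmomi, assuming pyVmomi is installed and using the hypothetical helper name build_filter_spec, could look like:

    from pyVmomi import vim, vmodl

    def build_filter_spec(obj, path_set=None, obj_type=vim.VirtualMachine):
        # ObjectSpec: the managed object the collection starts from.
        obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
        obj_spec.obj = obj
        obj_spec.skip = False
        # PropertySpec: which properties of which type to fetch.
        prop_spec = vmodl.query.PropertyCollector.PropertySpec()
        prop_spec.type = obj_type
        prop_spec.all = not path_set
        prop_spec.pathSet = path_set or []
        # FilterSpec ties the object set and property set together.
        filter_spec = vmodl.query.PropertyCollector.FilterSpec()
        filter_spec.objectSet = [obj_spec]
        filter_spec.propSet = [prop_spec]
        return filter_spec

The resulting spec can then be handed to a property collector call such as RetrieveContents, as the collect_properties candidate further down shows.
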
Gets the list of properties for the collection of objects of the type specified. | def get_properties_for_a_collection_of_objects(vim, type,
obj_list, properties):
client_factory = vim.client.factory
if len(obj_list) == 0:
return []
prop_spec = get_prop_spec(client_factory, type, properties)
lst_obj_specs = []
for obj in obj_list:
lst_obj_specs.append(get_obj_spec(client_factory, obj))
prop_filter_spec = get_prop_filter_spec(client_factory,
lst_obj_specs, [prop_spec])
return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
specSet=[prop_filter_spec]) | [
"def get_all_properties_type():\n\n results = client.db.property_types.find({})\n return send_result(data=list(results))",
"def getProperties( cls ):\n\t\timport inspect\n\t\tfrom basicproperty.basic import BasicProperty\n\t\tdef isABasicProperty( object ):\n\t\t\t\"\"\"Predicate which checks to see if an object is a property\"\"\"\n\t\t\treturn isinstance( object, BasicProperty )\n\t\treturn dict(getmembers( cls, isABasicProperty)).values()",
"def properties(cls):\n _validate(cls)\n result = []\n for key, value in cls.__dict__.items():\n if isinstance(value, property):\n result.append(key)\n return list(sorted(result))",
"def get_property_list(cls):\n return [\n attr\n for attr in dir(cls)\n if attr in cls.__dict__\n and isinstance(cls.__dict__[attr], hybrid_property)\n and getattr(getattr(cls, attr), \"_is_pg_property\", True)\n ]",
"def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)",
"def supported_type_properties() -> List[TypeProperty]:\n types_props: List[TypeProperty] = []\n for det in PLACE_DETECTORS:\n types_props.extend(det.supported_types_and_properties())\n\n return types_props",
"def _get_properties(cls):\n return get_class_properties(cls)",
"def get_properties(self, model: Union[Model, ModelId, str]) -> List[ModelProperty]:\n with self.session() as s:\n return self.get_properties_tx(tx=s, model=model)",
"def collect_properties(service_instance, view_ref, obj_type, path_set=None,\n include_mors=False):\n collector = service_instance.content.propertyCollector\n\n # Create object specification to define the starting point of\n # inventory navigation\n obj_spec = vmodl.query.PropertyCollector.ObjectSpec()\n obj_spec.obj = view_ref\n obj_spec.skip = True\n\n # Create a traversal specification to identify the path for collection\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()\n traversal_spec.name = 'traverseEntities'\n traversal_spec.path = 'view'\n traversal_spec.skip = False\n traversal_spec.type = view_ref.__class__\n obj_spec.selectSet = [traversal_spec]\n\n # Identify the properties to the retrieved\n property_spec = vmodl.query.PropertyCollector.PropertySpec()\n property_spec.type = obj_type\n\n if not path_set:\n property_spec.all = True\n\n property_spec.pathSet = path_set\n\n # Add the object and property specification to the\n # property filter specification\n filter_spec = vmodl.query.PropertyCollector.FilterSpec()\n filter_spec.objectSet = [obj_spec]\n filter_spec.propSet = [property_spec]\n\n # Retrieve properties\n props = collector.RetrieveContents([filter_spec])\n\n data = []\n for obj in props:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n\n if include_mors:\n properties['obj'] = obj.obj\n\n data.append(properties)\n return data",
"def list_by_type(self) -> typing.List[typing.Tuple[str, ProcsT]]:\n return utils.list_by_x(self.list(), 'type') # type: ignore",
"def list_all_properties(self):\n properties = list(self.property_only_graph.nodes())\n properties = [SchemaProperty(_prop, self) for _prop in properties]\n return properties",
"def list_properties(self) -> List[str]:\n return list(self._int_list_properties.keys())",
"def getProperties(self):\n return self.metadataByProperty.keys()",
"def getPropertiesAll():",
"def get_properties(self):\n properties = []\n for prop_dict in self.product.get_properties():\n prop = prop_dict['property']\n property_group = prop_dict['property_group']\n price = \"\"\n\n try:\n cipv = CartItemPropertyValue.objects.get(cart_item=self,\n property=prop,\n property_group=property_group)\n except CartItemPropertyValue.DoesNotExist:\n continue\n\n if prop.is_select_field:\n try:\n option = PropertyOption.objects.get(pk=int(float(cipv.value)))\n except (PropertyOption.DoesNotExist, ValueError):\n value = cipv.value\n price = 0.0\n else:\n value = option.name\n price = option.price\n\n elif prop.is_number_field:\n format_string = \"%%.%sf\" % prop.decimal_places\n try:\n value = format_string % float(cipv.value)\n except ValueError:\n value = locale.format(\"%.2f\", float(cipv.value))\n else:\n value = cipv.value\n\n properties.append({\n \"name\": prop.name,\n \"title\": prop.title,\n \"unit\": prop.unit,\n \"display_price\": prop.display_price,\n \"value\": value,\n \"price\": price,\n \"obj\": prop,\n \"property_group\": property_group,\n \"property_group_name\": property_group.name\n })\n\n properties = sorted(properties, key=lambda x: '{0}-{1}'.format(x['property_group_name'], x['obj'].position))\n return properties",
"def get_object_properties(vim, collector, mobj, type, properties):\r\n client_factory = vim.client.factory\r\n if mobj is None:\r\n return None\r\n usecoll = collector\r\n if usecoll is None:\r\n usecoll = vim.get_service_content().propertyCollector\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = (properties is None or len(properties) == 0)\r\n property_spec.pathSet = properties\r\n property_spec.type = type\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = mobj\r\n object_spec.skip = False\r\n property_filter_spec.propSet = [property_spec]\r\n property_filter_spec.objectSet = [object_spec]\r\n return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])",
"def list_property(\n self, key: str) -> Collection[Tuple[str, PropertyAttribute]]:\n return self._env.list_property(key)",
"def iterProperties(cls):\n meta = cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()",
"def getProperties(self, owner: unicode) -> List[ghidra.program.model.util.PropertyMap]:\n ..."
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
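The record above batches one ObjectSpec per managed object against a single shared PropertySpec before calling RetrieveProperties. A condensed pyVmomi sketch of the same batching pattern, assuming pyVmomi and a live connection, with the hypothetical helper name collect_for_objects and content standing for the result of si.RetrieveContent(), could look like:

    from pyVmomi import vim, vmodl

    def collect_for_objects(content, objects, path_set=None, obj_type=vim.VirtualMachine):
        # Mirror the record above: an empty object list short-circuits to [].
        if not objects:
            return []
        # One ObjectSpec per managed object.
        obj_specs = []
        for obj in objects:
            obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
            obj_spec.obj = obj
            obj_spec.skip = False
            obj_specs.append(obj_spec)
        # A single PropertySpec shared by every object in the batch.
        prop_spec = vmodl.query.PropertyCollector.PropertySpec()
        prop_spec.type = obj_type
        prop_spec.all = not path_set
        prop_spec.pathSet = path_set or []
        filter_spec = vmodl.query.PropertyCollector.FilterSpec()
        filter_spec.objectSet = obj_specs
        filter_spec.propSet = [prop_spec]
        # RetrieveContents plays the role of the RetrieveProperties call above.
        return content.propertyCollector.RetrieveContents([filter_spec])
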
Run `code` with profiler. Used by ``%prun`` and ``%run -p``. | def _run_with_profiler(self, code, opts, namespace):
# Fill default values for unspecified options:
opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
prof = profile.Profile()
try:
prof = prof.runctx(code, namespace, namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
stats_stream = stats.stream
try:
stats.stream = stdout_trap
stats.print_stats(*lims)
finally:
stats.stream = stats_stream
output = stdout_trap.getvalue()
output = output.rstrip()
if 'q' not in opts:
page.page(output)
print(sys_exit, end=' ')
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
prof.dump_stats(dump_file)
if text_file:
with open(text_file, 'w') as pfile:
pfile.write(output)
if 'r' in opts:
return stats
else:
return None | [
"def runcode(self, code):\n try:\n buf = io.StringIO()\n with redirect_stdout(buf):\n exec(code, self.locals)\n result = self._result_from_stdout(buf)\n if result is None:\n result = self._result_from_code(code)\n self._last_expr_result = result\n except SystemExit:\n raise\n except:\n self.showtraceback()",
"def run_code(self, code):\n try:\n output = subprocess.check_output(['python', '-c', code], # run code\n stderr=subprocess.STDOUT, # redirect stderr output\n universal_newlines=True, # return running result as string\n timeout=30) # set up timeout limit\n except subprocess.CalledProcessError as e: # catch runtime error\n output = e.output # acquire subprocess error message\n except subprocess.TimeoutExpired as e: # catch timeout error\n output = '\\r\\n'.join(['Time Out!', e.output]) # acquire subprocess error message and add in timeout error\n return output # return execution results.",
"def run_code(code, code_path=None, ns=None, function_name=None, workdir=None,\n pre_code=None, raises=None):\n # Change the working directory to the directory of the example, so\n # it can get at its data files, if any. Add its path to sys.path\n # so it can import any helper modules sitting beside it.\n pwd = os.getcwd()\n old_sys_path = list(sys.path)\n workdir = os.getcwd() if workdir is None else workdir\n os.chdir(workdir)\n sys.path.insert(0, workdir)\n\n # Reset sys.argv\n old_sys_argv = sys.argv\n sys.argv = [code_path]\n\n # Redirect stdout\n stdout = sys.stdout\n sys.stdout = io.StringIO()\n\n # Assign a do-nothing print function to the namespace. There\n # doesn't seem to be any other way to provide a way to (not) print\n # that works correctly across Python 2 and 3.\n def _dummy_print(*arg, **kwarg):\n pass\n\n ns = {} if ns is None else ns\n try:\n try:\n code = unescape_doctest(code)\n if pre_code and not ns:\n exec(str(pre_code), ns)\n ns['print'] = _dummy_print\n if \"__main__\" in code:\n exec(\"__name__ = '__main__'\", ns)\n if raises is None:\n exec(code, ns)\n else: # Code should raise exception\n try:\n exec(code, ns)\n except raises:\n pass\n if function_name:\n exec(function_name + \"()\", ns)\n except (Exception, SystemExit):\n raise PlotError(traceback.format_exc())\n finally:\n os.chdir(pwd)\n sys.argv = old_sys_argv\n sys.path[:] = old_sys_path\n sys.stdout = stdout\n return ns",
"def run_code(error_code, noise, trials=10):\n pq, code_register = error_code(QubitPlaceholder(), noise=noise)\n ro = pq.declare('ro', 'BIT', len(code_register))\n pq += [MEASURE(qq, rr) for qq, rr in zip(code_register, ro)]\n\n return qvm.run(address_qubits(pq), trials=trials)",
"def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)",
"def trace(self, code):\n self.setup(code)\n self.btracer.setup()\n rewrite_function(self.top_level_function)\n sys.settrace(self.tracer)\n try:\n self.top_level_function()\n finally:\n sys.settrace(None)\n self.teardown()\n self.btracer.teardown()",
"def run_monitored_proc(code):\n if not sys.platform.startswith('linux'):\n raise RuntimeError(\"Peak memory monitoring only works on Linux\")\n\n code = textwrap.dedent(code)\n process = subprocess.Popen([sys.executable, '-c', code])\n\n peak_memusage = -1\n\n start = time.time()\n while True:\n ret = process.poll()\n if ret is not None:\n break\n\n with open('/proc/%d/status' % process.pid, 'r') as f:\n procdata = f.read()\n\n m = re.search(r'VmRSS:\\s*(\\d+)\\s*kB', procdata, re.S | re.I)\n if m is not None:\n memusage = float(m.group(1)) * 1e3\n peak_memusage = max(memusage, peak_memusage)\n\n time.sleep(0.01)\n\n process.wait()\n\n duration = time.time() - start\n\n if process.returncode != 0:\n raise AssertionError(\"Running failed:\\n%s\" % code)\n\n return duration, peak_memusage",
"def run_code(self, code: str, with_preprocess: bool = False,\n exception_list: Tuple = (), *args, **kwargs):\n # Get the path to the configuration file\n all_codes = get_all_codes(self.all_cfgs_dir)\n cfg_path = all_codes[code]\n # Run the experiment\n runner = self.get_runner()\n runner.merge_cfg(cfg_path)\n # Setup the outputs\n current_experiment_output_dir = os.path.join(self.hyper_experiment_path, f'exp-{code}')\n if not os.path.exists(current_experiment_output_dir):\n os.mkdir(current_experiment_output_dir)\n runner.set_output_dir(current_experiment_output_dir)\n\n # Run the experiment\n if self.verbose > 0:\n print(\"---\")\n print(\"This the configuration that will be used:\")\n print(runner.cfg)\n print(\"---\")\n runner.verbose = max(0, self.verbose - 1)\n try:\n if with_preprocess:\n runner.preprocess()\n score = runner.run(*args, **kwargs)\n except exception_list as e:\n warnings.warn(f\"Exception caught {e}\")\n score = None\n self.CACHE.LOAD()\n score_dict = self.CACHE.SET_IFN('score_dict', {})\n score_dict[code] = score\n self.CACHE.SET('score_dict', score_dict)\n self.CACHE.SAVE()\n runner.CACHE.RESET(prompt=False)\n return score",
"def code():",
"def execute(self, code):\n exec_(code, self, self)",
"def _run_code(self, code):\r\n #check for a console\r\n if self.console is None:\r\n log.warning('No managing console!')\r\n raise Exception('No managing console!')\r\n\r\n #set busy flag and send busy messages\r\n self.busy = True\r\n \r\n #published message\r\n self.publish_msg( eng_messages.ENGINE_STATE_BUSY+'.'+self.name, \r\n data=(self.debug, self.profile) )\r\n \r\n #enable debugger?\r\n if self.debug is True:\r\n trace_func = self.debugger\r\n else:\r\n trace_func = None\r\n\r\n #enable profiler?\r\n if self.profile is True:\r\n profile_func = self.profiler\r\n else:\r\n profile_func = None\r\n\r\n #run the code\r\n try:\r\n sys.settrace(trace_func)\r\n sys.setprofile(profile_func)\r\n exec code in self._userdict\r\n sys.settrace(None)\r\n sys.setprofile(None)\r\n\r\n #system exit - call engine.exit()\r\n except SystemExit:\r\n sys.settrace(None)\r\n sys.setprofile(None)\r\n self.busy = False\r\n log.debug('system exit in runnning code')\r\n self.exit()\r\n return\r\n\r\n #keyboard interrupt stopped running code\r\n except KeyboardInterrupt:\r\n sys.settrace(None)\r\n sys.setprofile(None)\r\n self.busy = False\r\n\r\n #engine stopped code in order to exit/or disconnect - do not prompt.\r\n if self._stop_quiet:\r\n self._stop_quiet = False\r\n return\r\n\r\n #user stopped code\r\n self._stop = False\r\n if self._isreading: #cancel the read prompt.\r\n self.send_msg( self.console, eng_messages.CON_PROMPT_STDIN, \r\n data=(self.prompts[1],None,))\r\n sys.stderr.write('STOP: User forced running code to stop.\\n\\n')\r\n \r\n #other exception - could be an error 1) caused by the engine exiting 2) a different error caused by\r\n #the KeyboardInterrupt (wxpython doesn't play nice!) or 3) a user code error\r\n except:\r\n sys.settrace(None)\r\n sys.setprofile(None)\r\n self.busy = False\r\n\r\n #1) engine is exiting/stopping quietly - probably some error \r\n # caused by engine exiting\r\n if self._stop_quiet:\r\n self._stop_quiet = False\r\n log.exception('Exception raised to stop running code? - engine wants to exit.')\r\n return\r\n\r\n #2) user stopped code\r\n if self._stop is True:\r\n self._stop = False\r\n if self._isreading: #cancel the read prompt.\r\n self.send_msg( self.console, eng_messages.CON_PROMPT_STDIN,\r\n data=(self.prompts[1], None,))\r\n sys.stderr.write('STOP: User forced running code to stop.\\n\\n')\r\n\r\n #3) error in user code.\r\n self.compiler.show_traceback()\r\n\r\n #reset internal state flags\r\n self.busy = False\r\n self._isreading = False\r\n if self.debug is True:\r\n self.debugger.reset()\r\n\r\n #softspace makes the print statement work correctly when using final \r\n #comma to supress newlines.\r\n if softspace(sys.stdout, 0):\r\n print \r\n\r\n #If exiting skip the rest.\r\n if self._stop_quiet is True:\r\n return\r\n\r\n #send an engine done message\r\n self.publish_msg( eng_messages.ENGINE_STATE_DONE+'.'+self.name, \r\n data=(self.debug, self.profile) )\r\n \r\n #prompt the console for new command\r\n try:\r\n self.send_msg(self.console, eng_messages.CON_PROMPT,\r\n (self.prompts[0], False,))\r\n except:\r\n log.exception('error ')\r\n pass",
"def runcode(self, code):\n if not self.locals.get('autocommit', None):\n return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code)\n return code.InteractiveConsole.runcode(self, code)",
"def ExecuteCode(code, global_dict):\n # Indeed, using exec generates a lint warning. But some user code\n # actually uses exec, and we have to test for it ...\n exec code in global_dict",
"def exec_code(code, db, write=True):\n evaler = Evaluator(db, write=write)\n glb = {}\n loc = ExecutionContext(evaler=evaler)\n exec(code, glb, loc)",
"def RunScript(code):\n with ScriptContext() as script_module:\n try:\n exec code in script_module.__dict__\n except:\n # Get exception output as close to exec as possible.\n # We don't take the first entry in the traceback because it just contains\n # \"exec\". Everything after that is the submitted code.\n try:\n etype, evalue, tb = sys.exc_info()\n traceback.print_exception(etype,\n evalue,\n tb.tb_next, # one frame up\n file=sys.stderr)\n finally:\n del tb # break circular references when using exc_info\n\n return sys.stdout.getvalue(), sys.stderr.getvalue()",
"def timeit_profile(stmt, number, repeat, setup,\n timer, pickle_protocol, dump_filename, mono, **_ignored):\n del _ignored\n sys.path.insert(0, os.curdir)\n globals_ = {}\n exec_(setup, globals_)\n if number is None:\n # determine number so that 0.2 <= total time < 2.0 like timeit.\n dummy_profiler = Profiler()\n dummy_profiler.start()\n for x in range(1, 10):\n number = 10 ** x\n t = time.time()\n for y in range(number):\n exec_(stmt, globals_)\n if time.time() - t >= 0.2:\n break\n dummy_profiler.stop()\n del dummy_profiler\n code = compile('for _ in range(%d): %s' % (number, stmt),\n 'STATEMENT', 'exec')\n __profile__(stmt, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)",
"def run_code(self, source, filename):\n code = compile(source, filename, 'exec')\n try:\n exec code in self.globals\n except KeyboardInterrupt:\n self.world.quit()",
"def runcode(self,code_obj):\n\n # Set our own excepthook in case the user code tries to call it\n # directly, so that the IPython crash handler doesn't get triggered\n old_excepthook,sys.excepthook = sys.excepthook, self.excepthook\n outflag = 1 # happens in more places, so it's easier as default\n try:\n try:\n exec code_obj in self.locals\n finally:\n # Reset our crash handler in place\n sys.excepthook = old_excepthook\n except SystemExit:\n self.resetbuffer()\n self.showtraceback()\n warn( __builtin__.exit,level=1)\n except self.custom_exceptions:\n etype,value,tb = sys.exc_info()\n self.CustomTB(etype,value,tb)\n except:\n self.showtraceback()\n else:\n outflag = 0\n if code.softspace(sys.stdout, 0):\n print\n # Flush out code object which has been run (and source)\n self.code_to_run = None\n self.code_to_run_src = ''\n return outflag",
"def RunCode(self, code, is_function_call = False):\r\n if is_function_call:\r\n code.replace(' ','_')\r\n if not code.endswith(')'):\r\n code = code + '()'\r\n self.addline(code)\r\n else:\r\n self.addline(code)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
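The profiler record above leans on IPython internals (Struct for option merging, page for paging output). A standard-library-only sketch of the same runctx-plus-pstats flow, using the hypothetical helper name profile_code and a fixed sort key and line limit in place of the -s and -l options, could look like:

    import cProfile
    import io
    import pstats

    def profile_code(code, namespace=None, sort_keys=("time",), limit=20):
        # Run `code` under cProfile and return the formatted stats as text.
        namespace = {} if namespace is None else namespace
        prof = cProfile.Profile()
        try:
            prof.runctx(code, namespace, namespace)
        except SystemExit:
            # As in the record above, code calling sys.exit() should not abort profiling.
            pass
        stream = io.StringIO()
        stats = pstats.Stats(prof, stream=stream).strip_dirs().sort_stats(*sort_keys)
        stats.print_stats(limit)
        return stream.getvalue()

    if __name__ == "__main__":
        print(profile_code("sum(i * i for i in range(100000))"))
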
Read the feature file, find the mass shift, then correct it. | def feature_file_mass_correction(feature_filename: str):
output_feature_filename = feature_filename + '.mass_corrected'
ppm_shift = []
with open(feature_filename, 'r') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
seq_index = header.index("seq")
mz_index = header.index("m/z")
z_index = header.index("z")
for line in reader:
mz = float(line[mz_index])
z = float(line[z_index])
observed_mass = mz * z - z * config.mass_H
if not line[seq_index]:
continue
okay, peptide = parse_raw_sequence(line[seq_index])
if not okay:
# unknown mods
continue
theoretical_mass = compute_neutral_peptide_mass(peptide)
ppm = (observed_mass - theoretical_mass) / theoretical_mass * 1e6
ppm_shift.append(ppm)
if len(ppm_shift) < 100:
raise ValueError("too less identified feature for mass correction")
ppm_shift = np.median(ppm_shift)
print(f"ppm shift: {ppm_shift}")
with open(feature_filename, 'r') as fr:
with open(output_feature_filename, 'w') as fw:
reader = csv.reader(fr, delimiter=',')
writer = csv.writer(fw, delimiter=',')
writer.writerow(next(reader))
for line in reader:
mz = float(line[mz_index])
mz = mz * (1 - ppm_shift * 1e-6)
line[mz_index] = "{}".format(mz)
writer.writerow(line) | [
"def read_msp(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50,windowed_mode=False):\n\n\tinfile = open(infile_name)\n\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(feat_lim_file).readlines()]\n\t\t\n\tcounter = 0\n\ttemp_entry = []\n\tinstance_names = []\n\tnum_instances = num_instances_msp(infile_name)\n\t#print(num_instances)\n\n\tif len(feat_bins) == 0: feat_bins = np.arange(0,max_dist+step_size,step_size)\n\t\n\t#Initialize the feature matrix, must be lil since scr is slow when mutating values!\n\tfeat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)),dtype=np.float32)\n\t\n\t#Iterate over the file and filter out single entries\n\tfor line in infile:\n\t\tif line.startswith(\"Name: \"):\n\t\t\tif len(temp_entry) == 0:\n\t\t\t\ttemp_entry.append(line.strip())\n\t\t\t\tcontinue\n\t\t\t#For this entry get identifier,m/z,intensities\n\t\t\tidentifier,mz_list,intensity_list = parse_msp(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\t\t\tinstance_names.append(identifier)\n\t\t\t#Fill in the feature matrix\n\t\t\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features,max_dist=max_dist)\n\t\t\t\n\t\t\t#Make sure the current line is still used for the next entry\n\t\t\ttemp_entry = [line]\n\t\t\t\n\t\t\t#print(counter)\n\t\t\tcounter += 1\n\t\t\t\n\t\ttemp_entry.append(line.strip())\n\t\n\t#If everything is empty; return\n\tif len(temp_entry) == 0:\n\t\ttemp_entry.append(line.strip())\n\t\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)\n\n\t#Analyse the last record; since we do not know when the spectra ends\n\tidentifier,mz_list,intensity_list = parse_msp(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\tinstance_names.append(identifier)\n\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features)\n\t\n\t#print(counter)\n\tcounter += 1\n\t\n\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)",
"def read_mgf(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50):\t\t \n\t\n\tinfile = open(infile_name)\n\t\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(\"selected_features.txt\").readlines()]\n\t\t\n\tcounter = 0\n\ttemp_entry = []\n\tinstance_names = []\n\tnum_instances = num_instances_mgf(infile_name)\n\t#print(num_instances)\n\n\tif len(feat_bins) == 0: feat_bins = np.arange(0,max_dist+step_size,step_size)\n\t\n\t#Initialize the feature matrix, must be lil since scr is slow when mutating values!\n\tfeat_matrix = scipy.sparse.lil_matrix((num_instances, len(feat_bins)),dtype=np.float32)\n\t\n\t#Iterate over the file and filter out single entries\n\tfor line in infile:\n\t\tif line.startswith(\"END IONS\"):\n\t\t\t#For this entry get identifier,m/z,intensities\n\t\t\tidentifier,mz_list,intensity_list = parse_mgf(temp_entry,top=top_peaks,windowed_mode=windowed_mode)\n\t\t\tinstance_names.append(identifier)\n\t\t\t#Fill in the feature matrix\n\t\t\tfeat_matrix = get_feats(mz_list,intensity_list,feat_matrix,counter,feat_bins,allowed_c=selected_features,max_dist=max_dist)\n\t\t\tcounter += 1\n\t\t\t#print(counter)\n\t\t\ttemp_entry = []\n\t\t\tcontinue\n\t\tif line.startswith(\"BEGIN IONS\"):\n\t\t\tcontinue\n\t\ttemp_entry.append(line)\n\n\treturn(feat_matrix.asformat(\"csr\"),feat_bins,instance_names,counter)",
"def read_shiftx2(input_file, offset=0):\n preds_long = pd.read_csv(input_file)\n preds_long[\"NUM\"] = preds_long[\"NUM\"] + offset # Apply any offset to residue numbering\n preds_long[\"Res_name\"] = preds_long[\"NUM\"].astype(str)+preds_long[\"RES\"]\n if any(preds_long.columns == \"CHAIN\"): preds_long = preds_long.drop(\"CHAIN\", axis=1) # Assuming that there's only one CHAIN in the predictions...\n preds_long = preds_long.reindex(columns=[\"NUM\",\"RES\",\"Res_name\",\"ATOMNAME\",\"SHIFT\"]) \n preds_long.columns = [\"Res_N\",\"Res_type\",\"Res_name\",\"Atom_type\",\"Shift\"]\n \n # Convert from wide to long format\n preds = preds_long.pivot(index=\"Res_N\", columns=\"Atom_type\", values=\"Shift\")\n \n # Add the other data back in\n tmp = preds_long[[\"Res_N\",\"Res_type\",\"Res_name\"]]\n tmp = tmp.drop_duplicates(subset=\"Res_name\")\n tmp.index = tmp[\"Res_N\"]\n preds = pd.concat([tmp, preds], axis=1)\n \n # Make columns for the i-1 predicted shifts of C, CA and CB\n preds_m1 = preds[list({\"C\",\"CA\",\"CB\",\"Res_type\"}.intersection(preds.columns))].copy()\n preds_m1.index = preds_m1.index+1\n preds_m1.columns = preds_m1.columns + \"m1\"\n preds = pd.merge(preds, preds_m1, how=\"left\", left_index=True, right_index=True)\n \n # Restrict to only certain atom types\n atom_set = {\"H\",\"N\",\"C\",\"CA\",\"CB\",\"Cm1\",\"CAm1\",\"CBm1\",\"HA\"}\n preds = preds[[\"Res_name\",\"Res_N\",\"Res_type\",\"Res_typem1\"]+list(atom_set.intersection(preds.columns))]\n \n preds.index = preds[\"Res_name\"]\n \n return(preds)",
"def read_train(file):\n raw_data = np.genfromtxt(file, delimiter=',', dtype=\"S1,\" + \"f8,\" * (NUM_FEATURES - 1) + \"f8\")\n new_data = np.zeros((raw_data.shape[0], (NUM_FEATURES + 1)))\n for sample in range(raw_data.shape[0]):\n new_data[sample, 0] = (1 if raw_data[sample][0] == b'M' else 0)\n for feature in range(30):\n new_data[sample, feature+1] = raw_data[sample][feature + 1]\n return new_data",
"def load_redshifts(fname, cosmology, t_bigbang):\n\n with open(fname, \"r\") as f:\n\n a = np.loadtxt(f)\n\n z = 1.0/a - 1.0\n\n lookback = (t_bigbang - cosmology.lookback_time(z).value*1.0e3) # In Myr.\n\n return z, lookback",
"def read_szf_fmv_12(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"land_frac\", uint_nan),\n (\"flagfield_gen2\", byte_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags(data)\n\n return data, metadata",
"def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )",
"def read_szf_fmv_13(eps_file):\n data = {}\n metadata = {}\n\n n_lines = eps_file.mdr_counter\n n_node_per_line = eps_file.mdr[\"LONGITUDE_FULL\"].shape[1]\n idx_nodes = np.arange(n_lines).repeat(n_node_per_line)\n\n # extract metadata\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n metadata[\"state_vector_time\"] = datetime.strptime(\n eps_file.mphr[\"STATE_VECTOR_TIME\"][:-4], \"%Y%m%d%H%M%S\")\n\n fields = [\n \"processor_major_version\", \"processor_minor_version\",\n \"format_major_version\", \"format_minor_version\"\n ]\n for f in fields:\n metadata[f] = np.int16(eps_file.mphr[f.upper()])\n\n # extract time\n dt = np.datetime64(\n \"2000-01-01\") + eps_file.mdr[\"UTC_LOCALISATION\"][\"day\"].astype(\n \"timedelta64[D]\"\n ) + eps_file.mdr[\"UTC_LOCALISATION\"][\"time\"].astype(\"timedelta64[ms]\")\n data[\"time\"] = dt[idx_nodes]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\", \"flagfield_rf1\", \"flagfield_rf2\", \"flagfield_pl\",\n \"flagfield_gen1\"\n ]\n\n fields = [\n \"degraded_inst_mdr\", \"degraded_proc_mdr\", \"sat_track_azi\",\n \"beam_number\"\n ]\n\n # 101 min = 6082 seconds\n # state_vector_time = ascending node crossing time - 1520.5,\n # time crossing at -90 lat\n orbit_start_time = metadata[\"state_vector_time\"] - timedelta(\n seconds=1520.5)\n orbit_end_time = orbit_start_time + timedelta(seconds=6082)\n\n data[\"orbit_nr\"] = np.ma.zeros(\n data[\"time\"].size, dtype=np.int32,\n fill_value=int32_nan) + metadata[\"orbit_start\"]\n data[\"orbit_nr\"][data[\"time\"] > orbit_end_time] += 1\n\n metadata[\"orbits\"] = {}\n for orbit_nr in np.unique(data[\"orbit_nr\"]):\n if orbit_nr == metadata[\"orbit_start\"]:\n metadata[\"orbits\"][orbit_nr] = (orbit_start_time, orbit_end_time)\n else:\n metadata[\"orbits\"][orbit_nr] = (orbit_end_time, orbit_end_time +\n timedelta(seconds=6082))\n\n # extract data\n for f in fields:\n if eps_file.mdr_sfactor[f.upper()] == 1:\n data[f] = eps_file.mdr[f.upper()].flatten()[idx_nodes]\n else:\n data[f] = (eps_file.mdr[f.upper()].flatten() * 1. /\n eps_file.mdr_sfactor[f.upper()])[idx_nodes]\n\n data[\"swath_indicator\"] = (data[\"beam_number\"].flatten() > 3).astype(\n np.uint8)\n data[\"as_des_pass\"] = (data[\"sat_track_azi\"] < 270).astype(np.uint8)\n\n fields = [(\"longitude_full\", long_nan), (\"latitude_full\", long_nan),\n (\"sigma0_full\", long_nan), (\"inc_angle_full\", uint_nan),\n (\"azi_angle_full\", int_nan), (\"flagfield\", int_nan)]\n\n for f, nan_val in fields:\n data[f] = eps_file.mdr[f.upper()].flatten()\n invalid = eps_file.mdr[f.upper()].flatten() == nan_val\n\n if eps_file.mdr_sfactor[f.upper()] != 1:\n data[f] = data[f] * 1. / eps_file.mdr_sfactor[f.upper()]\n\n data[f][invalid] = nan_val\n\n # modify longitudes from (0, 360) to (-180, 180)\n mask = np.logical_and(data[\"longitude_full\"] != long_nan,\n data[\"longitude_full\"] > 180)\n data[\"longitude_full\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n idx = (data[\"azi_angle_full\"] != int_nan) & (data[\"azi_angle_full\"] < 0)\n data[\"azi_angle_full\"][idx] += 360\n\n # set flags\n data[\"f_usable\"] = set_flags_fmv13(data[\"flagfield\"])\n\n return data, metadata",
"def read_smx_fmv_12(eps_file):\n raw_data = eps_file.scaled_mdr\n raw_unscaled = eps_file.mdr\n\n n_node_per_line = raw_data[\"LONGITUDE\"].shape[1]\n n_lines = raw_data[\"LONGITUDE\"].shape[0]\n n_records = eps_file.mdr_counter * n_node_per_line\n idx_nodes = np.arange(eps_file.mdr_counter).repeat(n_node_per_line)\n\n data = {}\n metadata = {}\n\n metadata[\"spacecraft_id\"] = np.int8(eps_file.mphr[\"SPACECRAFT_ID\"][-1])\n metadata[\"orbit_start\"] = np.uint32(eps_file.mphr[\"ORBIT_START\"])\n\n ascat_time = shortcdstime2jd(raw_data[\"UTC_LINE_NODES\"].flatten()[\"day\"],\n raw_data[\"UTC_LINE_NODES\"].flatten()[\"time\"])\n data[\"jd\"] = ascat_time[idx_nodes]\n\n fields = [(\"sigma0_trip\", long_nan), (\"inc_angle_trip\", uint_nan),\n (\"azi_angle_trip\", int_nan), (\"kp\", uint_nan),\n (\"f_land\", uint_nan)]\n\n for f, nan_val in fields:\n data[f] = raw_data[f.upper()].reshape(n_records, 3)\n valid = raw_unscaled[f.upper()].reshape(n_records, 3) != nan_val\n data[f][~valid] = nan_val\n\n fields = [\"sat_track_azi\", \"abs_line_number\"]\n for f in fields:\n data[f] = raw_data[f.upper()].flatten()[idx_nodes]\n\n fields = [(\"longitude\", long_nan, long_nan),\n (\"latitude\", long_nan, long_nan),\n (\"swath_indicator\", byte_nan, byte_nan),\n (\"soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_error\", uint_nan, uint_nan),\n (\"sigma40\", long_nan, long_nan),\n (\"sigma40_error\", long_nan, long_nan),\n (\"slope40\", long_nan, long_nan),\n (\"slope40_error\", long_nan, long_nan),\n (\"dry_backscatter\", long_nan, long_nan),\n (\"wet_backscatter\", long_nan, long_nan),\n (\"mean_surf_soil_moisture\", uint_nan, uint_nan),\n (\"soil_moisture_sensetivity\", ulong_nan, float32_nan),\n (\"correction_flags\", uint8_nan, uint8_nan),\n (\"processing_flags\", uint8_nan, uint8_nan),\n (\"aggregated_quality_flag\", uint8_nan, uint8_nan),\n (\"snow_cover_probability\", uint8_nan, uint8_nan),\n (\"frozen_soil_probability\", uint8_nan, uint8_nan),\n (\"innudation_or_wetland\", uint8_nan, uint8_nan),\n (\"topographical_complexity\", uint8_nan, uint8_nan)]\n\n for f, nan_val, new_nan_val in fields:\n data[f] = raw_data[f.upper()].flatten()\n valid = raw_unscaled[f.upper()].flatten() != nan_val\n data[f][~valid] = new_nan_val\n\n # sat_track_azi (uint)\n data[\"as_des_pass\"] = \\\n np.array(raw_data[\"SAT_TRACK_AZI\"].flatten()[idx_nodes] < 270)\n\n # modify longitudes from [0,360] to [-180,180]\n mask = np.logical_and(data[\"longitude\"] != long_nan,\n data[\"longitude\"] > 180)\n data[\"longitude\"][mask] += -360.\n\n # modify azimuth from (-180, 180) to (0, 360)\n mask = (data[\"azi_angle_trip\"] != int_nan) & (data[\"azi_angle_trip\"] < 0)\n data[\"azi_angle_trip\"][mask] += 360\n\n fields = [\"param_db_version\", \"warp_nrt_version\"]\n for f in fields:\n data[f] = raw_data[\"PARAM_DB_VERSION\"].flatten()[idx_nodes]\n\n metadata[\"spacecraft_id\"] = int(eps_file.mphr[\"SPACECRAFT_ID\"][2])\n\n data[\"node_num\"] = np.tile((np.arange(n_node_per_line) + 1), n_lines)\n\n data[\"line_num\"] = idx_nodes\n\n return data, metadata",
"def extract_rms(self, rms_file):\n mcDir = os.path.join(self.feat_dir, 'mc')\n if not os.path.isdir(mcDir):\n \tprint 'No_Feat_DIR'\n \tsys.exit(0)\n rmsFile = open(os.path.join(mcDir, rms_file), 'r')\n csv_parFile = csv.reader(rmsFile, delimiter=' ')\n rmsData = []\n for line in rmsFile:\n rmsData.append(line)\n if len(rmsData) == 1:\n return float(rmsData[0])\n else:\n print 'RMS file: ', rms_file, ' Has multiple lines....cannot use this function to parse this file.'\n return False",
"def test_read_0_1_smirff(self):\n ForceField(\n get_data_file_path(\n \"test_forcefields/smirff99Frosst_reference_0_1_spec.offxml\"\n )\n )",
"def read(self,isOutputFile = False, headerCols = None, verbose = 0):\n \n #\n # TODO TODO also need a 'readFinal' one to read the FINAL information!!\n # set a flag in MonteFormat.py to select which cs info to read...\n\n if verbose == 1:\n print \"Reading %s chemical shift list %s\" % (self.format,self.name)\n\n fin = open(self.name, 'rU')\n\n line = fin.readline()\n \n spinSystemId = 0\n resLabel = oldResLabel = None\n\n while line:\n\n if self.patt['%sComment' % self.format].search(line):\n\n if not isOutputFile and not self.chemShifts and not headerCols:\n\n #\n # Get atom info from first line...\n #\n \n headerCols = line.split()\n headerCols.pop(0)\n\n line = fin.readline()\n continue\n\n if self.patt['emptyline'].search(line):\n line = fin.readline()\n continue\n \n #\n # Make sure header info is available - otherwise no point\n #\n \n if not headerCols:\n raise \"Error: no header column information available. Try reading .par file!\"\n return\n \n #\n # Get the info... should really come for .par file!!\n #\n \n cols = line.split()\n \n infoCode = None\n \n if not isOutputFile:\n \n stripId = returnFloat(cols.pop(0))\n\n #\n # NOt necessarily info string available...\n #\n\n if self.patt['onlyFloat'].search(cols[0]):\n seqCode = None\n resLabel = None\n\n else:\n assignment = cols.pop(0)\n\n searchAssignment = self.patt['%sAssignment' % self.format].search(assignment)\n\n resLabel = searchAssignment.group(1)\n seqCode = searchAssignment.group(2)\n \n else:\n \n seqCode = cols.pop(0)\n if seqCode[-1] in '+':\n seqCode = seqCode[:-1]\n infoCode = seqCode[-1]\n \n oldResLabel = resLabel\n resLabel = cols.pop(0)\n stripId = returnFloat(cols.pop(0))\n voidCol = cols.pop(0)\n \n #\n # Set up info for atoms...\n #\n \n if not seqCode or seqCode == '?':\n seqCode = None\n spinSystemId = spinSystemId + 2\n else:\n seqCode = returnInt(seqCode)\n\n if len(cols) == 1:\n cols = cols.split(',')\n\n values = returnFloats(cols)\n\n for i in range(0,len(values)):\n atomId = headerCols[i]\n value = values[i]\n \n if value == 0.0:\n continue\n \n atomSearch = self.patt['%sAtomInfo' % self.format].search(atomId)\n \n atomName = atomSearch.group(1)\n atomPlace = atomSearch.group(2)\n \n if atomName == 'HA1':\n nextAtomValue = values[i+1]\n if nextAtomValue == 0.00:\n atomName = 'HA'\n \n curSeqCode = seqCode\n curResLabel = None\n \n if seqCode == None:\n curSpinSystemId = spinSystemId\n prevSpinSystemId = spinSystemId - 1\n else:\n curSpinSystemId = None\n prevSpinSystemId = None\n \n if atomPlace == '(i-1)' or atomPlace == '-1':\n\n if seqCode != None:\n curSeqCode = seqCode - 1\n else:\n curSpinSystemId = spinSystemId - 1\n prevSpinSystemId = None\n \n if not isOutputFile:\n curResLabel = resLabel\n else:\n curResLabel = oldResLabel\n \n elif isOutputFile:\n curResLabel = resLabel\n\n self.chemShifts.append(MonteChemShift(value,atomName,curSeqCode,curSpinSystemId,stripId,curResLabel,self.defaultMolCode, infoCode = infoCode, prevSpinSystemId = prevSpinSystemId))\n\n line = fin.readline()\n\n fin.close()",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def file_is_fixedStep(filename):\n \n file = open(filename)\n\n for line in file:\n elems = line.split()\n if (elems[0] == \"fixedStep\"):\n return True\n\n return False",
"def import_from_mdata_ascii(self, filename):\n \n f = open(filename, 'r') \n data = f.read()\n f.close()\n mylines = data.splitlines()\n #get nps associated with mdata\n self.nps = np.longlong(mylines[0].split()[5])\n \n \n #find the nx,ny,nz line\n p = re.compile('^f .*')\n j=0\n for i in np.arange(len(mylines)):\n if(p.match(mylines[i])):\n j=i\n break\n \n q = mylines[j].split()\n nxyz = np.longlong(q[1])\n nx = np.long(q[3])\n ny = np.long(q[4])\n nz = np.long(q[5])\n \n #allocate your arrays\n self.tally_values= np.zeros([nxyz])\n self.unc_values=np.zeros([nxyz])\n self.xb = np.zeros([nx+1])\n self.yb = np.zeros([ny+1])\n self.zb = np.zeros([nz+1])\n \n #read in xbounds\n t=0\n for i in np.arange((j+1), len(mylines)):\n t= t + len(mylines[i].split())\n if(t == (nx+1)):\n break\n temp=''\n for k in np.arange((j+1),i+1):\n temp += mylines[k]\n self.xb = np.array(temp.split(), dtype=np.double)\n print(\"min/max xb: {0},{1}\".format(min(self.xb),max(self.xb)))\n #read in ybounds\n j=i\n t=0\n for i in np.arange((j+1), len(mylines)):\n t= t + len(mylines[i].split())\n if(t == (ny+1)):\n break\n temp=''\n for k in np.arange((j+1),i+1):\n temp += mylines[k]\n self.yb = np.array(temp.split(), dtype=np.double)\n print(\"min/max yb: {0},{1}\".format(min(self.yb),max(self.yb)))\n #read in zbounds\n j=i\n t=0\n for i in np.arange((j+1), len(mylines)):\n t= t + len(mylines[i].split())\n if(t == (nz+1)):\n break\n temp=''\n for k in np.arange((j+1),i+1):\n temp += mylines[k]\n self.zb = np.array(temp.split(), dtype=np.double)\n print(\"min/max zb: {0},{1}\".format(min(self.zb),max(self.zb)))\n \n #advance to tally values\n p = re.compile('^vals.*')\n j=0\n for i in np.arange(len(mylines)):\n if(p.match(mylines[i])):\n j=i\n break\n #read everything into an array\n tempArr = np.zeros([2*nxyz])\n si=0\n fi=0\n for i in np.arange((j+1),len(mylines)):\n temp = np.asarray(mylines[i].split(), dtype=np.double)\n fi = si + len(temp)\n tempArr[si:fi] = temp\n si = fi\n \n #separate absobred dose and uncertainty\n self.unc_values = tempArr[1::2].reshape([nx,ny,nz],order='F')\n self.tally_values = tempArr[0::2].reshape([nx,ny,nz], order='F')\n #xc = 0.5*self.xb[0:-1] + 0.5*self.xb[1:]\n #e= np.reshape(np.repeat(xc,nz),[nx,nz])\n #e1=np.reshape(np.repeat(zc,nx), [nx,nz], order='F')",
"def read_ised(self,filename):\n\n with open(filename,'rb') as f:\n check = array.array('i')\n check.fromfile(f,2)\n \n if check[1] == 221:\n ksl, ksi = 2, 1\n F_l, F_i = 3, 2\n else:\n ksl, ksi = 3, 2\n F_l, F_i = 5, 4\n \n with open(filename,'rb') as f:\n ks = array.array('i')\n ks.fromfile(f,ksl)\n\n ta = array.array('f')\n ta.fromfile(f,ks[ksi])\n self.ta = numpy.array(ta)\n\n tmp = array.array('i')\n tmp.fromfile(f,3)\n self.ml,self.mul,iseg = tmp\n\n if iseg > 0:\n tmp = array.array('f')\n tmp.fromfile(f,iseg*6)\n\n tmp = array.array('f')\n tmp.fromfile(f,5)\n self.totm, self.totn, self.avs, self.jo, self.tauo = tmp\n\n\n self.ids= array.array('c')\n self.ids.fromfile(f,80)\n\n tmp = array.array('f')\n tmp.fromfile(f,4)\n self.tcut = tmp[0]\n self.ttt = tmp[1:]\n\n ids = array.array('c')\n ids.fromfile(f,80)\n\n self.ids = array.array('c')\n self.ids.fromfile(f,80)\n\n self.igw = array.array('i')\n self.igw.fromfile(f,1)\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.iw = array.array('i')\n self.iw.fromfile(f,1)\n\n wave = array.array('f')\n wave.fromfile(f,self.iw[0])\n self.wave = numpy.array(wave)\n\n #SED Section\n self.F = array.array('i')\n self.F.fromfile(f,F_l)\n self.iw = self.F[F_i] #Number of wavelength elements\n\n self.sed = numpy.zeros((self.iw,ks[ksi]),dtype=numpy.float32)\n G = array.array('f')\n G.fromfile(f,self.iw)\n self.sed[:,0] = G\n ik = array.array('i')\n ik.fromfile(f,1)\n\n self.h = numpy.empty((ik[0],ks[ksi]),'f')\n H = array.array('f')\n H.fromfile(f,ik[0])\n self.h[:,0] = H\n\n for i in range(1,ks[ksi]): #Fill rest of array with SEDs\n F = array.array('i')\n F.fromfile(f,F_l)\n iw = F[F_i]\n\n G = array.array('f')\n G.fromfile(f,iw)\n self.sed[:,i] = G\n ik = array.array('i')\n ik.fromfile(f,1)\n\n H = array.array('f')\n H.fromfile(f,ik[0])\n self.h[:,i] = H\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.bflx = array.array('f')\n self.bflx.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n strm = array.array('f')\n strm.fromfile(f,tmp[F_i])\n self.strm = numpy.array(strm)\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.evf = array.array('f')\n self.evf.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.evf = array.array('f')\n self.evf.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.snr = array.array('f')\n self.snr.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.pnr = array.array('f')\n self.pnr.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.sn = array.array('f')\n self.sn.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.bh = array.array('f')\n self.bh.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n self.wd = array.array('f')\n self.wd.fromfile(f,tmp[F_i])\n\n tmp = array.array('i')\n tmp.fromfile(f,F_l)\n\n rmtm = array.array('f')\n rmtm.fromfile(f,tmp[F_i])\n self.rmtm = numpy.array(rmtm)",
"def convert_matrix(infile, names,refdict,nosamples):\n \n if infile.endswith(\".gz\"):\n inf = gzip.open(infile, \"rb\")\n \n else:\n inf = open(infile, \"r\")\n for line in inf:\n line = line.rsplit()\n if line[0] == \"chromosome\":\n pass # header\n else:\n \n\n chrom = line[0]\n start = line[1]\n stop = line[2]\n TE = line[4]\n n_te = str(len(TE.split(\",\")))\n tes=TE.split(\",\")\n tefam=[]\n tesuperfamily=[]\n \n \n for i in xrange(len(tes)):\n \n tefam.append(refdict[tes[i]][0])\n \n tesuperfamily.append(refdict[tes[i]][1])\n \n \n superfamily=list(set(tesuperfamily))\n if 'Unknown' in superfamily:\n superfamily.remove('Unknown')\n if not superfamily:\n superfamily.append('Unknown')\n \n pos = line[5].split(\",\")\n neg = line[6].split(\",\")\n#missing = 305-(len(pos)+len(neg))/305\n te_id = \"\\t\".join([chrom, start, stop])\n status = get_status(pos, neg, names)\n column_ordered = []\n for i in names:\n column_ordered.append(status[i])\n noNA = filter(lambda x: x != \"NA\", status.values()) \n noNA = map(int, noNA)\n pos_count = sum(noNA)\n l = len(noNA)\n neg_count = l - pos_count\n TE_present=pos_count\n TE_absent=neg_count\n if(pos_count < neg_count):\n Minor_allele=\"presence\"\n\n else:\n Minor_allele=\"absence\"\n#print Minor_allele\n q20=int(0.2*nosamples)\n q80=int(0.8*nosamples)\n if (TE_absent < q20):\n Absence_classification=\"True deletion\"\n elif (TE_absent > q80):\n Absence_classification=\"No insertion\"\n else:\n Absence_classification=\"NA\"\n original_call_deletion = 'T'\n MAF=float(min(TE_present, TE_absent))/nosamples\n #print int(min(TE_present, TE_absent)) ,MAF\n if(MAF < 0.025):\n Frequency_classification = \"Rare\"\n else:Frequency_classification =\"Common\"\n print(te_id + \"\\t\" + TE + \"\\t\" + \",\".join(tefam) + \"\\t\" +\",\".join(superfamily) + \"\\t\" +n_te + \"\\t\" + str(pos_count) + \"\\t\" + str(neg_count) + \"\\t\" +str(Minor_allele) + \"\\t\" +original_call_deletion + \"\\t\" +str(Absence_classification) + \"\\t\" +str(MAF) + \"\\t\" +str(Frequency_classification) + \"\\t\"+\"\\t\".join(column_ordered))\n inf.close()",
"def read_file_agsm(self,filename):\n\n narr,larr,farr,iarr,nn,exceed_freqlim = \\\n aims_fortran.read_file_agsm(filename,config.npositive,config.agsm_cutoff, \\\n config.cutoff*self.cutoff)\n self.modes = np.array(zip(narr[0:nn],larr[0:nn],farr[0:nn],iarr[0:nn]),dtype=modetype)\n\n return exceed_freqlim",
"def sgd_features(filepath=None):\n\n if filepath == None:\n filepath=load_sgd_tab()\n\n arabic_to_roman_dict=chromosomename_roman_to_arabic()[0]\n \n with open(filepath) as f:\n lines = f.readlines()\n\n\n feature_list = []\n feature_orf_dict = {}\n feature_ars_dict = {}\n feature_telomere_dict = {}\n feature_ltr_dict = {}\n feature_centromere_dict = {}\n feature_Xelement_dict = {}\n feature_intron_dict = {}\n feature_ncrna_dict = {}\n feature_ncexon_dict = {}\n feature_trna_dict = {}\n feature_snorna_dict = {}\n feature_teg_dict = {}\n feature_5p_utrintron_dict = {}\n feature_mas_dict = {}\n feature_snrna_dict = {}\n feature_rrna_dict = {}\n feature_ets_dict = {}\n feature_its_dict = {}\n feature_oor_dict = {}\n feature_telrna_dict = {}\n \n for line in lines:\n l = line.strip('\\n').split('\\t')\n if not l[1] in feature_list:\n feature_list.append(l[1])\n\n if not l[8].endswith('micron') and not l[8] == '':\n chromosome = arabic_to_roman_dict.get(int(l[8]))\n if l[1] == 'ORF':\n feature_orf_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ARS':\n feature_ars_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomere':\n feature_telomere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'long_terminal_repeat':\n feature_ltr_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'centromere':\n feature_centromere_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'X_element':\n feature_Xelement_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'intron':\n feature_intron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'ncRNA_gene':\n feature_ncrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'noncoding_exon':\n feature_ncexon_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'tRNA_gene':\n feature_trna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snoRNA_gene':\n feature_snorna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'transposable_element_gene':\n feature_teg_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'five_prime_UTR_intron':\n feature_5p_utrintron_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'matrix_attachment_site':\n feature_mas_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'snRNA_gene':\n feature_snrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'rRNA_gene':\n feature_rrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'external_transcribed_spacer_region':\n feature_ets_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'internal_transcribed_spacer_region':\n feature_its_dict[l[6]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'origin_of_replication':\n feature_oor_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n elif l[1] == 'telomerase_RNA_gene':\n feature_telrna_dict[l[3]] = [l[1], l[2], l[4], l[5], l[6], chromosome, l[9],l[10]]\n\n\n \n\n\n genomicregions_list = ['ORF', 'ARS', 'Telomere', 'long_terminal_repeat',\n 'Centromere', 'X_element', 'Intron', 'ncRNA_gene',\n 'Noncoding_exon', 'tRNA_gene', 'snoRNA_gene',\n 'transposable_element_gene', 
'five_prime_UTR_intron',\n 'matrix_attachment_site', 'snRNA_gene', 'rRNA_gene',\n 'external_transcribed_spacer_region',\n 'internal_transcribed_spacer_region',\n 'origin_of_replication', 'telomerase_RNA_gene']\n\n\n return(genomicregions_list, feature_orf_dict, feature_ars_dict, feature_telomere_dict,\n feature_ltr_dict, feature_centromere_dict, feature_Xelement_dict, feature_intron_dict,\n feature_ncrna_dict, feature_ncexon_dict, feature_trna_dict,\n feature_snorna_dict, feature_teg_dict, feature_5p_utrintron_dict,\n feature_mas_dict, feature_snrna_dict, feature_rrna_dict,\n feature_ets_dict, feature_its_dict, feature_oor_dict,\n feature_telrna_dict)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Factory method to create a cache object from github/spilchen/baseball_id_db. This is called as part of package initialization and so can be referred to via the Lookup variable. >>> from baseball_id import Lookup >>> Lookup.from_yahoo_ids([10794, 9542, 7578]) | def create(cls):
ssl._create_default_https_context = ssl._create_unverified_context
c = lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv')
return c | [
"def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c",
"def load_by_ids(cls,ids):\n if not ids or ids[0] == '':\n return None\n es = from_caches(ids) #(ids,'SuiBook') as prefixed\n notfounds = filter(lambda e:e not in es, ids)\n if len(notfounds)>0:\n es2 = dict((str(e.key().id()),e) for e in SuiBook.get_by_id(map(lambda e:int(e),notfounds)) if e)\n to_caches(es2) #to_caches(dict(),time,key_prefix='SuiBook')\n es.update(es2)\n return es",
"def __init__(self, simplecache=None, kodidb=None):\n\n if not kodidb:\n from kodidb import KodiDb\n self.kodidb = KodiDb()\n else:\n self.kodidb = kodidb\n\n if not simplecache:\n from simplecache import SimpleCache\n self.cache = SimpleCache()\n else:\n self.cache = simplecache",
"def init():\n database = \"database.pkl\"\n\n onsite_bills = BillID(database)\n online_bills = BillID(database)\n\n return onsite_bills, online_bills",
"def load_by_ids(cls,ids):\n es = from_caches(ids) #some are loaded from memcache, others are ignored.\n notfounds = filter(lambda e:e not in es, ids)\n if len(notfounds)>0:\n es2 = dict((str(e.key().id()),e) for e in SuiGoods.get_by_id(map(lambda e:int(e),notfounds)))\n to_caches(es2)\n es.update(es2)\n return es",
"def get_api_cache(apiId=None):\n pass",
"def __init__(self, *args, **kw):\n # kw['strIdent'] = DBCAT\n BaseDB.__init__(self, *args, **kw)\n # cache by project name as key and project Id as value\n self._gbl_projectid_cache = {}",
"def __init_bize(self):\n db_engine = SQLLiteStorage(Config.BIZE_DB_FILE, 0)\n result_list = db_engine.get_data('select xid,name,jianpin,identity,identity_name from bize')\n for bize_rl in result_list:\n bize = Bize()\n bize.set_xid(bize_rl[0])\n bize.set_name(bize_rl[1])\n bize.set_jianpin(bize_rl[2])\n if bize.xid() / 10 == 1999999:\n bize.set_identity(100)\n bize.set_identity_name(u'机构专用')\n else:\n bize.set_identity(bize_rl[3])\n bize.set_identity_name(bize_rl[4])\n self.bize_dict[bize.xid()] = bize",
"def _cache(item_label, item_list):\n id_label = item_label + '_id'\n mbid_label = item_label + '_mbid'\n echonest_id_label = item_label + '_echonest_id'\n items = {}\n for item in item_list:\n key = '/%s/%s' % (item_label, item[id_label])\n items[key] = item\n musicbrainz_id = item.get(mbid_label, None)\n if musicbrainz_id:\n items['/musicbrainz/%s/%s' % (item_label, musicbrainz_id)] = key\n # echonest_id = item.get(echonest_id_label, None)\n # if echonest_id:\n # items['/echonest/%s/%s' % (item_label, echonest_id)] = key\n application.config.get('CACHE').set_many(items)",
"def fill_cache_from_class(cls, stderr=sys.stderr):\n try:\n et = EEStoreType.objects.create(name=cls.etype)\n except IntegrityError as e:\n stderr.write(f'\"Type {cls.etype} alreday exists: {e}')\n et = EEStoreType.objects.get(name=cls.etype)\n\n try:\n esource = EEStoreSource.objects.create(eestore_type=et, name=cls.source)\n except IntegrityError as e:\n stderr.write(f'\"Source {cls.source}\" alreday exists: {e}')\n esource = EEStoreSource.objects.get(eestore_type=et, name=cls.source)\n timestamp = tznow()\n for i, item in enumerate(sorted(cls.items), start=1):\n eestore_pid = f'{cls.etype}:{cls.source}:{item}'\n try:\n ec = EEStoreCache.objects.create(\n eestore_type=et,\n source=esource,\n eestore_id=i,\n eestore_pid=eestore_pid,\n name=item,\n pid=item,\n remote_id=item,\n last_fetched=timestamp,\n )\n except IntegrityError as e:\n stderr.write(f'\"{item}\" alreday exists: {e}')\n\n return esource",
"def CacheFactory (cls, name, entrycls, tablename, **kwargs):\n newclass = type (name, (cls, Base), {\"__tablename__\": tablename, \\\n \"file\": sqlalchemy.Column (sqlalchemy.String, sqlalchemy.ForeignKey (entrycls.file)), \\\n \"entry\": sqlalchemy.orm.relationship (entrycls, **kwargs), \\\n \"__table_args__\": (sqlalchemy.UniqueConstraint('name', 'file', name='uix_1' + name),)})\n setattr (entrycls, \"Cache\", newclass)\n return newclass",
"def __init__(self, id):\n super(BAPDatabase, self).__init__(id)",
"def create_db_from_cache():\n with open('matches.cache', 'rb') as f:\n matches = pickle.load(f)\n\n Base.metadata.create_all(engine)\n match_loader(matches)",
"def bdp_bds_cache(func, tickers, flds, **kwargs) -> ToQuery:\n cache_data = []\n logger = logs.get_logger(bdp_bds_cache, **kwargs)\n kwargs['has_date'] = kwargs.pop('has_date', func == 'bds')\n kwargs['cache'] = kwargs.get('cache', True)\n\n tickers = utils.flatten(tickers)\n flds = utils.flatten(flds)\n loaded = pd.DataFrame(data=0, index=tickers, columns=flds)\n\n for ticker, fld in product(tickers, flds):\n data_file = storage.ref_file(\n ticker=ticker, fld=fld, ext='pkl', **{\n k: v for k, v in kwargs.items() if k not in EXC_COLS\n }\n )\n if not files.exists(data_file): continue\n logger.debug(f'reading from {data_file} ...')\n cache_data.append(pd.read_pickle(data_file))\n loaded.loc[ticker, fld] = 1\n\n to_qry = loaded.where(loaded == 0)\\\n .dropna(how='all', axis=1).dropna(how='all', axis=0)\n\n return ToQuery(\n tickers=to_qry.index.tolist(), flds=to_qry.columns.tolist(),\n cached_data=cache_data\n )",
"def _from_db_object_list(db_objects, cls, context):\n return [Boar._from_db_object(cls(context), obj)\n for obj in db_objects]",
"def lookup(cls, _db, short_name):\n def _lookup():\n library = get_one(_db, Library, short_name=short_name)\n return library, False\n library, is_new = cls.by_cache_key(_db, short_name, _lookup)\n return library",
"def get_from_cache_fallback(self, xid, cls):\n if xid in self._cache:\n return self._cache[xid]\n else:\n self._cache[xid] = ret = cls(self, xid)\n return ret",
"def __init__(self,mirror=\"warehouse.primekinetics.org\",cache=\"cache\"):\n\t\tself.mirrorLocation=mirror\n\t\tself.cacheLocation=cache\n\t\tself.cas2primeids=dict()\n\t\tself.primeid2cas=dict()\n\t\n\t\tself.cacheItems=['cas2primeids','primeid2cas'] # these are the items we wish to save/load in the cache\n\t\ttry: \n\t\t\tself.loadCache() # try to load the cache\n\t\texcept:\n\t\t\tprint \"Couldn't load cache.\"\n\t\t\tself.readCAS()\n\t\t\tself.saveCache()",
"def make_library_cache(prefix):\n # avoid cache prefix reuse\n assert prefix not in _lib_cache_prefixes\n _lib_cache_prefixes.add(prefix)\n\n class CustomCodeLibraryCacheImpl(CodeLibraryCacheImpl):\n _filename_prefix = prefix\n\n class LibraryCache(Cache):\n \"\"\"\n Implements Cache that saves and loads CodeLibrary objects for additional\n feature for the specified python function.\n \"\"\"\n _impl_class = CustomCodeLibraryCacheImpl\n\n return LibraryCache"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
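The create() factory above builds a lookup cache straight from a remote CSV of player ID mappings. As a minimal illustrative sketch (a pandas-backed stand-in; the CsvIdCache class and the 'yahoo_ID' column name below are assumptions, not the actual baseball_id implementation):

import pandas as pd

class CsvIdCache:
    # Illustrative stand-in for a CSV-backed ID lookup cache.
    def __init__(self, source):
        # source may be a URL or an open file handle; pandas accepts both.
        self.df = pd.read_csv(source, encoding='iso-8859-1')

    def from_yahoo_ids(self, ids):
        # Return rows whose yahoo_ID column matches the given IDs
        # (the column name is an assumption about the CSV layout).
        return self.df[self.df['yahoo_ID'].isin(ids)]

# Usage, mirroring the docstring example above:
# cache = CsvIdCache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv')
# print(cache.from_yahoo_ids([10794, 9542, 7578]))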
Factory method to create a fake data source. This refers to a static data file that is in the current package. This function exists for testing purposes as it avoids network traffic to get the actual up-to-date ID mapping. | def create_fake(cls):
source = pkg_resources.open_text('baseball_id', 'sample.master.csv',
encoding='iso-8859-1')
c = lookup.Cache(source)
return c | [
"def make_test_data(self):\n import data",
"def _create_data_source(metadata):\n factory = metadata.get_callable()\n src = factory()\n engine = tools.get_engine()\n engine.add_source(src)\n return src",
"def test_data_source_soaps_id_dynamic_datas_get(self):\n pass",
"def test_factory_methods(self):\n\n DatumTest.create_data()",
"def _data_source(source):\n try:\n return DataSource(source)\n except ValueError:\n log.warning('Could not interpret data source `%s`. '\n 'Setting to `Unknown`.', source)\n return DataSource(99)",
"def fixture_example_data():\n import_example_data()",
"def setup_dummy_data_manager():\n import repoze.filesafe\n repoze.filesafe._local.manager = mgr = DummyDataManager()\n return mgr",
"def load_data(identifier):\n if identifier == data.FAKE_DATA_ID:\n return data.fake_data()\n else:\n urls = settings.DATA_SOURCES\n if settings.UPLOAD_DIR:\n urls += [urllib.parse.urlparse(\"file://%s\" % settings.UPLOAD_DIR)]\n for url in urls:\n if fs.path.basename(url.path)[: -len(\".h5ad\")] == identifier:\n return data.load_data(url, identifier)\n else:\n if identifier and does_exist(url, identifier + \".h5ad\"):\n return _load_data_cached(\n url._replace(path=fs.path.join(url.path, identifier + \".h5ad\")), identifier\n )",
"def _factory(*args_, **kwargs_):\n return DataSetType(*args_, **kwargs_)",
"def load_data_source(data_source):\n source_module = __import__('source_'+data_source)\n get_source = getattr(source_module, 'get_source')\n return get_source()",
"def dataset_initialize(self, folder):\r\n if not os.path.isdir(folder):\r\n raise ValueError('Invalid folder: ' + folder)\r\n\r\n ref = self.config_values[self.CONFIG_NAME_USER] + '/INSERT_SLUG_HERE'\r\n licenses = []\r\n default_license = {'name': 'CC0-1.0'}\r\n licenses.append(default_license)\r\n\r\n meta_data = {\r\n 'title': 'INSERT_TITLE_HERE',\r\n 'id': ref,\r\n 'licenses': licenses\r\n }\r\n meta_file = os.path.join(folder, self.DATASET_METADATA_FILE)\r\n with open(meta_file, 'w') as f:\r\n json.dump(meta_data, f, indent=2)\r\n\r\n print('Data package template written to: ' + meta_file)\r\n return meta_file",
"def make_data_load(\r\n self, data_source: DataSourceBase, params: Dict, loader_type: str\r\n ) -> object:\r\n raise NotImplementedError",
"def fake_data(self):\n return None",
"def makeIdFactory(self, dataRef):\n # With the default configuration, this IdFactory doesn't do anything, because\n # the IDs it generates are immediately overwritten by the ID from the reference\n # catalog (since that's in config.measurement.copyColumns). But we create one here anyway, to\n # allow us to revert back to the old behavior of generating new forced source IDs,\n # just by renaming the ID in config.copyColumns to \"object_id\".\n expBits = dataRef.get(self.config.coaddName + \"CoaddId_bits\")\n expId = int(dataRef.get(self.config.coaddName + \"CoaddId\"))\n return lsst.afw.table.IdFactory.makeSource(expId, 64 - expBits)",
"def get_data_source(DataSourceId=None, Verbose=None):\n pass",
"def registerSampleData():\n # It is always recommended to provide sample data for users to make it easy to try the module,\n # but if no sample data is available then this method (and associated startupCompeted signal connection) can be removed.\n\n import SampleData\n iconsPath = os.path.join(os.path.dirname(__file__), 'Resources/Icons')\n\n # To ensure that the source code repository remains small (can be downloaded and installed quickly)\n # it is recommended to store data sets that are larger than a few MB in a Github release.\n\n # RegularizedFastMarching1\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching1',\n # Thumbnail should have size of approximately 260x280 pixels and stored in Resources/Icons folder.\n # It can be created by Screen Capture module, \"Capture all views\" option enabled, \"Number of images\" set to \"Single\".\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching1.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95\",\n fileNames='RegularizedFastMarching1.nrrd',\n # Checksum to ensure file integrity. Can be computed by this command:\n # import hashlib; print(hashlib.sha256(open(filename, \"rb\").read()).hexdigest())\n checksums = 'SHA256:998cb522173839c78657f4bc0ea907cea09fd04e44601f17c82ea27927937b95',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching1'\n )\n\n # RegularizedFastMarching2\n SampleData.SampleDataLogic.registerCustomSampleDataSource(\n # Category and sample name displayed in Sample Data module\n category='RegularizedFastMarching',\n sampleName='RegularizedFastMarching2',\n thumbnailFileName=os.path.join(iconsPath, 'RegularizedFastMarching2.png'),\n # Download URL and target file name\n uris=\"https://github.com/Slicer/SlicerTestingData/releases/download/SHA256/1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97\",\n fileNames='RegularizedFastMarching2.nrrd',\n checksums = 'SHA256:1a64f3f422eb3d1c9b093d1a18da354b13bcf307907c66317e2463ee530b7a97',\n # This node name will be used when the data set is loaded\n nodeNames='RegularizedFastMarching2'\n )",
"def test_teams_id_data_source_soaps_nk_dynamic_datas_get(self):\n pass",
"def make_default_data():\n data = Data()\n data.description = \"(unnamed repo)\"\n return data",
"def dummy_data_dir() -> Path:\n dir_path = Path(__file__).parent\n return dir_path / \"dummy_data\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
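The create_fake() record above shows the usual pattern of shipping a small sample file inside the package so tests never hit the network. A hedged sketch of that pattern (the package name 'mypkg', the file name, and the placeholder URL are hypothetical, not the real baseball_id resources):

import importlib.resources
import urllib.request

def open_id_source(offline=False):
    # When offline, read a sample CSV bundled with the (hypothetical)
    # package 'mypkg' instead of downloading the up-to-date mapping.
    if offline:
        return importlib.resources.open_text('mypkg', 'sample.master.csv',
                                             encoding='iso-8859-1')
    # Placeholder URL; the real factory above points at the GitHub-hosted CSV.
    return urllib.request.urlopen('https://example.org/master.csv')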
The extracter moves files. The arguments input_folder and output_folder are set through the GUI. Based on the values in the column called column_name in the spreadsheet, files are copied from input_folder to output_folder. Here, these are the gilbert_numbers in the spreadsheet fed from main(). They are matched to the file names. Each gilbert_number gets its own directory in the output_folder. output_folder should be empty, or at least not already contain the same gilbert_numbers. Also copies all speaker files from input_folder to output_folder. | def extracter(spreadsheet, column_name):
print header, "Running the extracter."
root=Tkinter.Tk()
root.withdraw()
root.update()
input_folder=tkFileDialog.askdirectory(title="Inputfolder: Please choose a directory that contains your corpus files")
root=Tkinter.Tk()
root.withdraw()
root.update()
output_folder=tkFileDialog.askdirectory(title="Outputfolder: Please choose a directory to copy files into")
print header, "Copying files from '{}' to '{}'.".format(input_folder, output_folder)
#collecting input files
inputfiles=[]
print "Locating files."
for dirpath, subdirs, files in os.walk(input_folder):
for f in files:
inputfiles.append(os.path.join(dirpath, f))
if len(inputfiles) in [1000,2000,4000,8000,16000,24000]:
print "{} files processed, still working.".format(len(inputfiles))
print "Found {} files.".format(len(inputfiles))
#read from spreadsheet
# with open(spreadsheet, "r") as spreadsheet:
# spreadsheet=pandas.read_csv(spreadsheet, encoding="utf-8")
numbers_to_be_extracted= spreadsheet[column_name].unique()
print header, "Gilbert numbers to be extracted:"
print ",".join([unicode(i) for i in numbers_to_be_extracted])
#copying speaker files
print header, "Copying speaker files."
speakerfiles=[f for f in inputfiles if re.match(".*\.txt", os.path.split(f)[1])]
os.mkdir(os.path.join(output_folder, "speakers"))
for s in speakerfiles:
shutil.copy2(s, os.path.join(output_folder, "speakers"))
#finding relevant input files
result=[]
for number in numbers_to_be_extracted:
print "Processing {}, creating folder '{}'.".format(number, number)
os.mkdir(os.path.join(output_folder, unicode(number)))
regex="(\d+)-(\d+)-(\d+)-"+number.astype('U')+"-(\D+)\.wav"
findings= [f for f in inputfiles if re.match(regex, os.path.split(f)[1])]
result= result+findings
for find in findings:
shutil.copy2(find, os.path.join(output_folder, unicode(number), os.path.split(find)[1]))
print header, "{} files have been copied to {}.".format(len(result), output_folder) | [
"def move_input_files():\n\n back_calc_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\Forage_model\\model_results\\regional_properties\\back_calc_2014_total\"\n orig_input_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\CENTURY4.6\\Kenya\\input\\regional_properties\\Worldclim_precip\"\n new_input_dir = r\"C:\\Users\\Ginger\\Dropbox\\NatCap_backup\\Forage_model\\CENTURY4.6\\Kenya\\input\\regional_properties\\forward_from_2014\"\n\n folders = [f for f in os.listdir(back_calc_dir) if\n os.path.isdir(os.path.join(back_calc_dir, f))]\n for folder in folders:\n site_dir = os.path.join(back_calc_dir, folder)\n FID = folder[4:]\n sch_files = [f for f in os.listdir(site_dir) if f.endswith('.sch')]\n sch_iter_list = [int(re.search('{}_{}(.+?).sch'.format(FID,\n FID), f).group(1)) for f in sch_files]\n if len(sch_iter_list) == 0: # no schedule modification was needed\n final_sch = os.path.join(orig_input_dir, '{}.sch'.format(FID))\n else:\n final_sch_iter = max(sch_iter_list)\n final_sch = os.path.join(site_dir, '{}_{}{}.sch'.format(FID,\n FID, final_sch_iter))\n grz_files = [f for f in os.listdir(site_dir) if f.startswith('graz')]\n if len(grz_files) > 0:\n grz_iter_list = [int(re.search('graz_{}(.+?).100'.format(\n FID), f).group(1)) for f in grz_files]\n final_iter = max(grz_iter_list)\n final_grz = os.path.join(site_dir, 'graz_{}{}.100'.format(\n FID, final_iter))\n shutil.copyfile(final_grz, os.path.join(new_input_dir,\n 'graz_{}.100'.format(\n FID)))\n shutil.copyfile(final_sch, os.path.join(new_input_dir, '{}.sch'.format(FID)))",
"def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n mkdir(sample_dir)\n # resize and move the mask images - e.g. 'target_folder/sample_name/imgs_necrosis.png'\n img_file_nec = join(input_folder, 'Necrosis',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_nec, self.rescale_ratio)\n img_nec = img_res.copy()\n cv2.imwrite(join(sample_dir, 'necrosis.png'), img_res)\n\n img_file_perf = join(input_folder, 'Perfusion',\n 'Tissue Slides.'+sample_name+'.png')\n img_res = self.process_img(img_file_perf, self.rescale_ratio)\n cv2.imwrite(join(sample_dir, 'perfusion.png'), img_res)\n\n # resize and move the maker HE and EF5 images\n files = listdir(input_folder)\n img_files = [x for x in files if x.split(\n '.')[-1] in ('tif', 'jpg', 'png')]\n for img_file in img_files:\n if (sample_name+'_' in img_file) or (sample_name+'-' in img_file):\n if ('HE-G' in img_file) or ('HE-green' in img_file) or ('HEgreen' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-green.png')):\n cv2.imwrite(join(sample_dir, 'HE-green.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-R' in img_file) or ('HE-red' in img_file) or ('HEred' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-red.png')):\n cv2.imwrite(join(sample_dir, 'HE-red.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif ('HE-B' in img_file) or ('HE-blue' in img_file) or ('HE-blue' in img_file):\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n if not os.path.exists(join(sample_dir, 'HE-blue.png')):\n cv2.imwrite(join(sample_dir, 'HE-blue.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n elif 'EF5' in img_file:\n img_res = self.process_img(\n join(input_folder, img_file), self.rescale_ratio)\n img_ef5 = img_res.copy()\n if not os.path.exists(join(sample_dir, 'EF5.png')):\n cv2.imwrite(join(sample_dir, 'EF5.png'), img_res)\n else:\n warnings.warn(\n f\"file already exists, while processing {img_file}\")\n\n masked_ef5 = (img_ef5 * (img_nec <= 0)).astype(img_ef5.dtype)\n cv2.imwrite(join(sample_dir, 'EF5_masked.png'), masked_ef5)\n assert len(listdir(sample_dir)) == 7\n return",
"def classify(self, input_folder):\n\n print('Input directory is ', input_folder)\n\n self.create_folders(input_folder)\n\n for file in os.listdir(input_folder):\n\n # Do not touch unseen files (e.g. .DS_STORE)\n if file.startswith('.'):\n continue\n\n original_file_path = os.path.join(input_folder, file)\n\n # Do not touch existing directories\n if os.path.isdir(original_file_path) or file in self.folders:\n continue\n\n extension = file.split('.')[-1]\n\n moved_ind = 0\n for folder, ext_list in self.folder_to_files.items():\n folder = os.path.join(input_folder, folder)\n if extension in ext_list:\n shutil.move(original_file_path, folder)\n print(f'{file} MOVED TO {folder}.')\n moved_ind = 1\n break\n\n if not moved_ind:\n folder = os.path.join(input_folder, self.other_folder)\n shutil.move(original_file_path, folder)\n print(f'{file} MOVED TO {folder}')",
"def project_extract(self):\n # Run the convert_csv method for all the raw files\n # Run the reshape_csv method for all the csv files\n # Clean the folders if specified\n self.__open_folder_files(self.convert_csv,\\\n self._raw_location)\n self.__open_folder_files(self.reshape_csv,\\\n self._csv_location)\n if self._remove_csv == True:\n self.__open_folder_files(self.remove_csv,\\\n self._csv_location)\n if self._remove_reshaped == True:\n self.__open_folder_files(self.remove_reshaped,\\\n self._reshaped_location)",
"def move_generators_to_input(self, generator_folder_glob):\n spawn_folder_names = []\n generator_folders = glob(generator_folder_glob)\n for i, folder in enumerate(generator_folders):\n base_name = 'e01s{:02d}_{}f0000'.format(i + 1, os.path.basename(folder))\n input_destination = os.path.join(self.input_folder, base_name)\n data_destination = os.path.join(self.data_folder, base_name)\n create_folder(input_destination)\n create_folder(data_destination)\n spawn_folder_names.append(input_destination)\n create_symlinks(\n files=os.path.join(folder, '*'),\n dst_folder=os.path.relpath(input_destination)\n )\n return spawn_folder_names",
"def binder(folder_name: str, output_name: str = \"output.exe\", verbose=True):\n\n # we get all the files from the given folder\n files: List[str] = os.listdir(folder_name)\n\n if files == []:\n print(\" No file in \", folder_name, \" folder\")\n return\n\n # we sort then by comparing the concatenated number\n files = sorted(files, key=lambda x: int(x.split(\"_\")[0]))\n\n if verbose:\n print(\"encoutered {} files:\".format(len(files)))\n for file in files:\n print(file)\n\n # we open an output stream\n with open(output_name, \"wb+\") as output_stream:\n # And for every gathered files\n for file in files:\n with open(os.path.join(folder_name, file), \"rb\") as input:\n # we add it at the end of the document\n output_stream.write(input.read())\n\n print(\"Done!\")",
"def __concatonate_files(self, new_file_name, parent_folder):\n\n # make the output directory\n output_file = self.save_directory + \"/\" + new_file_name\n\n # check if save_directory exists\n if not os.path.exists(self.save_directory):\n try:\n # make the directory\n os.makedirs(self.save_directory)\n except PermissionError:\n # if the user is unable to write to this directory, we should not continue\n print(\"You do not have the correct permissions for creating a directory here. Please try again.\")\n exit(-1)\n\n barcode_files = []\n for root, directory, files in os.walk(parent_folder):\n # we need to know where each file is in the barcode folder so we can read data from it\n for name in files:\n barcode_files.append( os.path.join(root, name) )\n\n with open(output_file, 'w') as writer:\n for name in barcode_files:\n with open(name, 'r') as reader:\n for line in reader:\n writer.write(line)",
"def process_input(args):\n for element in args.input:\n if os.path.isdir(element): \n listing = os.listdir(element)\n for file in listing:\n process_file(os.path.join(element, file), args.output, args.verbose, args.clearoutput, args.enable_procyon, args.disable_description, args.disable_dump, args.no_kit_exception, args.disable_sql)\n if args.movein:\n if args.verbose:\n print \"Moving %s to %s\" % (os.path.join('.',element), os.path.join(args.movein, element))\n # TODO: issue if inner dirs. Are we handling this?\n try: \n os.rename(os.path.join(element, file), os.path.join(args.movein, file))\n except OSError, e:\n if args.verbose:\n print \"%s no longer present?: %s\\n\" % (file, str(e))\n\n if os.path.isfile(element):\n process_file(os.path.join('.',element), args.output, args.verbose, args.clearoutput, args.enable_procyon, args.disable_description, args.disable_dump, args.no_kit_exception)\n # dirname = os.path.join(args.output, '{filename}-*'.format(filename=element))\n if args.movein:\n if args.verbose:\n print \"Moving %s to %s\" % (os.path.join('.',element), os.path.join(args.movein, os.path.basename(element)))\n os.rename(os.path.join('.',element), os.path.join(args.movein, os.path.basename(element)))",
"def genes_file_creation(input_folder):\n file_paths = {}\n for file_name in os.listdir(input_folder):\n file_paths[file_name] = input_folder + '/' + file_name\n\n df = pa.DataFrame()\n \n for file_name in file_paths:\n df_temp = pa.read_csv(file_paths[file_name], sep='\\t', header=None)\n print(df_temp.columns)\n gene_column = 0\n df_temp = df_temp[[gene_column]]\n df_temp.columns = ['Gene_Name_DE']\n row = []\n file_extension = os.path.splitext(file_name)[1]\n row.append(file_name.replace(file_extension, \"\"))\n row.extend(df_temp['Gene_Name_DE'].tolist())\n df = df.append([row], ignore_index=True)\n\n df.insert(1, 'Description', 'Genes_DE')\n\n df.to_csv('DE_gene.gmt', sep='\\t', index=False, header=False)",
"def read_input_txt_file(self, inputfile, outputfolder):\n\n # set output folder from sys argv and append \\\\\n self.outputfolder = outputfolder + \"\\\\\"\n\n with open(inputfile, 'r') as file2open:\n # for each line split into columns\n for line in file2open:\n #split line on tab\n splitline=line.split('\\t')\n \n \n # check if any empty lines, or fields are present in input file. do not check prefix (last element in list)\n if '' in splitline[0:7]:\n raise ValueError(\"\\nError in the input file! \\nHave you used Excel?!?!?! \\n\\\n Please open in notepad and ensure there are no blank lines and all fields are present\")\n \n # assign each value to a variable\n # barcode, subarray (numeric), dye and scan number for file 1\n file1_barcode=splitline[0]\n file1_subarray=int(splitline[1])\n file1_dye=splitline[2]\n file1_scan_number=splitline[3]\n \n # barcode, subarray (numeric), dye and scan number for file 2\n file2_barcode=splitline[4]\n file2_subarray=int(splitline[5])\n file2_dye=splitline[6]\n file2_scan_number=splitline[7].rstrip()\n \n \n # a prefix can be added to as the last column, which is added to the start of the output filename (len(splitline) == 9)\n if len(splitline)==9: \n # capture prefix and remove newline\n out_file_prefix=splitline[8].rstrip()\n #check the prefix is not empty\n assert len(out_file_prefix)!= 0,\"Prefix column is empty, were you trying to add a prefix??!\"\n \n #and append an underscore to help later.\n out_file_prefix=out_file_prefix+\"_\"\n # if no prefix specified\n else:\n out_file_prefix=None\n \n # check the given subarray values are valid. if they are not the text value will not be returned from the dictionary\n assert file1_subarray in self.subarray_dict, \"the given subarray for the Cy3 sample is invalid (\"+str(file2_subarray)+\")(must be a number 1-8)\"\n assert file2_subarray in self.subarray_dict, \"the given subarray for the Cy5 sample is invalid (\"+str(file2_subarray)+\")(must be a number 1-8)\"\n \n # convert the given subarray (an integer 1-8 - the keys in self.subarray_dict) into the string used in the file name (the values in self.subarray_dict)\n file1_subarray=self.subarray_dict[file1_subarray]\n file2_subarray=self.subarray_dict[file2_subarray]\n \n\n # concatenate barcode, scan number and subarray text string to create a filename pattern to search for\n filename1 = str(file1_barcode) + \"_S0\"+file1_scan_number+\"*\" + file1_subarray\n filename2 = str(file2_barcode) + \"_S0\"+file2_scan_number+\"*\" +file2_subarray\n\n # append to a list\n self.files_to_find.append((filename1, file1_dye, filename2, file2_dye,out_file_prefix))",
"def processInputFolder(self):\n for file in os.listdir(self.config[\"inputPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateImages(file)",
"def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)",
"def main():\n # checking the directory\n cwd = os.getcwd()\n print(f'The working directory: {cwd}')\n # counting time \n start_time = time.process_time()\n # passing args\n arg = parse_arguments()\n sub_dir = arg.sub_dir\n dir_out = arg.dir_out\n file_amb = 'csv_to_clean'\n names_ambigous = defaultdict(str)\n with open(file_amb, 'r') as fh:\n for line in fh:\n name = line.strip().split('/')[2]\n names_ambigous[name] = names_ambigous.get(name, '')\n names_ambigous[name] += line.strip()\n print(f'number files: {len(names_ambigous)}')\n # checking if the output directory exist\n # if not make it\n f_pwd = os.path.join('Results', 'kmer_counts')\n # get the genus names\n cnt = 0\n for name, filename in names_ambigous.items():\n cleaned = get_csv_clean(filename)\n full_path = os.path.join(f_pwd, name)\n if os.path.exists(full_path):\n print(f'The path {full_path} exist')\n pass\n else:\n os.makedirs(full_path)\n csv_name = f'{full_path}/{name}_k2_8_chr.csv'\n print(f'Checking the full path {csv_name}')\n with open(csv_name, 'w') as fout:\n for km, cn in cleaned.items():\n fout.write(f'{km},{cn}\\n')\n cnt += 1\n # get final time of the script\n end = time.process_time()\n total_time = end - start_time\n print(f'The script takes {total_time} to finish!')\n print(f'Where read and manipulated {cnt} files')\n print('Done!')",
"def moveprocessedfb2(self, input_folder_path, processed_folder_path, conn, logg):\n logg.writing_log(conn, 'Starting moving processed fb2 files')\n if os.listdir(input_folder_path):\n for file_name in os.listdir(input_folder_path):\n os.rename(os.path.join(input_folder_path, file_name), os.path.join(processed_folder_path, file_name))\n logg.writing_log(conn, 'All processed files are moved to processed folder')\n else:\n logg.writing_log(conn, 'The folder is empty, nothing to move')\n conn.commit()\n conn.close()",
"def test_input_folders_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n folder = data_dir + \"build-custom/files/more/\"\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folders_files\"\n params[\"input\"] = files + [folder]\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files.extend(list_files_folder(folder, ext=params[\"input_extension\"]))\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")",
"def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))",
"def transform_to_songs(data_folder, dataset_name, _):\n from_path = f\"{data_folder}/1-extract/{dataset_name}.csv\"\n to_path = f\"{data_folder}/2-transform/songs.csv\"\n\n # TODO Fill in this function\n pass",
"def enter_folder(infile, indir, outdir):\n\tdf_new = pd.DataFrame(columns=['SeqID', 'Genome_Fraction', 'N50', 'Total_Alignment_Length', 'Duplication_Ratio',\n\t\t\t\t 'Number_of_Contigs', 'Total_Assembly_Length', 'Largest_Contig', 'Missasemblies']) # make a empty dataframe\n\tif infile is not None: \n\t\tdf = pd.read_csv(infile, sep='\\t', index_col=0, header=None)\n\t\tfor i in range(df.shape[0]):\n\t\t\tsample = str(df.iloc[i].name) # folder name that will be looped through code.\n\t\t\tos.chdir(indir + sample)\n\t\t\tdf_new = get_assembly_stats(df, indir, outdir, df_new, sample)\n\telse:\n\t\tdirlist = os.listdir(indir) # get files and directories in the input directory\n\t\tfor folder in dirlist:\n\t\t\tos.chdir(indir + folder)\n\t\t\t#sample=os.path.split(os.path.split(os.path.realpath(__file__))[0])[1]\n\t\t\tsample=os.path.split(os.path.abspath(os.getcwd()))[1]\n\t\t\tdf_new = get_assembly_stats(None, indir, outdir, df_new, sample)\n\tos.chdir(outdir)\n\tdf_new.to_csv('Assembly_stats.csv', sep='\\t', index=True)\n\treturn df_new",
"def organize(src_dir=None, dst_dir=None):\n root = tkinter.Tk()\n root.withdraw()\n\n # Prompt user to select source and destination directories\n initial = os.path.expanduser('~')\n if not src_dir:\n title = ('Please select the directory containing the element maps:')\n src_dir = tkinter.filedialog.askdirectory(parent=root, title=title,\n initialdir=os.getcwd())\n print('Source directory is {}'.format(src_dir))\n if not dst_dir:\n title = ('Please select the destination'\n ' for the organized element maps:')\n dst_dir = tkinter.filedialog.askdirectory(parent=root, title=title,\n initialdir=os.getcwd())\n print('Destination directory is {}'.format(dst_dir))\n total = 0\n moved = 0\n exist = 0\n for fp in glob.iglob(os.path.join(src_dir, '*.tif')):\n # Set directory name\n fn = os.path.basename(fp)\n dn = _get_name(fn)\n if dn:\n total += 1\n # Create directory if neccesary\n try:\n os.makedirs(os.path.join(dst_dir + _get_grid(fn), dn))\n except OSError:\n pass\n else:\n print('Creating directory {}...'.format(dn))\n # Move file into proper directory\n src = os.path.join(fp)\n dst = os.path.join(dst_dir + _get_grid(fn), dn)\n try:\n open(dst, 'r')\n except IOError:\n print('Copying {}...'.format(fn))\n try:\n shutil.copy2(src, dst)\n except IOError:\n print('Could not write to destination. Out of space?')\n raise\n else:\n exist += 1\n else:\n #print '{} already exists!'.format(os.path.basename(dst))\n moved += 1\n print(('{:,} files processed ({:,} moved,'\n ' {:,} already existed)').format(total, exist, moved))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
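The extracter above walks the input tree, matches each file name against a per-number regex, and copies hits into one sub-directory per gilbert number. A compact Python 3 sketch of that core step (the folder layout and the <digits>-<digits>-<digits>-<number>-<letters>.wav pattern are taken from the code above; the function and variable names here are illustrative):

import os
import re
import shutil

def copy_by_number(inputfiles, numbers, output_folder):
    # Copy every file whose basename matches the expected pattern for a
    # given gilbert number into its own sub-directory of output_folder.
    for number in numbers:
        target = os.path.join(output_folder, str(number))
        os.makedirs(target, exist_ok=True)
        pattern = re.compile(r"(\d+)-(\d+)-(\d+)-%s-(\D+)\.wav" % re.escape(str(number)))
        for path in inputfiles:
            if pattern.match(os.path.basename(path)):
                shutil.copy2(path, target)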
Matches gilbert_sentences as contained in the inputfile to user input. Input can be numeric (e.g. entries in inputfile[items] that are equal to 2). Input can be characters (e.g. all entries in inputfile[transcription] that contain 'ɪ'). If output_csv is set, the resulting data will be written to a csv file. If move_data is set, the resulting data will be used to copy the relevant files to the desired folder. | def main(inputfile, column, search_term, move_data=True, output_csv=False):
parser = argparse.ArgumentParser()
print header, "Running the sentence finder", header
with open(inputfile, "r") as inputspread:
inputdata=pandas.read_csv(inputspread, encoding="utf-8")
for c in inputdata.columns:
parser.add_argument("--"+c, type=lambda s: unicode(s, 'utf8'))
parser.add_argument("--move_data", action='store_true', help="If this flag is set, files that match criteria are copied from input_folder to output_folder. input_folder and output_folder are set through graphical interface.")
parser.add_argument("--output_csv", action='store_true', help="If set, this flag is set, a csv file containing the sentences that match the criteria is written. The output_folder is set through graphical interface.")
args= parser.parse_args(['--transcription', 'ɪ', '--items', '<3'])
print args
argsdict= vars(args)
print argsdict
for item in [a for a in argsdict if argsdict[a]]:
print type(argsdict[item])
intinput=re.compile(ur"(["+"".join(operatordict.keys())+"]+)(?:\s*?)([0-9]+)", re.UNICODE)
strinput=re.compile(ur"(["+"".join(operatordict.keys())+"]+?)(?:\s*?)(\D+)", re.UNICODE)
outputdata=inputdata
#the actual matching happens here, two procedures for strings versus numbers
for item in [a for a in argsdict if argsdict[a]]:
if re.match(strinput, argsdict[item]):
#what if they try to use > or < with a string
matcher=re.findall(strinput, argsdict[item])[0]
print "Matching string", item, " ".join(matcher)
print type(argsdict[item])
outputdata=pandas.merge(outputdata, inputdata[inputdata[item].str.contains(matcher[1])], how='inner')
elif re.match(intinput, argsdict[item]):
matcher=re.findall(intinput, argsdict[item])[0]
print "Matching number", item, " ".join(matcher)
print type(matcher[1])
outputdata=pandas.merge(outputdata, inputdata[operatordict[matcher[0]](inputdata[item], float(matcher[1]))], how='inner')
else:
print "\nError: No match for the input ", item
print header, "Resulting dataset:"
print outputdata
if output_csv:
print header, "output_csv is activated."
outputdata.to_csv(output_csv, encoding="utf-8", index=False)
print header, "Written csv spreadsheet to {}".format(output_csv)
if move_data:
print header, "move_data is activated."
extracter(outputdata, 'gilbert_number') | [
"def process_file(path_in, path_out, threshold):\n infile = open(path_in, \"r\", encoding=\"utf-8\")\n outfile = open(path_out, \"w\", encoding=\"utf-8\")\n csv_reader = csv.reader(infile)\n csv_writer = csv.writer(outfile)\n for i, line in enumerate(csv_reader):\n try:\n text_id, text, masked, label_binary, label_ternary, label_finegrained, source = line\n except ValueError:\n if line == ['Place for parser output']:\n pass\n else:\n import pdb; pdb.set_trace()\n if print_only:\n print(check_sentences(text, threshold, print_only))\n else:\n # return False\n swiss_text = check_sentences(text, threshold)\n if i % 10000 == 0:\n print(\"Processed line #{}\".format(i) + \" {}\".format(text))\n if swiss_text:\n csv_writer.writerow([text_id, text, masked, label_binary, label_ternary, label_finegrained, source])\n infile.close()\n outfile.close()",
"def extractWords(self, inputDataset):\n reviewFile = open(inputDataset, \"r\", encoding=\"utf-8-sig\")\n for record in reviewFile:\n record = record.strip().split(\"\\t\") # tab-delimited .txt file\n self.addUnigrams(int(record[0]), record[1])\n reviewFile.close()",
"def process_input_file(sess, char_dict, model_settings, model_vars, input_file):\n \n with open(input_file, 'r') as f:\n for s in f: # s is the line string\n if s and (len(s) > 0):\n chars = list(s.strip())\n cids = char_dict.chars2cids(chars)\n \n in_embedding = np.eye(model_settings['input_classes'])\n one_hot = in_embedding[cids]\n one_hot_by_t = np.expand_dims(one_hot, 1)\n\n # run session to retriev prob\n probs = process_sentence(sess, model_settings, model_vars, one_hot_by_t) \n labels = viterbi(probs)\n words = char_dict.chars2words(chars, labels)\n print('|'.join(words))",
"def train_from_data(bigrams, fname):\n context = ''\n def remove_punct(word):\n punct = \"\"\"-_,. :;\"'()[]{}$!?/\\\\\"\"\"\n for c in punct:\n word = word.replace(c,' ')\n return word\n\n with open(fname) as f:\n for line in f:\n line = remove_punct(line)\n for word in line.split():\n if not word: continue\n bigrams[context, word.lower()] += 1\n context = word.lower()",
"def trigram(self, input):\n for l in input:\n line = l.strip()\n y1, y2, y3 = line.split(' ')\n if line:\n print line, log(self.q(y3, y1, y2))",
"def _process_input_file(filename, stats):\n tf.logging.info(\"Processing input file: %s\", filename)\n processed = []\n\n dataset = pd.read_csv(filename, delimiter=\"\\t\")\n for sample_idx in range(dataset.shape[0]):\n sample = dataset.iloc[sample_idx]\n stats.update([\"sentences_seen\"])\n # The first 2 sentences per file will be skipped.\n\n input1 = sample.sentence1\n input2 = sample.sentence2\n label = sample.gold_label\n stats.update([\"sentences_considered\"])\n if isinstance(input1, str) and isinstance(input2, str) and label in ['entailment', 'neutral', 'contradiction']:\n serialized = _create_serialized_example(input1, input2, label)\n processed.append(serialized)\n stats.update([\"sentences_output\"])\n else:\n stats.update([\"invalid label\"])\n tf.logging.info(\"Completed processing file %s\", filename)\n return processed",
"def _process_input_file(filename, vocab, stats):\n tf.logging.info(\"Processing input file: %s\", filename)\n processed = []\n\n predecessor = None # Predecessor sentence (list of words).\n current = None # Current sentence (list of words).\n successor = None # Successor sentence (list of words).\n\n for successor_str in tf.gfile.FastGFile(filename):\n stats.update([\"sentences_seen\"])\n successor = successor_str.split()\n\n # The first 2 sentences per file will be skipped.\n if predecessor and current and successor:\n stats.update([\"sentences_considered\"])\n\n # Note that we are going to insert <EOS> later, so we only allow\n # sentences with strictly less than max_sentence_length to pass.\n if FLAGS.max_sentence_length and (\n len(predecessor) >= FLAGS.max_sentence_length or len(current) >=\n FLAGS.max_sentence_length or len(successor) >=\n FLAGS.max_sentence_length):\n stats.update([\"sentences_too_long\"])\n else:\n serialized = _create_serialized_example(predecessor, current, successor,\n vocab)\n processed.append(serialized)\n stats.update([\"sentences_output\"])\n\n predecessor = current\n current = successor\n\n sentences_seen = stats[\"sentences_seen\"]\n sentences_output = stats[\"sentences_output\"]\n if sentences_seen and sentences_seen % 100000 == 0:\n tf.logging.info(\"Processed %d sentences (%d output)\", sentences_seen,\n sentences_output)\n if FLAGS.max_sentences and sentences_output >= FLAGS.max_sentences:\n break\n\n tf.logging.info(\"Completed processing file %s\", filename)\n return processed",
"def translate_files(input_file, output_file, translate_dict, delete_symbols):\n\n for line in input_file:\n result = translate(line, translate_dict, delete_symbols)\n output_file.write(result)",
"def trigram_transform(data,\n stoplist,\n stop_remove = [],\n parsed_col = 'parsed',\n trigram_outfile = 'trigram_transformed.txt',\n trigram_col = 'trigram_review'):\n from gensim.models import Phrases\n \n bigram_model = Phrases.load('phrase_model_bigram_save.txt')\n trigram_model = Phrases.load('phrase_model_trigram_save.txt')\n \n with open(trigram_outfile, 'w', encoding = 'utf_8') as file:\n item_list = list(data.index)\n trigram_list = list()\n for doc in item_list:\n unigram_review = [token.lemma_ for token \n in data.loc[doc, parsed_col]\n if not thing_to_remove(token)]\n bigram_review = bigram_model[unigram_review]\n trigram_review = trigram_model[bigram_review]\n trigram_review = [term for term in trigram_review\n if (term not in stoplist # no stoplist words\n and not (len(term) < 2 # no short words\n and term not in set(stop_remove)\n # unless they're in stop_remove\n ))]\n trigram_list.append(trigram_review)\n trigram_review_str = u' '.join(trigram_review)\n file.write(trigram_review_str + '\\n')\n data.loc[:, trigram_col] = trigram_list",
"def process_file(file_path, output_dir):\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\n data = open(file_path).read().splitlines()\n\n # line별로 process를 해준 뒤,\n processed_data = [process_line(line, tokenizer) for line in data]\n\n intentions = list(map(lambda x: x[0], processed_data))\n tokens = list(map(lambda x: x[1], processed_data))\n\n # seq_in : 토큰들로만 이루어진 파일\n intention_file = os.path.join(output_dir, \"label\")\n seq_in = os.path.join(output_dir, \"seq.in\")\n\n with open(intention_file, \"w\") as f:\n f.write(\"\\n\".join(intentions) + \"\\n\")\n\n with open(seq_in, \"w\") as f:\n f.write(\"\\n\".join(tokens)+ \"\\n\")",
"def _process_text_file(self, data_folder, data_type, label):\n\n for data_file in data_folder:\n # Skips header of email\n found_first_new_line = True\n cleaned_text = []\n with open(data_file, encoding=\"utf8\", errors=\"replace\") as f:\n for line in f:\n if line == '\\n':\n # found_first_new_line = True\n continue\n elif found_first_new_line:\n # Separates text data into tokens and parses out stop words and symbols\n tokens = nlp(line)\n words = [token.lemma_.lower() for token in tokens]\n words = [w for w in words if self._predicate_filter(w)]\n for w in words:\n cleaned_text.append(w)\n \n # Append data into csv\n with open(f'preprocessing/{data_type}/{data_type}_data.csv', 'a+', newline='') as write_file:\n writer = csv.writer(write_file)\n cleaned_text = ' '.join(cleaned_text)\n writer.writerow([label, cleaned_text])",
"def flag_sentences(\n sentences, language, feminine_input=[], masculine_input=[]\n ):\n flagged_sentences = []\n\n # Read names\n f = open('filters/gender_bias/lexicals.json')\n data = json.load(f)\n\n # Define the words, that represent feminine and masculine groups in both languages\n if language == \"en\":\n\n feminine_titles = data[\"feminine_titles_en\"]\n feminine_relation = data[\"feminine_relation_en\"]\n feminine_relation_plural = data[\"feminine_relation_plural_en\"]\n feminine_jobs = data[\"feminine_jobs_en\"]\n feminine_jobs_plural = data[\"feminine_jobs_plural_en\"]\n feminine_names = data['feminine_names_en']\n\n masculine_titles = data[\"masculine_titles_en\"]\n masculine_relation = data[\"masculine_relation_en\"]\n masculine_relation_plural = data[\"masculine_relation_plural_en\"]\n masculine_jobs = data[\"masculine_jobs_en\"]\n masculine_jobs_plural = data[\"masculine_jobs_plural_en\"]\n masculine_names = data[\"masculine_names_en\"]\n\n feminine = (\n [\"she\", \"her\", \"hers\"]\n + feminine_relation\n + feminine_relation_plural\n + feminine_titles\n + feminine_jobs\n + feminine_jobs_plural\n + feminine_names\n )\n masculine = (\n [\"he\", \"him\", \"his\"]\n + masculine_relation\n + masculine_relation_plural\n + masculine_titles\n + masculine_jobs\n + masculine_jobs_plural\n + masculine_names\n )\n\n elif language == \"fr\":\n\n feminine_titles = data[\"feminine_titles_fr\"]\n feminine_relation = data[\"feminine_relation_fr\"]\n feminine_relation_plural = data[\"feminine_relation_plural_fr\"]\n feminine_jobs = data[\"feminine_jobs_fr\"]\n feminine_jobs_plural = data[\"feminine_jobs_plural_fr\"]\n feminine_names = data['feminine_names_fr']\n\n masculine_jobs = data[\"masculine_jobs_fr\"]\n masculine_jobs_plural = data[\"masculine_jobs_plural_fr\"]\n masculine_relation = data[\"masculine_relation_fr\"]\n masculine_relation_plural = data[\"masculine_relation_plural_fr\"]\n masculine_titles = data[\"masculine_titles_fr\"]\n masculine_names = data['masculine_names_fr']\n\n feminine = (\n [\"elle\", \"sienne\"]\n + feminine_relation\n + feminine_relation_plural\n + feminine_titles\n + feminine_jobs\n + feminine_jobs_plural\n + feminine_names\n )\n masculine = (\n [\"il\", \"sien\"]\n + masculine_relation\n + masculine_relation_plural\n + masculine_titles\n + masculine_jobs\n + masculine_jobs_plural\n + masculine_names\n )\n\n elif language == \"pl\":\n\n feminine_titles = data[\"feminine_titles_pl\"]\n feminine_relation = data[\"feminine_relation_pl\"]\n feminine_relation_plural = data[\"feminine_relation_plural_pl\"]\n feminine_jobs = data[\"feminine_jobs_pl\"]\n feminine_jobs_plural = data[\"feminine_jobs_plural_pl\"]\n feminine_names = data['feminine_names_pl']\n\n masculine_titles = data[\"masculine_titles_pl\"]\n masculine_relation = data[\"masculine_relation_pl\"]\n masculine_relation_plural = data[\"masculine_relation_plural_pl\"]\n masculine_jobs = data[\"masculine_jobs_pl\"]\n masculine_jobs_plural = data[\"masculine_jobs_plural_pl\"]\n masculine_names = data['masculine_names_pl']\n\n feminine = (\n [\"ona\", \"jej\"]\n + feminine_relation\n + feminine_relation_plural\n + feminine_titles\n + feminine_jobs\n + feminine_jobs_plural\n + feminine_names\n )\n masculine = (\n [\"on\", \"jego\"]\n + masculine_relation\n + masculine_relation_plural\n + masculine_titles\n + masculine_jobs\n + masculine_jobs_plural\n + masculine_names\n )\n\n elif language == \"ru\":\n\n feminine_titles = data[\"feminine_titles_ru\"]\n feminine_relation = data[\"feminine_relation_ru\"]\n 
feminine_relation_plural = data[\"feminine_relation_plural_ru\"]\n feminine_jobs = data[\"feminine_jobs_ru\"]\n feminine_jobs_plural = data[\"feminine_jobs_plural_ru\"]\n feminine_names = data['feminine_names_ru']\n\n masculine_titles = data[\"masculine_titles_ru\"]\n masculine_relation = data[\"masculine_relation_ru\"]\n masculine_relation_plural = data[\"masculine_relation_plural_ru\"]\n masculine_jobs = data[\"masculine_jobs_ru\"]\n masculine_jobs_plural = data[\"masculine_jobs_plural_ru\"]\n masculine_names = data[\"masculine_names_ru\"]\n\n feminine = (\n [\"она\", \"ее\"]\n + feminine_relation\n + feminine_relation_plural\n + feminine_titles\n + feminine_jobs\n + feminine_jobs_plural\n + feminine_names\n )\n masculine = (\n [\"он\", \"его\"]\n + masculine_relation\n + masculine_relation_plural\n + masculine_titles\n + masculine_jobs\n + masculine_jobs_plural\n + masculine_names\n )\n else:\n raise NameError(\n 'The specified language is not supported or misformatted. Try \"en\", \"fr\", \"pl\" or \"ru\" as language arguments to the filter() method.'\n )\n\n # Close names file\n f.close()\n\n assert (\n len(sentences) > 0\n ), \"You must provide at least one sentence for the analysis. Check the content of your sentences array you pass to the filter() method.\"\n\n for sentence in sentences:\n\n # Clean the sentence content using regex\n processed_sentence = sentence.lower()\n processed_sentence = re.sub(\"^\", \" \", processed_sentence)\n processed_sentence = re.sub(\"$\", \" \", processed_sentence)\n\n # Take care of urls\n words = []\n for word in processed_sentence.split():\n i = word.find(\"http\")\n if i >= 0:\n word = word[:i] + \" \" + \"__url__\"\n words.append(word.strip())\n processed_sentence = \" \".join(words)\n processed_sentence = re.sub(r\"\\[([^\\]]*)\\] \\( *__url__ *\\)\", r\"\\1\", processed_sentence)\n\n # Remove illegal chars and extra space\n processed_sentence = re.sub(\"__url__\", \"URL\", processed_sentence)\n processed_sentence = re.sub(r\"[^A-Za-z0-9():,.!?\\\"\\']\", \" \", processed_sentence)\n processed_sentence = re.sub(\"URL\", \"__url__\", processed_sentence)\n processed_sentence = re.sub(r\"^\\s+\", \"\", processed_sentence)\n processed_sentence = re.sub(r\"\\s+$\", \"\", processed_sentence)\n processed_sentence = re.sub(r\"\\s+\", \" \", processed_sentence)\n\n # Make sure that user input has words in lower case form\n joint_feminine = feminine + feminine_input\n joint_feminine = [word.lower() for word in joint_feminine]\n joint_masculine = masculine + masculine_input\n joint_masculine = [word.lower() for word in joint_masculine]\n\n # Split the words in the processed_sentence to find the intersection with the feminine array of keywords\n intersection_feminine = set(processed_sentence.split()).intersection(\n set(joint_feminine)\n )\n\n # Split the words in the processed_sentence to find the intersection with the masculine array of keywords\n intersection_masculine = set(processed_sentence.split()).intersection(\n set(joint_masculine)\n )\n\n # If the intersection occurred, the intersection_feminine and intersection_masculine variables will contain at least one common keyword\n # use this intersection information to get the value for the corresponding flags\n feminine_flag = len(intersection_feminine) > 0\n masculine_flag = len(intersection_masculine) > 0\n\n # In case the processed_sentence contains the keywords from feminine and masculine arrays, set a union_flag value\n union_flag = (\n len(intersection_feminine) > 0\n and 
len(intersection_masculine) > 0\n )\n\n # If the processed_sentence didn't contain the keywords neither from feminine, nor from masculine arrays, set a neutral_flag value\n neutral_flag = (\n len(intersection_feminine) == 0\n and len(intersection_masculine) == 0\n )\n\n # Use the union_flag value to set the neutral_flag value, setting to False the feminine and masculine flags\n if union_flag is True:\n feminine_flag = False\n masculine_flag = False\n neutral_flag = True\n\n # Create the sentence object with the retrieved flag values\n sentence_object = {\n \"sentence\": sentence,\n \"feminine_flag\": feminine_flag,\n \"masculine_flag\": masculine_flag,\n \"neutral_flag\": neutral_flag,\n }\n\n # Append the object to the array we return\n flagged_sentences.append(sentence_object)\n\n return flagged_sentences",
"def compose_g_carpa(\n in_carpa_path: str,\n temp_carpa_path: str,\n words_mapping: MappingType,\n carpa_path: str,\n log_file: TextIO,\n):\n bos_symbol = words_mapping[\"<s>\"]\n eos_symbol = words_mapping[\"</s>\"]\n unk_symbol = words_mapping[\"<unk>\"]\n with open(in_carpa_path, \"r\", encoding=\"utf8\") as f, open(\n temp_carpa_path, \"w\", encoding=\"utf8\"\n ) as outf:\n current_order = -1\n num_oov_lines = 0\n for line in f:\n line = line.strip()\n col = line.split()\n if current_order == -1 and not re.match(r\"^\\\\data\\\\$\", line):\n continue\n if re.match(r\"^\\\\data\\\\$\", line):\n log_file.write(r\"Processing data...\\n\")\n current_order = 0\n outf.write(line + \"\\n\")\n elif re.match(r\"^\\\\[0-9]*-grams:$\", line):\n current_order = int(re.sub(r\"\\\\([0-9]*)-grams:$\", r\"\\1\", line))\n log_file.write(f\"Processing {current_order} grams...\\n\")\n outf.write(line + \"\\n\")\n elif re.match(r\"^\\\\end\\\\$\", line):\n outf.write(line + \"\\n\")\n elif not line:\n if current_order >= 1:\n outf.write(\"\\n\")\n else:\n if current_order == 0:\n outf.write(line + \"\\n\")\n else:\n if len(col) > 2 + current_order or len(col) < 1 + current_order:\n raise Exception(f'Bad line in arpa lm \"{line}\"')\n prob = col.pop(0)\n is_oov = False\n for i in range(current_order):\n try:\n col[i] = str(words_mapping[col[i]])\n except KeyError:\n is_oov = True\n num_oov_lines += 1\n break\n if not is_oov:\n rest_of_line = \" \".join(col)\n outf.write(f\"{prob}\\t{rest_of_line}\\n\")\n carpa_proc = subprocess.Popen(\n [\n thirdparty_binary(\"arpa-to-const-arpa\"),\n f\"--bos-symbol={bos_symbol}\",\n f\"--eos-symbol={eos_symbol}\",\n f\"--unk-symbol={unk_symbol}\",\n temp_carpa_path,\n carpa_path,\n ],\n stdin=subprocess.PIPE,\n stderr=log_file,\n stdout=log_file,\n env=os.environ,\n )\n carpa_proc.communicate()\n os.remove(temp_carpa_path)",
"def test_LM(in_file, out_file, LM):\n\tprint \"testing language models...\"\n # for each input line, break string into ngrams, then check it against each probability model\n\ttest_contents = open(in_file).readlines()\n\twriter = open(out_file, 'w')\n\tfor line in test_contents:\n\t\tfourgrams = ngram_from_line(line)\n\t\tlabel = calculate_probability(fourgrams, LM)\n\t\twriter.write(label + \" \" + line)\n\t\t#print(label + \" \" + line)\n\twriter.close()",
"def process_file(\n ctx,\n column: str,\n csv_file: Path,\n in_place: bool,\n output_file: Path,\n save_openrefine: bool,\n openrefine_output_file: Path,\n save_processed_values: bool,\n processed_values_output_file: Path,\n ignore_values_file: Path,\n dry_run: bool,\n):\n replacer = Replacer(ctx.obj.get('GKG_API_KEY'))\n\n unique_values, rows, headers = _read_unique_values_from_csv(\n csv_file, column,\n )\n\n ignore_values = _read_ignore_values_file(ignore_values_file)\n\n processed_values, replacements = _process_suggestions(\n replacer, unique_values, ignore_values,\n )\n\n if save_processed_values:\n _create_processed_values_output_file(\n processed_values, processed_values_output_file, csv_file,\n )\n\n if save_openrefine:\n _create_openrefine_file(\n openrefine_output_file, csv_file, replacements, column,\n )\n\n if dry_run:\n sys.exit(0)\n\n output_file_path = csv_file if in_place else output_file\n _create_output_file(\n output_file_path, csv_file, headers, rows, replacements, column,\n )",
"def analyze_input_files(name_file,title_file,known_for_file):\n\n tconst_set = set()\n\n remove_first_line(title_file)\n title_in = codecs.open(title_file,'r','utf-8')\n title_table = title_in.read().splitlines(True)\n title_in.close()\n\n #Prepare set of tconst values from title table\n for t in title_table:\n r = t.rstrip().split(\"\\t\")\n tconst_set.add(r[0])\n\n remove_first_line(name_file)\n f_in = codecs.open(name_file, 'r','utf-8')\n f_out_name = codecs.open('name_temp.tsv', 'w','utf-8')\n f_out_relation = codecs.open(known_for_file, 'w','utf-8')\n table = f_in.read().splitlines(True)\n\n for i in table:\n k = i.rstrip().split(\"\\t\")\n line_for_name = \"\\t\".join(k[:-1]) + \"\\n\"\n f_out_name.write(line_for_name)\n\n actor_relations = k[-1].split(\",\")\n for relation in actor_relations:\n if relation in tconst_set:\n line_for_relation = k[0] + \"\\t\" + relation + \"\\n\"\n f_out_relation.write(line_for_relation)\n\n f_in.close()\n f_out_name.close()\n f_out_relation.close()\n\n # Delete original file and rename temp file to original name\n os.remove(name_file)\n os.rename('name_temp.tsv',name_file)",
"def process_corpus(input_dir):\n data_files = [f for f in os.listdir(input_dir) if valid_file(input_dir, f)]\n data_set = []\n for file_name in data_files:\n with open(os.path.join(input_dir, file_name), 'r') as f:\n f_text = f.read()\n f_text = unicode(f_text, errors='replace')\n f_text = f_text.encode('ascii', 'ignore')\n f_text = f_text.replace('\\n', ' ').replace('\\r', '')\n f_text = re.sub(' +', ' ', f_text)\n author, book = file_name.rstrip('.txt').split('-')\n tags = get_pos_tags(f_text)\n f_dict = {'label': file_name,\n 'author': author,\n 'book': book,\n 'text': f_text,\n 'pos': tags}\n data_set.append(f_dict)\n save_dir = '/'.join(input_dir.split('/')[:-1])\n newf = file_name.replace('.txt', '')\n open(os.path.join(save_dir, newf + '.data'), 'w').write(f_text)\n open(os.path.join(save_dir, newf + '.pos'), 'w').write(tags)\n pickle.dump(f_dict, open(os.path.join(save_dir, newf+\".p\"), \"wb\"))\n return data_set",
"def train_suffixes(in_file, out_file,\n max_suf=DEFAULT_MAX_SUF, lowercase=False,\n input_format='factored', threshold=1):\n\n # initialize data structures\n suff = {'*': {}}\n for suf_len in xrange(1, max_suf + 1):\n suff[suf_len] = {}\n line_no = 0\n # setup input format (function to return (form, lemma, tag) triplets in an array)\n if input_format == 'factored':\n get_tokens_func = lambda line: [token.split('|') for token in line.split()]\n elif input_format == 'one_per_line':\n get_tokens_func = lambda line: [line.split()] if line else []\n elif input_format == 'csts':\n get_tokens_func = lambda line: [re.match(r'<[df][^>]*>([^<]*)<l>([^<]*)<t>([^<]*)(?:<|$)',\n line).groups()] \\\n if re.match(r'<[df][ >]', line) else []\n else:\n raise Exception('Unknown format ' + input_format)\n\n # process the input file\n with codecs.open(in_file, 'r', 'UTF-8') as in_file:\n for line in in_file:\n for form, lemma, tag in get_tokens_func(line.strip()):\n if lowercase:\n form = form.lower()\n lemma = lemma.lower()\n diff = ' '.join(inv_edit_script(form.lower(), lemma.lower())).strip()\n # irregular words go under separate index\n if diff.startswith('*'):\n suf_lemmas = suff['*'].get(form, {})\n suff['*'][form] = suf_lemmas\n key = '|'.join((diff, tag))\n suf_lemmas[key] = suf_lemmas.get(key, 0) + 1\n # other words get prefixes\n else:\n for suf_len in xrange(1, max_suf + 1):\n suf_lemmas = suff[suf_len].get(form[-suf_len:], {})\n suff[suf_len][form[-suf_len:]] = suf_lemmas\n key = '|'.join((diff, tag))\n suf_lemmas[key] = suf_lemmas.get(key, 0) + 1\n line_no += 1\n if line_no % 10000 == 0:\n print >> sys.stderr, str(line_no),\n print >> sys.stderr, ''\n\n # prune according to the threshold\n for _, suff_len in suff.iteritems():\n for suffix in suff_len:\n suff_len[suffix] = {tag_lemma: count\n for tag_lemma, count in suff_len[suffix].iteritems()\n if count > threshold}\n # save the output\n out_file = codecs.open(out_file, 'w', 'UTF-8')\n json.dump(suff, out_file, indent=4, separators=(',', ': '), ensure_ascii=False)\n out_file.close()",
"def process_raw_phrases(file_path):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Build or update a Ticker's metrics using a Quotecast object. Only metrics that can be converted to float are supported, but that should be enough to handle all real use cases. | def build_ticker_from_quotecast(
quotecast: Quotecast,
references: Dict[int, List[str]] = None,
ticker: Ticker = None,
) -> Ticker:
if references is None:
references = dict()
if ticker is None:
ticker = Ticker()
# SETUP PRODUCTS & METRICS
message_array = json.loads(quotecast.json_data)
for message in message_array:
if message["m"] == "un":
reference = message["v"][0]
value = message["v"][1]
product, metric = references[reference]
ticker.products[product].metrics[metric] = value
elif message["m"] == "us":
reference = message["v"][0]
value = message["v"][1]
product, metric = references[reference]
if value[4] == "-":
date = datetime.datetime.strptime(
value,
"%Y-%m-%d",
)
value = datetime.datetime.timestamp(date)
ticker.products[product].metrics[metric] = value
elif value[2] == ":":
time = datetime.time.fromisoformat(value)
value = time.hour * 3600 + time.minute * 60 + time.second
ticker.products[product].metrics[metric] = value
else:
# NOT CONVERTIBLE TO FLOAT
raise RuntimeWarning(
"Unsupported string metric : " f"{metric} = {message}"
)
elif message["m"] == "a_req":
references[message["v"][1]] = message["v"][0].rsplit(
sep=".",
maxsplit=1,
)
elif message["m"] == "a_rel":
delete_list = []
for reference in references:
if ".".join(references[reference]) == message["v"][0]:
delete_list.append(reference)
for reference in delete_list:
del references[reference]
elif message["m"] == "h":
pass
elif message["m"] == "ue":
pass
elif message["m"] == "d":
raise AttributeError(f"Subscription rejected : {message}")
else:
raise AttributeError(f"Unknown metric : {message}")
# SETUP PRODUCT LIST
ticker.product_list.extend(ticker.products)
# SETUP METADATA
ticker.metadata.MergeFrom(quotecast.metadata)
return ticker | [
"def _update_metrics(self):\n raise NotImplementedError",
"def update_metrics(self, round_num: int, metrics_to_append: Dict[str, Any]):\n raise NotImplementedError",
"def add_ticker_to_df(self, ticker):\n\n new_stock = TickerData(ticker=ticker, use_early_replacements=self.use_early_replacements,\n day_of_month_for_monthly_data=self.day_of_month_for_monthly_data,\n force_new_data=self.force_new_data,\n ).data_monthly\n\n new_stock['cap_gains'] = new_stock['Close'].shift(-1) / new_stock['Close']\n new_stock['total_gains'] = new_stock['Adj Close'].shift(-1) / new_stock['Adj Close']\n new_stock['div_gains'] = new_stock['total_gains'] - new_stock['cap_gains'] + 1\n\n # capital gains, total gains, dividend gains during duration.\n new_stock['cap_dur'] = new_stock['Close'] / new_stock['Close'].shift(self.lookback_months)\n new_stock['total_dur'] = (new_stock['Adj Close'] /\n new_stock['Adj Close'].shift(self.lookback_months))\n new_stock['div_dur'] = new_stock['total_dur'] - new_stock['cap_dur']\n\n st = 1 - self.tax_rates_by_ticker[ticker]['ST_GAINS']\n lt = 1 - self.tax_rates_by_ticker[ticker]['LT_GAINS']\n div = 1 - self.tax_rates_by_ticker[ticker]['INCOME']\n\n # ST momentum = capital gains + dividends. Tax cap gains only if greater than 1\n new_stock['st_mom'] = np.where(new_stock['cap_dur'] <= 1.0,\n new_stock['cap_dur'],\n (new_stock['cap_dur'] - 1) * st + 1)\n new_stock['st_mom'] += new_stock['div_dur'] * div\n new_stock['lt_mom'] = np.where(new_stock['cap_dur'] <= 1.0,\n new_stock['cap_dur'],\n (new_stock['cap_dur'] - 1) * lt + 1)\n new_stock['lt_mom'] += new_stock['div_dur'] * div\n\n # add to main df for component\n self.df[f'{ticker}_adj_close'] = new_stock['Adj Close']\n self.df[f'{ticker}_close'] = new_stock['Close']\n self.df[f'{ticker}_st_mom'] = new_stock['st_mom']\n self.df[f'{ticker}_lt_mom'] = new_stock['lt_mom']\n self.df[f'{ticker}_pretax_mom'] = new_stock['total_dur']",
"def get_metrics(data):\n ticker = data.ticker.unique()[0]\n try:\n info = yf.Ticker(ticker).info\n info = dict(filter(lambda item: item[1] is not None, info.items()))\n except:\n info = dict()\n t_stats = (\n GroupStats(data[\"Adj Close\"]).stats.to_dict(orient=\"dict\").get(\"Adj Close\")\n )\n\n metrics = {}\n metrics[\"beta\"] = beta = numeric_round(info.get(\"beta\", \"N/A\"), 2)\n metrics[\"peg\"] = peg = numeric_round(info.get(\"pegRatio\", \"N/A\"), 2)\n metrics[\"ptb\"] = ptb = numeric_round(info.get(\"priceToBook\", \"N/A\"), 2)\n metrics[\"dividend_pt\"] = dividend_pt = numeric_round(\n info.get(\"dividendRate\", \"N/A\"), 2\n )\n metrics[\"payout\"] = payout = numeric_round(info.get(\"payoutRatio\", \"N/A\"), 2)\n metrics[\"calmar\"] = calmar = numeric_round(t_stats.get(\"calmar\", \"N/A\"), 2)\n metrics[\"cagr\"] = cagr = numeric_round(t_stats.get(\"cagr\", \"N/A\"), 2)\n metrics[\"monthly_sharpe\"] = monthly_sharpe = numeric_round(\n t_stats.get(\"monthly_sharpe\", \"N/A\"), 2\n )\n metrics[\"monthly_sortino\"] = monthly_sortino = numeric_round(\n t_stats.get(\"monthly_sortino\", \"N/A\"), 2\n )\n\n metrics_summary = f\"\"\"\n \\u03B2: {beta},\\n\n PEG Ratio: {peg},\\n\n P/B Ratio: {ptb},\\n\n Dividend rate: {dividend_pt},\\n\n Payout Ratio: {payout}, \\n\n Calmar Ratio: {calmar}, \\n\n CAGR: {cagr}, \\n\n Monthly Sharpe: {monthly_sharpe}, \\n\n Monthly Sortino: {monthly_sortino}\n \"\"\"\n return ticker, metrics_summary, metrics",
"def populate_metric_values(self):\n self.new_counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]] = get_counter_metrics(\n self.counter_metric_specs, \n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n \n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_counter_metrics(self.new_counter_metrics[detailed_version.id])\n\n self.aggregated_counter_metrics = self.get_aggregated_counter_metrics()\n\n self.new_ratio_metrics: Dict[iter8id, Dict[iter8id, RatioDataPoint]] = get_ratio_metrics(\n self.ratio_metric_specs, \n self.counter_metric_specs, \n self.aggregated_counter_metrics,\n [version.spec for version in self.detailed_versions.values()],\n self.eip.start_time\n )\n\n # This is in the shape of a Dict[str, RatioMaxMin], where the keys are ratio metric ids\n # and values are their max mins. \n\n self.ratio_max_mins = self.get_ratio_max_mins()\n\n for detailed_version in self.detailed_versions.values():\n detailed_version.aggregate_ratio_metrics(\n self.new_ratio_metrics[detailed_version.id]\n )",
"def set_metrics(self):",
"def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)",
"def on_meter_values(self, connector_id: int, meter_value: Dict, **kwargs):\n for bucket in meter_value:\n for sampled_value in bucket[\"sampled_value\"]:\n if \"measurand\" in sampled_value:\n self._metrics[sampled_value[\"measurand\"]] = sampled_value[\"value\"]\n self._metrics[sampled_value[\"measurand\"]] = round(\n float(self._metrics[sampled_value[\"measurand\"]]), 1\n )\n if \"unit\" in sampled_value:\n self._units[sampled_value[\"measurand\"]] = sampled_value[\"unit\"]\n if (\n self._units[sampled_value[\"measurand\"]]\n == DEFAULT_POWER_UNIT\n ):\n self._metrics[sampled_value[\"measurand\"]] = (\n float(self._metrics[sampled_value[\"measurand\"]]) / 1000\n )\n self._units[sampled_value[\"measurand\"]] = HA_POWER_UNIT\n if (\n self._units[sampled_value[\"measurand\"]]\n == DEFAULT_ENERGY_UNIT\n ):\n self._metrics[sampled_value[\"measurand\"]] = (\n float(self._metrics[sampled_value[\"measurand\"]]) / 1000\n )\n self._units[sampled_value[\"measurand\"]] = HA_ENERGY_UNIT\n if len(sampled_value.keys()) == 1: # for backwards compatibility\n self._metrics[DEFAULT_MEASURAND] = sampled_value[\"value\"]\n self._units[DEFAULT_MEASURAND] = DEFAULT_ENERGY_UNIT\n if \"Meter.Start\" not in self._metrics:\n self._metrics[\"Meter.Start\"] = self._metrics[DEFAULT_MEASURAND]\n if \"Transaction.Id\" not in self._metrics:\n self._metrics[\"Transaction.Id\"] = kwargs.get(\"transaction_id\")\n self._transactionId = kwargs.get(\"transaction_id\")\n self._metrics[\"Session.Time\"] = round(\n (int(time.time()) - float(self._metrics[\"Transaction.Id\"])) / 60\n )\n self._metrics[\"Session.Energy\"] = round(\n float(self._metrics[DEFAULT_MEASURAND])\n - float(self._metrics[\"Meter.Start\"]),\n 1,\n )\n return call_result.MeterValuesPayload()",
"def calc_TCA_metrics(df, qwap=None, qwapU=None, arrActSlipPct=None, formatted=True):\n\n # Build results df so as to put variable definitions at start\n cols = ['Maker', 'Taker', 'Total', 'Desc']\n results = pd.DataFrame(index=rows_dict.keys(), columns=cols)\n\n # Restrict calculations to positive quantity fills only\n df = df[df['fillQuantity'] > 0].copy()\n results.loc['Order'] = ''\n\n # Populate Arrival Stats and Contract Details (incl side and mult)\n # Handle Generic Metrics\n if df['parentBid'].iloc[0] > 0:\n arrivalMid = (df['parentBid'].iloc[0] + df['parentAsk'].iloc[0]) / 2\n else:\n arrivalMid = (df['fillBid'].iloc[0] + df['fillAsk'].iloc[0]) / 2\n # Save to results\n results.loc['Arrival Mid'] = arrivalMid\n\n if df['orderSide'].iloc[0] == 'Buy':\n side = 1\n else:\n side = -1\n if df['secType'].iloc[0] == 'Option':\n mult = 100\n else:\n mult = 1\n\n # Handle qwap-dependent Metrics\n if qwap is not None:\n results.loc['Qwap'] = qwap\n results.loc['Qwap U'] = qwapU\n\n # Handle delta-dependent Metrics and data\n delta = df['fillDe'].iloc[0]\n vega = df['fillVe'].iloc[0]\n if delta != 0:\n if df['parentMark'].iloc[0] > 0:\n arrivalMark = df['parentMark'].iloc[0]\n arrivalUMid = (df['parentUBid'].iloc[0] + df['parentUAsk'].iloc[0]) / 2\n else:\n arrivalMark = df['fillMark'].iloc[0]\n arrivalUMid = (df['fillUBid'].iloc[0] + df['fillUAsk'].iloc[0]) / 2\n # Add delta-adjusted price column\n df['fillUMid'] = (df['fillUBid'] + df['fillUAsk']) / 2\n df['fillDPrice'] = df['fillPrice'] - delta * (df['fillUMid'] - arrivalUMid)\n # Calculate arrival vols\n firstFillVol = df['fillVol'].iloc[0]\n firstFillDPx = df['fillDPrice'].iloc[0]\n arrivalMidVol = firstFillVol + (arrivalMid - firstFillDPx) / (100 * vega)\n arrivalMarkVol = firstFillVol + (arrivalMark - firstFillDPx) / (100 * vega)\n results.loc['Delta'] = delta\n results.loc['Vega'] = vega\n results.loc['Arrival Mark'] = arrivalMark\n results.loc['Arrival U Mid'] = arrivalUMid\n results.loc['Arrival Mid Vol'] = arrivalMidVol\n results.loc['Arrival Mark Vol'] = arrivalMarkVol\n\n # Handle qwap- and delta-dependent Metrics\n if qwap is not None:\n qwapDPx = qwap - delta * (qwapU - arrivalUMid)\n qwapVol = arrivalMidVol + (qwapDPx - arrivalMid) / (100 * vega)\n results.loc['Qwap Vol'] = qwapVol\n\n # Handle arrActSlipPct and delta-dependent Metrics\n if arrActSlipPct is not None:\n actUMid = (df['fillUMid'].iloc[0]) * (1 + arrActSlipPct)\n # Note that this uses Mid at the time of first option fill, rather than order arrival,\n # since my stock returns are based off the time of the first stock fill\n # (which will follow the option fill)\n results.loc['Act U Mid'] = actUMid\n\n\n # Calculate Metrics that Depend on Make/Take Classification\n def populate_rows(sdf, col):\n # sdf - subdataframe - e.g. 
filtered for just Make or Take trades\n # col - the column to populate (Maker / Taker / Total)\n\n # Calc metrics that require none of (delta/vega, qwap, arrActSlipPct)\n childOrders = sdf['clOrdId'].unique().shape[0]\n avgChildSize = sdf.groupby('clOrdId').first()['childSize'].sum() / childOrders\n filledCtr = sdf['fillQuantity'].sum()\n ctrFillRate = filledCtr / (childOrders * avgChildSize)\n avgFillPctSpread = ((sdf['fillPrice'] - sdf['fillBid'])\n / (sdf['fillAsk'] - sdf['fillBid'])\n * sdf['fillQuantity']).sum() / filledCtr\n execPx = (sdf['fillPrice'] * sdf['fillQuantity']).sum() / filledCtr\n pxRange = sdf['fillPrice'].max() - sdf['fillPrice'].min()\n slipArrMidPx = side * (arrivalMid - execPx)\n slipArrMidUSD = slipArrMidPx * filledCtr * mult\n # Save to results\n results.loc['Child Orders', col] = childOrders\n results.loc['Avg Child Size', col] = avgChildSize\n results.loc['Filled Ctr', col] = filledCtr\n results.loc['Ctr Fill Rate', col] = ctrFillRate\n results.loc['Avg Fill Pct Spread', col] = avgFillPctSpread\n results.loc['Exec Px', col] = execPx\n results.loc['Px Range', col] = pxRange\n results.loc['Slip Arr Mid Px', col] = slipArrMidPx\n results.loc['Slip Arr Mid USD', col] = slipArrMidUSD\n\n # Calc metrics that require only qwap\n if qwap is not None:\n slipQwapPx = side * (qwap - execPx)\n slipQwapUSD = slipQwapPx * filledCtr * mult\n results.loc['Slip Qwap Px', col] = slipQwapPx\n results.loc['Slip Qwap USD', col] = slipQwapUSD\n\n # Calc metrics that require only delta/vega\n if delta != 0:\n slipArrMarkPx = side * (arrivalMark - execPx) # Doesn't use delta but mark is zero for non options\n slipArrMarkUSD = slipArrMarkPx * filledCtr * mult\n theoUMid = (sdf['fillUMid'] * sdf['fillQuantity']).sum() / filledCtr\n execDTheoArrMidPx = execPx - delta * (theoUMid - arrivalUMid)\n dTheoPxRange = sdf['fillDPrice'].max() - sdf['fillDPrice'].min()\n dTheoSlipArrMidPx = side * (arrivalMid - execDTheoArrMidPx)\n dTheoSlipArrMidUSD = dTheoSlipArrMidPx * filledCtr * mult\n dTheoSlipArrMarkPx = side * (arrivalMark - execDTheoArrMidPx)\n dTheoSlipArrMarkUSD = dTheoSlipArrMarkPx * filledCtr * mult\n execDTheoVol = arrivalMidVol + (execDTheoArrMidPx - arrivalMid) / (100 * vega)\n dTheoVolRange = dTheoPxRange / (100 * vega)\n dTheoSlipArrMidVol = dTheoSlipArrMidPx / (100 * vega)\n dTheoSlipArrMarkVol = dTheoSlipArrMarkPx / (100 * vega)\n # Save to results\n results.loc['Slip Arr Mark Px', col] = slipArrMarkPx\n results.loc['Slip Arr Mark USD', col] = slipArrMarkUSD\n results.loc['Theo U Mid', col] = theoUMid\n results.loc['Exec DTheo Arr Mid Px', col] = execDTheoArrMidPx\n results.loc['DTheo Px Range', col] = dTheoPxRange\n results.loc['DTheo Slip Arr Mid Px', col] = dTheoSlipArrMidPx\n results.loc['DTheo Slip Arr Mid USD', col] = dTheoSlipArrMidUSD\n results.loc['DTheo Slip Arr Mark Px', col] = dTheoSlipArrMarkPx\n results.loc['DTheo Slip Arr Mark USD', col] = dTheoSlipArrMarkUSD\n results.loc['Exec DTheo Vol', col] = execDTheoVol\n results.loc['DTheo Vol Range', col] = dTheoVolRange\n results.loc['DTheo Slip Arr Mid Vol', col] = dTheoSlipArrMidVol\n results.loc['DTheo Slip Arr Mark Vol', col] = dTheoSlipArrMarkVol\n\n # Calc metrics that require both delta/vega and qwap\n if qwap is not None:\n execDTheoQwapPx = execPx - delta * (theoUMid - qwapU)\n dTheoSlipQwapPx = side * (qwap - execDTheoQwapPx)\n dTheoSlipQwapUSD = dTheoSlipQwapPx * filledCtr * mult\n dTheoSlipQwapVol = dTheoSlipQwapPx / (100 * vega)\n # Save to results\n results.loc['Exec DTheo Qwap Px', col] = execDTheoQwapPx\n 
results.loc['DTheo Slip Qwap Px', col] = dTheoSlipQwapPx\n results.loc['DTheo Slip Qwap USD', col] = dTheoSlipQwapUSD\n results.loc['DTheo Slip Qwap Vol', col] = dTheoSlipQwapVol\n\n # Calc metrics that require delta/vega and arrActSlipPct\n if arrActSlipPct is not None:\n execDActArrMidPx = execPx - delta * (actUMid - arrivalUMid)\n dActSlipArrMidPx = side * (arrivalMid - execDActArrMidPx)\n dActSlipArrMidUSD = dActSlipArrMidPx * filledCtr * mult\n dActSlipArrMarkPx = side * (arrivalMark - execDActArrMidPx)\n dActSlipArrMarkUSD = dActSlipArrMarkPx * filledCtr * mult\n execDActVol = arrivalMidVol + (execDActArrMidPx - arrivalMid) / (100 * vega)\n dActSlipArrMidVol = dActSlipArrMidPx / (100 * vega)\n dActSlipArrMarkVol = dActSlipArrMarkPx / (100 * vega)\n # Save to results\n results.loc['Exec DAct Arr Mid Px', col] = execDActArrMidPx\n results.loc['DAct Slip Arr Mid Px', col] = dActSlipArrMidPx\n results.loc['DAct Slip Arr Mid USD', col] = dActSlipArrMidUSD\n results.loc['DAct Slip Arr Mark Px', col] = dActSlipArrMarkPx\n results.loc['DAct Slip Arr Mark USD', col] = dActSlipArrMarkUSD\n results.loc['Exec DAct Vol', col] = execDActVol\n results.loc['DAct Slip Arr Mid Vol', col] = dActSlipArrMidVol\n results.loc['DAct Slip Arr Mark Vol', col] = dActSlipArrMarkVol\n\n # Calc metrics that require delta/vega, arrActSlipPct and qwap\n if qwap is not None:\n execDActQwapPx = execPx - delta * (actUMid - qwapU)\n dActSlipQwapPx = side * (qwap - execDActQwapPx)\n dActSlipQwapUSD = dActSlipQwapPx * filledCtr * mult\n dActSlipQwapVol = dActSlipQwapPx / (100 * vega)\n # Save to results\n results.loc['Exec DAct Qwap Px', col] = execDActQwapPx\n results.loc['DAct Slip Qwap Px', col] = dActSlipQwapPx\n results.loc['DActSlip Qwap USD', col] = dActSlipQwapUSD\n results.loc['DAct Slip Qwap Vol', col] = dActSlipQwapVol\n\n # Run populate_rows for makeDf / takeDf\n makeDf = df[df['childMakerTaker'] == 'Maker']\n takeDf = df[df['childMakerTaker'] == 'Taker']\n\n if makeDf['fillQuantity'].sum() > 0:\n populate_rows(makeDf, 'Maker')\n else:\n results['Maker'] = 0\n\n if takeDf['fillQuantity'].sum() > 0:\n populate_rows(takeDf, 'Taker')\n else:\n results['Taker'] = 0\n\n if df['fillQuantity'].sum() > 0:\n populate_rows(df, 'Total')\n else:\n results['Total'] = 0\n\n # Add descriptions\n for key in rows_dict.keys():\n results.loc[key, 'Desc'] = rows_dict[key][1]\n results.loc['Order', 'Desc'] = make_title(df)\n\n # Add formatting and return results\n if formatted:\n results = format_df(results, format_dict)\n return results",
"def update_measurements(measurements, qoe_mos, quality, vtranscoder_produced_profiles, transcoder_no):\n measurements['mean_opinion_score'] = qoe_mos\n measurements['quality'] = quality\n measurements['no_of_profiles_produced'] = len(vtranscoder_produced_profiles)\n measurements['percentage_of_gpu_users'] = percentage_of_gpu_users(transcoder_no)\n measurements['qoe_sum'] = \\\n Spectator.objects.filter(transcoder_no=transcoder_no).aggregate(Sum('mos_score'))['mos_score__sum']\n measurements['transcoding_cost'] = \\\n 0.5 + (1 if 1 in vtranscoder_produced_profiles else 0) + (1 if 2 in vtranscoder_produced_profiles else 0) + \\\n (1 if (1 in vtranscoder_produced_profiles and 2 in vtranscoder_produced_profiles) else 0) + \\\n (15 if is_gpu_needed(vtranscoder_produced_profiles) else 0)\n measurements['produced_profiles'] = vtranscoder_produced_profiles\n return measurements",
"def __call__(\n self,\n value: float = 0.0,\n values: ty.Sequence[float] = tuple(),\n counts: ty.Sequence[float] = tuple(),\n statistic_values: ty.Optional[MetricStatistics] = None,\n timestamp: ty.Optional[datetime] = None,\n ):\n metric_dict = dict(\n Namespace=self.namespace,\n MetricData=[self.metric_maker(value, values, counts, statistic_values, timestamp)],\n )\n logger.debug(\"put_metric\", extra=dict(put_metric=metric_dict))\n CLOUDWATCH_CLIENT().put_metric_data(**metric_dict) # type: ignore",
"def test_update_derived_metric(self):\n pass",
"def compute_track_metrics(use_async=CELERY_ENABLED):\n\n # arbitrary field check to not update already loaded tracks\n for track in Track.objects.filter(duration__is_null=False):\n if use_async:\n async_set_metrics.delay(track)\n else:\n sleep(2)\n track.set_metrics()",
"def map_to_ticker(self, raw_ticker: HitbtcRawTickerModel) -> HitbtcTickerModel:\n\n symbol = raw_ticker[\"symbol\"]\n low = Decimal(raw_ticker[\"low\"])\n high = Decimal(raw_ticker[\"high\"])\n volume = Decimal(raw_ticker[\"volume\"])\n volume_quote = Decimal(raw_ticker[\"volumeQuote\"])\n timestamp = raw_ticker[\"timestamp\"]\n raw_ask = raw_ticker[\"ask\"]\n ask = Decimal(raw_ask) if raw_ask is not None else raw_ask\n raw_bid = raw_ticker[\"bid\"]\n bid = Decimal(raw_bid) if raw_bid is not None else raw_bid\n raw_last = raw_ticker[\"last\"]\n last = Decimal(raw_last) if raw_last is not None else raw_last\n raw_open = raw_ticker[\"open\"]\n open_ = Decimal(raw_open) if raw_open is not None else raw_open\n\n ticker = HitbtcTickerModel(\n symbol=symbol,\n low=low,\n high=high,\n volume=volume,\n volume_quote=volume_quote,\n timestamp=timestamp,\n ask=ask,\n bid=bid,\n last=last,\n open=open_)\n\n return ticker",
"def metrics_builder(metrics_dict):\n df = pd.DataFrame(metrics_dict[\"combined_delay\"].mean(axis=0), columns=[\"Metrics\"])\n df.loc[\"Max_Actual_Delay\"] = metrics_dict[\"combined_delay\"][\"Actual_Delay\"].loc[metrics_dict[\"actual_max_index\"]]\n df.loc[\"Min_Actual_Delay\"] = metrics_dict[\"combined_delay\"][\"Actual_Delay\"].loc[metrics_dict[\"actual_min_index\"]]\n df.loc[\"Max_Predicted_Delay\"] = metrics_dict[\"combined_delay\"][\"Predicted_Delay\"].loc[\n metrics_dict[\"predicted_max_index\"]]\n df.loc[\"Min_Predicted_Delay\"] = metrics_dict[\"combined_delay\"][\"Predicted_Delay\"].loc[\n metrics_dict[\"predicted_min_index\"]]\n df.loc[\"Mean_Absolute_Error\"] = metrics_dict[\"MAE\"]\n df.loc[\"R2\"] = metrics_dict[\"R2\"]\n df.loc[\"Median_Absolute_Error\"] = metrics_dict[\"MEDAE\"]\n df.loc[\"Root_Mean_Squared_Error\"] = metrics_dict[\"RMSE\"]\n df.loc[\"Mean_Squared_Log_Error\"] = metrics_dict[\"MSLE\"]\n df = df.rename(index={\"Actual_Delay\": \"Actual_Delay_Mean\", \"Predicted_Delay\": \"Predicted_Delay_Mean\",\n \"Difference_In_Delay\": \"Difference_In_Delay_Mean\"})\n return df",
"def _measurement_update(self):\n pass",
"def train_qdm(historical, reference, out, variable, kind):\n hist = storage.read(historical)\n ref = storage.read(reference)\n\n kind_map = {\"additive\": \"+\", \"multiplicative\": \"*\"}\n try:\n k = kind_map[kind]\n except KeyError:\n # So we get a helpful exception message showing accepted kwargs...\n raise ValueError(f\"kind must be {set(kind_map.keys())}, got {kind}\")\n\n qdm = train_quantiledeltamapping(\n reference=ref, historical=hist, variable=variable, kind=k\n )\n\n storage.write(out, qdm.ds)",
"def _add_to_prometheus_metrics(self, scope, data):\n\n try: created = parse(data.get(\"created_at\")).timestamp()\n except TypeError: created = 0\n try: finished = parse(data.get(\"finished_at\")).timestamp()\n except TypeError: finished = 0\n try: started = parse(data.get(\"started_at\")).timestamp()\n except TypeError: started = 0\n\n self._prometheus_metrics[scope][\"id\"].add_metric([self._project, scope], data.get(\"id\", 0))\n self._prometheus_metrics[scope][\"duration\"].add_metric([self._project, scope], data.get(\"duration\", 0))\n self._prometheus_metrics[scope][\"created_timestamp\"].add_metric([self._project, scope], created)\n self._prometheus_metrics[scope][\"finished_timestamp\"].add_metric([self._project, scope], finished)\n self._prometheus_metrics[scope][\"started_timestamp\"].add_metric([self._project, scope], started)",
"def rateQuality(\n quality_metrics: dict,\n overall_Good_Cutoff: float = 0.1,\n overall_Bad_Cutoff: float = 0.2,\n time_Good_Cutoff: float = 0.1,\n time_Bad_Cutoff: float = 0.2,\n bad_Channel_Good_Cutoff: float = 0.15,\n bad_Channel_Bad_Cutoff: float = 0.3,\n channel_Good_Cutoff: float = 0.15,\n channel_Bad_Cutoff: float = 0.3,\n):\n\n # Check that the values in quality_metrics{} are positive numbers not equal to 0\n\n for i in quality_metrics.values():\n # Verify if any value in quality_metrics is a string\n if isinstance(i, str):\n logging.log(\n 40,\n \"Some value of Quality Metrics is not a positive number, please verify your EEG input data\",\n )\n break\n # Verify if any value in quality_metrics is bool type\n elif isinstance(i, bool):\n logging.log(\n 40,\n \"Some value of Quality Metrics is not a positive number, please verify your EEG input data\",\n )\n break\n # Verify if any value in quality_metrics is a negative number\n elif i < 0:\n logging.log(\n 40,\n \"Some value of Quality Metrics is not a positive number, please verify your EEG input data\",\n )\n break\n else:\n\n # Rating of EEG DATA according to the values of quality_metrics\n\n # The function rates the EEG DATA with the rule that the rating depends on the WORST rating\n\n if (\n quality_metrics[\"overall_high_amp\"] > overall_Bad_Cutoff\n or quality_metrics[\"times_high_var\"] > time_Bad_Cutoff\n or quality_metrics[\"ratio_bad_chans\"] > bad_Channel_Bad_Cutoff\n or quality_metrics[\"chan_high_var\"] > channel_Bad_Cutoff\n ):\n dataset_qualification = {\n \"dataset_qualification\": \"Bad dataset\"\n } # Bad EEG dataset rating if any rating is BAD\n # logging.info(\"Bad dataset: %s\", dataset_qualification['dataset_qualification'])\n return dataset_qualification\n elif (\n quality_metrics[\"overall_high_amp\"] < overall_Good_Cutoff\n and quality_metrics[\"times_high_var\"] < time_Good_Cutoff\n and quality_metrics[\"ratio_bad_chans\"] < bad_Channel_Good_Cutoff\n and quality_metrics[\"chan_high_var\"] < channel_Good_Cutoff\n ):\n dataset_qualification = {\n \"dataset_qualification\": \"Good dataset\"\n } # Good EEG dataset rating if all ratings are GOOD\n # logging.info(\"Good dataset: %s\", dataset_qualification['dataset_qualification'])\n return dataset_qualification\n else:\n dataset_qualification = {\n \"dataset_qualification\": \"Regular dataset\"\n } # Regular EEG dataset rating if any rating is REGULAR\n # logging.info(\"Regular dataset: %s\", dataset_qualification['dataset_qualification'])\n return dataset_qualification"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
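As a quick illustration of the message handling in the build_ticker_from_quotecast record above, here is a minimal sketch, assuming a hypothetical JSON payload and plain dictionaries in place of the protobuf Ticker: "a_req" messages register a reference id for a "product.metric" pair, and "un" messages then deliver values for those references.

```python
import json

# Minimal sketch: "a_req" registers reference -> (product, metric),
# "un" then delivers a value for that reference (plain dicts, not a Ticker).
def parse_quotecast_messages(json_data, references=None):
    references = {} if references is None else references
    metrics = {}
    for message in json.loads(json_data):
        if message["m"] == "a_req":
            # e.g. "360015751.LastPrice" is split into ("360015751", "LastPrice")
            references[message["v"][1]] = message["v"][0].rsplit(sep=".", maxsplit=1)
        elif message["m"] == "un":
            product, metric = references[message["v"][0]]
            metrics.setdefault(product, {})[metric] = message["v"][1]
    return metrics

sample = '[{"m": "a_req", "v": ["360015751.LastPrice", 101]}, {"m": "un", "v": [101, 532.3]}]'
print(parse_quotecast_messages(sample))  # {'360015751': {'LastPrice': 532.3}}
```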
Rebuild the request from history (self.__references). | def rebuild_request(self) -> Quotecast.Request:
references = self.references
request = Quotecast.Request()
for vwd_id, metric in references.values():
request.subscriptions[vwd_id].append(metric)
return request | [
"def action_rebuild(self, *args):\n\t\tself.get_active_image()._history._rebuild_from_history()",
"def rebuild(self):\n _logger.info( \"Rebuilding the API Caches...\" )\n\n # fill out the data structures\n self._buildApiTypesList()\n #_buildMayaTypesList()\n \n self._buildMayaReservedTypes(force=True)\n\n self._buildApiRelationships()\n\n # merge in the manual overrides: we only do this when we're rebuilding or in the pymelControlPanel\n _logger.info( 'merging in dictionary of manual api overrides')\n self._mergeClassOverrides()",
"def restore_references(self):\n for referrer in self._referrers:\n referrer.add_reference(self._gadget)",
"def _update(self):\n self.history = pd.concat([self.history, self._check_stats()], ignore_index=True)",
"def _problem_update_history(self, _):\n self._update_reward_values()\n self.history.curr_reward.append(self.curr_reward)\n self.history.curr_best_reward.append(self.curr_best_reward)",
"def refresh_history(self, history, state_next):\n if self.is_recurrent:\n history[:-1] = history[1:]\n history[-1] = state_next\n else:\n history[:, :, :-self.input_depth] = history[:, :, self.input_depth:]\n history[:, :, -self.input_depth:] = state_next\n return history",
"def history(self, history):\n self._history = history",
"def freshen_build_caches(self):",
"def requeue(self):",
"def history(self, history):\n\n self._history = history",
"def _push_history(self):\n self._history.append(self._state)",
"def CallHistory(self):",
"def _update_refsets(\n self, refsets: List[RefSet], evaluator: FunctionEvaluator\n ):\n # gather final refset entries\n x = np.vstack([refset.x for refset in refsets])\n fx = np.concatenate([refset.fx for refset in refsets])\n\n # reset function evaluation counter\n evaluator.n_eval = 0\n evaluator.n_eval_round = 0\n\n for i, ess_init_args in enumerate(self.ess_init_args):\n refsets[i] = RefSet(\n dim=ess_init_args['dim_refset'], evaluator=evaluator\n )\n refsets[i].initialize_from_array(x_diverse=x, fx_diverse=fx)\n refsets[i].sort()",
"def referrals(self, referrals):\n self._referrals = referrals",
"def change_history(self, new_reflist, modification_msg):\n self.visual.log(\"New reference list wrt: [{}], yielded {} items.\".format(modification_msg, len(new_reflist)))\n self.push_reference_list(new_reflist, modification_msg)\n # unselect stuff -- it's meaningless now\n self.unselect()",
"def rebuild(self): # remake_all_components\n for _, obj in self._components.items(): # pylint: disable=unused-variable\n obj.rebuild()",
"def referrals(self, referrals):\n\n self._referrals = referrals",
"def build_m_ref(self):\n for row in self.data.itertuples():\n self.reference_model.unlearn_review(row.entity_id, row.ngrams, row.stars, recalculate_features=False)\n self.reference_model.calculate_topk_features()\n\n # get predictions using m_ref\n self.m_ref_predictions = self.get_predictions(self.reference_model)\n self.m_ref_predictions_global = self.get_predictions(self.reference_model, mode='global')",
"def match_history(self) -> None:\n\n self._match_link_gen()\n self._download_json()\n\n return None"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
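A minimal sketch of the inversion performed by rebuild_request in the record above, assuming a plain dict in place of the protobuf Quotecast.Request and a hypothetical reference table: each reference id maps to a (vwd_id, metric) pair, and the rebuilt request groups the metrics back under their vwd_id.

```python
from collections import defaultdict

# Hypothetical reference table: reference id -> (vwd_id, metric)
references = {101: ("360015751", "LastPrice"), 102: ("360015751", "LastVolume")}

subscriptions = defaultdict(list)  # stands in for Quotecast.Request.subscriptions
for vwd_id, metric in references.values():
    subscriptions[vwd_id].append(metric)

print(dict(subscriptions))  # {'360015751': ['LastPrice', 'LastVolume']}
```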
check to see whether an id is for a group | def is_group(id):
return id.startswith('G') | [
"def group_exists(groupid):",
"def has_group():",
"def group_exists(self, group_name):",
"def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True",
"def test_get_group__valid_id(self):\n\n self.assertEqual(entities.Group(self.config_dict['groups'][0]['id'],\n self.config_dict['groups'][0]['policy'],\n self.config_dict['groups'][0]['experiments'],\n self.config_dict['groups'][0]['trafficAllocation']),\n self.project_config.get_group('19228'))",
"def isValidGroup(expense_group_id, cursor):\n query = \"\"\"\n SELECT * FROM expense_group WHERE id = ?\n \"\"\"\n cursor.execute(query, (expense_group_id,))\n return len(cursor.fetchall()) == 1",
"def isSetId(self):\n return _libsbml.Group_isSetId(self)",
"def test_groups_group_id_get(self):\n pass",
"def is_group(group):\n return type(group).__name__ == \"Group\"",
"def is_user_in_group(user_id, group_id):\n query = \"\"\"\n SELECT pos_id\n FROM current_position_holders NATURAL JOIN positions\n WHERE user_id = %s AND group_id = %s\n LIMIT 1\n \"\"\"\n with flask.g.pymysql_db.cursor() as cursor:\n cursor.execute(query, [user_id, group_id])\n return cursor.fetchone() is not None",
"def has_group(self, group: str) -> bool:\n return group in self.group_element_index",
"def has_group(self, group_name):\n # print self.groups\n return group_name in self.groups",
"def is_group():\n return False",
"def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. Mistake: {e}\"\n self.proceed_error(msg)\n return False",
"def is_group(obj) -> bool:\n return hasattr(obj, IOConstants.GROUP_ATTR_NAME)",
"def is_in_group(user, group_name):\n return is_in_group_user_id(user.id, group_name)",
"def test_get_group_by_id(self):\n pass",
"def belongs_to_group(self, group):\n return group in [x.name for x in self.groups.all()]",
"def group_exists(self):\n return AzureTools().group_exists(names.group_name(self))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
check to see whether an id is for a user | def is_user(id):
return id.startswith('U') | [
"def _check_id(self, user_id):\n add_params = {'user_ids': user_id}\n response = self._execute_requests('users.get', add_params)\n\n if 'error' in response:\n # проверка id на существование\n result = response['error']['error_msg']\n logger.error(f\"{result} '{user_id}'\")\n return result\n\n if response['response'][0]['is_closed'] is True and response['response'][0]['can_access_closed'] is False:\n # проверка id (открытый/закрытый)\n result = 'Private id'\n logger.error(f\"{result} '{user_id}'\")\n return result\n else:\n # в случае успешного прохождения проверок, возвращает id пользователя\n self.id_status = True\n result = response['response'][0]['id']\n logger.info(f\"Create instance VkUser for '{user_id}' id #{result}\")\n return result",
"def user_exists(userid):",
"def my_id(self, user_id):\n return user_id == self.socket.user_id",
"def member_exists(user_id):\n return True if User.find(user_id) else False",
"def user_exists():\n return 'userId' in session and User.query.filter_by(\n id=session['userId']).first() is not None",
"def is_user(request):\n return request.authenticated_userid == request.matchdict['username']",
"def check_id(self, id):",
"def validateID(self, userID):\n # Retrieving required data regarding the user\n url = f\"https://api.steampowered.com/ISteamUser/GetPlayerSummaries/v2/?steamids={str(userID)}&key={self.__apiKey}\"\n userInfo = self.retrieveData(url)\n # Returns False if the given ID was invalid\n if len(userInfo[\"response\"][\"players\"]) == 0:\n return False\n else:\n return True",
"def __contains__(self, userid):\r\n userid = int(userid)\r\n return bool(userid in self.players)",
"def user_in_session():\n return 'user_id' in login_session",
"def is_current_user(user_id):\n if current_user.id == user_id:\n return True\n else:\n return False",
"def user_is_owner(user_id):\n if current_user:\n return user_id == current_user.id\n return False",
"def isActiveUser(self,user_id):\n query = \"\"\"\n SELECT id from users \n where refresh_token is not null \n and id = '%s'\n \"\"\" %(user_id)\n results = Database().executeQuery(query)\n if(results.rowcount>0):\n return True\n return False",
"def same_user(user_id):\n return user_id == login_session['user_id']",
"def valid_userid(userid):\n\n return (is_integer(userid) and userid > 0)",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def is_valid_identifier(self, input_value, action_result, isUserUid=False):\r\n\r\n # For pagination, start from first page\r\n page_num = CODE42_PAGINATION\r\n while True:\r\n\r\n # Use page number as a param\r\n params = {'pgNum': page_num}\r\n # Make REST call\r\n ret_val, response = self._make_rest_call(endpoint=CODE42_USERS_ENDPOINT, action_result=action_result,\r\n params=params)\r\n\r\n if phantom.is_fail(ret_val):\r\n return phantom.APP_ERROR, None\r\n\r\n # Check for empty list\r\n if not response.get('data', {}).get('users', []):\r\n break\r\n\r\n # Iterate through all the users\r\n for user in response['data']['users']:\r\n if user['username'].lower() == input_value.lower():\r\n if isUserUid:\r\n user_id = user['userUid']\r\n else:\r\n user_id = user['userId']\r\n return phantom.APP_SUCCESS, user_id\r\n\r\n # Increment page number\r\n page_num += CODE42_PAGINATION\r\n\r\n return phantom.APP_ERROR, None",
"def is_user_id_available(self,\n\t user_id,\n\t shutit_pexpect_child=None,\n\t note=None,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.is_user_id_available(user_id,\n\t\t note=note,\n\t\t loglevel=loglevel)",
"def test_user_id_get(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
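The two preceding records (is_group and is_user) rely on the same id-prefix convention; a minimal routing sketch, using hypothetical ids not taken from the source, looks like this:

```python
# Hypothetical ids following the "G" / "U" prefix convention used above.
def classify(identifier):
    if identifier.startswith("G"):
        return "group"
    if identifier.startswith("U"):
        return "user"
    return "unknown"

for identifier in ("G0123ABCD", "U0456EFGH", "W999"):
    print(identifier, "->", classify(identifier))
```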
broadcast a new user joining the group | def user_joined_group(cls, group, user):
text = "{} joined the group chat".format(user.username)
cls._broadcast_group(group, None, group, text) | [
"def cli(ctx, group, user):\n return ctx.gi.users.add_to_group(group, user)",
"def join(self):\n channel = self.data[0]\n user_pseudonym = VALIDATED_USERS.get_pseudonym(SOCKET_TO_USERID.get(self.source, None))\n\n if user_pseudonym and self.target:\n target_server = self.target[1]\n if(BANHANDLER.is_banned_from_channel(user_pseudonym, target_server, channel)):\n self.source[0].send(\":orcbot!~@localhost PRIVMSG \"+SOCKET_TO_USERID[self.source]+\" :You're banned from \"+channel+\"\\r\\n\")\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].sendall(self.message)\n self.send()",
"def userJoined(self, user, channel):\n self.dispatch('population', 'userJoined', user, channel)",
"def user_left_group(cls, group, user):\n text = \"{} left the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)",
"def userJoined(self, user, channel):\n pass",
"def add_user_to_global_group(sender, instance, created, **kwargs):\n if created:\n instance.edit_group_membership(\n group=Group.objects.get_global_group(),\n user=instance,\n action='add'\n )",
"async def handle_member_join(self, new: Member) -> None:\r\n await self.on_user_joined.emit(new)",
"def broadcastMessage(self, data):\n print('Broadcast message to all users(unrealized) ')\n # TODO: realise method which send 1 message to all users",
"def join(self, gid, group_fields=None):\n kwargs = {}\n if group_fields:\n kwargs['data'] = group_fields\n r = self.put(\"/user/groups/{gid:d}\".format(gid=gid), **kwargs )\n if r.status_code == 204:\n return { \"status\" : True, \"message\" : \"\"}\n return { \"status\" : False, \"message\" : r.json() }",
"def _broadcast_local(self, data):\n local_users = self.factory.host_manager.get_users()\n for user in local_users:\n if \"bot\" not in user[\"username\"]:\n self._send_to(data, user)",
"def on_join(self, room, user):\n pass",
"def join(self, user):\n self.players.add(user)\n if user.pk not in self.queue:\n self.queue.append(user.pk)\n self.save()",
"def __on_newuser_join(self, user):\n print(f'\\n** ChatRoom info ** user {user} has joined the chat ** {len(self.user_list())} user/s **\\n')",
"def __send_broadcast_to_users(self, sending_group=\"global\"):\n\n if sending_group == \"global\":\n data = self.__global_broadcast_entry.get()\n self.__global_broadcast_entry.delete(0, 'end')\n print(f\"broad casting data: {data}\")\n self.__telegram_controller.broadcast_to_users(data, sending_group = \"global\")\n\n elif sending_group == \"line\":\n line = self.__line_number_broadcast_entry.get()\n if len(line) >0 and line.isnumeric():\n data = self.__line_text_broadcast_entry.get()\n self.__line_text_broadcast_entry.delete(0, 'end')\n self.__line_number_broadcast_entry.delete(0, 'end')\n self.__telegram_controller.broadcast_to_users(data, sending_group=line)\n else:\n print(f\"line number must be a number, {line}\")\n else:\n print(f\"{sending_group} is an invalid sending group\")",
"def join_group(self, user, group, force=0):\n if not force and not group.can_join(user):\n raise NotEnoughPrivileges\n \n group.add_member(user)\n user.add_to_group(get_usergroup_database().get_usergroup(group.get_user_id()))\n if hasattr(user, 'karma_activity_credit'):\n # groups can join groups, and groups don't have karma_activity_credit\n user.karma_activity_credit()\n \n self._flush_user_data_caches(user)",
"def add_to_group(self, group, user):\n data = {'group': group, 'user': user}\n return self.post('addUserToGroup', data)",
"def add_user_to_group(user, group):\n Command.run(['usermod', '-a', '-G', user, group])",
"def append_user(self, user_id):\n #opts = {u'name':self.name, u'user':user_id}\n \n # verify permissions\n self.controller.check_authorization(self.objtype, self.objdef, \n self.objid, u'update')\n \n try:\n user = self.manager.get_entity(ModelUser, user_id)\n #user = self.controller.get_entity(user_id, self.manager.get_users)\n except (QueryError, TransactionError) as ex:\n #self.send_event(u'user-set.update', params=opts, exception=ex)\n self.logger.error(ex, exc_info=1)\n raise ApiManagerError(ex, code=ex.code)\n\n # verify user permissions\n self.controller.check_authorization(User.objtype, User.objdef, \n user.objid, u'view')\n\n try:\n res = self.manager.append_group_user(self.model, user)\n if res is True: \n self.logger.debug(u'Append user %s to group %s' % (\n user, self.name))\n else:\n self.logger.debug(u'User %s already linked with group %s' % (\n user, self.name))\n #self.send_event(u'user-set.update', params=opts)\n return res\n except (QueryError, TransactionError) as ex:\n #self.send_event(u'user-set.update', params=opts, exception=ex)\n self.logger.error(ex, exc_info=1)\n raise ApiManagerError(ex, code=ex.code)",
"def add_member(self, user):\n user_in = user.get_groups()\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n print('user is already a member')\n return False\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
broadcast a user leaving the group | def user_left_group(cls, group, user):
text = "{} left the group chat".format(user.username)
cls._broadcast_group(group, None, group, text) | [
"def leave_group(self):\n\t\tself.sendMessage(ID_CTRL + \"LEAVE\", True)\n\t\tself.joinstate = 0\n\t\tself.createstate = 0\n\t\tself.__key = None",
"def on_leave(self, room, user):\n pass",
"def leave_group(self) -> Result:\n return self._execute_command('leaveGroup')",
"async def user_removed_from_group(\n self, group_id: str, user_id: str, content: JsonDict\n ) -> None:\n # TODO: Check if user in group\n token = await self.store.register_user_group_membership(\n group_id, user_id, membership=\"leave\"\n )\n self.notifier.on_new_event(\"groups_key\", token, users=[user_id])",
"def receive_leave_group(group_state: 'GroupState'):\n group_state.group_state_logger.debug('Querier MembersPresent: receive_leave_group')\n group_ip = group_state.group_ip\n\n group_state.set_timer(alternative=True)\n group_state.set_retransmit_timer()\n\n packet = PacketIGMPHeader(type=MEMBERSHIP_QUERY, max_resp_time=LAST_MEMBER_QUERY_INTERVAL * 10,\n group_address=group_ip)\n group_state.router_state.send(data=packet.bytes(), address=group_ip)\n\n group_state.set_state(CheckingMembership)",
"def leave_group(self) -> None:\r\n\r\n self._request_builder.delete(url=f\"{BASE_PATH['gaming_lounge']}{API_PATH['leave_group'].format(group_id=self.group_id)}\")",
"async def leave(self):\n\t\tif self.group == None:\n\t\t\traise exceptions.ClientError('NO_GROUP')\n\n\t\tawait self.group.remove(self)\n\n\t\tself.group = None",
"def user_joined_group(cls, group, user):\n text = \"{} joined the group chat\".format(user.username)\n cls._broadcast_group(group, None, group, text)",
"def decline_invitation(self, user, group):\n if group.is_invited(user):\n group.remove_invitation(user)",
"def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)",
"def leave_group(self):\n if VERBOSE:\n print(\"--leave_group\")\n self.group_level -= 1",
"def action_remove_from_group(self, kwargs):\n user = kwargs[\"user\"]\n group = kwargs[\"group\"]\n\n if self.engine.remove_user_from_group(user, group):\n info(f\"User {user} sucessfully removed from {group}\")\n else:\n error(f\"Unable to remove {user} from {group}, check privileges or dn\")",
"def leave(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n for tup in self.player_queue.queue:\n if tup[0] == username:\n self.player_queue.queue.remove(tup)\n self._add_to_whisper_queue(username, \"You've left the queue.\")\n user.times_played -= 1\n break\n else:\n self._add_to_whisper_queue(username, \"You're not in the queue and must join before leaving.\")",
"def leave(self):\n is_group_conversation = (self._conversation.type ==\n hangouts_pb2.CONVERSATION_TYPE_GROUP)\n try:\n if is_group_conversation:\n yield from self._client.remove_user(\n hangouts_pb2.RemoveUserRequest(\n request_header=self._client.get_request_header(),\n event_request_header=self._get_event_request_header(),\n )\n )\n else:\n yield from self._client.delete_conversation(\n hangouts_pb2.DeleteConversationRequest(\n request_header=self._client.get_request_header(),\n conversation_id=hangouts_pb2.ConversationId(\n id=self.id_\n ),\n delete_upper_bound_timestamp=parsers.to_timestamp(\n datetime.datetime.now(tz=datetime.timezone.utc)\n )\n )\n )\n except exceptions.NetworkError as e:\n logger.warning('Failed to leave conversation: {}'.format(e))\n raise",
"def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return",
"def kick_user(self, room_id, user_id, reason=\"\"):\n self.set_membership(room_id, user_id, \"leave\", reason)",
"def on_unban(self, room, user, target):\n pass",
"async def chat_leave(self, event):\n print(\"PrivateChatConsumer\", \"chat_leave\")\n if event[\"username\"]:\n await self.send_json({\n \"msg_type\": MSG_TYPE_LEAVE,\n \"room_id\": event[\"room_id\"],\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"message\": event[\"username\"] + \" disconnected.\"\n })",
"def leave(self, topic):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Start an oef node. | def _start_oef_node(self, network_node): | [
"def launch_oef():\n script_path = os.path.join(\"scripts\", \"oef\", \"launch.py\")\n configuration_file_path = os.path.join(\"scripts\", \"oef\", \"launch_config.json\")\n print(\"Launching new OEF Node...\")\n subprocess.Popen(\n [\"python3\", script_path, \"-c\", configuration_file_path, \"--background\"],\n stdout=subprocess.PIPE,\n env=os.environ,\n cwd=ROOT_DIR,\n )\n\n # Wait for OEF\n print(\"Waiting for the OEF to be operative...\")\n wait_for_oef = subprocess.Popen(\n [os.path.join(\"sandbox\", \"wait-for-oef.sh\"), \"127.0.0.1\", \"10000\", \":\"],\n env=os.environ,\n cwd=ROOT_DIR,\n )\n\n wait_for_oef.wait(30)",
"def ex_start_node(self, node):\n # NOTE: This method is here for backward compatibility reasons after\n # this method was promoted to be part of the standard compute API in\n # Libcloud v2.7.0\n return self.start_node(node=node)",
"def start_node(self, **kwargs):\n # project_name, node_name\n\n try:\n if kwargs['project_name'] in self.data:\n project_name = kwargs['project_name']\n project_id = self.data[project_name]['project_id']\n if kwargs['node_name'] in self.data[project_name]['nodes']:\n node_name = kwargs['node_name']\n node_id = self.data[project_name]['nodes'][node_name]['node_id']\n resp = self.post_to_server('projects/{}/nodes/{}/start'.format(project_id, node_id),{})\n print('Node \\'{}\\' started.'.format(node_name))\n self.data[project_name]['nodes'][node_name]['status'] = \"running\"\n except:\n traceback_print_exc()",
"def start_node(target, gatk):\n reference = gatk.get_input_path('reference.fasta')\n\n # Create index file for reference genome (.fai)\n try:\n subprocess.check_call(['samtools', 'faidx', reference])\n except subprocess.CalledProcessError:\n raise RuntimeError('\\nsamtools failed to create reference index!')\n except OSError:\n raise RuntimeError('\\nFailed to find \"samtools\". \\nInstall via \"apt-get install samtools\".')\n\n # Create dict file for reference genome (.dict)\n try:\n subprocess.check_call(['picard-tools', 'CreateSequenceDictionary',\n 'R={}'.format(reference),\n 'O={}.dict'.format(os.path.splitext(reference)[0])])\n except subprocess.CalledProcessError:\n raise RuntimeError('\\nPicard failed to create reference dictionary')\n except OSError:\n raise RuntimeError('\\nFailed to find \"picard\". \\nInstall via \"apt-get install picard-tools')\n\n # upload to S3\n gatk.upload_to_s3(reference + '.fai')\n gatk.upload_to_s3(os.path.splitext(reference)[0] + '.dict')\n\n # Spawn children and follow-on\n target.addChildTargetFn(normal_index, (gatk,))\n target.addChildTargetFn(tumor_index, (gatk,))\n target.setFollowOnTargetFn(mutect, (gatk,))",
"def start_node(package, executable, node_name, arguments):\n node = roslaunch.core.Node(package, executable, name=node_name, args=arguments)\n launch = roslaunch.scriptapi.ROSLaunch()\n launch.start()\n process = launch.launch(node)\n return process",
"def startup(self) -> None:\n with self.lock:\n self.makenodedir()\n if self.up:\n raise ValueError(\"starting a node that is already up\")\n # create a new namespace for this node using vnoded\n vnoded = (\n f\"{VNODED} -v -c {self.ctrlchnlname} -l {self.ctrlchnlname}.log \"\n f\"-p {self.ctrlchnlname}.pid\"\n )\n if self.directory:\n vnoded += f\" -C {self.directory}\"\n env = self.session.get_environment(state=False)\n env[\"NODE_NUMBER\"] = str(self.id)\n env[\"NODE_NAME\"] = str(self.name)\n output = self.host_cmd(vnoded, env=env)\n self.pid = int(output)\n logger.debug(\"node(%s) pid: %s\", self.name, self.pid)\n # bring up the loopback interface\n logger.debug(\"bringing up loopback interface\")\n self.node_net_client.device_up(\"lo\")\n # set hostname for node\n logger.debug(\"setting hostname: %s\", self.name)\n self.node_net_client.set_hostname(self.name)\n # mark node as up\n self.up = True\n # create private directories\n for dir_path in PRIVATE_DIRS:\n self.create_dir(dir_path)",
"def start(self):\n self.handler = None\n self.master_node = None\n self.uri = None\n handler = rosmaster.master_api.ROSMasterHandler(self.num_workers)\n master_node = rosgraph.xmlrpc.XmlRpcNode(self.port, handler)\n master_node.start()\n while not master_node.uri:\n time.sleep(0.0001)\n self.handler = handler\n self.master_node = master_node\n self.uri = master_node.uri\n logging.getLogger('rosmaster.master').info(\"Master initialized: port[%s], uri[%s]\", self.port, self.uri)",
"def runnode(self, node, pynode=False):\n if pynode:\n process = coreinterface.PythonNode(node, self)\n else:\n process = coreinterface.ExecutableNode(node, self)\n process.spawn()\n self.loadingnodes[node] = process",
"def power_on(self, context, instance, network_info,\n block_device_info=None):\n Maas().node_start(instance['uuid'])",
"def help_start_odl(self):\n print(\"start_odl\")\n print(\"\\tStart an OpenDaylight controller.\")",
"def start_node(self, node, override_cfg_params=None):\n node.account.mkdirs(RedpandaService.DATA_DIR)\n node.account.mkdirs(os.path.dirname(RedpandaService.CONFIG_FILE))\n\n self.write_conf_file(node, override_cfg_params)\n\n if self.coproc_enabled():\n self.start_wasm_engine(node)\n\n cmd = (f\"nohup {self.find_binary('redpanda')}\"\n f\" --redpanda-cfg {RedpandaService.CONFIG_FILE}\"\n f\" --default-log-level {self._log_level}\"\n f\" --logger-log-level=exception=debug:archival=debug \"\n f\" --kernel-page-cache=true \"\n f\" --overprovisioned \"\n f\" --smp {self._num_cores} \"\n f\" --memory 6G \"\n f\" --reserve-memory 0M \"\n f\" >> {RedpandaService.STDOUT_STDERR_CAPTURE} 2>&1 &\")\n\n node.account.ssh(cmd)\n\n wait_until(\n lambda: Admin.ready(node).get(\"status\") == \"ready\",\n timeout_sec=RedpandaService.READY_TIMEOUT_SEC,\n err_msg=f\"Redpanda service {node.account.hostname} failed to start\",\n retry_on_exc=True)",
"def cli_node_start(name, config, environment, system_folders, dockerized):\n ContextClass = DockerNodeContext if dockerized else NodeContext\n\n # in case a configuration file is given, we bypass all the helper\n # stuff since you know what you are doing\n if config:\n name = Path(config).stem\n ctx = ContextClass(name, environment, system_folders, config)\n\n else:\n # in case no name is supplied, ask user to select one\n if not name:\n name, environment = select_configuration_questionaire(\n \"node\",\n system_folders\n )\n\n # check that config exists in the APP, if not a questionaire will\n # be invoked\n if not ContextClass.config_exists(name, environment, system_folders):\n question = f\"Configuration '{name}' using environment\"\n question += f\" '{environment}' does not exist.\\n Do you want to\"\n question += f\" create this config now?\"\n\n if q.confirm(question).ask():\n configuration_wizard(\"node\", name, environment, system_folders)\n\n else:\n sys.exit(0)\n\n # create dummy node context\n ctx = ContextClass(name, environment, system_folders)\n\n # run the node application\n node.run(ctx)",
"def run(self):\n self.etcd.start()",
"def initialize(bootstrap_contact=None):\n\n port = int(sys.argv[1])\n node_obj = node.Node(port)\n\n node_obj.joinNetwork(bootstrap_contact)\n\n return node_obj",
"def start_node(logfile, port):\n handler = logging.FileHandler(logfile)\n f = ' '.join(['%(asctime)s', '%(processName)-10s', '%(name)s',\n '%(levelname)-8s', '%(message)s'])\n formatter = logging.Formatter(f)\n handler.setFormatter(formatter)\n root = logging.getLogger()\n root.addHandler(handler)\n root.setLevel(logging.DEBUG)\n root.info('Starting node in new process')\n event_loop = asyncio.get_event_loop()\n connector = HttpConnector(event_loop)\n instance = Drogulus(PRIVATE_KEY, PUBLIC_KEY, event_loop, connector, port)\n app = make_http_handler(event_loop, connector, instance._node)\n f = event_loop.create_server(app, '0.0.0.0', port)\n event_loop.run_until_complete(f)\n try:\n event_loop.run_forever()\n except KeyboardInterrupt:\n pass",
"def start_demo_network(ctx: ServerContext) -> None:\n # run the server\n vserver_start(ctx, None, None, None, False, None, None, True, '', False)\n\n # run all nodes that belong to this server\n configs, _ = NodeContext.available_configurations(system_folders=False)\n node_names = [\n config.name for config in configs if f'{ctx.name}_node_' in config.name\n ]\n for name in node_names:\n subprocess.run([\"vnode\", \"start\", \"--name\", name])",
"def launch(self, node):\n if not self.started:\n raise RLException(\"please start ROSLaunch first\")\n elif not isinstance(node, Node):\n raise ValueError(\"arg must be of type Node\")\n\n proc, success = self.parent.runner.launch_node(node)\n if not success:\n raise RLException(\"failed to launch %s/%s\"%(node.package, node.type))\n return proc",
"def start_server():\n server.set_endpoint(\"opc.tcp://0.0.0.0:4840/freeopcua/server/\")\n objects = server.get_objects_node()\n myobj = objects.add_object(0, \"MyObject\")\n populate_server(myobj)\n server.start()",
"def start_network(self):\n from mininet.topo import Topo\n from mininet.net import Mininet\n from mininet.node import OVSController\n class SingleSwitchTopo(Topo):\n \"Single Switch Topology\"\n def __init__(self, count=1, **params):\n Topo.__init__(self, **params)\n hosts = [ self.addHost('h%d' % i) for i in range(1, count + 1) ]\n s1 = self.addSwitch('s1')\n for h in hosts:\n self.addLink(h, s1)\n self.net = Mininet(topo = SingleSwitchTopo(4), controller = OVSController)\n self.net.start()\n self.impersonate(False)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a generated protocol's serialisation + deserialisation work correctly. | def test_generated_protocol_serialisation(self):
# create a message
reply_message = {1: "number one", 2: "number two", 7: "number seven"}
# message 1
message = TwoPartyNegotiationMessage(
message_id=1,
dialogue_reference=(str(0), ""),
target=0,
performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,
reply_message=reply_message,
)
# serialise the message
encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)
# deserialise the message
decoded_message = TwoPartyNegotiationSerializer().decode(
encoded_message_in_bytes
)
# Compare the original message with the serialised+deserialised message
assert decoded_message.message_id == message.message_id
assert decoded_message.dialogue_reference == message.dialogue_reference
assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]
assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]
assert decoded_message.target == message.target
assert decoded_message.performative == message.performative
assert decoded_message.reply_message == message.reply_message | [
"def test_generated_protocol_serialisation_ct(self):\n # create a message with pt content\n some_dict = {1: True, 2: False, 3: True, 4: False}\n data_model = TProtocolMessage.DataModel(\n bytes_field=b\"some bytes\",\n int_field=42,\n float_field=42.7,\n bool_field=True,\n str_field=\"some string\",\n set_field={1, 2, 3, 4, 5},\n list_field=[\"some string 1\", \"some string 2\"],\n dict_field=some_dict,\n )\n message = TProtocolMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TProtocolMessage.Performative.PERFORMATIVE_CT,\n content_ct=data_model,\n )\n\n # serialise the message\n encoded_message_in_bytes = TProtocolMessage.serializer.encode(message)\n\n # deserialise the message\n decoded_message = TProtocolMessage.serializer.decode(encoded_message_in_bytes)\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.content_ct == message.content_ct",
"def test_generated_protocol_serialisation_pt(self):\n # create a message with pt content\n message = TProtocolMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TProtocolMessage.Performative.PERFORMATIVE_PT,\n content_bytes=b\"some bytes\",\n content_int=42,\n content_float=42.7,\n content_bool=True,\n content_str=\"some string\",\n )\n\n # serialise the message\n encoded_message_in_bytes = TProtocolMessage.serializer.encode(message)\n\n # deserialise the message\n decoded_message = TProtocolMessage.serializer.decode(encoded_message_in_bytes)\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.content_bytes == message.content_bytes\n assert decoded_message.content_int == message.content_int\n # floats do not seem to lose some precision when serialised then deserialised using protobuf\n # assert decoded_message.content_float == message.content_float\n assert decoded_message.content_bool == message.content_bool\n assert decoded_message.content_str == message.content_str",
"def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob",
"def test_serialize_deserialize(self):\n # Build a transformer network to use within the dual encoder model. (Here,\n # we use a short sequence_length for convenience.)\n sequence_length = 32\n test_network = networks.BertEncoder(\n vocab_size=100, num_layers=2, sequence_length=sequence_length)\n\n # Create a dual encoder model with the created network. (Note that all the\n # args are different, so we can catch any serialization mismatches.)\n dual_encoder_model = dual_encoder.DualEncoder(\n test_network, max_seq_length=sequence_length, output='predictions')\n\n # Create another dual encoder model via serialization and deserialization.\n config = dual_encoder_model.get_config()\n new_dual_encoder = dual_encoder.DualEncoder.from_config(config)\n\n # Validate that the config can be forced to JSON.\n _ = new_dual_encoder.to_json()\n\n # If the serialization was successful, the new config should match the old.\n self.assertAllEqual(dual_encoder_model.get_config(),\n new_dual_encoder.get_config())",
"def test_proto_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid)\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob, from_proto=True)\n assert obj == obj2",
"def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob",
"def test_compare_latest_generator_output_with_test_protocol(self):\n # Skip if prerequisite applications are not installed\n try:\n check_prerequisites()\n except FileNotFoundError:\n pytest.skip(\n \"Some prerequisite applications are not installed. Skipping this test.\"\n )\n\n # Specification\n # protocol_name = \"t_protocol\"\n path_to_specification = os.path.join(\n ROOT_DIR, \"tests\", \"data\", \"sample_specification.yaml\"\n )\n path_to_generated_protocol = self.t\n # path_to_original_protocol = os.path.join(\n # ROOT_DIR, \"tests\", \"data\", \"generator\", protocol_name\n # )\n path_to_package = \"tests.data.generator.\"\n\n # Generate the protocol\n protocol_generator = ProtocolGenerator(\n path_to_specification,\n path_to_generated_protocol,\n path_to_protocol_package=path_to_package,\n )\n protocol_generator.generate()\n\n # # compare __init__.py\n # init_file_generated = Path(self.t, protocol_name, \"__init__.py\")\n # init_file_original = Path(path_to_original_protocol, \"__init__.py\",)\n # assert filecmp.cmp(init_file_generated, init_file_original)\n\n # # compare protocol.yaml\n # protocol_yaml_file_generated = Path(self.t, protocol_name, \"protocol.yaml\")\n # protocol_yaml_file_original = Path(path_to_original_protocol, \"protocol.yaml\",)\n # assert filecmp.cmp(protocol_yaml_file_generated, protocol_yaml_file_original)\n\n # # compare message.py\n # message_file_generated = Path(self.t, protocol_name, \"message.py\")\n # message_file_original = Path(path_to_original_protocol, \"message.py\",)\n # assert filecmp.cmp(message_file_generated, message_file_original)\n\n # # compare serialization.py\n # serialization_file_generated = Path(self.t, protocol_name, \"serialization.py\")\n # serialization_file_original = Path(\n # path_to_original_protocol, \"serialization.py\",\n # )\n # assert filecmp.cmp(serialization_file_generated, serialization_file_original)\n\n # # compare .proto\n # proto_file_generated = Path(\n # self.t, protocol_name, \"{}.proto\".format(protocol_name)\n # )\n # proto_file_original = Path(\n # path_to_original_protocol, \"{}.proto\".format(protocol_name),\n # )\n # assert filecmp.cmp(proto_file_generated, proto_file_original)\n\n # # compare _pb2.py\n # pb2_file_generated = Path(\n # self.t, protocol_name, \"{}_pb2.py\".format(protocol_name)\n # )\n # with open(ROOT_DIR + \"/x_pb2.py\", \"w\") as fp:\n # fp.write(pb2_file_generated.read_text())\n # pb2_file_original = Path(\n # path_to_original_protocol, \"{}_pb2.py\".format(protocol_name),\n # )\n # assert filecmp.cmp(pb2_file_generated, pb2_file_original)\n assert True",
"def test_serializer(inout):\n from aiida_wannier90_workflows.utils.workflows.builder.serializer import serialize\n\n assert serialize(inout[0]) == inout[1], inout",
"def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2",
"def test_default_protocol_schemas(available_protocol):\n protocol = available_protocols[available_protocol]('dummy_id')\n protocol_schema = protocol.schema\n\n recreated_protocol = available_protocols[available_protocol]('dummy_id')\n recreated_protocol.schema = protocol_schema\n\n assert protocol.schema.json() == recreated_protocol.schema.json()",
"def test_serialize_deserialize1(self):\n for command in Command:\n serialized = command.serialize()\n deserialized = Command.deserialize(serialized)\n self.assertTrue(deserialized is command)",
"def test_validate_serialization_and_deserialization_processingblock_using_schema_class():\n processing_block_config = ProcessingBlockSchema(many=True).loads(\n VALID_PROCESSING_BLOCK_JSON_PI16\n )\n serialized_processing_block_config = ProcessingBlockSchema(many=True).dumps(\n processing_block_config\n )\n\n assert_json_is_equal(\n VALID_PROCESSING_BLOCK_JSON_PI16, serialized_processing_block_config\n )",
"def test_protocolToConsumer(self):\n result = []\n p = Protocol()\n p.dataReceived = result.append\n consumer = IConsumer(p)\n consumer.write(b\"hello\")\n self.assertEqual(result, [b\"hello\"])\n self.assertIsInstance(consumer, ProtocolToConsumerAdapter)",
"def test_serialization_no_conformers(self):\n mol = Molecule.from_smiles(\"CCO\")\n\n dict_copy = Molecule.from_dict(mol.to_dict())\n assert mol == dict_copy\n\n # TODO: yaml_copy = Molecule.from_yaml(mol.to_yaml())\n with pytest.raises(NotImplementedError):\n mol.to_toml()\n\n bson_copy = Molecule.from_bson(mol.to_bson())\n assert mol == bson_copy\n\n json_copy = Molecule.from_json(mol.to_json())\n assert mol == json_copy\n\n # TODO: round-trip when from_xml is implemented\n mol_as_xml = mol.to_xml()\n with pytest.raises(NotImplementedError):\n Molecule.from_xml(mol_as_xml)\n\n messagepack_copy = Molecule.from_messagepack(mol.to_messagepack())\n assert mol == messagepack_copy\n\n pickle_copy = pickle.loads(pickle.dumps(mol))\n assert mol == pickle_copy",
"def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))",
"def test_interfaces(self):\n proto = Protocol()\n self.assertTrue(verifyObject(IProtocol, proto))\n self.assertTrue(verifyObject(ILoggingContext, proto))",
"def test_buildProtocol(self):\n world = World(MagicMock())\n f = BotFactory(world, {'foo': 'bar'})\n proto = f.buildProtocol(None)\n self.assertEqual(proto.factory, f)\n self.assertTrue(isinstance(proto, BotLineProtocol))\n self.assertTrue(isinstance(proto.avatar, Avatar))\n self.assertEqual(proto.avatar._world, world, \"Should be connected to \"\n \"the world\")\n self.assertNotEqual(proto.avatar._game_piece, None, \"Should have a game\"\n \" piece\")\n self.assertEqual(proto.avatar.availableCommands(), {'foo': 'bar'})\n obj = world.get(proto.avatar._game_piece)\n self.assertEqual(obj['kind'], 'bot', \"Should make a bot in the world\")\n\n self.assertTrue(isinstance(proto.event_transformer,\n ToStringTransformer))",
"def test_picklable(self):\n pickled = pickle.dumps(self.structure)\n unpickled = pickle.loads(pickled)\n self.assertEqual(self.structure, unpickled)",
"def test_defaultBuildProtocol(self):\n\n class SomeProtocol(Protocol):\n pass\n\n f = Factory()\n f.protocol = SomeProtocol\n protocol = f.buildProtocol(None)\n self.assertIsInstance(protocol, SomeProtocol)\n self.assertIs(protocol.factory, f)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test that a generated protocol could be used in exchanging messages between two agents. | def test_generated_protocol_end_to_end(self):
# AEA components
ledger_apis = LedgerApis({}, FETCHAI)
wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})
wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})
identity_1 = Identity(
name="my_aea_1",
address=wallet_1.addresses.get(FETCHAI),
default_address_key=FETCHAI,
)
identity_2 = Identity(
name="my_aea_2",
address=wallet_2.addresses.get(FETCHAI),
default_address_key=FETCHAI,
)
oef_connection_1 = OEFConnection(
address=identity_1.address, oef_addr=HOST, oef_port=PORT
)
oef_connection_2 = OEFConnection(
address=identity_2.address, oef_addr=HOST, oef_port=PORT
)
resources_1 = Resources()
resources_2 = Resources()
# add generated protocols to resources
generated_protocol_configuration = ProtocolConfig.from_json(
yaml.safe_load(
open(
os.path.join(
self.cwd,
"tests",
"data",
"generator",
"two_party_negotiation",
"protocol.yaml",
)
)
)
)
generated_protocol = Protocol(
TwoPartyNegotiationMessage.protocol_id,
TwoPartyNegotiationSerializer(),
generated_protocol_configuration,
)
resources_1.protocol_registry.register(
TwoPartyNegotiationMessage.protocol_id, generated_protocol
)
resources_2.protocol_registry.register(
TwoPartyNegotiationMessage.protocol_id, generated_protocol
)
# create AEAs
aea_1 = AEA(identity_1, [oef_connection_1], wallet_1, ledger_apis, resources_1)
aea_2 = AEA(identity_2, [oef_connection_2], wallet_2, ledger_apis, resources_2)
        inform_number = (1370, 1991, 1, 4, 17, 6)
# message 1
message = TwoPartyNegotiationMessage(
message_id=1,
dialogue_reference=(str(0), ""),
target=0,
performative=TwoPartyNegotiationMessage.Performative.INFORM,
inform_number=inform_number,
)
encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)
envelope = Envelope(
to=identity_2.address,
sender=identity_1.address,
protocol_id=TwoPartyNegotiationMessage.protocol_id,
message=encoded_message_in_bytes,
)
# message 2
reply_message = {1: "number one", 2: "number two", 7: "number seven"}
message_2 = TwoPartyNegotiationMessage(
message_id=2,
dialogue_reference=(str(0), ""),
target=1,
performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,
reply_message=reply_message,
)
encoded_message_2_in_bytes = TwoPartyNegotiationSerializer().encode(message_2)
# add handlers to AEA resources
agent_1_handler = Agent1Handler(
skill_context=SkillContext(aea_1.context), name="fake_skill"
)
resources_1.handler_registry.register(
(
PublicId.from_str("fetchai/fake_skill:0.1.0"),
TwoPartyNegotiationMessage.protocol_id,
),
agent_1_handler,
)
agent_2_handler = Agent2Handler(
encoded_messsage=encoded_message_2_in_bytes,
skill_context=SkillContext(aea_2.context),
name="fake_skill",
)
resources_2.handler_registry.register(
(
PublicId.from_str("fetchai/fake_skill:0.1.0"),
TwoPartyNegotiationMessage.protocol_id,
),
agent_2_handler,
)
# add error skill to AEAs
error_skill_1 = Skill.from_dir(
os.path.join(AEA_DIR, "skills", "error"), aea_1.context
)
resources_1.add_skill(error_skill_1)
error_skill_2 = Skill.from_dir(
os.path.join(AEA_DIR, "skills", "error"), aea_2.context
)
resources_2.add_skill(error_skill_2)
# Start threads
t_1 = Thread(target=aea_1.start)
t_2 = Thread(target=aea_2.start)
try:
t_1.start()
t_2.start()
time.sleep(1.0)
aea_1.outbox.put(envelope)
time.sleep(5.0)
assert (
agent_2_handler.handled_message.message_id == message.message_id
), "Message from Agent 1 to 2: message ids do not match"
assert (
agent_2_handler.handled_message.dialogue_reference
== message.dialogue_reference
), "Message from Agent 1 to 2: dialogue references do not match"
assert (
agent_2_handler.handled_message.dialogue_reference[0]
== message.dialogue_reference[0]
), "Message from Agent 1 to 2: dialogue reference[0]s do not match"
assert (
agent_2_handler.handled_message.dialogue_reference[1]
== message.dialogue_reference[1]
), "Message from Agent 1 to 2: dialogue reference[1]s do not match"
assert (
agent_2_handler.handled_message.target == message.target
), "Message from Agent 1 to 2: targets do not match"
assert (
agent_2_handler.handled_message.performative == message.performative
), "Message from Agent 1 to 2: performatives do not match"
assert (
agent_2_handler.handled_message.inform_number == message.inform_number
), "Message from Agent 1 to 2: inform_numbers do not match"
assert (
agent_1_handler.handled_message.message_id == message_2.message_id
), "Message from Agent 1 to 2: dialogue references do not match"
assert (
agent_1_handler.handled_message.dialogue_reference
== message_2.dialogue_reference
), "Message from Agent 2 to 1: dialogue references do not match"
assert (
agent_1_handler.handled_message.dialogue_reference[0]
== message_2.dialogue_reference[0]
), "Message from Agent 2 to 1: dialogue reference[0]s do not match"
assert (
agent_1_handler.handled_message.dialogue_reference[1]
== message_2.dialogue_reference[1]
), "Message from Agent 2 to 1: dialogue reference[1]s do not match"
assert (
agent_1_handler.handled_message.target == message_2.target
), "Message from Agent 2 to 1: targets do not match"
assert (
agent_1_handler.handled_message.performative == message_2.performative
), "Message from Agent 2 to 1: performatives do not match"
assert (
agent_1_handler.handled_message.reply_message == message_2.reply_message
), "Message from Agent 1 to 2: reply_messages do not match"
time.sleep(2.0)
finally:
aea_1.stop()
aea_2.stop()
t_1.join()
t_2.join() | [
"def test_trackProtocols(self):\n f = EventFeedLineFactory()\n p1 = f.buildProtocol(None)\n p2 = f.buildProtocol(None)\n \n self.assertNotIn(p1, f.connected_protocols)\n self.assertNotIn(p2, f.connected_protocols)\n\n p1.makeConnection(StringTransport())\n self.assertIn(p1, f.connected_protocols)\n\n p2.makeConnection(StringTransport())\n self.assertIn(p2, f.connected_protocols)\n\n p1.connectionLost('whatever')\n\n self.assertNotIn(p1, f.connected_protocols, \"After a protocol \"\n \"disconnects, it should not be in the connected \"\n \"protocols list anymore\")",
"def test_generated_protocol_end_to_end(self):\n agent_name_1 = \"my_aea_1\"\n agent_name_2 = \"my_aea_2\"\n builder_1 = AEABuilder()\n builder_1.set_name(agent_name_1)\n builder_1.add_private_key(DEFAULT_LEDGER, self.private_key_path_1)\n builder_1.set_default_ledger(DEFAULT_LEDGER)\n builder_1.set_default_connection(PublicId.from_str(\"fetchai/oef:0.6.0\"))\n builder_1.add_protocol(\n Path(ROOT_DIR, \"packages\", \"fetchai\", \"protocols\", \"fipa\")\n )\n builder_1.add_protocol(\n Path(ROOT_DIR, \"packages\", \"fetchai\", \"protocols\", \"oef_search\")\n )\n builder_1.add_component(\n ComponentType.PROTOCOL,\n Path(ROOT_DIR, \"tests\", \"data\", \"generator\", \"t_protocol\"),\n skip_consistency_check=True,\n )\n builder_1.add_connection(\n Path(ROOT_DIR, \"packages\", \"fetchai\", \"connections\", \"oef\")\n )\n\n builder_2 = AEABuilder()\n builder_2.set_name(agent_name_2)\n builder_2.add_private_key(DEFAULT_LEDGER, self.private_key_path_2)\n builder_2.set_default_ledger(DEFAULT_LEDGER)\n builder_2.add_protocol(\n Path(ROOT_DIR, \"packages\", \"fetchai\", \"protocols\", \"fipa\")\n )\n builder_2.add_protocol(\n Path(ROOT_DIR, \"packages\", \"fetchai\", \"protocols\", \"oef_search\")\n )\n builder_2.set_default_connection(PublicId.from_str(\"fetchai/oef:0.6.0\"))\n builder_2.add_component(\n ComponentType.PROTOCOL,\n Path(ROOT_DIR, \"tests\", \"data\", \"generator\", \"t_protocol\"),\n skip_consistency_check=True,\n )\n builder_2.add_connection(\n Path(ROOT_DIR, \"packages\", \"fetchai\", \"connections\", \"oef\")\n )\n\n # create AEAs\n aea_1 = builder_1.build(connection_ids=[PublicId.from_str(\"fetchai/oef:0.6.0\")])\n aea_2 = builder_2.build(connection_ids=[PublicId.from_str(\"fetchai/oef:0.6.0\")])\n\n # message 1\n message = TProtocolMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TProtocolMessage.Performative.PERFORMATIVE_PT,\n content_bytes=b\"some bytes\",\n content_int=42,\n content_float=42.7,\n content_bool=True,\n content_str=\"some string\",\n )\n message.counterparty = aea_2.identity.address\n envelope = Envelope(\n to=aea_2.identity.address,\n sender=aea_1.identity.address,\n protocol_id=TProtocolMessage.protocol_id,\n message=message,\n )\n\n # message 2\n message_2 = TProtocolMessage(\n message_id=2,\n dialogue_reference=(str(0), \"\"),\n target=1,\n performative=TProtocolMessage.Performative.PERFORMATIVE_PT,\n content_bytes=b\"some other bytes\",\n content_int=43,\n content_float=43.7,\n content_bool=False,\n content_str=\"some other string\",\n )\n message_2.counterparty = aea_1.identity.address\n\n # add handlers to AEA resources]\n skill_context_1 = SkillContext(aea_1.context)\n skill_1 = Skill(SkillConfig(\"fake_skill\", \"fetchai\", \"0.1.0\"), skill_context_1)\n skill_context_1._skill = skill_1\n\n agent_1_handler = Agent1Handler(\n skill_context=skill_context_1, name=\"fake_handler_1\"\n )\n aea_1.resources._handler_registry.register(\n (\n PublicId.from_str(\"fetchai/fake_skill:0.1.0\"),\n TProtocolMessage.protocol_id,\n ),\n agent_1_handler,\n )\n skill_context_2 = SkillContext(aea_2.context)\n skill_2 = Skill(SkillConfig(\"fake_skill\", \"fetchai\", \"0.1.0\"), skill_context_2)\n skill_context_2._skill = skill_2\n\n agent_2_handler = Agent2Handler(\n message=message_2, skill_context=skill_context_2, name=\"fake_handler_2\",\n )\n aea_2.resources._handler_registry.register(\n (\n PublicId.from_str(\"fetchai/fake_skill:0.1.0\"),\n TProtocolMessage.protocol_id,\n ),\n agent_2_handler,\n )\n\n # Start threads\n t_1 = 
Thread(target=aea_1.start)\n t_2 = Thread(target=aea_2.start)\n try:\n t_1.start()\n t_2.start()\n time.sleep(1.0)\n aea_1.outbox.put(envelope)\n time.sleep(5.0)\n assert (\n agent_2_handler.handled_message.message_id == message.message_id\n ), \"Message from Agent 1 to 2: message ids do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference\n == message.dialogue_reference\n ), \"Message from Agent 1 to 2: dialogue references do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference[0]\n == message.dialogue_reference[0]\n ), \"Message from Agent 1 to 2: dialogue reference[0]s do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference[1]\n == message.dialogue_reference[1]\n ), \"Message from Agent 1 to 2: dialogue reference[1]s do not match\"\n assert (\n agent_2_handler.handled_message.target == message.target\n ), \"Message from Agent 1 to 2: targets do not match\"\n assert (\n agent_2_handler.handled_message.performative == message.performative\n ), \"Message from Agent 1 to 2: performatives do not match\"\n assert (\n agent_2_handler.handled_message.content_bytes == message.content_bytes\n ), \"Message from Agent 1 to 2: content_bytes do not match\"\n assert (\n agent_2_handler.handled_message.content_int == message.content_int\n ), \"Message from Agent 1 to 2: content_int do not match\"\n # floats do not seem to lose some precision when serialised then deserialised using protobuf\n # assert agent_2_handler.handled_message.content_float == message.content_float, \"Message from Agent 1 to 2: content_float do not match\"\n assert (\n agent_2_handler.handled_message.content_bool == message.content_bool\n ), \"Message from Agent 1 to 2: content_bool do not match\"\n assert (\n agent_2_handler.handled_message.content_str == message.content_str\n ), \"Message from Agent 1 to 2: content_str do not match\"\n\n assert (\n agent_1_handler.handled_message.message_id == message_2.message_id\n ), \"Message from Agent 1 to 2: dialogue references do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference\n == message_2.dialogue_reference\n ), \"Message from Agent 2 to 1: dialogue references do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference[0]\n == message_2.dialogue_reference[0]\n ), \"Message from Agent 2 to 1: dialogue reference[0]s do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference[1]\n == message_2.dialogue_reference[1]\n ), \"Message from Agent 2 to 1: dialogue reference[1]s do not match\"\n assert (\n agent_1_handler.handled_message.target == message_2.target\n ), \"Message from Agent 2 to 1: targets do not match\"\n assert (\n agent_1_handler.handled_message.performative == message_2.performative\n ), \"Message from Agent 2 to 1: performatives do not match\"\n assert (\n agent_1_handler.handled_message.content_bytes == message_2.content_bytes\n ), \"Message from Agent 2 to 1: content_bytes do not match\"\n assert (\n agent_1_handler.handled_message.content_int == message_2.content_int\n ), \"Message from Agent 2 to 1: content_int do not match\"\n # floats do not seem to lose some precision when serialised then deserialised using protobuf\n # assert agent_1_handler.handled_message.content_float == message_2.content_float, \"Message from Agent 2 to 1: content_float do not match\"\n assert (\n agent_1_handler.handled_message.content_bool == message_2.content_bool\n ), \"Message from Agent 2 to 1: content_bool do not match\"\n assert (\n 
agent_1_handler.handled_message.content_str == message_2.content_str\n ), \"Message from Agent 2 to 1: content_str do not match\"\n time.sleep(2.0)\n finally:\n aea_1.stop()\n aea_2.stop()\n t_1.join()\n t_2.join()",
"def test_compare_latest_generator_output_with_test_protocol(self):\n # Skip if prerequisite applications are not installed\n try:\n check_prerequisites()\n except FileNotFoundError:\n pytest.skip(\n \"Some prerequisite applications are not installed. Skipping this test.\"\n )\n\n # Specification\n # protocol_name = \"t_protocol\"\n path_to_specification = os.path.join(\n ROOT_DIR, \"tests\", \"data\", \"sample_specification.yaml\"\n )\n path_to_generated_protocol = self.t\n # path_to_original_protocol = os.path.join(\n # ROOT_DIR, \"tests\", \"data\", \"generator\", protocol_name\n # )\n path_to_package = \"tests.data.generator.\"\n\n # Generate the protocol\n protocol_generator = ProtocolGenerator(\n path_to_specification,\n path_to_generated_protocol,\n path_to_protocol_package=path_to_package,\n )\n protocol_generator.generate()\n\n # # compare __init__.py\n # init_file_generated = Path(self.t, protocol_name, \"__init__.py\")\n # init_file_original = Path(path_to_original_protocol, \"__init__.py\",)\n # assert filecmp.cmp(init_file_generated, init_file_original)\n\n # # compare protocol.yaml\n # protocol_yaml_file_generated = Path(self.t, protocol_name, \"protocol.yaml\")\n # protocol_yaml_file_original = Path(path_to_original_protocol, \"protocol.yaml\",)\n # assert filecmp.cmp(protocol_yaml_file_generated, protocol_yaml_file_original)\n\n # # compare message.py\n # message_file_generated = Path(self.t, protocol_name, \"message.py\")\n # message_file_original = Path(path_to_original_protocol, \"message.py\",)\n # assert filecmp.cmp(message_file_generated, message_file_original)\n\n # # compare serialization.py\n # serialization_file_generated = Path(self.t, protocol_name, \"serialization.py\")\n # serialization_file_original = Path(\n # path_to_original_protocol, \"serialization.py\",\n # )\n # assert filecmp.cmp(serialization_file_generated, serialization_file_original)\n\n # # compare .proto\n # proto_file_generated = Path(\n # self.t, protocol_name, \"{}.proto\".format(protocol_name)\n # )\n # proto_file_original = Path(\n # path_to_original_protocol, \"{}.proto\".format(protocol_name),\n # )\n # assert filecmp.cmp(proto_file_generated, proto_file_original)\n\n # # compare _pb2.py\n # pb2_file_generated = Path(\n # self.t, protocol_name, \"{}_pb2.py\".format(protocol_name)\n # )\n # with open(ROOT_DIR + \"/x_pb2.py\", \"w\") as fp:\n # fp.write(pb2_file_generated.read_text())\n # pb2_file_original = Path(\n # path_to_original_protocol, \"{}_pb2.py\".format(protocol_name),\n # )\n # assert filecmp.cmp(pb2_file_generated, pb2_file_original)\n assert True",
"def test_identify(self):\n\n protocol_a, transport_a, tree_a, _ = self.create_protocol('protocol_a')\n protocol_b, transport_b, tree_b, _ = self.create_protocol('protocol_b')\n\n transport_a.get_extra_info.return_value = ('127.0.0.1', 1000)\n transport_b.get_extra_info.return_value = ('127.0.0.2', 1000)\n\n self.assertTrue(len(protocol_a.messages) == 0)\n\n protocol_a.identify()\n\n # Check that a message has been sent.\n self.assertTrue(transport_a.write.called)\n self.assertTrue(len(protocol_a.messages) == 1)\n\n # Get the message and check for the key.\n output = transport_a.write.call_args[0][0]\n self.assertTrue(protocol_a.self_key in output.decode())\n\n # Feed the message to the other protocol.\n protocol_b.data_received(output)\n\n # Check that the routing tree has been called to add a Node with the right key.\n self.assertTrue(tree_b.add_node.called)\n self.assertTrue(tree_b.add_node.call_args[0][0].key == 'protocol_a')\n\n # Check that the response on the identify is written to the transport.\n self.assertTrue(transport_b.write.called)\n\n # Get the response, check the key.\n output = transport_b.write.call_args[0][0]\n self.assertTrue(protocol_b.self_key in output.decode())\n\n # Feed the response to the original protocol.\n protocol_a.data_received(output)\n\n # The routing tree should've been called to add the Node with the right key.\n self.assertTrue(tree_a.add_node.called)\n self.assertTrue(tree_a.add_node.call_args[0][0].key == 'protocol_b')\n\n # The messages dict should now be empty again.\n self.assertTrue(len(protocol_a.messages) == 0)",
"def test_invalid_same_peer_id2(self):\n # Disable idle timeout before creating any new peer because self.create_peer(...)\n # runs the main loop.\n self.conn.disable_idle_timeout()\n # Create new peer and disable idle timeout.\n manager3 = self.create_peer(self.network, peer_id=self.peer_id2)\n conn = FakeConnection(manager3, self.manager1)\n # Disable idle timeout.\n conn.disable_idle_timeout()\n # HELLO\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'HELLO')\n self.conn.run_one_step()\n conn.run_one_step()\n # PEER-ID\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.conn.run_one_step()\n conn.run_one_step()\n # READY\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'READY')\n self.conn.run_one_step()\n conn.run_one_step()\n # continue until messages stop\n self.conn.run_until_empty()\n conn.run_until_empty()\n self.run_to_completion()\n # one of the peers will close the connection. We don't know which one, as it depends\n # on the peer ids\n\n if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting:\n conn_dead = self.conn\n conn_alive = conn\n elif conn.tr1.disconnecting or conn.tr2.disconnecting:\n conn_dead = conn\n conn_alive = self.conn\n else:\n raise Exception('It should never happen.')\n self._check_result_only_cmd(conn_dead.peek_tr1_value() + conn_dead.peek_tr2_value(), b'ERROR')\n # at this point, the connection must be closing as the error was detected on READY state\n self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])\n # check connected_peers\n connected_peers = list(self.manager1.connections.connected_peers.values())\n self.assertEquals(1, len(connected_peers))\n self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])\n # connection is still up\n self.assertIsConnected(conn_alive)",
"def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )",
"def test_generated_protocol_serialisation(self):\n # create a message\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n\n # serialise the message\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n\n # deserialise the message\n decoded_message = TwoPartyNegotiationSerializer().decode(\n encoded_message_in_bytes\n )\n\n # Compare the original message with the serialised+deserialised message\n assert decoded_message.message_id == message.message_id\n assert decoded_message.dialogue_reference == message.dialogue_reference\n assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0]\n assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1]\n assert decoded_message.target == message.target\n assert decoded_message.performative == message.performative\n assert decoded_message.reply_message == message.reply_message",
"def test_protocols_updated(self):\n assert self.agent_config.protocols == {self.new_protocol_id}",
"def test_receiverInterfaces(self):\n self.assertTrue(verifyObject(IBoxReceiver, self.protocolClass()))",
"def test_v1alpha3vmi_port_forward_with_protocol(self):\n pass",
"def test_protocolToConsumer(self):\n result = []\n p = Protocol()\n p.dataReceived = result.append\n consumer = IConsumer(p)\n consumer.write(b\"hello\")\n self.assertEqual(result, [b\"hello\"])\n self.assertIsInstance(consumer, ProtocolToConsumerAdapter)",
"def test_buildProtocol(self):\n queryData = (\"fromUser\", None, None)\n factory = irc.DccChatFactory(None, queryData)\n protocol = factory.buildProtocol(\"127.0.0.1\")\n self.assertIsInstance(protocol, irc.DccChat)\n self.assertEqual(protocol.factory, factory)",
"def test_interfaces(self):\n proto = Protocol()\n self.assertTrue(verifyObject(IProtocol, proto))\n self.assertTrue(verifyObject(ILoggingContext, proto))",
"def test_v1alpha3vm_port_forward_with_protocol(self):\n pass",
"def test_v1vmi_port_forward_with_protocol(self):\n pass",
"def test_buildProtocol(self):\n world = World(MagicMock())\n f = BotFactory(world, {'foo': 'bar'})\n proto = f.buildProtocol(None)\n self.assertEqual(proto.factory, f)\n self.assertTrue(isinstance(proto, BotLineProtocol))\n self.assertTrue(isinstance(proto.avatar, Avatar))\n self.assertEqual(proto.avatar._world, world, \"Should be connected to \"\n \"the world\")\n self.assertNotEqual(proto.avatar._game_piece, None, \"Should have a game\"\n \" piece\")\n self.assertEqual(proto.avatar.availableCommands(), {'foo': 'bar'})\n obj = world.get(proto.avatar._game_piece)\n self.assertEqual(obj['kind'], 'bot', \"Should make a bot in the world\")\n\n self.assertTrue(isinstance(proto.event_transformer,\n ToStringTransformer))",
"def test_introduce_send_proposal(self):\n pass",
"def test_verify_connection_to_a_device():",
"def test_websocket_mechanics():\n transport = StringTransportWithDisconnection()\n service = hey_joe.WebSocketService(\"127.0.0.1\", 9000)\n protocol = service.buildProtocol(service._hey_joe_addr)\n protocol.transport = transport\n transport.protocol = protocol\n protocol.connectionMade()\n data_to_send = b'GET / HTTP/1.1\\r\\nHost: somewhere_in_the_world:9000\\r\\nConnection: keep-alive, Upgrade\\r\\nUpgrade: websocket\\r\\nSec-WebSocket-Version: 13\\r\\nSec-WebSocket-Key: F76ObkF/aCKX8WkmAgx2OQ==\\r\\n\\r\\n'\n protocol.dataReceived(data_to_send)\n assert transport.value().startswith(b'HTTP/1.1 101 Switching Protocols\\r\\nServer: hendrix')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test _specification_type_to_python_type method unsupported type. | def test__specification_type_to_python_type_unsupported_type(self):
with self.assertRaises(TypeError):
_specification_type_to_python_type("unsupported_type") | [
"def test__specification_type_to_python_type_unsupported_type(self):\n with self.assertRaises(ProtocolSpecificationParseError):\n _specification_type_to_python_type(\"unsupported_type\")",
"def test_value_error_for_computing_missing_type():\n with pytest.raises(ValueError):\n compute_type(\"missing_type\", {})",
"def test_coerce() -> None:\n assert _coerce(\"1.0\") == Version(\"1.0\")\n assert _coerce(1.0) == Version(\"1.0\")\n expected = \"Unable to coerce object type\"\n with pytest.raises(NotImplementedError, match=expected):\n _coerce(type(Version))",
"def test_wrong_type(self, rule):\n rule._expected_value_type = float\n with pytest.raises(TypeError) as e:\n _ = rule._get_comparison()\n assert \"42\" in str(e.value)\n assert \"type\" in str(e.value)",
"def test_should_raise_error_if_type_is_invalid(self):\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement({'type': 'sugar'})",
"def CheckType(self, *args, **kwargs):\n pass",
"def test_typespec_none_on_construction(self):\n expected = None\n actual = self.part.typespec\n msg = \"expected '%s', got '%s'\" % (expected, actual)\n self.assertEqual(expected, actual, msg)",
"def test_ticket_type_change_error_bad_type(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('ticket_type change bad_type changed_type')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def _validate_type(self) -> None:\n # TODO: add transformation logic so that we don't have to transform inputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)",
"def _validate_type(self):\n # TODO: add transformation logic so that we don't have to transform outputs at every place they are used, including v1 back compat support\n if not spec_type_is_parameter(self.type):\n type_utils.validate_bundled_artifact_type(self.type)",
"def test_unsupported():\n with pytest.raises(Exception) as exception_info:\n get_iso8601_string(1)\n assert exception_info.value.message == 'Unsupported type: ``%s``.\\nSupported types: ``<datetime.datetime>``, ``<datetime.date>``, or ``<str>``.' % repr(type(1))\n #assert exception_info.value.argument == 1",
"def _validate_type(self):\n if self._type != \"inspection\":\n raise securesystemslib.exceptions.FormatError(\n \"The _type field must be set to 'inspection'!\")",
"def test_incompatible_option_type(key, value):\n wrong_types = {int, str, list, bool} - {type(value)}\n for wrong_type in wrong_types:\n test_value = wrong_type()\n with pytest.raises(InputError):\n _check_input_config({key: test_value})",
"def _check_type_compatibility(self, type_name1, type_name2,\n operation):\n if type_name1 != type_name2:\n raise TypeCompatibilityError(type_name1, type_name2, operation)",
"def test_types(self):\n \n self.assertIsInstance(self.mapped_info, numpy.ndarray)\n self.assertIsInstance(self.mapped_shape, tuple)\n self.assertIsInstance(self.bits_per_symbol, int)\n self.assertIsInstance(self.frame_size, int)\n self.assertIsInstance(self.modulation_type, str)\n \n pass",
"def try_wrong_types(self, p, name, type_):\n for x in (1, 1.0, \"x\", True, np.ndarray,):\n if type(x) != type_:\n with self.assertRaises(TypeError, msg=f\"{name} {type_} {x}\"):\n setattr(p, name, x)",
"def test_types(self):\n \n self.assertIsInstance(self.netlist, str)\n self.assertIsInstance(self.waves, dict)\n \n pass",
"def test_should_return_error_if_stmt_contains_no_type(self):\r\n with self.assertRaises(TypeError):\r\n self.spec_parser.parse_statement({'name': 'todd'})",
"def test_validate_type_invalid(_):\n assert not benchmark_utils.validate_type('benchmark')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test _union_sub_type_to_protobuf_variable_name method tuple. | def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock):
_union_sub_type_to_protobuf_variable_name("content_name", "Tuple")
mock.assert_called_once() | [
"def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock):\n pytest.skip()\n _union_sub_type_to_protobuf_variable_name(\"content_name\", \"Tuple[str, ...]\")\n mock.assert_called_once()",
"def _union_sub_type_to_protobuf_variable_name(\n content_name: str, content_type: str\n) -> str:\n if content_type.startswith(\"FrozenSet\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"set_of_{}\".format(sub_type)\n elif content_type.startswith(\"Tuple\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"list_of_{}\".format(sub_type)\n elif content_type.startswith(\"Dict\"):\n sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]\n sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]\n expanded_type_str = \"dict_of_{}_{}\".format(sub_type_1, sub_type_2)\n else:\n expanded_type_str = content_type\n\n protobuf_variable_name = \"{}_type_{}\".format(content_name, expanded_type_str)\n\n return protobuf_variable_name",
"def VisitUnionType(self, node):\n return \"Union[%s]\" % \", \".join(node.type_list)",
"def _decode_union_old(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Union member has no associated value\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n # Union member has value\n if len(obj) != 1:\n raise bv.ValidationError('expected 1 key, got %s' % len(obj))\n tag = list(obj)[0]\n raw_val = obj[tag]\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if isinstance(val_data_type, bv.Nullable) and raw_val is None:\n val = None\n elif isinstance(val_data_type, bv.Void):\n if raw_val is None or not strict:\n # If raw_val is None, then this is the more verbose\n # representation of a void union member. If raw_val isn't\n # None, then maybe the spec has changed, so check if we're\n # in strict mode.\n val = None\n else:\n raise bv.ValidationError('expected null, got %s' %\n bv.generic_type_name(raw_val))\n else:\n try:\n val = _json_compat_obj_decode_helper(\n val_data_type, raw_val, alias_validators, strict, True,\n for_msgpack)\n except bv.ValidationError as e:\n e.add_parent(tag)\n raise\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)",
"def union_parts(union: UnionKind, value: dict):\n selector, sub_value = list(value.items())[0]\n final_kind = union.kind_for(selector)\n value = sub_value\n return final_kind, value",
"def _decode_union(data_type, obj, alias_validators, strict, for_msgpack):\n val = None\n if isinstance(obj, six.string_types):\n # Handles the shorthand format where the union is serialized as only\n # the string of the tag.\n tag = obj\n if tag in data_type.definition._tagmap:\n val_data_type = data_type.definition._tagmap[tag]\n if not isinstance(val_data_type, (bv.Void, bv.Nullable)):\n raise bv.ValidationError(\n \"expected object for '%s', got symbol\" % tag)\n if tag == data_type.definition._catch_all:\n raise bv.ValidationError(\n \"unexpected use of the catch-all tag '%s'\" % tag)\n else:\n if not strict and data_type.definition._catch_all:\n tag = data_type.definition._catch_all\n else:\n raise bv.ValidationError(\"unknown tag '%s'\" % tag)\n elif isinstance(obj, dict):\n tag, val = _decode_union_dict(\n data_type, obj, alias_validators, strict, for_msgpack)\n else:\n raise bv.ValidationError(\"expected string or object, got %s\" %\n bv.generic_type_name(obj))\n return data_type.definition(tag, val)",
"def get_union_typehint_string(union_typehint) -> str:\n return re.sub(r\"typing.Union|[\\[\\]'\\\"()]|ForwardRef\", \"\", str(union_typehint))",
"def construct_union_class_name(inner_types: T.Sequence[T.Type]) -> str:\n type_names = [x.__name__ for x in inner_types]\n caps_cased_names = \"\".join(n[0].upper() + n[1:] for n in type_names)\n\n return f\"UnionOf{caps_cased_names}\"",
"def testUnion(self):\n src = textwrap.dedent(\"\"\"\n a = ... # type: Union[int, float]\n b = ... # type: int or float or complex\n c = ... # type: typing.Union[int, float, complex]\n \"\"\")\n expected = textwrap.dedent(\"\"\"\n from typing import Union\n\n a = ... # type: Union[int, float]\n b = ... # type: Union[int, float, complex]\n c = ... # type: Union[int, float, complex]\n \"\"\")\n self.TestRoundTrip(src, expected)",
"def testNamedTupleDedup(self):\n\n src = textwrap.dedent(\"\"\"\n from typing import NamedTuple, Tuple\n\n x = ... # type: NamedTuple(\"nt\", [(\"f1\", int), (\"f2\", float)])\n y = ... # type: NamedTuple(\"nt\", [(\"f1\", int)])\n \"\"\")\n\n tree = self.Parse(src)\n x = tree.Lookup(\"x\")\n y = tree.Lookup(\"y\")\n self.assertNotEqual(x.type, y.type)\n self.assertNotEqual(tree.Lookup(x.type.name), tree.Lookup(y.type.name))",
"def _generate_struct_union(self, n, name):\n s = name + ' ' + (n.name or '')\n if n.decls:\n s += '\\n'\n s += self._make_indent() \n self.indent_level += 2\n s += '{\\n'\n for decl in n.decls:\n s += self._generate_stmt(decl)\n self.indent_level -= 2\n s += self._make_indent() + '}'\n return s",
"def test_unions(self):\n obj = self.analyzer.get_object(['union'])\n assert obj.type == 'number|string|Color'",
"def typeToName(type: int) -> unicode:\n ...",
"def method_union_name(self) -> str:",
"def get_Union_params(un):\n try:\n return un.__union_params__\n except AttributeError:\n # Python 3.6\n return un.__args__",
"def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)",
"def _check_typevar(self, name: str, node: nodes.AssignName) -> None:\n if isinstance(node.parent, nodes.Assign):\n keywords = node.assign_type().value.keywords\n args = node.assign_type().value.args\n elif isinstance(node.parent, nodes.Tuple):\n keywords = (\n node.assign_type().value.elts[node.parent.elts.index(node)].keywords\n )\n args = node.assign_type().value.elts[node.parent.elts.index(node)].args\n\n variance = TypeVarVariance.invariant\n name_arg = None\n for kw in keywords:\n if variance == TypeVarVariance.double_variant:\n pass\n elif kw.arg == \"covariant\" and kw.value.value:\n variance = (\n TypeVarVariance.covariant\n if variance != TypeVarVariance.contravariant\n else TypeVarVariance.double_variant\n )\n elif kw.arg == \"contravariant\" and kw.value.value:\n variance = (\n TypeVarVariance.contravariant\n if variance != TypeVarVariance.covariant\n else TypeVarVariance.double_variant\n )\n\n if kw.arg == \"name\" and isinstance(kw.value, nodes.Const):\n name_arg = kw.value.value\n\n if name_arg is None and args and isinstance(args[0], nodes.Const):\n name_arg = args[0].value\n\n if variance == TypeVarVariance.double_variant:\n self.add_message(\n \"typevar-double-variance\",\n node=node,\n confidence=interfaces.INFERENCE,\n )\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(\"\",),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.covariant and not name.endswith(\"_co\"):\n suggest_name = f\"{re.sub('_contra$', '', name)}_co\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is covariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.contravariant and not name.endswith(\"_contra\"):\n suggest_name = f\"{re.sub('_co$', '', name)}_contra\"\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is contravariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n elif variance == TypeVarVariance.invariant and (\n name.endswith(\"_co\") or name.endswith(\"_contra\")\n ):\n suggest_name = re.sub(\"_contra$|_co$\", \"\", name)\n self.add_message(\n \"typevar-name-incorrect-variance\",\n node=node,\n args=(f'. \"{name}\" is invariant, use \"{suggest_name}\" instead'),\n confidence=interfaces.INFERENCE,\n )\n\n if name_arg is not None and name_arg != name:\n self.add_message(\n \"typevar-name-mismatch\",\n node=node,\n args=(name_arg, name),\n confidence=interfaces.INFERENCE,\n )",
"def variables(s):\n return tuple(Variable(c) for c in s)",
"def property_to_py_name(cpp_struct_name):\r\n first_underscore = cpp_struct_name.find('_')\r\n assert first_underscore != -1\r\n return cpp_struct_name[first_underscore + 1:]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
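The test row above only asserts that the helper was called once; the actual naming scheme is easier to read off the _union_sub_type_to_protobuf_variable_name listing included among the negatives. Below is a cut-down, illustrative sketch of that mapping; the naive bracket parser stands in for _get_sub_types_of_compositional_types, which is not shown in this entry, and the Dict branch is omitted for brevity.

def _first_sub_type(content_type: str) -> str:
    # Naive stand-in for _get_sub_types_of_compositional_types (an assumption):
    # take the first comma-separated item between the outermost brackets.
    inner = content_type[content_type.index("[") + 1 : content_type.rindex("]")]
    return inner.split(",")[0].strip()


def union_sub_type_to_variable_name(content_name: str, content_type: str) -> str:
    # Mirrors the FrozenSet/Tuple/plain-type branches of the listed implementation.
    if content_type.startswith("FrozenSet"):
        expanded = "set_of_{}".format(_first_sub_type(content_type))
    elif content_type.startswith("Tuple"):
        expanded = "list_of_{}".format(_first_sub_type(content_type))
    else:
        expanded = content_type
    return "{}_type_{}".format(content_name, expanded)


print(union_sub_type_to_variable_name("content_name", "Tuple[str, ...]"))
# -> content_name_type_list_of_str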
Test that the _includes_custom_type method returns a positive result. | def test__includes_custom_type_positive(self, *mocks):
content_type = "Union[str]"
result = self.protocol_generator._includes_custom_type(content_type)
self.assertTrue(result)
content_type = "Optional[str]"
result = self.protocol_generator._includes_custom_type(content_type)
self.assertTrue(result) | [
"def _includes_custom_type(content_type: str) -> bool:\n\n if content_type.startswith(\"Optional\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n result = _includes_custom_type(sub_type)\n elif content_type.startswith(\"Union\"):\n sub_types = _get_sub_types_of_compositional_types(content_type)\n result = False\n for sub_type in sub_types:\n if _includes_custom_type(sub_type):\n result = True\n break\n elif (\n content_type.startswith(\"FrozenSet\")\n or content_type.startswith(\"Tuple\")\n or content_type.startswith(\"Dict\")\n or content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys()\n ):\n result = False\n else:\n result = True\n return result",
"def test_pluggable_type(self):\n assert True",
"def is_special(type_):\n return type_ in _SPECIAL_TYPES",
"def test_search_concern_custom_field(self):\n pass",
"def test_search_incident_custom_field(self):\n pass",
"def test_get_types(self):\n pass",
"def test_search_incident_type(self):\n pass",
"def test_search_incident_sub_type(self):\n pass",
"def test_search_concern_sub_type(self):\n pass",
"def test_search_entry_custom_field(self):\n pass",
"def test_posting_with_custom_type(self):\n #Testing returned types pre POST\n resp0 = self.client.get('/api/v1/supply/type/', format='json')\n self.assertEqual(resp0.status_code, 200, msg=resp0)\n type_list = resp0.data\n self.assertNotIn('egg', type_list)\n self.assertIn('wood', type_list)\n self.assertEqual(len(type_list), 1)\n \n #POST\n modified_supply = base_supply.copy()\n modified_supply['type'] = 'egg'\n resp = self.client.post('/api/v1/supply/', format='json',\n data=modified_supply)\n self.assertEqual(resp.status_code, 201)\n \n #Tests the response\n obj = resp.data\n self.assertIn('type', obj)\n self.assertNotIn('custom-type', obj)\n self.assertEqual(obj['type'], 'egg')\n \n \"\"\"\n resp2 = self.client.get('/api/v1/supply/type/', format='json')\n self.assertHttpOK(resp2)\n type_list = self.deserialize(resp2)\n self.assertIn('egg', type_list)\n self.assertIn('wood', type_list)\n self.assertEqual(len(type_list), 2)\n \"\"\"",
"def test_search_custom_field_definition(self):\n pass",
"def test_search_concern_type(self):\n pass",
"def test_custom_resource_type(self):\n template = Template()\n template.add_resource(MyCustomResource(\"foo\",\n Foo=\"bar\",\n ServiceToken=\"baz\"))\n generated = TemplateGenerator(json.loads(template.to_json()))\n\n # validated that the templates are equal to each other\n self.assertDictEqual(template.to_dict(), generated.to_dict())\n foo = generated.resources[\"foo\"]\n self.assertFalse(isinstance(foo, MyCustomResource))",
"def test_search_contribution_custom_field(self):\n pass",
"def has_type(self, item_type):\n raise NotImplementedError()",
"def is_custom(self):\n return self._is_custom",
"def test_get_types(self):\n self.assert_record_method_is_passthrough(\"get_types\",\n \"get_available_types\")",
"def test_search_function_type(self):\n pass"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. | def convert_text_to_rouge_format(text, title="dummy title"):
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1) if sent != '']
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html | [
"def nltk_text(self, text):\n text = nltk.Text(word_tokenize(text))\n return text",
"def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)",
"def normalize(self, text: str) -> str:",
"def format_ocr_text(self, page):\n \n #read out of the text file that tesseract made\n ocr_text = open(self.ocr_text, 'r')\n \n # write into this file\n djvu_text = open( self.djvu_text, 'w' )\n \n text = \"(page 0 0 1 1\\n\"\n \n self.out_text.write('\\n## Page %d ###\\n\\n' % page )\n \n for line in ocr_text:\n \n #write to the human readable file\n self.out_text.write(line)\n \n # add each line of text\n # escaping \" to \\\" as we go\n text += '(line 0 0 1 1 \"%s\")\\n' % line.replace('\"', r'\\\"').strip()\n \n text += \")\\n\"\n \n djvu_text.write( text )\n \n ocr_text.close()\n djvu_text.close()",
"def process_text(self, text, language):",
"def convert_all(text):\r\n\tpig_tokens = ''\r\n\r\n\t#tokenizes the text\r\n\ttokens = word_tokenize(text)\r\n\r\n\t#regex for non-alphabetical characters\r\n\tpattern = re.compile(r'[^a-zA-Z]')\r\n\r\n\t#converts the words to pig latin and appends them to the sentence.\r\n\tfor token in tokens:\r\n\t\tif not re.findall(pattern, token):\r\n\t\t\tword = word_to_pig_latin(token)\r\n\r\n\t\t\tif re.findall(r'[A-Z]', word):\r\n\t\t\t\tword = word.lower()\r\n\t\t\t\tword = word.capitalize()\r\n\t\t\tpig_tokens += ' ' + word\r\n\t\telse:\r\n\t\t\tpig_tokens += token\r\n\r\n\tpig_text = ''.join(pig_tokens)\r\n\r\n\treturn pig_text",
"def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)",
"def recover_raw_text(text: str) -> str:\n sp_text = list()\n\n # 抽取完整的症状分词结果\n nested, idx, span = 0, 0, ''\n while idx < len(text):\n if text[idx] == '[':\n if not nested and len(span) > 0:\n sp_text.append(span)\n span = ''\n nested += 1\n span += text[idx]\n idx += 1\n continue\n\n if text[idx] == ']':\n nested -= 1\n span += text[idx]\n idx += 1\n if not nested:\n sp_text.append(span + text[idx:idx+3])\n idx += 3\n span = ''\n continue\n\n span += text[idx]\n idx += 1\n\n assert not nested\n if len(span) > 0:\n sp_text.append(span)\n sp_text = [sp for sp in sp_text if len(sp) > 0]\n assert ''.join(sp_text) == text\n\n out_text = list()\n for span in sp_text:\n res = re.findall(r'\\[\\d+(.+?)\\d+\\](sym|dis|ite|bod)', span)\n if len(res) == 0:\n out_text.append(span.strip())\n continue\n\n # 处理嵌套的实体\n for r in res:\n tr = re.sub(r'\\[(.+?)\\](dis|ite|bod)', r'\\1', r[0])\n out_text.append(tr)\n\n output = list()\n for span in out_text:\n if len(span) == 0:\n continue\n output.append(span.strip())\n return ''.join(output).replace(' ', '')",
"def preprocess(\n self,\n text: 'str',\n ) -> 'str':",
"def process_text(text):\n no_split_dict = {'u . s': 'u.s', 'u . n': 'u.n', 'u . k': 'u.k', 'l . a': 'l.a', 'j . k': 'j.k', 'a . m': 'a.m',\n 'p . m': 'p.m', 'd . j': 'd.j', 'd . a': 'd.a'}\n\n text = re.sub(\".*--\", \"\", text, count=1) # Removing cnn from start of text\n if text.startswith('(CNN)'): # Remove cnn from articles that starts with only cnn\n text = re.sub('\\(CNN\\)', '', text, count=1)\n text = re.sub(r'(?<=[^?!.0-9])(?=[.,!?])', ' ', text) # 4\n text = re.sub(r'(?![0-9])(?<=[.,])(?=[^\\s])', r' ', text) # 4\n text = text.lower() # 2\n text = re.sub('[^A-Za-z0-9 .!?,øæå]+', '', text) # 3\n text = re.sub(r'((?<=[a-z])(?=[.]))|((?=[a-z])(?<=[.]))(?=[^\\s])', r' ', text) # space a-z.a-z\n text = re.sub(r'((?=[0-9])(?<=[a-z]))|((?=[a-z])(?<=[0-9]))(?=[^\\s])', r' ', text) # space 0-9a-z\n for key in no_split_dict:\n text = text.replace(key, no_split_dict[key]) # Fixing word splits\n text = re.sub('[0-9]', '#', text) # 8\n text = \" \".join(text.split()) # 5, 6, 7 - i think\n return text",
"def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:\n text = sentence\n return text",
"def raw(text):",
"def convertTextWikiToHtml (\r\n\r\n self,\r\n text = None,\r\n normalize = False\r\n \r\n ) :\r\n\r\n if utilities.isEmpty( text ) : return False \r\n\r\n # replaces horizontal lines\r\n\r\n text = text.replace( wiki.horizontalLineCode, html.horizontalLine() )\r\n\r\n # processes the wiki tags in order (otherwise there is ambiguity in wiki syntax)\r\n\r\n for format in wiki.formatList : text = self.convertFormatWikiToHtml( text, format )\r\n\r\n # processes links\r\n\r\n text = self.convertLinksWikiToHtml( text )\r\n\r\n return text",
"def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text",
"def convertTextWikiToWiki (\r\n\r\n self,\r\n text = None,\r\n normalize = False,\r\n ) :\r\n\r\n if utilities.isEmpty( text ) : return \"\" \r\n\r\n # processes starting spaces\r\n\r\n # removes starting spaces and processes links\r\n\r\n text = wiki.normalizeHeadingSpaces( text )\r\n \r\n text = self.convertLinksWikiToWiki( text, normalize )\r\n\r\n return text",
"def _prepare_text(self, text, tagblanks=False, numlines=False,\r\n notagurl=False, notagemail=False, notagip=False,\r\n notagdns=False, nosgmlsplit=False):\r\n logger.debug(\"Preparing text for tagger with options tagblanks=%d, numlines=%d, notagurl=%d, \"\r\n \"notagemail=%d, notagip=%d, notagdns=%d, nosgmlsplit=%d).\",\r\n tagblanks, numlines, notagurl, notagemail, notagip, notagdns, nosgmlsplit)\r\n\r\n # To avoid searching in many place for SGML tags, such tags\r\n # are wrapped inside an FinalPart object.\r\n\r\n # Build a list of lines. If we start from a list of text\r\n if isinstance(text, six.text_type):\r\n lines = text.splitlines()\r\n else:\r\n lines = []\r\n for t in text:\r\n if '\\n' in t:\r\n lines.extend(t.splitlines())\r\n else:\r\n lines.append(t)\r\n\r\n # If necessary, add line numbering SGML tags (which will\r\n # be passed out as is by TreeTagger and which could be\r\n # used to identify lines in the flow of tags).\r\n if numlines:\r\n logger.debug(\"Numbering lines.\")\r\n parts = []\r\n for num, line in enumerate(lines):\r\n parts.append(FinalPart(NUMBEROFLINE.format(num + 1,)))\r\n parts.append(line)\r\n # Remove temporary storage.\r\n\r\n logger.debug(\"Inserted line numbers as SGML tags between lines.\")\r\n else:\r\n parts = lines\r\n\r\n # First, we split the text between SGML tags and non SGML\r\n # part tags (for pure text, this will make no difference,\r\n # but consume time).\r\n if not nosgmlsplit:\r\n logger.debug(\"Identifying SGML tags from within text.\")\r\n newparts = []\r\n for part in parts:\r\n if isinstance(part, FinalPart):\r\n newparts.append(part)\r\n else:\r\n newparts.extend(split_sgml(part))\r\n parts = newparts\r\n logger.debug(\"Splitted between SGML tags and others %r.\")\r\n\r\n newparts = []\r\n if tagblanks:\r\n # If requested, replace internal blanks by other SGML tags.\r\n logger.debug(\"Replacing blanks by corresponding SGML tags.\")\r\n for part in parts:\r\n if isinstance(part, FinalPart):\r\n newparts.append(part)\r\n else:\r\n newparts.extend(blank_to_tag(part))\r\n else:\r\n # Else, replace cr, lf, vt, ff, and tab characters with blanks.\r\n logger.debug(\"Replacing blanks by spaces.\")\r\n for part in parts:\r\n if isinstance(part, FinalPart):\r\n newparts.append(part)\r\n else:\r\n newparts.append(blank_to_space(part))\r\n parts = newparts\r\n logger.debug(\"Blanks replacement done.\")\r\n\r\n if not notagurl:\r\n logger.debug(\"Replacing URLs.\")\r\n parts = build_with_callable(parts,\r\n split_url, self.replurlexp, REPLACED_URL_TAG)\r\n logger.debug(\"URLs replacement done.\")\r\n\r\n if not notagemail:\r\n logger.debug(\"Replacing Emails.\")\r\n parts = build_with_callable(parts,\r\n split_email, self.replemailexp, REPLACED_EMAIL_TAG)\r\n logger.debug(\"Emails replacement done.\")\r\n\r\n if not notagip:\r\n logger.debug(\"Replacing IP addresses.\")\r\n parts = build_with_callable(parts,\r\n split_ip, self.replipexp, REPLACED_IP_TAG)\r\n logger.debug(\"IP adresses replacement done.\")\r\n\r\n if not notagdns:\r\n logger.debug(\"Replacing DNS names.\")\r\n parts = build_with_callable(parts,\r\n split_dns, self.repldnsexp, REPLACED_DNS_TAG)\r\n logger.debug(\"DNS names replacement done.\")\r\n\r\n # Process part by part, some parts wille be SGML tags, other don't.\r\n logger.debug(\"Splittint parts of text.\")\r\n newparts = []\r\n for part in parts:\r\n if isinstance(part, FinalPart):\r\n # TreeTagger process by line... 
a token cannot be on multiple\r\n # lines (in case it occured in source text).\r\n part.text = part.text.replace(\"\\n\", \" \")\r\n logger.debug(\"No _prepare_part() for final part %s.\", part)\r\n newparts.append(part)\r\n else:\r\n # This is another part which need more analysis.\r\n newparts.extend(self._prepare_part(part))\r\n parts = newparts\r\n\r\n logger.debug(\"Text preprocessed, parts splitted one by line.\")\r\n\r\n # Return only str items for caller.\r\n return [x.text if isinstance(x, FinalPart) else x for x in parts]",
"def naive(self, text):\n\n\t\tsegmentedText = []\n\n\t\t#Fill in code here\n\t\tsentenceBoundaries = set(['.', '?','!'])\n\t\tsentence = []\n\t\tfor i in text:\n\t\t\tif i in sentenceBoundaries:\n\t\t\t\tsentence.append(i)\n\t\t\t\tsegmentedText.append((''.join(sentence)).strip())\n\t\t\t\tsentence = []\n\t\t\telse:\n\t\t\t\tsentence.append(i)\n\n\n\t\treturn segmentedText",
"def convertingPluralTosingular(line):\n review= TextBlob(line)\n token= review.split()\n token= token.singularize()\n return token",
"def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
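As a quick usage sketch for the converter above (it only relies on str.split, str.format and enumerate, so nothing else is needed), with a made-up two-sentence input:

sample = "The cat sat on the mat.\nIt was a sunny day."   # one sentence per line
html = convert_text_to_rouge_format(sample, title="sample summary")
print(html)
# Each non-empty line becomes a numbered anchor inside the <body>, e.g.
#   <a name="1">[1]</a> <a href="#1" id=1>The cat sat on the mat.</a>
#   <a name="2">[2]</a> <a href="#2" id=2>It was a sunny day.</a>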
Cross Entropy. Calculates the cross entropy of two discrete distributions x and y. | def cross_entropy(x, y, bins, xy_probabilities=False):
# calculate probabilities if probabilities == False
if xy_probabilities:
# same bins for x and y -> same length of x and y if xy_probabilities == True
assert len(x) == len(y)
# if x does not sum up to 1, raise an error
if not np.isclose(sum(x),1,atol=0.0001):
raise ValueError('Probabilities in vector x do not sum up to 1.')
# if y does not sum up to 1, raise an error
if not np.isclose(sum(y),1,atol=0.0001):
raise ValueError('Probabilities in vector y do not sum up to 1.')
        # add a small number to all probabilities if a zero occurs
        if (x == 0).any() or (y == 0).any():
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
else:
# get the bins, joint bins for x and y (same_bins=True)
bins = get_2D_bins(x, y, bins, same_bins=True)
# calculate unconditioned histograms
hist_x = np.histogram(x, bins=bins[0])[0]
hist_y = np.histogram(y, bins=bins[1])[0]
px = (hist_x / np.sum(hist_x)) + 1e-15
py = (hist_y / np.sum(hist_y)) + 1e-15
return - px.dot(np.log2(py)) | [
"def joint_entropy(x: np.array, y: np.array):\n # Note the dimensions of X and Y should be same\n xy = np.c_[x, y] # [[x1,y1], [x2,y2]...[xn,yn]]\n h_xy = entropy(xy)\n return h_xy",
"def cross_entropy(p1, p2):\n xh = 0\n\n # TODO -- Calculate cross-entropy value H(p1, p2) in nats\n for x in p1:\n xh -= p1[x] * math.log(p2[x])\n\n return xh",
"def conditional_entropy(x: np.array, y: np.array):\n c_x_y = joint_entropy(x, y) - entropy(y)\n return c_x_y",
"def crossentropy(x1, x2):\r\n return (x1 * (-np.log(x2))).sum(axis=1)",
"def compute_cross_entropy(self, X, y):\n\n assert X.shape[0] == y.shape[0]\n p = self.predict(X, 'prob')\n\n return -(y * np.log(p+1e-8)).mean()",
"def cEntropy(Y, X):\n return jEntropy(Y, X) - entropy(X)",
"def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)",
"def cross_entropy(input_a, input_b):\n return -np.sum(input_b*np.log(input_a+1e-9))/input_a.shape[0]",
"def cross_entropy_error(self, x, y):\n return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])",
"def cross_entropy (scores, y):\n s = torch.gather(scores, 0, y) # we are indexing scores using the value of y\n loss = ((-((s.exp())/(scores.exp().sum())).log())).sum() # computing the loss\n loss = loss / (scores.shape[1]) # normalizing the loss\n return loss",
"def mutual_information(x: np.array, y: np.array):\n I = entropy(x) - conditional_entropy(x, y)\n return I",
"def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')",
"def cross_entropy(self, yhat):\n n = len(self._y)\n c = 0.0\n for i in range(0, n):\n c += self._y[i] * log(\n yhat[i]) + (1 - self._y[i]) * log(1 - yhat[i])\n\n return c",
"def computeCrossEntropyLoss(self, query_logits, query_labels):\n return F.cross_entropy(query_logits, query_labels)",
"def calculate_entropy(y):\n # 换底公式\n log2 = lambda x: math.log(x) / math.log(2)\n # 所有标签\n unique_labels = np.unique(y)\n # 熵\n entropy = 0\n for label in unique_labels:\n count = len(y[y == label])\n # 某一类的概率\n p = count / len(y)\n # 交叉熵计算公式\n entropy += -p * log2(p)\n \n return entropy",
"def cohen_d(x_arr, y_arr):\n delta = np.mean(x_arr) - np.mean(y_arr)\n pooled_std = np.sqrt(\n (\n (len(x_arr) - 1) * np.std(x_arr, ddof=1) ** 2 +\n (len(y_arr) - 1) * np.std(y_arr, ddof=1) ** 2\n ) / (len(x_arr) + len(y_arr))\n )\n return delta / pooled_std",
"def mi_x1x2_c(px1,px2,px1x2_c):\n marginal_entropy = entropy(px1, base=2)\n conditional_entropy = 0.\n for x2i in range(px2.size):\n conditional_entropy += px2[x2i] * entropy(px1x2_c[:,x2i], base=2)\n return marginal_entropy - conditional_entropy",
"def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))",
"def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
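A short usage sketch for the cross_entropy document above, restricted to the xy_probabilities=True branch so that the get_2D_bins helper (not shown in this entry) is not needed; the probability vectors are made-up illustrative values.

import numpy as np

p = np.array([0.2, 0.5, 0.3])   # empirical distribution x
q = np.array([0.1, 0.6, 0.3])   # reference distribution y

# In this branch the function reduces to H(p, q) = -sum(p_i * log2(q_i)).
h_pq = cross_entropy(p, q, bins=None, xy_probabilities=True)
print(h_pq, -p.dot(np.log2(q)))   # both values agree (up to the 1e-15 guard)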
r"""Joint Entropy Calculates the joint entropy of two discrete distributions x and y. This is the combined Entropy of X added to the conditional Entropy of x given y. | def joint_entropy(x, y, bins):
# assert array length
assert len(x) == len(y)
# get the bins, x and y get their own bins in case of joint entropy
bins = get_2D_bins(x, y, bins)
# get the joint histogram
joint_hist = np.histogram2d(x, y, bins)[0]
# calculate the joint probability and add a small number
joint_p = (joint_hist / np.sum(joint_hist)) + 1e-15
# calculate and return the joint entropy
return - np.sum(joint_p * np.log2(joint_p)) | [
"def joint_entropy(x: np.array, y: np.array):\n # Note the dimensions of X and Y should be same\n xy = np.c_[x, y] # [[x1,y1], [x2,y2]...[xn,yn]]\n h_xy = entropy(xy)\n return h_xy",
"def H_2(x, y, ns):\n\n if (len(x) != len(y)):\n print(\"H_2 warning : sequences of different lengths, using the shorter...\")\n n = min(len(x), len(y))\n p = np.zeros((ns, ns)) # joint distribution\n for t in range(n):\n p[x[t],y[t]] += 1.0\n p /= n\n h = -np.sum(p[p>0]*log(p[p>0]))\n return h",
"def joint_entropy(aln, col1, col2, base):\n logging.info(\"Calculates joint entropy of {} and {}\".format(col1, col2))\n joint_freq = joint_column_frequencies(aln, col1, col2)\n jentropy = 0\n for key in joint_freq:\n if joint_freq[key] != 0:\n jentropy -= joint_freq[key]*math.log(joint_freq[key], base)\n return jentropy",
"def joint_entropy(pd):\n # Attention to multidimension and proba equal to 0 which makes a matherror for log.\n return entropy(pd.flatten())",
"def conditional_entropy(x: np.array, y: np.array):\n c_x_y = joint_entropy(x, y) - entropy(y)\n return c_x_y",
"def joint_shannon_entropy(stringX, stringY):\n X = np.array(list(stringX))\n Y = np.array(list(stringY))\n joint_symbol_probabilities = []\n for x in set(X):\n for y in set(Y):\n joint_symbol_probabilities.append(np.mean(np.logical_and(X == x, Y == y)))\n return sum(-p * np.log2(p) for p in joint_symbol_probabilities if p != 0)",
"def p_joint(x1,x2,windowx1=1,windowx2=1):\n x1_unique, x1 = np.unique(x1, return_inverse=True)\n x2_unique, x2 = np.unique(x2, return_inverse=True)\n numuniquex1 = x1_unique.size\n numuniquex2 = x2_unique.size\n numwordsx1 = numuniquex1**windowx1\n numwordsx2 = numuniquex2**windowx2\n aux_base_x1 = numuniquex1**np.arange(windowx1)[::-1]\n aux_base_x2 = numuniquex2**np.arange(windowx2)[::-1]\n px1x2 = np.zeros((numwordsx1,numwordsx2)) #matrix of size numwordsx,numwordsy with for the joint probability distribution\n for i in range(len(x1)-windowx1):\n x1i = np.inner(x1[i:i+windowx1], aux_base_x1).astype(np.int)\n x2i = np.inner(x2[i:i+windowx2], aux_base_x2).astype(np.int)\n px1x2[x1i,x2i] += 1\n return px1x2/px1x2.sum()",
"def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n return Ixy\r\n raise Exception('Function not yet implemented!')",
"def joint_entropy(column_i, column_j):\n\tfreq_ij = dict()\n\ttotal = len(column_i)\n\tentropy = 0\n\tfor index in range(total):\n\t\ti = column_i[index]\n\t\tj = column_j[index]\n\t\tif i+j in freq_ij:\n\t\t\tfreq_ij[i+j] +=1\n\t\telse:\n\t\t\tfreq_ij[i+j] = 1\n\n\tfor key in freq_ij:\n\t\tfreq_ij[key] /= total\n\t\tentropy += freq_ij[key]*math.log(freq_ij[key], 2)\n\treturn -entropy",
"def joint_entropy(column_i, column_j):\n\tif len(column_i) != len(column_j):\n\t\traise IndexError(\"The two MSA should have the same number of related sequences (same species)\")\n\tfreq_ij = dict()\n\ttotal = len(column_i)\n\tentropy = 0\n\tfor index in range(total):\n\t\ti = column_i[index]\n\t\tj = column_j[index]\n\t\tif i+j in freq_ij:\n\t\t\tfreq_ij[i+j] +=1\n\t\telse:\n\t\t\tfreq_ij[i+j] = 1\n\n\tfor key in freq_ij:\n\t\tfreq_ij[key] /= total\n\t\tentropy += freq_ij[key]*math.log(freq_ij[key], 2)\n\tif entropy != 0.0:\n\t\treturn -entropy\n\telse:\n\t\treturn entropy",
"def joint_pdf(self, x1, x2 = None):\n return np.exp(self.joint_logpdf(x1, x2))",
"def transfer_entropy(X, Y):\n coords = Counter(zip(Y[1:], X[:-1], Y[:-1]))\n\n p_dist = np.zeros((config.NUM_STATES, config.NUM_STATES, config.NUM_STATES))\n for y_f, x_p, y_p in coords.keys():\n p_dist[y_p, y_f, x_p] = coords[(y_f, x_p, y_p)] / (len(X) - 1)\n\n p_yp = p_dist.sum(axis=2).sum(axis=1)\n p_joint_cond_yp = p_dist / p_yp[:, None, None]\n p_yf_cond_yp = p_dist.sum(axis=2) / p_yp[:, None]\n p_xp_cond_yp = p_dist.sum(axis=1) / p_yp[:, None]\n\n denominator = np.multiply(p_yf_cond_yp, p_xp_cond_yp)\n denominator[denominator == 0] = np.nan\n\n division = np.divide(p_joint_cond_yp, denominator[:, :, None])\n division[division == 0] = np.nan\n\n log = np.log2(division)\n\n return np.nansum(np.multiply(p_dist, log))",
"def mutual_information(x: np.array, y: np.array):\n I = entropy(x) - conditional_entropy(x, y)\n return I",
"def cross_entropy(x, y, bins, xy_probabilities=False):\n # calculate probabilities if probabilities == False\n if xy_probabilities:\n # same bins for x and y -> same length of x and y if xy_probabilities == True\n assert len(x) == len(y)\n\n # if x does not sum up to 1, raise an error\n if not np.isclose(sum(x),1,atol=0.0001):\n raise ValueError('Probabilities in vector x do not sum up to 1.')\n # if y does not sum up to 1, raise an error\n if not np.isclose(sum(y),1,atol=0.0001):\n raise ValueError('Probabilities in vector y do not sum up to 1.')\n\n # add a small number to all probabilities if zero occurs\n if x.any(0):\n px = x + 1e-15\n py = y + 1e-15\n else:\n px = x\n py = y\n else:\n # get the bins, joint bins for x and y (same_bins=True)\n bins = get_2D_bins(x, y, bins, same_bins=True)\n\n # calculate unconditioned histograms\n hist_x = np.histogram(x, bins=bins[0])[0]\n hist_y = np.histogram(y, bins=bins[1])[0]\n\n px = (hist_x / np.sum(hist_x)) + 1e-15\n py = (hist_y / np.sum(hist_y)) + 1e-15\n\n return - px.dot(np.log2(py))",
"def cond_joint_entropy(joint_prob, pcond):\n # Computing log2(P(X,Y|Z))\n log2_p = (np.ma.log2(pcond)).filled(0)\n # Multipling element wise the arrays\n prod_entropy = np.multiply(joint_prob, log2_p)\n # Getting the - sum of the resulting array.\n H = -(np.sum(prod_entropy))\n return H",
"def calculate_entropy(y):\n # 换底公式\n log2 = lambda x: math.log(x) / math.log(2)\n # 所有标签\n unique_labels = np.unique(y)\n # 熵\n entropy = 0\n for label in unique_labels:\n count = len(y[y == label])\n # 某一类的概率\n p = count / len(y)\n # 交叉熵计算公式\n entropy += -p * log2(p)\n \n return entropy",
"def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))",
"def mutual_information(in1: np.ndarray, in2: np.ndarray, nbins: int) -> float:\n if in1.ndim == 1:\n in1 = in1[:, None]\n if in2.ndim == 1:\n in2 = in2[:, None]\n if in1.shape[0] != in2.shape[0]:\n raise ValueError(\"in1 and in2 must have the same number of samples\")\n in1_entropy = jacknife_entropy(in1, nbins)\n in2_entropy = jacknife_entropy(in2, nbins)\n joint_ent = jacknife_entropy(np.hstack((in1, in2)), nbins)\n return in1_entropy + in2_entropy - joint_ent",
"def joint_feature(self, x, y):\n self._check_size_x(x)\n features, edges = self._get_features(x), self._get_edges(x)\n n_nodes = features.shape[0]\n\n if isinstance(y, tuple):\n # y is result of relaxation, tuple of unary and pairwise marginals\n unary_marginals, pw = y\n unary_marginals = unary_marginals.reshape(n_nodes, self.n_states)\n # accumulate pairwise\n pw = pw.reshape(-1, self.n_states, self.n_states).sum(axis=0)\n else:\n y = y.reshape(n_nodes)\n gx = np.ogrid[:n_nodes]\n\n #make one hot encoding\n unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.int)\n gx = np.ogrid[:n_nodes]\n unary_marginals[gx, y] = 1\n\n ##accumulated pairwise\n pw = np.dot(unary_marginals[edges[:, 0]].T,\n unary_marginals[edges[:, 1]])\n unaries_acc = np.dot(unary_marginals.T, features)\n if self.directed:\n pw = pw.ravel()\n else:\n pw = compress_sym(pw)\n joint_feature_vector = np.hstack([unaries_acc.ravel(), pw])\n return joint_feature_vector"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
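The joint_entropy document above delegates its binning to get_2D_bins, which is not shown in this entry; the sketch below estimates the same quantity directly from np.histogram2d with a plain integer bin count, on randomly generated toy data.

import numpy as np

rng = np.random.default_rng(42)
x = rng.normal(size=1000)
y = 0.5 * x + rng.normal(size=1000)   # correlated toy data

joint_hist = np.histogram2d(x, y, bins=10)[0]
joint_p = joint_hist / joint_hist.sum() + 1e-15   # same small-number guard as above
h_xy = -np.sum(joint_p * np.log2(joint_p))
print(h_xy)   # joint entropy estimate in bits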
r"""KullbackLeibler Divergence Calculates the KullbackLeibler Divergence between two discrete distributions x and y. X is considered to be an empirical discrete distribution while y is considered to be the real discrete distribution of the underlying population. | def kullback_leibler(x, y, bins, xy_probabilities=False):
if xy_probabilities:
# if x does not sum up to 1, raise an error
if not np.isclose(sum(x),1,atol=0.0001):
raise ValueError('Probabilities in vector x do not sum up to 1.')
# if y does not sum up to 1, raise an error
if not np.isclose(sum(y),1,atol=0.0001):
raise ValueError('Probabilities in vector y do not sum up to 1.')
        # add a small number to all probabilities if a zero occurs
        if (x == 0).any() or (y == 0).any():
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
else:
# get the bins, joint bins for x and y (same_bins=True)
bins = get_2D_bins(x, y, bins, same_bins=True)
# calculate unconditioned histograms
hist_x = np.histogram(x, bins=bins[0])[0]
hist_y = np.histogram(y, bins=bins[1])[0]
#calculate probabilities
px = (hist_x / np.sum(hist_x))
py = (hist_y / np.sum(hist_y))
# calculate the cross entropy and unconditioned entropy of y
hcross = cross_entropy(px, py, bins, xy_probabilities=True)
hx = entropy(px, bins, xy_probabilities=True)
return hcross - hx | [
"def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( x.flat_cpt() * np.log( x.flat_cpt() / y.flat_cpt() ) )\n\treturn distance",
"def kl_divergence(x, y, thresholded=True, symmetrized=True, normalize=True):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n # assert (np.all(x.sum(1) != 0.) and np.all(y.sum(1) != 0.))\n if thresholded:\n normalize = True\n if normalize:\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n if thresholded:\n eps = np.finfo(x.dtype).eps\n x = x + eps\n y = y + eps\n x /= x.sum(1).reshape(x.shape[0], 1)\n y /= y.sum(1).reshape(y.shape[0], 1)\n res = __kl_divergence(x, y)\n\n if symmetrized:\n res = 0.5 * res + 0.5 * __kl_divergence(y, x).transpose()\n\n return np.float64(res).reshape(res.shape)",
"def kullback_leibler(distribution1, distribution2):\n\n # The iteration order of the keys in the two dictionaries need not match.\n # Thus, we align them in their respective lists before calling entropy.\n\n list1 = []\n list2 = []\n for k in distribution1.keys():\n list1.append(distribution1[k])\n list2.append(distribution2[k])\n\n return entropy(list1,list2)",
"def kullback_leibler_divergence_loss(self, y_true=None, y_pred=None, decimal=5, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data2(y_true, y_pred, decimal)\n y_pred = np.clip(y_pred, self.EPSILON, 1 - self.EPSILON) # Clip predicted probabilities\n if binary:\n y_true = np.clip(y_true, self.EPSILON, 1 - self.EPSILON) # Clip true labels\n res = y_true * np.log(y_true / y_pred) + (1 - y_true) * np.log((1 - y_true) / (1 - y_pred))\n res = np.mean(res)\n else:\n # Convert y_true to one-hot encoded array\n num_classes = len(np.unique(y_true))\n y_true = np.eye(num_classes)[y_true]\n y_true = np.clip(y_true, self.EPSILON, 1 - self.EPSILON) # Clip true labels\n res = np.sum(y_true * np.log(y_true / y_pred), axis=1)\n res = np.mean(res)\n return np.round(res, decimal)",
"def kl_divergence(y_true, y_pred):\n y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)\n y_true = math_ops.cast(y_true, y_pred.dtype)\n y_true = backend.clip(y_true, backend.epsilon(), 1)\n y_pred = backend.clip(y_pred, backend.epsilon(), 1)\n return math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)",
"def kl_divergence(p_dist, q_dist, n_samples_per_axis=30, n_axis=2):\r\n global COUNTER\r\n if n_axis == 2:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y)\r\n elif n_axis == 3:\r\n x = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n y = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n z = np.linspace(-1.0, 1.0, n_samples_per_axis)\r\n grids = np.meshgrid(x, y, z)\r\n elif n_axis == 1:\r\n grids = np.linspace(-1.1, 1.1, 120)\r\n print(\"Grid complete!\")\r\n if n_axis != 1:\r\n grid = np.vstack(grids).reshape((n_axis, n_samples_per_axis**n_axis)).T\r\n else:\r\n grid = grids\r\n grid = np.reshape(grid, (grid.shape[0], 1))\r\n probs_p = np.exp(p_dist.score_samples(grid))\r\n probs_q = np.exp(q_dist.score_samples(grid))\r\n print(\"prob_calc_complete\")\r\n kl = entropy(probs_p, probs_q)\r\n return kl",
"def estimate_kl_divergence(self, argin):\n parser = ArgumentParser(prog='.est_kl')\n parser.add_argument('generator_a', type=str,\n help='Name of the reference generator.')\n parser.add_argument('generator_b', type=str,\n help='Name of the approximating generator.')\n parser.add_argument('--targets', nargs='*',\n help='Sequence of target columns to evaluate the log likelhood. '\n 'By default, all columns in <table> will be used.')\n parser.add_argument('--givens', nargs='*',\n help='Sequence of columns and observed values to condition on. '\n 'The required format is [<col> <val>...].')\n parser.add_argument('--n-samples', type=int,\n help='Number of rows in the dataset to use in the computation. '\n 'Defaults to all rows.')\n\n try:\n args = parser.parse_args(shlex.split(argin))\n except ArgparseError as e:\n self.stdout.write('%s' % (e.message,))\n return\n\n kl = bdbcontrib.estimate_kl_divergence(self._bdb, args.generator_a,\n args.generator_b, targets=args.targets, givens=args.givens,\n n_samples=args.n_samples)\n\n print kl",
"def KL_divergence(model_1, model_2, samples):\n posterior_1 = create_posterior_object(model_1, samples)\n posterior_2 = create_posterior_object(model_2, samples)\n return posterior_1.KL(posterior_2)",
"def KL_divergence(xs,ys,pdf_x=None,pdf_y=None,data_range=None):\n if data_range is None:\n data_range = list(set(xs)) + list(set(ys))\n if pdf_x is None:\n pdf_x = prob_density_func(xs,norm=True,data_range=data_range)\n if pdf_y is None:\n pdf_y = prob_density_func(ys,norm=True,data_range=data_range)\n keys = set(pdf_x.keys()+pdf_y.keys())\n PQ = []\n for k in keys:\n if k in pdf_x and k in pdf_y:\n PQ.append((pdf_x[k],pdf_y[k]))\n return np.sum([p*np.log(float(p)/float(q)) for (p,q) in PQ if q>0 and p>0])",
"def _graph_fn_kl_divergence(distribution_a, distribution_b):\n if get_backend() == \"tf\":\n return tf.no_op()\n # TODO: never tested. tf throws error: NotImplementedError: No KL(distribution_a || distribution_b) registered for distribution_a type Bernoulli and distribution_b type ndarray\n #return tf.distributions.kl_divergence(\n # distribution_a=distribution_a,\n # distribution_b=distribution_b,\n # allow_nan_stats=True,\n # name=None\n #)",
"def kl_divergence(p1, p2):\n kl = 0\n\n # TODO -- Calculate KL divergence D_{KL}(p1||p2) in nats\n\t# D_{KL}(p1||p2) = H(p1,p2) - H(p1)\n kl = cross_entropy(p1, p2) - entropy(p1)\n\n return kl",
"def kl_divergence(self, model,states,discrete=False):\n\t\t# observations_tensor = torch.cat(\n\t\t# \t\t[Variable(self.FloatTensor(state)).unsqueeze(0) for state in states])\n\t\tif(discrete):\n\t\t\tactprob = self.model_wrapper(model,states)\n\t\t\t# actprob = torch.normal(mean,std)\n\t\t\t# old_mean, old_std = model()\n\t\t\told_actprob = self.model_wrapper(self.actor,states)\n\t\t\t\n\t\t\t# actprob = model(states)\n\t\t\t# old_actprob = self.actor(states)\n\t\t\t# print(torch.cumsum(torch.exp(old_actprob) *(old_actprob - actprob),1).mean())\n\t\t\treturn torch.cumsum(torch.exp(old_actprob) *(old_actprob - actprob),1).mean()\n\t\telse:\n\t\t\tmean2,std2 = self.actor(states)\n\t\t\tmean1,std1 = model(states)\n\t\t\tstd1_log = torch.log(std1)\n\t\t\tstd2_log = torch.log(std2)\n\t\t\tkl = std2_log - std1_log + (std1.pow(2)+ (-mean1+mean2).pow(2))/(2.0*std2.pow(2))-0.5\n\t\t\treturn kl.sum(1,keepdim=True).mean()",
"def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs):\n\n # create grid of sample locations (default: 100x100)\n xx, yy = np.mgrid[x.min()-0.1:x.max()+0.1:xbins, \n y.min()-0.1:y.max()+0.1:ybins]\n\n xy_sample = np.vstack([xx.ravel(), yy.ravel()]).T\n xy_train = np.vstack([x, y]).T\n\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(xy_train)\n\n # score_samples() returns the log-likelihood of the samples\n z = np.exp(kde_skl.score_samples(xy_sample))\n return xx, yy, np.reshape(z, xx.shape)",
"def kullback_leibler_loss(self):\n LKL = -0.5*torch.sum(1+self.sigma-self.mu**2-torch.exp(self.sigma))\\\n /float(hp.Nz*hp.batch_size)\n if gpu:\n KL_min = Variable(torch.Tensor([hp.KL_min]).cuda()).detach()\n else:\n KL_min = Variable(torch.Tensor([hp.KL_min])).detach()\n return hp.wKL*self.eta_step * torch.max(LKL,KL_min)",
"def kl_divergence(a, b, normalize=True):\n a, b = np.array(a), np.array(b)\n\n x = np.linspace(\n min(a.min(), b.min()) - 1,\n max(a.max(), b.max()) + 1,\n 100\n )\n\n p = gaussian_kde(a)(x)\n q = gaussian_kde(b)(x)\n\n if normalize:\n p = p/np.sum(p)\n q = q/np.sum(q)\n\n return np.sum(np.where(p != 0, (p) * np.log(p / q), 0))",
"def kde2D(x, y, bandwidth, xbins=100j, ybins=100j, **kwargs):\n\n # create grid of sample locations (default: 100x100)\n xx, yy = np.mgrid[x.min():x.max():xbins, \n y.min():y.max():ybins]\n\n xy_sample = np.vstack([yy.ravel(), xx.ravel()]).T\n xy_train = np.vstack([y, x]).T\n\n kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)\n kde_skl.fit(xy_train)\n\n # score_samples() returns the log-likelihood of the samples\n z = np.exp(kde_skl.score_samples(xy_sample))\n return xx, yy, np.reshape(z, xx.shape)",
"def kl_with_logits(x, y, dim=2, eps=1e-10):\n eps = torch.tensor([eps])\n if x.is_cuda:\n eps = eps.cuda()\n \n return F.kl_div(\n torch.max(F.softmax(x, dim=dim), eps).log(),\n y, reduction='sum'\n ) / x.shape[0]",
"def kl_div_prior_gradient(self, posterior_logits, posterior_binary_samples):\n #DVAE Eq11 - gradient of prior\n #gradient of the KLD between posterior and prior wrt to prior\n #parameters theta, i.e. generative model parameters.\n #logits to probabilities\n posterior_probs=torch.sigmoid(posterior_logits)\n positive_probs=posterior_probs.detach()\n \n #samples from posterior are labelled positive\n positive_samples=posterior_binary_samples.detach()\n\n n_split=positive_samples.size()[1]//2\n positive_samples_left,positive_samples_right=torch.split(positive_samples,split_size_or_sections=int(n_split),dim=1)\n \n #-z_left^t J z_right\n pos_first_term=torch.matmul(positive_samples_left,self.prior.get_weights())*positive_samples_right\n \n rbm_bias_left=self.prior.get_visible_bias()\n rbm_bias_right=self.prior.get_hidden_bias()\n rbm_bias=torch.cat([rbm_bias_left,rbm_bias_right])#self._h\n \n #this gives [42,400] size\n #- z^t h\n #TODO this uses positive probs. Should it not use positive samples?\n # FIXME an indication are the negative ones where samples are used! On\n #other hand this is the only place this this used\n pos_sec_term=positive_probs*rbm_bias\n # pos_sec_term=positive_samples*rbm_bias\n\n # Energy = -z_left^t J z_right - z^t h\n pos_kld_per_sample=-(torch.sum(pos_first_term,axis=1)+torch.sum(pos_sec_term,axis=1))\n #samples from rbm are labelled negative\n\n #rbm_samples Tensor(\"zeros:0\", shape=(200, 200), dtype=float32)\n #this returns the full RBM set: left and right nodes concatenated\n\n #TODO What are these samples here?\n #TODO what's the impact of doing gibbs sampling here? does this make\n #sense?\n rbm_samples=self.prior.get_samples_kld(approx_post_samples=positive_samples_left,n_gibbs_sampling_steps=1)\n negative_samples=rbm_samples.detach()\n\n # print(self.prior.get_weights())\n n_split=negative_samples.size()[1]//2\n negative_samples_left,negative_samples_right=torch.split(negative_samples,split_size_or_sections=int(n_split),dim=1)\n neg_first_term=torch.matmul(negative_samples_left,self.prior.get_weights())*negative_samples_right\n \n #FIXME see above, the positive case looks different. Why?\n neg_sec_term=negative_samples*rbm_bias\n neg_kld_per_sample=(torch.sum(neg_first_term,axis=1)+torch.sum(neg_sec_term,axis=1))\n \n kld_per_sample=pos_kld_per_sample+neg_kld_per_sample\n\n return kld_per_sample",
"def kl_divergence(self, samples):\n # Check size of input\n if not len(samples.shape) == 2:\n raise ValueError('Given samples list must be n x 2.')\n if samples.shape[1] != self._n_parameters:\n raise ValueError(\n 'Given samples must have length ' + str(self._n_parameters))\n\n best_mode = np.zeros(samples.shape[0])\n for i in range(samples.shape[0]):\n a_sample = samples[i, :]\n a_log_pdf = -np.inf\n a_max_index = -1\n for j, var in enumerate(self._vars):\n a_test_log_pdf = var.logpdf(a_sample)\n if a_test_log_pdf > a_log_pdf:\n a_log_pdf = a_test_log_pdf\n a_max_index = j\n best_mode[i] = a_max_index\n\n kl = np.zeros(len(self._vars))\n for i in range(len(self._vars)):\n y = np.array(samples[best_mode == i, :], copy=True)\n # when a mode has no points use all samples\n if y.shape[0] == 0:\n y = np.array(samples, copy=True)\n m0 = np.mean(y, axis=0)\n s0 = np.cov(y.T)\n s1 = self._covs[i]\n m1 = self._modes[i]\n s1_inv = np.linalg.inv(s1)\n if len(np.atleast_1d(s0)) > 1:\n kl[i] = 0.5 * (\n np.trace(np.matmul(s1_inv, s0)) +\n np.matmul(np.matmul(m1 - m0, s1_inv), m1 - m0) -\n np.log(np.linalg.det(s0)) +\n np.log(np.linalg.det(s1)) -\n self._n_parameters)\n else:\n kl[i] = 0.5 * (\n np.sum(s1_inv * s0) +\n (m1 - m0) * s1_inv * (m1 - m0) -\n np.log(s0) +\n np.log(s1) -\n 1)\n return kl"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
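The kullback_leibler document above is just the identity D_KL(x || y) = H(x, y) - H(x); a minimal sketch with made-up probability vectors, computed directly so it does not depend on the entropy/get_2D_bins helpers that are not shown in this entry:

import numpy as np

p = np.array([0.2, 0.5, 0.3])   # empirical distribution x
q = np.array([0.1, 0.6, 0.3])   # assumed population distribution y

h_pq = -p.dot(np.log2(q))   # cross entropy H(p, q)
h_p = -p.dot(np.log2(p))    # entropy H(p)
kl = h_pq - h_p             # D_KL(p || q) in bits

print(kl, np.sum(p * np.log2(p / q)))   # the closed form gives the same value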
Main method to get the dependent review IDs of a specific review request on ReviewBoard. | def main():
parameters = parse_parameters()
review_request_url = "%s/api/review-requests/%s/" % (REVIEWBOARD_URL,
parameters.review_id)
handler = ReviewBoardHandler()
review_request = handler.api(review_request_url)["review_request"]
review_ids = handler.get_dependent_review_ids(review_request)
if parameters.out_file:
with open(parameters.out_file, 'w') as f:
for r_id in review_ids:
f.write("%s\n" % (str(r_id)))
else:
for r_id in review_ids:
print("%s\n" % (str(r_id))) | [
"def __extract_review_ids(self, soup):\n try:\n id_tags = soup.find_all('div', attrs={'class':'review', 'itemprop':'reviews'})\n review_ids = [int(re.sub('review_', '', tag.get('id'))) for tag in id_tags]\n return review_ids\n except:\n raise",
"def get_review_request(self, rid):\r\n rsp = self.api_call('api/review-requests/%s/' % rid)\r\n return rsp['review_request']",
"def get_review_request(self, request_id, api_root):\n try:\n request = api_root.get_review_request(review_request_id=request_id)\n except APIError, e:\n raise CommandError(\"Error getting review request: %s\" % e)\n\n return request",
"def _get_user_reviews(self, user_id):\n return self.interactions[self.interactions.user_id == user_id].recipe_id",
"def fetch_reviews(self, rb_id, start=0, max_results=25):\r\n return self.api_call('/api/review-requests/%s/reviews/?start=%s&max-results=%s'\r\n % (rb_id, start, max_results))['reviews']",
"def _load_review_requests(self, data, *, users_map, groups_map,\n repositories_map):\n for review_request_data in data:\n submitter = users_map[review_request_data.pop('owner')]\n target_groups_data = review_request_data.pop('target_groups', [])\n target_people_data = review_request_data.pop('target_people', [])\n file_attachment_histories_data = review_request_data.pop(\n 'file_attachment_histories', [])\n diffsets_data = review_request_data.pop('diffsets', [])\n depends_on_data = review_request_data.pop('depends_on', [])\n entries_data = review_request_data.pop('entries', [])\n repository = review_request_data.pop('repository', None)\n\n if repository is not None:\n repository = repositories_map[repository]\n\n diffset_history = DiffSetHistory.objects.create()\n\n # Note that we can't use ReviewRequest.objects.create(), as that\n # is overridden and specialized, and we don't want to invoke\n # that behavior here.\n review_request = ReviewRequest(\n submitter=submitter,\n diffset_history=diffset_history,\n repository=repository,\n **review_request_data)\n review_request.save()\n\n # Load the target reviewers.\n if target_groups_data:\n review_request.target_groups.add(*[\n groups_map[_group_name]\n for _group_name in target_groups_data\n ])\n\n if target_people_data:\n review_request.target_people.add(*[\n users_map[_username]\n for _username in target_people_data\n ])\n\n if depends_on_data:\n review_request.depends_on.add(*depends_on_data)\n\n # Load the file attachment histories.\n file_attachments_map = self._load_file_attachment_histories(\n file_attachment_histories_data,\n review_request=review_request)\n\n if file_attachments_map:\n review_request.file_attachments.add(\n *file_attachments_map.values())\n\n # Load the diffsets.\n diffsets_map, filediffs_map = self._load_diffsets(\n diffsets_data,\n repository=repository,\n diffset_history=diffset_history)\n\n # Load the entries (reviews, change descriptions).\n self._load_entries(entries_data,\n review_request=review_request,\n users_map=users_map,\n diffsets_map=diffsets_map,\n filediffs_map=filediffs_map,\n file_attachments_map=file_attachments_map)",
"def get_review_request(self):\n if hasattr(self, '_review_request'):\n return self._review_request\n else:\n return self.get_review().review_request",
"def test_get_url_on_review_request(self) -> None:\n review_request = self.create_review_request()\n\n self.assertEqual(\n self.action.get_url(context=self._create_request_context(\n review_request=review_request,\n url_name='review-request-detail')),\n '/r/%s/diff/raw/' % review_request.display_id)",
"def get_completed_chart_reviews(self, request):\n reqParams = request.GET\n project = reqParams.get('project', None)\n cohort = reqParams.get('cohort', None)\n patient_id = reqParams.get('patient_id', None)\n queryset = self.search_chart_review_data(project, cohort, patient_id)\n page = self.paginate_queryset(queryset)\n data = []\n\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n data = serializer.data\n\n return self.get_paginated_response(data)",
"def get_review_requests(session):\n user = get_user(session)\n\n params = {\"q\": f\"is:open is:pr review-requested:{user} archived:false\"}\n response = session.get(\"https://api.github.com/search/issues\", params=params)\n response.raise_for_status()\n return response.json()",
"def build_indices(review_ids):\n\n review_indices = {}\n\n # Load qrel_abs_train txt file\n clef_data = pd.read_csv(config.TRAIN_QREL_LOCATION, sep=\"\\s+\", names=['review_id', 'q0', 'pmid', 'included'])\n\n # Get index of documents for each review\n for review_id in review_ids:\n index = clef_data.index[clef_data['review_id'] == review_id].tolist()\n\n # Get the range of index for all documents within each review\n review_indices[review_id] = (min(index), max(index) + 1)\n\n return review_indices",
"def get_review_status(pr_id):\n reviews = get_status_json(pr_id, 'reviews')\n requests = get_status_json(pr_id, 'reviewRequests')\n\n requested_authors = [r[\"login\"] for r in requests]\n\n review_status = {}\n for r in reviews:\n author = r['author']['login']\n date = datetime.fromisoformat(r['submittedAt'].strip('Z'))\n state = r['state']\n if author not in review_status:\n review_status[author] = ReviewComment(state, date, author)\n elif state != 'COMMENTED' and review_status[author].date < date:\n review_status[author] = ReviewComment(state, date, author)\n for a in review_status:\n if a in requested_authors:\n review_status[a] = ReviewComment('REVIEW_REQUESTED', review_status[a].date, a)\n for a in requested_authors:\n if a not in review_status:\n review_status[a] = ReviewComment('UNRESPONSIVE', None, a)\n return review_status, requested_authors",
"def get_review(review_id):\n\n reviewID = storage.get('Review', review_id)\n\n if reviewID is None:\n abort(404)\n return jsonify(reviewID.to_dict())",
"def _request_reviews(self, token, owner, repo, number, reviewers):\n post_data = {'reviewers': reviewers.split(',')}\n headers = {'Authorization': 'Basic ' + token}\n response = requests.post(\n flask.current_app.config['GITHUB_API_CREATE_REVIEW_REQUEST'].format(owner=owner, repo=repo, number=number),\n data=json.dumps(post_data), headers=headers)\n\n return response",
"def get_parent_rr(review_request_details, commit_data=None):\n commit_data = fetch_commit_data(review_request_details, commit_data)\n\n if not is_pushed(review_request_details, commit_data):\n return None\n\n if is_parent(review_request_details, commit_data):\n return review_request_details\n\n identifier = commit_data.get_for(review_request_details, IDENTIFIER_KEY)\n\n return ReviewRequest.objects.get(\n commit_id=identifier,\n repository=review_request_details.repository)",
"def find_reviews():\n print(\"***** Find Reviews of a Business *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n id = business_object['business_id']\n review_object = review_col.find({\"business_id\": id})\n print(f'{business_object[\"name\"]} has'\n f' {business_object[\"review_count\"]} '\n f'reviews:')\n for review in review_object:\n userid = review['user_id']\n print(f'- ({review[\"stars\"]}):'\n f' {review[\"text\"]}.'\n f' {review[\"date\"]}')",
"def get_reviewers(config): # type: (Config) -> List[str]\n phabricator = False\n finders = [\n FindLogReviewers,\n FindHistoricalReviewers,\n FindArcCommitReviewers\n ]\n reviewers = Counter() # type: typing.Counter[str]\n for finder in finders:\n finder_reviewers = finder(config).get_reviewers()\n if config.verbose:\n print(\n \"Reviewers from %s: %s\" %\n (finder.__name__, dict(finder_reviewers))\n )\n reviewers.update(finder_reviewers)\n if finder == FindArcCommitReviewers and finder_reviewers:\n phabricator = True\n\n most_common = [x[0] for x in reviewers.most_common()]\n most_common = [x for x in most_common if x not in config.ignores]\n if phabricator:\n most_common = FindArcCommitReviewers(config) \\\n .filter_phabricator_activated(most_common)\n reviewers_list = most_common[:REVIEWERS_LIMIT]\n return reviewers_list",
"def get_reviews(recipe_id=None):\n\n recipe = storage.get(Recipe, recipe_id)\n print(recipe)\n if not recipe:\n abort(404)\n reviews = []\n for review in recipe.reviews:\n reviews.append(review.to_dict())\n return jsonify(reviews)",
"def get_pr_reviews(api, urn, pr_num):\n params = {\n \"per_page\": settings.DEFAULT_PAGINATION\n }\n path = \"/repos/{urn}/pulls/{pr}/reviews\".format(urn=urn, pr=pr_num)\n data = api(\"get\", path, params=params)\n return data"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Initialize with a user-supplied list of segments. | def __init__(self, segments, lemma = None, case = None):
self.segments = segments
if isinstance(self.segments, str):
self.segments = [Segment.new_segment(s) for s in self.segments]
self.lemma = lemma
self.case = case | [
"def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()",
"def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])",
"def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = Segment()\n new_segment.save()\n name = \"%s_seg_%s\" % (self.PREFIX, new_segment.id)\n node = Node.objects(id=node_oid)[0]\n list_id = DripCampaign.objects(id=node[\"drip_campaign_id\"])[0][\"list_id\"]\n node.update(set__segment_oid=new_segment.id, set__updated_at=datetime.utcnow())\n\n # gather all users that apply for this node after triggers on previous nodes\n all_euids = set()\n if node[\"initial\"]:\n all_euids = set(List.objects(list_id=list_id)[0][\"members_euid\"])\n else:\n for trg in Trigger.objects(node_to=node_oid):\n for euids, to_node_oid in self.segment_by_triggers(trg[\"node_from\"]):\n if to_node_oid == node_oid:\n all_euids.update(set(euids))\n\n # # intersect euids with current state of the list\n # # it might be the case that some people are removed from the list since previous email\n self.fetch_members_for_list(list_id)\n all_euids = all_euids & set(List.objects(list_id=list_id)[0][\"members_euid\"])\n\n all_euids = list(all_euids)\n\n # apply the user list to segment n stuff\n # if user list is empty, save only meta info and don't actually work with mailchimp\n if all_euids:\n segment_id = self.mw.create_segment(list_id, name)\n self.mw.update_segment_members(list_id, segment_id, all_euids)\n else:\n segment_id = None\n new_segment.update(set__segment_id=segment_id, set__name=name, members_euid=all_euids,\n set__updated_at=datetime.utcnow())",
"def _construct_segments(self, center_points):\n for point in center_points:\n segment = Segment(point, self.image_open, self.image_dmlc, self.settings.tolerance)\n self.segments.append(segment)\n self._update_r_corrs()",
"def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))",
"def setSegments(self,segments):\n self.points=list(set([s.p1 for s in segments]+[s.p2 for s in segments]))\n l=len(self.points)\n self.network=np.zeros((l,l))\n for s in segments:\n i=self.points.index(s.p1)\n j=self.points.index(s.p2)\n if i>j: i,j=j,i\n self.network[i][j]=1",
"def test_get_user_segments(self):\n pass",
"def _populate_member_segments(self, context, network, segment_pairs, oper):\n LOG.debug('_populate_member_segments %s', segment_pairs)\n trunk_list = []\n for (segment, dot1qtag) in segment_pairs:\n net = self.get_network(context, segment)\n member_dict = {'segment': net['name'],\n 'dot1qtag': dot1qtag}\n trunk_list.append(member_dict)\n if oper == n1kv.SEGMENT_ADD:\n network['add_segment_list'] = trunk_list\n elif oper == n1kv.SEGMENT_DEL:\n network['del_segment_list'] = trunk_list",
"def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)",
"def _parse_segments(self):\n reader = csv.reader(open(self._segment_file, 'rU'),\n delimiter='\\t')\n for row in reader:\n if reader.line_num == 1: #skip header\n continue\n sql = '''INSERT INTO segments\n (id, multiplicon, genome, list, first, last, ord)\n VALUES (?,?,?,?,?,?,?)'''\n self._dbconn.execute(sql, row)\n self._dbconn.commit()",
"def _update_segments(self):\n self._validate_CFG()\n self._define_segments()\n # must redefine this Segments list,\n # the code does not work otherwise\n self.segments = [self.P, self.T, self.C,\n self.A1, self.A2, self.B1, self.B2,\n self.J1, self.J2, self.K1, self.K2]\n for s in self.segments:\n s.calc_properties()\n # Must update segment properties before updating the human properties.\n self.calc_properties()",
"def __init__(self, user_list):\n self._user_list = user_list",
"def update_linecollection(self, collection, segments):\n collection.set_segments([(start[0:2], end[0:2]) for start, end in segments])",
"def _trainBySegments(self, divisions, trainingSet):\n # train the subdomain ROMs\n counter, remainder = divisions\n roms = self._trainSubdomainROMs(self._templateROM, counter, trainingSet, self._romGlobalAdjustments)\n # if there were leftover domain segments that didn't go with the rest, train those now\n if remainder:\n unclusteredROMs = self._trainSubdomainROMs(self._templateROM, remainder, trainingSet, self._romGlobalAdjustments)\n roms = np.hstack([roms, unclusteredROMs])\n self._roms = roms",
"def init(self, points):\r\n super(Triangle, self).init(points)\r\n # add more segments for large triangle\r\n if self.element_id in ['1', '2']:\r\n self.segments += [\r\n Segment(p1=self.points[i], p2=self.segments[(i + 1) % 3].midpoint)\r\n for i in range(3)\r\n ]",
"def init(self):\n self.init_db_header()\n self.load_segments()",
"def splits_segments(self):\n self.detect_splits()\n self.postprocess_segments()\n self.set_segs_names()\n self.order_pseudotime()",
"def __init__(self, start_date=None, subusers=None): \n self._subusers = None\n super(SubuserStats, self).__init__()\n\n # Minimum required for subusers stats\n if start_date and subusers:\n self.start_date = start_date\n for subuser_name in subusers:\n self.add_subuser(Subuser(subuser_name))",
"def load_segments(self):\n logging.info(\"try to load the segments ... \")\n last = None\n s_tm = time.time()\n\n lines = self.src_handle.read().splitlines()\n for line in lines:\n logging.info(\"load segment: `{}`\".format(line))\n ps = line.split(\"|\", maxsplit=2)\n if len(ps) != 3:\n raise Exception(\"invalid ip segment line `{}`\".format(line))\n\n sip = util.check_ip(ps[0])\n if sip == -1:\n raise Exception(\n \"invalid ip address `{}` in line `{}`\".format(ps[0], line)\n )\n eip = util.check_ip(ps[1])\n if eip == -1:\n raise Exception(\n \"invalid ip address `{}` in line `{}`\".format(ps[1], line)\n )\n\n if sip > eip:\n raise Exception(\n \"start ip({}) should not be greater than end ip({})\".format(\n ps[0], ps[1]\n )\n )\n if len(ps[2]) < 1:\n raise Exception(\"empty region info in segment line `{}`\".format(line))\n\n segment = seg.Segment(sip=sip, eip=eip, reg=ps[2])\n # Check the continuity of data segment\n if last is not None:\n if last.end_ip + 1 != segment.start_ip:\n raise Exception(\n \"discontinuous data segment: last.eip+1({})!=seg.sip({}, {})\".format(\n sip, eip, ps[0]\n )\n )\n self.segments.append(segment)\n last = segment\n logging.info(\n \"all segments loaded, length: {}, elapsed: {}\".format(\n len(self.segments), time.time() - s_tm\n )\n )"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create a WordForm of the given CV shape with random segments. | def random_segs(cls, shape, lemma = None, case = None):
# For each C or V segment in `shape`, initialize a random Segment of the
# appropriate type. Initialize a new WordForm with all these Segments.
return cls([Segment(seg_type = seg) for seg in shape], lemma, case) | [
"def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\n while not any(letter in self.vowels for letter in word):\r\n length = len(word)\r\n if length == 1:\r\n index = 0\r\n elif length == 2:\r\n index = random.randrange(0, 2)\r\n else:\r\n a = len(word) / 2\r\n index = a + random.randrange(-a / 2, a / 2)\r\n word = word[:index] + self.get_letter(100) + word[index + 1:]\r\n\r\n if random.random() > self.capital_chance:\r\n word = word.capitalize()\r\n self.words.append(word)\r\n self.word_count += 1\r\n return word",
"def build(self,\n wv, # wvo = an intance of dense word vectors\n sense_dim_num=10000, # unused\n save_pkl=True, # unused\n norm_type=\"sum\",\n weight_type=\"score\",\n max_cluster_words=20):\n\n # initialize the sense vectors model\n vector_dim = wv.vectors.syn0.shape[1]\n senses_num = self.pcz.get_num_senses()\n sv = SenseGram(size=vector_dim, sorted_vocab=0)\n sv.create_zero_vectors(senses_num, vector_dim)\n sense_count = 0\n\n # fill the sense vectors model\n for word in self.pcz.data:\n for sense_id in self.pcz.data[word]:\n # try to build sense vector for a word sense\n try:\n sense_count += 1\n if sense_count % 10000 == 0: print(sense_count, \"senses processed\")\n\n sense_vector = np.zeros(wv.vectors.syn0[0].shape, dtype=np.float32) # or the word vector?\n\n non_oov = 0\n for i, cluster_word in enumerate(self.pcz.data[word][sense_id][\"cluster\"]):\n if i >= max_cluster_words: break\n \n # define the weight\n if weight_type == \"ones\": weight = 1.0\n elif weight_type == \"score\": weight = float(self.pcz.data[word][sense_id][\"cluster\"][cluster_word])\n elif weight_type == \"rank\": weight = 1.0 / (i + 1)\n else: weight = float(self.pcz.data[word][sense_id][\"cluster\"][cluster_word])\n \n if weight == 0:\n print(\"Warning: zero weight:\", cluster_word, end=' ') \n \n # define the word\n if cluster_word in wv.vectors.vocab:\n cw = cluster_word\n elif cluster_word.split(\"#\")[0] in wv.vectors.vocab:\n cw = cluster_word.split(\"#\")[0]\n else:\n if self.VERBOSE:\n print(\"Warning: word is OOV: '%s'\" % (cluster_word), file=stderr)\n \n compounds = cluster_word.split(\"#\")[0].split(\"_\")\n for cw in compounds:\n if cw in wv.vectors.vocab and len(cw) > 3:\n if self.VERBOSE: print(\"Warning: adding a compound '{}' of '{}'\".format(cw, cluster_word))\n sense_vector += (weight/len(compounds)) * wv.vectors[cw] \n non_oov += 1\n \n continue \n\n non_oov += 1\n sense_vector += weight * wv.vectors[cw]\n\n if non_oov == 0:\n if self.VERBOSE: print(\"Warning: sense is OOV: %s#%s\" % (word, sense_id), file=stderr)\n\n normalizer = self._normalizer(word, sense_id, norm_type, weight_type, max_cluster_words)\n sense_vector = sense_vector / normalizer\n sense_prob = self.pcz.get_sense_prob(word, sense_id)\n sv.add_sense(word, sense_id, sense_vector, sense_prob)\n except:\n print(\"Cannot process sense:\", word, sense_id)\n print(format_exc())\n\n # serialize the sense vector model\n sv.save_word2vec_format(self.sense_vectors_bin_fpath, fvocab=None, binary=False)\n\n print(\"Sense vectors:\", self.sense_vectors_bin_fpath)\n print(\"Created %d sense vectors\" % sense_count)\n\n return sv",
"def main():\n\n # read input images from 'in' directory\n imgFiles = os.listdir('data_in/')\n for (i, f) in enumerate(imgFiles):\n print('Segmenting words of sample %s' % f)\n\n # read image, prepare it by resizing it to fixed height and converting it to grayscale\n img = prepareImg(cv2.imread('data_in/%s' % f), 50)\n\n # execute segmentation with given parameters\n # -kernelSize: size of filter kernel (odd integer)\n # -sigma: standard deviation of Gaussian function used for filter kernel\n # -theta: approximated width/height ratio of words, filter function is distorted by this factor\n # - minArea: ignore word candidates smaller than specified area\n res = wordSegmentation(img, kernelSize=25, sigma=11, theta=7, minArea=100)\n\n # write output to 'out/inputFileName' directory\n if not os.path.exists('out/%s' % f):\n os.mkdir('out/%s' % f)\n\n # iterate over all segmented words\n print('Segmented into %d words' % len(res))\n for (j, w) in enumerate(res):\n (wordBox, wordImg) = w\n (x, y, w, h) = wordBox\n cv2.imwrite('out/%s/%d.png' % (f, j), wordImg) # save word\n cv2.rectangle(img, (x, y), (x + w, y + h), 0, 1) # draw bounding box in summary image\n\n # output summary image with bounding boxes around words\n cv2.imwrite('out/%s/summary.png' % f, img)",
"def create_wordcloud(self, text):\n text = ' '.join(f\"{word}\" for word in text)\n mask = np.array(Image.open(os.path.join(CURRDIR, \"cloud.png\")))\n wc = WordCloud(background_color=\"white\",\n max_words=200,\n mask=mask)\n wc.generate(text)\n wc.to_file(PATH_TO_SAVE_IMG, \"wordle.png\")",
"def create_random_text(word_count=10):\n sample_text_lst = TEXT_BASE_RUS.replace('\\n', '').split(' ')\n generate_text_lst = []\n for i in range(word_count):\n generate_text_lst.append(random.choice(sample_text_lst))\n generate_text = ' '.join(generate_text_lst)\n return generate_text",
"def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()",
"def makeFeatureVec(words, model, num_features=150):\r\n featureVec = np.zeros((num_features,))\r\n num_words = 0.\r\n index2word_set = set(model.wv.index2word)\r\n for word in words:\r\n if word in index2word_set:\r\n num_words += 1\r\n featureVec = np.add(featureVec,model[word]) \r\n featureVec = np.divide(featureVec,num_words)\r\n return featureVec",
"def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))",
"def generate(fit_model):\n pred_indices = []\n pred_words = []\n # Replace start_index with actual start token\n start_index = random.randint(0, len(text) - maxlen - 1)\n current_vec = glove_matrix.get_vec(start_index)\n\n for iteration in range(NUM_PRED_WORDS):\n preds = fit_model.predict(current_vec, verbose=0)\n pred_index = sample(preds)\n pred_indices = pred_indices + [next_index]\n pred_words = pred_words + [glove_matrix.get_word(pred_index)]\n current_vec = glove_matrix.get_vec(pred_index)\n\n assert NUM_PRED_WORDS == len(pred_words)\n return pred_words",
"def generate(self, count=15):\n\n sentence = []\n print(\"self.word_dict\", self.word_dict)\n for i in range(count):\n first_tuple = random.choice(list(self.word_dict.keys())) # first word for our sentence\n first_word = random.choice(first_tuple)\n sentence.append(first_word)\n second_word = self.word_dict[first_tuple]\n # print(\"second_word\", second_word)\n next_word = second_word.sample()\n # print(\"next_word\", next_word)\n # first_tuple = second_word\n sentence.append(next_word)\n # end_tuple =\n sentence = ' '.join(sentence)\n return sentence + \".\"\n # for i in range(len(self.token)):\n # val = list(self.word_dict.values())[i]\n # print(len(val))\n # # print(\"val\", val)\n # next_word = val.sample()\n # sentence.append(next_word)\n # sentence = ' '.join(sentence)\n # return sentence + \".\"",
"def generate_wordcloud(topic_description, use_mask='rectangle', store_to_file=False):\n\n # transform the topic description in frequencies\n topic_frequencies = get_word_frequencies(topic_description)\n\n if use_mask == 'oval':\n mask = numpy.array(Image.open(os.path.join(config.__resources_folder_path, \"oval.jpg\")))\n else:\n mask = numpy.array(Image.open(os.path.join(config.__resources_folder_path, \"rect.png\")))\n\n wc = WordCloud(background_color=\"white\", max_words=2000, mask=mask)\n # generate word cloud\n wc.generate_from_frequencies(topic_frequencies)\n\n if store_to_file:\n # store to file\n wc.to_file(os.path.join(config.__inputs_outputs_folder_path, \"wordcloud_{0}_{1}.png\".format(\n hash(str(topic_description)), use_mask)))\n\n # show\n plt.imshow(wc, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show()",
"def build_from_words(self, words):\n if isinstance(words, unicode):\n self.build(words)\n elif isinstance(words, list):\n flag = \"seg\"\n assert len(words) > 0\n\n word = words[0]\n if isinstance(word, unicode):\n flag = \"seg\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 2 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"pos\"\n elif ((isinstance(word, list) or isinstance(word, tuple)) and\n len(word) == 4 and isinstance(word[0], unicode) and isinstance(word[1], unicode)):\n flag = \"dp\"\n else:\n flag = \"unknown\"\n\n self._xml4nlp = Element('xml4nlp')\n self._note = SubElement(self._xml4nlp, 'note')\n self._doc = SubElement(self._xml4nlp, 'doc')\n\n para = SubElement(self._doc, 'para')\n sent = SubElement(para, 'sent')\n\n para.set(\"id\", \"0\")\n sent.set(\"id\", \"0\")\n\n self._clean_note()\n\n if flag == \"seg\":\n for i, word in enumerate(words):\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word\n }))\n sent.set('cont', (\"\".join(words)))\n self._set_word_on_note()\n elif flag == \"pos\":\n for i, word_pos in enumerate(words):\n word, pos = word_pos\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_pos_on_note()\n elif flag == \"dp\":\n for i, rep in enumerate(words):\n word, pos, head, dep_rel = rep\n sent.append(Element('word', {\n 'id': unicode(i),\n 'cont': word,\n 'pos': pos,\n 'parent': str(int(head) - 1),\n 'relation': dep_rel\n }))\n sent.set('cont', (\"\".join([word[0] for word in words])))\n self._set_parser_on_note()\n\n self.dom = self._xml4nlp",
"def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec",
"def create_feature_vectors(doc, words):\n feature_vectors = []\n labels = []\n feature_vector_dict = {}\n total_line_count = get_line_count(doc)\n with open(doc, 'r') as f:\n prev_word = None\n for idx, word in enumerate(f):\n if (prev_word is None) or (prev_word == '\\n'):\n \"\"\"New sentence so the first word will be a label. Append the label\"\"\"\n labels.append(word.rstrip('\\n'))\n elif (word == '\\n') or (idx == total_line_count):\n \"\"\"\n If end of the sentence, then add to feature_vectors and reset the dictionary\n \"\"\"\n feature_vectors.append(feature_vector_dict)\n feature_vector_dict = {}\n else:\n \"\"\"\n First, check that the word is in the feature words\n Second, if so, add the word to the dictionary. If it already exists, increment count, else add it\n \"\"\"\n word_stripped = word.rstrip('\\n')\n if word_stripped in words:\n word_stripped_idx = words.index(word_stripped) + 1 # The example in the assignment indicates 1 index (not 0)\n if word_stripped_idx not in feature_vector_dict:\n feature_vector_dict[word_stripped_idx] = 1\n else:\n feature_vector_dict[word_stripped_idx] += 1\n prev_word = word\n return feature_vectors, labels",
"def make_word_vector(self, word: str):\n output_vector = np.zeros(self.dimension, dtype=self.field)\n for pos in range(len(word)):\n letter = word[pos]\n hex_key = int(letter.encode().hex(), 16)\n if hex_key not in self.character_vectors:\n self.character_vectors[hex_key] = vu.create_dense_random_vector(\n self.dimension, seed=hex_key, field=self.field)\n output_vector += vu.bind(\n self.gvf.get_vector_for_proportion((pos + 0.5) / (len(word))),\n self.character_vectors[hex_key],\n field=self.field)\n return output_vector",
"def gen_words(self, doc):\n pattern = re.compile(u'[\\\\s\\\\d,.<>/?:;\\'\\\"[\\\\]{}()\\\\|~!@#$%^&*\\\\-_=+a-zA-Z,。《》、?:;“”‘’{}【】()…¥!—┄-]+')\n doc = re.sub(pattern, ' ', doc)\n suffix_indexes = index_of_sorted_suffix(doc, self.max_word_len)\n word_cands = {}\n # compute frequency and neighbors\n for suf in suffix_indexes:\n word = doc[suf[0]:suf[1]]\n if word not in word_cands:\n word_cands[word] = WordInfo(word)\n word_cands[word].update(doc[suf[0] - 1:suf[0]], doc[suf[1]:suf[1] + 1])\n # compute probability and entropy\n length = len(doc)\n for k in word_cands:\n word_cands[k].compute(length)\n word_cands[k].compute_pp(self.pos_prop)\n # compute aggregation of words whose length > 1\n values = sorted(word_cands.values(), key=lambda x: len(x.text))\n for v in values:\n if len(v.text) == 1:\n continue\n v.compute_cohesion(word_cands)\n\n return sorted(values, key=lambda v: v.freq, reverse=True)",
"def generate(self, length=100):\n\n seed_no = randint(0,len(self.tagged_words)-self.pos_n) # choose random seed\n output = [self.tagged_words[seed_no+x] for x in range(self.pos_n-1)]\n for x in range(self.pos_n-1, length):\n next_pos_key = tuple([tw.pos for tw in output[-(self.pos_n-1):]])\n next_pos_choices = self.pos_dictionary[next_pos_key]\n next_word_key = tuple(output[-(self.word_n-1):])\n next_picked = False\n while not next_picked:\n shuffle(next_pos_choices)\n next_pos = next_pos_choices.pop()\n choices = self.get_word_by_pos(self.word_dictionary[next_word_key], next_pos)\n if choices:\n output.append(choice(choices))\n next_picked = True\n else:\n pass\n\n return \" \".join([tw.word for tw in output])",
"def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text",
"def generate(model, n_words, text_seed=None, random_seed=None):\n #control randommization for constant generation of tokens\n np.random.seed(random_seed)\n n = model.order\n if text_seed is None:\n text_seed = [\"<s>\"]*(n-1)\n if len(text_seed)!=n-1:\n raise ValueError(f\"Inconsistency in the size of text_seed, got {len(text_seed)}, expected {n}\")\n\n text_words = []\n regenerate = True\n while regenerate:\n tokens_generated = list(model.generate(n_words, text_seed, random_seed))\n regenerate = False\n if \"</s>\" in tokens_generated:\n i = tokens_generated.index(\"</s>\")\n if i < n_words - model.order:\n # remaining number of words to generate next!\n n_words -= i\n # keep the first sentence!\n tokens_generated = tokens_generated[:i]\n # change the seed to have a new begining for the new sentence!\n if not random_seed is None:\n random_seed = np.random.randint(1000)\n # keep looping\n regenerate = True\n text_words+=tokens_generated\n return \" \".join(text_words)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add the suffix vowel. | def add_suffix(self, suffix):
# Append the suffix vowel to this WordForm.
self.segments.append(Segment.new_segment(suffix)) | [
"def _replace_suffix(self, word, suffix, replacement):\n ...",
"def add_letter_suffix(self, letter_char, suffix_char):\n letter = self.add_letter(letter_char)\n suffix = self.add_letter(suffix_char)\n letter.add_suffix(suffix)",
"def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest",
"def get_vowel_names():",
"def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement",
"def put_accent_onlast(s):\n accented = accented_vowel_map.get(s[-2:], s[-2:])\n return s[:-2] + accented",
"def _contains_vowel(self, stem):\n ...",
"def add_suffix_if_consonant(inp):\n return inp + SUFFIX + inp.lower() if is_consonant(inp) else inp",
"async def vowelreplace(self, ctx: commands.Context, replace: str, *, msg: str) -> None:\n result = \"\"\n for letter in msg:\n result += replace if letter.lower() in \"aeiou\" else letter\n await ctx.send(result)",
"def last_char_to_aou(word):\n assert isinstance(word, str)\n ch = last_char_to_vowel(word)\n if ch in \"aou\":\n return \"a\"\n return \"ä\"",
"def __vowel_change(self, letter):\n if letter in self.VOWELS:\n letters = list(self.VOWELS - set((letter)))\n letter = random.choice(letters)\n return letter",
"def trans_vowel(wrd: str, capitalize: bool) -> str:\n wrd, mark = punctuation(wrd)\n trans = '{}way'.format(wrd)\n if mark:\n trans += mark\n if capitalize:\n return trans.capitalize()\n return trans",
"def _ends_with_vowel(self, letter_group: str) -> bool:\n if len(letter_group) == 0:\n return False\n return self._contains_vowels(letter_group[-1])",
"def add_ending(file, suffix, ext, delimiter=\"-\"):\n assert isinstance(file, str), \"File must be a string.\"\n assert isinstance(suffix, str), \"Suffix must be a string.\"\n assert isinstance(ext, str), \"Extension must be a string.\"\n assert isinstance(delimiter, str), \"Delimiter must be a string.\"\n\n path, fullname = os.path.split(file)\n name, ext_orig = os.path.splitext(fullname)\n parts = name.split()\n if suffix:\n parts.append(suffix)\n if ext:\n newname = delimiter.join(parts) + ext\n else:\n newname = delimiter.join(parts) + ext_orig\n\n return os.path.join(path, newname)",
"def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'",
"def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist",
"def add_suffix(self, suffix: str):\n self.target = tuple(var + suffix for var in self.target)\n\n self.scope = tuple(\n var\n if any(\n var in pa and isinstance(self.parents[pa], BackwardFrameReference)\n for pa in self.parents\n )\n else var + suffix\n for var in self.scope\n )",
"def upper_vowel(s):\n for k, v in REPLACED_MAP.iteritems():\n s = s.replace(k, v)\n return s",
"def last_char_to_vowel(word):\n assert isinstance(word, str)\n # We iterate over characters of the word, because the last might be a\n # punctuation, perhaps.\n for last in reversed(word):\n last = last.lower()\n for ch, prev in ((\"a\", \"a/+£\"),\n (\"e\", \"eébcçdgptvwz&*:.\"),\n (\"o\", \"ohk€å\"),\n (\"ä\", \"äflmnrsx§\"),\n (\"ö\", \"ö\"),\n (\"i\", \"ij%$\"),\n (\"u\", \"uq,\"),\n (\"y\", \"yü\")):\n if last in prev:\n return ch\n return \"e\""
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Entrench at the level of the WordForm. | def entrench_word(self, cloud, paradigms, informativity, categorization,
unique_base):
# Entrench within the WordForm's own cloud. Iterate over positions in
# the WordForm (up to three Segments).
for pos, seg in enumerate(self.segments):
if pos < 3:
# Iterate over features.
for feat in seg.features:
if uniform(0, 1) < probability_of_analogy:
# Collect other values of the feature across the cloud.
# Since this is the WordForm's own cloud, set all the
# weights to 1.
wv = [(e.segments[pos].features[feat], 1)
for e in cloud
if e.lemma == self.lemma
and e.case == self.case]
# Entrench the segment based on these values.
seg.entrench_feature(feat, wv,
top_value = self_top_value,
max_movement = self_max_movement)
# Entrench within other clouds of the same paradigm.
if paradigms:
# Iterate over positions in the WordForm (up to three Segments).
for pos, seg in enumerate(self.segments):
if pos < 3:
# Iterate over features.
for feat in seg.features:
if uniform(0, 1) < (probability_of_analogy *
paradigm_weight):
# Get the weight for each case.
weights = dict()
# If informativity is measured via the entropy
# method, the weight of a case is proportional to
# the entropy of the feature across all lemmas of
# that case.
if informativity == 'entropy':
weights = {c: entropy(feat, [e.segments[pos].\
features[feat]
for e in cloud
if e.case == c])
for c in cases}
# If informativity is measured via a classification
# algorithm, the weight of a case is proportional to
# the performance of the classifier on lemmas within
# that case using just the current feature.
elif informativity == 'classification':
weights = {c: performance([e
for e in cloud
if e.case == c],
positions = [pos],
features = [feat],
method = categorization)
for c in cases}
# If informativity is not measured, set the weights
# of all cases to 1.
elif informativity == 'none':
weights = {c: 1
for c in cases}
# If paradigms are required to have a unique base,
# the winner takes all the weight.
if unique_base:
max_weight = max(weights.values())
for c in weights:
if weights[c] < max_weight:
weights[c] = 0
# Collect other values of the feature across the
# cloud, and pair them with their weights.
wv = [(e.segments[pos].features[feat],
weights[e.case])
for e in cloud
if e.lemma == self.lemma
and e.case != self.case]
# Entrench the segment based on these values.
seg.entrench_feature(feat, wv,
top_value = paradigm_top_value,
max_movement = paradigm_max_movement) | [
"def set_level(self,level):\r\n \r\n self.level = level",
"def level_up(self, elemental) -> None:\n elemental.add_exp(elemental.exp_to_level)",
"def _page_update_higher_textequiv_levels(level, pcgts):\n regions = pcgts.get_Page().get_TextRegion()\n if level != 'region':\n for region in regions:\n lines = region.get_TextLine()\n if level != 'line':\n for line in lines:\n words = line.get_Word()\n if level != 'word':\n for word in words:\n glyphs = word.get_Glyph()\n word_unicode = u''.join(glyph.get_TextEquiv()[0].Unicode\n if glyph.get_TextEquiv()\n else u'' for glyph in glyphs)\n word.set_TextEquiv(\n [TextEquivType(Unicode=word_unicode)]) # remove old\n line_unicode = u' '.join(word.get_TextEquiv()[0].Unicode\n if word.get_TextEquiv()\n else u'' for word in words)\n line.set_TextEquiv(\n [TextEquivType(Unicode=line_unicode)]) # remove old\n region_unicode = u'\\n'.join(line.get_TextEquiv()[0].Unicode\n if line.get_TextEquiv()\n else u'' for line in lines)\n region.set_TextEquiv(\n [TextEquivType(Unicode=region_unicode)]) # remove old",
"def set_level ( self, level ):\n self.level = level\n self.indent = self.level * INDENT\n if hasattr ( self.value, 'set_level' ):\n self.value.set_level ( level + 1 )",
"def __change_level(self, level):\n self.level = level",
"def clean_level_(self):\n try:\n # Get the verb categories of the taxonomy\n verb_cats = VerbCategory.objects.filter(taxonomy=self.taxonomy)\n except Taxonomy.DoesNotExist:\n raise Http404('The taxonomy does not exist!')\n else:\n\n # Check categories for the entered level value\n submitted_level = self.cleaned_data.get('level', None)\n\n # if updating, need to allow the original level value to be re-entered\n old_level = None if not self.old_category else self.old_category.level\n\n if submitted_level in [cat.level for cat in verb_cats.all()\\\n if cat.level != old_level]:\n culprit = verb_cats.get(level=submitted_level)\n raise forms.ValidationError(f'The verb category \"{culprit.title}\" \\\n already has this value!')\n\n return submitted_level",
"def addLevel(self):\n pass",
"def resetWordLevel(self, ID):\n\t\tcommand = \"UPDATE words SET level=0 WHERE ID=?\"\n\t\tparams = (ID,)\n\n\t\tself._run_command(command, params)",
"def setExpForLevel(self):\n self._expToNextLevel = 2 ** (9 + self.getLevel())",
"def update_w(self):\n if self.flagged:\n self._w.attr = \"flagged\"\n self._w.focus_attr = \"flagged focus\"\n else:\n self._w.attr = \"body\"\n self._w.focus_attr = \"focus\"",
"def Lern(self):\n # TODO: inverting status\n self.db.SetLerning(self.currentWord['id'])",
"def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)",
"def end_form(self):\r\n self.inside_form = False",
"def click_informes_legales(self):\n self.button.click(menu_catalog.M0503_LEGALES)",
"def __editUnindent(self):\n self.activeWindow().unindentLineOrSelection()",
"def newLevel(self):\n if self.level < 4:\n self.level += 1\n message = \"Congratulations! You have reached new level. You are now at \" + levels[self.level] + \" level\"\n messagebox.showinfo(\"New level reached\", message)\n else:\n self.level = 0\n message = \"Congratulations! You are now master of Hangman game, a Yoda of words!. Game will now start\" \\\n \" again in the demonstration level\"\n messagebox.showinfo(\"New level reached\", message)\n self._Hangman__words = self.readWords()",
"def set_up(self, lvl=None):\n self.hilbert_value = self.page[len(self.page)-1].hilbert_value\n if lvl is not None:\n self.level = lvl\n return",
"def odor_level(self):\n pass",
"def _edit_letter(self):\n self._fonteditor.setText(\"\")\n #self._fonteditor.textCursor().select(QtGui.QTextCursor.Document)\n self.stacker.setCurrentWidget(self._fonteditor)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Add noise to the non-suffix segments in the WordForm. | def add_noise(self):
self.segments = deepcopy(self.segments)
# Iterate through each of the first three Segments in the WordForm.
for i in range(3):
# Add noise to each Segment.
self.segments[i].add_noise() | [
"def _update_noise(self, nsig):\n self.integrator.noise.nsig = numpy.array([nsig, ])",
"def add_noise(self):\n \n self.vis_freq = self.skyvis_freq + self.vis_noise_freq",
"def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertain',\n 'estimate',\n 'link',\n 'and',\n 'more',\n 'fetch',\n 'be',\n 'there',\n 'do',\n 'you',\n 'have',\n 'any',\n 'is',\n 'my',\n 'on',\n 'can',\n 'i',\n 'get',\n 'some',\n 'am',\n 'look',\n 'for',\n 'the',\n 'to',\n 'share',\n 'me',\n 'of',\n 'please',\n 'a',\n 'very',\n 'at',\n 'with',\n 'relate',\n 'sorry'\n ]]\n return ' '.join(word)",
"def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()",
"def noise_augmentation(wave, noise_files):\n nb_noise_segments = 3\n aug_noise_files = []\n for i in range(nb_noise_segments):\n aug_noise_files.append(random.choice(noise_files))\n\n # aug_noise_files = np.random.choice(noise_files, 3, replace=False)\n dampening_factor = 0.4\n for aug_noise_path in aug_noise_files:\n (fs, aug_noise) = utils.read_wave_file(aug_noise_path)\n wave = wave + aug_noise*dampening_factor\n return wave",
"def noise(self, freq: int, /) -> None:",
"def add_suffix(self, suffix):\n # Append the suffix vowel to this WordForm.\n self.segments.append(Segment.new_segment(suffix))",
"def noise(self, stddev):\n #add noise to weights\n pass",
"def non_speech_removal(a):\n \n \n #Uses a knn model to identify is there is speech or a music/silence in the audio file at every second.\n #Extracts timestamps for those segments where there is music/silence.\n #Using that extracts segments where there is only speech.\n #Joins all speech segments together to form a new audio file ith only speech\n #Return the new audio.\n \n #pyAudioAnalysis/data/models/\n [flagsInd, classesAll, acc, CM] = mid_term_file_classification(a, \"knn_sm\", \"knn\", plot_results=True)\n\n\n sound = AudioSegment.from_file(a)\n multiplier = (len(sound)/len(flagsInd))\n\n\n t = []\n for sec, flag in enumerate(flagsInd):\n if flag == 1:\n t.append(sec)\n\n\n start=0\n end=0\n ts=[]\n for i, val in enumerate(t):\n if t[i]-t[i-1]>5:\n end=t[i-1]\n if start!=end:\n ts.append([start*multiplier, end*multiplier])\n start=t[i]\n end=t[i] \n\n \n ns = sound\n for t in ts:\n s1=ns[:t[0]]\n s2=ns[t[0]:t[1]]\n s3=ns[t[1]:]\n ns=s1+s3\n \n return ns",
"def generate_noise(self, dataset: GeneratedDataset) -> None:",
"def add_silence(self):\n\n self._generate_meta_data(audio_segment=True)\n # Adds three seconds to the end of the audio segment\n self.audio_segment += AudioSegment.silent(duration=3000)\n self.audio_segment.export(self.path, format=self.extension)\n self._generate_meta_data()",
"def add_noise(word, op):\n\n i = random.randint(0, len(word) - 1)\n # op = random.randint(0,12)\n # print(op)\n # i = 1\n if op == 0 or op == 13:\n if word[i] != '_' and len(word) >= 2:\n return word[:i] + word[i+1:]\n if op == 1 or op == 14:\n i += 1\n consonants = ['ch', 'tr', 'kh', 'nh', 'ng',\n 'qu', 'th', 'ph', 'gh', 'gi', 'ngh'] # 11\n tmp = [i for i in consonants if i in word]\n r = random.random()\n if len(tmp) > 0 and r > 0.1:\n tmp1 = random.choice(tmp)\n if tmp1 == 'ngh':\n return word.replace(tmp1, random.choice(consonant_trigraphs(tmp1)))\n else:\n return word.replace(tmp1, consonant_digraphs(tmp1))\n elif i <= len(word) - 1:\n return word[:i-1] + word[i:i+1] + word[i-1:i] + word[i+1:]\n else:\n return word\n if op == 2 or op == 15:\n i += 1\n if i <= len(word) - 1:\n return word[:i] + '_' + word[i:]\n else:\n return word\n if op == 3 or op == 16:\n idx = word.find(\"_\") # check xem co underscore khong\n if idx != -1:\n list_underscore = [m.start() for m in re.finditer('_', word)]\n idx1 = random.choice(list_underscore)\n return word[:idx1] + word[idx1 + 1:]\n else:\n return word\n if op == 4:\n string_list = string.ascii_lowercase\n if word[i] in string_list:\n return word[:i] + random.choice(get_prox_keys(word[i])) + word[i+1:]\n else:\n return word\n # tam thoi coi 2 cai duoi la 1\n if op == 5:\n string_list = 'àảãáạăằẳẵắặâầẩẫấậđèẻẽéẹêềểễếệìỉĩíịòỏõóọôồổỗốộơờởỡớợùủũúụưừửữứựỳỷỹýỵ'\n syllable = word.split('_')\n tmp = []\n for syllable_i in syllable:\n letter = [str(i) for i in string_list if i in syllable_i]\n for letter_i in letter:\n tmp1 = noise_telex(letter_i)\n # print(tmp1)\n r = random.random()\n if r < 0.9:\n rr = random.random()\n if rr > 0.5:\n if type(tmp1[0]).__name__ == 'str':\n # print('1')\n syllable_i = syllable_i.replace(letter_i, tmp1[0])\n\n else:\n # print('2')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[0][0])\n # return syllable_i\n\n else:\n if type(tmp1[1]).__name__ == 'str':\n # print('3')\n syllable_i = syllable_i.replace(letter_i, tmp1[1])\n\n else:\n # print('4')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[1][0]) + tmp1[1][1]\n\n # return syllable_i\n else:\n rr = random.random()\n if rr > 0.5:\n if type(tmp1[0]).__name__ == 'list':\n # print('5')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[0][1])\n\n else:\n # print('6')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[2][0]) + random.choice(tmp1[2][1:])\n tmp.append(syllable_i)\n if len(tmp) > 0:\n return \"_\".join(tmp)\n else:\n return word\n\n if op == 6:\n string_list = 'àảãáạăằẳẵắặâầẩẫấậđèẻẽéẹêềểễếệìỉĩíịòỏõóọôồổỗốộơờởỡớợùủũúụưừửữứựỳỷỹýỵ'\n syllable = word.split('_')\n tmp = []\n for syllable_i in syllable:\n letter = [str(i) for i in string_list if i in syllable_i]\n for letter_i in letter:\n tmp1 = noise_vni(letter_i)\n # print(tmp1)\n r = random.random()\n if r < 0.9:\n rr = random.random()\n if rr > 0.5:\n if type(tmp1[0]).__name__ == 'str':\n # print('1')\n syllable_i = syllable_i.replace(letter_i, tmp1[0])\n\n else:\n # print('2')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[0][0])\n # return syllable_i\n\n else:\n if type(tmp1[1]).__name__ == 'str':\n # print('3')\n syllable_i = syllable_i.replace(letter_i, tmp1[1])\n\n else:\n # print('4')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[1][0]) + tmp1[1][1]\n\n # return syllable_i\n else:\n rr = random.random()\n if rr > 0.5:\n if type(tmp1[0]).__name__ == 'list':\n # print('5')\n syllable_i = syllable_i.replace(\n letter_i, tmp1[0][1])\n\n else:\n # print('6')\n syllable_i = 
syllable_i.replace(\n letter_i, tmp1[2][0]) + random.choice(tmp1[2][1:])\n tmp.append(syllable_i)\n if len(tmp) > 0:\n return \"_\".join(tmp)\n else:\n return word\n if op == 7:\n l = word[i]\n vowel = 'ouieay'\n if i >= 1 and l in vowel:\n return word[:i] + random.randint(1, 5) * l + word[i+1:]\n else:\n return word\n\n # thay doi o dau\n if op == 8 or op == 17 or op == 18:\n string_list1 = ['l', 'n', 'x', 's', 'r', 'd', 'v']\n string_list2 = [\"ch\", 'tr', 'gi']\n syllable = word.split(\"_\")\n for i, syllable_i in enumerate(syllable):\n r = random.random()\n if r > 0.5:\n if len(syllable_i) >= 1 and syllable_i[0] in string_list1:\n wo = random.choice(closely_pronunciation1(syllable_i[0]))\n syllable[i] = wo + syllable_i[1:]\n elif len(syllable_i) >= 2 and syllable_i[0]+syllable_i[1] in string_list2:\n wo = random.choice(closely_pronunciation1(\n syllable_i[0] + syllable_i[1]))\n syllable[i] = wo + syllable_i[2:]\n return \"_\".join(syllable)\n # thay doi o cuoi\n # co mot vai truong hop rieng thoi\n # saigon phonology\n if op == 9 or op == 19 or op == 20:\n string_list1 = ['inh', 'ênh', 'iên', 'ươn', 'uôn', 'iêt', 'ươt', 'uôt']\n string_list2 = ['ăn', 'an', 'ân', 'ưn', 'ắt', 'ât', 'ưt', 'ôn', 'un',\n 'ât', 'ưt', 'ôn', 'un', 'ôt', 'ut']\n syllable = word.split(\"_\")\n tmp = []\n for syllable_i in syllable:\n if len(syllable_i) >= 3 and syllable_i[len(syllable_i) - 3:] in string_list1:\n syllable_i = syllable_i[:len(\n syllable_i) - 3] + saigon_final3(str(syllable_i[len(syllable_i) - 3:]))\n tmp.append(syllable_i)\n elif len(syllable_i) >= 2 and syllable_i[len(syllable_i) - 2:] in string_list2:\n syllable_i = syllable_i[:len(\n syllable_i) - 2] + saigon_final2(str(syllable_i[len(syllable_i) - 2:]))\n tmp.append(syllable_i)\n if len(tmp) > 0:\n return \"_\".join(tmp)\n else:\n return word\n\n if op == 10 or op == 21:\n string_list = ['ã', 'ả',\n 'ẫ', 'ẩ',\n 'ẵ', 'ẳ',\n 'ẻ', 'ẽ',\n 'ể', 'ễ',\n 'ĩ', 'ỉ',\n 'ũ', 'ủ',\n 'ữ', 'ử',\n 'õ', 'ỏ',\n 'ỗ', 'ổ', 'ỡ', 'ở']\n swap = {'ã': 'ả', 'ả': 'ã', 'ẫ': 'ẩ', 'ẩ': 'ẫ',\n 'ẵ': 'ẳ', 'ẳ': 'ẵ', 'ẻ': 'ẽ', 'ẽ': 'ẻ', 'ễ': 'ể', 'ể': 'ễ',\n 'ĩ': 'ỉ', 'ỉ': 'ĩ', 'ũ': 'ủ', 'ủ': 'ũ', 'ữ': 'ử', 'ử': 'ữ',\n 'õ': 'ỏ', 'ỏ': 'õ', 'ỗ': 'ổ', 'ổ': 'ỗ', 'ỡ': 'ở', 'ở': 'ỡ'}\n tmp = [i for i in string_list if i in word]\n for letters in tmp:\n word = word.replace(letters, swap[letters])\n return word\n\n if op == 11 or op == 22 or op == 23:\n string_list0 = ['ngh']\n string_list1 = ['gh', 'ng']\n string_list2 = ['g', 'c', 'q', 'k']\n syllable = word.split(\"_\")\n for i, syllable_i in enumerate(syllable):\n r = random.random()\n if r > 0.5:\n if len(syllable_i) >= 3 and syllable_i[0] + syllable_i[1] + syllable_i[2] in string_list0:\n wo = random.choice(like_pronunciation2(\n syllable_i[0] + syllable_i[1] + syllable_i[2]))\n syllable[i] = wo + syllable_i[3:]\n elif len(syllable_i) >= 2 and syllable_i[0] + syllable_i[1] in string_list1:\n wo = random.choice(like_pronunciation2(\n syllable_i[0] + syllable_i[1]))\n syllable[i] = wo + syllable_i[2:]\n elif len(syllable_i) >= 1 in string_list2:\n wo = random.choice(like_pronunciation2(syllable_i[0]))\n syllable[i] = wo + syllable_i[1:]\n return \"_\".join(syllable)\n\n \"\"\"\n thay doi vi tri dau\n \"\"\"\n\n string_list1 = 'àảãáạăằẳẵắặâầẩẫấậèẻẽéẹêềểễếệìỉĩíịòỏõóọôồổỗốộơờởỡớợùủũúụưừửữứựỳỷỹýỵaeiouy'\n string_list2 = ['óa', 'oá', 'òa','oà', 'ỏa', 'oả', 'õa', 'oã', 'ọa', 'oạ',\\\n 'áo', 'aó', 'ào','aò', 'ảo', 'aỏ', 'ão', 'aõ', 'ạo', 'aọ',\\\n 'éo', 'eó', 'èo','eò', 'ẻo', 'eỏ', 'ẽo', 'eõ', 'ẹo', 'eọ',\\\n 'óe', 'oé', 'òe','oè', 
'ỏe', 'oẻ', 'õe', 'oẽ', 'ọe', 'oẹ',\\\n 'ái', 'aí', 'ài','aì', 'ải', 'aỉ', 'ãi', 'aĩ', 'ại', 'aị',\\\n 'ói', 'oí', 'òi','oì', 'ỏi', 'oỉ', 'õi', 'oĩ', 'ọi', 'oị'] # convert ve khong dau \n\n dict_change = {'óa': 'oá', 'òa':'oà', 'ỏa': 'oả', 'õa': 'oã', 'ọa': 'oạ',\\\n 'oá': 'óa', 'oà':'òa', 'oả': 'ỏa', 'oã': 'õa', 'oạ': 'ọa',\\\n 'áo': 'aó', 'ào':'aò', 'ảo': 'aỏ', 'ão': 'aõ', 'ạo': 'aọ',\\\n 'aó': 'áo', 'aò':'ào', 'aỏ': 'ảo', 'aõ': 'ão', 'aọ': 'ạo',\\\n 'éo': 'eó', 'èo':'eò', 'ẻo': 'eỏ', 'ẽo': 'eõ', 'ẹo': 'eọ',\\\n 'eó': 'éo', 'eò':'èo', 'eỏ': 'ẻo', 'eõ': 'ẽo', 'eọ': 'ẹo',\\\n 'óe': 'oé', 'òe':'oè', 'ỏe': 'oẻ', 'õe': 'oẽ', 'ọe': 'oẹ',\\\n 'oé': 'óe', 'oè':'òe', 'oẻ': 'ỏe', 'oẽ': 'õe', 'oẹ': 'ọe', 'ái': 'aí', 'ài':'aì', 'ải': 'aỉ', 'ãi': 'aĩ', 'ại': 'aị', 'aí': 'ái', 'aì':'ài', 'aỉ': 'ải', 'aĩ': 'ãi', 'aị': 'ại', 'ói': 'oí', 'òi':'oì', 'ỏi': 'oỉ', 'õi': 'oĩ', 'ọi': 'oị',\\\n 'oí': 'ói', 'oì':'òi', 'oỉ': 'ỏi', 'oĩ': 'õi', 'oị': 'ọi'}\n '''\n gom 3 truong hop\n truong hop 1: neu chi co 1 nguyen am, thi doi dauis unsubscriptable\n neu co am dem va am chinh (2 nguyen am), thi chuyen dau sang am dem\n (hoac co ca am dem, am chinh va am cuoi)\n chua xac dinh duoc y,i la am chinh hay am cuoi\n\n '''\n\n syllable = word.split(\"_\")\n word_add = []\n for i, syllable_i in enumerate(syllable):\n tmp = [i for i in string_list1 if i in syllable_i]\n if len(tmp) == 1:\n syllable_i = syllable_i.replace(tmp[0], random.choice(get_change_sign(tmp[0])))\n word_add.append(syllable_i)\n else :\n tmp1 = [i for i in string_list2 if i in syllable_i]\n for letters in tmp1:\n syllable_i = syllable_i.replace(letters, dict_change[letters])\n word_add.append(syllable_i)\n if len(word_add) != 0:\n return \"_\".join(word_add)\n else:\n return word",
"def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise",
"def applyDigiNoise(self):\n #noise = np.random.normal(loc=0.0, scale=self.information['RN'], size=self.image.shape)\n diginoise = np.random.poisson(self.information['gain']/np.sqrt(12), size=self.image.shape)\n\n # Can not be negative\n diginoise[diginoise < 0.0] = 0.0\n\n #add to the image\n self.image += diginoise",
"def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)",
"def _make_noisy(x, the_noise):\n noise_sample = the_noise[np.random.choice(the_noise.shape[0],\n x.shape[0],\n replace=False)]\n return x + noise_sample",
"def build_unigram_noise(freq):\n total = freq.sum()\n noise = freq / total\n assert abs(noise.sum() - 1) < 0.001\n return noise",
"def _generate_noise(self, num_imgs):\n return np.random.normal(0, 1, (num_imgs, self._noise_dim))",
"def add_noise(self):\n self.noise = torch.normal(0.5, .2, self.state.shape).double()\n self.noise *= torch.sqrt(2 *\n self.vars['T']*torch.tensor(self.vars['dt']))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a Frame object, will return the bytes of that Frame's file. If provided, will also scale the size of the image and convert it to the required format. | def convert_frames(frame, img_format: str, scale=None) -> bytes:
path = frame.filename
with open(path, "rb") as image_file:
im = Image.open(image_file)
converted_img = BytesIO()
if scale:
_LOGGER.debug("Scaling the image")
(width, height) = (int(im.width * scale), int(im.height * scale))
_LOGGER.debug("Original size is {}wx{}h, new size is {}wx{}h".format(im.width, im.height, width, height))
im = im.resize([width, height])
im.save(converted_img, img_format)
return converted_img.getvalue() | [
"def get_frame(self):\n success, image = self.streamer.read()\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()",
"def get_frame(self):\n if self.stream.isOpened():\n success, image = self.stream.read()\n if image is None:\n image = cv2.imread('assets/stream_not_found.png')\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n else:\n image = cv2.imread('assets/stream_not_found.png')\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()",
"def convert_frame(self, frame):\n return (b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')",
"def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]",
"def frame_to_png(frame):\n val, image = cv2.imencode('.png', frame)\n return ''.join(struct.pack('B', byte[0]) for byte in image)",
"def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]",
"def to_blob(self):\n x = cv2.dnn.blobFromImage(self.frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n return x",
"def convertFrame(self):\n try:\n height,width=self.currentFrame.shape[:2]\n img=QtGui.QImage(self.currentFrame,\n width,\n height,\n QtGui.QImage.Format_RGB888)\n img=QtGui.QPixmap.fromImage(img)\n self.previousFrame = self.currentFrame\n return img\n except:\n return None",
"def _default_encode(self, frame):\n success, encoded_frame = cv2.imencode('.png', frame)\n if not success:\n raise mpf.DetectionException('Failed to encode frame.')\n if len(encoded_frame) <= self.MAX_FILE_SIZE:\n return encoded_frame\n\n # The resulting file was too big when encoded with the default settings, so we switch the encoding method to\n # use the maximum compression.\n self._encode = self._encode_max_compression\n return self._encode_max_compression(frame)",
"def loadImage(self, Frame):\r\n\r\n Path = \"Model/Data/Captured/\" + datetime.now().strftime(\"%Y-%m-%d\") + \"/\" + \\\r\n datetime.now().strftime(\"%H\") + \"/\"\r\n FileName = datetime.now().strftime('%M-%S') + \".jpg\"\r\n\r\n self.SaveFrame(Path, FileName, cv2.resize(\r\n cv2.imdecode(Frame, cv2.IMREAD_UNCHANGED), (257, 257)))\r\n return cv2.imread(Path + FileName).astype(np.float32)",
"def readFrame(self):\n\t\tsuccess, self.frameImage = self.vidcap.read()\n\t\treturn success, self.frameImage",
"def convertFrame(self):\r\n try:\r\n height, width = self.currentFrame.shape[:2]\r\n img = QtGui.QImage(self.currentFrame,\r\n width,\r\n height,\r\n QtGui.QImage.Format_RGB888)\r\n img = QtGui.QPixmap.fromImage(img)\r\n self.previousFrame = self.currentFrame\r\n return img\r\n except:\r\n return None",
"def get_frame(frame):\n\n return int.from_bytes(frame, byteorder='big')",
"def test_get_image_and_to_byte_array_are_compatible(self):\n\n with open(self.subject, \"rb\") as f:\n content = f.read()\n\n image = image_helper.get_image(content)\n\n self.assertEqual(image.size, (800, 450))\n\n bytes_array = image_helper.to_byte_array(image)\n\n image = image_helper.get_image(bytes_array)\n\n self.assertEqual(image.size, (800, 450))",
"def image(self) -> bytes:\n bufsize = 65536\n data, addr = self.sock.recvfrom(bufsize)\n reader = BytesReader(data)\n bhs = 32 # basic header size\n # TODO check pts is parsed correctly\n basic_header = BasicHeader.unpack(reader)\n ehs = basic_header.exHeaderSize\n if ehs > 0:\n ex_header_type, = reader.unpack('>H')\n ex_header: Optional[ExHeader] = None\n if ex_header_type == 3:\n ex_header = ExHeader3.unpack(reader)\n elif ex_header_type == 8:\n ex_header = ExHeader8.unpack(reader)\n elif ex_header_type == 11:\n ex_header = ExHeader11.unpack(reader)\n reader.read(8) # probably reserved data\n else:\n logger.warning('unhandled ex header type %d', ex_header_type)\n logger.debug(f'ex header: {ex_header}')\n self._notify_ex_header_listeners(ex_header)\n offset = bhs + ehs\n if offset != reader.i:\n logger.warning('offsets differ: %d != %d', offset, reader.i)\n length = basic_header.totalSize - offset\n image_data = data[offset:]\n if len(image_data) != length:\n logger.warning('lengths differ: %d != %d', len(image_data), length)\n else:\n logger.debug(f'image data length: {len(image_data)}')\n return image_data",
"def frame(self):\n try:\n AppHelper.runConsoleEventLoop(installInterrupt=True)\n return str(self._delegate.frame.representations()[0].TIFFRepresentation().bytes())\n except:\n return None",
"def image_to_byte(img):\n img2 = img.crop(box=None)\n byte_arr = io.BytesIO()\n img2.save(byte_arr, format='PNG')\n return byte_arr.getvalue()",
"def get_frame(self, frame_id):\n if frame_id is None:\n return None\n return skimage.io.imread(self.get_frame_id_path(frame_id), as_gray=True, plugin=\"matplotlib\")",
"def image_to_bytes(image: Image):\n imgByteArr = BytesIO()\n image.save(imgByteArr, format=image.format)\n imgByteArr = imgByteArr.getvalue()\n return imgByteArr"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Given a dictionary, changes the keys from snake case to lower camel case. | def lower_camel_casify_dict_keys(d: dict) -> dict:
return {to_camel_case(key): value for key, value in d.items()} | [
"def dict_keys_snake_to_camel_case(snake_dict: dict) -> dict:\n\n camel_dict = dict()\n\n for key, val in snake_dict.items():\n if isinstance(key, str):\n camel_dict[snake_to_camel_case(key)] = val\n else:\n camel_dict[key] = val\n\n return camel_dict",
"def convert_case(self, key):\n return key if self.use_default_casing else self.casing_function(key)",
"def to_api_case(key):\n return snakecase(key)",
"def _lower(dictionary: dict):\n return {key.lower(): value.lower() for key, value in dictionary.items()}",
"def dict_to_camel(d):\n d2 = {}\n for k,v in d.items():\n d2[under_to_camel(k)] = v\n return d2",
"def _uppercase_keys_in_dict(self, adict):\n return dict( (k.upper(), v) for (k, v) in adict.iteritems() )",
"def __transform_key(self, key):\n if isinstance(key, basestring):\n return key.lower()\n else:\n return key",
"def lowercase_keys(input_dict):\n if not isinstance(input_dict,dict):\n return input_dict\n\n safe = dict()\n for key,value in input_dict.items():\n safe[str(key).lower()] = value\n return safe",
"def lower_keys(x):\n if isinstance(x, dict):\n return dict((k.lower(), v) for k, v in x.items())\n else:\n return \"msg_fromat_incorrect\"",
"def _convert_keys_to_lower(self, dictionary: dict) -> dict:\n lower_case_dictionary = OrderedDict()\n\n for key, value in dictionary.items():\n if not key.islower():\n if key.lower() in lower_case_dictionary.keys():\n raise ValueError(f\"Duplicate (case insensitive) key found: {key.lower()}\")\n if isinstance(value, dict):\n lower_case_dictionary[key.lower()] = self._convert_keys_to_lower(value)\n else:\n lower_case_dictionary[key.lower()] = value\n\n return lower_case_dictionary",
"def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])",
"def to_camel_case(snake_case_word):\n pascal = to_pascal_case(snake_case_word)\n return pascal[0].lower() + pascal[1:]",
"def _snake_to_camel_case(snake: str) -> str:\n components = snake.split(\"_\")\n return components[0] + \"\".join(component.title() for component in components[1:])",
"def convert_to_snake_case(camel_case_string):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case_string)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace('__', '_')",
"def __setitem__(self, key, value):\n super(CaseInsensitiveStringDict, self).__setitem__(key.lower(), value)",
"def _lower(self, mapping):\n _mapping = {}\n for k, v in sorted(mapping.items()):\n k = k.lower()\n if k not in _mapping:\n _mapping[k] = v\n return _mapping",
"def camelKey(k):\n camelKey = (\n k.replace(\"_\", \" \").replace(\"/\", \" \").replace(\"-\", \" \").title().replace(\" \", \"\")\n )\n\n # Remove all the characters that are not allowed in table storage as property names\n return re.sub(\"\\ |\\?|\\.|\\!|\\/|\\;|\\:|\\(|\\)\", \"\", camelKey)",
"def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])",
"def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Ensure IPCMessageSubscriber.connect gets wrapped by salt.utils.asynchronous.SyncWrapper. | async def test_ipc_connect_sync_wrapped(io_loop, tmp_path):
if salt.utils.platform.is_windows():
socket_path = ports.get_unused_localhost_port()
else:
socket_path = str(tmp_path / "noexist.ipc")
subscriber = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageSubscriber,
args=(socket_path,),
kwargs={"io_loop": io_loop},
loop_kwarg="io_loop",
)
with pytest.raises(tornado.iostream.StreamClosedError):
# Don't `await subscriber.connect()`, that's the purpose of the SyncWrapper
subscriber.connect() | [
"def _adapter_connect(self):\r\n error = super(AsyncoreConnection, self)._adapter_connect()\r\n if not error:\r\n self.socket = PikaDispatcher(self.socket, None,\r\n self._handle_events)\r\n self.ioloop = self.socket\r\n self._on_connected()\r\n return error",
"async def _async_connect(self): # pragma: no cover\n try:\n self.conn_coro = self.client.connected()\n aenter = type(self.conn_coro).__aenter__(self.conn_coro)\n self.stream = await aenter\n logger.info(f\"Artifact {str(self.jid)} connected and authenticated.\")\n except aiosasl.AuthenticationFailure:\n raise AuthenticationFailure(\n \"Could not authenticate the artifact. Check user and password or use auto_register=True\"\n )",
"async def on_connect(self):\n pass",
"def _patched_connect(self):\n self._stream = mock.MagicMock()\n self._register()",
"async def _async_connect(self) -> None: # pragma: no cover\n try:\n self.conn_coro = self.client.connected()\n aenter = type(self.conn_coro).__aenter__(self.conn_coro)\n self.stream = await aenter\n logger.info(f\"Agent {str(self.jid)} connected and authenticated.\")\n except aiosasl.AuthenticationFailure:\n raise AuthenticationFailure(\n \"Could not authenticate the agent. Check user and password or use auto_register=True\"\n )",
"async def test_double_connect(\n event_loop: asyncio.AbstractEventLoop, hyperion_fixture: HyperionFixture\n) -> None:\n (rw, hc) = hyperion_fixture.rw, hyperion_fixture.hc\n\n with patch(\"asyncio.open_connection\", return_value=(rw, rw)):\n assert await hc.async_client_connect()\n assert hc.is_connected",
"def test_interface(self):\n reactor = self.buildReactor()\n connector = reactor.connectTCP(\"127.0.0.1\", 1234, ClientFactory())\n self.assertTrue(verifyObject(IConnector, connector))",
"async def __initiate_connection(self):\r\n\r\n chainlink_model = ChainlinkResolver.resolve(self.name)\r\n if chainlink_model is None:\r\n LoggerInterface.error(f'The chainlink {self.name} is not registered yet. Register it first!')\r\n return\r\n\r\n self.socket_client.set_callback(self.callback)\r\n self.socket_client.set_using_chainlink(chainlink_model)\r\n await self.socket_client.connect()",
"def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()",
"def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable",
"def _connect(self):\n# print('DEBUG: enter comm._connect',file=sys.stderr)\n self._reset()\n self._do_readin = True # set False to kill _readthread\n self._readthread.start()\n# print('DEBUG:about to handshake', file=sys.stderr)\n self._call_when_connected()",
"async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True",
"async def async_connect_socket(streamer_obj: class_definition_and_manipulation.StreamerObj) -> None:\r\n reader, writer = await asyncio.open_connection(encryption_key.cfg_host,\r\n int(encryption_key.cfg_port))\r\n\r\n writer.write(f'CAP REQ :twitch.tv/membership twitch.tv/tags twitch.tv/commands\\r\\n'.encode('utf-8'))\r\n print(f\"Connecting to socket for {streamer_obj.name}\")\r\n\r\n writer.write(\"PASS {}\\r\\n\".format(encryption_key.decrypted_pass).encode('utf-8')) # password\r\n writer.write(\"NICK #zerg3rrbot\\r\\n\".encode('utf-8')) # bot name\r\n writer.write(f\"JOIN #{streamer_obj.name}\\r\\n\".encode('utf-8'))\r\n\r\n await writer.drain()\r\n streamer_obj.stream_socket_writer = writer\r\n streamer_obj.stream_socket_reader = reader",
"def test_connectionMade(self):\n avatar = Avatar()\n avatar.setEventReceiver = create_autospec(avatar.setEventReceiver)\n\n proto = BotLineProtocol(avatar)\n proto.makeConnection(StringTransport())\n avatar.setEventReceiver.assert_called_once_with(proto.eventReceived)",
"def _connect(self):\n self.mqtt = Client()\n self.mqtt.on_connect = self._subscribe_topics\n connect(self.mqtt, self.snips.mqtt)",
"def connect_sync(\n self, signal: qtrio._util.SignalInstance, slot: typing.Callable[..., object]\n ) -> None:\n\n async def async_slot(*args: object) -> None:\n slot(*args)\n\n self.connect(signal=signal, slot=async_slot)",
"def handle_connect(self):\n pass",
"def test_connect(monkeypatch):\n # Import pylsl here so that the test can be skipped if pylsl is not installed\n pylsl = pytest.importorskip(\"pylsl\")\n\n # Constants\n buffer_size = 17\n n_channels = 6\n c_channel_format = pylsl.cf_float32\n numpy_channel_format = np.float32\n\n # Replace pylsl streams with a mock\n monkeypatch.setattr(\n 'pylsl.resolve_streams',\n lambda *args, **kwargs: [\n pylsl.StreamInfo(\n source_id=host,\n channel_count=n_channels,\n channel_format=c_channel_format,\n )\n ],\n )\n lsl_client = LSLClient(host=host, buffer_size=buffer_size)\n # Mock out the pylsl.resolve_streams\n lsl_client._connect()\n\n assert isinstance(lsl_client.client, pylsl.StreamInlet)\n assert lsl_client.buffer.shape == (buffer_size, n_channels)\n assert lsl_client.buffer.dtype == numpy_channel_format",
"def test_connectRoute(self):\n commands = []\n results = []\n class FakeAMP:\n def callRemote(self, cmd, **kw):\n commands.append((cmd, kw))\n results.append(Deferred())\n return results[-1]\n\n amp = FakeAMP()\n sender = CollectingSender()\n router = Router()\n router.startReceivingBoxes(sender)\n\n receiver = SomeReceiver()\n protocol = u\"proto name\"\n\n d = connectRoute(amp, router, receiver, protocol)\n\n self.assertEqual(\n commands, [(Connect, {'origin': u'0', 'protocol': u'proto name'})])\n results[0].callback({'route': u'remote route'})\n\n def cbConnected(receiverAgain):\n self.assertIdentical(receiver, receiverAgain)\n self.assertTrue(receiver.started)\n\n receiver.sender.sendBox({'foo': 'bar'})\n self.assertEqual(\n sender.boxes, [{_ROUTE: 'remote route', 'foo': 'bar'}])\n router.ampBoxReceived({_ROUTE: '0', 'baz': 'quux'})\n self.assertEqual(receiver.boxes, [{'baz': 'quux'}])\n d.addCallback(cbConnected)\n return d"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
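The test in the row above depends on salt.utils.asynchronous.SyncWrapper, whose implementation is outside this row. As a rough, hypothetical sketch of the general sync-over-async pattern it exercises (standard-library only; this is not Salt's actual code and the class below is an assumption):

import asyncio

class SyncWrapper:
    # Hypothetical sketch: wrap an object with coroutine methods so callers can
    # invoke them synchronously on a private event loop.
    def __init__(self, cls, args=(), kwargs=None):
        self._loop = asyncio.new_event_loop()
        self._wrapped = cls(*args, **(kwargs or {}))

    def __getattr__(self, name):
        attr = getattr(self._wrapped, name)
        if asyncio.iscoroutinefunction(attr):
            def run_sync(*a, **kw):
                return self._loop.run_until_complete(attr(*a, **kw))
            return run_sync
        return attr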
Receives a list and a search term. Use a loop to go through the list and see if the string is there. If it is, return "string found". If not, return "string not found". | def search_for_string(lst_str, stringy):
if stringy in lst_str:
return "Found string"
else:
return "string not found" | [
"def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass",
"def grep(lst, term):\n if isinstance(term, list):\n term = \"\".join([f\"(?=.*{x})\" for x in term])\n\n matches = [i for (i, s) in enumerate(lst) if re.search(term, s, re.IGNORECASE)]\n return -1 if len(matches) == 0 else matches[0]",
"def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff",
"def search_for_substrings_in_list(substring, list_of_stuff):",
"def contains(str_or_list, val_to_find):\n \n return (val_to_find in str_or_list)",
"def _SearchStringInRow(self, entry, string):\n return any(list(filter(lambda x:bool(re.search(string,x.lower())), entry)))",
"def search_for(word,lst):\r\n for char in lst:\r\n if starts_with(word,char) == True:\r\n return char",
"def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break",
"def list_in_string(self, stringlist, string):\n for lstring in stringlist:\n if lstring in string:\n return True\n return False",
"def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break",
"def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass",
"def _search_for_first_match(self, search_list):\n for item in search_list:\n ind = self._search_for_label(item, print_msg=False)\n if ind is not None:\n return ind\n\n #if we reached this point, no items were found\n print('did not find any of these items : ' + str(search_list))",
"def is_input_list(sentence_word,input_list):\r\n\t\r\n\tfor input_word in input_list:\r\n\t\tif input_word in sentence_word:\r\n\t\t\treturn input_word\r\n\t\t\r\n\treturn \"none\"",
"def findentity(string):\r\n for x in entitylist:\r\n if x in string:\r\n print(f\"(Doc.{i})--Entity = {x.title()}\")\r\n break",
"def pageContains(page, strList):\n for text in strList:\n if text in page['data']:\n logging.log(5, 'Found string %s' % text)\n return True\n\n return False",
"def search(self, term):",
"def __find_string_in_response(self, fullResponse, searchFor):\n check = True\n rawResponse = fullResponse;\n if \"result\" not in rawResponse.text:\n check = False\n else:\n responseJSON = rawResponse.json()\n length_responseJSON = len(responseJSON[\"result\"])\n for i in range(0,length_responseJSON,1):\n check = searchFor in responseJSON[\"result\"][i][\"first_name\"]\n if check == False:\n return check\n return check",
"def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"",
"def find_substring_occurrences_in_list(my_string:str, my_list:List) -> List:\n return [s for s in my_list if my_string in s]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
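The query in the row above asks for an explicit loop and the literal messages "string found" / "string not found", whereas the paired document uses the in operator and returns "Found string". A sketch that follows the query wording literally, shown only for comparison (not a correction of the stored row):

def search_for_string(lst_str, stringy):
    # Walk the list element by element, as the query describes.
    for item in lst_str:
        if item == stringy:
            return "string found"
    return "string not found"

# Example: search_for_string(["a", "b"], "b") -> "string found"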
andExpr = relationalExpr { "and" relationalExpr } | def andExpr( ): #DOUBLE CHECK THIS
tok = tokens.peek( )
if debug: print("andExpr: ", tok)
left = relationalExpr( ) #does the left side of the grammar
tok = tokens.peek( )
while tok == "and": #checks to see if there is the token "and" and will preform what is inside the curly bracket since it is a series
tokens.next()
right = relationalExpr( )
left = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING
tok = tokens.peek( )
return left | [
"def and_expression(cls, tree):\n if len(tree.children) == 1:\n assert tree.child(0).data == 'cmp_expression'\n return cls.cmp_expression(tree.child(0))\n\n assert tree.child(1).type == 'AND'\n op = tree.child(1)\n return cls.build_binary_expression(\n tree, op,\n cls.and_expression(tree.child(0)),\n cls.cmp_expression(tree.child(2)))",
"def q_and(self, r):\n return self.andOr(r, Query.QAnd)",
"def __and__(self, query):\r\n return And([self, query]).normalize()",
"def AND(logical_expression, *logical_expressions):\n return all((logical_expression,) + logical_expressions)",
"def and_func(p, q):\n return p and q",
"def And(*xs, simplify=True):\n xs = [Expression.box(x).node for x in xs]\n y = exprnode.and_(*xs)\n if simplify:\n y = y.simplify()\n return _expr(y)",
"def AND(*args) -> str:\n return \"AND({})\".format(\",\".join(args))",
"def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred",
"def _AND(self, left, right):\n return '(%s AND %s)' % (self.render(left), self.render(right, left))",
"def _make_and_expr(self, check1, _and, check2):\n\n return [('and_expr', AndCheck([check1, check2]))]",
"def _extend_and_expr(self, and_expr, _and, check):\n\n return [('and_expr', and_expr.add_check(check))]",
"def _prefix_and(*exprs, **kwargs):\n anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)\n if len(anded) == 0:\n return ''\n return kwargs.get('prefix', 'WHERE ') + anded",
"def logical_and(x, y, name=None):\n result = _op_def_lib.apply_op(\"LogicalAnd\", x=x, y=y, name=name)\n return result",
"def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp",
"def Nand(*args):\n return Not(And(*args))",
"def and_expr(self, size, a, b, flags = None):\n\t\treturn self.expr(core.LLIL_AND, a.index, b.index, size = size, flags = flags)",
"def handle_and(operands):\n def rid_not(ns):\n res = []\n for n in ns:\n res.append(n[1])\n return res\n\n classified_operands = classify_operands(operands)\n res = handle_and_word(classified_operands['words'])\n lists = classified_operands['lists']\n lists.append(res)\n res = handle_and_list(lists)\n res = handle_and_not_words(res, rid_not(classified_operands['nwords']))\n res = handle_and_not_lists(res, rid_not(classified_operands['nlists']))\n return res",
"def _and(cls, arg1, arg2):\n return arg1 and arg2",
"def distribute_AND_over_OR(expr):\n if not isinstance(expr, (AND, OR)):\n tmp = set()\n tmp.add(frozenset((expr,)))\n return CNF(tmp)\n\n if isinstance(expr, OR):\n return CNF.all_or(*[distribute_AND_over_OR(arg)\n for arg in expr._args])\n\n if isinstance(expr, AND):\n return CNF.all_and(*[distribute_AND_over_OR(arg)\n for arg in expr._args])"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
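This row and the parser rows that follow all rely on tokens.peek(), tokens.next() and match(), which come from the original lexer module and are not shown here. A minimal stand-in so the fragments can be read on their own (names and behaviour below are assumptions; match takes the stream explicitly instead of the module-level global used in the source):

class TokenStream:
    # peek() returns the current token without consuming it; next() consumes it.
    def __init__(self, token_list):
        self._tokens = list(token_list)
        self._pos = 0

    def peek(self):
        return self._tokens[self._pos] if self._pos < len(self._tokens) else None

    def next(self):
        tok = self.peek()
        self._pos += 1
        return tok

def match(tokens, expected):
    # Consume the current token and fail loudly if it is not the expected one.
    tok = tokens.next()
    if tok != expected:
        raise SyntaxError("expected %r, got %r" % (expected, tok))
    return tok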
relationalExpr = addExpr [ relation addExpr ] | def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS
tok = tokens.peek( )
if debug: print("relationalExpr: ", tok)
left = addExpr( )
expr = ""
tok = tokens.peek( )
if tok in relations:
rel = relation( ) # expecting a relation to start off
right = expression( ) # if there is a relation we expect there to be an expression to the right of the relation
expr = BinaryExpr( rel, left, right )
return expr #fix this for syntax tree maybe
return left | [
"def relation( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"relation: \", tok)\n left = addExpr( )\n tok = tokens.peek( )\n while str(tok) in tokens.relational:\n op = tok\n tokens.next()\n \n right = relation()\n left = BinaryExpr(op, left, right)\n tok = tokens.peek()\n return left",
"def add_relation(self, item1, item2):\n\t\titem1.adjacent.add(item2)",
"def add_rel_exp(memo, mexpr, props):\n\n # 'outs' is the set of columns provided by this relational expression.\n assert 'outs' in props\n\n # 'cols' is the list of column indexes that defines in which order\n # the output columns are presented in each result row.\n assert 'cols' in props\n\n # 'labels' is the list of column labels.\n assert 'labels' in props\n\n # For relational expressions, neededcols is the set of free\n # variables in the expression (correlation dependencies). This is\n # defined as the union of all columns needed by scalar\n # sub-expressions, minus (set difference) all columns provided by\n # the relational expression.\n assert 'neededcols' in props\n\n return memo.newcls(mexpr, props)",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def addExpr( ):\n tok = tokens.peek( )\n if syntaxDebug: print (\"addExpr: \", tok)\n \n left = term( )\n tok = tokens.peek( )\n while tok == \"+\" or tok == \"-\":\n op = tok\n tokens.next()\n \n right = addExpr( )\n left = BinaryExpr(op, left, right)\n tok = tokens.peek( )\n return left",
"def polyrelsimp(expr):\n return expr.replace(lambda rel: isinstance(rel, Rel),\n lambda rel: expand_polyeq(rel))",
"def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)",
"def createRelation(rid, rlabel, list, x, y):\n relation = Relation(rid, rlabel, x, y)\n list.append(relation)",
"def append_relation(root, utype, global_id1, global_id2):\n unit_id, date = mk_id(author=_AUTHOR)\n\n id1 = global_id1.split('_')\n id2 = global_id2.split('_')\n\n subdoc1 = id1[1]\n subdoc2 = id2[1]\n\n if subdoc1 == subdoc2:\n local_id1 = '_'.join([id1[-2], id1[-1]])\n local_id2 = '_'.join([id2[-2], id2[-1]])\n\n metadata = [('author', _AUTHOR),\n ('creation-date', str(date)),\n ('lastModifier', 'n/a'),\n ('lastModificationDate', '0')]\n elm_relation = ET.SubElement(root, 'relation', {'id': unit_id})\n elm_metadata = ET.SubElement(elm_relation, 'metadata')\n for key, val in metadata:\n ET.SubElement(elm_metadata, key).text = val\n elm_charact = ET.SubElement(elm_relation, 'characterisation')\n ET.SubElement(elm_charact, 'type').text = utype\n\n elm_features = ET.SubElement(elm_charact, 'featureSet')\n comments = ET.SubElement(elm_features, 'feature',\n {'name': 'Comments'})\n comments.text = 'Please write in remarks...'\n argument_scope = ET.SubElement(elm_features, 'feature',\n {'name': 'Argument_scope'})\n argument_scope.text = 'Please choose...'\n\n positioning = ET.SubElement(elm_relation, 'positioning')\n edu1 = ET.SubElement(positioning, 'term', {'id': local_id1})\n edu2 = ET.SubElement(positioning, 'term', {'id': local_id2})\n\n return []\n\n else:\n err1 = \"Implicit relation from subdoc %s to subdoc %s :\" % (\n subdoc1, subdoc2)\n print(err1)\n err2 = \"%s ------ %s -----> %s\" % (global_id1, utype, global_id2)\n print(err2)\n return [err1, err2]",
"def add_relation(self, qid, relation, qid2):\n if self._kg_symbols is not None:\n self._kg_symbols.add_relation(qid, relation, qid2)",
"def _setRelation(self, node):\n if getattr(self, \"relation\", None):\n element = etree.SubElement(node, 'relation')\n element.text = getattr(self, \"relation\")",
"def plus(self, left):\n self.match('PLUS')\n right = self.expression()\n # return {'type': 'PLUS', 'left': left, 'right': right}\n return production.AddExpr(left, right)",
"def insert(self, rel):\r\n self.__ior__(rel)",
"def _follow_relation_set(self, rel_expr,\n inverted):\n if not self.context.is_group(rel_expr.type_name):\n raise RelationNameError(rel_expr.type_name,\n 'Expression type is not a relation group.')\n g = self.context.get_group(rel_expr.type_name)\n if inverted == +1:\n with tf.name_scope('follow_group_%s' % rel_expr.type_name):\n return (self.follow(g.subject_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.object_rel)\n else:\n with tf.name_scope('follow_group_%s_inverse' % rel_expr.type_name):\n return (self.follow(g.object_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.subject_rel)",
"def __push_relation(self, id1, id2, id1_name, id2_name, table):\n # case: No entry about relation is in DB yet\n if not self.__postgre_db.is_in_table(table, id1_name + \"=\" + str(\n id1)):\n self.__postgre_db.insert(table, {\n id1_name: id1, id2_name: [id2], \"aggregation\": 0})\n\n # case: Entry about single_pattern is in DB\n else:\n old_list = self.__postgre_db.get(table, id1_name + \"=\" + str(\n id1), id2_name)\n new_list = list(set(old_list + [id2]))\n self.__postgre_db.update(\n table, id2_name + \"=\" + add_quotes(replace_brackets(str(new_list))), id1_name + \"=\" + str(id1))",
"def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)",
"def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)",
"def augment_relation(argComponents, rels, stances):\n firstMC = None\n for i in range(len(argComponents)):\n if argComponents[i].label == \"MajorClaim\":\n if firstMC == None:\n firstMC = i\n else:\n augRel = Relation()\n augRel.rel_name = \"augmentation\" # augmentation\n augRel.source = argComponents[i].code\n augRel.target = argComponents[firstMC].code\n rels.append(augRel)\n\n # connect claim to firstMC, this is done with another loop since MajorClaim possible not located in the beginning \n for i in range(len(argComponents)):\n if argComponents[i].label == \"Claim\":\n augRel = Relation()\n augRel.rel_name = get_stance(stances, argComponents[i].code)\n if augRel.rel_name == \"for\":\n augRel.rel_name = \"supports\"\n elif augRel.rel_name == \"against\":\n augRel.rel_name = \"attacks\"\n augRel.source = argComponents[i].code\n augRel.target = argComponents[firstMC].code\n rels.append(augRel)\n \n # change the relation naming to suit our scheme\n for i in range(len(rels)):\n if rels[i].rel_name == \"supports\":\n rels[i].rel_name = 'sup'\n elif rels[i].rel_name == \"attacks\":\n rels[i].rel_name = \"att\"\n elif rels[i].rel_name == \"augmentation\":\n rels[i].rel_name = \"=\" # restatement",
"def add_relations(self, relations):\n for relation in relations:\n self.add_relation(relation)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
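relationalExpr, like the other expression routines in these rows, folds its operands into BinaryExpr nodes defined outside the snippet. A plausible minimal version of that node class (an assumption for readability, not the original definition):

class BinaryExpr:
    # AST node: one operator applied to a left and a right sub-expression.
    def __init__(self, op, left, right):
        self.op = op
        self.left = left
        self.right = right

    def __repr__(self):
        return "(%s %r %r)" % (self.op, self.left, self.right)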
factor = number | '(' expression ')' | def factor( ):
tok = tokens.peek( )
if debug: print ("Factor: ", tok)
if re.match( Lexer.number, tok ):
expr = Number(tok)
tokens.next( )
tok = tokens.peek( )
return expr
if tok == "(":
tokens.next( ) # or match( tok )
expr = addExpr( )#might need to change to expression( )
tokens.peek( )
tok = match( ")" )
return expr
if re.match( Lexer.identifier, tok ): # added this to take into accout identifiers
expr = VarRef(tok)
tokens.next( )
return expr
if re.match( Lexer.String, tok ): # added this to take into account strings
expr = String( tok )
return expr
error( "Invalid operand" )
return | [
"def factor( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"Factor: \", tok)\n if tok == \"(\":\n tokens.next()\n expr = addExpr()\n tokens.next()\n return expr\n if re.match(tokens.number, tokens.peek()):\n expr = Number(tok)\n tokens.next( )\n return expr\n elif re.match(tokens.string, tokens.peek()):\n expr = String(tok)\n tokens.next()\n return expr\n elif not tokens.check_key(tokens.peek()):\n expr = Variable(tok)\n tokens.next( )\n return expr\n \n error(\"Invalid operand\")\n return",
"def eval_tree_factor(iterator):\r\n\tans = next(iterator)\r\n\tif ans == '(':\r\n\r\n\t\ta = eval_tree_sum(iterator)\r\n\t\tnext(iterator)\r\n\t\treturn a\r\n\r\n\r\n\telse:\r\n\t\tif ans.isdigit():\r\n\t\t\tif (ans == \"-\"):\r\n\t\t\t\tans = 0 - int(next(iterator))\r\n\t\t\treturn Value(str(ans))\r\n\r\n\t\telse:\r\n\t\t\tif(peek(iterator)=='('):\r\n\t\t\t\tfunc = ans\r\n\t\t\t\tnext(iterator)\r\n\t\t\t\tparms = [eval_infix_iter(iterator)]\r\n\t\t\t\tx = peek(iterator)\r\n\t\t\t\twhile peek(iterator) != ')' and peek(iterator) != \";\" and peek(iterator) != \",\":\r\n\t\t\t\t\tparms.append(eval_infix_iter((iterator)))\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\treturn Func(func,parms)\r\n\r\n\t\t\telse:\r\n\t\t\t\treturn Var(ans)",
"def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'",
"def parse_factor(self):\n l_expression = self.parse_unary()\n\n while self.check_type(TokenType.DIVIDE) or self.check_type(TokenType.MULTIPLY):\n token = self.consume()\n op = to_operator_type[token.value]\n r_expression = self.parse_unary()\n l_expression = BinaryOperator(l_expression, op, r_expression, token.line, token.column)\n return l_expression",
"def _parse_factor(self) -> int:\n token = self._lexer.get_next_token()\n return int(token.value)",
"def parseFactors(cmds):\n print(\"Factor\")\n if cmds[0] == 'D':\n parseExpr(cmds[2:len(cmds)-1])\n elif cmds[0] == '(':\n parseExpr(cmds[1:len(cmds)-1])\n else:\n parseNumber(cmds)",
"def multiply(expression):\n value = 1\n for n in expression.split('*'):\n value *= int(n)\n return value",
"def is_factor(num,composite):\n if insist_number([num,composite]):\n if composite % num == 0:\n return True\n return False",
"def exp(x):\n if isinstance(x, int):\n x = Expression(x)\n return _exp(x)",
"def multiple_of(factor):\n\n class multiple_of(int):\n \"\"\"Int type in [A; B] range.\"\"\"\n\n def __init__(self, k):\n assert int(k) % factor == 0, (k, factor)\n super(multiple_of, self).__init__()\n\n return multiple_of",
"def factorial(x):\n return math.factorial(x)",
"def get_factorial():\n\n number = int(input(\"give the number...\"))\n\n for i in range(1, number):\n number *= i\n\n print(\"factorial: {}\".format(number))",
"def multiply(item: int, factor: int = 2):\n return item * factor",
"def f(n):\n return sum(math.factorial(int(ch)) for ch in str(n))",
"def __mul__(self, factor):\n def mul(output, target, params):\n return self(output, target, params) * factor\n return type(self)(type(self).__reserved_init, mul, factor * (1. if self._fact is None else self._fact), self._name)",
"async def calculate_score(expression, score_factor):\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor",
"def factorielle(n):\n result = n\n while n > 1:\n n -= 1\n result *= n\n return result",
"def factorial(number):\n result = 1\n while number:\n result *= number\n number -= 1\n return result",
"def get10Factor(num):\n\tp = 0\n\tfor i in range(-20, 20):\n\t\tif num == num % 10**i:\n\t\t\tp = -(i - 1)\n\t\t\tbreak\n\treturn p"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
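factor() dispatches on Lexer.number, Lexer.identifier and Lexer.String, regular-expression patterns defined in the original lexer. Representative patterns of the kind those calls expect (the exact expressions are assumptions):

import re

class Lexer:
    # Illustrative token patterns; the original module's regexes may differ.
    number = r"\d+"
    identifier = r"[A-Za-z_][A-Za-z0-9_]*"
    String = r'"[^"]*"'

# re.match anchors at the start of the token, mirroring the checks in factor().
assert re.match(Lexer.number, "42")
assert re.match(Lexer.identifier, "count")
assert re.match(Lexer.String, '"hello"')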
term = factor { ('*' | '/') factor } | def term( ):
tok = tokens.peek( )
if debug: print ("Term: ", tok)
left = factor( )
tok = tokens.peek( )
while tok == "*" or tok == "/":
tokens.next()
right = factor( )
left = BinaryExpr( tok, left, right )
tok = tokens.peek( )
return left | [
"def term( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"Term: \", tok)\n left = factor( )\n tok = tokens.peek( )\n while tok == \"*\" or tok == \"/\":\n op = tok\n tokens.next()\n \n right = term( )\n left = BinaryExpr(op, left, right)\n tok = tokens.peek( )\n return left",
"def factor( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"Factor: \", tok)\n\tif re.match( Lexer.number, tok ):\n\t\texpr = Number(tok)\n\t\ttokens.next( )\n\t\ttok = tokens.peek( )\n\t\treturn expr\n\tif tok == \"(\":\n\t\ttokens.next( ) # or match( tok )\n\t\texpr = addExpr( )#might need to change to expression( )\n\t\ttokens.peek( )\n\t\ttok = match( \")\" )\n\t\treturn expr\n\tif re.match( Lexer.identifier, tok ): # added this to take into accout identifiers\n\t\texpr = VarRef(tok)\n\t\ttokens.next( )\n\t\treturn expr\n\tif re.match( Lexer.String, tok ): # added this to take into account strings\n\t\texpr = String( tok )\n\t\treturn expr\n\n\terror( \"Invalid operand\" )\n\treturn",
"def parse_like_term(term):\n case_insensitive = term.startswith('*')\n if case_insensitive:\n term = term[1:]\n # apply operators\n if term.startswith('^'):\n oper = 'startswith'\n term = term[1:]\n elif term.startswith('='):\n oper = 'exact'\n term = term[1:]\n else:\n oper = 'contains'\n # add case insensitive flag\n if case_insensitive:\n oper = 'i' + oper\n return oper, term",
"def factor( ):\n \n tok = tokens.peek( )\n if syntaxDebug: print (\"Factor: \", tok)\n if tok == \"(\":\n tokens.next()\n expr = addExpr()\n tokens.next()\n return expr\n if re.match(tokens.number, tokens.peek()):\n expr = Number(tok)\n tokens.next( )\n return expr\n elif re.match(tokens.string, tokens.peek()):\n expr = String(tok)\n tokens.next()\n return expr\n elif not tokens.check_key(tokens.peek()):\n expr = Variable(tok)\n tokens.next( )\n return expr\n \n error(\"Invalid operand\")\n return",
"def parse_factor(self):\n l_expression = self.parse_unary()\n\n while self.check_type(TokenType.DIVIDE) or self.check_type(TokenType.MULTIPLY):\n token = self.consume()\n op = to_operator_type[token.value]\n r_expression = self.parse_unary()\n l_expression = BinaryOperator(l_expression, op, r_expression, token.line, token.column)\n return l_expression",
"def from_term(term):\n if term is None:\n return term\n elif isinstance(term, (six.string_types, int, float)):\n return term\n elif isinstance(term, dict):\n return {k: from_term(v) for k, v in term.items()}\n elif isinstance(term, list):\n return [from_term(t) for i, t in enumerate(term)]\n elif issubclass(term.__class__, (Matcher,)):\n return term.generate()\n else:\n raise ValueError('Unknown type: %s' % type(term))",
"def parseTerms(cmds):\n if len(cmds) != 0:\n print(\"Term\")\n delimit = \"[*|/|%]+\"\n factors = re.split(delimit, cmds[0])\n parseFactors(cmds[0])\n parseTerms(cmds[1:])",
"def parse_term(self):\n l_expression = self.parse_factor()\n\n while self.check_type(TokenType.PLUS) or self.check_type(TokenType.MINUS):\n token = self.consume()\n op = to_operator_type[token.value]\n r_expression = self.parse_factor()\n l_expression = BinaryOperator(l_expression, op, r_expression, token.line, token.column)\n return l_expression",
"def term(self):\r\n result = self.exponential()\r\n while self.current_token is not None and self.current_token.type in (TokenType.MUL, TokenType.DIVIDE):\r\n if self.current_token.type is TokenType.MUL:\r\n self.advance()\r\n result = MulNode(result, self.exponential())\r\n elif self.current_token.type is TokenType.DIVIDE:\r\n self.advance()\r\n result = DivideNode(result, self.number())\r\n\r\n return result",
"def parse_term(self) -> SyntaxNode:\n return self._parse_cat_binary(\"M\", self.parse_value)",
"def eval_tree_factor(iterator):\r\n\tans = next(iterator)\r\n\tif ans == '(':\r\n\r\n\t\ta = eval_tree_sum(iterator)\r\n\t\tnext(iterator)\r\n\t\treturn a\r\n\r\n\r\n\telse:\r\n\t\tif ans.isdigit():\r\n\t\t\tif (ans == \"-\"):\r\n\t\t\t\tans = 0 - int(next(iterator))\r\n\t\t\treturn Value(str(ans))\r\n\r\n\t\telse:\r\n\t\t\tif(peek(iterator)=='('):\r\n\t\t\t\tfunc = ans\r\n\t\t\t\tnext(iterator)\r\n\t\t\t\tparms = [eval_infix_iter(iterator)]\r\n\t\t\t\tx = peek(iterator)\r\n\t\t\t\twhile peek(iterator) != ')' and peek(iterator) != \";\" and peek(iterator) != \",\":\r\n\t\t\t\t\tparms.append(eval_infix_iter((iterator)))\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\treturn Func(func,parms)\r\n\r\n\t\t\telse:\r\n\t\t\t\treturn Var(ans)",
"def compile_term(self):\n self.__tag(\"term\")\n self.__ind_level += 1\n\n val = self.__token_stream.token_type()\n trailing_tok = False\n if val == TokenType.INT_CONST:\n # integerConstant\n self.__w_int()\n elif val == TokenType.STRING_CONST:\n # stringConstant\n self.__w_str()\n elif val == TokenType.KEYWORD:\n # keywordConstant\n keyword_constants = [KeyWordType.TRUE,\n KeyWordType.FALSE,\n KeyWordType.NULL,\n KeyWordType.THIS]\n kw = self.__token_stream.key_word()\n if kw in keyword_constants:\n self.__w_keyword(kw)\n else:\n raise Exception(f\"Invalid keyword '{kw}' in 'term'\")\n elif val == TokenType.SYMBOL:\n unary_ops = [\"-\", \"~\"]\n sym = self.__token_stream.symbol()\n if sym in unary_ops:\n # unaryOp term\n self.__w_symbol(sym)\n self.__token_stream.advance()\n trailing_tok = self.compile_term()\n elif sym == \"(\":\n # '(' expression ')'\n self.__w_symbol(sym)\n self.__token_stream.advance()\n if not self.compile_expression():\n self.__token_stream.advance()\n self.__w_symbol(\")\")\n else:\n raise Exception(f\"Invalid symbol '{sym}' in 'term'\")\n elif val == TokenType.IDENTIFIER:\n # varName | varName '[' expression ']' | subroutineCall\n self.__w_identifier()\n self.__token_stream.advance()\n val = self.__token_stream.token_type()\n if val == TokenType.SYMBOL and self.__w_symbol(\"[\"):\n self.__token_stream.advance()\n if not self.compile_expression():\n self.__token_stream.advance()\n self.__w_symbol(\"]\")\n elif val == TokenType.SYMBOL and self.__w_symbol(\"(\"):\n # subroutineCall case 1\n self.__token_stream.advance()\n self.compile_expression_list()\n self.__token_stream.advance()\n self.__w_symbol(\")\")\n elif val == TokenType.SYMBOL and self.__w_symbol(\".\"):\n # subroutineCall case 2\n self.__token_stream.advance()\n self.__w_identifier()\n self.__token_stream.advance()\n self.__w_symbol(\"(\")\n self.__token_stream.advance()\n self.compile_expression_list()\n self.__w_symbol(\")\")\n else:\n trailing_tok = True\n else:\n raise Exception(f\"Invalid starting token '{val}' for 'term'\")\n\n self.__ind_level -= 1\n self.__tag(\"term\", end=True)\n return trailing_tok",
"def __process_term(self, a_term):\n if '\\\\' in a_term:\n # \\alpha style\n\n # forcing 1 as a coefficient\n if a_term.startswith('\\\\'):\n a_term = '1' + a_term\n\n coef, term = a_term.split('\\\\')\n\n # making sure that a_term is legal\n if term not in greek_names:\n raise Exception('wrong input: ' + term)\n\n # converting a_term to its integer alternative\n # 0 for alpha, 1 for beta, etc\n term = greek_names.index(term)\n\n # confirming that user does not provide too many variables\n if term >= self.__num_vars:\n raise Exception('Too many variables provided!')\n\n # print('\\t\\t', a_term, coef, term)\n\n return coef, term\n else:\n # α and a style\n # a, α, 2a, 10a, 2, 10, 100/3, 10.5, 100/3a, 10.5a\n\n # forcing 1 as a coefficient\n if len(a_term) == 1 and a_term.isalpha():\n a_term = '1' + a_term\n\n if a_term[-1].isalpha():\n coef, term = a_term[:-1], a_term[-1]\n\n # making sure that a_term is legal\n if term not in greek_symbols + latin_symbols:\n raise Exception('wrong input:', term)\n\n # converting a_term to its integer alternative\n # 0 for alpha, 1 for beta, etc\n if term in greek_symbols:\n term = greek_symbols.index(term)\n elif term in latin_symbols:\n term = latin_symbols.index(term)\n\n # confirming that user does not provide too many variables\n if term >= self.__num_vars:\n raise Exception('Too many variables provided!')\n else:\n coef, term = a_term, self.__num_vars - 1\n\n # print('\\t\\t a_term = {}, coef = {}, term = {}'.format(a_term, coef, term))\n\n return coef, term",
"def __resolve_term_expression(expression):\n\n left_term = None\n operator = None\n original_expression = expression\n\n while True:\n if len(expression) == 0:\n return (left_term, original_expression)\n elif expression[0] == \"(\":\n if left_term is None:\n left_term, termexpression = __resolve_term_expression(__extract_term(expression))\n expression = expression[len(termexpression) + 2:]\n continue\n else:\n right_term, termexpression = __resolve_term_expression(__extract_term(expression))\n expression = expression[len(termexpression) + 2:]\n operator.set_left_term(left_term)\n operator.set_right_term(right_term)\n left_term = operator\n operator = None\n continue\n elif expression[0] == \"\\\\\":\n operator = ComplementOperator()\n expression = expression[1:]\n continue\n elif expression[0] == \"&\":\n operator = IntersectOperator()\n expression = expression[1:]\n continue\n elif expression[0] == \"|\":\n operator = UnionOperator()\n expression = expression[1:]\n continue\n else:\n if (left_term is None):\n left_term, primitiveexpression = __resolve_primitive_expression(expression)\n expression = expression[len(primitiveexpression):]\n continue\n else:\n right_term, primitiveexpression = __resolve_primitive_expression(expression)\n expression = expression[len(primitiveexpression):]\n operator.set_left_term(left_term)\n operator.set_right_term(right_term)\n left_term = operator\n operator = None\n continue",
"def category(category, term):",
"def apply_on_each_term(query: str, function: Callable) -> str:\n\n is_inside_a_term = False\n search_term = ''\n final_query = ''\n for character in query:\n\n if character == '[':\n search_term += character\n is_inside_a_term = True\n continue\n\n if is_inside_a_term:\n search_term += character\n if character == ']':\n search_term = function(search_term)\n final_query += search_term\n search_term = ''\n is_inside_a_term = False\n else:\n final_query += character\n\n return final_query",
"def conjterm(term, mode=\"amp\"):\n f = {\"amp\": 1, \"phs\": -1, \"real\": 1, \"imag\": 1j}[\n mode\n ] # if KeyError, mode was invalid\n terms = [[f, t[:-1]] if t.endswith(\"_\") else [t] for t in term]\n return reduce(lambda x, y: x + y, terms)",
"def cry(s : str) -> CryptolTerm:\n return CryptolTerm(s)",
"def end_term(query):\n if query.endswith(' '):\n return query[query[:-1].rfind(' ')+1:]\n else:\n return query[query.rfind(' ')+1:]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
addExpr = term { ('+' | '-') term } | def addExpr( ):
tok = tokens.peek( )
if debug: print ("addExpr: ", tok)
left = term( )
tok = tokens.peek( )
while tok == "+" or tok == "-":
tokens.next()
right = term( )
left = BinaryExpr( tok, left, right )
tok = tokens.peek( )
return left | [
"def addExpr( ):\n tok = tokens.peek( )\n if syntaxDebug: print (\"addExpr: \", tok)\n \n left = term( )\n tok = tokens.peek( )\n while tok == \"+\" or tok == \"-\":\n op = tok\n tokens.next()\n \n right = addExpr( )\n left = BinaryExpr(op, left, right)\n tok = tokens.peek( )\n return left",
"def plus(self, left):\n self.match('PLUS')\n right = self.expression()\n # return {'type': 'PLUS', 'left': left, 'right': right}\n return production.AddExpr(left, right)",
"def addition(self, var_dict) -> Expr:\n left = self.term(var_dict)\n\n while self.curr_token[0][0] in { Lexer.PLUS.id, Lexer.MINUS.id }:\n operation = self.curr_token[1]\n self.next_token() # advance to the next token\n right = self.term(var_dict)\n left = BinaryExpr(left, right, operation)\n\n return left",
"def _AddBinaryOperator(self, string=None, **unused_kwargs):\n expression = expressions.BinaryExpression(operator=string)\n self._stack.append(expression)\n\n return None",
"def create_plus_operator_token(idx=-1, pos=-1):\n\n return Token(\"+\", TOKEN_TYPE_OPERATOR, _mexp_operators.OPERATOR_PLUS_ID, idx, pos)",
"def operator_addition(A, B):",
"def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)",
"def test_unaryPlus(self):\r\n self.flakes('+1')",
"def buildQuery(terms, operator=None):\n connective = ('+%s+' % operator) if operator else '+'\n terms_with_parens = [('(%s)' % t) if ('+' in t) else t\n for t in terms]\n return connective.join(terms_with_parens)",
"def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)",
"def add(*args):\n\n # Add the two operands.\n sum = int(args[0]) + int(args[1])\n\n return str(sum)",
"def _append_operator(self, operator):",
"def par(expr,gauche=True):\r\n return (\"\\\\left( \" if gauche else \"\\\\right) \") if (expr[0]==\"-\" or \"+\" in expr[1:] or \"-\" in expr[1:]) else \"\"",
"def visit_UnaryOp(self, node):\n if isinstance(node.op, ast.USub):\n expr = f'-{self.visit(node.operand)}'\n elif isinstance(node.op, ast.Not):\n expr = f'not {self.visit(node.operand)}'\n else:\n raise Exception(\"Unary operators except negation are not defined\")\n self.exprs = [expr]\n return expr",
"def plusify(fst):\n fst_pls = {}\n \n for i in range(1,len(fst)):\n cur = fst[0:i], \"+\", fst[i:len(fst)]\n expr = \"\".join([x for x in cur]) \n try:\n fst_pls[eval(expr)].append(expr)\n except:\n fst_pls[eval(expr)] = [expr]\n \n \n return fst_pls",
"def adp(lhs,rhs):\n test=lambda s: s[0]=='`'\n assert test(lhs)==True,'error: lhs should be non-terminal'\n lhs=so.getSymbol(lhs[1:],terminal=False,autocreate=True)\n rhs=[so.getSymbol(s[1:],False,True) if test(s) else so.getSymbol(s,True,True) for s in rhs]\n return addProduction(lhs,rhs)",
"def on_plus(self):",
"def eval_infix_sum(expr):\n\tans = eval_infix_product(expr)\n\toper = expr.peek()\n\twhile oper in '+-':\n\t\texpr.__next__()\n\t\tother = eval_infix_product(expr)\n\t\tif oper == '+':\n\t\t\tans += other\n\t\telif oper == '-':\n\t\t\tans -= other\n\t\toper = expr.peek()\n\treturn ans",
"def replace_spaces_with_plus(string):\n return string.replace(' ', '+')"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
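Because addExpr() delegates to term(), which delegates to factor(), multiplication and division bind tighter than addition and subtraction, and each level associates to the left. A self-contained sketch of the same layering, reduced to integer tokens and direct evaluation instead of AST construction (all names here are illustrative):

def parse_add(tokens, pos=0):
    # addExpr = term { ('+' | '-') term }
    value, pos = parse_term(tokens, pos)
    while pos < len(tokens) and tokens[pos] in ("+", "-"):
        op, pos = tokens[pos], pos + 1
        rhs, pos = parse_term(tokens, pos)
        value = value + rhs if op == "+" else value - rhs
    return value, pos

def parse_term(tokens, pos):
    # term = factor { ('*' | '/') factor }
    value, pos = int(tokens[pos]), pos + 1
    while pos < len(tokens) and tokens[pos] in ("*", "/"):
        op, pos = tokens[pos], pos + 1
        rhs, pos = int(tokens[pos]), pos + 1
        value = value * rhs if op == "*" else value / rhs
    return value, pos

# "1 + 2 * 3" groups as 1 + (2 * 3) = 7, not (1 + 2) * 3 = 9.
print(parse_add(["1", "+", "2", "*", "3"])[0])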
whileStatement = "while" expression block | def parseWhileStatement( ): # parse rountine for while and uses the while class to print out the appropriate string
tok = tokens.peek( )
if debug: print( "whileStatement: ", tok )
start = match( "while" )
expr = expression( )
blk = parseBlock( )
tok = tokens.peek( )
whileString = whileStatement( start, expr, blk )
return whileString | [
"def _parse_while_statement(self):\n self._match('TK_WHILE')\n self._parse_expression()\n self._match('TK_DO')\n self._parse_statement()",
"def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the body.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=body_node)\n else:\n branches.update(else_=else_node)\n else:\n branches.update(enter=body_node, else_=else_node, error=self._raise)\n\n loop_node = self._ast_node(statement, **branches)\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node",
"def link_while_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.body)",
"def convert_while(self, stmt):\n head = Branch(stmt.test)\n tail = Nop()\n body = self.convert_block(stmt.body) # TODO: add break and continue targets\n orelse = self.convert_block(stmt.orelse)\n\n link(head, body)\n link(head, orelse)\n link(body, head)\n link(orelse, tail)\n\n return Block(head, tail)",
"def compile_while(self) -> bool:\n self.__tag(\"whileStatement\")\n self.__ind_level += 1\n\n # 'while'\n self.__w_keyword(KeyWordType.WHILE)\n\n # '('\n self.__token_stream.advance()\n self.__w_symbol(\"(\")\n\n # expression\n self.__token_stream.advance()\n if not self.compile_expression():\n self.__token_stream.advance()\n\n # ')'\n self.__w_symbol(\")\")\n\n # '{'\n self.__token_stream.advance()\n self.__w_symbol(\"{\")\n\n # statements\n self.__token_stream.advance()\n if not self.compile_statements():\n self.__token_stream.advance()\n\n # '}'\n self.__w_symbol(\"}\")\n\n self.__ind_level -= 1\n self.__tag(\"whileStatement\", end=True)\n return False",
"def compile_while(self) -> None:\n self._consume('while')\n self._consume('(')\n\n while_lbl = f\"WHILE_{self._while_count}\"\n while_false_lbl = f\"WHILE_FALSE{self._while_count}\"\n self._while_count += 1\n self.writer.write_label(while_lbl)\n\n self.compile_expression()\n self._consume(')')\n\n self._consume('{')\n self.writer.write_if(while_false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(while_lbl)\n self.writer.write_label(while_false_lbl)\n\n self._consume('}')",
"def compileWhile(self):\n parent = self.current_node\n self.current_node = et.SubElement(self.current_node, 'whileStatement')\n self.writeNode() # Writes 'while'\n self.tokenizer.advance() # Advances to (\n self.writeNode() # Writes (\n self.tokenizer.advance() # Advances to the first token in an expression.\n self.compileExpression() # Ends at )\n self.writeNode() # Writes )\n self.tokenizer.advance() # Advances to {\n self.writeNode() # Writes {\n self.tokenizer.advance() # Advances to a statements block.\n self.compileStatements() # Ends at the } closing the while clause.\n self.writeNode() # Writes }.\n self.current_node = parent",
"def load_while(node, symast, symimp):\n assert node.tag == \"While\"\n xmlcond = node.find(\"./Condition/*\")\n xmlbody = node.find(\"./Body/*\")\n pycond = load_boolean_expression(xmlcond, symast)\n pybody = load_sub(xmlbody, symast, symimp)\n return ast.make_while(pycond, [pybody])",
"def test_42_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1+true) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: BinaryOp(+,IntLiteral(1),BooleanLiteral(True))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,442))",
"def test_41_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: While(IntLiteral(1),[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,441))",
"def _tf_while_stmt(cond, body, local_writes):\n\n # Non-v2 while_loop unpacks the results when there is only one return value.\n # This enforces consistency across versions.\n opts = {'return_same_structure': True}\n\n return tf.while_loop(cond, body, local_writes, **opts)",
"def compile_while(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'while' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.code_writer.write_label(lab1)\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.code_writer.write_if(lab2)\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab1)\r\n self.code_writer.write_label(lab2)",
"def test_do_while_stmt(self):\n input = \"\"\"void main() {\n do{\n print(\"statement 1\");\n }\n {\n print(\"statement 2\");\n }\n while (true);\n}\n\"\"\"\n expect = \"\"\"Program([FuncDecl(Id(main),[],VoidType,Block([Dowhile([Block([CallExpr(Id(print),[StringLiteral(statement 1)])]),Block([CallExpr(Id(print),[StringLiteral(statement 2)])])],BooleanLiteral(true))]))])\"\"\"\n self.assertTrue(TestAST.checkASTGen(input,expect,328))",
"def compile_while(self):\r\n start_label = \"WHILE_\" + str(self.__while_count)\r\n end_label = \"WHILE_END_\" + str(self.__while_count)\r\n self.__while_count += 1\r\n self.__advance(n=2) # Advance after the '(' token\r\n self.__vmwriter.write_label(start_label)\r\n self.compile_expression()\r\n self.__advance(n=2) # Advance after the '{' token\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(end_label)\r\n self.compile_statements()\r\n self.__advance() # Advance after the '}' token\r\n self.__vmwriter.write_goto(start_label)\r\n self.__vmwriter.write_label(end_label)",
"def test_do_while_stmt2(self):\n input = \"\"\"int main () {\n /* local variable definition */\n int a;\n a = 0;\n /* do loop execution */\n do {\n printf(\"value of a: \", a);\n a = a + 1;\n }while( a < 20 );\n return 0;\n}\n\"\"\"\n expect=\"\"\"Program([FuncDecl(Id(main),[],IntType,Block([VarDecl(Id(a),IntType),BinaryOp(=,Id(a),IntLiteral(0)),Dowhile([Block([CallExpr(Id(printf),[StringLiteral(value of a: ),Id(a)]),BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)))])],BinaryOp(<,Id(a),IntLiteral(20))),Return(IntLiteral(0))]))])\"\"\"\n self.assertTrue(TestAST.checkASTGen(input,expect,329))",
"def test_do_while_stmt(self):\n input = \"\"\"\n Function: main\n Parameter: num\n Body:\n Do\n num = num + 1;\n While num * 100 EndDo.\n EndBody.\n \"\"\"\n expect = str(TypeMismatchInStatement(\n Dowhile(([], [Assign(Id(\"num\"), BinaryOp(\"+\", Id(\"num\"), IntLiteral(1)))]),\n BinaryOp(\"*\", Id(\"num\"), IntLiteral(100)))))\n self.assertTrue(TestChecker.test(input, expect, 436))",
"def _in_while_loop(control_flow_node_map, op_name):\n return op_name in control_flow_node_map and \"LoopCond\" in control_flow_node_map[op_name]",
"def __compile_while(self, xml_tree):\n tk = self.__tokenizer\n # 'while'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # '('\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # expression\n self.__compile_expression(SubElement(xml_tree, \"expression\"))\n # ')'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # '{'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # statements\n self.__compile_statements(SubElement(xml_tree, \"statements\"))\n # '}'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()",
"def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ifStatement = "if" expression block [ "else" block ] | def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string
tok = tokens.peek( )
if debug: print( "ifStatement: ", tok )
start = match( "if" )
expr = expression( )
blk = parseBlock( )
elseblk = None
tok = tokens.peek( )
if tok == "else":
match( "else" )
elseblk = parseBlock( )
return ifStatement(expr, blk, elseblk) | [
"def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()",
"def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"",
"def __compile_if(self, xml_tree):\n tk = self.__tokenizer\n # 'if'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # '('\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # expression\n self.__compile_expression(SubElement(xml_tree, \"expression\"))\n # ')'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # '{'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # statements\n self.__compile_statements(SubElement(xml_tree, \"statements\"))\n # '}'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # ('else''{'statements'}')?\n if tk.get_token_type() == KEYWORD and tk.get_next_token() == ELSE:\n # 'else'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # '{'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()\n # statements\n self.__compile_statements(SubElement(xml_tree, \"statements\"))\n # '}'\n SubElement(xml_tree, tk.get_token_type()).text = tk.get_next_token()\n tk.advance()",
"def test_conditional_instruction(self):\n LEXER.input('if x:')\n self.checks_tokens(['IF', 'ID', 'COL'])\n LEXER.input('else:')\n self.checks_tokens(['ELSE', 'COL'])",
"def IF(logical_statement, expression_true, expression_false):\n if(type(logical_statement) == bool):\n if(logical_statement == True):\n return(expression_true)\n else:\n return(expression_false)\n else:\n print('Invalid type: logical statement does not evaluate to True or False.')",
"def else_if_statement(outfile: TextIO, condition: str, indent: int=0):\n write_indent(outfile, indent)\n outfile.write(\"}\\n\")\n write_indent(outfile, indent)\n outfile.write(\"else \")\n if_statement(outfile, condition, indent)",
"def test_if_paren_statement():\n r = convert_code(\n \"{if (foo and bar) or foo and (bar or (foo and bar))}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if (foo and bar) or foo and (bar or (foo and bar)) %}\\nbar\\n{% else %}\\nfoo{% endif %}\"",
"def identify_ifelse_block():\n pass",
"def conditional(self) -> global___Statement.Conditional:",
"def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)",
"def if_statement(outfile: TextIO, condition: str, indent: int=0):\n write_indent(outfile, indent)\n outfile.write(\"if(\" + str(condition) + \") {\\n\")",
"def test_nested_if_str(dummy_code_block, another_dummy_code_block):\n else_ = Else(\n code_block=CodeBlock(\n instructions=[If(method=return_true, code_block=another_dummy_code_block,)]\n )\n )\n elif_ = IfElifElse(method=return_false, code_block=dummy_code_block, else_=else_,)\n assert (\n str(elif_)\n == dedent(\n \"\"\"\n if return_false():\n return_true()\n return_true()\n return_true()\n else:\n if return_true():\n return_true()\n return_true()\n return_true()\n \"\"\"\n ).strip()\n )",
"def convert_if(self, stmt):\n head = Branch(stmt.test)\n tail = Nop()\n\n true_branch = self.convert_block(stmt.body)\n link(head, true_branch)\n link(true_branch, tail)\n\n if len(stmt.orelse) > 0:\n false_branch = self.convert_block(stmt.orelse)\n link(head, false_branch)\n link(false_branch, tail)\n else:\n link(head, tail)\n\n return Block(head, tail)",
"def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result",
"def test_parse_will_build_an_if_else_AST(self):\n lexer = LexerStateMachine(' if ( 2 == 3 ) { }\\\n else { } ', self.context)\n parser = Parser(lexer, self.manager)\n self.manager.setParser(parser)\n\n token = parser.parse(0)\n\n\n self.assertEqual('if', token.id)\n self.assertEqual('(', token.data[0].id)\n self.assertEqual('==', token.data[0].data[0].id)\n self.assertEqual(2, token.data[0].data[0].data[0].data[0])\n self.assertEqual(3, token.data[0].data[0].data[1].data[0])\n self.assertEqual('{', token.data[1][0].id)\n self.assertEqual('else', token.data[2].id)\n self.assertEqual('{', token.data[2].data[0][0].id)",
"def with_if_statement():\n if cond():\n return true_func()\n else:\n return false_func()",
"def test_if_else_str(dummy_code_block):\n else_if = make_dummy_if_else(call_if=False)\n assert (\n str(else_if)\n == dedent(\n \"\"\"\n if return_false():\n return_true()\n return_true()\n return_true()\n else:\n return_true()\n return_true()\n return_true()\n \"\"\"\n ).strip()\n )",
"def compileIf(self):\n parent = self.current_node\n self.current_node = et.SubElement(self.current_node, 'ifStatement')\n self.writeNode() # Writes 'if'.\n self.tokenizer.advance() # Advances to (\n self.writeNode() # Writes (\n self.tokenizer.advance() # Advances to expression\n self.compileExpression() # Finishes at )\n self.writeNode() # Writes )\n self.tokenizer.advance() # Advances to {\n self.writeNode() # Writes {\n self.tokenizer.advance() # Advances to statements block.\n self.compileStatements() # When this finishes the current token is }\n self.writeNode() # Writes }\n self.tokenizer.advance() # Advances to 'else' or to another statement or to } ending the statements block.\n if self.tokenizer.tokenVal == 'else':\n self.writeNode() # Writes 'else'\n self.tokenizer.advance() # Advances to {\n self.writeNode() # Writes {\n self.tokenizer.advance() # Advances to a statements block\n self.compileStatements() # Ends at }\n self.writeNode() # Writes }\n self.tokenizer.advance() # Advances to a new statement or to the } after the statements block.\n self.current_node = parent",
"def exec_if(self, stmt: IfStmt):\n self.enter_block(stmt)\n\n condition = bool(self.evaluator.eval_node(stmt.condition))\n self.if_status = (len(self.stack) - 1, condition)\n\n if not condition:\n self.exit_block()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
assign = ident "=" expression eoln | def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string
tok = tokens.peek( )
if debug: print( "assign: ", tok )
if re.match( Lexer.identifier, tok ):
ident = VarRef( tok )
else:
error( "Invalid identifier" )
tok = tokens.next( )
equals = match( "=" )
tok = tokens.peek( )
expr = expression( )
match( ";" )
equals = VarRef( equals )
statement = assign( equals, ident, expr )
return statement | [
"def handle_assignment(stmt):\n\n identifier = ast.Name(id=stmt[0][1], ctx=ast.Store())\n value = Parser.handle_arithmetic(stmt[2:])\n return ast.Assign(targets=[identifier], value=value)",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))",
"def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node",
"def exec_assign(self, stmt: AssignStmt):\n value = None if stmt.value is None else self.evaluator.eval_node(stmt.value)\n self.assign(stmt.variable, value)",
"def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts",
"def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def assign(lvalue, rvalue):\n return AssignOp(lvalue, rvalue)",
"def visit_Assign(self, node):\r\n self.visit(node.node)\r\n self.visit(node.target)",
"def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def assignment_node():\n return RedBaron('a = 1')[0]",
"def parse_set(self):\r\n lineno = self.stream.next().lineno\r\n target = self.parse_assign_target()\r\n self.stream.expect('assign')\r\n expr = self.parse_tuple()\r\n return nodes.Assign(target, expr, lineno=lineno)",
"def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment",
"def _expr_assignment(traverser, node):\n\n traverser._debug('ASSIGNMENT_EXPRESSION')\n traverser.debug_level += 1\n\n traverser._debug('ASSIGNMENT>>PARSING RIGHT')\n right = traverser._traverse_node(node['right'])\n right = JSWrapper(right, traverser=traverser)\n\n # Treat direct assignment different than augmented assignment.\n if node['operator'] == '=':\n from predefinedentities import GLOBAL_ENTITIES, is_shared_scope\n\n global_overwrite = False\n readonly_value = is_shared_scope(traverser)\n\n node_left = node['left']\n traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])\n\n if node_left['type'] == 'Identifier':\n # Identifiers just need the ID name and a value to push.\n # Raise a global overwrite issue if the identifier is global.\n global_overwrite = traverser._is_global(node_left['name'])\n\n # Get the readonly attribute and store its value if is_global\n if global_overwrite:\n global_dict = GLOBAL_ENTITIES[node_left['name']]\n if 'readonly' in global_dict:\n readonly_value = global_dict['readonly']\n\n traverser._declare_variable(node_left['name'], right, type_='glob')\n elif node_left['type'] == 'MemberExpression':\n member_object = trace_member(traverser, node_left['object'],\n instantiate=True)\n global_overwrite = (member_object.is_global and\n not ('overwritable' in member_object.value and\n member_object.value['overwritable']))\n member_property = _get_member_exp_property(traverser, node_left)\n traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'\n % member_property)\n traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)\n\n # Don't do the assignment if we're facing a global.\n if not member_object.is_global:\n if member_object.value is None:\n member_object.value = JSObject()\n\n if not member_object.is_global:\n member_object.value.set(member_property, right, traverser)\n else:\n # It's probably better to do nothing.\n pass\n\n elif 'value' in member_object.value:\n member_object_value = _expand_globals(traverser,\n member_object).value\n if member_property in member_object_value['value']:\n\n # If it's a global and the actual member exists, test\n # whether it can be safely overwritten.\n member = member_object_value['value'][member_property]\n if 'readonly' in member:\n global_overwrite = True\n readonly_value = member['readonly']\n\n traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %\n global_overwrite)\n traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %\n readonly_value)\n\n if callable(readonly_value):\n readonly_value = readonly_value(traverser, right, node['right'])\n\n if readonly_value and global_overwrite:\n\n kwargs = dict(\n err_id=('testcases_javascript_actions',\n '_expr_assignment',\n 'global_overwrite'),\n warning='Global variable overwrite',\n description='An attempt was made to overwrite a global '\n 'variable in some JavaScript code.')\n\n if isinstance(readonly_value, DESCRIPTION_TYPES):\n kwargs['description'] = readonly_value\n elif isinstance(readonly_value, dict):\n kwargs.update(readonly_value)\n\n traverser.warning(**kwargs)\n\n return right\n\n lit_right = right.get_literal_value()\n\n traverser._debug('ASSIGNMENT>>PARSING LEFT')\n left = traverser._traverse_node(node['left'])\n traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')\n traverser.debug_level -= 1\n\n if isinstance(left, JSWrapper):\n if left.dirty:\n return left\n\n lit_left = left.get_literal_value()\n token = node['operator']\n\n # Don't perform an operation on None. 
Python freaks out\n if lit_left is None:\n lit_left = 0\n if lit_right is None:\n lit_right = 0\n\n # Give them default values so we have them in scope.\n gleft, gright = 0, 0\n\n # All of the assignment operators\n operators = {'=': lambda: right,\n '+=': lambda: lit_left + lit_right,\n '-=': lambda: gleft - gright,\n '*=': lambda: gleft * gright,\n '/=': lambda: 0 if gright == 0 else (gleft / gright),\n '%=': lambda: 0 if gright == 0 else (gleft % gright),\n '<<=': lambda: int(gleft) << int(gright),\n '>>=': lambda: int(gleft) >> int(gright),\n '>>>=': lambda: float(abs(int(gleft)) >> gright),\n '|=': lambda: int(gleft) | int(gright),\n '^=': lambda: int(gleft) ^ int(gright),\n '&=': lambda: int(gleft) & int(gright)}\n\n # If we're modifying a non-numeric type with a numeric operator, return\n # NaN.\n if (not isinstance(lit_left, NUMERIC_TYPES) and\n token in NUMERIC_OPERATORS):\n left.set_value(get_NaN(traverser), traverser=traverser)\n return left\n\n # If either side of the assignment operator is a string, both sides\n # need to be casted to strings first.\n if (isinstance(lit_left, types.StringTypes) or\n isinstance(lit_right, types.StringTypes)):\n lit_left = _get_as_str(lit_left)\n lit_right = _get_as_str(lit_right)\n\n gleft, gright = _get_as_num(left), _get_as_num(right)\n\n traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)\n if token not in operators:\n # We don't support that operator. (yet?)\n traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)\n return left\n elif token in ('<<=', '>>=', '>>>=') and gright < 0:\n # The user is doing weird bitshifting that will return 0 in JS but\n # not in Python.\n left.set_value(0, traverser=traverser)\n return left\n elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and\n (abs(gleft) == float('inf') or abs(gright) == float('inf'))):\n # Don't bother handling infinity for integer-converted operations.\n left.set_value(get_NaN(traverser), traverser=traverser)\n return left\n\n traverser._debug('ASSIGNMENT::L-value global? (%s)' %\n ('Y' if left.is_global else 'N'), 1)\n try:\n new_value = operators[token]()\n except Exception:\n traverser.system_error(exc_info=sys.exc_info())\n new_value = None\n\n # Cap the length of analyzed strings.\n if (isinstance(new_value, types.StringTypes) and\n len(new_value) > MAX_STR_SIZE):\n new_value = new_value[:MAX_STR_SIZE]\n\n traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)\n left.set_value(new_value, traverser=traverser)\n return left\n\n # Though it would otherwise be a syntax error, we say that 4=5 should\n # evaluate out to 5.\n return right",
"def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))",
"def assign_var(var, expr, requires_incref=True):\n return '{} := {}; {}'.format(var.name, expr,\n inc_ref(var) if requires_incref else '')",
"def syntax_var_assign():\n a = 'Hello'\n print(f'{a} is stored at {hex(id(a))}')\n a = \"World\"\n print(f'{a} is stored at {hex(id(a))}')\n\n ## Output\n # Hello is stored at 0x10d251340\n # World is stored at 0x10d251378\n\n ## Notes\n # id()\n # Return the “identity” of an object. This is an integer (or long integer) which is guaranteed\n # to be unique and constant for this object during its lifetime.",
"def create_Assign(left_hand_side, right_hand_side):\n right_hand_side.ctx = ast.Load()\n left_hand_side.ctx = ast.Store()\n return ast.Assign(targets=[left_hand_side], value=right_hand_side)",
"def assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
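For illustration only, here is a minimal, self-contained sketch of the recursive-descent pattern behind the assign = ident "=" expression eoln entry above. The tokenizer, helper names, and AST tuples are hypothetical stand-ins, not the original parser's Lexer/VarRef/assign classes.

import re

# Hypothetical token stream and assign rule: ident "=" expression ";"
TOKEN_RE = re.compile(r"\s*([A-Za-z_]\w*|\d+|[=;+\-])")

def tokenize(text):
    return TOKEN_RE.findall(text)

class Tokens:
    def __init__(self, toks):
        self.toks = list(toks)
        self.pos = 0
    def peek(self):
        return self.toks[self.pos] if self.pos < len(self.toks) else None
    def next(self):
        tok = self.peek()
        self.pos += 1
        return tok

def parse_expression(tokens):
    # Simplified: a single identifier or integer literal counts as an expression.
    tok = tokens.next()
    if tok is None or not re.match(r"[A-Za-z_]\w*|\d+", tok):
        raise SyntaxError("expected expression, got %r" % tok)
    return ("expr", tok)

def parse_assign(tokens):
    ident = tokens.next()
    if not re.match(r"[A-Za-z_]\w*$", ident or ""):
        raise SyntaxError("invalid identifier: %r" % ident)
    if tokens.next() != "=":
        raise SyntaxError("expected '='")
    expr = parse_expression(tokens)
    if tokens.next() != ";":
        raise SyntaxError("expected ';'")
    return ("assign", ident, expr)

print(parse_assign(Tokens(tokenize("x = 42 ;"))))   # ('assign', 'x', ('expr', '42'))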
statement = ifStatement | whileStatement | assign | def statement( ): # parse routine for statement that makes sure the token is one of the following; eventually an error will be caught
tok = tokens.peek( )
if debug: print( "statement: ", tok )
if tok == "if":
stat = parseIfStatement( )
return stat
elif tok == "while":
stat = parseWhileStatement( )
return stat
else:
stat = parseAssign( )
return stat | [
"def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()",
"def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)",
"def test_doubleAssignmentConditionally(self):\r\n self.flakes('''\r\n x = 10\r\n if True:\r\n x = 20\r\n ''')",
"def link_while_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.body)",
"def statement_eval(node, table):\n\n if node.kind == \"MOD_OP\":\n table = mod_op_eval(node, table)\n\n elif node.kind == \"SWAP_OP\":\n table = swap_op_eval(node, table)\n\n elif node.kind == \"FROM_LOOP\":\n block_node = node.block\n\n # TODO: check start condition\n\n while True:\n # Execute the block.\n table = block_eval(block_node, table)\n\n # Break if the end condition is satisfied.\n if expr_eval(node.end_condition, table):\n break\n\n elif node.kind == \"FOR_LOOP\":\n var_dec = node.var_declaration\n until_node = node.end_condition\n increment_node = node.increment_statement\n\n # Initialize the variable.\n table[var_dec.name] = expr_eval(var_dec.expr, table)\n\n while True:\n # Execute the block and increment statement.\n if not node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n \n table = block_eval(node.block, table)\n\n if node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n\n # Break if the end condition is satisfied.\n if table.refs[until_node.name] == expr_eval(until_node.expr, table):\n break\n\n table = var_condition_eval(until_node, table)\n\n elif node.kind == \"IF\":\n # Check the condition; if it fails, execute the\n # 'false' branch if it exists.\n\n if expr_eval(node.condition, table):\n table = block_eval(node.true, table)\n elif \"false\" in node.data:\n table = block_eval(node.false, table)\n\n elif node.kind == \"DO/UNDO\":\n # Do the action_block, then do the yielding block,\n # then undo the action block.\n table = block_eval(node.action_block, table)\n\n if \"yielding_block\" in node.data:\n table = block_eval(node.yielding_block, table)\n\n table = block_eval(inverter.unblock(node.action_block), table)\n\n elif node.kind == \"RESULT\":\n # Overwrites the variable 'result' with the given expression.\n table[\"result\"] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_DEC\":\n table[node.name] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_CONDITION\":\n table = var_condition_eval(node, table)\n\n elif node.kind == \"BLOCK\":\n table = block_eval(node, table)\n\n elif node.kind == \"FUNCTION_CALL\":\n # Call the function, then update table with the results.\n function = shared.program.functions[node.name]\n\n output = function.evaluate(\n node.backwards,\n node.ref_args,\n [expr_eval(arg, table) for arg in node.ref_args],\n [expr_eval(arg, table) for arg in node.const_args]\n )\n\n # After evaluating the function, the output table will\n # contain changed variables.\n table.update_refs(output)\n\n elif node.kind == \"UN\":\n inverted_node = inverter.unstatement(node.statement)\n table = statement_eval(inverted_node, table)\n\n elif node.kind == \"EXIT\":\n if expr_eval(node.condition, table):\n # We return by raising an exception.\n raise shared.ReturnException(expr_eval(node.value, table))\n\n elif node.kind == \"ENTER\":\n # Do nothing when we actually encounter these.\n pass\n\n return table",
"def conditional(self) -> global___Statement.Conditional:",
"def convert_while(self, stmt):\n head = Branch(stmt.test)\n tail = Nop()\n body = self.convert_block(stmt.body) # TODO: add break and continue targets\n orelse = self.convert_block(stmt.orelse)\n\n link(head, body)\n link(head, orelse)\n link(body, head)\n link(orelse, tail)\n\n return Block(head, tail)",
"def switch(cond, ift, iff):",
"def exec_assign(self, stmt: AssignStmt):\n value = None if stmt.value is None else self.evaluator.eval_node(stmt.value)\n self.assign(stmt.variable, value)",
"def assign(lvalue, rvalue):\n return AssignOp(lvalue, rvalue)",
"def test_do_while_stmt2(self):\n input = \"\"\"int main () {\n /* local variable definition */\n int a;\n a = 0;\n /* do loop execution */\n do {\n printf(\"value of a: \", a);\n a = a + 1;\n }while( a < 20 );\n return 0;\n}\n\"\"\"\n expect=\"\"\"Program([FuncDecl(Id(main),[],IntType,Block([VarDecl(Id(a),IntType),BinaryOp(=,Id(a),IntLiteral(0)),Dowhile([Block([CallExpr(Id(printf),[StringLiteral(value of a: ),Id(a)]),BinaryOp(=,Id(a),BinaryOp(+,Id(a),IntLiteral(1)))])],BinaryOp(<,Id(a),IntLiteral(20))),Return(IntLiteral(0))]))])\"\"\"\n self.assertTrue(TestAST.checkASTGen(input,expect,329))",
"def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the body.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=body_node)\n else:\n branches.update(else_=else_node)\n else:\n branches.update(enter=body_node, else_=else_node, error=self._raise)\n\n loop_node = self._ast_node(statement, **branches)\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node",
"def test_compound_statement(self) -> None:\n\n before = \"\"\"\n if x == 1:\n pass\n \"\"\"\n\n expected_after = \"\"\"\n y = 1\n if x == 1:\n pass\n z = 1\n \"\"\"\n\n actual_after = self.insert_statements(\n InsertAssignAroundIntegerVisitor(CodemodContext()), before\n )\n self.assertCodeEqual(expected_after, actual_after)",
"def with_if_statement():\n if cond():\n return true_func()\n else:\n return false_func()",
"def test_do_while_stmt(self):\n input = \"\"\"void main() {\n do{\n print(\"statement 1\");\n }\n {\n print(\"statement 2\");\n }\n while (true);\n}\n\"\"\"\n expect = \"\"\"Program([FuncDecl(Id(main),[],VoidType,Block([Dowhile([Block([CallExpr(Id(print),[StringLiteral(statement 1)])]),Block([CallExpr(Id(print),[StringLiteral(statement 2)])])],BooleanLiteral(true))]))])\"\"\"\n self.assertTrue(TestAST.checkASTGen(input,expect,328))",
"def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))",
"def convert_if(self, stmt):\n head = Branch(stmt.test)\n tail = Nop()\n\n true_branch = self.convert_block(stmt.body)\n link(head, true_branch)\n link(true_branch, tail)\n\n if len(stmt.orelse) > 0:\n false_branch = self.convert_block(stmt.orelse)\n link(head, false_branch)\n link(false_branch, tail)\n else:\n link(head, tail)\n\n return Block(head, tail)",
"def test_42_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1+true) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: BinaryOp(+,IntLiteral(1),BooleanLiteral(True))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,442))",
"def assign(a, b):\n @always_comb\n def assign():\n a.next = b\n\n return assign"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
stmtList = { statement } | def stmtList( ):
tok = tokens.peek( )
if debug: print( "stmtList: ", tok )
stat = statement( )
return stat | [
"def add_statements(self, stmts):\n for stmt in stmts:\n self.statements.append(stmt)",
"def get_statement_list(self, insupdel=0):\n #NOTE: statement = [record, {...}]\n result = []\n try:\n if insupdel == StatementType.INSERT:\n statements = self.statements_insert\n elif insupdel == StatementType.UPDATE:\n statements = self.statements_update\n elif insupdel == StatementType.DELETE:\n statements = self.statements_delete\n if statements is not None:\n for statement in statements:\n result.append(statement[1])\n except Exception as ex:\n print \"Error retrieving statement list: \", ex\n return result",
"def statements_to_txns(statement_list):\n new_list = [[stmt] for stmt in statement_list]\n return new_list",
"def add_statements(self, stmts):\n self.statements += stmts",
"def complete_statement(sql): # real signature unknown; restored from __doc__\n pass",
"def stmts(level, stmtList):\n if stmtList is None:\n return \"\"\n retStr = \"\"\n for aStmt in stmtList:\n # Call stmt to generate the statement, appending the result to the\n # overall resulting string.\n retStr += stmt(level + 1, aStmt)\n return retStr",
"def Statements(self) -> CodeStatementCollection:",
"def statements(self, **kwargs) -> List[str]:\n return [sqlparse.format(s.value, **kwargs) for s in self.statements_parsed]",
"def SetStatements(self) -> CodeStatementCollection:",
"def parse_statement_list(self):\n\n self.debug(f\"Parsing a statement list from {self.tokens}\")\n root = Statements()\n statements = [self.parse_statement()]\n while self.token_at.type == Token.DIAMOND:\n self.eat(Token.DIAMOND)\n statements.append(self.parse_statement())\n\n root.children = statements\n return root",
"def GetStatements(self) -> CodeStatementCollection:",
"def serialize_statements(statement_list):\n return json.dumps([statement_to_dictionary(st) for st in statement_list])",
"def execute_list(self, stmt: List[loxStmtAST.Stmt]) -> None:\n for st in stmt:\n st.accept(self)",
"def _query_stmt_types(agent_strs, params, stmt_types):\n stmts = []\n for stmt_type in stmt_types:\n params['type'] = stmt_type\n params['on_limit'] = 'error' # This really shouldn't be an issue.\n new_stmts = _make_stmts_query(agent_strs, params)\n logger.info(\"Found %d %s statements.\" % (len(new_stmts), stmt_type))\n stmts.extend(new_stmts)\n return stmts",
"def _sqllist(values):\n items = []\n items.append('(')\n for i, v in enumerate(values):\n if i != 0:\n items.append(', ')\n items.append(sqlparam(v))\n items.append(')')\n return SQLQuery(items)",
"def prepare(self, connection, stmt):\n return Statement(connection, stmt)",
"def Statement(self) -> CodeStatement:",
"def prepare_statement(cursor, table_name: str, statement_name: str, n_vars: int):\n\n var_str = ', '.join(['?'] * n_vars) \n\n # cmd = f\"\"\"PREPARE {statement_name} FROM \"INSERT INTO CurrencyExchangeRatesArchive (@in_date, @in_base, @in_target, @in_rate)\";\"\"\"\n # cmd = f\"\"\"PREPARE {statement_name} FROM \"INSERT INTO CurrencyExchangeRatesArchive VALUES (?, ?, ?, ?)\";\"\"\"\n cmd = f\"\"\"PREPARE {statement_name} FROM \"INSERT INTO {table_name} VALUES ({var_str})\";\"\"\"\n print(' ', cmd)\n cursor.execute(cmd)",
"def execute_query_list(cur, conn, query_list):\n try:\n for query in query_list:\n cur.execute(query)\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error executing query list\")\n print(e)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
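The two entries above show the statement dispatch and the stmtList = { statement } rule. As a hedged illustration, the repetition is commonly written as a loop that keeps consuming statements until the token stream runs out or a terminator appears; the helper names below are hypothetical stand-ins, not the original parse routines, and the statement bodies are deliberately elided.

# Hypothetical sketch: statement dispatch plus the { statement } repetition.
def parse_if(tokens):
    tokens.pop(0)                 # consume 'if'; body elided
    return ("if",)

def parse_while(tokens):
    tokens.pop(0)                 # consume 'while'; body elided
    return ("while",)

def parse_assign(tokens):
    name = tokens.pop(0)          # consume identifier; rest of rule elided
    return ("assign", name)

def parse_statement(tokens):
    tok = tokens[0]
    if tok == "if":
        return parse_if(tokens)
    elif tok == "while":
        return parse_while(tokens)
    else:
        return parse_assign(tokens)

def parse_stmt_list(tokens, terminators=("end", None)):
    # stmtList = { statement }: collect zero or more statements.
    statements = []
    while tokens and tokens[0] not in terminators:
        statements.append(parse_statement(tokens))
    return statements

print(parse_stmt_list(["if", "while", "x", "end"]))
# [('if',), ('while',), ('assign', 'x')]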
Returns ssh username for connecting to cluster workers. | def get_ssh_user():
return getpass.getuser() | [
"def get_ssh_user(self):\n if self.configuration.get(\"pg_ssh_user\"):\n return \"%s@\" % self.configuration.get(\"pg_ssh_user\")\n else:\n return \"%s@\" % DEFAULT_SSH_USER",
"def master_username(self) -> str:\n return pulumi.get(self, \"master_username\")",
"def executor_nick_name(self):\n return self._executor_nick_name",
"def username(self) -> pulumi.Input['AddressSpaceSpecConnectorsCredentialsUsernameArgs']:\n return pulumi.get(self, \"username\")",
"def get_username(self) -> str:\n try:\n return self[\"user\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon user in system marathon config\"\n )",
"def head_node_user(self):\n return self._get_param(\"ClusterUser\")",
"def username(self) -> ConfigNodePropertyString:\n return self._username",
"def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_USERNAME')",
"def running_username():\n # Do not move these deferred imports. It allows running against a Salt\n # onedir build in salt's repo checkout.\n import salt.utils.user # pylint: disable=import-outside-toplevel\n\n return salt.utils.user.get_user()",
"def username(self) -> str:\n return pulumi.get(self, \"username\")",
"def get_remote_user(self, username):\n return 'ec2-user'",
"def get_username():\n return _get_git_config(\"user.name\")",
"def username(self):\n return self._authenticator.username()",
"def login_username(self):\n return self._login_username",
"def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_USERNAME')",
"def remote_hostname(self):\n return self.m_iface.remote_hostname()",
"def get_username():\n click.echo(\" \".join([\"database username:\", config.get_username()]))",
"def git_username(self):\n return self._git_username",
"def get_username(self) -> str:\n return self._username"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the ssh key for connecting to cluster workers. If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key will be used for syncing across different nodes. | def get_ssh_key():
path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
os.path.expanduser("~/ray_bootstrap_key.pem"))
if os.path.exists(path):
return path
return None | [
"def ssh_key(self) -> str:\n return pulumi.get(self, \"ssh_key\")",
"def cluster_key(self):\n node = self.get_node()\n try:\n key = node.oget(\"cluster\", \"secret\")\n return self.prepare_key(key)\n except Exception as exc:\n pass\n import uuid\n key = uuid.uuid1().hex\n from cluster import ClusterSvc\n svc = ClusterSvc()\n svc.set_multi([\"cluster.secret=\"+key], validation=False)\n return self.prepare_key(key)",
"def cluster_ssh_password(self) -> Optional[Any]:\n return pulumi.get(self, \"cluster_ssh_password\")",
"def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")",
"def get_ssh_key_id(self):\n try:\n key = self.driver.ex_describe_keypairs(self.ssh_key_name)\n return key['keyName']\n except Exception:\n # This key has not been uploaded yet\n return",
"def kms_key_id(self) -> str:\n return pulumi.get(self, \"kms_key_id\")",
"def hostkey(self):\n return self.__get_option('hostkey_file')",
"def kms_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"kms_key\")",
"def get_cluster_command(cls):\n if 'cluster_command' in cls.global_settings:\n return cls.global_settings['cluster_command']\n else:\n return None",
"def ssh_public_key(self) -> pulumi.Input['SshPublicKeyArgs']:\n return pulumi.get(self, \"ssh_public_key\")",
"def download_kube_key(self):\n if Cloud().target_cloud_gcp():\n return\n logger.info(\"Downloading cluster ssh key from s3 ...\")\n data = self._bucket.get_object(self._s3_cluster_ssh_key)\n assert data is not None, \"No kube ssh key at {}/{}\".format(self._bucket_name, self._s3_cluster_ssh_key)\n dir = os.path.dirname(self._key_file)\n if not os.path.exists(dir):\n os.makedirs(dir)\n with open(self._key_file, \"w\") as f:\n f.write(data)\n os.chmod(self._key_file, 0o0600)\n logger.info(\"Downloaded kube ssh key from %s/%s to %s\", self._bucket_name, self._s3_cluster_ssh_key, self._key_file)\n return self._key_file",
"def admin_user_ssh_public_key(self) -> Optional[str]:\n return pulumi.get(self, \"admin_user_ssh_public_key\")",
"def log_analytics_workspace_shared_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"log_analytics_workspace_shared_key\")",
"def cluster_id(self):\n return self._cluster_id",
"def cluster_resource_id(self) -> str:\n return pulumi.get(self, \"cluster_resource_id\")",
"def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")",
"def cluster_password(self) -> Optional[Any]:\n return pulumi.get(self, \"cluster_password\")",
"def server_side_encryption_aws_kms_key_id(self) -> typing.Optional[builtins.str]:\n result = self._values.get(\"server_side_encryption_aws_kms_key_id\")\n return result",
"def log_analytics_workspace_shared_key(self) -> Optional[str]:\n return pulumi.get(self, \"log_analytics_workspace_shared_key\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
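The get_ssh_key entry above follows a common environment-variable-with-fallback pattern. A minimal sketch of the same pattern is below; the variable name TUNE_CLUSTER_SSH_KEY and the default path come from the entry, while the function name and the expanduser handling of the env value are illustrative assumptions.

import os

def resolve_key_path(env_var="TUNE_CLUSTER_SSH_KEY",
                     default="~/ray_bootstrap_key.pem"):
    # Prefer the env var if set, otherwise fall back to the default location;
    # return None when neither points at an existing file.
    path = os.path.expanduser(os.environ.get(env_var, default))
    return path if os.path.exists(path) else None

# Example usage (hypothetical path):
os.environ["TUNE_CLUSTER_SSH_KEY"] = "/tmp/my_cluster_key.pem"
print(resolve_key_path())   # None unless /tmp/my_cluster_key.pem actually exists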
Writes the uuids and extras of the given nodes to a file (json). This is useful for import/export because extras are currently lost, so this can be used to save and restore the extras on the nodes. | def export_extras(nodes, filename='node_extras.txt'):
#outstring = ''#' node uuid | extras \n'
outdict = {}
for node in nodes:
if isinstance(node, int): #pk
node = load_node(node)
elif isinstance(node, basestring): #uuid
node = load_node(node)
if not isinstance(node, Node):
            print('skipped node {}, is not an AiiDA node, did not know what to do.'.format(node))
continue
uuid = node.uuid
extras_dict = node.get_extras()
outdict[uuid] = extras_dict
#line = '{} | {}\n'.format(uuid, extras_dict)
#outstring = outstring + line
#outfile = open(filename, 'w')
#outfile.write(outstring)
#outfile.close()
json.dump(outdict, open(filename,'w'))
return | [
"def write(node, filepath):\n data = read(node)\n\n if not data:\n return\n\n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4, sort_keys=True)\n\n return data",
"def import_extras(filename):\n\n all_extras = {}\n\n # read file\n #inputfile = open(filename, 'r')\n #lines = inputfile.readlines()\n #for line in lines[1:]:\n # splitted = line.split(' | ')\n # uuid = splitted[0].rstrip(' ')\n # extras = splitted[1].rstrip(' ')\n # #extras = dict(extras)\n # print(extras)\n # all_extras[uuid] = extras\n #inputfile.close()\n try:\n all_extras = json.load(open(filename))\n except:\n print('The file has to be loadabel by json. i.e json format (which it is not).')\n\n for uuid, extras in all_extras.iteritems():\n\n try:\n node = load_node(uuid)\n except:\n # Does not exists\n print('node with uuid {} does not exist in DB'.format(uuid))\n node = None\n continue\n if isinstance(node, Node):\n node.set_extras(extras)\n else:\n print('node is not instance of an AiiDA node')\n #print(extras)\n return",
"def write_node_features(node_features, node_file):\n dgl.data.utils.save_tensors(node_file, node_features)",
"def write_nodes(self):\n try:\n with self._nodes_lock:\n file_path = os.path.join(self.session_dir, \"nodes\")\n with open(file_path, \"w\") as f:\n for _id in sorted(self.nodes.keys()):\n node = self.nodes[_id]\n f.write(\"%s %s %s %s\\n\" % (_id, node.name, node.apitype, type(node)))\n except IOError:\n logging.exception(\"error writing nodes file\")",
"def write_to_json_file(self, filename):\n data = json_graph.node_link_data(self)\n with open(filename, 'w') as json_file:\n json.dump(data, json_file)",
"def save_config(node, force=False):\n filepath = os.path.join(\"nodes/\", env.host_string + \".json\")\n tmp_filename = 'tmp_{0}.json'.format(env.host_string)\n files_to_create = [tmp_filename]\n if not os.path.exists(filepath) or force:\n # Only save to nodes/ if there is not already a file\n print \"Saving node configuration to {0}...\".format(filepath)\n files_to_create.append(filepath)\n for node_file in files_to_create:\n with open(node_file, 'w') as f:\n f.write(json.dumps(node, indent=4))\n return tmp_filename",
"def write_to_file(g, path):\n\tgraph_data = json_graph.node_link_data(g)\n\twith open(path, 'w') as f:\n\t\tjson.dump(graph_data, f)",
"def _write_temp_files(self):\n\n # Write the serialsed edges.\n with open(self._input_addr, 'w') as f:\n for e1, e2 in self._serialized_edges:\n f.write(\"%d\\t%d\\n\" % (e1, e2))",
"def _writeNodes(nodes, cesiumIconColor, fullDir):\n\n\t# .js file path\n\tjsFilePath = '%s/displayNodes.js' % (fullDir)\n\tf = open(jsFilePath, 'w')\n\n\t# Head description\n\tjsStr = \"// This .js file is auto-generated by `createCesium()` from VeRoViz\\n\"\n\tjsStr += \"// Display nodes for cesium application\\n\\n\"\n\t\n\t# Display the nodes\n\tjsStr += \"function displayNodes() {\\n\"\n\tjsStr += \" var pin = new Array;\\n\"\n\n\tif (nodes is not None):\n\t\t# In case there are any skipped indices\n\t\tindNodes = nodes.copy().reset_index(drop=True)\t\n\t\tindNodes['cesiumIconText'] = indNodes['cesiumIconText'].astype('string')\n\t\tindNodes['popupText'] = indNodes['popupText'].astype('string')\n\t\t\n\n\t\tfor i in range(0, len(indNodes)):\n\t\t\tpopupText = indNodes.iloc[i]['popupText']\n\t\t\ttmpIconText = str(indNodes.iloc[i]['cesiumIconText']).replace(\"'\", r\"\")\n\n\t\t\tjsStr += \" pin[%s] = viewer.entities.add({\\n\" % (i)\n\t\t\tjsStr += \" name : '%s',\\n\" % (tmpIconText)\n\t\t\tjsStr += \" parent : nodePins,\\n\"\n\t\t\tif (popupText is not None):\n\t\t\t\tjsStr += \" description : '%s',\\n\" % (str(popupText).replace(\"'\", r\"\\'\"))\t\n\t\t\tjsStr += \" position : Cesium.Cartesian3.fromDegrees(%s, %s),\\n\" % (indNodes.iloc[i]['lon'], indNodes.iloc[i]['lat'])\n\t\t\tjsStr += \" billboard : {\\n\"\n\t\t\tjsStr += \" image : pinBuilder.fromText('%s', %s, 40).toDataURL(),\\n\" % (tmpIconText, expandCesiumColor(cesiumIconColor) if (cesiumIconColor != None) else expandCesiumColor(indNodes.iloc[i]['cesiumColor']))\n\t\t\tjsStr += \" verticalOrigin : Cesium.VerticalOrigin.BOTTOM\\n\"\n\t\t\tjsStr += \" }\\n\"\n\t\t\tjsStr += \" });\\n\\n\"\n\n\tjsStr += \"}\"\n\n\t# Write contents and close file stream\n\tf.write(jsStr)\n\tf.close()\n\n\tif (config['VRV_SETTING_SHOWOUTPUTMESSAGE']):\n\t\tprint(\"Message: Nodes were written to %s ...\" % (jsFilePath))\n\n\treturn",
"def install_nodes2file(graphname, install_nodes, install_ratio, scenario_no):\n install_no = str(int(install_ratio*100))\n outfile = 'pre_data/' + graphname \\\n + '/install' + install_no \\\n + '_' + str(scenario_no) + '.txt'\n myfile = open(outfile, 'w')\n out = \"\".join([str(node)+'\\n' for node in install_nodes])\n myfile.write(out)\n myfile.close()",
"def export_node(node_name, outputFileName):\n EXPORTER.write(NODE_DICT[node_name], open(outputFileName, \"w\"))",
"def save_devices_to_file():\n global _devices\n output = dict()\n for device in _devices:\n output[str(device)] = device.dict_export()\n with open(JSON_PATH, 'w') as f:\n json.dump(output, f, indent=2, default=str)",
"def write_node_shp(self,shpname,extra_fields=[]):\n assert len(extra_fields)==0 # not yet supported!\n \n # zero-based index of node (why does write_edge_shp create 1-based ids?)\n base_dtype = [('node_id',np.int32)]\n\n node_geoms=[geometry.Point( self.nodes['x'][i] )\n for i in self.valid_node_iter() ]\n\n node_data=self.nodes[~self.nodes['deleted']].copy()\n\n # don't need to write all of the original fields out:\n node_data=utils.recarray_del_fields(node_data,['x','deleted'])\n \n\n wkb2shp.wkb2shp(shpname,input_wkbs=node_geoms,fields=node_data,\n overwrite=True)",
"def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n pass",
"def write_subgraph_nodeids(filename, nodelist):\n with open(filename, 'w') as f:\n f.write('nodeid\\n')\n for i in nodelist:\n f.write(str(i) + '\\n')",
"def _create_parameter_file(self, filename: str):\n # Get all interesting nodes\n nodes = json.loads(self.daq.listNodesJSON('/' + self.devname))\n\n modified_nodes = {}\n\n # Do some name mangling\n for name, node in nodes.items():\n name = name.replace('/' + self.devname.upper() + '/', '')\n node['Node'] = name\n modified_nodes[name] = node\n\n # Dump the nodes\n with open(filename, \"w\") as json_file:\n json.dump(modified_nodes, json_file, indent=4, sort_keys=True)",
"def _write_node_file(module_path, file_name, module_text):\n\n if not os.path.exists(module_path):\n os.makedirs(module_path)\n with open(os.path.join(module_path, file_name), 'w') as node_file:\n node_file.write('\\n'.join(module_text))",
"def write_json(self, data, fichier):",
"def createNodesInfo(path):\n file1 = open(\"NodesInfo.txt\",\"w\")\n for node in path:\n file1.write(str(node[0])+\" \" + str(node[1])+ \"\\n\")\n #print(node[0],node[1])\n file1.close()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
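As a hedged illustration of the save/restore idea behind export_extras (and the matching import_extras entry below), here is a self-contained round trip that uses plain dictionaries in place of AiiDA nodes; load_node and the Node class are deliberately not used, and the sample uuids and extras are made up.

import json, os, tempfile

# Stand-in "nodes": uuid -> extras dict (not AiiDA objects).
nodes = {
    "aaaa-1111": {"trash": False, "label": "scf_run"},
    "bbbb-2222": {"trash": True,  "label": "old_test"},
}

def export_extras(node_extras, filename):
    # Dump uuid -> extras as JSON, mirroring the structure used above.
    with open(filename, "w") as handle:
        json.dump(node_extras, handle)

def import_extras(filename):
    with open(filename) as handle:
        return json.load(handle)

path = os.path.join(tempfile.gettempdir(), "node_extras.txt")
export_extras(nodes, path)
restored = import_extras(path)
assert restored == nodes
print(restored["bbbb-2222"]["label"])   # old_test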
Reads in node uuids and extras from a file and applies them to the nodes in the DB. This is useful for import/export because extras are currently lost, so this can be used to save and restore the extras on the nodes. | def import_extras(filename):
all_extras = {}
# read file
#inputfile = open(filename, 'r')
#lines = inputfile.readlines()
#for line in lines[1:]:
# splitted = line.split(' | ')
# uuid = splitted[0].rstrip(' ')
# extras = splitted[1].rstrip(' ')
# #extras = dict(extras)
# print(extras)
# all_extras[uuid] = extras
#inputfile.close()
try:
all_extras = json.load(open(filename))
except:
        print('The file has to be loadable by json, i.e. json format (which it is not).')
for uuid, extras in all_extras.iteritems():
try:
node = load_node(uuid)
except:
# Does not exists
print('node with uuid {} does not exist in DB'.format(uuid))
node = None
continue
if isinstance(node, Node):
node.set_extras(extras)
else:
print('node is not instance of an AiiDA node')
#print(extras)
return | [
"def export_extras(nodes, filename='node_extras.txt'):\n\n #outstring = ''#' node uuid | extras \\n'\n outdict = {}\n for node in nodes:\n if isinstance(node, int): #pk\n node = load_node(node)\n elif isinstance(node, basestring): #uuid\n node = load_node(node)\n\n if not isinstance(node, Node):\n print('skiped node {}, is not an AiiDA node, did not know what to do.'.format(node))\n continue\n uuid = node.uuid\n extras_dict = node.get_extras()\n outdict[uuid] = extras_dict\n #line = '{} | {}\\n'.format(uuid, extras_dict)\n #outstring = outstring + line\n\n #outfile = open(filename, 'w')\n #outfile.write(outstring)\n #outfile.close()\n json.dump(outdict, open(filename,'w'))\n return",
"def loadNodeAttributes(self, filePath):\n self.nodeAttributes = pd.read_csv(filePath)",
"def load_db(domain, file, use_json=False):\n if use_json:\n for line in file.readlines():\n if line:\n data = json.loads(line)\n item = domain.new_item(data['name'])\n item.update(data['attributes'])\n item.save()\n \n else:\n domain.from_xml(file)",
"def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")",
"def read_file(datafile, seedfile):\n global n_nodes, n_edges , graph, seedlist\n lines = open(datafile).readlines()\n n_nodes = lines[0].split()[0]\n n_edges = lines[0].split()[1]\n for i in lines[1:]:\n thisline = i.split()\n graph.add_edge(int(thisline[0]), int(thisline[1]), float(thisline[2]))\n\n lines2 = open(seedfile).readlines()\n for i in lines2:\n seedlist.append(int(i))",
"def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))",
"def load_entity_data(self, data_file):\n load_xml_seed(data_file)",
"def read_adjlist(self, filename):\n self.G = nx.read_adjlist(filename, create_using=nx.DiGraph())\n for i, j in self.G.edges():\n self.G[i][j]['weight'] = 1.0\n self.encode_node()",
"def load_nodes(path):\n global parents\n with open(path, 'r') as r:\n for line in r:\n (taxid, parent, other) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 2)\n parents[taxid] = parent",
"def read_relations(db, openfile):\n pass",
"def load(self, filename):\n\t\tf = open(filename).read().split(\"\\n\")\n\n\t\tfor item in f:\n\t\t\tcommand = item.split(\":\")\n\n\t\t\t# Add node\n\t\t\tif len(command) == 2:\n\t\t\t\t_id = command[0].strip()\n\t\t\t\t_label = command[1].strip() or None\n\n\t\t\t\t# Duplicate id\n\t\t\t\tif _id in self.nodes:\n\t\t\t\t\traise ValueError\n\n\t\t\t\t# Add node\n\t\t\t\tself.nodes[_id] = Node(_id, _label)\n\n\t\t\t# Add link\n\t\t\telif len(command) == 3:\n\t\t\t\t_from = command[0].strip()\n\t\t\t\t_label = command[1].strip() or None\n\t\t\t\t_to = command[2].strip()\n\n\t\t\t\t# Non-existent Nodes\n\t\t\t\tif _from not in self.nodes or _to not in self.nodes:\n\t\t\t\t\traise ValueError\n\n\t\t\t\tself.nodes[_from].add_neighbour(self.nodes[_to], _label)",
"def add_from_xml(filename, empty=True):\n # If required empty the table first\n if empty:\n try:\n db.session.query(Interpro).delete()\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n print(e)\n\n interpro_parser = InterproParser()\n\n interpro_parser.readfile(filename)\n\n for i, domain in enumerate(interpro_parser.domains):\n interpro = Interpro(domain.label, domain.description)\n\n db.session.add(interpro)\n\n if i % 40 == 0:\n # commit to the db frequently to allow WHOOSHEE's indexing function to work without timing out\n try:\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n print(e)\n\n try:\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n print(e)",
"def __load_nodes(self, nodes_file, node_types_file):\n def eval(val):\n # Helper function that can convert csv to an appropiate type. Helpful for cells of lists (positions, etc)\n # TODO: keep column dtypes in metadata and use that for converting each column\n if isinstance(val, float) and math.isnan(val):\n return None\n elif isinstance(val, basestring):\n try:\n # this will be helpful for turning strings into lists where appropiate \"(0, 1, 2)\" --> (0, 1, 2)\n return literal_eval(val)\n except ValueError:\n return val\n return val\n\n if nodes_file is None and node_types_file is None:\n return None\n\n elif nodes_file is not None and node_types_file is not None:\n # Get the array_params from nodes_file and properties from nodes_types_file, combine them to call\n # add_nodes() function and rebuilt the nodes.\n nt_df = pd.read_csv(node_types_file, self.CSV_DELIMITER) #, index_col=self.COL_NODE_TYPE_ID)\n n_df = pd.read_csv(nodes_file, self.CSV_DELIMITER)\n\n for _, row in nt_df.iterrows():\n # iterate through the node_types, find all nodes with matching node_type_id and get those node's\n # parameters as a dictionary of lists\n node_type_props = {l: eval(row[l]) for l in nt_df.columns if eval(row[l]) is not None}\n selected_nodes = n_df[n_df[self.COL_NODE_TYPE_ID] == row[self.COL_NODE_TYPE_ID]]\n N = len(selected_nodes.axes[0])\n array_params = {l: list(selected_nodes[l]) for l in selected_nodes.columns\n if l not in ['node_type_id', 'position']}\n\n # Special function for position_params\n position = None\n position_params = None\n if 'position' in selected_nodes.columns:\n position_params = {'location': [eval(p) for p in selected_nodes['position']]}\n position = 'points'\n\n self._network.add_nodes(N, position=position, position_params=position_params,\n array_params=array_params, **node_type_props)\n\n self._network._build_nodes()\n\n elif node_types_file is not None:\n # nodes_types exists but nodes doesn't. We convert each row (node_type) in the csv to a collection\n # of nodes with N=1, no array_params.\n nt_df = pd.read_csv(node_types_file, self.CSV_DELIMITER)\n for _, row in nt_df.iterrows():\n node_type_props = {l: eval(row[l]) for l in nt_df.columns if eval(row[l]) is not None}\n self._network.add_nodes(N=1, **node_type_props)\n self._network._build_nodes()\n\n elif nodes_file is not None:\n # nodes exists but node_types doesn't. In this case group together all nodes by node_type_id and add them\n # as a single population (with no node_params)\n n_df = pd.read_csv(nodes_file, self.CSV_DELIMITER)\n for nt_id, df in n_df.groupby(self.COL_NODE_TYPE_ID):\n N = len(df.axes[0])\n array_params = {l: list(df[l]) for l in df.columns\n if l not in ['node_type_id', 'position']}\n\n position = None\n position_params = None\n if 'position' in df.columns:\n position_params = {'location': [eval(p) for p in df['position']]}\n position = 'points'\n\n self._network.add_nodes(N, position=position, position_params=position_params,\n array_params=array_params, node_type_id=nt_id)\n self._network._build_nodes()",
"def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()",
"def add_tasks_from_file(file, project, priority, indent, date):\n with open(file) as f:\n content = f.readlines()\n for item in content:\n add_task(item, project, priority, indent, date)",
"def add_graph_attributes(G, filename):\n Ef = dict() # feature -> edges\n Nf = dict() # node -> features\n with open(filename) as f:\n for line in f: # for each node, list of features it belongs to\n d = line.split()\n u = int(d[0])\n features = d[1:]\n for f in features:\n Ef.setdefault(f, []).extend(G.in_edges(u)) # add feature-dependent edges\n #G.node[u]['Fu'] = features\n G.nodes[u]['Fu'] = features\n Nf[u] = features\n print('Read graph attributes')\n return Ef, Nf",
"def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)",
"def add_from_uuid_list(self):\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split(\"\\n\")[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add(\"Invalid uuid lenght.\")\n continue\n\n self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return",
"def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This method deletes all AiiDA nodes in the DB which have an extra trash=True, and all their children. Could be extended into a garbage collector. Be careful when using it. | def delete_trash():
#query db for marked trash
q = QueryBuilder()
nodes_to_delete_pks = []
q.append(Node,
filters = {'extras.trash': {'==' : True}
}
)
res = q.all()
for node in res:
nodes_to_delete_pks.append(node[0].dbnode.pk)
print('pk {}, extras {}'.format(node[0].dbnode.pk, node[0].get_extras()))
#Delete the trash nodes
print('deleting nodes {}'.format(nodes_to_delete_pks))
delete_nodes(nodes_to_delete_pks)
return | [
"def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return",
"def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()",
"def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n binomes = Binome4j.nodes.all()\n for b in binomes:\n b.delete()\n projects = Project4j.nodes.all()\n for p in projects:\n p.delete()\n sherpas = Sherpa4j.nodes.all()\n for sh in sherpas:\n sh.delete()\n students = Pioupiou4j.nodes.all()\n for piou in students:\n piou.delete()\n partenaires = Partenaire4j.nodes.all()\n for part in partenaires:\n part.delete()\n ps = Planete_Solidaire.nodes.all()\n for misc in ps:\n misc.delete()",
"def clean():\n os.system('killall -9 lnd')\n os.system('killall -9 btcd')\n \n shutil.rmtree(btcd_dir)\n os.remove(btcd_log)\n\n index = 0\n while True:\n node = Node.from_index(index)\n try:\n shutil.rmtree(node.path())\n os.remove(node.log())\n except:\n click.echo(f'removed {index} nodes.')\n break\n index += 1",
"def clean_up(self):\n query = neo4j.CypherQuery(self.graph_db, 'MATCH (n) OPTIONAL MATCH (n)-[r]-() DELETE n,r')\n query.run()",
"def delete(self):\n for child in self.children:\n child.delete()\n del self",
"def reset_tree(self):\n for i in self.tree.get_children():\n self.tree.delete(i)",
"def delete_nodes(self):\n with self._nodes_lock:\n while self.nodes:\n _, node = self.nodes.popitem()\n node.shutdown()",
"def deleteChildren(self, nodepath, bypassTrash=False):\r\n # Sanity check\r\n if not self.pathexists(nodepath):\r\n return\r\n nodes = self.inFolder(nodepath)\r\n for node in nodes:\r\n node.Delete(bypassTrash)",
"def clean_database(databasePathname):\n print '# loading database ' + databasePathname\n try:\n db = gdbm.open(databasePathname, 'w')\n except:\n print \"# \" + databasePathname + \" could not be loaded\"\n sys.exit(-1)\n\n # even though gdbm supports memory efficient iteration over\n # all keys, I want to order my traversal across similar\n # paths to leverage caching of directory files:\n allKeys=db.keys()\n print '# finished loaded keys from ' + databasePathname\n allKeys.sort()\n print '# finished sorting keys from ' + databasePathname\n print '# deleting dead nodes'\n count=0\n for currKey in allKeys:\n try:\n os.stat(currKey)\n sys.stdout.write('.')\n except OSError:\n del db[currKey]\n sys.stdout.write('*')\n count=count+1\n sys.stdout.flush()\n print \"\\n# reorganizing \" + databasePathname\n db.reorganize()\n db.sync()\n db.close()\n print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'",
"def erase_all_torrents_from_db(self):\n db = sqlite3.connect(self.statdb)\n c = db.cursor()\n c.execute(\"DELETE FROM torrents\")\n db.commit()\n db.close()",
"def delete_garbage(self):\n self.connection.query(\n \"DELETE FROM `{db}`.`{tab}` WHERE \".format(tab=self.table_name, db=self.database) +\n \" AND \".join(\n 'hash NOT IN (SELECT {column_name} FROM {referencing_table})'.format(**ref)\n for ref in self.references) or \"TRUE\")\n print('Deleted %d items' % self.connection.query(\"SELECT ROW_COUNT()\").fetchone()[0])",
"def deleteChildren(self, sess):\n\n if _database!=\"postgres\":\n raise ESGPublishError(\"Database not supported for file children delete: %s\"%_database)\n\n # file_attr ----------------------\n sess.execute(\"delete from file_attr where file_attr.file_id=%s\"%self.id)\n\n # file_var_attr ----------------------\n sess.execute(\"delete from file_var_attr using file_variable as fv where file_var_attr.filevar_id=fv.id and fv.file_id=%s\"%self.id)\n\n # filevar_dimension ----------------------\n sess.execute(\"delete from filevar_dimension using file_variable as fv where filevar_dimension.filevar_id=fv.id and fv.file_id=%s\"%self.id)\n\n # filevar ----------------------\n sess.execute(\"delete from file_variable where file_variable.file_id=%s\"%self.id)",
"def destroy_all(self):\n self.log.info(\"Destroying the %s cluster\" % self.cluster_name)\n for n in self.all_nodes:\n n.destroy()\n remove(self.save_file)",
"def _delete(self):\n\n if not self.node_exists:\n return\n\n for association in self.node._pg_edges:\n setattr(self.node, association, [])\n\n self.transaction.session.delete(self.node)",
"def __del__(self):\n for node in self.nodes:\n node.clear()",
"def delete(self, nodes):\n # Check indices.\n N = len(self)\n if not isinstance(nodes, (set, list, tuple)):\n nodes = [nodes]\n if not all(0 < node <= N for node in nodes):\n raise IndexError()\n\n # Reparent orphaned nodes.\n # Lift the arc until the parent is non-deleted node.\n # If all parents are deleted, we will hit the root eventually.\n deleted = set(nodes)\n alive_heads = [None] * N\n for node in range(1, N + 1):\n head = self.heads(node)\n while head in deleted:\n head = self.heads(head)\n alive_heads[node - 1] = head\n\n # Remap.\n new_nodes = {0: 0}\n new_node = 1\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n new_nodes[node] = new_node\n new_node += 1\n\n # Gather non-deleted stuff.\n forms = []\n lemmas = []\n cpostags = []\n postags = []\n feats = []\n heads = []\n deprels = []\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n forms.append(self.forms(node))\n lemmas.append(self.lemmas(node))\n cpostags.append(self.cpostags(node))\n postags.append(self.postags(node))\n feats.append(self.feats(node))\n heads.append(new_nodes[alive_heads[node - 1]])\n deprels.append(self.deprels(node))\n \n # Construct new tree.\n self.__init__(forms, lemmas, cpostags, postags, feats, heads, deprels)",
"def delete_all_entities(self):\n self._delete_all_acls()\n self._delete_all_containers()\n self._delete_all_orders()\n self._delete_all_secrets()",
"def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
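A minimal sketch of the mark-and-sweep idea described in the delete_trash entry above, using an in-memory graph of plain dicts rather than the AiiDA QueryBuilder; the node store shape, what counts as a "child", and the deletion step are assumptions for illustration only.

# Hypothetical in-memory node store: pk -> {"extras": {...}, "children": [pks]}
store = {
    1: {"extras": {"trash": True},  "children": [2, 3]},
    2: {"extras": {},               "children": []},
    3: {"extras": {},               "children": [4]},
    4: {"extras": {},               "children": []},
    5: {"extras": {"trash": False}, "children": []},
}

def collect_trash(store):
    # Gather every node marked trash=True plus all of its descendants.
    to_delete = set()
    stack = [pk for pk, node in store.items() if node["extras"].get("trash")]
    while stack:
        pk = stack.pop()
        if pk in to_delete:
            continue
        to_delete.add(pk)
        stack.extend(store[pk]["children"])
    return to_delete

def delete_trash(store):
    for pk in collect_trash(store):
        del store[pk]

delete_trash(store)
print(sorted(store))   # [5]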
Returns a list of node uuids for a given group, given as a name, pk, uuid or Group object. | def get_nodes_from_group(group, return_format='uuid'):
from aiida.orm import Group
from aiida.common.exceptions import NotExistent
nodes = []
g_nodes = []
try:
group_pk = int(group)
except ValueError:
group_pk = None
group_name = group
if group_pk is not None:
try:
str_group = Group(dbgroup=group_pk)
except NotExistent:
str_group = None
message = ('You have to provide a valid pk for a Group '
'or a Group name. Reference key: "group".'
'given pk= {} is not a valid group'
'(or is your group name integer?)'.format(group_pk))
print(message)
elif group_name is not None:
try:
str_group = Group.get_from_string(group_name)
except NotExistent:
str_group = None
message = ('You have to provide a valid pk for a Group or a Group name.'
'given group name= {} is not a valid group'
'(or is your group name integer?)'.format(group_name))
print(message)
elif isinstance(group, Group):
str_group = group
else:
str_group = None
print('I could not handle given input, either Group, pk, or group name please.')
return nodes
g_nodes = str_group.nodes
for node in g_nodes:
if return_format == 'uuid':
nodes.append(node.uuid)
elif return_format == 'pk':
nodes.append(node.pk)
return nodes | [
"def get_groups_uuid(\n export_data: Dict[str, Dict[int, dict]], silent: bool\n) -> Dict[str, List[str]]:\n EXPORT_LOGGER.debug(\"GATHERING GROUP ELEMENTS...\")\n groups_uuid = defaultdict(list)\n # If a group is in the exported data, we export the group/node correlation\n if GROUP_ENTITY_NAME in export_data:\n group_uuids_with_node_uuids = (\n orm.QueryBuilder()\n .append(\n orm.Group,\n filters={\"id\": {\"in\": export_data[GROUP_ENTITY_NAME]}},\n project=\"uuid\",\n tag=\"groups\",\n )\n .append(orm.Node, project=\"uuid\", with_group=\"groups\")\n )\n\n # This part is _only_ for the progress bar\n total_node_uuids_for_groups = group_uuids_with_node_uuids.count()\n if total_node_uuids_for_groups:\n progress_bar = get_progress_bar(\n total=total_node_uuids_for_groups, disable=silent\n )\n progress_bar.set_description_str(\"Exporting Groups ...\", refresh=False)\n\n for group_uuid, node_uuid in group_uuids_with_node_uuids.iterall():\n progress_bar.update()\n\n groups_uuid[group_uuid].append(node_uuid)\n\n return groups_uuid",
"def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)",
"def nodes(self, group, pattern='*'):\n\t\treturn protocol.Request_NODE_LIST( group = group, pattern = pattern )",
"def extract_serial_group(self, task_node:Dict[str, Any]) -> List[Dict[str, Any]]:\n pass",
"def group_nodes(self, group, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'map', group)",
"def get_group_devices(self, group):\n pass",
"def _get_nodes_by_instance(self, instance_uuid):\n try:\n node = pecan.request.dbapi.get_node_by_instance(instance_uuid)\n return [node]\n except exception.InstanceNotFound:\n return []",
"def getNodeLVMGroups(self,node):\n data = self.connect('get','nodes/%s/scan/lvm' % (node),None)\n return data",
"def get_groupids(userid):",
"def get_uuids_in_node(self, node, project_id):\n program, project = project_id.split(\"-\", 1)\n\n try:\n res = self.paginate_query(node, project_id)\n uuids = [x[\"id\"] for x in res[\"data\"][node]]\n except:\n raise Gen3Error(\n \"Failed to get UUIDs in node '\"\n + node\n + \"' of project '\"\n + project_id\n + \"'.\"\n )\n\n return uuids",
"def get_uuids(things):\n return [thing.uuid for thing in things]",
"def getUuidsOfPath(self, node):\n uuids = set()\n acquisition_chain = []\n for n in aq_chain(node.primaryAq()):\n if isinstance(n, DataRoot):\n acquisition_chain.pop()\n break\n acquisition_chain.append(n)\n\n if acquisition_chain:\n for obj in filter(None, acquisition_chain):\n try:\n uuids.add(self.getElementUuid(obj))\n except TypeError:\n log.debug(\"Unable to get a uuid for %s \", obj)\n\n return filter(None, uuids)",
"def create_get_all_group_names(username):\n query = \"MATCH (e:Person)-[:IN_GROUP]->(g:Group) WHERE e.user_name = '%s' RETURN g\" % username\n return query",
"def listGroupsWithAttributes(obj, grp):",
"def test_get_device_group_by_id(self):\n pass",
"def get_provider_uuids_in_tree(self, name_or_uuid):\n with self.lock:\n return self._find_with_lock(\n name_or_uuid, return_root=True).get_provider_uuids()",
"def generate_guid_lookup(self,target=True):\n if hasattr(self,'guid'): \n guidlist=[self.guid]\n else:\n guidlist=[]\n for child in self.getChildNodes():\n guidlist.extend(child.generate_guid_lookup(False))\n if target: self.guid_lookup=guidlist\n return guidlist",
"def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist",
"def getGroupIDsWithAttr(self,attr_name,attr_value):\n group_ids=db_main.getHandle().get(\"group_attrs\",\"attr_name=%s and attr_value=%s\"%(dbText(attr_name),dbText(attr_value)),\n 0,-1,(\"group_id\",True),[\"group_id\"])\n \n return map(lambda dic:dic[\"group_id\"],group_ids)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
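A minimal usage sketch for the get_nodes_from_group entry above. It assumes the call is made inside an interactive AiiDA shell (e.g. verdi shell) where a profile is already loaded, and that a group named 'fleur_calcs' exists; the group name is illustrative only and not taken from the dataset.

# Hedged usage sketch: assumes an AiiDA shell with a loaded profile and an
# existing group named 'fleur_calcs' (illustrative name only).
uuids = get_nodes_from_group('fleur_calcs', return_format='uuid')
pks = get_nodes_from_group('fleur_calcs', return_format='pk')

print('group contains {} nodes'.format(len(uuids)))
for uuid, pk in zip(uuids, pks):
    print('pk {:>6} -> uuid {}'.format(pk, uuid))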
This function returns the default anchors given the image shape and the number of anchors per grid point. The grid has width and height equal to those of the final layer's output. | def set_anchors(mc):
H, W, C = _get_output_shape(mc)
B = mc.ANCHOR_PER_GRID
X = np.array(mc.INITIAL_ANCHOR_SHAPES)
X[:,0] *= mc.IMAGE_WIDTH
X[:,1] *= mc.IMAGE_HEIGHT
anchor_shapes = np.reshape( # it refers to the anchor width and height
[X] * H * W,
(H, W, B, 2)
)
center_x = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B),
(B, H, W)
),
(1, 2, 0)
),
(H, W, B, 1)
)
center_y = np.reshape(
np.transpose(
np.reshape(
np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
(B, W, H)
),
(2, 1, 0)
),
(H, W, B, 1)
)
anchors = np.reshape(
np.concatenate((center_x, center_y, anchor_shapes), axis=3),
(-1, 4)
)
return anchors | [
"def generate_anchors(img_params, hyper_params):\n anchor_count = hyper_params[\"anchor_count\"]\n stride = hyper_params[\"stride\"]\n height, width, output_height, output_width = img_params\n #\n grid_x = np.arange(0, output_width) * stride\n grid_y = np.arange(0, output_height) * stride\n #\n width_padding = (width - output_width * stride) / 2\n height_padding = (height - output_height * stride) / 2\n grid_x = width_padding + grid_x\n grid_y = height_padding + grid_y\n #\n grid_y, grid_x = np.meshgrid(grid_y, grid_x)\n grid_map = np.vstack((grid_y.ravel(), grid_x.ravel(), grid_y.ravel(), grid_x.ravel())).transpose()\n #\n base_anchors = generate_base_anchors(hyper_params)\n #\n output_area = grid_map.shape[0]\n anchors = base_anchors.reshape((1, anchor_count, 4)) + \\\n grid_map.reshape((1, output_area, 4)).transpose((1, 0, 2))\n anchors = anchors.reshape((output_area * anchor_count, 4)).astype(np.float32)\n anchors = helpers.normalize_bboxes(anchors, height, width)\n anchors = np.clip(anchors, 0, 1)\n return anchors",
"def gen_anchors(self):\n anchor=self.gen_single_anchor()\n k = anchor.shape[0]\n delta_x, delta_y = [x*self.anchor_stride for x in range(self.score_width)], \\\n [y*self.anchor_stride for y in range(self.score_height)]\n\n shift_x, shift_y = np.meshgrid(delta_x, delta_y)\n shifts = np.vstack([shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel()]).transpose()\n a = shifts.shape[0]\n anchors = (anchor.reshape((1, k, 4))+shifts.reshape((a, 1, 4))).reshape((a*k, 4)) # corner format\n anchors = corner_to_center(anchors)\n\n return anchors",
"def get_grid_anchors(cell_anchors, w, h):\n anchors_grid = np.mgrid[0:w,0:h]\n anchors_grid = np.concatenate((anchors_grid, np.zeros_like(anchors_grid)))\n anchors_grid = np.expand_dims(anchors_grid, axis=1)\n anchors_grid = anchors_grid + cell_anchors.T.reshape((4,-1,1,1))\n return anchors_grid",
"def get_all_anchors(stride=None, sizes=None):\n if stride is None:\n stride = cfg.ANCHOR.ANCHOR_STRIDE\n if sizes is None:\n sizes = cfg.ANCHOR.ANCHOR_SIZES\n # Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors\n # are centered on stride / 2, have (approximate) sqrt areas of the specified\n # sizes, and aspect ratios as given.\n cell_anchors = generate_anchors(\n stride,\n scales=np.array(sizes, dtype=np.float) / stride,\n ratios=np.array(cfg.ANCHOR.ANCHOR_RATIOS, dtype=np.float))\n # anchors are intbox here.\n # anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)\n\n max_size = cfg.DATA.MAX_SIZE\n field_size = int(np.ceil(max_size / stride))\n shifts = np.arange(0, field_size) * stride\n shift_x, shift_y = np.meshgrid(shifts, shifts)\n shift_x = shift_x.flatten()\n shift_y = shift_y.flatten()\n shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()\n # Kx4, K = field_size * field_size\n K = shifts.shape[0]\n\n A = cell_anchors.shape[0]\n field_of_anchors = (\n cell_anchors.reshape((1, A, 4)) +\n shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))\n # FSxFSxAx4\n # Many rounding happens inside the anchor code anyway\n # assert np.all(field_of_anchors == field_of_anchors.astype('int32'))\n field_of_anchors = field_of_anchors.astype('float32')\n field_of_anchors[:, :, :, [2, 3]] += 1\n return field_of_anchors",
"def get_cell_anchors(scales, anchors):\n cell_anchors = []\n anchors = [(0.,0.,1.)] + anchors\n for s in scales:\n for dx, dy, whr in anchors:\n w = whr / math.sqrt(whr)\n h = 1 / math.sqrt(whr)\n cell_anchors.append([0.5+s*(dx-w/2), 0.5+s*(dy-h/2), s*w, s*h])\n return np.array(cell_anchors)",
"def generate_anchors(self):\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size * 1. / r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]\n count += 1",
"def anchor_gen(size_feat_x,size_feat_y,rpn_stride,scales,ratios):\n scales, ratios = np.meshgrid(scales,ratios)\n scales, ratios = scales.flatten(), ratios.flatten()\n #width and height of anchor\n scalesY = scales * np.sqrt(ratios) \n scalesX = scales / np.sqrt(ratios)\n #point of anchor\n shiftX = np.arange(0,size_feat_x) * rpn_stride\n shiftY = np.arange(0,size_feat_y) * rpn_stride\n shiftX,shiftY = np.meshgrid(shiftX,shiftY)\n #get all combine anchors\n centerX,anchorX = np.meshgrid(shiftX,scalesX)\n centerY,anchorY = np.meshgrid(shiftY,scalesY)\n #\n anchor_center = np.stack([centerY,centerX],axis=2).reshape(-1,2)\n anchor_size = np.stack([anchorY,anchorX],axis=2).reshape(-1,2)\n boxes = np.concatenate([anchor_center - 0.5*anchor_size, anchor_center+ 0.5*anchor_size],axis=1)\n return boxes",
"def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):\n boxes_all = []\n for _, configs in anchor_configs.items():\n boxes_level = []\n for config in configs:\n stride, octave_scale, aspect = config\n if image_size[0] % stride != 0 or image_size[1] % stride != 0:\n raise ValueError('input size must be divided by the stride.')\n base_anchor_size = anchor_scale * stride * 2**octave_scale\n anchor_size_x_2 = base_anchor_size * aspect[0] / 2.0\n anchor_size_y_2 = base_anchor_size * aspect[1] / 2.0\n\n x = np.arange(stride / 2, image_size[1], stride)\n y = np.arange(stride / 2, image_size[0], stride)\n xv, yv = np.meshgrid(x, y)\n xv = xv.reshape(-1)\n yv = yv.reshape(-1)\n\n boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,\n yv + anchor_size_y_2, xv + anchor_size_x_2))\n boxes = np.swapaxes(boxes, 0, 1)\n boxes_level.append(np.expand_dims(boxes, axis=1))\n # concat anchors on the same level to the reshape NxAx4\n boxes_level = np.concatenate(boxes_level, axis=1)\n boxes_all.append(boxes_level.reshape([-1, 4]))\n\n anchor_boxes = np.vstack(boxes_all)\n return anchor_boxes",
"def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))\n return anchors",
"def _mkanchors(ws, hs, x_ctr, y_ctr):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors",
"def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack( (x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) ) )\n return anchors",
"def ssd_anchors_all_layers(img_shape,\n layers_shape,\n anchor_sizes,\n anchor_ratios,\n anchor_steps,\n offset=0.5,\n dtype=np.float32):\n layers_anchors = []\n for i, feat_shape in enumerate(layers_shape):\n anchor_bboxes = ssd_anchor_one_layer(img_shape,\n feat_shape,\n anchor_sizes[i],\n anchor_ratios[i],\n anchor_steps[i],\n offset=offset,\n dtype=dtype)\n layers_anchors.append(anchor_bboxes)\n\n return layers_anchors",
"def _get_anchor_boxes(self, anchors_dims, center_x=0.5, center_y=0.5):\n\n anchors = []\n for dims in anchors_dims:\n anchors.append([center_x, center_y, dims[0], dims[1]])\n return torch.tensor(anchors).to(self.device)",
"def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):\n A = num_anchors\n total_anchors = all_anchors.size()[0]\n K = total_anchors / num_anchors\n\n # allow boxes to sit over the edge by a small amount\n _allowed_border = 0\n\n # pytorch (bs, c, h, w)\n height, width = rpn_cls_score.size()[2:4]\n\n # only keep anchors inside the image\n inds_inside = (\n (all_anchors.data[:, 0] >= -_allowed_border) &\n (all_anchors.data[:, 1] >= -_allowed_border) &\n (all_anchors.data[:, 2] < im_info[1] + _allowed_border) & # width\n (all_anchors.data[:, 3] < im_info[0] + _allowed_border) # height\n ).nonzero()[:, 0].long()\n\n if DEBUG:\n print('total_anchors', total_anchors)\n print('inds_inside', inds_inside.size()[0])\n\n # keep only inside anchors\n anchors = all_anchors[inds_inside, :]\n\n if DEBUG:\n print('anchors.shape', anchors.size())\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = inds_inside.new(inds_inside.size()[0]).fill_(-1)\n\n # overlaps between the anchors and the gt boxes\n # overlaps (ex, gt) shape is A x G\n overlaps = bbox_overlaps(\n anchors.data,\n gt_boxes[:, :4].data)\n max_overlaps, argmax_overlaps = torch.max(overlaps, dim=1)\n gt_max_overlaps, gt_argmax_overlaps = torch.max(overlaps, dim=0)\n gt_argmax_overlaps = (overlaps == (gt_max_overlaps.unsqueeze(0).expand_as(overlaps))).nonzero()[:, 0]\n\n if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels first so that positive labels can clobber them\n # first set the negatives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # fg label: for each gt, anchor with highest overlap\n labels[gt_argmax_overlaps] = 1\n\n # fg label: above threshold IOU\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1\n\n if cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels last so that negative labels can clobber positives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # subsample positive labels if we have too many\n num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)\n fg_inds = (labels == 1).nonzero()[:, 0]\n if fg_inds.numel() > num_fg:\n inds = fg_inds.new(\n npr.choice(np.arange(0, fg_inds.numel()), size=int((len(fg_inds) - num_fg)), replace=False)).long()\n disable_inds = fg_inds[inds]\n labels[disable_inds] = -1\n\n # subsample negative labels if we have too many\n num_bg = cfg.TRAIN.RPN_BATCHSIZE - (labels == 1).sum()\n bg_inds = (labels == 0).nonzero()[:, 0]\n if bg_inds.numel() > num_bg:\n inds = bg_inds.new(\n npr.choice(np.arange(0, bg_inds.numel()), size=int((len(bg_inds) - num_bg)), replace=False)).long()\n disable_inds = bg_inds[inds]\n labels[disable_inds] = -1\n\n bbox_targets = _compute_targets(anchors.data, gt_boxes[argmax_overlaps][:, :4].data)\n bbox_inside_weights = bbox_targets.new(inds_inside.size()[0], 4).zero_()\n # only the positive ones have regression targets\n inds = (labels == 1).nonzero().view(-1)\n # dim1_inds = inds.unsqueeze(1).expand(inds.size(0), 4)\n # dim2_inds = inds.new((0,1,2,3)).view(-1,4).expand_as(dim1_inds)\n dim_value = bbox_targets.new(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS).view(-1, 4).expand(inds.size(0), 4)\n bbox_inside_weights[inds, :] = dim_value\n\n bbox_outside_weights = bbox_targets.new(inds_inside.size()[0], 4).zero_()\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = (labels >= 0).sum()\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n negative_weights = np.ones((1, 4)) * 1.0 / 
num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /\n (labels == 1).sum())\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /\n (labels == 0).sum())\n\n inds = (labels == 1).nonzero().view(-1)\n # dim1_inds = inds.unsqueeze(1).expand(inds.size(0), 4)\n # dim2_inds = inds.new((0,1,2,3)).view(-1,4).expand_as(dim1_inds)\n dim_value = bbox_targets.new(positive_weights).view(-1, 4).expand(inds.size(0), 4)\n bbox_outside_weights[inds, :] = dim_value\n\n inds = (labels == 0).nonzero().view(-1)\n # dim1_inds = inds.unsqueeze(1).expand(inds.size(0), 4)\n # dim2_inds = inds.new((0,1,2,3)).view(-1,4).expand_as(dim1_inds)\n dim_value = bbox_targets.new(negative_weights).view(-1, 4).expand(inds.size(0), 4)\n bbox_outside_weights[inds, :] = dim_value\n\n # map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)\n\n # labels\n labels = labels.view((1, height, width, A)).permute(0, 3, 1, 2).contiguous()\n labels = labels.view((1, 1, A * height, width))\n rpn_labels = labels\n\n # bbox_targets\n bbox_targets = bbox_targets \\\n .view((1, height, width, A * 4))\n\n rpn_bbox_targets = bbox_targets\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights \\\n .view((1, height, width, A * 4))\n\n rpn_bbox_inside_weights = bbox_inside_weights\n\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights \\\n .view((1, height, width, A * 4))\n\n rpn_bbox_outside_weights = bbox_outside_weights\n return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights",
"def calcRandomAnchors():\n anchors = []\n rng = (args.patchRadius, args.worldSize - args.patchRadius)\n for i in range(args.patchesPerSide*args.patchesPerSide):\n anchors.append((random.randrange(rng[0], rng[1]), random.randrange(rng[0], rng[1])))\n\n return anchors",
"def adjust_regular_roi_anchors(bounds: QRectF, anchors: list):\n for point in anchors:\n off = point.boundingRect().width() / 2\n if point.position == AnchorPosition.LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.TOP:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off)\n elif point.position == AnchorPosition.BOTTOM:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_LEFT:\n point.setPos(bounds.left() - off, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_RIGHT:\n point.setPos(bounds.right() - off, bounds.bottom() - off)",
"def create_default_boxes():\n default_boxes_loc = []\n # cal coords of all default boxes of every feat layer\n for i, feat_shape in enumerate(Config.model.feat_shape):\n num_box = len(Config.model.anchor_sizes[i]) + len(Config.model.anchor_ratios[i])\n\n cy, cx = np.mgrid[0:feat_shape[0], 0:feat_shape[1]]\n # set center in each pix as centers, and relative position of image, range(0,1)\n cy = (cy + 0.5) * Config.model.anchor_steps[i] / Config.model.image_shape[0]\n cx = (cx + 0.5) * Config.model.anchor_steps[i] / Config.model.image_shape[1]\n # cy,cx --shape[H,W,1]\n cy = np.expand_dims(cy, -1).astype('float32')\n cx = np.expand_dims(cx, -1).astype('float32')\n w = np.zeros(num_box, dtype='float32')\n h = np.zeros(num_box, dtype='float32')\n # use anchor_sizes, anchor_ratios and original image size to get relative H,W , shape:[B,]\n h[0] = Config.model.anchor_sizes[i][0] / Config.model.image_shape[0]\n w[0] = Config.model.anchor_sizes[i][0] / Config.model.image_shape[1]\n h[1] = np.sqrt(Config.model.anchor_sizes[i][0] * Config.model.anchor_sizes[i][1]) / Config.model.image_shape[0]\n w[1] = np.sqrt(Config.model.anchor_sizes[i][0] * Config.model.anchor_sizes[i][1]) / Config.model.image_shape[1]\n for j, ratio in enumerate(Config.model.anchor_ratios[i]):\n h[j + 2] = h[0] / np.sqrt(ratio)\n w[j + 2] = w[0] * np.sqrt(ratio)\n default_boxes_loc.append([cy, cx, h, w])\n return default_boxes_loc",
"def calcEvenAnchors():\n anchors = []\n dist = (args.worldSize+1)/(args.patchesPerSide+1)\n for i in range(dist-1, args.worldSize, dist):\n for j in range(dist-1, args.worldSize, dist):\n anchors.append((i,j))\n return anchors",
"def _generate_anchors(self, shape, base_anchor_size, feature_stride, ratios, scales, name='generate_anchors'):\n\n # Define a variable scope\n with tf.variable_scope(name):\n\n # Generate a base anchor\n base_anchor = tf.constant([0, 0, base_anchor_size, base_anchor_size], tf.float32)\n base_anchors = self._enum_ratios(self._enum_scales(base_anchor, scales), ratios)\n _, _, ws, hs = tf.unstack(base_anchors, axis=1)\n\n # Create sequence of numbers\n x_centers = tf.range(shape, dtype=tf.float32) * feature_stride\n y_centers = tf.range(shape, dtype=tf.float32) * feature_stride\n\n # Broadcast parameters to a grid of x and y coordinates\n x_centers, y_centers = tf.meshgrid(x_centers, y_centers)\n ws, x_centers = tf.meshgrid(ws, x_centers)\n hs, y_centers = tf.meshgrid(hs, y_centers)\n\n # Stack anchor centers and box sizes. Reshape to get a list of (x, y) and a list of (h, w)\n anchor_centers = tf.reshape(tf.stack([x_centers, y_centers], 2), [-1, 2])\n box_sizes = tf.reshape(tf.stack([ws, hs], axis=2), [-1, 2])\n\n # Convert to corner coordinates\n anchors = tf.concat([anchor_centers - 0.5 * box_sizes, anchor_centers + 0.5 * box_sizes], axis=1)\n\n return anchors"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
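A small self-contained check that mirrors the anchor layout produced by the set_anchors entry above. The values below (grid size, image size, anchor shapes) are made-up stand-ins for the model config, used only to show that grid centers are spaced at IMAGE_WIDTH/(W+1) and IMAGE_HEIGHT/(H+1) and that the result has shape (H*W*B, 4) in (cx, cy, w, h) order.

import numpy as np

# Toy stand-in values (not from any real config): a 2x3 output grid,
# 2 anchors per cell, on a 120x80 image.
H, W, B = 2, 3, 2
IMAGE_WIDTH, IMAGE_HEIGHT = 120, 80
shapes = np.array([[0.1, 0.2], [0.3, 0.4]])  # relative (w, h) per anchor
shapes[:, 0] *= IMAGE_WIDTH
shapes[:, 1] *= IMAGE_HEIGHT

# Centers are spaced evenly with a margin at the borders:
# column j has x = (j+1)*IMAGE_WIDTH/(W+1), row i has y = (i+1)*IMAGE_HEIGHT/(H+1).
centers_x = np.arange(1, W + 1) * IMAGE_WIDTH / (W + 1)
centers_y = np.arange(1, H + 1) * IMAGE_HEIGHT / (H + 1)

anchors = np.array([[cx, cy, w, h]
                    for cy in centers_y
                    for cx in centers_x
                    for (w, h) in shapes])
print(anchors.shape)  # (12, 4) == (H*W*B, 4)
print(anchors[:B])    # the B anchors of the top-left grid cell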
Returns a list of created posts for the given author | def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT):
posts = []
for i in range(num):
posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility))
return posts | [
"def get_posts(from_users):\r\n articles = list()\r\n load_dotenv()\r\n resp = requests.get(URL, auth=(os.getenv('NEWSGROUP_USR'), os.getenv('NEWSGROUP_PASS')))\r\n if resp.ok:\r\n content = BeautifulSoup(resp.content, 'html.parser')(\"tr\")\r\n id_pattern = re.compile(r'id=(\\d+)')\r\n for row in content[2:]:\r\n data = [cell.text.strip() for cell in row(\"td\")]\r\n post_id = id_pattern.search(row.find_all(href=True)[0]['href']).group(1)\r\n if 'Re: ' not in data[1] or ALL_POSTS:\r\n articles.append(Article( article_id=post_id, title=data[1], author=data[3], date_posted=data[0]))\r\n if from_users:\r\n return list(filter(lambda a: a.author in from_users, articles))\r\n else:\r\n return articles",
"def get_queryset(self):\r\n\r\n user = get_object_or_404(User, username=self.kwargs.get('username'))\r\n return Post.objects.filter(author=user).order_by('-date_posted')",
"def author_posts(request, author_id):\n id = int(author_id)\n user = myUser.objects.get(user_id=id)\n if user.is_admin:\n posts = Post.objects.select_related('author').order_by('-modified')\n else:\n posts = Post.objects.select_related('author').filter(author_id=id).order_by('-modified')\n\n return render(request, 'posts/authors.html',\n {'posts': posts})",
"def test_get_post_list_exclude_author_is_none(self):\n user = self.create_user()\n num_author_none_posts = randint(1, 10)\n num_posts = randint(11, 20)\n for i in range(num_author_none_posts):\n self.create_post()\n for i in range(num_posts):\n self.create_post(author=user)\n\n response = self.client.get(self.URL_API_POST_LIST)\n # author가 없는 Post개수는 response에 포함되지 않는지 확인\n self.assertEqual(len(response.data), num_posts)",
"def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')",
"def get_queryset(self):\n author = self.kwargs['author']\n target_author = get_object_or_404(Blog, author=author)\n return Blog.objects.filter(author=target_author)",
"def get_quotes_for_author(self, author: str) -> List[Quote]:\n params = (f'%{author}%',)\n query = '''\n SELECT *\n FROM quotes\n WHERE author LIKE ?\n ORDER BY created_at DESC\n '''\n\n ret = self.__execute_query(query, params)\n\n return self.__build_quotes_from_query_result(ret.fetchall())",
"def get_posts(self):\n return Post.select().where (Post.user == self)",
"def do_get_post_authors(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError, \"'%s' tag takes two arguments\" % bits[0]\n if bits[1] != 'as':\n raise template.TemplateSyntaxError, \"First argument to '%s' tag must be 'as'\" % bits[0]\n return QueryNode(User.objects.all(), bits[2])",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def find_by_author(cls, author_id):\n news = []\n all_news = cls.db.newsdb.find({'author_id': ObjectId(author_id)})\n for n in all_news:\n news.append(\n NewsModel(title=n['title'],\n content=n['content'],\n author_id=str(n['author_id']),\n _id=str(n['_id']))\n )\n\n return news",
"def single_user_posts(username, num):\n\tcurrent_user = User.objects.get(username=username)\n\tfor post in range(1, num):\n\t\tcurrent_user.blogpost_set.create(\n\t\t\ttitle=fake.sentence(\n\t\t\t\tnb_words=5,\n\t\t\t\tvariable_nb_words=True)[:-1],\n\t\t\tcontent=fake.paragraph(\n\t\t\t\tnb_sentences=10,\n\t\t\t\tvariable_nb_sentences=True)\n\t\t\t)\n\t\tcurrent_user.save()\n\treturn BlogPost.objects.all().order_by('-id')[:num-1]",
"def calls_authors(*args):\n calls_posts = CallsPost.objects.published()\n authors = User.objects.filter(callsposts__in=calls_posts)\n return list(authors.annotate(post_count=Count(\"callsposts\")))",
"def _retrieve_posts(self, blog_name) -> list:\n posts = []\n offset = 0\n limit = 50\n while True:\n payload = self.client.posts(blog_name, limit=limit, offset=offset, reblog_info=True)\n posts += payload['posts']\n if not payload['posts']:\n break\n offset += limit\n return posts",
"def _get_friend_posts(num):\n\tfriends = _get_friend_list(num)\n\tposts = Post.objects.filter(owner__in=friends).order_by('-date')\n\treturn posts",
"def get_posts(self):\n return self.blog_posts.all()",
"def post_list(self, **params):\n return self._get('posts.json', params)",
"def get_all_posts(self):\n cur = self.conn.cursor()\n\n query = 'SELECT blog.blog_id as id, blog.title as title, ' \\\n 'blog.subtitle as subtitle, ' \\\n 'blog.content as content, blog.date as date, ' \\\n 'author.name as author ' \\\n 'FROM blog, author ' \\\n 'WHERE blog.author_id = author.author_id ' \\\n 'ORDER BY blog_id DESC '\n\n posts = []\n cur.execute(query)\n\n for row in cur.fetchall():\n posts.append(dict(row))\n\n return posts",
"def find_posts(self):\n\n self.clear()\n\n for path in self.app.jinja_env.list_templates(filter_func=lambda t: t.startswith('posts/') and t.endswith('.html')):\n template = self.app.jinja_env.get_template(path)\n\n filename = path[6:-5]\n slug = filename[7:]\n date_fragment = filename[0:6]\n published_on = datetime.strptime(date_fragment, '%y%m%d').date()\n\n self.append(Post(title=template.module.title, slug=slug, published_on=published_on, path=path))\n\n self.sort(key=lambda post: post.published_on, reverse=True)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
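A short sketch of how a helper like create_multiple_posts above would be exercised in a Django test. The user-model import and assertion style are generic Django; whether the project's Post.author is a plain auth user or a custom profile model is an assumption here, not something confirmed by the entry itself.

# Hedged sketch: assumes Post.author accepts a standard Django user;
# swap in the project's own author/profile fixture if it differs.
# create_multiple_posts is the helper defined in the entry above
# (import it from wherever the project keeps its test utilities).
from django.contrib.auth import get_user_model
from django.test import TestCase

class CreateMultiplePostsTest(TestCase):
    def test_creates_requested_number_of_posts(self):
        author = get_user_model().objects.create_user(username='alice', password='pw')
        posts = create_multiple_posts(author, 5)
        self.assertEqual(len(posts), 5)
        self.assertTrue(all(p.author == author for p in posts))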
Test to ensure that all authors added to the relationship are present in the returned data. Called after a retrieve-relationship test has passed | def authors_in_relation(context, data, authors):
guids = [a.id for a in authors]
guids = map( lambda x: str(x).replace('-', ''), guids)
for guid in guids:
        context.assertTrue(str(guid) in data)
"def test_item_add_authors(self):\n\n actual_item = Item.objects.get(id=101)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_add=users)\n expected_item = Item.objects.get(id=101)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))",
"def test_connector_gets_all_authors(self):\n self.assertTrue(self.connector.authors is not None)\n self.assertTrue('test-slug' in self.connector.authors)\n self.assertEqual(self.connector.authors['test-slug']['id'], 2)",
"def test_known_related_objects_identity_preservation(self):\n self.assertIs(self.aldous, self.brave_new_world.author)",
"def testArticleListWithAuthorSearch(self):\n authorUri = self.er.getAuthorUri(\"associated\")\n q = QueryArticles(authorUri = authorUri)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res = self.er.execQuery(q)\n for art in res.get(\"articles\", {}).get(\"results\", []):\n foundAuthor = False\n for author in art.get(\"authors\"):\n if author[\"uri\"] == authorUri:\n foundAuthor = True\n assert foundAuthor == True\n\n cq = ComplexArticleQuery(BaseQuery(authorUri = authorUri))\n q = QueryArticles.initWithComplexQuery(cq)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res2 = self.er.execQuery(q)\n\n self.ensureSameResults(res, res2, '[articles][].totalResults')",
"def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]",
"def test_Artists(self):\n for data in manager.getArtists():\n self.assertIsNotNone(data['name'])\n self.assertIsNotNone(data['albums'])",
"def test_author_list(self):\n\n request = self.factory.get('api-author-list')\n force_authenticate(request, self.alien.host.user_auth)\n\n response = views.AuthorViewset.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n\n uids = [u[\"id\"] for u in response.data[\"authors\"]]\n self.assertIn(self.user.get_url(), uids)\n self.assertNotIn(self.inactive_user.get_url(), uids)\n self.assertNotIn(self.alien.get_url(), uids)",
"def test_item_remove_authors(self):\n\n actual_item = Item.objects.get(id=103)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_del=users)\n expected_item = Item.objects.get(id=103)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))",
"def test_create_authors(self):\n payload = {\n 'first_name': 'testname1',\n 'last_name': 'testname2',\n 'nickname': 'testnick1'\n }\n\n res = self.client.post(reverse('authors'), payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n author = Author.objects.get(id=res.data['id'])\n\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(author, key))",
"def test_item_add_superiors(self):\n\n actual_item = Item.objects.get(id=101)\n superior_first = Item.objects.get(id=102)\n superior_second = Item.objects.get(id=103)\n superiors = [superior_first, superior_second]\n actual_item.update_superiors(superiors_add=superiors)\n expected_item = Item.objects.get(id=101)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))",
"def relationships(self):",
"def test_conferencepapers_authors_exist(self) -> None:\n for id_conferencepaper, conferencepaper in self.data['conferencepapers'].items():\n for id_author in conferencepaper['authors']:\n self.assertIn(\n id_author,\n self.data['authors'],\n '{} references author {} not found in authors.yml'.format(id_conferencepaper, id_author)\n )",
"def test_authors():\n assert(hasattr(tekel, '__authors__'))",
"def test_get_relations():\n with get_fixture_uri((\n 'collections/artists.json',\n 'collections/albums.json',\n 'collections/tracks.json'\n )) as (\n artists_uri,\n albums_uri,\n tracks_uri\n ):\n # Artists\n artists = Collection(Artist, artists_uri, plugins=[\n byte.compilers.operation,\n byte.executors.file,\n byte.formats.json\n ])\n\n # Albums\n albums = Collection(Album, albums_uri, plugins=[\n byte.compilers.operation,\n byte.executors.file,\n byte.formats.json\n ])\n\n albums.connect(Album.Properties.artist, artists)\n\n # Tracks\n tracks = Collection(Track, tracks_uri, plugins=[\n byte.compilers.operation,\n byte.executors.file,\n byte.formats.json\n ])\n\n tracks.connect(Track.Properties.album, albums)\n tracks.connect(Track.Properties.artist, artists)\n\n # Fetch track, and ensure relations can be resolved\n assert_that(tracks.get(Track['id'] == 1), has_properties({\n 'id': 1,\n 'title': 'Ascension (feat. Vince Staples)',\n\n 'artist': has_properties({\n 'id': 1,\n 'title': 'Gorillaz'\n }),\n\n 'album': has_properties({\n 'id': 1,\n 'title': 'Humanz',\n\n 'artist': has_properties({\n 'id': 1,\n 'title': 'Gorillaz'\n })\n })\n }))",
"def test_get_relationship_templates(self):\n pass",
"def test_resource_relation_resource_find_relations_get(self):\n pass",
"def test_serialize_related(self):\n\n s = serialize(self.author, include=[('books', dict())])\n self.assertEqual(s['name'], 'User Foo')\n self.assertEqual(len(s['books']), len(self.books))\n for b in s['books']:\n self.assertTrue(b['title'].startswith('Book '))\n self.assertTrue(b['isbn'].startswith('123-1-12-123456-'))",
"def test_build_author_set_inclusion_filter(self):\n a, b, c, d = [protein(namespace=\"test\", name=n()) for _ in range(4)]\n a1, a2, a3, a4 = n(), n(), n(), n()\n\n c1 = {\n NAMESPACE: CITATION_TYPE_PUBMED,\n IDENTIFIER: n(),\n CITATION_AUTHORS: [a1, a2, a3],\n }\n c2 = {\n NAMESPACE: CITATION_TYPE_PUBMED,\n IDENTIFIER: n(),\n CITATION_AUTHORS: [a1, a4],\n }\n\n graph = BELGraph()\n keyword, url = n(), n()\n graph.namespace_url[keyword] = url\n graph.add_increases(a, b, evidence=n(), citation=c1)\n graph.add_increases(a, b, evidence=n(), citation=c2)\n graph.add_increases(b, c, evidence=n(), citation=c1)\n graph.add_increases(c, d, evidence=n(), citation=c2)\n\n subgraph1 = get_subgraph_by_authors(graph, [a1, a2])\n\n self.assertIsInstance(subgraph1, BELGraph)\n self.assert_all_nodes_are_base_entities(subgraph1)\n\n self.assertIn(keyword, subgraph1.namespace_url)\n self.assertEqual(url, subgraph1.namespace_url[keyword])\n\n self.assertIn(a, subgraph1)\n self.assertIn(b, subgraph1)\n self.assertIn(c, subgraph1)\n self.assertIn(d, subgraph1)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Create Friends and Friends of Friends and associated posts | def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT):
for friendor in friendors:
friend.add_friend(friendor)
friendor.add_friend(friend)
# FriendRelationship.objects.create(friendor = friendor, friend = friend)
if create_post:
Post.objects.create(content = TEXT, author = friendor, visibility = visibility) | [
"def add_friend(request, pk):\n new_friend = User.objects.get(pk=pk)\n Friend.make_friend(request.user, new_friend)\n return redirect('posts:posts-list')",
"def create_friends(user, existing_friends):\n #ToDo Add error handling\n bulk_insert = []\n existing_friend_ids = []\n for friend in existing_friends:\n bulk_insert.append(pm.FriendData(uid=user, friend_id=friend.user))\n bulk_insert.append(pm.FriendData(uid=friend.user, friend_id=user))\n existing_friend_ids.append(friend.uid)\n\n pm.FriendData.objects.bulk_create(\n bulk_insert\n )\n\n #Create Graph Node object\n graph_db = neo4j.GraphDatabaseService()\n\n user_index = graph_db.get_or_create_index(neo4j.Node, \"User\")\n user_node = user_index.get_or_create(\"uid\", UserSocialAuth.objects.get(user=user).uid, {\"uid\":UserSocialAuth.objects.get(user=user).uid})\n user_node.add_labels(\"User\")\n\n #Create friend relationships to existing friends\n for friend_uid in existing_friend_ids:\n\n friend_created = user_index.create_if_none(\"uid\", friend_uid, {\"uid\":friend_uid})\n if friend_created:\n friend_created.add_labels(\"User\")\n else:\n friend_created = user_index.get(\"uid\", friend_uid)[0]\n\n rel = graph_db.create((user_node, \"FRIENDS\", friend_created))",
"def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend",
"def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})",
"def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )",
"def share_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n # self._posts.append(post)\n self.write_post(post.tags, friend)\n friend.update_relation(self, SHARE_POST)\n friend.append_share(post, user=self)\n # self.update_relation(friend, SHARE_POST)\n break",
"def create_friend():\r\n\r\n try:\r\n json_payload = api_helpers.json_payload(request)\r\n api_helpers.verify_required_data_present(\r\n request_payload=json_payload,\r\n required_elements=FRIEND_RESOURCE_ELEMENTS)\r\n except ValueError as error:\r\n error_response = make_response(jsonify({\"error\": str(error)}), 400)\r\n return error_response\r\n\r\n if datastore.get_friend(g.datastore, json_payload['id']):\r\n error_response = make_response(\r\n jsonify(\r\n {\"error\": \"An friend resource already exists with the \"\r\n \"given id: {}\".format(json_payload['id'])}),\r\n 400)\r\n return error_response\r\n\r\n datastore.add_friend(g.datastore, json_payload)\r\n response = make_response(jsonify({\"message\": \"Friend resource created.\"}),\r\n 201)\r\n return response",
"def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()",
"def test_friends_post_valid(self):\n uid = str(self.user.id)\n request = self.factory.post(\n reverse('api-author-friends', args=(uid,)),\n {\n \"query\": \"friends\",\n \"author\": \"author_id\",\n \"authors\": [\n self.friend.get_url(),\n self.alien.get_url(),\n self.not_friend.get_url()\n ]\n },\n format=\"json\",)\n force_authenticate(request, self.alien.host.user_auth)\n\n response = views.FriendsView.as_view()(request, pk=uid)\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.friend.get_url(), response.data[\"authors\"])\n self.assertEqual(len(response.data[\"authors\"]), 1)",
"def post(self, request, *args, **kwargs):\n frompath = urlparse(request.DATA.get('from_person')).path\n topath = urlparse(request.DATA.get('to_person')).path\n\n #print(request.DATA)\n if type(frompath) is str and type(topath) is str:\n frompath_elements = frompath.split('/')\n topath_elements = topath.split('/')\n else:\n return Response({'error: invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n fromPerson = get_object_or_404(Person, username=frompath_elements[-2])\n toPerson = get_object_or_404(Person, username=topath_elements[-2])\n count = Relationship.objects.filter(from_person=fromPerson, to_person=toPerson).count()\n\n #Reject a request to create Relationship with self\n if request.user.person.username == toPerson.username or count > 0:\n return Response({'error: Relationship with self not permitted'}, status=status.HTTP_400_BAD_REQUEST)\n\n if request.user.person.username == fromPerson.username or request.user.is_staff:\n return self.create(request, *args, **kwargs)\n return Response({'error': 'from_user does not match authenticated User'}, status=status.HTTP_400_BAD_REQUEST)",
"def create_friend_request():\n if request.method == \"GET\":\n friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]\n return jsonify({'success': True, 'friend_requests': friend_requests})\n\n if request.method == \"POST\":\n # Get recieving user id from request\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n\n if 'recieving_user_id' not in json:\n raise CustomError(400, message=\"Must include recieving_user_id\")\n\n recieving_user_id = json['recieving_user_id']\n\n # Get the user object\n recieving_user = User.query.get(recieving_user_id)\n if recieving_user is None:\n raise CustomError(\n 404,\n message='User with id: {} was not found.'.format(\n recieving_user_id)\n )\n\n # Check friendship does not already exist\n friendship_exists = Friendship.query.filter(\n (Friendship.actioning_user_id == g.user.id) |\n (Friendship.recieving_user_id == g.user.id),\n (Friendship.actioning_user_id == recieving_user_id) |\n (Friendship.recieving_user_id == recieving_user_id)\n ).first()\n\n if friendship_exists:\n raise CustomError(\n 409,\n message=\"There is either a pending friend request between the\"\n \"two users or the two users are already friends.\"\n )\n\n # Insert friend request\n friend_request = Friendship(g.user, recieving_user)\n db.session.add(friend_request)\n db.session.commit()\n\n return jsonify({'success': True}), 201",
"def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))",
"def testFriendQueryPostSuccessOneFriend(self):\n # The uuid of the authors\n id1 = str(self.author1.uuid)\n id2 = str(self.author2.uuid)\n\n # Set authors to be friends\n self.author1.friends.add(self.author2)\n self.author2.friends.add(self.author1)\n\n # Some uuids of unknown authors\n fakeUUID = str(uuid_import.uuid4())\n fakeUUID2 = str(uuid_import.uuid4())\n\n # JSON request with some non-friend uuids\n JSONdata = json.dumps({\"query\": \"friends\", \"author\": id1, \"authors\": [fakeUUID, id2, fakeUUID2]})\n\n # POST request for friend querying\n response = self.client.post('/api/friends/%s' % id1, data=JSONdata, content_type='application/json; charset=utf')\n\n self.assertEquals(response.status_code, 200, \"Response not 200\")\n\n decoded = json.loads(response.content.decode('utf-8'))\n\n # Ensure the response only includes the real friend\n self.assertEquals(decoded['query'], \"friends\", \"JSON response needs \\\"query\\\":\\\"friends\\\"\")\n self.assertEquals(decoded['author'], id1, \"Author has incorrect ID\")\n self.assertEquals(len(decoded['friends']), 1, \"Author should have exactly one friend\")\n self.assertEquals(decoded['friends'][0], id2, \"Authors are not friends but they should be\")",
"def react_to_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n attitude = self._interests[random.choice(post.tags)]\n self.update_positive_and_negative_actions(friend, attitude)\n reaction = Reaction(attitude, self.unique_id)\n post.add_reaction(reaction)\n friend.update_relation(self, REACT)\n friend.append_reaction(post, reaction)\n # self.update_relation(friend, REACT)\n break",
"def post(self):\n args = self.reqparse.parse_args()\n uid = uuid.uuid4()\n if args['name'] is not None and len(args['name']) > 0 and \\\n args['lastname'] is not None and len(args['lastname']) > 0 and \\\n args['sex'] is not None and args['sex'].upper() in ('M', 'F') and \\\n args['email'] is not None and len(args['email']) > 0 and \\\n args['bday'] > 0 and args['bmonth'] > 0 and args['byear'] > 1900:\n if not args['mailauto']:\n # Generate a simple Facebook account\n manager.store_uuid(uid)\n gen_thread = threading.Tread(target=generator.generate_facebook,\n args=(uid, args['name'],\n args['lastname'],\n args['email'], args['sex'],\n args['bday'], args['bmonth'],\n args['byear'],\n args['friends']))\n gen_thread.start()\n return jsonify(uuid=uid, code=200)\n else:\n if args['mailtype'].upper() in api_common.EMAIL_SERVICES:\n manager.store_uuid(uid)\n gen_thread = threading.Tread(target=generator.generate_facebook,\n args=(uid, args['name'],\n args['lastname'],\n args['email'],\n args['mailtype'],\n args['sex'], args['bday'],\n args['bmonth'],\n args['byear'],\n args['friends']))\n gen_thread.start()\n return jsonify(uuid=uid, code=200)\n else:\n return jsonify(error=\"Incorrect params\", code=400)",
"def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)",
"def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')",
"def add_friend(self, friend_id):\n if Relationship.objects.filter(from_user_id=self.pk, to_user_id=friend_id).exists():\n return False\n else:\n friend = RegisteredUser.objects.filter(id=friend_id)\n if friend.count() == 1:\n friend = friend[0]\n Relationship.objects.create(from_user=self, to_user=friend, balance=0.0)\n return True\n else:\n return False",
"def add_direct(request):\n friend = request.POST['friend'].strip()\n\n if userauth_models.User.objects.filter(username=friend).exists():\n friendUser = userauth_models.User.objects.get(username=friend)\n elif userauth_models.User.objects.filter(phone_number=friend):\n friendUser = userauth_models.User.objects.get(phone_number=friend)\n elif userauth_models.User.objects.filter(email=friend):\n friendUser = userauth_models.User.objects.get(email=friend)\n else:\n return HttpResponse(status=403) #no friend :(\n\n threadName = request.user.username + friendUser.username\n\n if models.MessageThread.objects.filter(title=threadName).exists():\n thread = models.MessageThread.objects.get(title=threadName)\n elif models.MessageThread.objects.filter(title=(friendUser.username + \\\n request.user.username)).exists():\n thread = models.MessageThread.objects.get(title=(friendUser.username \\\n + request.user.username))\n else:\n thread = models.MessageThread(title=threadName, psk=threadName, \\\n admin=request.user.username, friend1 = friendUser.username, is_direct=True)\n #thread = models.MessageThread(title=threadName, psk=threadName)\n thread.save()\n\n if not request.user in thread.clients.all():\n thread.clients.add(request.user)\n #thread.clients.add(friendUser)\n channel_layer = get_channel_layer()\n if 'channel_name' in request.session:\n async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n \n #if not friendUser in thread.clients.all():\n # thread.clients.add(friendUser)\n # channel_layer = get_channel_layer()\n\n # if 'channel_name' in request.session:\n # async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n\n thread_data = serializers.MessageThreadSerializer(thread).data\n\n return HttpResponse(status=200)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes post author, comment author and creates a post and associated comment | def create_post_with_comment(pauthor, cauthor, visibility, ptext, ctext):
post = Post.objects.create(content = ptext, author = pauthor, visibility=visibility)
comment = Comment.objects.create(comment = ctext, post = post, author = cauthor)
return (post, comment) | [
"def create_comment(post, author, content):\n return Comment.objects.create(post=post, author=author, content=content)",
"def create(cls, author, raw_comment, parent):\n\n html_comment = mistune.markdown(raw_comment)\n # todo: any exceptions possible?\n comment = cls(author=author,\n author_name=author.user.username,\n raw_comment=raw_comment,\n html_comment=html_comment)\n\n if isinstance(parent, Submission):\n submission = parent\n comment.submission = submission\n elif isinstance(parent, Comment):\n submission = parent.submission\n comment.submission = submission\n comment.parent = parent\n else:\n return\n submission.comment_count += 1\n submission.save()\n\n return comment",
"def add_post(author_id: int, post: dict) -> int:\n result = db.run(\"CREATE (n:Post {{content: '{}', creation_datetime: '{}', update_datetime: '{}', photo_address: '{}'}}) RETURN id(n)\"\n .format(post['content'], post['creation_datetime'], post['update_datetime'], post['photo_address']))\n post_id = result.data()[0]['id(n)']\n result = db.run(\"MATCH (u:User), (p:Post) WHERE id(u) = {} AND id(p) = {} CREATE (u)-[r:AUTHOR_OF]->(p)\"\n .format(author_id, post_id))\n return post_id",
"def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()",
"def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex",
"def save(self, commit=True, author: VisitorModel = None, post_id: int = None):\n comment = super().save(commit=False)\n comment.author = author\n comment.post_id = post_id\n if commit:\n comment.save()\n return comment",
"def user_post_comment(\n self,\n parent_post = None,\n body_text = None,\n timestamp = None,\n by_email = False\n ):\n\n if body_text is None:\n raise ValueError('body_text is required to post comment')\n if parent_post is None:\n raise ValueError('parent_post is required to post comment')\n if timestamp is None:\n timestamp = datetime.datetime.now()\n\n self.assert_can_post_comment(parent_post = parent_post)\n\n comment = parent_post.add_comment(\n user = self,\n comment = body_text,\n added_at = timestamp,\n by_email = by_email\n )\n comment.add_to_groups([self.get_personal_group()])\n\n parent_post.thread.invalidate_cached_data()\n award_badges_signal.send(\n None,\n event = 'post_comment',\n actor = self,\n context_object = comment,\n timestamp = timestamp\n )\n return comment",
"async def create_reply(*, comment: models.Comment = Depends(resolve_comment), created_comment: CreateComment,\n current_user: models.User = Depends(resolve_current_user), db: Session = Depends(get_db)):\n return crud.create_comment(db, author_id=current_user.id, parent_resub_id=comment.parent_resub_id,\n parent_post_id=comment.parent_post_id, parent_comment_id=comment.id,\n content=created_comment.content)",
"def create_new_post(cls, title: str, body: str, author: User) -> Post:\n\n return cls(\n title=title,\n body=body,\n author=author,\n comments=list()\n )",
"def create_post():\r\n try:\r\n comment = form[\"comment\"].value\r\n except:\r\n comment = \"I didn't enter a comment :(\"\r\n\r\n try:\r\n name = form[\"name\"].value\r\n except:\r\n print(\"Content-type: text/html\")\r\n print()\r\n print(\"You need to at least submit a name. \\\r\n Please go back and try again!\")\r\n raise SystemExit\r\n\r\n try:\r\n email = form[\"email\"].value\r\n except:\r\n email = None\r\n\r\n try:\r\n website = form[\"website\"].value\r\n except:\r\n website = None\r\n\r\n post = Post.create(\r\n comment=comment,\r\n name=form[\"name\"].value,\r\n email=email,\r\n website=website,\r\n date=datetime.now().strftime(\"%H:%M - %d/%m/%y\")\r\n )",
"def create_comment_immediately_below_post():\n post = create_a_post()\n comment = Comment.create(post=post, body=\"I'm a comment right below a post\")\n comment.save()\n return comment",
"def create_post(category, author, name, content, status):\n return Post.objects.create(category=category, author=author, name=name, content=content, status=status)",
"def post_comment(self):\n article = self.client.post(\n self.post_article_url,\n data=self.article,\n format='json'\n )\n data = json.loads(article.content)\n article_slug = data['article']['slug']\n\n comment = self.client.post(\n self.post_get_url.format(\n article_slug=article_slug),\n data=self.create_comment,\n format='json'\n )\n comment_data = json.loads(comment.content)\n comment_id = comment_data['id']\n return comment_id",
"def post(self):\n # Set comments name key to the stage number of the current page\n current_path = self.request.path\n stage_number_index = 7\n current_stage_number = current_path[stage_number_index]\n comments_name = \"stage-\" + current_stage_number + \"-comments\"\n\n comment = Comment(parent=comments_key(comments_name))\n\n # When the person is making the post, check to see whether the person\n # is logged into Google\n if users.get_current_user():\n comment.author = Author(identity=users.get_current_user().user_id(),\n name=users.get_current_user().nickname(),\n email=users.get_current_user().email())\n else:\n comment.author = Author(name=\"anonymous@anonymous.com\",\n email=\"anonymous@anonymous.com\")\n\n # Get the content from our request parameters, in this case,\n # the message is in the parameter \"content\"\n comment.content = self.request.get(\"content\")\n\n if not comment.content:\n # Redirect to the comments section of the current notes page with\n # the error query parameter\n error = \"&error=error\"\n self.redirect(current_path + \"?\" + comments_name + error +\n \"#comment-section\")\n else:\n # Write to the Google Database\n comment.put()\n # Redirect to the comments section after posting a comment\n self.redirect(\"?\" + comments_name + \"#comment-section\")",
"def test_add_comment(self):\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)",
"def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return",
"def test_comment_creation(self):\n response = self.client.post(reverse('posts:comment_create'),\n data={\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description': 'This is a '\n 'test_comment'\n }, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data, {\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description': 'This is a '\n 'test_comment'\n })",
"def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict",
"def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes response.data and confirms no repeated guids (No repeated posts) | def assertNoRepeatGuids(context, posts):
guids = [p['guid'] for p in posts]
context.assertTrue(len(set(guids)) == len(posts), "Some guids repeated") | [
"def check_duplicates():\n print \"Building data set...\\n\"\n\n rows = db.links.find()\n seen = set()\n count = 0\n for row in rows:\n value = hashlib.md5(row['body'].encode('utf8')).hexdigest()\n if value in seen:\n count += 1\n print row['category'], row['_id']\n # db.links.remove({'_id': row['_id']})\n else:\n seen.add(value)\n print count, 'duplicate(s)'\n print \"-------------------\\n\"",
"def getLastPostID(response):",
"def checkUniquenessIds(self) :\n l = set([len(x[\"id\"]) for x in self.GO])\n if (not l == set([1])) :\n raise GOparserError(\"Not all entries have exactly one id\")\n ids = set([x[\"id\"][0] for x in self.GO])\n if (not len(ids) == len(self.GO)) :\n raise GOparserError(\"Not all entries have a unique ids\")",
"def is_unique(self, id):\n return id not in self.post_ids",
"def _is_duplicate(self, guid: str) -> bool:\n\n if not guid or not isinstance(guid, str):\n raise ValueError(\"guid value is not correct\")\n for pub in self._all_publications:\n if pub.guid == guid:\n return True\n return False",
"def test_response_reusage_after_replied(self):\n\n post1 = self._create_tweet(\n content=\"I need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n\n resp1 = Response.objects.upsert_from_post(post1)\n\n support = UserProfile.objects.upsert('Twitter', dict(screen_name='@test2'))\n self._create_tweet(\n user_profile=support,\n content=\"We cant help you right now. Sorry.\",\n channel=self.outbound,\n demand_matchables=True,\n in_reply_to=post1)\n\n post2 = self._create_tweet(\n content=\"I still need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertNotEqual(resp1.id, resp2.id)",
"def test_uniqueness_on_post_requests(self):\n amount_of_events = self.TEST_MODEL.objects.filter(user=self.user_1).count()\n\n self._make_post_request(self.client_1, self.DEFAULT_POST_PARAMS)\n amount_of_events_post_request = self.TEST_MODEL.objects.filter(user=self.user_1).count()\n\n self._make_post_request(self.client_1, self.DEFAULT_POST_PARAMS)\n amount_of_events_post_request_repeated = self.TEST_MODEL.objects.filter(user=self.user_1).count()\n\n self.assertTrue(amount_of_events_post_request > amount_of_events)\n self.assertEqual(amount_of_events_post_request_repeated, amount_of_events_post_request)",
"def test_no_posts_ids(self):\n response = self.client.post(\n self.api_link,\n json.dumps({\n 'posts': [],\n }),\n content_type=\"application/json\",\n )\n self.assertContains(\n response, \"You have to specify at least one post to split.\", status_code=400\n )",
"def test_invalid_posts_ids(self):\n response = self.client.post(\n self.api_link,\n json.dumps({\n 'posts': [1, 2, 'string'],\n }),\n content_type=\"application/json\",\n )\n self.assertContains(\n response, \"One or more post ids received were invalid.\", status_code=400\n )",
"def test_import_duplicates(self):\n post_data = {\n 'sounds': [\n {'uuid': '73671ba8-71a4-463a-a836-eb79ecf50386', 'duration': 345, 'created_on': '2016-08-17 20:49:53.345678+08:00'},\n {'uuid': '73671ba8-71a4-463a-a836-eb79ecf50386', 'duration': 1, 'created_on': '2016-08-17 20:49:53.345678+08:00'},\n {'uuid': '73671ba8-71a4-463a-a836-eb79ecf50386', 'duration': 0, 'created_on': '2016-08-17 20:49:53.345678+08:00'},\n {'uuid': '41f94400-2a3e-408a-9b80-1774724f62af', 'duration': 123, 'created_on': '2016-08-17 20:49:53.123456+08:00'},\n ]\n }\n num_duplicates = 2\n num_expected = len(post_data['sounds']) - num_duplicates\n\n compressed_json = gzip.compress(json.dumps(post_data).encode('utf-8'))\n response = self.client.post(reverse('sounds:import_json'), compressed_json, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(Sound.objects.count(), num_expected)\n\n json_response = response.json()\n self.assertDictEqual(json_response, {'num_imported': num_expected})\n\n for sound in post_data['sounds']:\n found_sound = Sound.objects.filter(uuid=sound['uuid']).count()\n self.assertEqual(found_sound, 1)",
"def check_for_duplicate_phone_numbers(d):\n\n print('# This function is under maintenance. Please try again later.')\n return d",
"def test_to_create__duplicate_entry(self):\n tester = self.app.test_client(self)\n res = tester.post('/API/v1/entries', data=TestBase.duplicate_entry,\n headers=self.access_header,\n content_type='application/json')\n self.assertEqual(res.status_code, 409)\n self.assertIn(b\"Duplicate Entry data\",\n res.data)",
"def test_response_reusage(self):\n\n post1 = self._create_db_post(content=\"@test I need a foo.\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n self.assertTrue(self.sc.inbound_channel.is_assigned(post1))\n\n conv1 = self.sc.upsert_conversation(post1)\n post2 = self._create_db_post(content=\"I still need a foo!\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n conv2 = self.sc.upsert_conversation(post2)\n\n resp1 = Response.objects.upsert_from_post(post1)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertEqual(conv1.id, conv2.id)\n self.assertEqual(resp1.id, resp2.id)\n self.assertTrue(resp2.post_date > resp1.post_date)",
"def check_soundcloud_ids_mismatch():\n wiki = pywikibot.Site('en', 'wikipedia')\n category = pywikibot.Category(wiki, CATEGORY)\n pages = pagegenerators.CategorizedPageGenerator(category)\n\n total_pages = 0\n processed = 0\n result = []\n\n for page in pages:\n total_pages += 1\n res = compare_soundcloud_ids(page, wiki)\n\n if res == True:\n # The IDs are the same, nothing to do. The category may contains cached entries\n print('The ID for \"%s\" are the same in both the article and Wikidata.' % page.title())\n processed += 1\n continue\n elif not res:\n print('Skipping %s. It has no SoundCloud ID' % page.title())\n processed += 1\n continue\n\n result.append([res, page.title()])\n\n for ids, title in result:\n # Now we have two IDs (one from article, another from repo).\n # Let us check their associated movie titles in the website\n repoId = ids['repoId']\n wikiId = ids['articleId']\n c_url, response_code1 = check_soundcloud_id(repoId)\n c_url2, response_code2 = check_soundcloud_id(wikiId)\n\n if c_url == c_url2:\n # Both valid\n print('''Both SoundClouds IDs are valid for the title. %s''' % title)\n processed += 1\n elif response_code1 == 404 and response_code1 != response_code2:\n # Handle case\n processed += 1\n elif response_code2 == 404 and response_code2 != response_code1:\n # Handle case\n processed += 1\n else:\n # Handle final case\n pass\n\n print('Finished! Total pages: %s. Processed: %s' %(total_pages, processed))",
"def test_duplicate_meetups(self):\n\n res = self.post_duplicate_meetup()\n self.assertEqual(res.status_code, 409)",
"def is_duplicate(phonebook, person_id, number, email):\r\n value = \"\"\r\n for item in phonebook:\r\n if item[\"person_id\"] == person_id or item[\"number\"] == number or item[\"email\"] == email:\r\n if item[\"person_id\"] == person_id:\r\n value += \"\\n- ID\"\r\n if item[\"number\"] == number:\r\n value += \"\\n- Phone number\"\r\n if item[\"email\"] == email:\r\n value += \"\\n- Email\"\r\n print(\"\\n== Please enter again. ==\\nThe value must be unique in: {}\".format(value))\r\n return True",
"def check_dataset_duplicate_ids(self, dataset):\n ids = [a['_id'] for a in dataset]\n # Construct list of duplicates\n dupe_ids = [a for n, a in enumerate(ids) \n if a in ids[:n]]\n if len(dupe_ids) > 0:\n # Get list of names for the duplicate pandas\n dupe_names = [a['en.name'] for a in dataset \n if a['_id'] in dupe_ids]\n raise IdError(\"ERROR: duplicate ids for en.names: %s\" \n % str(dupe_names))",
"def test_get_unique(self):\n program_label = 'demo-program'\n url = 'https://sshs.qualtrics.com/SE?Q_DL=foobar_foobar_MLRP_foobar&Q_CHL=gl'\n sl = SurveyLink(id=url, program_label=program_label, survey_ordinal=1)\n sl.put()\n # Survey links have time to become consistent as they are uploaded well\n # ahead of participation.\n sl.key.get()\n\n response = self.testapp.post_json(\n '/api/survey_links/demo-program/1/get_unique',\n {'program_label': program_label, 'survey_ordinal': 1},\n )\n response_dict = json.loads(response.body)\n expected = {\n 'program_label': program_label,\n 'survey_ordinal': 1,\n 'url': url,\n }\n self.assertEqual(expected, response_dict)",
"def _process_response(self, resp):\n signals = []\n resp = resp.json()\n fresh_posts = posts = resp['data']\n paging = resp.get(self._paging_field) is not None\n self.logger.debug(\"Facebook response contains %d posts\" % len(posts))\n\n # we shouldn't see empty responses, but we'll protect our necks.\n if len(posts) > 0:\n self.update_freshness(posts)\n fresh_posts = self.find_fresh_posts(posts)\n paging = len(fresh_posts) == self.limit()\n\n # store the timestamp of the oldest fresh post for use in url\n # preparation later.\n if len(fresh_posts) > 0:\n self.prev_stalest = self.created_epoch(fresh_posts[-1])\n\n signals = [FacebookSignal(p) for p in fresh_posts]\n self.logger.debug(\"Found %d fresh posts\" % len(signals))\n\n return signals, paging"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Compares a list of authors against a list of displaynames | def cross_check(context, authors, poscom):
displaynames = [x['author']['displayname'] for x in poscom]
for author in authors:
if author.user.username not in displaynames:
context.assertFalse(True, "%s not in list" %author.user.username) | [
"def compare_authors(query_author, rg_author):\n \n # Checks if rg_author has any special non-ASCII characters. Translates query_author based on that and sets the author's first and last name strings.\n # Still doesn't address if one half of name uses UTF-8 only characters and the other half doesn't) but unlikely case\n if unidecode(rg_author) == rg_author:\n author_first = unidecode(query_author[0]).split()\n author_last = unidecode(query_author[2]).split()\n else:\n author_first = query_author[0].split()\n author_last = query_author[2].split()\n \n # Splits rg_author into tokens\n rg_tokens = rg_author.split()\n \n # Removes Jr from last name. Need to put the last check in case someone is just named \"Jr\"\n if len(rg_tokens) > 1 and rg_tokens[-1] == \"Jr\" or rg_tokens[-1] == \"Jr.\":\n rg_tokens.pop(-1)\n \n # Deals with no first_name in query_author\n if author_first == \"\" and compare_names(author_last, rg_tokens[-1]):\n return True\n \n # Incase rg_author uses first name initial, compares the first letter of queried author's first name to that string\n if initial_check(rg_tokens[0]):\n author_first[0] = str(author_first[0][0]) + \".\"\n \n # Merges first name for queried author\n merged_author_first = \"\"\n for name in author_first:\n \n # Incase rg_author uses first name initial, compares the first letter of queried author's first name to that string\n if initial_check(rg_tokens[0]):\n name = name[0] + \".\"\n \n merged_author_first = merged_author_first + name\n \n # Assigns last name to last part of last name\n author_last = author_last[-1]\n \n # Merges all but last name for researchgate name\n merged_rg_first = \"\"\n for name in rg_tokens[:-1]:\n \n # Adds periods to initials in the name\n if len(name) == 1:\n name = name + \".\"\n \n # Converts name to initial of queried author is in initial\n if initial_check(author_first[0]):\n name = name[0] + \".\"\n \n merged_rg_first = merged_rg_first + name\n \n return ( compare_names(merged_author_first, merged_rg_first) and author_last.casefold() == rg_tokens[-1].casefold() )",
"def match_author_publication(firstname, lastnames, author, bib_key):\n author = [xname.replace(\".\", \" \").strip() for xname in author]\n first, von, last, jr = author\n first = first.lower()\n last = \"-\".join(last.lower().replace(\"-\", \" \").replace(\" \", \" \").split(\" \"))\n jr = jr.lower()\n\n # Additional variable that may help to avoid incorrect name matching #77\n von_last = \"-\".join([von, last])\n von_last = von_last.replace(\" \", \"-\").lower()\n\n if last.lower() in lastnames:\n # First match based on last name\n if (\n len(first) > 1\n and first.lower() == firstname.lower()\n or len(jr) > 1\n and jr.lower() in lastnames\n ):\n # Easy match, the first name is complete and matches up\n return True\n elif len(first) > 1 and \" \" in first:\n # Incomplete match, some bib entries have authors as 'R Manniesing' instead of the full name\n # or 'J A W M van der Laak' where firstname contains 'J A W M'\n # or 'Jeroen AWM van der Laak' where firstname contains 'Jeroen A W M'\n # This piece of code makes sure there is only one name and no spaces in between\n # von = ' '.join(first.split(' ')[1:]) + ' ' + von\n first = first.split(\" \")[0].lower()\n if first == firstname.lower():\n return True\n # if 'first' contains a single letter, it will continue\n\n if len(first) == 1 and first[0].lower() == firstname[0].lower():\n # If only one letter is provided as first name (incomplete in the bib entry).\n # An additional variable stores the initial lastnames\n initials_lastnames = [x[0].lower() for x in lastnames]\n if len(von) == 0 and len(jr) == 0:\n # If there is no 'von' neither 'jr', then it is a match\n return True\n elif len(jr) >= 1 and jr[0].lower() in initials_lastnames:\n # If 'jr' contains something, it will have to be listed on 'initials_lastnames'\n # to become a match\n return True\n elif len(von.strip()) >= 1 and von.strip()[0].lower() in initials_lastnames:\n # If 'von' contains something, it will have to be listed on 'initials_lastnames'\n # to become a match\n return True\n elif (\n \"-\" != von_last[0] and len(lastnames) >= 2 and lastnames[-1] in von_last\n ):\n # If none of the previous methods worked, an additional checkup is done.\n # This is done only when having at least two last names.\n # the last lastname should be in 'von_last'.\n # For instance 'J A W M van der Laak' will become 'A-W-M-van-der-Laak', this matches up with 'van-der-laak'\n return True\n return False\n else:\n return False",
"def list_authors(train_data, test_data):\n train_authors = [x.author for x in train_data]\n test_authors = [x.author for x in test_data]\n both = train_authors + test_authors\n\n author_counter = Counter(both)\n log('Most common authors in both: {}'.format(author_counter.most_common(3)))\n\n test_unique = list(set(test_authors) - set(train_authors))\n test_authors_percentage = get_percentage(len(test_unique), len(set(test_authors)))\n log('Unique authors in test_data: {} ({}%)'.format(len(test_unique), test_authors_percentage))\n\n ratings = np.array([x.rating for x in train_data])\n train_authors = np.array(train_authors)\n zero_authors = train_authors[ratings == 0]\n log(\"Authors with zero ratings: (length:{}) {}\".format(len(zero_authors), set(zero_authors)))\n\n test_authors = np.array(test_authors)\n for author_tuple in author_counter.most_common(2):\n author = author_tuple[0]\n author_percentage = get_percentage(author_tuple[1], len(test_data) + len(train_data))\n log(\"Amount of test reviews by {}: {} ({}%)\".format(author, len(test_authors[test_authors == author]), author_percentage))",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def filter_authors(probable_authors):\n #second_names = lambda x: x.split()[-1]\n names, ids = zip(*probable_authors)\n cleaned = []\n counter = Counter(list(map(second_names, names)))\n for (name, aid) in probable_authors:\n if counter[second_names(name)] == 1:\n cleaned.append((name, aid))\n return cleaned",
"def get_author_list(text):\n newline_fixed_text = text\n for newline_char in LINE_TERMINATOR_CHARS:\n newline_fixed_text = newline_fixed_text.replace(newline_char, ' , ')\n potential_authors = newline_fixed_text.replace(NON_BREAKING_SPACE, ' ').replace(' and ', ', ').split(', ')\n filtered_authors = list()\n my_name_pattern = re.compile(\"(-?\\\\w\\\\.\\\\ ?)+([\\\\w]{2,}\\\\ ?)+\")\n # the allowance of an optional hyphen preceding an initial is to satisfy a\n # common pattern observed with the papers coming out of asia.\n for author in potential_authors:\n if my_name_pattern.match(author): # match has an implied ^ at the start\n # which is ok for our purposes.\n filtered_authors.append(author)\n return filtered_authors",
"def get_author_list(text):\n newline_fixed_text = text\n for newline_char in LINE_TERMINATOR_CHARS:\n newline_fixed_text = newline_fixed_text.replace(newline_char, ', ')\n potential_authors = newline_fixed_text.replace(NON_BREAKING_SPACE, ' ').replace(' and ', ', ').split(', ')\n filtered_authors = list()\n my_name_pattern = re.compile(\"(-?\\\\w\\\\.\\\\ ?)+([\\\\w]{2,}\\\\ ?)+\")\n # the allowance of an optional hyphen preceding an initial is to satisfy a\n # common pattern observed with the papers coming out of asia.\n for author in potential_authors:\n if my_name_pattern.match(author): # match has an implied ^ at the start\n # which is ok for our purposes.\n filtered_authors.append(author)\n return filtered_authors",
"def __get_author_list_shorten(self, author_list, num_authors, format, m, n):\n format_etal = u'{}, et al.'\n format_n_authors = u'{} and {}'\n format_with_n_colleagues = u'{}, and {} colleagues'\n format_escape_emph = u'{} \\emph{{et al.}}'\n format_plus = u'{}+'\n\n if (num_authors <= n) or (num_authors <= m):\n return author_list\n if (format == 'A'):\n # in db we have LastName, FirstName (or FirstInitial.) MiddleInitial.\n # hence the first part is the LastName\n return format_etal.format(self.__get_n_authors(author_list, n, u',', 2, u', and'))\n if (format == 'G'):\n # return LastName et. al. - list is separated by a space\n return format_etal.format(self.__get_n_authors(author_list, n, u' ', 2, u''))\n if (format == 'M'):\n # return n authors (LastName) et. al. - list is separated by a comma\n return format_etal.format(self.__get_n_authors(author_list, n, u',', 1, u', and'))\n if (format == 'm'):\n # return n authors (LastName) et. al. - list is separated by a space\n return format_etal.format(self.__get_n_authors(author_list, n, u',', 1, u', \\&'))\n if (format == 'H'):\n # return the asked number of authors - list is separated by space,\n # there is an and before the last author\n authors = author_list.split(' ')\n if (m == 1):\n return authors[0]\n else:\n authors.remove('and')\n return format_n_authors.format(' '.join(authors[:m-1]), authors[m])\n if (format == 'I'):\n # return LastName and count - list is separated by a space\n authors = author_list.split(',')\n return format_with_n_colleagues.format(authors[0], num_authors-1)\n if (format == 'L') or (format == 'N') or (format == 'g'):\n # return LastName and count list is separated by a comma\n authors = author_list.split(',')\n return format_with_n_colleagues.format(authors[0], num_authors-1)\n if (format == 'l'):\n # return n author(s) et. al. - list is separated by a comma\n return format_etal.format(self.__get_n_authors(author_list, n, u',', 2, u' \\&'))\n if (format == 'h'):\n authors = author_list.split(' ')\n return format_escape_emph.format(authors[0])\n if (format == 'n'):\n authors = author_list.split(' ')\n return format_plus.format(authors[0])\n if (format == 'a'):\n # return first authors Lastname only\n # this is already done at the CSL level so just return what was passed in\n return author_list\n return author_list",
"def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def sanitize_author(authors: list, author: str) -> str:\n # Find the best match in authors. Indeed if we search \"John Doe\", Google Scholar\n # returns \"J Doe\" and so the self.reshape_entries will drop the records.\n # In practice, the cutoff must be quite low, but not too low. Otherwise,\n # co-authors may be wrongly renamed! Empirically, 0.4 is a good tradeoff).\n l = difflib.get_close_matches(author, authors, n=1, cutoff=0.4)\n if len(l) != 1:\n # We are trying to rename a co-author without knowing is real name.\n return author\n else:\n # We are renaming an author with his/her real name.\n return l[0]",
"def testArticleListWithAuthorSearch(self):\n authorUri = self.er.getAuthorUri(\"associated\")\n q = QueryArticles(authorUri = authorUri)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res = self.er.execQuery(q)\n for art in res.get(\"articles\", {}).get(\"results\", []):\n foundAuthor = False\n for author in art.get(\"authors\"):\n if author[\"uri\"] == authorUri:\n foundAuthor = True\n assert foundAuthor == True\n\n cq = ComplexArticleQuery(BaseQuery(authorUri = authorUri))\n q = QueryArticles.initWithComplexQuery(cq)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res2 = self.er.execQuery(q)\n\n self.ensureSameResults(res, res2, '[articles][].totalResults')",
"def find_authors(filename_list):\n authors_set = set()\n for filename in filename_list:\n with open(filename, 'r') as file:\n for line in file:\n line.strip().lower()\n if line.startswith('author'):\n author = line[6:].strip()\n authors_set.add[author]\n elif line == '':\n continue\n else:\n break\n return authors_set",
"def render_authors(self, e, width, **kwargs):\n buf = []\n authors = list(e.iterdescendants('author'))\n for i, author in enumerate(authors):\n if i == len(authors) - 1 and len(authors) > 1:\n buf.append('and ')\n organization = author.find('organization')\n initials, surname = short_author_name_parts(author)\n if surname:\n initials = initials or ''\n if i == len(authors) - 1 and len(authors) > 1:\n # Last author is rendered in reverse\n if len(initials) > 0:\n buf.append(initials + ' ' + \\\n surname)\n else:\n buf.append(surname)\n elif len(initials) > 0:\n buf.append(surname + ', ' + initials)\n else:\n buf.append(surname)\n if author.attrib.get('role', '') == 'editor':\n buf.append(', Ed.')\n elif organization is not None and organization.text:\n # Use organization instead of name\n buf.append(organization.text.strip())\n else:\n continue\n if len(authors) == 2 and i == 0:\n buf.append(' ')\n elif i < len(authors) - 1:\n buf.append(', ')\n return ''.join(buf)",
"def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)",
"def _needs_beard_reprocessing(authors_before, authors_after):\n if len(authors_before) == len(authors_after):\n for index, author_before in enumerate(authors_before):\n # Not every author has an affiliation.\n before_affiliations = author_before.get(\n 'affiliations', [])\n\n # We don't iterate over authors_after, we take the index.\n after_affiliations = authors_after[index].get(\n 'affiliations', [])\n\n before = (author_before['full_name'], before_affiliations)\n after = (authors_after[index]['full_name'], after_affiliations)\n\n if before != after:\n return True\n\n return False\n else:\n return True",
"def authors(self):\n docs = [u'article', u'book', u'link', u'thesis']\n authors = []\n if self.publication_type in docs and 'v10' in self.data:\n for author in self.data['v10']:\n authordict = {}\n if 's' in author:\n authordict['surname'] = author['s']\n if 'n' in author:\n authordict['given_names'] = author['n']\n if 's' in author or 'n' in author:\n authors.append(authordict)\n\n if len(authors) > 0:\n return authors",
"def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cross checks a list of authors against post | def assertAuthorsInPosts(context, authors, posts):
cross_check(context, authors, posts) | [
"def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)",
"def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def _needs_beard_reprocessing(authors_before, authors_after):\n if len(authors_before) == len(authors_after):\n for index, author_before in enumerate(authors_before):\n # Not every author has an affiliation.\n before_affiliations = author_before.get(\n 'affiliations', [])\n\n # We don't iterate over authors_after, we take the index.\n after_affiliations = authors_after[index].get(\n 'affiliations', [])\n\n before = (author_before['full_name'], before_affiliations)\n after = (authors_after[index]['full_name'], after_affiliations)\n\n if before != after:\n return True\n\n return False\n else:\n return True",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def assertAuthorsInComments(context, authors, comments):\n cross_check(context, authors, comments)",
"def testArticleListWithAuthorSearch(self):\n authorUri = self.er.getAuthorUri(\"associated\")\n q = QueryArticles(authorUri = authorUri)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res = self.er.execQuery(q)\n for art in res.get(\"articles\", {}).get(\"results\", []):\n foundAuthor = False\n for author in art.get(\"authors\"):\n if author[\"uri\"] == authorUri:\n foundAuthor = True\n assert foundAuthor == True\n\n cq = ComplexArticleQuery(BaseQuery(authorUri = authorUri))\n q = QueryArticles.initWithComplexQuery(cq)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res2 = self.er.execQuery(q)\n\n self.ensureSameResults(res, res2, '[articles][].totalResults')",
"def test_author_list(self):\n\n request = self.factory.get('api-author-list')\n force_authenticate(request, self.alien.host.user_auth)\n\n response = views.AuthorViewset.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n\n uids = [u[\"id\"] for u in response.data[\"authors\"]]\n self.assertIn(self.user.get_url(), uids)\n self.assertNotIn(self.inactive_user.get_url(), uids)\n self.assertNotIn(self.alien.get_url(), uids)",
"def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def filter_authors(probable_authors):\n #second_names = lambda x: x.split()[-1]\n names, ids = zip(*probable_authors)\n cleaned = []\n counter = Counter(list(map(second_names, names)))\n for (name, aid) in probable_authors:\n if counter[second_names(name)] == 1:\n cleaned.append((name, aid))\n return cleaned",
"def test_item_add_authors(self):\n\n actual_item = Item.objects.get(id=101)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_add=users)\n expected_item = Item.objects.get(id=101)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))",
"def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })",
"def test_conferencepapers_authors_exist(self) -> None:\n for id_conferencepaper, conferencepaper in self.data['conferencepapers'].items():\n for id_author in conferencepaper['authors']:\n self.assertIn(\n id_author,\n self.data['authors'],\n '{} references author {} not found in authors.yml'.format(id_conferencepaper, id_author)\n )",
"def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in",
"def fill_authors(data):\r\n authors = [d.get('Author') for d in data]\r\n for a in authors:\r\n curs.execute(r'select distinct aut_name from authors;')\r\n if (a,) not in curs.fetchall():\r\n curs.execute(r\"insert into authors(aut_name) values(?)\", [a])\r\n conn.commit()",
"def calls_authors(*args):\n calls_posts = CallsPost.objects.published()\n authors = User.objects.filter(callsposts__in=calls_posts)\n return list(authors.annotate(post_count=Count(\"callsposts\")))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Cross checks a list of authors against comments | def assertAuthorsInComments(context, authors, comments):
cross_check(context, authors, comments) | [
"def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)",
"def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))",
"def _needs_beard_reprocessing(authors_before, authors_after):\n if len(authors_before) == len(authors_after):\n for index, author_before in enumerate(authors_before):\n # Not every author has an affiliation.\n before_affiliations = author_before.get(\n 'affiliations', [])\n\n # We don't iterate over authors_after, we take the index.\n after_affiliations = authors_after[index].get(\n 'affiliations', [])\n\n before = (author_before['full_name'], before_affiliations)\n after = (authors_after[index]['full_name'], after_affiliations)\n\n if before != after:\n return True\n\n return False\n else:\n return True",
"def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)",
"def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in",
"def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"",
"def filter_authors(probable_authors):\n #second_names = lambda x: x.split()[-1]\n names, ids = zip(*probable_authors)\n cleaned = []\n counter = Counter(list(map(second_names, names)))\n for (name, aid) in probable_authors:\n if counter[second_names(name)] == 1:\n cleaned.append((name, aid))\n return cleaned",
"def build_author_inclusion_filter(authors: Strings) -> EdgePredicate:\n warnings.warn('use pybel.struct.build_author_inclusion_filter', DeprecationWarning)\n import pybel.struct\n return pybel.struct.build_author_inclusion_filter(authors)",
"def get_author_list(text):\n newline_fixed_text = text\n for newline_char in LINE_TERMINATOR_CHARS:\n newline_fixed_text = newline_fixed_text.replace(newline_char, ' , ')\n potential_authors = newline_fixed_text.replace(NON_BREAKING_SPACE, ' ').replace(' and ', ', ').split(', ')\n filtered_authors = list()\n my_name_pattern = re.compile(\"(-?\\\\w\\\\.\\\\ ?)+([\\\\w]{2,}\\\\ ?)+\")\n # the allowance of an optional hyphen preceding an initial is to satisfy a\n # common pattern observed with the papers coming out of asia.\n for author in potential_authors:\n if my_name_pattern.match(author): # match has an implied ^ at the start\n # which is ok for our purposes.\n filtered_authors.append(author)\n return filtered_authors",
"def get_author_list(text):\n newline_fixed_text = text\n for newline_char in LINE_TERMINATOR_CHARS:\n newline_fixed_text = newline_fixed_text.replace(newline_char, ', ')\n potential_authors = newline_fixed_text.replace(NON_BREAKING_SPACE, ' ').replace(' and ', ', ').split(', ')\n filtered_authors = list()\n my_name_pattern = re.compile(\"(-?\\\\w\\\\.\\\\ ?)+([\\\\w]{2,}\\\\ ?)+\")\n # the allowance of an optional hyphen preceding an initial is to satisfy a\n # common pattern observed with the papers coming out of asia.\n for author in potential_authors:\n if my_name_pattern.match(author): # match has an implied ^ at the start\n # which is ok for our purposes.\n filtered_authors.append(author)\n return filtered_authors",
"def test_author_list(self):\n\n request = self.factory.get('api-author-list')\n force_authenticate(request, self.alien.host.user_auth)\n\n response = views.AuthorViewset.as_view({'get': 'list'})(request)\n self.assertEqual(response.status_code, 200)\n\n uids = [u[\"id\"] for u in response.data[\"authors\"]]\n self.assertIn(self.user.get_url(), uids)\n self.assertNotIn(self.inactive_user.get_url(), uids)\n self.assertNotIn(self.alien.get_url(), uids)",
"def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])",
"def testArticleListWithAuthorSearch(self):\n authorUri = self.er.getAuthorUri(\"associated\")\n q = QueryArticles(authorUri = authorUri)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res = self.er.execQuery(q)\n for art in res.get(\"articles\", {}).get(\"results\", []):\n foundAuthor = False\n for author in art.get(\"authors\"):\n if author[\"uri\"] == authorUri:\n foundAuthor = True\n assert foundAuthor == True\n\n cq = ComplexArticleQuery(BaseQuery(authorUri = authorUri))\n q = QueryArticles.initWithComplexQuery(cq)\n q.setRequestedResult(RequestArticlesInfo(count = 100, returnInfo = self.returnInfo))\n res2 = self.er.execQuery(q)\n\n self.ensureSameResults(res, res2, '[articles][].totalResults')",
"def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)",
"def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]",
"def by_author(authorname, bibs):\n keepindex = []\n i = 0\n\n an = authorname.replace(\" \", \"\")\n\n authorname = authorname.replace(',', ', ')\n authorname = authorname.replace(\" \", \" \")\n\n authorshort = 'xxxxxxx'\n if ',' in authorname and len(an) > (1+an.find(',')):\n authorshort = (authorname[:authorname.find(',')]\n + ', '\n + an[an.find(',')+1])\n\n print('number of bibs', len(bibs))\n\n for bib in bibs:\n if 'author' in bib:\n bibauthor = bib['author']\n bibauthor = bibauthor.replace(',', ', ')\n bibauthor = bibauthor.replace(' ', ' ')\n\n if authorname in bibauthor:\n keepindex.append(i)\n i += 1\n elif authorshort in bibauthor:\n print('Close name WARNING- is bib entry correct?')\n print(bib['author'], ': ', bib['title'])\n author_bibs = [bibs[i] for i in keepindex]\n return author_bibs",
"def is_courtesy(self):\n\n this_organization = Organization.objects.get(pk=1)\n for author in self.credit.all():\n if author.organization != this_organization:\n return True\n return False",
"def _get_authors_soup_text_clean(self, author_soup_text):\n authors = []\n for author in author_soup_text:\n initial_strip = author.strip()\n clean_text = re.sub('^by\\s+', '', initial_strip)\n strip_again = clean_text.strip()\n authors.append(strip_again)\n return(authors)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Takes a list of cachedauthors and adds them to the author follower list | def create_cached_author_followers(author, followers):
for f in followers:
author.followers.add(f) | [
"def set_authors(self, authors):\n\t\tself.authors = authors",
"def add_all_followers(twitter, users):\n for u in users:\n #print(\"Outside: Requesting followers for screen_name %s\" % u['screen_name'])\n if u['protected'] != True:\n response = get_followers(twitter, u['screen_name'])\n if response == \"404\":\n u['followers'] = []\n else:\n u['followers'] = response\n else:\n u['followers'] = []",
"def testAddMultipleFollowers(self):\r\n self._tester.AddFollowers(self._cookie, self._user.private_vp_id,\r\n ['Email:extra.user1@emailscrubbed.com',\r\n 'Email:extra.user2@emailscrubbed.com',\r\n {'user_id': self._extra_users[0].user_id}])",
"def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))",
"def follow_followers():\n for follower in tweepy.Cursor(api.followers).items():\n follower.follow()",
"def authors(self, authors):\n\n self._authors = authors",
"def add_followers(task_id: str, followers: List[str]):\n return AsanaClient.singleton().add_followers(task_id, followers)",
"def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)",
"def artists_follow(self, artist_ids: list) -> None:\n return self._put(\"me/following\", type=\"artist\", ids=\",\".join(artist_ids))",
"def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in",
"def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]",
"def update_authors(pid, name, key):\r\n if pid in authorList:\r\n author = authorList[pid]\r\n author[\"pubs\"].add(key)\r\n authorList[pid] = author\r\n else:\r\n author = {\r\n 'name': name,\r\n 'pubs': {key}\r\n }\r\n authorList[pid] = author",
"def get_authors(self):\n return [aer.author for aer in self.authorentryrank_set.all()]",
"def get_authors(self, entry):\n try:\n authors = ['<a href=\"%s\" target=\"blank\">%s</a>' %\n (reverse('zinnia:author_detail',\n args=[author.username]), author.username) \\\n for author in entry.authors.all()]\n except NoReverseMatch:\n authors = [author.username for author in entry.authors.all()]\n return ', '.join(authors)",
"def author_list(self):\n return [a.full_name for a in self.authors.all()]",
"def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list",
"def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)",
"def test_item_add_authors(self):\n\n actual_item = Item.objects.get(id=101)\n user_first = CustomUser.objects.get(id=101)\n user_second = CustomUser.objects.get(id=102)\n users = [user_first, user_second]\n actual_item.update_authors(authors_add=users)\n expected_item = Item.objects.get(id=101)\n self.assertListEqual(list(actual_item.authors.all()), list(expected_item.authors.all()))",
"def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Creates an interior node with the given operator (a token), and left and right operands (other nodes). | def __init__(self, opToken, leftOper, rightOper):
self.operator = opToken
self.leftOperand = leftOper
self.rightOperand = rightOper | [
"def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))",
"def __init__(self, token, left=None, right=None):\n super().__init__() # LinkedBinaryTree initialization\n if not isinstance(token, str):\n raise TypeError(\"Token must be a string.\")\n self._add_root(token)\n if left is not None:\n if token not in \"+-x*/\":\n raise ValueError(\"Token must be valid operator.\")\n self._attach(self.root(), left, right)",
"def _generate_binary_expression(operator: BinaryOperator, docstring: str):\n\n class Expr(BinaryExpression):\n __doc__ = docstring\n\n def __init__(\n self, loc: Optional[SourceLocation], left: Expression, right: Expression\n ):\n super().__init__(loc, operator, left, right)\n\n return Expr",
"def make_operator(self) -> Token:\n if self.current_char == '+':\n token = Token(TokenType.PLUS, self.current_char)\n elif self.current_char == '-':\n token = Token(TokenType.MINUS, self.current_char)\n elif self.current_char == '*':\n token = Token(TokenType.MULT, self.current_char)\n elif self.current_char == '/':\n token = Token(TokenType.DIV, self.current_char)\n elif self.current_char == '^':\n token = Token(TokenType.EXP, self.current_char)\n elif self.current_char == '(':\n token = Token(TokenType.LPAREN, self.current_char)\n elif self.current_char == ')':\n token = Token(TokenType.RPAREN, self.current_char)\n elif self.current_char == '!':\n id_str = self.current_char\n self.advance()\n id_str += self.current_char\n token = Token(TokenType.N_EQ, id_str)\n elif self.current_char == '=':\n if self.text[self.pos + 1] == '=':\n id_str = self.current_char\n self.advance()\n id_str += self.current_char\n token = Token(TokenType.IS_EQ, id_str)\n else:\n token = Token(TokenType.EQ, self.current_char)\n elif self.current_char == '<':\n if self.text[self.pos + 1] == '=':\n id_str = self.current_char\n self.advance()\n id_str += self.current_char\n token = Token(TokenType.LTE, id_str)\n else:\n token = Token(TokenType.LT, self.current_char)\n elif self.current_char == '>':\n if self.text[self.pos + 1] == '=':\n id_str = self.current_char\n self.advance()\n id_str += self.current_char\n token = Token(TokenType.GTE, id_str)\n else:\n token = Token(TokenType.GT, self.current_char)\n\n self.advance()\n return token",
"def compileExpression(self):\n parent = self.current_node\n self.current_node = et.SubElement(self.current_node, 'expression')\n self.compileTerm() # When this finishes the current token is either an operator or: ')', ';' or ']'.\n while self.tokenizer.tokenVal in ['+', '-', '*', '/', '|', '&', '<', '>', '=']:\n self.writeNode() # Writes the operator\n self.tokenizer.advance() # Advances to the first token in a term.\n self.compileTerm() # When this finishes the current token is an operator or ')', ';' or ']'.\n self.current_node = parent",
"def _ParseOp(self):\r\n left = self._term_parser()\r\n op = self._operator()\r\n commit()\r\n right = self._expr_parser()\r\n whitespace()\r\n node = self._op_classes[op](self._schema, left)\r\n return node.Merge(right)",
"def _CombineBinaryExpressions(self, operator):\n operator_lower = operator.lower()\n\n item_index = 1\n number_of_items = len(self._stack) - 1\n while item_index < number_of_items:\n item = self._stack[item_index]\n if (isinstance(item, expressions.BinaryExpression) and\n item.operator.lower() == operator_lower and not item.args):\n previous_item = self._stack[item_index - 1]\n next_item = self._stack[item_index + 1]\n\n if (isinstance(previous_item, expressions.Expression) and\n isinstance(next_item, expressions.Expression)):\n item.AddOperands(previous_item, next_item)\n\n self._stack.pop(item_index + 1)\n self._stack.pop(item_index - 1)\n\n item_index -= 2\n number_of_items -= 2\n\n item_index += 1\n if item_index == 0:\n item_index += 1",
"def build_expression_tree(token_list: Sequence[tokens.Token]) -> nodes.ExpNode:\r\n\r\n def is_unary_op(op) -> bool:\r\n return op in UNARYOP_TABLE\r\n\r\n def is_open_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenOpenBracket)\r\n\r\n def is_close_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenCloseBracket)\r\n\r\n def is_comma(token) -> bool:\r\n return isinstance(token, tokens.TokenSymbol) and token.symbol == Separators.SEP_COMMA\r\n\r\n def is_higher_or_equal_op_priority(op1, op2, table) -> bool:\r\n oi1 = table.get(op1)\r\n oi2 = table.get(op2)\r\n\r\n p1 = 0 if oi1 is None else oi1.priority\r\n p2 = 0 if oi2 is None else oi2.priority\r\n\r\n return p1 >= p2\r\n\r\n def read_exp_chain(index) -> Tuple[nodes.ExpNode, int]:\r\n token = token_list[index]\r\n if isinstance(token, tokens.TokenSymbol):\r\n if is_open_bracket(token):\r\n node, i = read_exp(index)\r\n elif is_unary_op(token.symbol):\r\n if UNARYOP_TABLE[token.symbol].affix == OperatorAffix.PREFIX:\r\n node, i = read_prefix_unary_exp(index)\r\n else:\r\n raise ParsingException(f\"unary operator '{token.symbol}' is not a prefix operator\", token.pos)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n node, i = read_exp(index)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if isinstance(next_token, tokens.TokenSymbol) and is_unary_op(next_token.symbol):\r\n if UNARYOP_TABLE[next_token.symbol].affix == OperatorAffix.POSTFIX:\r\n node, i = read_postfix_unary_exp(i, node)\r\n else:\r\n return (node, i)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if is_close_bracket(next_token):\r\n return (node, i)\r\n elif isinstance(next_token, tokens.TokenSymbol):\r\n if next_token.symbol == Separators.SEP_COMMA:\r\n return (node, i)\r\n elif next_token.symbol in BINOP_TABLE:\r\n return read_binary_exp(i, node)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{next_token.symbol}'\", next_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", next_token.pos)\r\n else:\r\n return (node, i)\r\n\r\n def read_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n if index >= len(token_list):\r\n raise ParsingException(\"unexpected token\", token_list[-1].pos)\r\n\r\n token = token_list[index]\r\n if is_open_bracket(token):\r\n return read_bracket_exp(index)\r\n elif isinstance(token, tokens.TokenNumber):\r\n return (nodes.NumberNode(token.num, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenName):\r\n if (index + 1) < len(token_list) and is_open_bracket(token_list[index + 1]):\r\n return read_func_call(index)\r\n else:\r\n return (nodes.NameConstantNode(token.name, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenSymbol):\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n raise ParsingException(\"unexpceted token\", token.pos)\r\n\r\n def read_bracket_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n node, i = read_exp_chain(index + 1)\r\n\r\n if i < len(token_list) and is_close_bracket(token_list[i]):\r\n return (node, i + 1)\r\n else:\r\n raise ParsingException(\"unmatch '('\", token_list[index].pos)\r\n\r\n def read_prefix_unary_exp(index) -> Tuple[nodes.UnaryOpNode, int]:\r\n node, i = read_exp(index + 1)\r\n token = token_list[index]\r\n return (nodes.UnaryOpNode(token.symbol, node, pos=token.pos), i)\r\n\r\n def read_postfix_unary_exp(index, child: nodes.ExpNode) -> 
Tuple[nodes.UnaryOpNode, int]:\r\n token = token_list[index]\r\n\r\n if isinstance(child, nodes.UnaryOpNode):\r\n if is_higher_or_equal_op_priority(token.symbol, child.op, UNARYOP_TABLE):\r\n node = nodes.UnaryOpNode(token.symbol, child.child, pos=token.pos)\r\n child.child = node\r\n node = child\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n\r\n return (node, index + 1)\r\n\r\n def read_binary_exp(index, left: nodes.ExpNode) -> Tuple[nodes.BinaryOpNode, int]:\r\n right, i = read_exp_chain(index + 1)\r\n\r\n token = token_list[index]\r\n if isinstance(right, nodes.BinaryOpNode) and not is_open_bracket(token_list[index + 1]):\r\n # check operator priority and rotate the expression tree when necessary.\r\n # when priority of two operators are equal, we also should rotate the tree\r\n # in case these operators don't follow the commutative law.\r\n if is_higher_or_equal_op_priority(token.symbol, right.op, BINOP_TABLE):\r\n node = nodes.BinaryOpNode(token.symbol, left, right.left, pos=token.pos)\r\n right.left = node\r\n node = right\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n\r\n return (node, i)\r\n\r\n def read_func_call(index) -> Tuple[nodes.FuncCallNode, int]:\r\n name_token = token_list[index]\r\n index += 2 # skip '('\r\n\r\n token_count = len(token_list)\r\n\r\n node = None\r\n i = index\r\n args = []\r\n\r\n while i < token_count and not is_close_bracket(token_list[i]):\r\n node, i = read_exp_chain(i)\r\n args.append(node)\r\n if i < token_count and is_comma(token_list[i]):\r\n i += 1\r\n else:\r\n break\r\n\r\n if i < token_count and is_close_bracket(token_list[i]):\r\n func_node = nodes.FuncCallNode(name_token.name, args, pos=name_token.pos)\r\n return (func_node, i + 1)\r\n else:\r\n raise ParsingException(\"unclose func call\", name_token.pos)\r\n\r\n\r\n node, i = read_exp_chain(0)\r\n\r\n if i < len(token_list):\r\n last_token = token_list[i]\r\n if is_close_bracket(last_token):\r\n raise ParsingException(\"unmatch ')'\", last_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", last_token.pos)\r\n else:\r\n return node",
"def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left",
"def convert_operator(cls, en):\n op = en.__class__.__name__.lower()\n assert op in [\"intersection\", \"union\", \"difference\"]\n \n children = en.geometry()\n nchild = len(children)\n\n if nchild == 2:\n\n cn = CSG(op, name=en.name)\n cn.left = cls.convert(children[0]) \n cn.right = cls.convert(children[1]) \n\n elif nchild == 3:\n\n cn = CSG(op, name=en.name)\n\n ln = CSG(op, name=en.name + \"_split3\")\n ln.left = cls.convert(children[0])\n ln.right = cls.convert(children[1])\n\n cn.left = ln\n cn.right = cls.convert(children[2])\n\n else:\n assert 0, \"CSG operator nodes must have 2 or 3 children\" \n pass\n return cn",
"def __init__(self, *args, **kwargs):\n # except the case where just one expression was passed.\n # this situation is handled in __new__\n if args and (len(args) != 1 or not isinstance(args[0], Expr)):\n children = []\n for child in args:\n if not isinstance(child, (Expr, Param, Field, Alias)):\n child = Literal(child)\n children.append(child)\n self.children = children\n self.operator = kwargs.get('operator')",
"def addExpr( ):\n tok = tokens.peek( )\n if syntaxDebug: print (\"addExpr: \", tok)\n \n left = term( )\n tok = tokens.peek( )\n while tok == \"+\" or tok == \"-\":\n op = tok\n tokens.next()\n \n right = addExpr( )\n left = BinaryExpr(op, left, right)\n tok = tokens.peek( )\n return left",
"def _generate_unary_expression(operator: UnaryOperator, docstring: str):\n\n class Expr(UnaryExpression):\n __doc__ = docstring\n\n def __init__(self, loc: Optional[SourceLocation], argument: Expression):\n super().__init__(loc, operator, True, argument)\n\n return Expr",
"def _instantiate_binary_node(pattern: Pattern, left_subtree: TreePlanNode, right_subtree: TreePlanNode):\n pattern_structure = pattern.positive_structure\n if isinstance(pattern_structure, AndOperator):\n operator_type = OperatorTypes.AND\n elif isinstance(pattern_structure, SeqOperator):\n operator_type = OperatorTypes.SEQ\n else:\n raise Exception(\"Unsupported binary operator\")\n return TreePlanBinaryNode(operator_type, left_subtree, right_subtree)",
"def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()",
"def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-x*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()",
"def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand",
"def parse_expression(self):\n l_expression = self.parse_equality()\n\n while self.check_type(TokenType.AND) or self.check_type(TokenType.OR):\n token = self.consume()\n op = to_operator_type[token.value]\n r_expression = self.parse_equality()\n l_expression = BinaryOperator(l_expression, op, r_expression, token.line, token.column)\n return l_expression",
"def _visit_operator_node(self, operator_node, **kwargs):\n\n visited_operands = [VisitedNode(operand, self.visit(operand, **kwargs))\n for operand in operator_node._operands]\n dispatch_methods = [\n self._visit_nullary_node,\n self._visit_unary_node,\n self._visit_binary_node,\n ]\n return dispatch_methods[operator_node.arity](operator_node,\n *visited_operands)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the expression in prefix form. | def prefix(self):
return str(self.operator) + " " + self.leftOperand.prefix() + " " + self.rightOperand.prefix() | [
"def generate_prefix_expression(self, prefix):\n list_prefixes = [prefix]\n\n for index in range(1, len(prefix)):\n expr = \"{}{}\".format('|^', prefix[index:])\n list_prefixes.append(expr)\n\n return ''.join(list_prefixes)",
"def prefix_to_infix(self, expr):\n p, r = self._prefix_to_infix(expr)\n if len(r) > 0:\n raise InvalidPrefixExpression(f\"Incorrect prefix expression \\\"{expr}\\\". \\\"{r}\\\" was not parsed.\")\n return f'({p})'",
"def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr",
"def sympy_to_prefix(self, expr):\n if isinstance(expr, sp.Symbol):\n return [str(expr)]\n elif isinstance(expr, sp.Integer):\n return self.write_int(int(str(expr)))\n elif isinstance(expr, sp.Rational):\n return ['div'] + self.write_int(int(expr.p)) + self.write_int(int(expr.q))\n elif expr == sp.E:\n return ['E']\n elif expr == sp.pi:\n return ['pi']\n elif expr == sp.I:\n return ['I']\n # SymPy operator\n for op_type, op_name in self.SYMPY_OPERATORS.items():\n if isinstance(expr, op_type):\n return self._sympy_to_prefix(op_name, expr)\n # environment function\n for func_name, func in self.functions.items():\n if isinstance(expr, func):\n return self._sympy_to_prefix(func_name, expr)\n # unknown operator\n raise UnknownSymPyOperator(f\"Unknown SymPy operator: {expr}\")",
"def clean_prefix(self, expr):\n if not self.clean_prefix_expr:\n return expr\n expr = \" \".join(expr)\n expr = expr.replace(\"f x\", \"Y\")\n expr = expr.replace(\"derivative Y x\", \"Y'\")\n expr = expr.replace(\"derivative Y' x\", \"Y''\")\n expr = expr.split()\n return expr",
"def trans_infix_prefix(expression):\n expression = expression.replace(' ', '')\n symbol_priority = {'*': 10, '/': 10, '+': 5, '-': 5, '(': 0, ')': 0}\n symbol_stack = []\n new_expression = ''\n\n for i in range(len(expression) - 1, -1, -1):\n item = expression[i]\n\n if item.isdigit():\n new_expression += item\n else:\n while True:\n top_symbol_priority = symbol_priority[symbol_stack[len(symbol_stack) - 1]] if len(symbol_stack) else 0\n current_symbol_priority = symbol_priority[item]\n if item == ')':\n symbol_stack.append(item)\n break\n elif item == '(':\n top_symbol = symbol_stack.pop()\n if top_symbol != ')':\n new_expression += top_symbol\n else:\n break\n elif top_symbol_priority > current_symbol_priority: # 如果栈顶操作符 > 当前操作符, 那么弹出栈顶操作符,继续循环,比较更新后的栈顶操作符和当前操作符\n top_symbol = symbol_stack.pop()\n new_expression += top_symbol\n else:\n symbol_stack.append(item)\n break\n\n while len(symbol_stack) != 0:\n top_symbol = symbol_stack.pop()\n new_expression += top_symbol\n\n finally_str = ''\n for i in range(len(new_expression) - 1, -1, -1):\n finally_str += new_expression[i]\n return finally_str",
"def unclean_prefix(self, expr):\n if not self.clean_prefix_expr:\n return expr\n expr = \" \".join(expr)\n expr = expr.replace(\"Y''\", \"derivative Y' x\")\n expr = expr.replace(\"Y'\", \"derivative Y x\")\n expr = expr.replace(\"Y\", \"f x\")\n expr = expr.split()\n return expr",
"def prefixes(self):\n if self._prefixes is None:\n self.generate_instruction_parts()\n return self._prefixes",
"def prefix(pattern):\r\n return pattern[0:len(pattern)-1]",
"def prefix(pattern):\n return pattern[0:len(pattern)-1]",
"def getPrefix(self):\n return _libsbml.ASTBasePlugin_getPrefix(self)",
"def get_expression(self) -> str:",
"def prefix(self) -> str:\n return pulumi.get(self, \"prefix\")",
"def expand_prefix(prefix):\n if prefix and not prefix.endswith('.'):\n return prefix + '.'\n return prefix",
"def prefix(self):\n return self.__prefix",
"def get_prefix(self):\n return self.prefix or ''",
"def get_prefix(self):\n return self.prefix",
"def getPrefix(self):\n return _libsbml.MultiASTPlugin_getPrefix(self)",
"def getPrefix(self):\n return _libsbml.SBase_getPrefix(self)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns the expression in infix form (fully parenthesized). | def infix(self):
return "(" + self.leftOperand.infix() + " " + str(self.operator) + " " + self.rightOperand.infix() + ")" | [
"def trans_infix_prefix(expression):\n expression = expression.replace(' ', '')\n symbol_priority = {'*': 10, '/': 10, '+': 5, '-': 5, '(': 0, ')': 0}\n symbol_stack = []\n new_expression = ''\n\n for i in range(len(expression) - 1, -1, -1):\n item = expression[i]\n\n if item.isdigit():\n new_expression += item\n else:\n while True:\n top_symbol_priority = symbol_priority[symbol_stack[len(symbol_stack) - 1]] if len(symbol_stack) else 0\n current_symbol_priority = symbol_priority[item]\n if item == ')':\n symbol_stack.append(item)\n break\n elif item == '(':\n top_symbol = symbol_stack.pop()\n if top_symbol != ')':\n new_expression += top_symbol\n else:\n break\n elif top_symbol_priority > current_symbol_priority: # 如果栈顶操作符 > 当前操作符, 那么弹出栈顶操作符,继续循环,比较更新后的栈顶操作符和当前操作符\n top_symbol = symbol_stack.pop()\n new_expression += top_symbol\n else:\n symbol_stack.append(item)\n break\n\n while len(symbol_stack) != 0:\n top_symbol = symbol_stack.pop()\n new_expression += top_symbol\n\n finally_str = ''\n for i in range(len(new_expression) - 1, -1, -1):\n finally_str += new_expression[i]\n return finally_str",
"def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr",
"def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")",
"def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output",
"def infix_to_prefix(self, infix: list) -> list:\n # Start with empty result list.\n output = []\n\n # Prepare `infix` notation by replacing parentheses and reversing\n # original expression.\n infix = infix[::-1]\n for i, val in enumerate(infix):\n if val == ')':\n infix[i] = '('\n elif val == '(':\n infix[i] = ')'\n\n # Calculate `postfix` of an `infix` expression.\n for ch in infix:\n if ch not in self.OPERATORS.keys():\n output.append(ch)\n elif ch == '(':\n self.stack.append('(')\n elif ch == ')':\n while self.stack and self.stack[-1] != '(':\n output += self.stack.pop()\n self.stack.pop()\n else:\n while self.stack and self.stack[-1] != '(' and self.OPERATORS[ch] <= self.OPERATORS[self.stack[-1]]:\n output += self.stack.pop()\n self.stack.append(ch)\n\n while self.stack:\n output += self.stack.pop()\n\n return output[::-1]",
"def prefix_to_infix(self, expr):\n p, r = self._prefix_to_infix(expr)\n if len(r) > 0:\n raise InvalidPrefixExpression(f\"Incorrect prefix expression \\\"{expr}\\\". \\\"{r}\\\" was not parsed.\")\n return f'({p})'",
"def expr_handle_infix_ops(x):\n for op in infix_ops:\n x = x.replace(op, '|' + repr(op) + '|')\n return x",
"def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)",
"def infixToPostfix(infixexpr):\n prec = {}\n prec[\"*\"] = 3\n prec[\"/\"] = 3\n prec[\"+\"] = 2\n prec[\"-\"] = 2\n prec[\"(\"] = 1\n opStack = Stack()\n postfixList = []\n tokenList = infixexpr.split()\n\n for token in tokenList:\n if token in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" or token in \"0123456789\":\n ## 假設是字母或是數字,則直接 append \n postfixList.append(token)\n elif token == '(':\n ## 把 \"(\" 記起來,以便紀錄當遇到 \")\" 時,需操作所有在括弧內的運算。\n opStack.push(token)\n elif token == ')':\n ## 把屬於這組括弧內的操作清空,直到遇到最上層的右括號為止\n ## (利用最上層的右括弧作為停止點)\n topToken = opStack.pop()\n while topToken != '(':\n postfixList.append(topToken)\n topToken = opStack.pop()\n else:\n ## 基本運算符號\n while not opStack.isEmpty() and (prec[opStack.peek()] >= prec[token]):\n ## 若當前的 token 順位比 opStack 中的 pop 操作符來得小,\n ## 則需要先把 opStack 的 pop item 先放到 postfixList 中\n ## ==> 代表先進行 opStack 優先次序高的操作\n ##\n ## eg: A * B + C\n ## 當 token 是 '+',而 top token 是 \"*\"\n ## 則需要先操作 top token ==> 把 '*' 加入 postfixList\n postfixList.append(opStack.pop())\n opStack.push(token)\n \n while not opStack.isEmpty():\n postfixList.append(opStack.pop())\n \n return \"\".join(postfixList)",
"def infix_to_postfix(infix_expr):\n prec = {\"**\": 4, \"//\": 3, \"*\": 3, \"/\": 3, \"+\": 2, \"-\": 2, \"(\": 1}\n op_stack = Stack() # Stack to hold operators\n postfix_list = [] # Where we will insert our postfix expression to print\n token_list = []\n tmp_str = ''\n operator_str = ''\n for ch in infix_expr: # Convert expression into a list 3.0*5 + 4\n if ch in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" or ch in \"0123456789.\":\n if operator_str != '':\n token_list.append(operator_str)\n operator_str = ''\n tmp_str = tmp_str + ch\n elif ch in \"*+/-\":\n if tmp_str != '':\n token_list.append(tmp_str)\n tmp_str = ''\n operator_str = operator_str + ch\n elif ch in \"()\":\n if tmp_str != '':\n token_list.append(tmp_str)\n tmp_str = ''\n elif operator_str != '':\n token_list.append(operator_str)\n operator_str = ''\n token_list.append(ch)\n if tmp_str != '':\n token_list.append(tmp_str)\n if tmp_str != '':\n token_list.append(operator_str)\n\n for token in token_list: # Go through each item in the list.\n if token not in \"+-**//()\":\n postfix_list.append(token) # add expression to list, not operator\n elif token == '(': # Notify that we'll have an operator of top priority coming up\n op_stack.push(token)\n elif token == ')':\n top_token = op_stack.pop()\n while top_token != '(': # Take the operator out of the stack and insert into our pfix list\n postfix_list.append(top_token) # continue for as many tokens were in the ()\n top_token = op_stack.pop()\n elif token == '':\n pass\n else:\n while (not op_stack.isEmpty()) and \\\n (prec[op_stack.peek()] >= prec[token]): # compare operator precedence, decide which goes first\n postfix_list.append(op_stack.pop())\n op_stack.push(token)\n\n while not op_stack.isEmpty():\n postfix_list.append(op_stack.pop())\n return \" \".join(postfix_list)",
"def infix_to_postfix(expr):\n ops = Stack()\n postfix = []\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)",
"def infix_to_postfix(infix_str):\n\n infix_list = infix_str.split()\n opstack = Stack()\n postfix_list = []\n\n for token in infix_list:\n if is_operand(token):\n postfix_list.append(token)\n elif token == '(':\n opstack.push('(')\n elif token == ')':\n find_open_paren(opstack, postfix_list)\n elif is_operator(token):\n remove_ops_of_gt_eq_precedence(token, opstack, postfix_list)\n opstack.push(token)\n\n empty_opstack(opstack, postfix_list)\n\n return ' '.join(postfix_list)",
"def print_infix(self):\n if self.is_empty():\n return \"\"\n else:\n if self.is_leaf():\n return str(self.root_value())\n else:\n if self.has_left():\n if self.has_right():\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value()) + \" \" \\\n + str(self.get_right().print_infix())\n else:\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value())\n else:\n return str(self.root_value()) + \" \" + str(self.get_right().print_infix())",
"def shunt(infix):\n # special characters for regular expressions and their precidence\n specials = {'*': 50, '+': 50, '?': 50, '.': 45, '^' : 40, '|': 30}\n # will eventually be the output\n pofix = \"\"\n # operator stack\n stack = \"\"\n # loop throuh the string a character at a time\n for c in infix:\n # If an open bracket, push to the stack\n if c== '(':\n stack = stack + c\n # If a closing bracket, pop from the stack, push to output until open bracket\n elif c == ')':\n while stack[-1] != '(':\n pofix = pofix + stack[-1]\n stack = stack[:-1]\n stack = stack[:-1]\n # If it's an operator, push to the stack after popping lower or equal precedence\n # operators from top of stack into output\n elif c in specials:\n while stack and specials.get(c, 0) <= specials.get(stack[-1], 0):\n pofix = pofix + stack[-1]\n stack = stack[:-1]\n stack = stack + c\n # Regular characters are pushed immediately to the output\n else:\n pofix = pofix + c\n # Pop all remaining operators from the stack to output\n while stack:\n pofix = pofix + stack[-1]\n stack = stack[:-1]\n # Return postfix regex\n return pofix",
"def parse_infix(input: str) -> Node:\n parsed = ParsedString(input).tokenize()\n ans = parse_e(parsed)\n return ans",
"def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n \n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1",
"def infix_to_assembly(formula: str) -> str:\n asm = \"\"\n postfix = infix_to_postfix(formula)\n for value in postfix:\n if value == \"+\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nadd ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"-\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nsub ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"*\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nmul bx\"\n asm += \"\\npush ax\"\n elif value == \"/\":\n asm += \"\\nmov dx, 0h\"\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\ndiv bx\"\n asm += \"\\npush ax\"\n else:\n # asm += \"\\npush 0\" + value + \"h\"\n # the line above is commented out as the emulator has a bug\n # which pushes immediate 0bbh as 0ffbbh to the stack\n asm += \"\\nmov cx, 0\" + value + \"h\"\n asm += \"\\npush cx\"\n return asm",
"def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex",
"def get_expression(self) -> str:"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns all possible velocity dispersions from all particles found in the data set. A particle filter can be passed using "filter" which is a list | def compute_velocity_dispersion(data, types = None, fields = None, filter = None):
types_to_fields = {'x': 'particle_velocity_x',
'y': 'particle_velocity_y',
'z': 'particle_velocity_z',
'r': 'particle_velocity_spherical_radius',
'theta': 'particle_velocity_spherical_theta',
'phi': 'particle_velocity_spherical_phi'}
if types is None and fields is None:
fields = types_to_fields.values()
keys = types_to_fields.keys()
elif fields is None:
fields = [ types_to_fields[x] for x in types ]
keys = types
else:
keys = fields
dispersion = {}
for i,x in enumerate(fields):
if filter is not None:
v = data[x][filter]
else:
v = data[x]
if np.size(v) == 0:
dispersion[keys[i]] = 0.0
else:
dispersion[keys[i]] = vel_dispersion( v.convert_to_units('km/s') )
return dispersion | [
"def get_velocities(self):\n\n return np.array([p.velocity for p in self.particles])",
"def __repr__(self):\n print(\"Particle Filter (each element: [particle_x, particle_y, particle_weight])\")\n print(self.all_particles_coordinates())",
"def particle_filter(Y, initial, transition, emission, L):\n\n T, p = Y.shape\n d, = initial().shape\n means = empty(T)\n X = [] # : [X] <- markov, only need prev particles\n\n print '--- Particle Filter ---'\n print 'p =', p\n print 'd =', d\n print 'L =', L\n\n particles = [initial() for l in range(L)]\n weights = [float(emission(Y[0], particles[l])) for l in range(L)]\n weights /= sum(weights)\n X = [multinomial(particles, weights) for l in range(L)]\n\n for t in range(1,T):\n # sample from p(x[t] | x[t-1])\n particles = a([transition(X[l]) for l in range(L)])\n\n # weight by p(y[t] | x[t])\n weights = a([float(emission(Y[t], particles[l])) for l in range(L)])\n weights /= sum(weights)\n\n # weighted mean before resampling (resampling preserves mean, so why? less variance?)\n positions = particles[:,0]\n means[t] = dot(positions, weights)\n \n # resample from multinomial(particles, weights)\n X = [multinomial(particles, weights) for l in range(L)]\n\n return None, means[1:]",
"def particle_forceV(R,N,sigma,epsilon,D):\n F = np.zeros((3,N))\n x = np.zeros(N-1)\n y = np.zeros(N-1)\n z = np.zeros(N-1)\n r = np.zeros(N-1)\n # loop over all particles\n for i in range(N):\n # Distances for x,y,z between particles\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n a = (c*4*(sigma/epsilon)*(12/r**14-6/r**8))\n F[:,i] = -np.sum(a,1)\n return F",
"def particle_filter(particle_set_t, measurement_t):\n global count\n n_samples, dim = particle_set_t.shape # no of particles and dimension of each particle\n\n pred_state = np.zeros((n_samples, dim), dtype=\"float64\") # store the predicted state \n weights = np.zeros(n_samples, dtype=\"float64\") # corresponding weights for resampling\n\n particle_set_t1 = np.zeros((n_samples, dim), dtype=\"float64\") # next iteration of particles\n\n\n # this loop calculates \\bar{X_t}, i.e. the predicted belief.\n for n in range(n_samples):\n # predicted motion step:\n xn_t1 = sample_motion_model(particle_set_t[n]) # 3x1 vector: hypothetical state\n\n # measurement correction step:\n weight_xn_t1 = state_likelihood(measurement_t, xn_t1) # scalar value\n\n pred_state[n] = xn_t1\n weights[n] = weight_xn_t1\n\n \n # It was observed that if all weights are 0, the resampling step breaks. \n # Thus, adding a uniform distribution. This is obviously a very bad idea \\ \n # as the true state can easily be discarded in the resampling step: TODO!\n if np.sum(weights) > 0.0:\n weights = weights/np.sum(weights) # normalize array only when sum in not 0\n else:\n print(\"possile divergence!\")\n weights[:] = 1 / n_samples # if sum is 0 then assign uniform distribution throughout\n\n\n # the resampling step:\n # indices = monte_carlo.residual_resample(weights)\n indices = monte_carlo.stratified_resample(weights)\n count += 1\n print(count)\n\n # new particle set is particles at index locations\n for i, index in enumerate(indices):\n particle_set_t1[i] = pred_state[index]\n\n return particle_set_t1",
"def particles_in(self):\n for p in toiter(self.particles_in_const_begin(),\n self.particles_in_const_end()):\n yield p",
"def ParticleFilterParams(fix_params=False):\n\n ## Particle filter parameters\n\n # Q_c will be the time continuous covariance matrix. \n #This should be the errors in the model.\n # in the form [x_cov, y_cov, z_cov, \n # vel_x_cov, vel_y_co, vel_z_cov, \n # mass_cov, \n # sigma_cov, shape_cov, brightness_cov, tau_cov]\n \n\n Q_c = [10., 2., 2., \n 150., 50., 50., \n 5., 0, 0,\n 1e-3, 1e-10, 0., 0.0001]\n\n\n print('Qc values used:', Q_c)\n\n Q_c = np.asarray([i**2 for i in Q_c])\n\n \n # Q_c_frag is used at reinitialisation if the fragmentation option is used\n \n Q_c_frag = [0., 0., 0., \n 0.02, 0.02, 0.02, \n 0.5, 0, 0,\n 2e-3, 5e-9, 0., 0.]\n\n Q_c_frag = [i**2 for i in Q_c_frag]\n\n ## P: starting uncertainty to initialise gaussian spread of particals. \n ## P2: starting uncertainty at reinitialisation if the fragmentation option is used\n ## in the form [x_cov, y_cov, z_cov, % of vel_x_cov, % of vel_y_co, % of vel_z_cov]\n P = [50., 50., 50., 250., 250., 250.]\n P2 = [50., 50., 50., 250., 250., 250.]\n\n ## Initialise state ranges\n\n\n ## shape parameter close to a rounded brick (1.8) (A for a sphere =1.21)\n A_min = 1.21\n A_max = 3.0 \n\n ## luminosity coefficient\n tau_min = 0.0001\n tau_max = 0.1\n\n ## lists of typical meteorite densities for different types. [chond, achond, stony-iron, iron, cometary]\n pm_mean = [3000, 3100, 4500, 7500, 850]\n pm_std = [420, 133, 133, 167, 117 ]\n\n ## to choose density values according to a distribution of meteorite percentages:\n particle_choices = []\n\n # this is created using lines 257-266; uncomment if needs changing.\n random_meteor_type = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4]\n\n #random_meteor_type = []\n #for i in range(80): # 80 % Chondrites\n # random_meteor_type.append(0)\n #for i in range(11): # 11 % Achondrites\n # random_meteor_type.append(1)\n #for i in range(2):\n # random_meteor_type.append(2) # 2 % Stony-Iron\n #for i in range(5):\n # random_meteor_type.append(3) # 5 % iron\n #for i in range(2):\n # random_meteor_type.append(4) # 2 % cometary\n\n ## ablation coefficeint \n #sigma_min = 0.001*1e-6\n #sigma_max = 0.5*1e-6\n\n\n #range_params = [m0_max, A_mean, A_std, pm_mean, pm_std, random_meteor_type, cd_mean, cd_std, sigma_min, sigma_max, K_min, K_max, tau_min, tau_max]\n range_params = [A_min, A_max, pm_mean, pm_std, random_meteor_type, tau_min, tau_max]\n\n if fix_params:\n \tQ_c[-4:] = [0., 0., 0., 0.]\n \tQ_c_frag[-4:] = [0., 0., 0., 0.]\n return Q_c, Q_c_frag, P, range_params",
"def particles_in_radius(position_particle, position_particles, velocities_particles):\n\n # array with all indecies of all particles within range for velocities\n velocities_within_r = []\n\n # array with all indecies of all particles within range for positions\n positions_within_r = []\n\n\n\n # check over all particles in positions\n for index in range(N):\n # variable used to aid if its in radius\n in_size = True\n\n # check if it is smaller than the radius in all\n for i in range(dimensions):\n\n inside_distance = abs(position_particle[i] - position_particles[index][i])\n\n wrap_distance = L-inside_distance\n\n distance = min(inside_distance, wrap_distance)\n\n # if the size is over then break out of loop as it won't be in radius\n if distance > r:\n in_size = False\n break\n\n # If it is within radius, add velocity to all velociites within r\n if in_size == True:\n # get the index of the particle for velocity\n velocities_within_r.append(velocities_particles[index])\n\n # get the index of the particle for position\n # and add position to all positions within r\n positions_within_r.append(position_particles[index])\n\n\n return velocities_within_r, positions_within_r",
"def _fuse_particles(self):\n for particle in self.member_particles:\n particle_class_from_flavor(particle.get(\"flavor\"))\n if not particle.get(\"pcf_name\"):\n particle[\"pcf_name\"] = self.name\n multiplier = particle.get(\"multiplier\", False)\n base_particle_config = particle.get(\"base_particle_config\", False)\n\n if base_particle_config:\n particle = self.update_particle_definition(particle, self.pcf_field.get_particle_from_pcf_id(particle[\"flavor\"] + \":\" + base_particle_config))\n\n # when there is no multiplier add quasiparticle parents to particle and add particle to pcf field\n if not multiplier:\n if self.particle_definition.get(\"parents\"):\n particle = self.add_parents_to_particle(particle)\n\n self.pcf_field.load_particle_definition(particle)\n\n # when there is a multiplier get the unique identifers, index them, add quasiparticle parents, and then add all particles to the pcf field\n else:\n particle_name = particle[\"pcf_name\"]\n unique_identifier_list = pcf_util.get_particle_unique_identifiers(particle['flavor'])\n unqiue_value_dict = dict([(x, pcf_util.find_nested_dict_value(particle, x.split('.'))) for x in unique_identifier_list])\n for i in range(multiplier):\n particle_multiple = deepcopy(particle)\n\n particle_multiple[\"pcf_name\"] = particle_name + \"-\" + str(i)\n # appends the correct index to each item in particle definition that is unique\n particle_multiple = functools.reduce((lambda d,l: pcf_util.replace_value_nested_dict(d, l.split('.'), unqiue_value_dict.get(l) + '-' + str(i))), unique_identifier_list, particle_multiple)\n if self.particle_definition.get(\"parents\"):\n particle_multiple = self.add_parents_to_particle(particle_multiple)\n\n self.pcf_field.load_particle_definition(particle_multiple)\n\n self.pcf_field.link_particles(self.pcf_field.particles)",
"def getDifferentialFlowDataForAllEvents(self, particleName=\"pion\", order=2, pT_range=None, where=\"\", orderBy=\"event_id\"):\n pid = self._pid(particleName)\n whereClause = \"pid=%d and n=%d\" % (pid, order)\n if pT_range:\n whereClause += \" and %g<=pT and pT<=%g\" % (pT_range[0], pT_range[1])\n if where:\n whereClause += \" and \" + where\n RawdiffvnData = np.asarray(self.db.selectFromTable(\"diff_vn\", (\"pT\", \"vn_real\", \"vn_imag\"), whereClause=whereClause, orderByClause=orderBy))\n #nevent = self.getNumberOfEvents()\n nevent = self.db.selectFromTable(\"multiplicities\", \"count()\", \"pid = %d\" % pid)[0][0]\n npT = len(RawdiffvnData[:,0])/nevent\n diffvnData = RawdiffvnData.reshape(nevent, npT, 3)\n return diffvnData",
"def particleset_potential(particles, smoothing_length_squared = zero, G = constants.G, gravity_code = None, block_size = 0):\n n = len(particles)\n if block_size == 0:\n max = 100000 * 100 #100m floats\n block_size = max // n\n if block_size == 0:\n block_size = 1 #if more than 100m particles, then do 1 by one\n\n mass = particles.mass\n x_vector = particles.x\n y_vector = particles.y\n z_vector = particles.z\n\n potentials = VectorQuantity.zeros(len(mass),mass.unit/x_vector.unit) \n inf_len = numpy.inf | x_vector.unit\n offset = 0\n newshape =(n, 1)\n x_vector_r = x_vector.reshape(newshape)\n y_vector_r = y_vector.reshape(newshape)\n z_vector_r = z_vector.reshape(newshape)\n mass_r=mass.reshape(newshape)\n while offset < n:\n if offset + block_size > n:\n block_size = n - offset\n x = x_vector[offset:offset+block_size] \n y = y_vector[offset:offset+block_size] \n z = z_vector[offset:offset+block_size] \n indices = numpy.arange(block_size)\n dx = x_vector_r - x \n dy = y_vector_r - y\n dz = z_vector_r - z\n dr_squared = (dx * dx) + (dy * dy) + (dz * dz)\n dr = (dr_squared+smoothing_length_squared).sqrt()\n index = (indices + offset, indices)\n dr[index] = inf_len\n potentials += (mass[offset:offset+block_size]/dr).sum(axis=1)\n offset += block_size\n\n return -G * potentials",
"def filter_data(velocity, vflag, vnyquist, vshift, delta_vmax, nfilter=10):\n nrays = velocity.shape[0]\n ngate = velocity.shape[1]\n for j in range(0, nrays):\n for n in range(0, ngate):\n if vflag[j, n] == -3:\n continue\n\n vmoy = 0\n vmoy_plus = 0\n vmoy_minus = 0\n\n n1 = n\n n2 = n1 + nfilter\n n2 = np.min(np.array([ngate, n2]))\n\n idx_selected = vflag[j, n1:n2]\n if np.all((idx_selected == -3)):\n continue\n\n v_selected = velocity[j, n1:n2][idx_selected != -3]\n vmoy = np.median(v_selected)\n\n if np.any((v_selected > 0)):\n vmoy_plus = np.median(v_selected[v_selected > 0])\n else:\n vmoy_plus = np.NaN\n if np.any((v_selected < 0)):\n vmoy_minus = np.median(v_selected[v_selected < 0])\n else:\n vmoy_minus = np.NaN\n\n k = 0\n nselect = np.sum(idx_selected != -3)\n for k in range(nselect):\n vk = v_selected[k]\n dv1 = np.abs(vk - vmoy)\n if dv1 >= delta_vmax:\n if vmoy >= 0:\n vk_unfld = unfold(vk, vmoy_plus, vnyquist, vshift)\n dvk = np.abs(vk - vmoy_plus)\n else:\n vk_unfld = unfold(vk, vmoy_minus, vnyquist, vshift)\n dvk = np.abs(vk - vmoy_minus)\n\n dvkm = np.abs(vk_unfld - vmoy)\n if dvkm < delta_vmax or dvk < delta_vmax:\n velocity[j, n + k] = vk_unfld\n\n return velocity, vflag",
"def extract_particle_dynamics(story):\n particles = []\n if story.kind == \"Particle\":\n if len(story.story_subobjects) > 4:\n # get r, v, as center of mass quantities\n for subobj in story.story_subobjects:\n if subobj.kind == 'Dynamics':\n xcom, ycom, zcom = subobj.story_vals['r'].split(\" \")\n vxcom, vycom, vzcom = subobj.story_vals['v'].split(\" \")\n # get relative positions and velocities of sub-particles\n subparticles = []\n for subobj in story.story_subobjects:\n if subobj.kind == 'Particle':\n subparticles.extend(extract_particle_dynamics(subobj))\n \n # add COM r, v to sub-particles, and append\n for particle in subparticles:\n particles.append((particle[0] + float(xcom),\n particle[1] + float(ycom),\n particle[2] + float(zcom),\n particle[3] + float(vxcom),\n particle[4] + float(vycom),\n particle[5] + float(vzcom),\n particle[6]))\n else: # only 4 subobjects, so this is an individual star\n for subobj in story.story_subobjects:\n if subobj.kind == 'Dynamics':\n x,y,z = subobj.story_vals['r'].split(\" \")\n vx,vy,vz = subobj.story_vals['v'].split(\" \")\n m = subobj.story_vals['m']\n particles.append((float(x), float(y), float(z), float(vx), float(vy), float(vz), float(m)) )\n return particles",
"def P_vapor(self):\n mol = self.mol\n species = self._species\n indices = species._equilibrium_indices(mol>0)\n compounds = species._compounds\n N = len(indices)\n P_vapor = np.zeros_like(mol)\n if N==0: return P_vapor\n species = [compounds[i] for i in indices]\n mol = self.mol[indices]\n x = mol/mol.sum()\n T = self.T\n Psat = [s.VaporPressure(T) for s in species]\n self._gamma.species = species\n P_vapor[indices] = x * Psat * self._gamma(x, T)\n return P_vapor",
"def k_particles(chosen_particle, positions, velocities):\n\n\n # array with all indecies of all k particles for positions\n positions_k = []\n velocities_k = []\n\n # array of new distances considering boundary conditions\n new_distances = []\n\n # check over all particles in positions\n for index in range(N):\n\n distance_x, distance_y = per_boun_distance(chosen_particle, positions[index])\n\n # distance from selected particle to particle with index\n d = np.sqrt(distance_x**2 + distance_y**2)\n\n # append this distance to array of distances\n new_distances.append(d)\n\n # Now we need a sorting algorithm (merge)\n for j in range(k+1):\n low = min(new_distances)\n\n index_k = new_distances.index(low)\n\n # get the index of the particle for velocity\n velocities_k.append(velocities[index_k])\n\n # get the index of the particle for position\n # and add position to all positions within r\n positions_k.append(positions[index_k])\n\n new_distances.pop(index_k)\n\n return velocities_k, positions_k",
"def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V",
"def population(count, l, vmin, vmax):\n return [ particle(l, vmin, vmax) for x in range(count) ]",
"def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)",
"def getTallyParticles(self):\n\n\t\tparticleNames = []\n\n\t\tif self.typeNumber > 0:\n\t\t\tparticleNames.append(particleListShort[self.typeNumber]) \n\t\telse:\n\t\t\tfor i,name in enumerate(self.particleList):\n\t\t\t\ttry:\n\t\t\t\t\tif self.tallyParticles[i] == 1:\n\t\t\t\t\t\tparticleNames.append(self.particleList[i])\n\t\t\t\texcept:\n\t\t\t\t\tpass # For some reasons there can be less than 35 particles listed. Skip in case.\n\t\treturn particleNames"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
This api does not return xml | def xml(self):
raise NotImplementedError('This api does not return xml') | [
"def content_api_xml(url, request):\n headers = {'content-type': 'application/xml'}\n content = 'xml string'\n return response(status_code=200,\n content=content,\n headers=headers,\n request=request)",
"def get_document_xml():",
"def _api_call(url: str) -> ET.Element:\n result = requests.get(url)\n if result.status_code != 200:\n raise RequestException(f\"API status code {result.status_code} for URL: {url}\")\n\n # Remove HTML line breaks (which cause confusion in the XML parsing)\n t: str = re.sub(r\"\\s*(<br/>)+\\s*\", r\" \", result.text)\n\n x_tree = ET.fromstring(t)\n return x_tree",
"def xml_response(self, data):\n response = make_response(data)\n response.headers[\"Content-Type\"] = \"application/xml\"\n\n return response",
"def test_xml_direct(self): \n response = client.result(True, 'xml', 'unittest', test_data = self.test_data)\n root = ET.fromstring(response)\n first_name = root[0][0][0].text\n self.assertEqual(first_name,'John', 'Should print John')\n nationality = '<nationality>' in response\n self.assertFalse(nationality, 'Nationality should not be present')",
"def _xml_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response",
"def xml():\n number = request.form.get('WebClientParam_number')\n xml = inboundxml.Response(inboundxml.Dial(\n inboundxml.Number(\n str(number), \n )\n ))\n return Response(str(xml), mimetype='text/xml')",
"def _create_response(self) -> str:",
"def get_response(self):\n pass",
"def __call_api(self, values):\n # Add auth key to the request dictionary if not supplie\n if 'auth' not in values:\n values['auth'] = self.auth_data['auth']\n\n # Encode the data for a GET request\n data = urllib.parse.urlencode(values)\n\n #print values\n\n # Try to make the request\n xml_string = urllib.request.urlopen(self.xml_rpc + '?' + data).read()\n\n # Parse the XML\n response_data = xmltodict(self.__sanitize(xml_string))\n\n # Ensure that there was XML to parse\n if not response_data:\n return None\n\n # Grab the root element\n response_data = response_data['root'][0]['child']\n\n return response_data",
"def test_02_XML(self):\n pass",
"def xml(cls, res, *args, **kwargs):\n return parse_xml(res.text, *args, **kwargs)",
"def get_requests(self):",
"def __call__(self, value, system):\n request = system.get(\"request\")\n\n if request is None or not isinstance(value, VoiceResponse):\n raise httpexceptions.HTTPServerError()\n\n request.response.content_type = \"application/xml\"\n return value.to_xml()",
"def read(self, return_string=False):\r\n # Get result data from debugger engine and verify length of response\r\n data = self.read_data()\r\n\r\n # Show debug output\r\n debug('[Response data] %s' % data)\r\n\r\n # Return data string\r\n if return_string:\r\n return data\r\n\r\n # Remove special character quoting\r\n data = self.unescape(data)\r\n\r\n # Replace invalid XML characters\r\n data = ILLEGAL_XML_RE.sub('?', data)\r\n\r\n # Create XML document object\r\n document = ET.fromstring(data)\r\n return document",
"def xml(self):\n return self._xml",
"def xml_api() -> XMLClient:\n return XMLClient()",
"def packagesXml(request):\n data_packages = serializers.serialize('xml', GetPackages())\n return HttpResponse(data_packages, mimetype='application/xml; charset=utf8')",
"def get_usercp_xml(self,):\n response = self.session.get('https://ngb.to/usercp.php?type=xml')\n return response.text"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Returns whether error is NOAUTH | def noauth(self):
try:
# some endpoints dont return json
return self.json['response'].get('error_id') == 'NOAUTH'
except:
return False | [
"def unauthorized():\n return HttpError(401)",
"def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403",
"def send_not_authenticate_resp():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})",
"def try_recover_auth_failure(self):\r\n return False",
"def is_auth_error(error: Exception) -> bool:\n if not isinstance(error, Fault):\n return False\n return (\n any(\n \"NotAuthorized\" in code\n for code in extract_subcodes_as_strings(error.subcodes)\n )\n or \"auth\" in stringify_onvif_error(error).lower()\n )",
"def _auth_check():\n try:\n if not _is_authenticated():\n raise web.unauthorized()\n except:\n raise web.unauthorized()",
"def _http_unauthorized(start_response):\n start_response(falcon.HTTP_401, [('Content-Length', '0')])\n return []",
"def is_auth_error(response):\n return isinstance(response, AuthError)",
"def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)",
"def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)",
"def get_authenticated_denied(self):",
"def _ebMaybeBadAuth(self, reason):\n reason.trap(error.NotEnoughAuthentication)\n self.transport.sendPacket(\n MSG_USERAUTH_FAILURE, NS(b\",\".join(self.supportedAuthentications)) + b\"\\xff\"\n )",
"def authenticated_403(self):\n if self.get_current_user() is None:\n raise web.HTTPError(403)",
"def test_no_header(self):\n rv = self.client.get('/api/v1/user/info')\n self.assertEqual(rv.status_code, 401)",
"def test_is_unauthenticated(self):\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"async def handle_unauthorized(self, request: web.Request) -> web.Response:\n raise HTTPUnauthorized()",
"def _unauthorized_callback():\n abort(401)",
"def test_auth_required(self):\n # Make a GET Request\n res = self.client.get(RECIPE_URL)\n\n # Assertion\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_bad_credentials_mean_401_returned(self):\n sender = hawk_auth_sender()\n response = APIClient().get(\n test_url,\n content_type=\"\",\n HTTP_AUTHORIZATION=sender.request_header,\n HTTP_X_FORWARDED_FOR=\"1.2.3.4, 123.123.123.123\",\n )\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n error = {\"detail\": \"Incorrect authentication credentials.\"}\n assert response.json() == error"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Verify can select Maven option | def test_should_choose_maven(self):
search_str = "//*[text()='Maven Project']"
els = self.driver.find_elements_by_xpath(search_str)
self.assertGreater(len(els), 0, 'Maven project is not found!')
els[0].click() | [
"def check():\n if not has('cpan-outdated', 'cpanm'):\n return False\n return True",
"def can_install_project(self):\n return True",
"def test_validate_project(self):\n pass",
"def validate_project():\n conf = get_config() \n # TODO: Run checks on partitioning paramters",
"def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")",
"def test_default_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check dev\", exitcode=None)\n self.assertIn(\"Target: ywangd:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)",
"def test_no_repo(self):\n self.assertEqual(versiontag.get_version(), 'r0.0.0')\n self.assertEqual(versiontag.get_version(pypi=True), '0.0.0')",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def mvn(version):\n\tif version is None:\n\t\tmvn_list = get_mvn_list()\n\t\t_err('Available Maven versions: {0}'.format(mvn_list))\n\tget_mvn(version)",
"def test_default(self):\r\n self.assertEqual(self.option.default, False)",
"def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)",
"def test_version_dropdown(plugin_dialog):\n widget = plugin_dialog.available_list.item(1).widget\n assert widget.version_choice_dropdown.currentText() == \"3\"\n # switch from PyPI source to conda one.\n widget.source_choice_dropdown.setCurrentIndex(1)\n assert widget.version_choice_dropdown.currentText() == \"4.5\"",
"def test_build_tools(self):\n #raise AssertionError(\"%s not implemented\" % sys._getframe().f_code.co_name)\n if self.status: self.status.Warning(\"By default build tools is Xilinx this can be changed in demo/nysa_platform.py\")\n if find_xilinx_path() is None:\n return False\n return True",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def verify_package_status(self):\n pass",
"def check_options(options, parser):\n if not options.get('release_environment', None):\n print(\"release environment is required\")\n parser.print_help()\n return os.EX_USAGE\n\n return 0",
"def maven_scm(self, type=\"default\"):\n try:\n pomConfig = self.__meta_data__.get(\"java\").get(\"poms\").get(type)\n if \"maven_scm\" in pomConfig:\n return pomConfig.get(\"maven_scm\")\n else:\n return self.__meta_data__.get(\"java\").get(\"poms\").get(\"default\").get(\"maven_scm\")\n except:\n return None",
"def _in_travis(): # pragma: no cover\n return 'TRAVIS' in os.environ",
"def testCheckBuildRequired(self):\n project_definition = projects.ProjectDefinition('test')\n build_helper = interface.BuildHelper(project_definition, '', {})\n\n result = build_helper.CheckBuildRequired(None)\n self.assertTrue(result)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Checks that all transformers in self.transformer_list are compatible with methods fit, transform and fit_transform. | def _check_transformers(self):
assert all([hasattr(trf, "fit") for trf in self.transformer_list]), "At least one transformer object is not " \
"compatible with 'fit' method."
assert all([hasattr(trf, "transform") for trf in self.transformer_list]), "At least one classifier object " \
"is not compatible with " \
"'transform' method."
assert all([hasattr(trf, "fit_transform") for trf in self.transformer_list]), "At least one classifier " \
"object is not compatible with " \
"'fit_transform' method." | [
"def _validate_transformer(\n self,\n ) -> None:\n if not (\n hasattr(self.transformer, \"fit\") # noqa: WPS421\n and hasattr(self.transformer, \"transform\") # noqa: WPS421\n and hasattr(self.transformer, \"fit_transform\") # noqa: WPS421\n ):\n raise TypeError(\n \"Transformer should implement fit and \"\n \"transform. \" + str(self.transformer)\n + \" (type \" + str(type(self.transformer)) + \")\"\n \" doesn't\",\n )\n\n tags = self.transformer._get_tags() # noqa: WPS437\n\n if tags['stateless']:\n warnings.warn(\n f\"Parameter 'transformer' with type \"\n f\"{type(self.transformer)} should use the data for \"\n f\" fitting.\"\n f\"It should have the 'stateless' tag set to 'False'\",\n )\n\n if tags['requires_y']:\n warnings.warn(\n f\"Parameter 'transformer' with type \"\n f\"{type(self.transformer)} should not use the class label.\"\n f\"It should have the 'requires_y' tag set to 'False'\",\n )",
"def check_transformer_common(self, transformer_short_code):\n TransformerClass = task_map[transformer_short_code]\n transf_object = TransformerClass()\n self.check_fit_method(transf_object)\n self.check_transform_method(transf_object)",
"def _ensure_transform(\n self, message: dict, transformers: Optional[List[Callable]] = None\n ) -> None:\n required_transformers = self.__requiredtransformers__\n\n missing_transformers = None\n if required_transformers and not transformers:\n missing_transformers = required_transformers\n\n called = set()\n if transformers:\n for func in transformers:\n if isinstance(func, functools.partial):\n called.add(func.func.__name__)\n else:\n called.add(func.__name__)\n\n func(message=message)\n\n if required_transformers != called:\n missing_transformers = required_transformers.difference(called)\n\n if missing_transformers:\n raise MissingTransformersError(self.__class__.__name__, missing_transformers)",
"def check_model_transformer_common(self, model_trans_short_code):\n\n EstimatorClass = task_map[model_trans_short_code]\n estim_object = EstimatorClass()\n self.check_fit_method(estim_object)\n self.check_predict_method(estim_object)",
"def check_if_it_can_fit(object):\n if hasattr(object, \"fit\") and hasattr(object, \"predict\") and hasattr(object, \"get_params\") and hasattr(object,\n \"set_params\"):\n return object\n else:\n raise Exception(\"Pass an estimator that has methods fit predict set_params get_params\")",
"def validate_bettertransformer(self):\n if self.num_heads is None:\n raise ValueError('Number of heads not set for `BetterTransformer` integration.')\n if self.embed_dim is None:\n raise ValueError('Embedding dimension not set for `BetterTransformer` integration.')\n if self.norm2_eps is None or self.norm1_eps is None:\n raise ValueError('`norm2_eps` and `norm1_eps` not set for `BetterTransformer` integration.')\n if self.pos_emb_type is not None and self.pos_emb_type != 'absolute':\n raise ValueError(f'Positional embedding type {self.pos_emb_type} not supported for `BetterTransformer` integration')\n if self.norm1_eps != self.norm2_eps:\n raise ValueError('norm1_eps and norm2_eps must be equal for `BetterTransformer` integration.')\n if self.act_fn in USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS:\n logger.warning(f'Overridding {self.act_fn} activation with gelu. Use the transformed model at your own risk, the output logits could be significantly different.')\n self.act_fn = 'gelu'\n elif self.act_fn not in SUPPORTED_ACTIVATION_FUNCTIONS:\n raise ValueError(f'Activation function {self.act_fn} not supported for `BetterTransformer` integration.')\n self.use_gelu = self.act_fn == 'gelu' or self.act_fn == 'gelu_new'\n if self.num_heads % 2 == 1:\n raise ValueError(f'Number of heads {self.num_heads} is not supported for `BetterTransformer` integration. Number of heads must be even.')",
"def _ensure_basetransformer_init_called(self):\n assert isinstance(self, BaseTransformer), \"This class should be of type BaseTransformer.\"\n if not all(map(\n lambda x: hasattr(self, x),\n ('name', 'savers', 'is_initialized', 'is_train', 'is_invalidated', 'setup', '_teardown')\n )):\n raise RuntimeError(\n f'Please initialize Mixins in the good order. The present Mixin should '\n f'be initialized after BaseTransformer. '\n f'Got: {inspect.getmro(self.__class__)}. '\n f'Visit https://www.neuraxle.org/stable/classes_and_modules_overview.html '\n f'for more information.'\n )",
"def _validate_and_set_transforms(\n self,\n transform_prediction_and_target: Union[Callable, None],\n transform_target: Union[Callable, None],\n transform_inference: Union[Callable, None],\n transform_support: Union[Tuple, None],\n ) -> None:\n # Checks\n assert not (\n (transform_prediction_and_target is not None)\n and (transform_target is not None)\n ), \"Please specify at most one of `transform_prediction_and_target` and `transform_target`\"\n if (transform_target is not None) != (transform_inference is not None):\n self.warning(\n \"Setting one of `transform_target` and `transform_inference`, but not \"\n \"the other.\"\n )\n\n if transform_target is not None:\n assert transform_target is not None\n assert transform_inference is not None\n\n if transform_support is not None:\n assert transform_support is not None\n\n assert (\n len(transform_support) == 2\n ), \"Please specify min and max for transformation support.\"\n x_test = torch.from_numpy(\n np.linspace(transform_support[0], transform_support[1], 10)\n )\n else:\n x_test = np.logspace(-6, 6, 12 + 1)\n x_test = torch.from_numpy(\n np.concatenate([-x_test[::-1], [0], x_test])\n )\n\n # Add feature dimension before inference transformation to make it\n # match the dimensions of a standard prediction. Remove it again\n # before comparison. Temporary\n try:\n t_test = torch.unsqueeze(transform_target(x_test), -1)\n t_test = torch.squeeze(transform_inference(t_test), -1)\n valid = torch.isfinite(t_test)\n\n assert torch.allclose(t_test[valid], x_test[valid]), (\n \"The provided transforms for targets during training and \"\n \"predictions during inference are not inverse. Please \"\n \"adjust transformation functions or support.\"\n )\n del x_test, t_test, valid\n\n except IndexError:\n self.warning(\n \"transform_target and/or transform_inference rely on \"\n \"indexing, which we won't validate. Please make sure that \"\n \"they are mutually inverse, i.e. that\\n\"\n \" x = transform_inference(transform_target(x))\\n\"\n \"for all x that are within your target range.\"\n )\n\n # Set transforms\n if transform_prediction_and_target is not None:\n self._transform_prediction_training = (\n transform_prediction_and_target\n )\n self._transform_target = transform_prediction_and_target\n else:\n if transform_target is not None:\n self._transform_target = transform_target\n if transform_inference is not None:\n self._transform_prediction_inference = transform_inference",
"def check_model_list():\n # Get the models from the directory structure of `src/transformers/models/`\n models_dir = os.path.join(PATH_TO_TRANSFORMERS, \"models\")\n _models = []\n for model in os.listdir(models_dir):\n if model == \"deprecated\":\n continue\n model_dir = os.path.join(models_dir, model)\n if os.path.isdir(model_dir) and \"__init__.py\" in os.listdir(model_dir):\n _models.append(model)\n\n # Get the models in the submodule `transformers.models`\n models = [model for model in dir(transformers.models) if not model.startswith(\"__\")]\n\n missing_models = sorted(set(_models).difference(models))\n if missing_models:\n raise Exception(\n f\"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}.\"\n )",
"def test_all_steps_fit_transform():\n with pytest.raises(TypeError):\n TransformerPipeline([('svc', NoTransformT())])\n\n with pytest.raises(TypeError):\n TransformerPipeline([('svc', NoFitT())])",
"def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])",
"def check_transform(self, X):\n m = X.shape[1]\n\n if self.ncols != m:\n print(\"Number of Columns in input data of fit function and', \\\n 'transform function are different\")\n return False\n\n for i in range(m):\n ls = np.unique(X[:, i])\n n_unique = len(ls)\n\n if self.arr_nunique[i] != n_unique:\n print('Mismatch in the number of unique values in',\n 'the '+str(i)+'th column')\n return False\n for val in ls:\n if val not in self.arr_dic[i].keys():\n print(str(i)+'th column contain a value which was',\n 'not in the data used to fit data')\n return False\n\n return True",
"def _is_transformable(self):\n if not self._app.get_paths():\n raise NotTransformable(\"No image to\")\n elif not edit_supported(self._app.get_path()):\n raise NotTransformable(\"Filetype not supported for\")\n # Some operations only make sense if we are allowed to save to file\n elif not settings[\"autosave_images\"].get_value():\n message = \"\"\n if self._app[\"thumbnail\"].toggled:\n message = 'When operating in thumbnail mode ' \\\n '\"autosave_images\" must be enabled for'\n elif self._app[\"mark\"].marked:\n message = 'When images are marked ' \\\n '\"autosave_images\" must be enabled for'\n if message:\n raise NotTransformable(message)",
"def is_sklearn_transformer(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"transformer\"",
"def test_compatible(self):\n\n # a card should be compatible with itself\n self.assertTrue(self.cardA.compatible(self.cardA))\n # and another card with all the same features\n self.assertTrue(self.cardA.compatible(self.cardB))\n # make sure cards with different features are not compatible\n self.assertFalse(self.cardA.compatible(self.cardC))\n # make sure a card with a superset of features is not compatible with the subset\n self.assertFalse(self.cardC.compatible(self.cardD))\n\n # test against feature lists\n self.assertTrue(self.cardA.compatible([\"Color\", \"Number\", \"Shape\", \"Fill\"]))\n self.assertFalse(self.cardA.compatible([\"Color\", \"Number\", \"Shape\"]))\n self.assertFalse(self.cardA.compatible([\"Color\", \"Direction\", \"Polarity\", \"Temperature\"]))",
"def fit_transform(self, data: Union[TimeSeries, Sequence[TimeSeries]]) -> Union[TimeSeries, Sequence[TimeSeries]]:\n for transformer in self._transformers:\n if isinstance(transformer, FittableDataTransformer):\n transformer.fit(data)\n\n data = transformer.transform(data)\n return data",
"def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )",
"def test_sklearn_transformer_checks():\n\n check_estimator(SymbolicTransformer(population_size=50,\n hall_of_fame=10,\n generations=5))",
"def _check_estimators(\n self,\n estimators,\n attr_name=\"steps\",\n cls_type=None,\n allow_mix=True,\n clone_ests=True,\n ):\n msg = (\n f\"Invalid {attr_name!r} attribute, {attr_name!r} should be a list\"\n \" of estimators, or a list of (string, estimator) tuples. \"\n )\n if cls_type is None:\n msg += f\"All estimators in {attr_name!r} must be of type BaseEstimator.\"\n cls_type = BaseEstimator\n elif isclass(cls_type) or isinstance(cls_type, tuple):\n msg += (\n f\"All estimators in {attr_name!r} must be of type \"\n f\"{cls_type.__name__}.\"\n )\n else:\n raise TypeError(\"cls_type must be a class or tuple of classes\")\n\n if (\n estimators is None\n or len(estimators) == 0\n or not isinstance(estimators, list)\n ):\n raise TypeError(msg)\n\n def is_est_is_tuple(obj):\n \"\"\"Check whether obj is estimator of right type, or (str, est) tuple.\"\"\"\n is_est = isinstance(obj, cls_type)\n is_tuple = self._is_name_and_est(obj, cls_type)\n\n return is_est, is_tuple\n\n if not all(any(is_est_is_tuple(x)) for x in estimators):\n raise TypeError(msg)\n\n msg_no_mix = (\n f\"elements of {attr_name} must either all be estimators, \"\n f\"or all (str, estimator) tuples, mix of the two is not allowed\"\n )\n\n if not allow_mix and not all(is_est_is_tuple(x)[0] for x in estimators):\n if not all(is_est_is_tuple(x)[1] for x in estimators):\n raise TypeError(msg_no_mix)\n\n return self._get_estimator_tuples(estimators, clone_ests=clone_ests)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
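The `_check_transformers` row above relies on duck-typing `hasattr` checks; the short sketch below restates the same check as a standalone helper, purely for illustration (the helper name, the scikit-learn inputs, and the `TypeError` behaviour are assumptions, not part of the dataset row, and scikit-learn is assumed to be installed).

# Illustrative sketch only: names and the TypeError behaviour are assumptions,
# not taken from the dataset row above; requires scikit-learn.
from sklearn.preprocessing import MinMaxScaler, StandardScaler

REQUIRED_METHODS = ("fit", "transform", "fit_transform")


def check_transformers(transformer_list):
    # Fail fast if any object lacks one of the required methods.
    for trf in transformer_list:
        for method in REQUIRED_METHODS:
            if not hasattr(trf, method):
                raise TypeError("%r is not compatible with the %r method" % (trf, method))


check_transformers([StandardScaler(), MinMaxScaler()])  # passes silently
# check_transformers([object()])  # would raise TypeError: missing 'fit'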
Deduce correct spark dtype from pandas dtype for column col of pandas dataframe df | def infer_spark_dtype(df, col):
logger = logging.getLogger(__name__ + ".infer_spark_dtype")
pd_dtype = df.dtypes[col]
# get a sample from column col
sample = df[col].dropna()
if sample.shape[0] == 0:
logger.warning("column %s of dtype %s containing nulls found" % (col, pd_dtype))
sample = None
else:
sample = sample.iloc[0]
# infer spark dtype
# datetimes
if pd.api.types.is_datetime64_any_dtype(pd_dtype):
ret = T.TimestampType()
# ints
elif (pd_dtype == 'int8') or (pd_dtype == 'int16'): # int8, int16
ret = T.ShortType()
elif pd_dtype == 'int32':
ret = T.IntegerType()
elif pd.api.types.is_int64_dtype(pd_dtype):
ret = T.LongType()
# uints
elif pd_dtype == 'uint8':
ret = T.ShortType()
elif pd_dtype == 'uint16':
ret = T.IntegerType()
elif pd_dtype == 'uint32':
ret = T.LongType()
elif pd_dtype == 'uint64':
logger.warning("converting column %s of type uint64 to spark LongType - overflows will be nulls" % col)
ret = T.LongType()
# floats
elif (pd_dtype == 'float16') or (pd_dtype == 'float32'):
ret = T.FloatType()
elif pd_dtype == 'float64': # float64
ret = T.DoubleType()
elif pd_dtype == 'bool':
ret = T.BooleanType()
# object
elif pd_dtype == 'object':
if (sample is None) or (isinstance(sample, str)):
logger.warning("converting column %s of type object to spark StringType" % col)
ret = T.StringType()
elif isinstance(sample, tuple):
raise NotImplementedError("cannot convert column %s containing tuples to spark" % col)
else:
raise NotImplementedError("values in column %s of type object not understood" % col)
# category
elif pd.api.types.is_categorical_dtype(pd_dtype):
logger.warning("converting column %s of type category to spark StringType" % col)
ret = T.StringType()
else:
raise NotImplementedError("column %s of type %s not understood" % (col, pd_dtype))
return ret | [
"def get_data_type(df, col):\n if col not in df.columns:\n raise KeyError(f'Column \"{col:s}\" not in input dataframe.')\n dt = dict(df.dtypes)[col]\n\n if hasattr(dt, \"type\"):\n # convert pandas types, such as pd.Int64, into numpy types\n dt = type(dt.type())\n\n try:\n # spark conversions to numpy or python equivalent\n if dt == \"string\":\n dt = \"str\"\n elif dt == \"timestamp\" or dt == \"date\":\n dt = np.datetime64\n elif dt == \"boolean\":\n dt = bool\n elif dt == \"bigint\":\n dt = np.int64\n except TypeError:\n pass\n\n return np.dtype(dt)",
"def convert_dtype_string(df, col):\n dtype = df[col].dtype.__repr__()\n if dtype in {\"StringDtype\"}:\n return df[col]\n else:\n return df[col].convert_dtypes().copy()",
"def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df",
"def convert_dtype_float(df, col):\n dtype = df[col].dtype.__repr__()\n if dtype in {\"Float32Dtype()\", \"Float64Dtype()\"}:\n return df[col]\n else:\n return df[col].convert_dtypes().copy()",
"def cast_to_datatype(_df, _colname, _datatype):\n _coldatatype = _df.schema[_colname].dataType\n if isinstance(_coldatatype, _datatype):\n raise ValueError('Already this datatype')\n else:\n if _datatype == DateType:\n _df = _df.withColumn(_colname, to_date(_df[_colname], 'dd/MM/yy').cast(_datatype()))\n return _df.filter(_df[_colname].isNotNull())\n else:\n return _df.withColumn(_colname, _df[_colname].cast(_datatype()))",
"def type_convert_cast(dataframe, colname, data_type):\n\n def try_cast(x):\n try:\n return data_type(x)\n except ValueError:\n return x\n\n dataframe[colname] = dataframe[colname].apply(try_cast)\n\n return dataframe",
"def convert_dtype_array(df, col):\n df[col] = df[col].apply(convert_to_tuple).fillna(pd.NA).copy()\n return df[col]",
"def convert_dtype_int(df, col):\n dtype = df[col].dtype.__repr__()\n if dtype in {\"Int32Dtype()\", \"Int64Dtype()\"}:\n return df[col]\n else:\n return df[col].convert_dtypes().copy()",
"def get_col_dtype(col):\n\tif col.dtype ==\"object\":\n\n\t # try numeric\n\t try:\n\t col_new = pd.to_datetime(col)\n\t return col_new.dtype\n\t except:\n\t try:\n\t col_new = pd.to_numeric(col)\n\t # print(col_new.dtype)\n\t return col_new.dtype\n\t except:\n\t return \"Object\"\n\tif col.dtype ==\"bool\": return \"Object\"\n\telse:\n\t if col.dtype == 'float64' or col.dtype == 'int64':\n\t return 'Numeric'\n\t else: \n\t return col.dtype",
"def _infer_atomic_data_type(column: pd.Series) -> Any:\n\n return infer_dtype(column[column.apply(_check_valid_values, 0)])",
"def _downcast_memory(df: pd.DataFrame) -> pd.DataFrame:\n cols = df.dtypes.index.tolist()\n types = df.dtypes.values.tolist()\n for i, j in enumerate(types):\n if \"Int\" in str(j) or \"int\" in str(j):\n if (\n df[cols[i]].min() > np.iinfo(np.int8).min\n and df[cols[i]].max() < np.iinfo(np.int8).max\n ):\n df[cols[i]] = df[cols[i]].astype(\"Int8\")\n elif (\n df[cols[i]].min() > np.iinfo(np.int16).min\n and df[cols[i]].max() < np.iinfo(np.int16).max\n ):\n df[cols[i]] = df[cols[i]].astype(\"Int16\")\n elif (\n df[cols[i]].min() > np.iinfo(np.int32).min\n and df[cols[i]].max() < np.iinfo(np.int32).max\n ):\n df[cols[i]] = df[cols[i]].astype(\"Int32\")\n else:\n df[cols[i]] = df[cols[i]].astype(\"Int64\")\n # Avoid forcing \"float16\" because it loses precision and is fragile\n elif \"Float\" in str(j) or \"float\" in str(j):\n if (\n df[cols[i]].min() > np.finfo(np.float16).min\n and df[cols[i]].max() < np.finfo(np.float32).max\n ):\n df[cols[i]] = pd.to_numeric(df[cols[i]], downcast=\"float\")\n else:\n df[cols[i]] = df[cols[i]].astype(\"Float64\")\n elif \"Object\" in str(j) or \"object\" in str(j):\n df[cols[i]] = df[cols[i]].astype(\"category\")\n\n return df",
"def ibis_schema_apply_to(schema, df):\n\n for column, dtype in schema.items():\n pandas_dtype = dtype.to_pandas()\n if isinstance(dtype, dt.Interval):\n df[column] = df[column].values.astype(pandas_dtype)\n else:\n df[column] = df[column].astype(pandas_dtype, errors='ignore')\n\n if PY2 and dtype == dt.string:\n df[column] = df[column].str.decode('utf-8', errors='ignore')\n\n return df",
"def convert_dtypes(self):\n i = -1\n dtypes = list(self.df.dtypes)\n for dtype in dtypes:\n i += 1\n if dtype == np.dtype('float64'):\n continue\n # not sure if this is needed\n #elif 'float' in dtype.name:\n # df[df.columns[i]] = df[df.columns[i]].astype('float64')\n # convert 64-bit integers to 32-bit for shapefile format\n elif dtype == np.dtype('int64'):\n self.df[self.df.columns[i]] = self.df[self.df.columns[i]].astype('int32')\n # convert boolean values to strings\n elif dtype == np.dtype('bool'):\n self.df[self.df.columns[i]] = self.df[self.df.columns[i]].astype('str')\n\n # strip dtype names just down to 'float' or 'int'\n dtypes = [''.join([c for c in d.name if not c.isdigit()]) for d in list(self.df.dtypes)]\n\n # also exchange any 'object' dtype for 'str'\n dtypes = [d.replace('object', 'str') for d in dtypes]\n self.properties = dict(zip(self.df.columns, dtypes))\n\n # delete the geometry column\n del self.properties[self.geo_column]",
"def cast(elem, psql_type):\n if psql_type == 'real':\n return float(format(elem, '.6g'))\n elif psql_type == 'double precision':\n return float(format(elem, '.15g'))\n elif psql_type == 'timestamp':\n if isinstance(elem, pd.Timestamp):\n return elem.to_pydatetime()\n else:\n return elem\n elif psql_type == 'text':\n if type(elem) == float:\n return \"NaN\"\n return str(elem)\n else:\n return elem",
"def test_column_latitude_dtype():\n\tdef column_latitude_dtype(df):\n\n\t\tassert df['latitude'].dtype == float\n\n\tcolumn_latitude_dtype(data)\n\tcolumn_latitude_dtype(data_corrupt)",
"def detect_and_cast_column_format(column):\n if numpy.issubdtype(column.dtype, numpy.number):\n return column, DataFormats.numerical\n try:\n column = column.astype(numpy.float32)\n except ValueError:\n pass\n else:\n return column, DataFormats.numerical\n\n return column, DataFormats.character",
"def test_df_all_types():\n return pd.DataFrame({\n 'intcol': [1, 2],\n 'strcol': ['three', 'four'],\n 'floatcol': [5.0, 6.0],\n 'boolcol': [True, False],\n 'datetimecol': [\n np.datetime64('2020-01-01'), np.datetime64('2020-01-02')],\n })",
"def _enforce_column_types(dataframe: \"pd.Dataframe\", has_source: bool = True) -> None:\n\n dataframe.astype(\n {\n \"brain_region\": str,\n \"cell_type\": str,\n \"measurement\": float,\n \"measurement_unit\": str,\n \"standard_deviation\": float,\n \"measurement_type\": str,\n },\n copy=False,\n )\n\n if has_source:\n dataframe.astype(\n {\n \"comment\": str,\n \"source_title\": str,\n \"specimen_age\": str,\n },\n copy=False,\n )",
"def test_downcast_df_int_columns(self):\n\n data = pd.DataFrame({\n 'Float64': [1.0],\n 'Int64': [1],\n \"Object\": 'Hi'\n })\n\n result = condense_csv.downcast_df_int_columns(data)\n result = [str (i) for i in list (result.dtypes.values)]\n answer = ['float64', 'int8', 'object']\n\n self.assertEqual(result, answer)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
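A hedged usage sketch for `infer_spark_dtype` above: assembling a Spark `StructType` from a small pandas frame. It assumes pandas and pyspark are installed, that the function above is already defined in scope, and that the column names and values are made up for illustration.

# Usage sketch: assumes infer_spark_dtype from the row above is in scope.
import logging

import pandas as pd
import pyspark.sql.types as T

logging.basicConfig(level=logging.WARNING)

df = pd.DataFrame({
    "id": pd.Series([1, 2, 3], dtype="int32"),
    "price": pd.Series([9.99, 19.5, 3.0], dtype="float64"),
    "created": pd.to_datetime(["2021-01-01", "2021-01-02", "2021-01-03"]),
})

# Build a Spark schema column by column from the inferred types.
schema = T.StructType([
    T.StructField(col, infer_spark_dtype(df, col), nullable=True)
    for col in df.columns
])
print(schema)  # StructType with IntegerType, DoubleType and TimestampType fields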
Run a command and echo it first | def run_cmd(call, cmd, *, echo=True, **kwargs):
if echo:
print('$> ' + ' '.join(map(pipes.quote, cmd)))
return call(cmd, **kwargs) | [
"def echo(what):\n if '\\n' not in what:\n cmd_print(f'echo {shlex.quote(what)}')\n return CommandOutput(what + '\\n')",
"def run_single_process(command):\n try:\n print command\n call(command, shell=1)\n sys.stdout.write('.')\n sys.stdout.flush()\n except KeyboardInterrupt:\n pass",
"def system_call(command):\n print(\"\\n### {}\".format(command))\n stderr = subprocess.STDOUT\n pipe = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n stdout, stderr = pipe.communicate()\n print(stdout)",
"def execute(command):\r\n \r\n print 'Running:', command\r\n os.system(command)\r\n print '--------------------\\n'",
"def run_command(opts, cmd):\n print(cmd)\n if not opts.dryrun:\n print(check_output(cmd, shell=True))",
"def print_stdout(command):\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()",
"def execute(sefl, cmd, hold_on=True):\n # we want not exit terminal when `cmd` is done, so '&& cat' at the end\n cmd = cmd + \" && cat\" if hold_on else cmd\n print(\"Executed command: {}\".format(cmd))\n system(f\"gnome-terminal -- bash -c \\\"{cmd}\\\"\")\n # system(\"exo-open --launch TerminalEmulator {}\".format(cmd))",
"def run(cmd: str, verbose: bool = False):\n\n if verbose:\n print(cmd)\n\n out = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n if verbose:\n print(out)\n\n return out",
"def run_command(self, cmd): \n self.log(cmd)\n return self.module.run_command(cmd.split())",
"def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response",
"def run_command(cmd, component=\"\", display=True, return_output=False):\n now = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n if display:\n click.secho(\"[{0}] \".format(now), bold=True, nl=False, fg=\"green\")\n click.secho(\"{0}: \".format(component), bold=True, nl=False, fg=\"yellow\")\n click.secho(\"{0}\".format(cmd), bold=True)\n if component:\n os.chdir(get_srcdir(component))\n try:\n if return_output:\n result = subprocess.check_output(cmd, shell=True)\n return result.decode().rstrip(\"\\r\\n\")\n else:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as err:\n if display:\n click.secho(\"[{0}] \".format(now), bold=True, nl=False, fg=\"green\")\n click.secho(\"{0}: \".format(component), bold=True, nl=False, fg=\"yellow\")\n click.secho(\"{0}\".format(err), bold=True, fg=\"red\")\n sys.exit(err.returncode)",
"def _run_system_command(self, cmd):\n self.logger.debug(\"CMD='{}'\".format(cmd))\n stdin, stdout, stderr = self.ssh.exec_command(cmd)\n cmd_output=stdout.read()\n self.logger.debug(\"CMD Output='{}'\".format(cmd_output))\n return cmd_output",
"def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo",
"def run_command(cmd):\n run = subprocess.check_output\n try:\n return run(cmd, shell=True, stderr=subprocess.PIPE).strip()\n except Exception:\n return None",
"def run_command(self, cmd):\n cmd = self.cmd.precmd(cmd)\n sig = self.cmd.onecmd(cmd)\n return self.cmd.postcmd(sig, cmd)",
"def run_user_command(self) -> None:\n if self.magic_run_command:\n proc = subprocess.Popen(\n self.magic_run_command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=False, # Allows inheriting file descriptors\n universal_newlines=True, # which is useful for process substitution\n encoding='UTF-8'\n )\n\n self.magic_stdout, self.magic_stderr = proc.communicate()\n self.magic_stdout = self.magic_stdout or '\\n'\n self.magic_returncode = proc.returncode",
"def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response",
"def _run_cmd(self, command, env):\n env_complete = dict(os.environ)\n env_complete.update(env)\n proc = subprocess.Popen(command,\n env=env_complete,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True)\n while True:\n try:\n print(proc.stdout.readline(), end='')\n except Exception:\n proc.kill()\n break\n if proc.poll() is not None:\n print(proc.stdout.read())\n print('== End of Duplicity output ==')\n if proc.returncode == 0:\n print('Duplicity returned NORMALLY.\\n')\n else:\n print('Duplicity returned with ERROR CODE {}'.format(proc.returncode))\n sys.exit(proc.returncode)\n break\n time.sleep(1)",
"def echo_command(command: Sequence[str], *, save_to: Optional[Path]) -> Sequence[str]:\n output = \" \".join(shlex.quote(part) for part in command)\n print(output)\n if save_to is not None:\n with save_to.open(mode=\"a\", encoding=\"utf-8\") as save_to_file:\n print(output, file=save_to_file)\n return command"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
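A minimal usage sketch for `run_cmd` above, assuming the function (together with the `pipes` module it uses) is in scope; note that `pipes.quote` was removed in Python 3.13, where `shlex.quote` is the drop-in replacement.

# Usage sketch: assumes run_cmd (and the pipes module it relies on) is in scope.
from subprocess import check_call, check_output

run_cmd(check_call, ['echo', 'hello world'])           # echoes "$> echo 'hello world'" first
out = run_cmd(check_output, ['echo', 'hello world'])   # still echoed; output is captured
print(out.decode('utf-8'))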
Get the last commit to modify the given paths | def last_modified_commit(*paths, **kwargs):
return check_output([
'git',
'log',
'-n', '1',
'--pretty=format:%h',
'--',
*paths
], **kwargs).decode('utf-8') | [
"def get_last_commit(self, repo):\n return self.get_commits(repo, 1)[0]",
"def last_commit(self, tree, path):\n raise RepositoryError(\"Abstract Repository\")",
"def last_commit(repopath):\n command = \"cd %s; git log -1i --date=iso\" % repopath\n out = run.command(command)\n if out:\n creg = re.compile(r\"commit\\s+(?P<remote_host>([a-f0-9]+))\") # Commitid\n areg = re.compile(r\"Author:\\s+(?P<author>(.*$))\") # author\n dreg = re.compile(r\"Date:\\s+(?P<date>(\\d{4}-\\d{2}-\\d{2}\\s+\\d{2}:\\d{2}:\\d{2}))\") # Date\n last = {}\n\n for line in out.splitlines():\n commit = creg.search(line)\n author = areg.search(line)\n date = dreg.search(line)\n if commit:\n last['id'] = commit.group(1)\n elif author:\n last['author'] = author.group(1)\n elif date:\n last['date'] = date.group(1)\n\n return last\n else:\n return None",
"def latest_tag_or_mod_commit(*paths, **kwargs):\n latest_modification_commit = check_output(\n [\n 'git', 'log',\n '--max-count=1',\n '--pretty=format:%h',\n '--',\n *paths,\n ],\n **kwargs,\n ).decode('utf-8').strip()\n\n try:\n git_describe_head = check_output(\n [\n 'git', 'describe', '--tags', '--long',\n ],\n **kwargs,\n ).decode('utf-8').strip().rsplit(\"-\", maxsplit=2)\n except subprocess.CalledProcessError:\n # no tags on branch\n return latest_modification_commit\n\n latest_tag = git_describe_head[0]\n latest_tagged_commit = check_output(\n [\n 'git', 'rev-list', '--abbrev-commit', '-n', '1', latest_tag,\n ],\n **kwargs,\n ).decode('utf-8').strip()\n\n try:\n check_call(\n [\n 'git', 'merge-base', '--is-ancestor', latest_tagged_commit, latest_modification_commit,\n ],\n **kwargs,\n )\n except subprocess.CalledProcessError:\n # latest_tagged_commit was newer than latest_modification_commit\n return latest_tagged_commit\n else:\n return latest_modification_commit",
"def get_first_last_commit_date(path):\n # %at specifies a UNIX time stamp\n process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n log = stdout.decode().strip('\\n').split('\\n')\n last = int(log[0])\n first = int(log[-1])\n return (first, last)",
"def get_last_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[0] if commits else None",
"def last_modified_date(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%cd',\n '--date=iso',\n '--',\n *paths\n ], **kwargs).decode('utf-8')",
"def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev",
"def get_first_commit(repo, changes, since):\n if since:\n first = since\n else:\n first = get_latest_rev(changes)\n\n if first:\n try:\n return repo.rev_parse(first)\n except GitRepositoryError:\n if since:\n raise GbsError(\"Invalid commit: %s\" % (first))\n else:\n raise GbsError(\"Can't find last commit ID in the log, \"\\\n \"please specify it by '--since'\")",
"def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()",
"def find_previous_commit(self, paths, revision=\"HEAD\", return_first=False, full=False):\n kwargs = {}\n\n if full:\n kwargs[\"full_history\"] = True\n\n if return_first:\n file_commits = list(self.repo.iter_commits(revision, paths=paths, **kwargs))\n else:\n file_commits = list(self.repo.iter_commits(revision, paths=paths, max_count=1, **kwargs))\n\n if not file_commits:\n raise KeyError(\"Could not find a file {0} in range {1}\".format(paths, revision))\n\n return file_commits[-1 if return_first else 0]",
"def get_git_commit(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n commit = repo.head.commit.hexsha\n return commit\n except Exception:\n return None",
"def last_commit_id(self) -> Optional[str]:\n return pulumi.get(self, \"last_commit_id\")",
"def find_latest_modified_directory(paths):\n\n ret_path = None\n newest_timestamp = None\n\n for p in paths:\n modified_timestamp = os.path.getmtime(p)\n\n if newest_timestamp is None or modified_timestamp > newest_timestamp:\n newest_timestamp = modified_timestamp\n ret_path = p\n\n return ret_path",
"def get_git_changeset():\n repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n shell=True, cwd=repo_dir, universal_newlines=True)\n timestamp = git_log.communicate()[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')",
"def current_commit(self) -> str:\n # TODO: Do we want short ids?\n head = self.open_repo().head\n if head is None:\n return None # TODO: This is bad\n else:\n return str(head.target)",
"def get_last_changeset(self):\n return self.repository.changesets.get(date=self.last_changed)",
"def svn_client_commit_item_t_path_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n return \"\"",
"def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
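A usage sketch for `last_modified_commit` above; the paths and directory below are illustrative, and it assumes the function is in scope and the code runs inside a git working tree.

# Usage sketch: assumes last_modified_commit from the row above is in scope
# and the current directory is inside a git checkout.
commit = last_modified_commit('README.md', 'setup.py')
print('last commit touching those paths: %s' % commit)

# Extra keyword arguments are forwarded to check_output, e.g. another checkout:
commit = last_modified_commit('Dockerfile', cwd='/path/to/other/repo')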
Return the last modified date (as a string) for the given paths | def last_modified_date(*paths, **kwargs):
return check_output([
'git',
'log',
'-n', '1',
'--pretty=format:%cd',
'--date=iso',
'--',
*paths
], **kwargs).decode('utf-8') | [
"def _get_last_modified_date(path):\n last_date = 0\n root_dir, subdirs, files = os.walk(path).next()\n # get subdirs and remove hidden ones\n subdirs = [s for s in subdirs if not s.startswith('.')]\n for subdir in subdirs:\n for root, _, _ in os.walk(join(path, subdir)):\n base = os.path.basename(root)\n # checking if is a hidden path\n if not base.startswith(\".\") and not base.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(root))\n\n # check files of interest in the skill root directory\n files = [f for f in files\n if not f.endswith('.pyc') and f != 'settings.json']\n for f in files:\n last_date = max(last_date, os.path.getmtime(os.path.join(path, f)))\n return last_date",
"def find_latest_modified_directory(paths):\n\n ret_path = None\n newest_timestamp = None\n\n for p in paths:\n modified_timestamp = os.path.getmtime(p)\n\n if newest_timestamp is None or modified_timestamp > newest_timestamp:\n newest_timestamp = modified_timestamp\n ret_path = p\n\n return ret_path",
"def modification_date(filepath):\n if not filepath:\n return ''\n t = os.path.getmtime(filepath)\n return datetime.datetime.utcfromtimestamp(t).strftime('%Y-%m-%d %H:%M:%S')",
"def moddate_of(filepath):\n return os.stat(filepath).st_mtime",
"def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)",
"def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')",
"def last_modified(self):\n last_changed_file = Session.query(sa.func.max(Entity.last_modified_date)).filter_by(project=self).first()[0]\n\n if last_changed_file:\n return max(self.last_modified_date, last_changed_file)\n \n return self.last_modified_date",
"def get_file_modified_date(filepath):\n return datetime.datetime.fromtimestamp(os.path.getmtime(filepath))",
"def last_modified(self):\n\n if not self._absuri:\n self._absuri = self._getcell('URI')\n\n if self._absuri is None:\n raise DataError('Cannot get file: does not exists')\n\n info = self._intf._get_head(self._absuri)\n return info['last-modified']",
"def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")",
"def get_modified_time(fname):\n return os.stat(fname).st_mtime",
"def get_file_mtime(path):\n return datetime.datetime.fromtimestamp(pathlib.Path(path).stat().st_mtime)",
"def file_last_modification_date(cls, filepath):\n try:\n timestamp = os.path.getmtime(filepath)\n return datetime.date.fromtimestamp(timestamp)\n except OSError as e:\n PyDuplicateLogger.exception(e)\n raise FileSystemException(e)",
"def get_latest_file(self, path, *paths):\n # print \"%%%%%%%%%%%%%\",path\n # fullpath = os.path.join(path, *paths)\n # # print \"############\",fullpath\n # list_of_files = glob.glob(fullpath) # You may use iglob in Python3\n # if not list_of_files: # I prefer using the negation\n # return None # because it behaves like a shortcut\n # latest_file = max(list_of_files, key=os.path.getctime)\n # # print \"@@@@@@@@@@@@\",latest_file\n # _, filename = os.path.split(latest_file)\n # return filename\n list_of_files = os.listdir(path)\n if not list_of_files:\n return None\n filename = [i for i in list_of_files if i.endswith('.ma') and not i.endswith('_tmp.ma')][-1]\n return filename",
"def compute_date_modified(\n self, extensions: Optional[Sequence[str]] = None\n ) -> datetime.datetime:\n dates_modified = []\n for git_file in self.files:\n if extensions and git_file.extension not in extensions:\n continue\n dates_modified.append(git_file.date_modified)\n\n return max(dates_modified)",
"def _mtime(filepath):\n from time import ctime\n try:\n return ctime(os.path.getmtime(filepath))\n except os.error:\n return ''",
"def get_last_modified() -> str:\n service = get_authenticated_service(\"drive\", \"v3\")\n response = (\n service.files().get(fileId=SPREADSHEET_ID, fields=\"modifiedTime\").execute()\n )\n return response[\"modifiedTime\"]",
"def convert_mtime_to_formatted_string(current_path):\n timestamp = datetime.fromtimestamp(path.getmtime(current_path))\n return datetime.strftime(timestamp, \"%Y%m%d%H%M%S\")",
"def get_recently_modified_scratch_file(settings):\n dir_contents = os.listdir(settings.location)\n full_paths = map(lambda f: os.path.join(settings.location, f), dir_contents)\n files = filter(lambda f: os.path.isfile(str(f)), full_paths)\n if not files:\n return \"\"\n files = sorted(files, key=_get_mtime)\n return files[-1]"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
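A usage sketch for `last_modified_date` above, under the same assumptions (function in scope, running inside a git checkout); the `--date=iso` flag yields strings like `2021-06-01 12:34:56 +0200`, which the sketch parses into a naive `datetime`.

# Usage sketch: assumes last_modified_date from the row above is in scope.
from datetime import datetime

raw = last_modified_date('docs/', 'README.md')
# Strip the UTC offset and parse the remaining "YYYY-MM-DD HH:MM:SS" part.
modified = datetime.strptime(raw.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S')
print(modified.isoformat())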
Return whether the given paths have been changed in the commit range Used to determine if a build is necessary | def path_touched(*paths, commit_range):
return check_output([
'git', 'diff', '--name-only', commit_range, '--', *paths
]).decode('utf-8').strip() != '' | [
"def check_if_changed():\n if not len(os.listdir(images_path)) == 0:\n current_staging_hashes = get_all_path_hashes(staging_path)\n head_path = get_wit_path(keyword=get_current_commit_id())\n head_hashes = get_all_path_hashes(head_path)\n else:\n return True\n changed = False\n if len(current_staging_hashes) != len(head_hashes):\n changed = True\n else:\n for staging_hash in current_staging_hashes:\n if staging_hash not in head_hashes:\n changed = True\n if changed:\n return True\n return False",
"def any_changed(sources, target):\r\n\r\n if not os.path.exists(target):\r\n return True\r\n\r\n target_time = os.path.getmtime(target)\r\n return any(target_time < getmtime(source) for source in sources)",
"def _has_code_changes(self) -> bool:\n for source in self.sources:\n if source.substantive_changes(self.build_dir.hash_cache):\n return True\n return False",
"def _ListContainsFilesModifiedByGazelle(paths):\n for path in paths:\n if path.endswith(\".go\") or path.endswith(\".proto\"):\n return True\n return False",
"def sources_modified(self) -> bool:\n try:\n own_m_time = self.m_time\n except FileNotFoundError:\n return True\n return any([own_m_time <= dep.m_time for dep in self.sources])",
"def changed(source, target):\r\n return (not os.path.exists(target) or\r\n os.path.getmtime(target) < os.path.getmtime(source))",
"def hasChanges(self, oprj, opkg, orev, tprj, tpkg):\n try:\n tsrcmd5 = self.getPackageChecksum(tprj, tpkg)\n except urllib2.HTTPError, e:\n if e.code == 404:\n return True\n else:\n raise\n osrcmd5 = self.getPackageChecksum(oprj, opkg, rev=orev)\n if osrcmd5 == tsrcmd5:\n return False\n return True",
"def is_uptodate(fns,refs):\n # make sure fns and refs are both lists...\n if isinstance(fns,basestring):\n fns=[fns];\n if isinstance(refs,basestring):\n refs=[refs];\n\n for fn in fns:\n for ref in refs:\n ok=os.path.exists(fn) and ((not os.path.exists(ref)) or (os.path.getctime(fn) > os.path.getctime(ref)));\n if not ok:\n return False;\n\n return True;",
"def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)",
"def check_files_committed(self):\n # staged but uncommitted\n uncommitted = self.repo.index.diff(self.current_hexsha)\n # unstaged changes\n unstaged = self.repo.index.diff(None)\n if uncommitted or unstaged:\n raise BuildError(\n 'There are uncommitted changes in the repo. Please stash or '\n 'commit before starting a new build.'\n )",
"def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)",
"def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False",
"def check_dependency_change(targets: List[str], dependencies: List[str]) -> bool:\n min_target_mtime = min([get_mtime(path) for path in targets])\n max_dep_mtime = max([get_mtime(path) for path in dependencies])\n return max_dep_mtime > min_target_mtime",
"def _code_has_changed(self,live_dirs,mtimes):\n for filepath in self._command._find_live_code_files(live_dirs):\n try:\n stat = os.stat(filepath)\n except EnvironmentError:\n continue\n if filepath not in mtimes:\n mtimes[filepath] = stat.st_mtime\n else:\n if mtimes[filepath] != stat.st_mtime:\n return True",
"def _file_has_changed(db_file, modified_files):\n file_path = os.path.join(DB_CACHE_FILEPATH, db_file)\n return file_path in modified_files",
"def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True",
"def IsModified(course):\n rawMod = ModTimeIfExists(\"raw/{}.html\".format(course))\n outMod = ModTimeIfExists(\"out/{}.html\".format(course))\n wordMod = ModTimeIfExists(\"word/{}.docx\".format(course))\n\n lastBuilt = min(rawMod, outMod, wordMod)\n\n if(lastBuilt == 0):\n return True\n\n # if any of these have changed since the last build, rebuild\n srcMod = ModTimeIfExists(\"courses/%s.md\" % (course,))\n cssMod = ModTimeIfExists(\"css/adelphi.css\")\n customCssMod = ModTimeIfExists(\"css/%s.css\" % (course,))\n tmplMod = ModTimeIfExists(\"tmpl/%s.html\" % (\"adelphi\",))\n rawTmplMod = ModTimeIfExists(\"tmpl/%s.html\" % (\"raw\",))\n footerMod = ModTimeIfExists(\"tmpl/%s.html\" % (\"footer\",))\n \n assert srcMod > 0\n assert tmplMod > 0\n assert rawTmplMod > 0\n \n #if the source files are 0, then we don't have them\n changes = [d for d in [srcMod,cssMod,customCssMod,tmplMod,rawTmplMod,footerMod] if d > 0]\n \n mostRecentChange = max(changes)\n\n return mostRecentChange > lastBuilt",
"def _check_changed(self):\n if self.repo.dirstate.parents()[1] != node.nullid:\n raise util.Abort(_('outstanding uncommitted merge'))\n\n if self.opts.get('upload') and self.useGlobal and not self.opts.get('only'):\n status = self.repo.status('.', None, _matchfiles(self.repo, ['.hgtags']))\n if max(status):\n raise util.Abort(_('outstanding uncommitted .hgtags'))",
"def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):\n for (nm, fnm, typ) in old:\n if mtime(fnm) > last_build:\n print \"building because %s changed\" % fnm\n return True\n elif pyc and mtime(fnm[:-1]) > last_build:\n print \"building because %s changed\" % fnm[:-1]\n return True\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
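A usage sketch for `path_touched` above; the watched paths and the commit range are illustrative, and the function is assumed to be in scope inside a git checkout.

# Usage sketch: assumes path_touched from the row above is in scope.
WATCHED_PATHS = ['images/hub/', 'chart/']

if path_touched(*WATCHED_PATHS, commit_range='origin/main...HEAD'):
    print('watched paths changed; a rebuild is needed')
else:
    print('no changes in the watched paths; skipping build')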
Get docker build args dict, rendering any templated args. | def render_build_args(options, ns):
build_args = options.get('buildArgs', {})
for key, value in build_args.items():
build_args[key] = value.format(**ns)
return build_args | [
"def render_build_args(image_options, ns):\n build_args = image_options.get('buildArgs', {})\n for key, value in build_args.items():\n build_args[key] = value.format(**ns)\n return build_args",
"def dockerargs(self) -> Dict:\n return (\n {'PIP_REQUIREMENTS': self.requirements}\n if self.build == DaemonBuild.DEVEL\n else {\n 'PIP_REQUIREMENTS': self.requirements,\n 'PY_VERSION': self.python.name.lower(),\n 'JINA_VERSION': self.jinav,\n }\n )",
"def docker_build_context(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"docker_build_context\")",
"def docker_build_context(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"docker_build_context\")",
"def read_dockerfile_for_args(target):\n import colorama\n build_args = {}\n missing_args = {}\n empty_string = \"\"\n\n # read dockerfile for args that have no value\n try:\n with open(target + '/Dockerfile') as dockerfile:\n for line in dockerfile:\n if line.startswith(\"ARG \"):\n dockerfile_args = line.replace(\n \"ARG \", \"\").strip(\"\\n\").split(\"=\")\n\n arg_name = dockerfile_args[0]\n arg_value = \"\"\n\n if len(dockerfile_args) > 1:\n arg_value = dockerfile_args[1].strip(\"\\n\")\n\n env_value = os.environ.get(arg_name)\n\n build_args[arg_name] = arg_value\n if not env_value is None:\n build_args[arg_name] = env_value\n\n if build_args[arg_name] is empty_string:\n missing_args[arg_name] = arg_name\n except FileNotFoundError:\n exit(f\"Dockerfile not found: {target}/Dockerfile\")\n\n if len(missing_args) > 1:\n message = \"WARNING: Arguments found with no defined value \" \\\n \"found in Dockerfile or environment [{}]\"\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT +\n message.format(\", \".join(missing_args)))\n\n return build_args",
"def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright_year\n else self.copyright_year\n ),\n \"github_owner\": self.github_owner,\n \"name\": self.name,\n \"slug\": self.slug,\n # The template expects the test cases in a single string separated by\n # spaces.\n \"test_cases\": \" \".join(self.test_cases),\n }\n cruft_json = self.target_dir / \".cruft.json\"\n if cruft_json.is_file():\n with open(cruft_json, \"r\", encoding=\"utf-8\") as f:\n cruft_json_data = json.load(f)\n args = cruft_json_data[\"context\"][\"cookiecutter\"]\n for k, v in local_args.items():\n args[k] = v\n else:\n args = local_args\n\n return args",
"def _get_docker_args(self, dagster_version: str, python_version: str) -> Dict[str, str]:\n with open(os.path.join(self.path, \"versions.yaml\"), \"r\", encoding=\"utf8\") as f:\n versions = yaml.safe_load(f.read())\n image_info = versions.get(python_version, {})\n\n docker_args = image_info.get(\"docker_args\", {})\n\n if \"base_image\" in image_info:\n check.invariant(\n \"BASE_IMAGE\" not in docker_args, \"Cannot override an existing BASE_IMAGE\"\n )\n\n base_image = DagsterDockerImage(\n image_info[\"base_image\"][\"name\"], images_path=self.images_path\n )\n source = image_info[\"base_image\"][\"source\"]\n\n if source == \"aws\":\n docker_args[\"BASE_IMAGE\"] = base_image.aws_image(python_version)\n elif source == \"local\":\n docker_args[\"BASE_IMAGE\"] = base_image.local_image(python_version)\n else:\n raise Exception(f\"Unrecognized source {source}\")\n\n # Set Dagster version\n docker_args[\"DAGSTER_VERSION\"] = dagster_version\n return docker_args",
"def _render_command_args(self, args, chevron_vars):\n retval = args\n\n if isinstance(args, str):\n retval = chevron.render(args, chevron_vars)\n elif isinstance(args, list):\n retval = []\n for i in args:\n retval.append(self._render_command_args(i, chevron_vars))\n elif isinstance(args, dict):\n retval = {}\n for k, v in args.items():\n retval[k] = self._render_command_args(v, chevron_vars)\n elif isinstance(args, FunctionArgument):\n args.args = self._render_command_args(args.args, chevron_vars)\n args.kwargs = self._render_command_args(args.kwargs, chevron_vars)\n\n return retval",
"def build_docker_build_command(configuration):\n parts = configuration.pop('docker', 'docker').split()\n parts.append('build')\n\n build = configuration.pop('build')\n\n build['path'] = os.path.join(configuration['workspace'], build['path'])\n build['file'] = os.path.join(build['path'], build['file'])\n\n parts.extend(build_parameter_parts(\n build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))\n\n parts.extend(build_dict_parameter_parts(build, 'build-arg'))\n parts.append(build.pop('path'))\n\n return parts",
"def build_render_context(context: 'Context'):\n # TODO: get_vars should be instantiated earlier...\n special_variables = get_vars(context)\n if context.tackle_gen == 'cookiecutter':\n render_context = {'cookiecutter': context.output_dict}\n render_context.update(special_variables)\n else:\n render_context = dict(\n context.output_dict, **{context.context_key: dict(context.output_dict)}\n )\n render_context.update(special_variables)\n return render_context",
"def handle_buildDocker(args):\n makeDocker(latest=args[\"latest\"])\n cmd = build_docker_build(latest=args[\"latest\"])\n run_subprocess(cmd)",
"def _get_context(data):\n try:\n docker_options = DockerRunCommandOptions(cmd=\"docker run --help\",\n start=\"Options:\",\n end=None).get_options_json()\n except Exception as ex:\n print(ex)\n docker_options = {}\n context = DEFAULT_DATA.copy()\n context[\"docker_options\"] = docker_options\n context.update(data)\n context[\"registry\"][\"address_select\"] = \"\"\n if context[\"registry\"][\"address\"] in context[\"registry_options\"].keys():\n context[\"registry\"][\"address_select\"] = context[\"registry\"][\"address\"]\n return context",
"def docker_params(self):\n return {}",
"def _render_args(self, target, output_dir):\n args = []\n\n # Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.\n # : 'package' is the main aapt operation (see class docstring for more info).\n # : '-m' is to \"make\" a package directory under location '-J'.\n # : '-J' Points to the output directory.\n # : '-M' is the AndroidManifest.xml of the project.\n # : '-S' points to the resource_dir to \"spider\" down while collecting resources.\n # : '-I' packages to add to base \"include\" set, here it is the android.jar of the target-sdk.\n args.extend([self.aapt_tool(target.build_tools_version)])\n args.extend(['package', '-m', '-J', output_dir])\n args.extend(['-M', target.manifest.path])\n args.extend(['-S', target.resource_dir])\n args.extend(['-I', self.android_jar_tool(target.manifest.target_sdk)])\n args.extend(['--ignore-assets', self.ignored_assets])\n logger.debug('Executing: {0}'.format(' '.join(args)))\n return args",
"def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )",
"def feedstock_args(self):\n build_args = [\"--working_directory\", self.repository]\n\n if self.channels:\n for channel in self.channels:\n build_args += [\"--channels\", channel]\n\n if self.python:\n build_args += [\"--python_versions\", self.python]\n if self.build_type:\n build_args += [\"--build_types\", self.build_type]\n if self.mpi_type:\n build_args += [\"--mpi_types\", self.mpi_type]\n if self.cudatoolkit:\n build_args += [\"--cuda_versions\", self.cudatoolkit]\n\n\n if self.recipe:\n build_args += [\"--recipes\", self.recipe]\n\n return build_args",
"def task_prod():\n return {\n 'actions': [\"docker run -p 8080:8080 %s python3 -m %s %s\" % (IMAGE, PACKAGE_PATH, \"%(args)s\")],\n 'task_dep': [\"build\"],\n 'params': PARAMS\n }",
"def build(parser):\n parser.add_argument(\n '-i', '--identity-file',\n help=(\n 'A SSH private key file which may be used to pull down '\n 'repositories when building.'\n ),\n )\n parser.add_argument(\n '-e', '--env',\n action='append',\n default=[],\n help=(\n 'Add environ variables to the build. These may be accessed in '\n 'the build scripts. Each variable should be of the format '\n 'KEY=VALUE. This may be used to pass in credentials required '\n 'to access private repositories. May be specified more than once.'\n ),\n )\n parser.add_argument(\n '-b', '--build-dir',\n default=os.getcwd(),\n help=(\n 'This folder should be accessible from the docker instance.'\n ),\n )\n parser.add_argument(\n '--archive',\n help=(\n 'Archive the build files into a local tarball.'\n ),\n )\n parser.add_argument(\n '--archive-only',\n action='store_true',\n default=False,\n help=(\n 'Skip tagging and building the runner image.'\n ),\n )\n parser.add_argument(\n '-t', '--tag',\n help=(\n 'Tag to apply to the built image. '\n 'This will default to the current date/time.'\n ),\n )\n parser.add_argument(\n '--no-cache',\n dest='use_cache',\n action='store_false',\n default=True,\n help=(\n 'Do not mount a cache volume when compiling the app.'\n ),\n )\n parser.add_argument(\n '--cache',\n metavar='CONTAINER:PATH',\n help=(\n 'An optional volume or location for the cache. The format is '\n '\"<volume_id>:<path>\" where the \"volume_id\" must be the '\n 'name or hash of an existing volume. The \"path\" is an absolute '\n 'path to the cache folder/volume within the build container.'\n '\\n\\n'\n 'By default a container will be created by mangling the name of '\n 'the app by appending \"__buildcache\" (e.g. \"myapp__buildcache\").'\n '\\n\\n'\n 'This option is ignored if --no-cache is specified.'\n '\\n\\n'\n 'The \"volume_id\" may be an absolute path on the host filesystem.'\n '\\n\\n'\n 'The \"path\" may be dropped, in which case it will default to '\n '/tmp/cache inside the build container.'\n '\\n\\n'\n 'Examples:'\n '\\n\\n'\n ' # custom volume with default path\\n'\n ' --cache my_cache'\n '\\n\\n'\n ' # custom path inside of volume\\n'\n ' --cache my_cache:/tmp/cache'\n '\\n\\n'\n ' # host filesystem\\n'\n ' --cache /tmp/cache'\n ),\n )\n parser.add_argument(\n '--rebuild-cache',\n action='store_true',\n default=False,\n help=(\n 'Delete any cached artifacts prior to building.'\n ),\n )\n parser.add_argument(\n '--skip-cleanup',\n action='store_true',\n default=False,\n help=(\n 'Skip removal of images and containers.'\n ),\n )\n parser.add_argument(\n 'app',\n help=(\n 'Path to an application folder with a meta.yml file'\n ),\n )",
"def _get_args_contents(self):\n return json.dumps(self.args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
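A self-contained usage sketch for `render_build_args` above (the option keys and template fields are illustrative); note that, as written, the function also updates the `buildArgs` dict inside `options` in place.

# Usage sketch: assumes render_build_args from the row above is in scope.
options = {
    'buildArgs': {
        'BASE_IMAGE': 'ubuntu:{tag}',
        'COMMIT': '{commit}',
    },
}
ns = {'tag': '22.04', 'commit': 'abc1234'}

print(render_build_args(options, ns))
# {'BASE_IMAGE': 'ubuntu:22.04', 'COMMIT': 'abc1234'}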
Cached getter for docker client | def docker_client():
return docker.from_env() | [
"async def get_docker_client(self) -> \"DockerClient\":",
"def docker(self) -> DockerClient:\n return self.__docker",
"def docker_client() -> DockerClient:\n return docker.from_env()",
"def highlevel_docker_client(self):\n \n return utils.init_docker_client(self.client_kwargs, self.tls_config)",
"def get_docker_client():\n return docker.from_env(version=DOCKER_API_VERSION)",
"def dockerClientLowLevel():\n cli = MagicMock()\n class Items(object):\n def __init__(self, dct):\n self.dct = dct\n\n def get(self, key):\n ret = self.dct.get(key)\n if ret is None:\n raise docker.errors.NotFound(key)\n return ret\n \n cli.images = Items({'sha256:12345': 'animage'})\n cli.containers = Items({'12347': 'acontainer'})\n cli.networks = Items({'12349': 'anetwork'})\n cli.volumes = Items({'12351': 'avolume'})\n cli.plugins = Items({\"vieux/sshfs\": 'aplugin'})\n return cli",
"def get_docker(self):\n self._refresh_cache()\n if not self._msg.docker_metadata.request_port:\n raise ValueError(\"Docker image information has not been logged\")\n\n return DockerImage._from_model_ver_proto(self._msg)",
"def client():\n if config:\n # Check if memcached is configured in sdk.yaml file\n if config.get(\"memcached\") and len(config.get(\"memcached\")) > 0:\n cache = []\n for node in config.get(\"memcached\"):\n cache.append([node.split(\":\")[0], node.split(\":\")[1]])\n\n if len(config.get(\"memcached\")) > 1:\n nodes = map(lambda x: (x[0], int(x[1])), cache)\n\n return hash.HashClient(nodes)\n\n node = config.get(\"memcached\")[0]\n node = (node.split(\":\")[0], int(node.split(\":\")[1]))\n\n return base.Client(node)\n\n return False",
"def get(cls: type, name: str = None) -> Redis:\n if name is None:\n return cls.default()\n if name not in cls._clients:\n raise KeyError(f'Redis client {name} not found')\n return cls._clients[name]",
"def get_client() :\n global _redis_client\n if _redis_client: return _redis_client\n else :\n _redis_client = redis.StrictRedis(\n host=os.environ.get('REDIS_HOST', 'localhost'),\n port=int(os.environ.get('REDIS_PORT', '6000')),\n db=0\n )\n return _redis_client",
"def redis_client(self) -> Redis:\n return self.app.key_value_store.redis_client",
"def version(self):\n try:\n res = self.client.version()\n except docker.errors.APIError as e:\n raise DockerError(\"Failed to query docker version: %s\" % e)\n return res",
"def docker_client_api_version(self) -> Optional[str]:\n return self.__docker_client_api_version",
"def _connect_docker_client(self):\n # lets check if Docker ENV information is set and use local socket as fallback\n if os.environ.get(\"DOCKER_HOST\") is None:\n os.environ[\"DOCKER_HOST\"] = \"unix://var/run/docker.sock\"\n LOG.warning(\"ENV variable 'DOCKER_HOST' not set. Using %r as fallback.\" % os.environ[\"DOCKER_HOST\"])\n\n # lets connect to the Docker instance specified in current ENV\n # cf.: http://docker-py.readthedocs.io/en/stable/machine/\n dc = docker.from_env(assert_hostname=False)\n # do a call to ensure that we are connected\n dc.info()\n LOG.info(\"Connected to Docker host: %r\" % dc.base_url)\n return dc",
"def get_docker_client(addr):\n host = 'tcp://%s' % addr\n ip = addr.split(':', 1)[0]\n\n os.environ['DOCKER_HOST'] = host\n if DOCKER_CERT_PATH:\n os.environ['DOCKER_TLS_VERIFY'] = '1'\n os.environ['DOCKER_CERT_PATH'] = os.path.join(DOCKER_CERT_PATH, ip)\n\n client = docker.Client(**kwargs_from_env(assert_hostname=False))\n\n if DOCKER_REGISTRY_USERNAME:\n client.login(\n DOCKER_REGISTRY_USERNAME,\n password=DOCKER_REGISTRY_PASSWORD,\n email=DOCKER_REGISTRY_EMAIL,\n registry=DOCKER_REGISTRY_URL,\n )\n return client",
"def from_config():\n return DockerService(settings.DOCKER_HOST, settings.PROXY_NETWORK)",
"async def login(self) -> \"DockerClient\":",
"def get_client():\n\n return MongoClientManager().client",
"def _get_container(self):\n raise NotImplementedError"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
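The query above asks for a *cached* getter, but the stored document returns a fresh client on every call. A minimal sketch of one common way to add the caching (an assumption, not taken from the source) is `functools.lru_cache`:

```python
from functools import lru_cache

import docker


@lru_cache(maxsize=None)
def docker_client():
    """Cached getter for a docker client: repeated calls reuse one connection."""
    return docker.from_env()
```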
Return whether an image needs pushing | def image_needs_pushing(image):
d = docker_client()
try:
d.images.get_registry_data(image)
except docker.errors.APIError:
# image not found on registry, needs pushing
return True
else:
return False | [
"def isImageAvailable(self) -> bool:\n if not self.GUIFeatures:\n if self.imageReturnQueue.empty():\n return False\n else:\n return True\n\n else:\n if len(self.images)==0:\n return False\n else:\n return True",
"def hasImage(self):\n return self._image is not None",
"def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]",
"def image_needs_building(image):\n d = docker_client()\n\n # first, check for locally built image\n try:\n d.images.get(image)\n except docker.errors.ImageNotFound:\n # image not found, check registry\n pass\n else:\n # it exists locally, no need to check remote\n return False\n\n # image may need building if it's not on the registry\n return image_needs_pushing(image)",
"def hasImages(self):\n return len(self.getImages()) > 0",
"def filter_push(move: dict):\n if move.get(\"pushes\") > 0:\n return True\n else:\n return False",
"def hasPileup(self):\n return self._putype is not None",
"def should_send_image(id):\n global IMAGE_FRAMES\n global CACHE\n global SHOULD_LOG\n return should_send(id, IMAGE_FRAMES, CACHE, SHOULD_LOG)",
"def can_push(self) -> bool:\n return pulumi.get(self, \"can_push\")",
"def request_image(self, source, connection):\n try:\n self.__image_queue.put_nowait((source, connection))\n return True\n except Queue.Full:\n return False",
"def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True",
"def is_currently_prefetching_any_images(self):\n return len(os.listdir(self.prefetching_path)) > 0",
"def has_media(self):\r\n if self.image:\r\n return True\r\n return False",
"def is_image(pos, image, start_pos, dim_square):\n # Grab image on real board\n im = region_grabber((start_pos[0] + pos[1] * dim_square[0],\n start_pos[1] - (pos[0] + 1.0) * dim_square[1],\n start_pos[0] + (pos[1] + 1.0) * dim_square[0],\n start_pos[1] - pos[0] * dim_square[1]))\n\n pos_image = imagesearcharea(image, 0, 0, 0, 0, 0.9, im)\n return pos_image != [-1, -1]",
"def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None",
"def has_jpeg_preview(self) -> bool:\n return True",
"def images_exist(self):\n pass",
"def push_image(self, tag_list, push_to_defaults, additional_registries=[], version_release_tuple=None,\n push_late=False, dry_run=False):\n\n # Late pushes allow certain images to be the last of a group to be\n # pushed to mirrors. CI/CD systems may initiate operations based on the\n # update a given image and all other images need to be in place\n # when that special image is updated. The special images are there\n # pushed \"late\"\n # Actions that need to push all images need to push all images\n # need to make two passes/invocations of this method: one\n # with push_late=False and one with push_late=True.\n\n is_late_push = False\n if self.config.push.late is not Missing:\n is_late_push = self.config.push.late\n\n if push_late != is_late_push:\n return True\n\n push_names = []\n\n if push_to_defaults:\n push_names.extend(self.metadata.get_default_push_names())\n\n push_names.extend(self.metadata.get_additional_push_names(additional_registries))\n\n # Nothing to push to? We are done.\n if not push_names:\n return True\n\n with Dir(self.distgit_dir):\n\n if version_release_tuple:\n version = version_release_tuple[0]\n release = version_release_tuple[1]\n else:\n\n # History\n # We used to rely on the \"release\" label being set in the Dockerfile, but this is problematic for several reasons.\n # (1) If 'release' is not set, OSBS will determine one automatically that does not conflict\n # with a pre-existing image build. This is extremely helpful since we don't have to\n # worry about bumping the release during refresh images. This means we generally DON'T\n # want the release label in the file and can't, therefore, rely on it being there.\n # (2) People have logged into distgit before in order to bump the release field. This happening\n # at the wrong time breaks the build.\n\n # If the version & release information was not specified,\n # try to detect latest build from brew.\n # Read in version information from the Distgit dockerfile\n _, version, release = self.metadata.get_latest_build_info()\n\n try:\n record = {\n \"distgit_key\": self.metadata.distgit_key,\n \"distgit\": '{}/{}'.format(self.metadata.namespace, self.metadata.name),\n \"image\": self.config.name,\n \"version\": version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"status\": -1,\n # Status defaults to failure until explicitly set by success. 
This handles raised exceptions.\n }\n\n # pull just the main image name first\n image_name_and_version = \"%s:%s-%s\" % (self.config.name, version, release)\n brew_image_url = \"/\".join((constants.BREW_IMAGE_HOST, image_name_and_version))\n pull_image(brew_image_url)\n record['message'] = \"Successfully pulled image\"\n record['status'] = 0\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.logger.info(\"Error pulling %s: %s\" % (self.metadata.name, err))\n raise\n finally:\n self.runtime.add_record('pull', **record)\n\n push_tags = list(tag_list)\n\n # If no tags were specified, build defaults\n if not push_tags:\n push_tags = self.metadata.get_default_push_tags(version, release)\n\n for image_name in push_names:\n try:\n\n repo = image_name.split('/', 1)\n\n action = \"push\"\n record = {\n \"distgit_key\": self.metadata.distgit_key,\n \"distgit\": '{}/{}'.format(self.metadata.namespace, self.metadata.name),\n \"repo\": repo, # ns/repo\n \"name\": image_name, # full registry/ns/repo\n \"version\": version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"tags\": \", \".join(push_tags),\n \"status\": -1,\n # Status defaults to failure until explicitly set by success. This handles raised exceptions.\n }\n\n for push_tag in push_tags:\n push_url = '{}:{}'.format(image_name, push_tag)\n\n if dry_run:\n rc = 0\n self.logger.info('Would have tagged {} as {}'.format(brew_image_url, push_url))\n self.logger.info('Would have pushed {}'.format(push_url))\n else:\n rc, out, err = exectools.cmd_gather([\"docker\", \"tag\", brew_image_url, push_url])\n\n if rc != 0:\n # Unable to tag the image\n raise IOError(\"Error tagging image as: %s\" % push_url)\n\n for r in range(10):\n self.logger.info(\"Pushing image to mirror [retry=%d]: %s\" % (r, push_url))\n rc, out, err = exectools.cmd_gather([\"docker\", \"push\", push_url])\n if rc == 0:\n break\n self.logger.info(\"Error pushing image -- retrying in 60 seconds\")\n time.sleep(60)\n\n if rc != 0:\n # Unable to push to registry\n raise IOError(\"Error pushing image: %s\" % push_url)\n\n record[\"message\"] = \"Successfully pushed all tags\"\n record[\"status\"] = 0\n\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.logger.info(\"Error pushing %s: %s\" % (self.metadata.name, err))\n raise\n\n finally:\n self.runtime.add_record(action, **record)\n\n return True",
"def hasOnePushDC1394(self, *args):\n return _yarp.IFrameGrabberControlsDC1394_hasOnePushDC1394(self, *args)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
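A short, hypothetical usage of `image_needs_pushing()` from the row above. The image reference is invented, and a reachable Docker daemon plus push credentials for the registry are assumed.

```python
# Hypothetical image reference; requires the functions shown above, a running
# Docker daemon, and credentials for the target registry.
image = 'quay.io/example/hub:1.2.3'

if image_needs_pushing(image):
    print(f'{image} not found on the registry, pushing')
    docker_client().images.push(image)
else:
    print(f'{image} is already published, skipping push')
```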
Return whether an image needs building Checks if the image exists (ignores commit range), either locally or on the registry. | def image_needs_building(image):
d = docker_client()
# first, check for locally built image
try:
d.images.get(image)
except docker.errors.ImageNotFound:
# image not found, check registry
pass
else:
# it exists locally, no need to check remote
return False
# image may need building if it's not on the registry
return image_needs_pushing(image) | [
"def image_needs_pushing(image):\n d = docker_client()\n try:\n d.images.get_registry_data(image)\n except docker.errors.APIError:\n # image not found on registry, needs pushing\n return True\n else:\n return False",
"def check_image(self, tag):\n image_name = self.build_image_name(tag)\n try:\n self.client.images.get_registry_data(image_name)\n return True\n except Exception as ex:\n print('Image {} does not exist: '.format(image_name), str(ex))\n return False",
"def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True",
"def _image_exists_locally(docker_client: docker.DockerClient, image_name: str) -> bool:\n\n try:\n docker_client.images.get(image_name)\n except docker.errors.ImageNotFound:\n return False\n else:\n return True",
"def check_molns_image(self):\n if 'molns_image_name' in self.config and self.config['molns_image_name'] is not None \\\n and self.config['molns_image_name'] != '':\n return self.docker.image_exists(self.config['molns_image_name'])\n return False",
"def is_available_skopeo_image(self, image, registry, task_vars):\n\n cmd_str = \"skopeo inspect docker://{registry}/{image}\".format(\n registry=registry,\n image=image,\n )\n\n args = {\"_raw_params\": cmd_str}\n result = self.module_executor(\"command\", args, task_vars)\n return not result.get(\"failed\", False) and result.get(\"rc\", 0) == 0",
"def req_build(container):\n try:\n return 'dockerfile' in self.kard.env.get_container(container)\n except KeyError:\n return False",
"def isImageAvailable(self) -> bool:\n if not self.GUIFeatures:\n if self.imageReturnQueue.empty():\n return False\n else:\n return True\n\n else:\n if len(self.images)==0:\n return False\n else:\n return True",
"def verify_image_exists():\n if (os.path.exists(imagePath.get())):\n valid_image = 1\n else:\n valid_image = 0\n return valid_image",
"def is_image_local(self, image):\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")",
"def hasImage(self):\n return self._image is not None",
"def is_image_exists(c, name):\n res = c.run('sudo docker images', hide='stdout')\n for image in res.stdout.split('\\n'):\n if name == image.split(' ')[0]:\n print('Image {name} exists'.format(name=name))\n return True\n\n print('Image {name} doesn\\'t exist'.format(name=name))\n return False",
"def image_exists_on_dockerhub(image_name: str, image_tag: str) -> bool:\n url = (\n \"https://auth.docker.io/token?scope=repository:\"\n f\"{image_name}:pull&service=registry.docker.io\"\n )\n res = requests.get(url=url)\n res.raise_for_status()\n token = res.json()[\"token\"]\n res = requests.get(\n url=f\"https://registry-1.docker.io/v2/{image_name}/manifests/{image_tag}\",\n headers={\n \"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\",\n \"Authorization\": f\"Bearer {token}\",\n },\n )\n return res.status_code == 200",
"def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()",
"def check_image(request):\n system = request['system']\n if system not in CONFIG['Platforms']:\n raise KeyError('%s is not in the configuration' % system)\n sysconf = CONFIG['Platforms'][system]\n\n fmt = get_image_format(request)\n image_filename = \"%s.%s\" % (request['id'], fmt)\n image_metadata = \"%s.meta\" % (request['id'])\n\n return transfer.imagevalid(sysconf, image_filename, image_metadata,\n logging)",
"def _isready(self, image):\n query = {\n 'status': 'READY',\n 'system': image['system'],\n 'itype': image['itype'],\n 'tag': {'$in': [image['tag']]}\n }\n rec = self._images_find_one(query)\n if rec is not None:\n return True\n return False",
"def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]",
"def images_exist(self):\n pass",
"def is_available_skopeo_image(self, image, default_registries):\n registries = default_registries\n\n # If image already includes a registry, only use that.\n # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.\n # registry.access.redhat.com/rhel7 as if the registry were a namespace.\n # It's not clear that there's any way to distinguish them, but fortunately\n # the current set of images all look like [registry/]namespace/name[:version].\n if image.count(\"/\") > 1:\n registry, image = image.split(\"/\", 1)\n registries = [registry]\n\n for registry in registries:\n if registry not in self.reachable_registries:\n self.reachable_registries[registry] = self.connect_to_registry(registry)\n if not self.reachable_registries[registry]:\n continue\n\n args = {\"_raw_params\": self.skopeo_img_check_command.format(registry=registry, image=image)}\n result = self.execute_module_with_retries(\"command\", args)\n if result.get(\"rc\", 0) == 0 and not result.get(\"failed\"):\n return True\n if result.get(\"rc\") == 124: # RC 124 == timed out; mark unreachable\n self.reachable_registries[registry] = False\n\n return False"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
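Taken together, the two helpers above implement a check-before-build/push flow. A sketch of how they can be combined; the image tag and build context are assumptions, not taken from the source.

```python
# Assumed image tag and build context (a directory containing a Dockerfile).
image = 'example/binder-env:abc1234'

if image_needs_building(image):
    client = docker_client()
    client.images.build(path='.', tag=image)   # build from the local context
    if image_needs_pushing(image):
        client.images.push(image)              # publish only if absent remotely
else:
    print(f'{image} already exists locally or on the registry')
```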
Update name/values.yaml with modifications | def build_values(name, values_mods):
values_file = os.path.join(name, 'values.yaml')
with open(values_file) as f:
values = yaml.load(f)
for key, value in values_mods.items():
parts = key.split('.')
mod_obj = values
for p in parts:
mod_obj = mod_obj[p]
print(f"Updating {values_file}: {key}: {value}")
if isinstance(mod_obj, MutableMapping):
keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()
if keys:
for key in keys:
mod_obj[key] = value['repository']
else:
possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)
raise KeyError(
f'Could not find {possible_keys} in {values_file}:{key}'
)
mod_obj['tag'] = value['tag']
else:
raise TypeError(
f'The key {key} in {values_file} must be a mapping.'
)
with open(values_file, 'w') as f:
yaml.dump(values, f) | [
"def _write_values(self, app_name, chart_dir, values):\n\n data = self._get_values(app_name, chart_dir)\n new_data = {**data, **values}\n new_raw = yaml.dump(new_data)\n\n values_path = \"%s/%s/values.yaml\" % (chart_dir, app_name)\n with open(values_path, mode=\"w\") as values_file:\n values_file.write(new_raw)",
"def build_values(name, values_mods):\n values_file = os.path.join(name, 'values.yaml')\n\n with open(values_file) as f:\n values = yaml.load(f)\n\n for key, value in values_mods.items():\n if not isinstance(value, dict) or set(value.keys()) != {'repository', 'tag'}:\n raise ValueError(f\"I only understand image updates with 'repository', 'tag', not: {value!r}\")\n parts = key.split('.')\n mod_obj = parent = values\n for p in parts:\n if p.isdigit():\n # integers are indices in lists\n p = int(p)\n parent = mod_obj\n mod_obj = mod_obj[p]\n last_part = p\n\n if isinstance(mod_obj, MutableMapping):\n keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys()\n if keys:\n for repo_key in keys:\n before = mod_obj.get(repo_key, None)\n if before != value['repository']:\n print(f\"Updating {values_file}: {key}.{repo_key}: {value}\")\n mod_obj[repo_key] = value['repository']\n else:\n possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS)\n raise KeyError(\n f'Could not find {possible_keys} in {values_file}:{key}'\n )\n\n before = mod_obj.get('tag', None)\n if before != value['tag']:\n print(f\"Updating {values_file}: {key}.tag: {value}\")\n mod_obj['tag'] = value['tag']\n elif isinstance(mod_obj, str):\n # scalar image string, not dict with separate repository, tag keys\n image = \"{repository}:{tag}\".format(**value)\n try:\n before = parent[last_part]\n except (KeyError, IndexError):\n before = None\n if before != image:\n print(f\"Updating {values_file}: {key}: {image}\")\n parent[last_part] = image\n else:\n raise TypeError(\n f'The key {key} in {values_file} must be a mapping or string, not {type(mod_obj)}.'\n )\n\n\n with open(values_file, 'w') as f:\n yaml.dump(values, f)",
"def update_from_yaml(\n self, path: str = join(\"config\", \"hdx_resource_static.yml\")\n ) -> None:\n super().update_from_yaml(path)",
"def test_set_a_value_in_simple_yaml_file():",
"def updateBuckconfigWithDict(self, values):\n for section, kvps in values.items():\n for key, value in kvps.items():\n self.buckconfig[section][key] = value",
"def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value",
"def update_default_versions(self):\n new_default_dict = {'gaits': self.gait_version_map}\n\n try:\n with open(self.default_yaml, 'w') as default_yaml_content:\n yaml_content = yaml.dump(new_default_dict)\n default_yaml_content.write(yaml_content)\n return [True, 'New default values were written to: {pn}'.format(pn=self.default_yaml)]\n\n except IOError:\n return [False, 'Error occurred when writing to file path: {pn}'.format(pn=self.default_yaml)]",
"def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()",
"def copyYaml(filename, newName):\n data = loadYaml(filename)\n saveYaml(newName, data)",
"def update(self, plugin, context, vnf_id, vnf_dict, vnf, auth_attr):\n # initialize Kubernetes APIs\n auth_cred, file_descriptor = self.get_auth_creds(auth_attr)\n try:\n core_v1_api_client = \\\n self.kubernetes.get_core_v1_api_client(auth=auth_cred)\n\n # update config attribute\n config_yaml = vnf_dict.get('attributes', {}).get('config', '')\n update_yaml = vnf['vnf'].get('attributes', {}).get('config', '')\n LOG.debug('yaml orig %(orig)s update %(update)s',\n {'orig': config_yaml, 'update': update_yaml})\n # If config_yaml is None, yaml.safe_load() will raise Attribute\n # Error. So set config_yaml to {}, if it is None.\n if not config_yaml:\n config_dict = {}\n else:\n config_dict = yaml.safe_load(config_yaml) or {}\n update_dict = yaml.safe_load(update_yaml)\n if not update_dict:\n return\n LOG.debug('dict orig %(orig)s update %(update)s',\n {'orig': config_dict, 'update': update_dict})\n utils.deep_update(config_dict, update_dict)\n LOG.debug('dict new %(new)s update %(update)s',\n {'new': config_dict, 'update': update_dict})\n new_yaml = yaml.safe_dump(config_dict)\n vnf_dict.setdefault('attributes', {})['config'] = new_yaml\n\n deployment_info = vnf_id.split(\",\")\n for i in range(0, len(deployment_info), 2):\n namespace = deployment_info[i]\n deployment_name = deployment_info[i + 1]\n configmap_resp = core_v1_api_client.read_namespaced_config_map(\n namespace=namespace,\n name=deployment_name)\n configmap_data = configmap_resp.data\n new_configmap = {key: update_dict.get(key, configmap_data[key])\n for key in configmap_data}\n configmap_resp.data = new_configmap\n core_v1_api_client.\\\n patch_namespaced_config_map(namespace=namespace,\n name=deployment_name,\n body=configmap_resp)\n except Exception as e:\n LOG.error('Updating VNF got an error due to %s', e)\n raise\n finally:\n self.clean_authenticate_vim(auth_cred, file_descriptor)",
"def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)",
"def patch(yaml_file):\n svdtools.patch.main(yaml_file)",
"def write_yaml(self, filename=\"modulation.yaml\"):\n self._write_yaml(filename=filename)",
"def update_settings(value_type, new_value):\n data = get_settings()\n new_values = {\n value_type: new_value\n }\n data.update(new_values)\n\n\n with open('settings.json', 'w') as settings:\n json.dump(data, settings, indent=4)",
"def update_rosdistro_yaml(stack_name, version, distro_file):\n if not os.path.exists(distro_file):\n raise ReleaseException(\"[%s] does not exist\"%distro_file)\n\n with open(distro_file) as f:\n d = [d for d in yaml.load_all(f.read())]\n if len(d) != 1:\n raise ReleaseException(\"found more than one release document in [%s]\"%distro_file)\n d = d[0]\n\n distro_d = d\n if not 'stacks' in d:\n d['stacks'] = {}\n d = d['stacks']\n if not stack_name in d:\n d[stack_name] = {}\n d = d[stack_name]\n # set the version key, assume not overriding properties\n d['version'] = str(version)\n\n print \"Writing new release properties to [%s]\"%distro_file\n with open(distro_file, 'w') as f:\n f.write(yaml.safe_dump(distro_d))",
"def update_values(self, config, dest):\n for section in config.keys():\n if section in dest:\n for option in config[section].keys():\n if option in (\"desc\", \"outline\"):\n continue\n\n if option in dest[section]:\n dest[section][option][\"value\"] = config[section][option][\n \"value\"\n ]\n\n # else:\n # dest[section][option] = config[section][option]\n\n # else:\n # dest[section] = config[section]",
"def update_model_config(key_path: List[str], value: object) -> None:\n fname = PROJECT_DIR / \"sg_covid_impact\" / \"model_config.yaml\"\n lock = FileLock(str(fname) + \".lock\")\n with lock:\n # Read existing config\n with open(fname, \"r\") as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n # Update\n config_ = t.assoc_in(config, key_path, value)\n # Write\n with open(fname, \"w\") as f:\n f.write(yaml.dump(config_, default_flow_style=False))",
"def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)",
"def update_zookeeper_config(self, config_name, values, node_id=None, flush=False):\n nodes = self.nodes.keys() if not node_id else [node_id]\n\n for node_id in nodes:\n zoo_configs = self.nodes[node_id]['zoo_configs']\n\n if config_name in zoo_configs:\n if isinstance(values, dict):\n zoo_configs.get(config_name).update(values)\n if isinstance(values, list):\n zoo_configs[config_name]['values'] += values\n\n if flush:\n self._write_config_file(config_name, node_id)\n else:\n raise ZooException('Config name {} is not allowed config. Allowed configs are: zoo.cfg, env.cfg'\n .format(config_name))"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
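A self-contained sketch of the dotted-path update that `build_values()` above applies to values.yaml, using an in-memory dict instead of a file. The chart layout and the value of `IMAGE_REPOSITORY_KEYS` are assumptions for illustration; the real constant is defined elsewhere in that repository.

```python
# In-memory stand-in for <chart>/values.yaml; keys are invented for illustration.
values = {'singleuser': {'image': {'name': 'example/notebook', 'tag': 'old'}}}

# One modification entry: dotted path -> new image repository and tag.
values_mods = {'singleuser.image': {'repository': 'example/notebook',
                                    'tag': 'sha-deadbeef'}}

IMAGE_REPOSITORY_KEYS = {'name', 'repository'}  # assumed value of the constant

for key, value in values_mods.items():
    mod_obj = values
    for part in key.split('.'):        # walk the dotted path
        mod_obj = mod_obj[part]
    for repo_key in IMAGE_REPOSITORY_KEYS & mod_obj.keys():
        mod_obj[repo_key] = value['repository']
    mod_obj['tag'] = value['tag']

print(values)
# {'singleuser': {'image': {'name': 'example/notebook', 'tag': 'sha-deadbeef'}}}
```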
Publish helm chart index to github pages | def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
version = last_modified_commit(*paths)
checkout_dir = '{}-{}'.format(name, version)
check_call([
'git', 'clone', '--no-checkout',
git_remote(git_repo), checkout_dir],
echo=False,
)
check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
# package the latest version into a temporary directory
# and run helm repo index with --merge to update index.yaml
# without refreshing all of the timestamps
with TemporaryDirectory() as td:
check_call([
'helm', 'package', name,
'--destination', td + '/',
])
check_call([
'helm', 'repo', 'index', td,
'--url', published_repo,
'--merge', os.path.join(checkout_dir, 'index.yaml'),
])
# equivalent to `cp td/* checkout/`
# copies new helm chart and updated index.yaml
for f in os.listdir(td):
shutil.copy2(
os.path.join(td, f),
os.path.join(checkout_dir, f)
)
check_call(['git', 'add', '.'], cwd=checkout_dir)
if extra_message:
extra_message = '\n\n%s' % extra_message
else:
extra_message = ''
check_call([
'git',
'commit',
'-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
], cwd=checkout_dir)
check_call(
['git', 'push', 'origin', 'gh-pages'],
cwd=checkout_dir,
) | [
"def publish_pages(chart_name, chart_version, chart_repo_github_path, chart_repo_url, extra_message=''):\n\n # clone the Helm chart repo and checkout its gh-pages branch,\n # note the use of cwd (current working directory)\n checkout_dir = '{}-{}'.format(chart_name, chart_version)\n check_call(\n [\n 'git', 'clone', '--no-checkout',\n git_remote(chart_repo_github_path),\n checkout_dir,\n ],\n # warning: if echoed, this call could reveal the github token\n echo=True,\n )\n check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir, echo=True)\n\n # package the latest version into a temporary directory\n # and run helm repo index with --merge to update index.yaml\n # without refreshing all of the timestamps\n with TemporaryDirectory() as td:\n check_call([\n 'helm', 'package', chart_name,\n '--dependency-update',\n '--destination', td + '/',\n ])\n\n check_call([\n 'helm', 'repo', 'index', td,\n '--url', chart_repo_url,\n '--merge', os.path.join(checkout_dir, 'index.yaml'),\n ])\n\n # equivalent to `cp td/* checkout/`\n # copies new helm chart and updated index.yaml\n for f in os.listdir(td):\n shutil.copy2(\n os.path.join(td, f),\n os.path.join(checkout_dir, f)\n )\n\n # git add, commit, and push\n extra_message = f'\\n\\n{extra_message}' if extra_message else ''\n message = f'[{chart_name}] Automatic update for commit {chart_version}{extra_message}'\n\n check_call(['git', 'add', '.'], cwd=checkout_dir)\n check_call(['git', 'commit', '-m', message], cwd=checkout_dir)\n check_call(['git', 'push', 'origin', 'gh-pages'], cwd=checkout_dir)",
"def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")",
"def publish_docs():\n\n if shell('git diff-index --quiet HEAD --', check=False).status_code != 0:\n shell('git status')\n raise EnvironmentError('The working directory is dirty. Please commit any pending changes.')\n\n if shell('git show-ref refs/heads/gh-pages', check=False).status_code != 0:\n # initialized github pages branch\n shell(dedent(\"\"\"\n git checkout --orphan gh-pages\n git reset --hard\n git commit --allow-empty -m \"Initializing gh-pages branch\"\n git push gh-pages\n git checkout master\n \"\"\").strip())\n print('created github pages branch')\n\n # deleting old publication\n shell('rm -rf public')\n shell('mkdir public')\n shell('git worktree prune')\n shell('rm -rf .git/worktrees/public/')\n # checkout out gh-pages branch into public\n shell('git worktree add -B gh-pages public gh-pages')\n # generating docs\n context = click.get_current_context()\n context.invoke(docs, no_browser=True)\n # push to github\n with cd('public'):\n shell('git add .')\n shell('git commit -m \"Publishing to gh-pages (Fabfile)\"')\n shell('git push origin gh-pages')",
"def gh_pages(c):\n preview(c)\n c.run(\"ghp-import -b {github_pages_branch} {deploy_path} -p\".format(\n **CONFIG))",
"async def cutecharts():\n\n put_markdown(t(r\"\"\"## Cutecharts.py\n \n [cutecharts.py](https://github.com/cutecharts/cutecharts.py) is a hand drawing style charts library for Python which uses [chart.xkcd](https://github.com/timqian/chart.xkcd) as underlying implementation.\n\n In PyWebIO, you can use the following code to output the cutecharts.py chart instance:\n\n ```python\n # `chart` is cutecharts chart instance\n pywebio.output.put_html(chart.render_notebook())\n ``` \n For details, please refer to the source code of the demo below.\n\n ## Demos List\n \"\"\", r\"\"\"## Cutecharts.py\n \n [cutecharts.py](https://github.com/cutecharts/cutecharts.py) 是一个可以创建具有卡通风格的可视化图表的python库。底层使用了 [chart.xkcd](https://github.com/timqian/chart.xkcd) Javascript库。\n \n PyWebIO 支持输出使用 cutecharts.py 库创建的图表。使用方式为在PyWebIO会话中调用 \n ```python\n # chart 为 cutecharts 的图表实例\n pywebio.output.put_html(chart.render_notebook())\n ``` \n 具体可以参考下面demo中的源码。\n\n ## Demos List\n \"\"\"), strip_indent=4)\n set_scope('demo-list')\n\n put_buttons(list(all_demos.keys()), onclick=show_demo)\n\n await hold()",
"def to_gh_pages(commit_msg, site_obj):\n # make sure all files are added\n os.system(\"git add *\")\n\n # commit and push to main branch\n os.system(\"git commit -m '{}' -a\".format(commit_msg))\n os.system(\"git push\")\n\n # push to gh-pages branch\n os.system(\"git subtree push --prefix {} origin gh-pages\".format(site_obj.get_static_dir()))\n\n term_prompt_header = colored(\"[{}] \".format(site_obj.name), \"cyan\")\n print(term_prompt_header + \"Dumped to Github Pages.\")",
"def index():\n\n # open the README file\n with open(os.path.dirname(app.root_path) + '/README.md',\n 'r', encoding=\"utf-8\") as markdown_file:\n\n # Read the content of the file\n content = markdown_file.read()\n\n # convert to html\n return markdown.markdown(content)",
"def test_fetch_helm_chart(self):\n temp_dir = tempfile.mkdtemp()\n output_dir = tempfile.mkdtemp()\n output_chart_dir = os.path.join(output_dir, \"charts\", \"prometheus\")\n chart_name = \"prometheus\"\n version = \"11.3.0\"\n repo = \"https://github.com/BurdenBear/kube-charts-mirror/raw/master/docs/\"\n dep = [\n {\n \"output_path\": output_chart_dir,\n \"version\": version,\n \"chart_name\": chart_name,\n \"source\": repo,\n }\n ]\n fetch_helm_chart((HelmSource(repo, chart_name, version, None), dep), temp_dir, force=False)\n self.assertTrue(os.path.isdir(output_chart_dir))\n self.assertTrue(os.path.isfile(os.path.join(output_chart_dir, \"Chart.yaml\")))\n self.assertTrue(os.path.isdir(os.path.join(output_chart_dir, \"charts\", \"kube-state-metrics\")))\n rmtree(temp_dir)\n rmtree(output_dir)",
"def publish_metadata(): \n \n folder = 'metadata'\n name = get_dataset_filename()\n \n # Create a kml folder in the temp directory if it does not exist\n temp_working_folder = os.path.join(temp_workspace,folder)\n \n # Publish the metadata to the download folder\n publish_file(temp_working_folder, name + '.xml','metadata')",
"def get_chart(self, release_name, values):\n values_options = self._get_values_string(values)\n output = subprocess.check_output(\n [\"helm\", \"template\", release_name, self.source_directory] + values_options\n )\n\n if self.chart.source.type == \"git\":\n subpath = self.chart.source.get(\"subpath\", \"\")\n template_path = os.path.join(\n self._source_tmp_dir, subpath, \"mlbench_template.yaml\"\n )\n else:\n template_path = os.path.join(tempfile.mkdtemp(), \"template.yaml\")\n\n with open(template_path, \"wb\") as f:\n f.write(output)\n return template_path",
"def write_index_html(self):\n print(\"- writing index.md\")\n index_toc = [f\"### [Table of Contents]({config['github_pages_url']}/toc.html)\"] if self.notebooks else []\n if os.path.isfile(os.path.join(self.dst_dir, \"data_index.html\")):\n index_toc += [f\"### [Data Index]({config['github_pages_url']}/data_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"figure_index.html\")):\n index_toc += [f\"### [Figure Index]({config['github_pages_url']}/figure_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"python_index.html\")):\n index_toc += [f\"### [Python Module Index]({config['github_pages_url']}/python_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"tag_index.html\")):\n index_toc += [f\"### [Tag Index]({config['github_pages_url']}/tag_index.html)\"]\n index_toc += [f\"- {nb.link}\" if type(nb) == Section else f\"\\n### {nb.link}\" for nb in self.notebooks]\n env = Environment(loader=FileSystemLoader(\"templates\"))\n with open(os.path.join(self.dst_dir, \"index.md\"), 'w') as f:\n f.write(env.get_template('index.md.tpl').render(\n readme_toc=index_toc, page_title=config['github_repo_name'], github_url=config['github_repo_url']))",
"def publish_index(filename, index, css='body {background-color:#F8FFFF;}'):\n fmt = '<li><a href=\"%s\">%s</a></li>'\n links = '\\n'.join(fmt % (src, lbl) for src, lbl in sorted(index.items()))\n html = index_template.format(title='Python', css=css, links=links)\n with open(filename, 'w') as f:\n f.write(html)",
"def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')",
"def index():\n # create table for original dataset\n table_1 = data_table_low(filepath = \"sparkify_data.csv\", title='Raw Sparkify Data')\n\n table_2 = data_table_low(filepath = \"cleaned_data.csv\", title='Cleaned Sparkify Data')\n\n # create and append plotly visuals into an array to be passed later for graphJSON file\n graphs = [table_1, table_2]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template(\"master.html\", ids=ids, graphJSON=graphJSON)",
"def install_helm_plugins():\n plugins = {\n 'https://github.com/technosophos/helm-gpg': '0.1.0',\n }\n for plugin_url, version in plugins.items():\n install_cmd = \"helm plugin install {0} --version={1}\".format(\n plugin_url,\n version)\n logging.info(\"installing helm plugin with command: {0}\".format(install_cmd))\n sp.call(install_cmd, shell=True)",
"def write_homepage(self, statistics):\n self.logger.info('writing homepage')\n dt = datetime.now()\n with AtomicReplaceFile(self.output_path / 'index.html',\n encoding='utf-8') as index:\n index.file.write(self.templates['index'](\n layout=self.templates['layout']['layout'],\n timestamp=dt.strftime('%Y-%m-%d %H:%M'),\n page='home',\n **statistics))",
"def publish_static_webapp(session):\n build_frontend(session)\n session.run(\"git\", \"checkout\", \"gh-pages\", external=True)\n session.run(\"rm\", \"-rf\", \"connect/\", external=True)\n session.run(\"mkdir\", \"connect\", external=True)\n session.run(\"cp\", \"-rT\", \"termpair/frontend_build/\", \"connect/\", external=True)\n session.run(\"git\", \"add\", \"connect\", external=True)\n session.run(\"git\", \"commit\", \"-m\", \"commit built frontend\", external=True)\n session.run(\"git\", \"push\", \"origin\", \"gh-pages\", external=True)",
"def save(self):\n for page in self.pages.get_published_pages():\n site_path = page.path_to_page.replace('.md', '').replace(\n self.source_path, '').strip('/')\n save_path = self.output_path\n\n # ensure we are not creating a directory for the index file that\n # that lives at the source_path\n if page.full_path() != f'{self.source_path}{os.sep}index.md':\n site_path = slugify_path(site_path)\n save_path = os.path.join('', self.output_path, site_path)\n\n try:\n os.makedirs(save_path, exist_ok=True)\n except Exception as e:\n log((f'unable to create directories: {save_path}'\n f' because: {e}'), True)\n continue\n\n try:\n save_file = os.path.join(save_path, 'index.html')\n log(f'saving {save_file}')\n\n published = self.pages.get_published_pages()\n prev_page = self.pages.get_previous_page(page)\n next_page = self.pages.get_next_page(page)\n content = page.render(published_pages=published,\n previous_page=prev_page, next_page=next_page)\n write(save_file, content)\n except Exception as e:\n log(f'unable to save file: {save_file} -- {e}', True)\n\n unpublished = self.pages.get_unpublished_pages()\n if len(unpublished):\n log('')\n log('these pages were unpublished and not rendered:', True)\n for up in unpublished:\n log(up.path_to_page, True)\n log('')\n\n # build the _tags pages\n for tag, pages in self.tags.pages.items():\n content = self.tags.render(tag, pages)\n tag_index_dir = f'{self.tag_dir}/{slugify(tag)}'\n tag_index = f'{tag_index_dir}/index.html'\n os.makedirs(tag_index_dir, exist_ok=True)\n write(tag_index, content)\n\n log('finished builidng site')",
"def index():\n return redirect(\"/apidocs\")"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
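A hypothetical invocation of `publish_pages()` from the row above. All names and URLs are placeholders, and the helpers it depends on (`git_remote`, `last_modified_commit`, `check_call`) are assumed to be importable alongside it, with a `gh-pages` branch and push rights on the chart repository.

```python
# Placeholder chart name, paths, repository, and pages URL.
publish_pages(
    name='binderhub',
    paths=['binderhub', 'images'],
    git_repo='example-org/helm-chart',
    published_repo='https://example-org.github.io/helm-chart',
    extra_message='triggered by CI build 1234',
)
```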
Add the domain restrictions. | def add_domains_restriction(self, domain_restriction):
self._domain_restricion = domain_restriction
self._size_var = self._get_size_var()
self._nr_of_bits = self._get_nr_of_bits() | [
"def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)",
"async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n allowedDomains.append(domain)\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")",
"def set_google_apps_domains(domains):\n organization = models.Organization.query.first()\n k = models.Organization.SETTING_GOOGLE_APPS_DOMAINS\n organization.settings[k] = domains.split(\",\")\n models.db.session.add(organization)\n models.db.session.commit()\n print(\n \"Updated list of allowed domains to: {}\".format(\n organization.google_apps_domains\n )\n )",
"def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain",
"def restrictions(self, restrictions):\n\n self._restrictions = restrictions",
"def ad_domains(self, ad_domains):\n self._ad_domains = ad_domains",
"def add_domains(self, domains):\n return self.bulk_create(domains)",
"def add_domain(self, feature, other_domain):\n for value in other_domain:\n self.domains[feature].add(value)\n if not value in self.CPN[\"CPT\"][feature][\"domain\"]:\n self.CPN[\"CPT\"][feature][\"domain\"].append(value)\n pass",
"def relevant_domains(self):\n pass",
"def sl_domains(self, sl_domains):\n self._sl_domains = sl_domains",
"def domains(self, domains):\n\n self._domains = domains",
"def add_domains():\n awis = domain_intel.awis.actions.UrlInfo()\n domains_file = os.path.join('domain_intel',\n 'test',\n 'files',\n 'samples',\n 'gtr_unique_domains.csv')\n with open(domains_file) as _fh:\n metrics = awis.add_domains(_fh)\n\n return metrics",
"def create_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.create_domain(name)",
"def _domain(self) -> List[Constraint]:\n return [self.args[1] >> 0]",
"def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . \" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()",
"def _add_name_search_domain(self):\n domain = []\n ctx = self._context.copy()\n for i in self._rescommon_name_search_list:\n if ctx.get(i):\n domain += [(i, '=', ctx.get(i))]\n return domain",
"def add_variable(self, name, domain):\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}",
"def exclude_domain(self) -> None:\n self.exclude_domains.add(current_domain.get())",
"def par_domain(self):"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get the number of bits needed for an item. | def _get_nr_of_bits(self):
return sum(self._size_var) | [
"def getBitSize(self) -> int:\n return self._bitSize",
"def get_item_size(item_type):\n\n if item_type == \"Boolean\":\n return 1\n elif item_type == \"Unsigned_8\":\n return 8\n elif item_type == \"Unsigned_16\":\n return 16\n elif item_type == \"Unsigned_32\":\n return 32\n else:\n raise ValueError(\"Unrecognised type value\")",
"def bitSizeOf() -> int:\n\n return 64",
"def bit_length(self): # real signature unknown; restored from __doc__\n return 0",
"def bit_length(self, ???):",
"def bitSizeOf() -> int:\n\n return 32",
"def get_bitsize(self) -> int:\n return self._surface.get_bitsize()",
"def bit_length(self): # real signature unknown; restored from __doc__\n pass",
"def getNbrOfBit(self):\n return DPxGetDinNumBits()",
"def BitsRemaining(self):\n return self.NumBits() - (8*self.idx_byte + self.idx_boff) - 1",
"def bitSizeOf() -> int:\n\n return 16",
"def getByteSize(self) -> int:\n return (self._bitSize + 7) // 8",
"def count(item):\n return len(item)",
"def number_bits_in_cardinality(self,card):\n return 32 - self.count_lead_zs(card)",
"def nbytes(self) -> int:\n nbits = self.nbits()\n if nbits % 8 == 0:\n return int(nbits / 8)\n return int(nbits / 8) + 1",
"def item_count(self):\n return len(self.items)",
"def get_bitfield_length(bit_count: int) -> int:\n return (bit_count + 7) // 8",
"def _calculate_bit_size(self, history: sizing_executor.SizeAndDTypes) -> int:\n bit_size = 0\n for num_elements, dtype in history:\n bit_size += num_elements * self._bits_per_element(dtype)\n return bit_size",
"def getSupport(self, item):\n return self.itemCountDict[item] / self.transLength"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Get a random genom. | def get_random(self):
base_genom = "1" * sum(self._size_var)
return utils.randomise_a_string(base_genom) | [
"def get_random_genome(self):\n return random.choice(self.genomes)",
"def generate_random_genome (size):\n return Genome(\"\".join([str(randint(0, 3)) for i in range(size)]))",
"def generate():\n s = random_data.random_bytes(100)\n return generate_from_string(s)",
"def get_random(self, miRormRNA):\r\n if miRormRNA == 'miR':\r\n abs_dist = self.miR2dist\r\n dist = methods.absolute2dist(self.miR2dist, self.totalmiR) \r\n elif miRormRNA == 'mRNA':\r\n abs_dist = self.mRNA2dist\r\n dist = methods.absolute2dist(self.mRNA2dist, self.totalmRNA) \r\n else:\r\n methods.logERROR(\"Error in molecule type, exiting...\")\r\n exit()\r\n genes = list(dist.keys())\r\n probabilities = list(dist.values())\r\n total = sum(dist.values())\r\n # fixing the probabilities so it will sum to 1\r\n if 1-total != 0:\r\n diff = 1-total\r\n if diff > 0:\r\n genes.append(\"\")\r\n probabilities.append(diff)\r\n else:\r\n fixed = False\r\n rand_idx = nprand.choice(range(len(probabilities)))\r\n while not fixed and rand_idx < len(probabilities):\r\n if probabilities[rand_idx] + diff >= 0:\r\n probabilities[rand_idx] += diff\r\n fixed = True\r\n else:\r\n rand_idx += 1\r\n if rand_idx == len(probabilities):\r\n rand_idx = 0\r\n try:\r\n rand_mol = nprand.choice(genes, p=probabilities)\r\n if abs_dist[rand_mol] < 1:\r\n methods.logERROR(\"molecule count < 1: \" + rand_mol + \", total count: \" + str(abs_dist[rand_mol]))\r\n return rand_mol\r\n except:\r\n if not total == 1:\r\n methods.logERROR(\"probabilities are not equal to 1 for: \" + miRormRNA + \" total: \" + str(total))\r\n return \"\"",
"def get_random_individual(self, generation):\n if len(self.generations) <= generation < 0:\n raise ValueError('Please enter a valid generation.')\n return self.get_individual(\n generation=generation,\n index=random.randint(0, len(self.generations[generation]) - 1))",
"def random(self) -> Gadza:\n return choice(self.gadzas)",
"def _random_genre(self) -> Genre:\n genres = ('Action and Adventure', 'Classics', 'Comic Book or Graphic Novel',\n 'Detective and Mystery', 'Fantasy', 'Historical Fiction',\n 'Horror', 'Literary Fiction', 'Romance', 'Science Fiction (Sci-Fi)',\n 'Short Stories', 'Suspense and Thrillers', \"Women's Fiction\",\n \"Biographies and Autobiographies\", \"Cookbooks\", \"Poetry\", \"Self-Help\")\n name = random.choice(genres)\n return Genre.objects.get_or_create(name=name)[0]",
"def get_random_object():\n\n return random.choice([\n get_random_alphabetic_string,\n get_random_alphanumeric_string,\n get_random_integer,\n get_random_real_number\n ])()",
"def random_plush_gene(self):\n atom = random.choice(list(self.atom_generators))\n return self.atom_to_plush_gene(atom)",
"def _get_gaussian_random(self):\n u1 = generateRandom()\n u2 = generateRandom()\n if u1 < 1e-6:\n u1 = 1e-6\n return sqrt(-2 * log(u1)) * cos(2 * pi * u2)",
"def __generate_random_gene_sequence(self):\n genes = []\n for j in range(self.chromosome_size):\n genes.append(random.choice(self.gene_pool))\n\n return genes",
"def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people",
"def get_random(cls):\n\n\t\tnum = randint(0, 6)\n\n\t\treturn Tetromino(num)",
"def get_random_number(self):\n return np.random.uniform()",
"def _gen_random_number() -> float:\n return uniform(0, 1000)",
"def generate_random(model):\n return generate_music(\"\", model)",
"def rand_individual(self):\n return self.operator.rand_individual()",
"def get_random(X):\n size = len(X)\n idx = np.random.choice(range(size))\n return X[idx]",
"def _random_getnode():\n import random\n return random.getrandbits(48) | 0x010000000000"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
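The getter above delegates to `utils.randomise_a_string()`, which is not shown in the source. The stand-in below is purely an assumption: it draws each position of the genome uniformly from '0'/'1', so a fresh random bit-string of the requested length comes back (shuffling the all-ones template would return it unchanged).

```python
import random


def randomise_a_string(template: str) -> str:
    """Assumed stand-in: return a random '0'/'1' string as long as `template`."""
    return ''.join(random.choice('01') for _ in template)


print(randomise_a_string('1' * 16))  # e.g. '1010011100011010'
```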
Create a new block cipher, configured in CTR mode. | def __init__(self, block_cipher, initial_counter_block,
prefix_len, counter_len, little_endian):
if len(initial_counter_block) == prefix_len + counter_len:
self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)
"""Nonce; not available if there is a fixed suffix"""
self._state = VoidPointer()
result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),
c_uint8_ptr(initial_counter_block),
c_size_t(len(initial_counter_block)),
c_size_t(prefix_len),
counter_len,
little_endian,
self._state.address_of())
if result:
raise ValueError("Error %X while instantiating the CTR mode"
% result)
# Ensure that object disposal of this Python object will (eventually)
# free the memory allocated by the raw library for the cipher mode
self._state = SmartPointer(self._state.get(),
raw_ctr_lib.CTR_stop_operation)
# Memory allocated for the underlying block cipher is now owed
# by the cipher mode
block_cipher.release()
self.block_size = len(initial_counter_block)
"""The block size of the underlying cipher, in bytes."""
self._next = [self.encrypt, self.decrypt] | [
"def _create_ctr_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n\n counter = kwargs.pop(\"counter\", None)\n nonce = kwargs.pop(\"nonce\", None)\n initial_value = kwargs.pop(\"initial_value\", None)\n if kwargs:\n raise TypeError(\"Invalid parameters for CTR mode: %s\" % str(kwargs))\n\n if counter is not None and (nonce, initial_value) != (None, None):\n raise TypeError(\"'counter' and 'nonce'/'initial_value'\"\n \" are mutually exclusive\")\n\n if counter is None:\n # Crypto.Util.Counter is not used\n if nonce is None:\n if factory.block_size < 16:\n raise TypeError(\"Impossible to create a safe nonce for short\"\n \" block sizes\")\n nonce = get_random_bytes(factory.block_size // 2)\n else:\n if len(nonce) >= factory.block_size:\n raise ValueError(\"Nonce is too long\")\n \n # What is not nonce is counter\n counter_len = factory.block_size - len(nonce)\n\n if initial_value is None:\n initial_value = 0\n\n if is_native_int(initial_value):\n if (1 << (counter_len * 8)) - 1 < initial_value:\n raise ValueError(\"Initial counter value is too large\")\n initial_counter_block = nonce + long_to_bytes(initial_value, counter_len)\n else:\n if len(initial_value) != counter_len:\n raise ValueError(\"Incorrect length for counter byte string (%d bytes, expected %d)\" % (len(initial_value), counter_len))\n initial_counter_block = nonce + initial_value\n\n return CtrMode(cipher_state,\n initial_counter_block,\n len(nonce), # prefix\n counter_len,\n False) # little_endian\n\n # Crypto.Util.Counter is used\n\n # 'counter' used to be a callable object, but now it is\n # just a dictionary for backward compatibility.\n _counter = dict(counter)\n try:\n counter_len = _counter.pop(\"counter_len\")\n prefix = _counter.pop(\"prefix\")\n suffix = _counter.pop(\"suffix\")\n initial_value = _counter.pop(\"initial_value\")\n little_endian = _counter.pop(\"little_endian\")\n except KeyError:\n raise TypeError(\"Incorrect counter object\"\n \" (use Crypto.Util.Counter.new)\")\n\n # Compute initial counter block\n words = []\n while initial_value > 0:\n words.append(struct.pack('B', initial_value & 255))\n initial_value >>= 8\n words += [ b'\\x00' ] * max(0, counter_len - len(words))\n if not little_endian:\n words.reverse()\n initial_counter_block = prefix + b\"\".join(words) + suffix\n\n if len(initial_counter_block) != factory.block_size:\n raise ValueError(\"Size of the counter block (%d bytes) must match\"\n \" block size (%d)\" % (len(initial_counter_block),\n factory.block_size))\n\n return CtrMode(cipher_state, initial_counter_block,\n len(prefix), counter_len, little_endian)",
"def _create_cbc_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n iv = kwargs.pop(\"IV\", None)\n IV = kwargs.pop(\"iv\", None)\n\n if (None, None) == (iv, IV):\n iv = get_random_bytes(factory.block_size)\n if iv is not None:\n if IV is not None:\n raise TypeError(\"You must either use 'iv' or 'IV', not both\")\n else:\n iv = IV\n\n if len(iv) != factory.block_size:\n raise ValueError(\"Incorrect IV length (it must be %d bytes long)\" %\n factory.block_size)\n\n if kwargs:\n raise TypeError(\"Unknown parameters for CBC: %s\" % str(kwargs))\n\n return CbcMode(cipher_state, iv)",
"def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher",
"def encrypt_ctr(self, plaintext, iv):\n assert len(iv) == 16\n\n plaintext = pad(plaintext)\n\n blocks = []\n nonce = iv\n for plaintext_block in split_blocks(plaintext):\n # CTR mode encrypt: plaintext_block XOR encrypt(nonce)\n block = xor_bytes(plaintext_block, self.encrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return b''.join(blocks)",
"def encrypt_cbc_using_ecb(text, key):\n iv = '\\x00'*16\n cipher = ''\n aes = AES.new(key, AES.MODE_ECB)\n for block in Crypto.get_blocks(Crypto.pad_pkcs7(text)):\n block = FrequencyAnalyzer.get_repeating_xor(block, iv)\n iv = aes.encrypt(block)\n cipher += iv\n return cipher",
"def aes_ctr(key, counter=None):\n return AES.new(key, AES.MODE_CTR, counter=(counter if counter is not None else Counter.new(128)))",
"def new(key, nonce=None):\n\n if nonce is None:\n nonce = get_random_bytes(8)\n\n return Salsa20Cipher(key, nonce)",
"def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):\n # can we use M2Crypto and was it requested?\n if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:\n # yes, so do so\n return self.EVPAdaptor(key_bytes, iv_bytes, mode)\n else:\n # default to PyCrypto\n return self.AESAdaptor(key_bytes, iv_bytes, mode)",
"def encryptAESCTR(key, plaintext):\n # 128-bit iv, securely generated\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return (iv, ciphertext)",
"def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())",
"def aes_cbc(key, iv=None):\n return AES.new(key, AES.MODE_CBC, iv if iv is not None else get_zero_vector(16))",
"def ecb_or_cbc_encrypt(plaintext, mode='random'):\n if mode == 'random':\n mode = 'ECB' if randint(0, 1) == 0 else 'CBC'\n\n key = randstr(AES_BSZ)\n plaintext = (\n ''.join([randstr(1) for _ in range(randint(5, 10))]) +\n plaintext +\n ''.join([randstr(1) for _ in range(randint(5, 10))])\n )\n plaintext = pad_to_blocksize(plaintext)\n\n if mode == 'ECB':\n ecb = AES.new(key, AES.MODE_ECB)\n ciphertext = ecb.encrypt(plaintext)\n elif mode == 'CBC':\n iv = randstr(AES_BSZ)\n cbc = AES.new(key, AES.MODE_CBC, iv)\n ciphertext = cbc.encrypt(plaintext)\n else:\n raise Exception(\"invalid mode\")\n\n return ciphertext",
"def new(key,mode=MODE_ECB,IV=None,counter=None,segment_size=None):\n return IDEA(key,mode,IV,counter,segment_size)",
"def _generate_cipher(self):\n if not (os.path.exists(PRIVATE_KEY_PATH) and os.path.exists(PUBLIC_KEY_PATH)):\n key = RSA.generate(2048)\n\n with open(PRIVATE_KEY_PATH, \"wb\") as file_out:\n file_out.write(key.export_key())\n\n with open(PUBLIC_KEY_PATH, \"wb\") as file_out:\n file_out.write(key.publickey().export_key())\n\n self.session_key = get_random_bytes(16)\n\n self._encryption_cipher = AES.new(self.session_key, AES.MODE_EAX)",
"def get_cipher(algo, mode=blockalgo.MODE_ECB):\n if algo in [AES, ARC2, Blowfish, CAST, DES, DES3]:\n return BlockCipher(algo, mode)\n elif algo in [ARC4, XOR]:\n return StreamCipher(algo)",
"def make_OPc( K, OP ):\n return xor_buf( AES_ECB(K).encrypt(OP), OP )",
"def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()",
"def encryptor(iv = os.urandom(16), key = os.urandom(32), bc = backend,key_type = 'AES128',mode='CBC'):\n\tif key_type == 'AES128':\n\t\talgo = algorithms.AES(key)\n\telif key_type == 'ChaCha20':\n\t\talgo = algorithms.ChaCha20(key,nonce=os.urandom(32))\n\telse:\n\t\traise('Error algorithm ' + key_type + ' not supported!')\n\tif mode == 'CBC':\n\t\tmode = modes.CBC(iv)\n\telif mode == 'GCM':\n\t\tmode = modes.GCM(iv)\n\telse :\n\t\traise('Error mode ' + mode + ' not supported!')\n\tcipher = Cipher(algo,mode,backend = bc)\n\treturn iv,key,cipher.encryptor()",
"def aes_cbc_encrypt(self, key: bytes, plain_data: bytes, iv: bytes = None) -> bytes:\n if len(key) not in AES.key_size:\n raise SPSDKError(f\"The key must be a valid AES key length: {', '.join([str(k) for k in AES.key_size])}\")\n init_vector = iv or bytes(AES.block_size)\n if len(init_vector) != AES.block_size:\n raise SPSDKError(f\"The initial vector length must be {AES.block_size}\")\n cipher = AES.new(key, mode=AES.MODE_CBC, iv=init_vector)\n return cipher.encrypt(plain_data)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Instantiate a cipher object that performs CTR encryption/decryption. | def _create_ctr_cipher(factory, **kwargs):
    cipher_state = factory._create_base_cipher(kwargs)
    counter = kwargs.pop("counter", None)
    nonce = kwargs.pop("nonce", None)
    initial_value = kwargs.pop("initial_value", None)
    if kwargs:
        raise TypeError("Invalid parameters for CTR mode: %s" % str(kwargs))
    if counter is not None and (nonce, initial_value) != (None, None):
        raise TypeError("'counter' and 'nonce'/'initial_value'"
                        " are mutually exclusive")
    if counter is None:
        # Crypto.Util.Counter is not used
        if nonce is None:
            if factory.block_size < 16:
                raise TypeError("Impossible to create a safe nonce for short"
                                " block sizes")
            nonce = get_random_bytes(factory.block_size // 2)
        else:
            if len(nonce) >= factory.block_size:
                raise ValueError("Nonce is too long")
        # What is not nonce is counter
        counter_len = factory.block_size - len(nonce)
        if initial_value is None:
            initial_value = 0
        if is_native_int(initial_value):
            if (1 << (counter_len * 8)) - 1 < initial_value:
                raise ValueError("Initial counter value is too large")
            initial_counter_block = nonce + long_to_bytes(initial_value, counter_len)
        else:
            if len(initial_value) != counter_len:
                raise ValueError("Incorrect length for counter byte string (%d bytes, expected %d)" % (len(initial_value), counter_len))
            initial_counter_block = nonce + initial_value
        return CtrMode(cipher_state,
                       initial_counter_block,
                       len(nonce),  # prefix
                       counter_len,
                       False)  # little_endian
    # Crypto.Util.Counter is used
    # 'counter' used to be a callable object, but now it is
    # just a dictionary for backward compatibility.
    _counter = dict(counter)
    try:
        counter_len = _counter.pop("counter_len")
        prefix = _counter.pop("prefix")
        suffix = _counter.pop("suffix")
        initial_value = _counter.pop("initial_value")
        little_endian = _counter.pop("little_endian")
    except KeyError:
        raise TypeError("Incorrect counter object"
                        " (use Crypto.Util.Counter.new)")
    # Compute initial counter block
    words = []
    while initial_value > 0:
        words.append(struct.pack('B', initial_value & 255))
        initial_value >>= 8
    words += [ b'\x00' ] * max(0, counter_len - len(words))
    if not little_endian:
        words.reverse()
    initial_counter_block = prefix + b"".join(words) + suffix
    if len(initial_counter_block) != factory.block_size:
        raise ValueError("Size of the counter block (%d bytes) must match"
                         " block size (%d)" % (len(initial_counter_block),
                                               factory.block_size))
    return CtrMode(cipher_state, initial_counter_block,
                   len(prefix), counter_len, little_endian) | [
"def __init__(self, block_cipher, initial_counter_block,\n prefix_len, counter_len, little_endian):\n\n if len(initial_counter_block) == prefix_len + counter_len:\n self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)\n \"\"\"Nonce; not available if there is a fixed suffix\"\"\"\n\n self._state = VoidPointer()\n result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),\n c_uint8_ptr(initial_counter_block),\n c_size_t(len(initial_counter_block)),\n c_size_t(prefix_len),\n counter_len,\n little_endian,\n self._state.address_of())\n if result:\n raise ValueError(\"Error %X while instantiating the CTR mode\"\n % result)\n\n # Ensure that object disposal of this Python object will (eventually)\n # free the memory allocated by the raw library for the cipher mode\n self._state = SmartPointer(self._state.get(),\n raw_ctr_lib.CTR_stop_operation)\n\n # Memory allocated for the underlying block cipher is now owed\n # by the cipher mode\n block_cipher.release()\n\n self.block_size = len(initial_counter_block)\n \"\"\"The block size of the underlying cipher, in bytes.\"\"\"\n\n self._next = [self.encrypt, self.decrypt]",
"def __create_cipher(self, nonce=None, iv=None):\r\n cipher = None\r\n if self.__encryption_method == EncryptionMethod.AES:\r\n if nonce is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = AES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES3:\r\n if nonce is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES3.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.DES:\r\n if nonce is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], nonce=nonce)\r\n elif iv is not None:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode], iv=iv)\r\n else:\r\n cipher = DES.new(self.__encryption_key, _block_mode_dict[self.__block_mode])\r\n elif self.__encryption_method == EncryptionMethod.SHIFT:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"Shift only supports ECB\")\r\n cipher = SimpleShiftCipher(self.__encryption_key)\r\n elif self.__encryption_method == EncryptionMethod.XOR:\r\n if not self.__block_mode == BlockMode.ECB:\r\n raise Exception(\"XOR only supports ECB\")\r\n cipher = SimpleXorCipher(self.__encryption_key)\r\n else:\r\n raise Exception(\"Unknown encryption method \" + str(self.__encryption_method))\r\n return cipher",
"def aes_ctr(key, counter=None):\n return AES.new(key, AES.MODE_CTR, counter=(counter if counter is not None else Counter.new(128)))",
"def encryptAESCTR(key, plaintext):\n # 128-bit iv, securely generated\n iv = os.urandom(16)\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return (iv, ciphertext)",
"def encrypt_ctr(self, plaintext, iv):\n assert len(iv) == 16\n\n plaintext = pad(plaintext)\n\n blocks = []\n nonce = iv\n for plaintext_block in split_blocks(plaintext):\n # CTR mode encrypt: plaintext_block XOR encrypt(nonce)\n block = xor_bytes(plaintext_block, self.encrypt_block(nonce))\n blocks.append(block)\n nonce = inc_bytes(nonce)\n\n return b''.join(blocks)",
"def __CreateCipher(self, key_bytes, iv_bytes, mode=AES.MODE_CBC):\n # can we use M2Crypto and was it requested?\n if ACTIVE_CRYPT_LIB.lower() == 'm2crypto' and EVP:\n # yes, so do so\n return self.EVPAdaptor(key_bytes, iv_bytes, mode)\n else:\n # default to PyCrypto\n return self.AESAdaptor(key_bytes, iv_bytes, mode)",
"def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()",
"def _create_cbc_cipher(factory, **kwargs):\n\n cipher_state = factory._create_base_cipher(kwargs)\n iv = kwargs.pop(\"IV\", None)\n IV = kwargs.pop(\"iv\", None)\n\n if (None, None) == (iv, IV):\n iv = get_random_bytes(factory.block_size)\n if iv is not None:\n if IV is not None:\n raise TypeError(\"You must either use 'iv' or 'IV', not both\")\n else:\n iv = IV\n\n if len(iv) != factory.block_size:\n raise ValueError(\"Incorrect IV length (it must be %d bytes long)\" %\n factory.block_size)\n\n if kwargs:\n raise TypeError(\"Unknown parameters for CBC: %s\" % str(kwargs))\n\n return CbcMode(cipher_state, iv)",
"def _generate_cipher(self):\n if not (os.path.exists(PRIVATE_KEY_PATH) and os.path.exists(PUBLIC_KEY_PATH)):\n key = RSA.generate(2048)\n\n with open(PRIVATE_KEY_PATH, \"wb\") as file_out:\n file_out.write(key.export_key())\n\n with open(PUBLIC_KEY_PATH, \"wb\") as file_out:\n file_out.write(key.publickey().export_key())\n\n self.session_key = get_random_bytes(16)\n\n self._encryption_cipher = AES.new(self.session_key, AES.MODE_EAX)",
"def __init__(self, key):\n self.block_size = 16\n self.cipher = Cipher(algorithms.AES(key), modes.ECB(), default_backend())",
"def encrypt_cbc_using_ecb(text, key):\n iv = '\\x00'*16\n cipher = ''\n aes = AES.new(key, AES.MODE_ECB)\n for block in Crypto.get_blocks(Crypto.pad_pkcs7(text)):\n block = FrequencyAnalyzer.get_repeating_xor(block, iv)\n iv = aes.encrypt(block)\n cipher += iv\n return cipher",
"def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n if len(key) not in (16, 24, 32):\n raise ValueError(\"The key must be a valid AES key length\")\n if len(nonce) != 16:\n raise ValueError(\"The nonce length is not valid\")\n assert len(plain_data) <= len(nonce)\n aes = AES.new(key, AES.MODE_ECB)\n ctr = aes.encrypt(nonce)\n return bytes([p ^ c for p, c in zip(plain_data, ctr)])",
"def aes_ctr_decrypt(self, key: bytes, encrypted_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.decryptor()\n return enc.update(encrypted_data) + enc.finalize()",
"def getCipherFromKey(key):\n \n return AES.new(key)",
"def new(key, nonce=None):\n\n if nonce is None:\n nonce = get_random_bytes(8)\n\n return Salsa20Cipher(key, nonce)",
"def initCTR(self, iv=0):\r\n if not struct.calcsize(\"Q\") == self._BLOCK_SIZE:\r\n raise ValueError(\"Struct-type 'Q' must have a length of %(target-len)i bytes, not %(q-len)i bytes; this module cannot be used on your platform\" % {\r\n 'target-len': self._BLOCK_SIZE,\r\n 'q-len': struct.calcsize(\"Q\"),\r\n })\r\n \r\n self._ctr_iv = iv\r\n self._calcCTRBuf()",
"def decryptAESCTR(key, iv, ciphertext):\n cipher = Cipher(algorithms.AES(key), modes.CTR(iv), backend=default_backend())\n decryptor = cipher.decryptor()\n return decryptor.update(ciphertext) + decryptor.finalize()",
"def aes_cbc(key, iv=None):\n return AES.new(key, AES.MODE_CBC, iv if iv is not None else get_zero_vector(16))",
"def __init__(self,**kwargs):\n self.msg = kwargs.get('msg','')\n self.shift = kwargs.get('shift','')\n op = kwargs.get('op', False)\n if op:\n try:\n op = getattr(self,op)\n except AttributeError as e: \n raise CipherError(\"valid operations: (encode|decode).\")\n op()\n print \"cipher={c}|key={s}|{r}\".format(c=self.__module__.split('.')[2],\n s=self.shift,\n r=self.result)"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
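For context, the following is a minimal usage sketch showing how a CTR factory like the `_create_ctr_cipher` document in the record above is normally reached through PyCryptodome's public API; it is not part of the dataset, and the key, nonce, and plaintext values are illustrative assumptions only.

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

# Hypothetical inputs: a 128-bit AES key and an 8-byte nonce, which leaves
# 8 bytes of the 16-byte block for the running counter.
key = get_random_bytes(16)
nonce = get_random_bytes(8)

# AES.new() with MODE_CTR passes its keyword arguments on to the CTR mode
# factory and returns a CtrMode object ready for encryption.
cipher = AES.new(key, AES.MODE_CTR, nonce=nonce)
ciphertext = cipher.encrypt(b"attack at dawn")

# CTR decryption uses the same keystream: re-create the cipher with the
# same key and nonce, then decrypt.
decipher = AES.new(key, AES.MODE_CTR, nonce=nonce)
assert decipher.decrypt(ciphertext) == b"attack at dawn"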