query: string, lengths 9 to 9.05k
document: string, lengths 10 to 222k
negatives: list, lengths 19 to 20
metadata: dict
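Each row pairs a natural-language query with a matching code document and a list of 19-20 negative (non-matching) snippets. Below is a minimal sketch of loading such a dump with the Hugging Face datasets library; it assumes the rows are stored as JSON Lines under a hypothetical local file name ("train.jsonl"), since the actual storage location is not part of this dump.

from datasets import load_dataset

# "train.jsonl" is a hypothetical file name; substitute the real data file.
ds = load_dataset("json", data_files="train.jsonl", split="train")

row = ds[0]
print(row["query"])            # short natural-language description
print(row["document"][:80])    # the matching code snippet (the positive)
print(len(row["negatives"]))   # 19-20 non-matching code snippets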
Clear Serbian text (convert to Latin script, ignore stop words, apply lemmatization and stemming)
def clear_serbian_text(self, ordinal, three_classes):
    clean_text = []
    data_text = loader.load_text_dictionary(ordinal, self._dictionary_path, three_classes)

    for w, tag, lemma in data_text:
        # convert word to lowercase and delete surrounding spaces
        word = w.lower().strip()

        # if the word is in Cyrillic, convert it to Latin script
        if converter.is_cyrillic_text(word):
            word = converter.convert_text_to_latinic(word)

        # ignore stop words
        if word in self._serbian_stop_words:
            continue

        # keep only adverbs, adjectives, nouns and verbs
        if not (tag.startswith("ADV") or tag.startswith("A:") or
                tag.startswith("N:") or tag.startswith("V:")):
            continue

        result_word = lemma.lower().strip()
        clean_text.append(result_word)

    return clean_text
[ "def clear_english_text(self, text):\n clean_text = []\n\n tagged_text = pos_tag(word_tokenize(text))\n\n for word, tag in tagged_text:\n wn_tag = converter.penn_to_wn(tag)\n\n # ignore words with wrong tag\n if wn_tag not in (wn.NOUN, wn.ADJ, wn.ADV):\n continue\n\n # ignore stop words\n if word in self._english_stop_words:\n continue\n\n # lemmatization\n lemma = self._lemmatizer.lemmatize(word, pos=wn_tag)\n if not lemma:\n continue\n\n clean_text.append((lemma, wn_tag))\n\n return clean_text", "def preprocess(text):\n text = normalize_unicode(text)\n text = remove_newline(text)\n text = text.lower()\n text = decontracted(text)\n text = replace_negative(text)\n text = removePunctuations(text)\n text = remove_number(text)\n text = remove_space(text)\n text = removeArticlesAndPronouns(text)\n text = removeNLTKStopWords(text)\n #text = performStemming(text)\n return text", "def clear_text(sourse_text, exclude_symbol):\n for i in exclude_symbol:\n sourse_text = sourse_text.replace(i, '')\n\n return sourse_text", "def remove_punctuation_stop_words_and_stem(text):\n\n text = regex.sub(r'\\p{P}+', \"\", text)\n text = text.lower()\n\n stop_words_set = set(stopwords.words('english'))\n stop_words = []\n\n for s in stop_words_set:\n s = regex.sub(r'[^\\w\\s]', '',s)\n stop_words.append(s)\n\n song_words = text.split()\n song_no_stop_words = [word for word in song_words if word not in stop_words]\n\n porter = PorterStemmer() # PorterStemmer avoids over-stemming words relative to other stemming algorithms\n stemmed_word_list = [porter.stem(word) for word in song_no_stop_words]\n text= ' '.join(stemmed_word_list)\n\n return text", "def cleantext(text):\n # Remove illegal characters from the text\n cleanedtext = \"\"\n illegal_chars = [\",\", \".\", \"'\", \"\\\"\", \"\\n\", \"\\r\", \"£\", \"$\", \"(\", \")\", \"-\"]\n text = text.replace(\"\\n\", \" \") # replace newlines with space\n text = text.replace(\"\\r\", \" \") # replace carr. 
return with space\n stop = set(stopwords.words(\"english\"))\n for i in text: # loop every character\n i = i.lower() # make text lower case\n i = i.encode('ascii',errors='ignore').decode() # get the code to ASCII so pandas does not panic\n # check if the variable is in the illegal chars list or a number\n if i not in illegal_chars and not i.isnumeric():\n cleanedtext += i\n words = cleanedtext.split() # splits the cleaned text into words\n\n word_frequencies = dict() # create a new empty dictionary\n for i in words: # check for every word if it is in the dictionary\n i = ps.stem(i)\n if i in word_frequencies: # if in dictionary, add 1 to the total\n word_frequencies[i] += 1\n else:\n if i not in stop:\n word_frequencies[i] = 1 # if not, add entry to dict\n return word_frequencies", "def remove_czech_stopwords(text) -> str:\n replacements = {x: '' for x in\n ProjectCommon.read_czech_stopwords(CZECH_STOPWORDS_FILE_PATH)}\n output = [w for w in text.split(' ') if w not in replacements]\n\n return ' '.join(output)", "def clear_text(self):\n self.set_text('')", "def snowballstem(texts):\n print(\"Stemming using Snowball Stemmer\")\n #texts_gen = back_to_string(texts)\n texts_out = []\n # KEEP ONLY NOUNS, ADJ, VERB, ADV\n for sent in tqdm(texts):\n texts_out.append([snowball.stem(word) for word in sent])#lemmatize2(sent))\n return texts_out", "def clean_text(text):\n # find all unique characters in the text - set will output unique characters\n text_unique = list(set(text))\n\n # remove as many non-english characters and character sequences as you can \n # loop through each of the unique character\n for c in text_unique:\n # check if the character is non-english character\n # if so replace with ' ' \n if not c.isalnum():\n text = text.replace(c, ' ')\n\n # shorten any extra dead space created above \n # finally replace double space with singel space that might araise removal \n # non english characters and numbers\n text = text.replace(' ',' ')", "def remove_plurals(self):\n if self.word[self.end] == 's':\n if self.ends_with(\"sses\"):\n self.end = self.end - 2\n elif self.ends_with(\"ies\"):\n self.set_to(\"i\")\n elif self.word[self.end - 1] != 's':\n self.end = self.end - 1\n if self.ends_with(\"eed\"):\n if self.m() > 0:\n self.end = self.end - 1\n elif (self.ends_with(\"ed\") or self.ends_with(\"ing\")) and self.contains_vowel():\n self.end = self.offset\n if self.ends_with(\"at\"):\n self.set_to(\"ate\")\n elif self.ends_with(\"bl\"):\n self.set_to(\"ble\")\n elif self.ends_with(\"iz\"):\n self.set_to(\"ize\")\n elif self.contains_double_consonant(self.end):\n self.end = self.end - 1\n ch = self.word[self.end]\n if ch == 'l' or ch == 's' or ch == 'z':\n self.end = self.end + 1\n elif self.m() == 1 and self.is_of_form_cvc(self.end):\n self.set_to(\"e\")", "def cleanup(text):\n text = text.upper()\n clean_text = []\n for c in text:\n if c=='J':\n clean_text.append('I')\n elif c in letters_noj:\n clean_text.append(c)\n return ''.join(clean_text)", "def removeStopWords(self, text):\n\n for stopword in self.stopWords:\n text = re.sub(rf' {stopword} ', ' ', text)\n return text", "def remove_non_ascii(text):\n words = nltk.word_tokenize(text)\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word). 
\\\n encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words", "def _flush_text(self):\n if self._text:\n if not self._devnull:\n self._nodestack[-1].append_text(''.join(self._text))\n self._text = []", "def filter_sentence(citing_sentence):\r\n\r\n if citing_sentence == None:\r\n return \" \" #filtered_sentences_noNone.append(\" \")\r\n\r\n if citing_sentence != None:\r\n citing_sentence = re.sub(\"[\\<\\[].*?[\\>\\]]\", \"\", citing_sentence) #to remove citations\r\n #citing_sentence = re.sub(\"[*?]\", \"\", citing_sentence) #to remove citations\r\n citing_sentence = re.sub('[0-9]+', '', citing_sentence)\r\n to_delete = [\"Introduction\", \"Background\", \"Conclusions\",\"the\", \"and\", \"therefore\", \"thus\", \"et\", \"al.\"]#, \"\\n\", \"<\\sub>\", \"bibr\", \"ref\", \"rid\", \"type\", \"xref\"] #, \"/p\\np\\n\", \"\\p\"]\r\n for word in to_delete:\r\n citing_sentence = re.sub(word, \"\", citing_sentence)\r\n #print(word)\r\n #print(citing_sentence)\r\n #citing_sentence = re.sub(\"\\?\", \"\", citing_sentence) #to remove citations\r\n citing_sentence = ' '.join([word for word in citing_sentence.split() if word not in (stopwords.words('english'))])\r\n return citing_sentence", "def preprocessing_text(self):\n print(' >>> Cleaning text...', end='', flush=True)\n self.text = regexp.sub(\" \", \"__\", self.text.lower(), flags=regexp.MULTILINE)\n self.text = \"_\" + regexp.sub(\"[^_a-z]\", \"\", self.text, flags=regexp.MULTILINE) + \"_\"\n print(ANSI.ok_green, 'OK !', ANSI.endc)", "def clean():\n\n sents = list(brown.sents())\n sents_copy = list(brown.sents())\n n = len(sents)\n print 'Removing special chars...'\n for i in range(0, n):\n for word in sents[i]:\n if not bool(re.search('[A-Za-z0-9]', word)):\n sents_copy[i].remove(word)\n print 'Removed special chars.'\n\n print 'Lowering all the words...'\n for i in range(0, n):\n m = len(sents_copy[i])\n for j in range(0, m):\n sents_copy[i][j] = sents_copy[i][j].lower()\n print 'Lowered all the words.'\n return sents_copy", "def clear_text():\r\n input_website.delete(0, 'end')\r\n input_pass.delete(0, 'end')", "def normalize(text):\n\n return white_space_fix(remove_articles(remove_punc(lower(text))))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for calculating the positive and negative scores for a Serbian word
def get_score_for_serbian_word(self, word, wnsrb_param, is_prefix):
    if wnsrb_param == 'c':
        sentiments = self._wnsrb_data_changed
    elif wnsrb_param == 'd':
        sentiments = self._wnsrb_data_deleted
    else:
        sentiments = self._wnsrb_data_original

    pos_scores = []
    neg_scores = []
    for i in range(len(sentiments["literal"])):
        lit = sentiments["literal"][i]
        if is_prefix:
            if lit.startswith(word):
                pos_scores.append(sentiments["pos_score"][i])
                neg_scores.append(sentiments["neg_score"][i])
        else:
            if word == lit:
                pos_scores.append(sentiments["pos_score"][i])
                neg_scores.append(sentiments["neg_score"][i])

    if len(pos_scores) > 0:
        return sum(pos_scores) / len(pos_scores), sum(neg_scores) / len(neg_scores)
    else:
        return -1, -1
[ "def cal_doc_scores(self, sentences) :\n doc_pos_score =0\n doc_neg_score = 0\n for label, pos, neg in sentences:\n if label != 0 :\n doc_pos_score += pos\n doc_neg_score += neg\n return doc_pos_score, doc_neg_score", "def spa_polarity_score(self, doc):\n mlsscore = 0\n for word in doc.split():\n lem_word = self.lemmatize_spa(word)\n if word in self.mlsent.keys():\n mlsscore = mlsscore + self.mlsent[word]\n elif lem_word in self.mlsent.keys():\n mlsscore = mlsscore + self.mlsent[lem_word]\n if mlsscore > self.max_score:\n self.max_score = mlsscore\n if mlsscore < self.min_score:\n self.min_score = mlsscore\n return mlsscore", "def classify(self, sText):\n sumPos = 0.0\n sumNeg = 0\n sumAll = 0.0\n sText = sText.lower()\n tokens = self.tokenize(sText)\n strPos = self.pos.keys()\n for i in range (len(self.pos)):\n sumAll += self.pos[strPos[i]]+self.neg[strPos[i]]\n for i in range (len(tokens)):\n if self.pos.has_key(tokens[i])==False:\n self.pos[tokens[i]]=0\n self.neg[tokens[i]]=0\n sumPos += math.log(float(self.pos[tokens[i]]+1)/float(sumAll))\n sumNeg += math.log(float(self.neg[tokens[i]]+1)/float(sumAll))\n print sumPos\n print sumNeg\n if sumPos >= sumNeg:\n return \"Positive\"\n else:\n return \"Negtive\"", "def negative_word(self, tweet):\n negative_words = set(['wrong', 'worst', 'warned', 'dont like', 'upset', 'ugh', 'bad']) # Using the tweet data to find negative words\n dense = self.tfidf_vectorizer.transform([tweet]).toarray()[0]\n dense = np.where(dense > 0)[0]\n terms = set([self.tfidf_vectorizer.get_feature_names()[x] for x in dense])\n return len(terms.intersection(negative_words))/(len(terms) + 1.0)", "def calculate_sentiment(positive_words,negative_words,tweet_text):\n\tpos = 0\n\tneg = 0\n\tfor x in tweet_text:\n\t\tif np.any(positive_words==x):\n\t\t\tpos+=1\n\t\telif np.any(negative_words==x):\n\t\t\tneg+=1\n\treturn(pos,neg)", "def positive_word(self, tweet):\n positive_words = set(['wow', 'beautiful', 'amazing', 'won', 'want', 'really cool', 'feel better', 'good']) # Constructing a set of postive words from tweet messages. \n dense = self.tfidf_vectorizer.transform([tweet]).toarray()[0] # Find the tokens of tweet which are part of vocabulary \n dense = np.where(dense > 0)[0] \n terms = set([self.tfidf_vectorizer.get_feature_names()[x] for x in dense]) # Converting the index list to actual feature names\n return len(terms.intersection(positive_words))/(len(terms) + 1.0) # Adding 1 in denominator to prevent division by 0. 
", "def get_score_for_english_word(self, lemma, wn_tag):\n pos_scores = []\n neg_scores = []\n for i in range(len(self._wnen_data[\"tag\"])):\n tag = self._wnen_data[\"tag\"][i]\n literals = self._wnen_data[\"literals\"][i]\n\n for lit in literals:\n if lit == lemma and tag == wn_tag:\n pos, neg = self._wnen_data[\"score\"][i]\n pos_scores.append(pos)\n neg_scores.append(neg)\n\n if len(pos_scores) > 0:\n return sum(pos_scores) / len(pos_scores), sum(neg_scores) / len(neg_scores)\n else:\n return -1, -1", "def calculate_score(self, score_data):\n asl = score_data['num_words'] / score_data['num_sentences']\n asw = score_data['num_syllables'] / score_data['num_words']\n return self.SCORE_CONSTANT - (1.015 * asl) - (84.6 * asw)", "def test_encode_sp_positive(self):\n sentence = [\"A\", \"B\"]\n\n for word in sentence:\n self.sign_index.add(word)\n\n ris = []\n for word in sentence:\n ri = self.sign_index.get_ri(word)\n ris.append(ri)\n\n result = ris_to_sp_tensor_value(ris, self.sign_index.feature_dim(), all_positive=True)\n print(result)", "def normalizeSentiment(scores):\n return (1 * scores['Positive'])+(0.5 * (scores['Neutral'] + scores['Mixed']))+(0 * scores['Negative'])", "def nltk_sentiment_analyzer(summary):\n score = SentimentIntensityAnalyzer().polarity_scores(summary)\n print(score)", "def _score_negative(self):\n negative_score = 0\n for result in self.response_results.values():\n result = float(result)\n if result < self.grace_period:\n pass\n else:\n result -= self.grace_period\n negative_score += 10*(log(result)/(log(self.review_length)))\n print negative_score\n return negative_score", "def predict(note):\n\n # Patterns for information extraction\n p = re.compile(r\"edss\", re.IGNORECASE)\n p_score = re.compile(r\"\\d\\.\\d\")\n p_num = re.compile(r\"zero|one|two|three|four|five|six|seven|eight|nine\", re.IGNORECASE)\n num_dict = {\n \"zero\":0,\n \"one\":1,\n \"two\":2,\n \"three\":3,\n \"four\":4,\n \"five\":5,\n \"six\":6,\n \"seven\":7,\n \"eight\":8,\n \"nine\":9\n }\n score = -1\n sentences = sent_tokenize(note)\n for sent in sentences:\n # Find sentence with \"EDSS\"\n if len(re.findall(p, sent)) > 0:\n # Find score with format \"x.x\"\n if len(re.findall(p_score, sent)) > 0:\n score = float(re.findall(p_score, sent)[0])\n break\n # Find score with format \"EDSS is x\"\n elif len(re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)) > 0:\n number = re.findall(r\"\\s+(?:0|1|2|3|4|5|6|7|8|9)(?:\\.|\\,|\\s+|\\))\", sent)[0]\n score = float(re.sub(r\"\\s|\\.|\\,|\\)\", r\"\", number))\n break\n # Find score writtent in \"zero/one ...\"\n elif len(re.findall(p_num, sent)) > 0:\n score = float(num_dict[re.findall(p_num, sent)[0].lower()])\n break\n \n if score not in [0.0, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5]:\n score = -1\n \n \n label_dict = {0.0:0,\n 1.0:1,\n 1.5:2,\n 2.0:3,\n 2.5:4,\n 3.0:5,\n 3.5:6,\n 4.0:7,\n 4.5:8,\n 5.0:9,\n 5.5:10,\n 6.0:11,\n 6.5:12,\n 7.0:13,\n 7.5:14,\n 8.0:15,\n 8.5:16,\n 9.0:17,\n 9.5:18,\n -1:-1}\n \n return label_dict[score]", "def sentence_positivity(list_1, list_positive, list_negative):\n\n sentence_weights = []\n\n negative_expressions = ['ne']\n\n list_1 = list(filter(None, list_1))\n\n for sentence in list_1:\n\n sentence_tokens = word_tokenize(sentence)\n sentence_score = 0\n \n # Iterate through each sentence\n for idx, word in enumerate(sentence_tokens):\n \n # If word 'ne' is found, change the whole sentence to 'ne'\n # and decrease positivity by -1 for each 
occurrence\n if word in negative_expressions:\n sentence_tokens[idx:] = ['ne'] * (len(sentence_tokens) - idx)\n sentence_score -= 1\n\n # Calculate positivity respectively\n for word_positive in list_positive:\n if(word == word_positive[0]):\n sentence_score += float(word_positive[1])\n elif word in negative_expressions: # Skips 'ne'\n continue\n\n # Calculate negativity respectively\n for word_negative in list_negative:\n if(word == word_negative[0]):\n sentence_score -= float(word_negative[1])\n elif word in negative_expressions: # Skips 'ne'\n continue\n \n try:\n sentence_score = sentence_score / len(sentence)\n except ZeroDivisionError:\n sentence_score = 0\n finally:\n # Add calculated positivity to a list\n sentence_weights.append(sentence_score)\n\n total_mean = np.mean(sentence_weights)\n\n return total_mean", "def seniority(self):\n s = sum(map(abs,self.occ['alpha'] - self.occ['beta']))\n return s", "def tone_count_with_negation_check(dict, article):\n pos_count = 0\n neg_count = 0\n \n pos_words = []\n neg_words = []\n \n input_words = lemmatizer(article)\n \n word_count = len(input_words)\n \n for i in range(0, word_count):\n if input_words[i] in dict['Negative']:\n neg_count += 1\n neg_words.append(input_words[i])\n if input_words[i] in dict['Positive']:\n if i >= 3:\n if negated(input_words[i - 1]) or negated(input_words[i - 2]) or negated(input_words[i - 3]):\n neg_count += 1\n neg_words.append(input_words[i] + ' (with negation)')\n else:\n pos_count += 1\n pos_words.append(input_words[i])\n elif i == 2:\n if negated(input_words[i - 1]) or negated(input_words[i - 2]):\n neg_count += 1\n neg_words.append(input_words[i] + ' (with negation)')\n else:\n pos_count += 1\n pos_words.append(input_words[i])\n elif i == 1:\n if negated(input_words[i - 1]):\n neg_count += 1\n neg_words.append(input_words[i] + ' (with negation)')\n else:\n pos_count += 1\n pos_words.append(input_words[i])\n elif i == 0:\n pos_count += 1\n pos_words.append(input_words[i])\n \n '''\n print('The results with negation check:', end='\\n\\n')\n print('The # of positive words:', pos_count)\n print('The # of negative words:', neg_count)\n print('The list of found positive words:', pos_words)\n print('The list of found negative words:', neg_words)\n print('\\n', end='')\n '''\n \n results = [word_count, pos_count, neg_count, pos_words, neg_words]\n \n return results", "def cal_sent_scores(self, sentence):\n word_count = 0\n max_word_pos_score = 0\n max_word_neg_score = 0\n for word, tag in sentence:\n pos_score = 0\n neg_score = 0\n synsets = self.iswn.senti_synsets(word, tag) \n num_synsets = len(synsets) \n word_pos_score = 0\n word_neg_score = 0\n if num_synsets >=1 : \n for synset in synsets:\n word_pos_score += synset.pos_score\n word_neg_score += synset.neg_score\n word_pos_score = word_pos_score/num_synsets #average synsets scores\n word_neg_score = word_neg_score/num_synsets\n if max_word_pos_score < word_pos_score :\n max_word_pos_score = word_pos_score\n if max_word_neg_score < word_neg_score :\n max_word_neg_score = word_neg_score\n \n return max_word_pos_score, max_word_neg_score", "def get_score(img):\n target = img.copy()\n\n target = get_drawing(target)\n\n pos = numpy.where(numpy.logical_and(target == 0, POS_SCORE == 0), 0, 255)\n neg = numpy.where(numpy.logical_and(target == 0, NEG_SCORE == 0), 0, 255)\n \n pos_count = numpy.count_nonzero(pos == 0)\n neg_count = numpy.count_nonzero(neg == 0)\n \n # negative threshold\n neg_count = 0 if neg_count < TOTAL_NEG/10 else neg_count - TOTAL_NEG/10\n \n 
# subsract negative pixels and scale with maximum scaler\n score = (pos_count - neg_count)/TOTAL_POS\n score = 0 if score < 0 else score\n \n return pos, neg, score", "def calc_word_value(word):\n score = 0\n for w in word.upper():\n for k, v in LETTER_SCORES.items():\n if k == w:\n score += v\n return score" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for calculating the positive and negative scores for an English word
def get_score_for_english_word(self, lemma, wn_tag):
    pos_scores = []
    neg_scores = []
    for i in range(len(self._wnen_data["tag"])):
        tag = self._wnen_data["tag"][i]
        literals = self._wnen_data["literals"][i]

        for lit in literals:
            if lit == lemma and tag == wn_tag:
                pos, neg = self._wnen_data["score"][i]
                pos_scores.append(pos)
                neg_scores.append(neg)

    if len(pos_scores) > 0:
        return sum(pos_scores) / len(pos_scores), sum(neg_scores) / len(neg_scores)
    else:
        return -1, -1
[ "def calc_word_value(word):\n score = 0\n for w in word.upper():\n for k, v in LETTER_SCORES.items():\n if k == w:\n score += v\n return score", "def calculate_sentiment(positive_words,negative_words,tweet_text):\n\tpos = 0\n\tneg = 0\n\tfor x in tweet_text:\n\t\tif np.any(positive_words==x):\n\t\t\tpos+=1\n\t\telif np.any(negative_words==x):\n\t\t\tneg+=1\n\treturn(pos,neg)", "def getWordScore(word):\r\n wordScore = 0\r\n for char in word:\r\n wordScore += getScore(char)\r\n return wordScore", "def spa_polarity_score(self, doc):\n mlsscore = 0\n for word in doc.split():\n lem_word = self.lemmatize_spa(word)\n if word in self.mlsent.keys():\n mlsscore = mlsscore + self.mlsent[word]\n elif lem_word in self.mlsent.keys():\n mlsscore = mlsscore + self.mlsent[lem_word]\n if mlsscore > self.max_score:\n self.max_score = mlsscore\n if mlsscore < self.min_score:\n self.min_score = mlsscore\n return mlsscore", "def negative_word(self, tweet):\n negative_words = set(['wrong', 'worst', 'warned', 'dont like', 'upset', 'ugh', 'bad']) # Using the tweet data to find negative words\n dense = self.tfidf_vectorizer.transform([tweet]).toarray()[0]\n dense = np.where(dense > 0)[0]\n terms = set([self.tfidf_vectorizer.get_feature_names()[x] for x in dense])\n return len(terms.intersection(negative_words))/(len(terms) + 1.0)", "def score_english(s):\n\n # Get frequency of each character\n count = Counter()\n for c in s:\n count[c.upper()] += 1\n\n # For each letter in the alphabet, see if this string's frequency % is\n # similar and score on how close it is.\n score = 0.0\n for key, value in ENG_FREQ_MAP.iteritems():\n freq = float(count[key]) / float(len(s)) * 100\n score += freq * value\n\n return score", "def cal_doc_scores(self, sentences) :\n doc_pos_score =0\n doc_neg_score = 0\n for label, pos, neg in sentences:\n if label != 0 :\n doc_pos_score += pos\n doc_neg_score += neg\n return doc_pos_score, doc_neg_score", "def score_word(word): \n score = 0\n words = get_scrabble_dictionary()\n if word.upper() in words:\n for i in word:\n lowered = i.lower()\n score += letter_scores[lowered]\n return score", "def analyze(self, text):\n # split sentences into words\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n \n score = 0\n \n for word in tokens:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n \n return score", "def get_english_count(msg):\n\n msg = msg.upper()\n msg = remove_non_letters(msg)\n possible_words = msg.split()\n\n if possible_words == []:\n return 0.0 # no English words in the message\n\n matches = 0\n for word in possible_words:\n if word in ENGLISH_WORDS:\n matches += 1\n return float(matches) / len(possible_words)", "def positive_word(self, tweet):\n positive_words = set(['wow', 'beautiful', 'amazing', 'won', 'want', 'really cool', 'feel better', 'good']) # Constructing a set of postive words from tweet messages. \n dense = self.tfidf_vectorizer.transform([tweet]).toarray()[0] # Find the tokens of tweet which are part of vocabulary \n dense = np.where(dense > 0)[0] \n terms = set([self.tfidf_vectorizer.get_feature_names()[x] for x in dense]) # Converting the index list to actual feature names\n return len(terms.intersection(positive_words))/(len(terms) + 1.0) # Adding 1 in denominator to prevent division by 0. 
", "def classify(self, sText):\n sumPos = 0.0\n sumNeg = 0\n sumAll = 0.0\n sText = sText.lower()\n tokens = self.tokenize(sText)\n strPos = self.pos.keys()\n for i in range (len(self.pos)):\n sumAll += self.pos[strPos[i]]+self.neg[strPos[i]]\n for i in range (len(tokens)):\n if self.pos.has_key(tokens[i])==False:\n self.pos[tokens[i]]=0\n self.neg[tokens[i]]=0\n sumPos += math.log(float(self.pos[tokens[i]]+1)/float(sumAll))\n sumNeg += math.log(float(self.neg[tokens[i]]+1)/float(sumAll))\n print sumPos\n print sumNeg\n if sumPos >= sumNeg:\n return \"Positive\"\n else:\n return \"Negtive\"", "def scrabble_score(word):\r\n if word == '':\r\n return 0\r\n else:\r\n t = letter_score(word[0])\r\n return t+scrabble_score(word[1:])", "def classify(self, words):\n \n posScore = math.log(1.0 * self.posExampleNum / (self.posExampleNum + self.negExampleNum))\n negScore = math.log(1.0 * self.negExampleNum / (self.posExampleNum + self.negExampleNum))\n posTermNum = len(self.posDict)\n negTermNum = len(self.negDict)\n \n for word in words:\n posScore += math.log(1.0 * (self.posDict.get(word, 0) + 1) / (self.posTokenNum + posTermNum))\n negScore += math.log(1.0 * (self.negDict.get(word, 0) + 1) / (self.negTokenNum + negTermNum))\n\n if posScore > negScore: return 'pos'\n else: return 'neg'", "def nltk_sentiment_analyzer(summary):\n score = SentimentIntensityAnalyzer().polarity_scores(summary)\n print(score)", "def getScore(word, letterScores):\n score = 0\n aIndex = ord('a')\n for c in word:\n index = ord(c) - aIndex\n score += letterScores[index]\n return score", "def calculate_score(self, score_data):\n asl = score_data['num_words'] / score_data['num_sentences']\n asw = score_data['num_syllables'] / score_data['num_words']\n return self.SCORE_CONSTANT - (1.015 * asl) - (84.6 * asw)", "def tone_count_with_negation_check(dict, article):\n pos_count = 0\n neg_count = 0\n \n pos_words = []\n neg_words = []\n \n input_words = lemmatizer(article)\n \n word_count = len(input_words)\n \n for i in range(0, word_count):\n if input_words[i] in dict['Negative']:\n neg_count += 1\n neg_words.append(input_words[i])\n if input_words[i] in dict['Positive']:\n if i >= 3:\n if negated(input_words[i - 1]) or negated(input_words[i - 2]) or negated(input_words[i - 3]):\n neg_count += 1\n neg_words.append(input_words[i] + ' (with negation)')\n else:\n pos_count += 1\n pos_words.append(input_words[i])\n elif i == 2:\n if negated(input_words[i - 1]) or negated(input_words[i - 2]):\n neg_count += 1\n neg_words.append(input_words[i] + ' (with negation)')\n else:\n pos_count += 1\n pos_words.append(input_words[i])\n elif i == 1:\n if negated(input_words[i - 1]):\n neg_count += 1\n neg_words.append(input_words[i] + ' (with negation)')\n else:\n pos_count += 1\n pos_words.append(input_words[i])\n elif i == 0:\n pos_count += 1\n pos_words.append(input_words[i])\n \n '''\n print('The results with negation check:', end='\\n\\n')\n print('The # of positive words:', pos_count)\n print('The # of negative words:', neg_count)\n print('The list of found positive words:', pos_words)\n print('The list of found negative words:', neg_words)\n print('\\n', end='')\n '''\n \n results = [word_count, pos_count, neg_count, pos_words, neg_words]\n \n return results", "def list_contains(list_1, list_positive, list_negative):\n total = 0\n\n for word in list_1:\n for word_positive in list_positive:\n if(word == word_positive[0]):\n total+=float(word_positive[1])\n\n for word_negative in list_negative:\n if(word == word_negative[0]):\n 
total-=float(word_negative[1]) \n\n try:\n total_score = total/len(list_1)\n except:\n total_score = 0\n pass\n\n return total_score" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test get salario total. This test checks that the Get_salario_total method of the Sucursal class works correctly.
def test_get_salario_total(self):
    # Create Empleado mocks
    emp1 = mock(Empleado)
    emp2 = mock(Empleado)

    # Create the branch (sucursal)
    suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1")

    # Stub the behaviour
    when(emp1).get_salario().thenReturn(1500)
    when(emp2).get_salario().thenReturn(1500)

    # Add the employees
    suc.aniadir_empleado(emp1)
    suc.aniadir_empleado(emp2)

    # Run the assertion
    self.assertEqual(suc.get_salario_total(), 3000)
[ "def test_get_user_totals(self):\n response = base.get_totals(self.credentials)\n self.assertEqual(response.status_code, 200)", "def test_get_salario_total_mensual(self):\n dep = Departamento(\"Desarrollo de pruebas\", 1)\n i = 1\n while i <= 3:\n emock = mock(Empleado)\n when(emock).get_salario_mensual().thenReturn((i * 1000) / 12.0)\n dep.anyadir_empleado(emock)\n i += 1\n self.assertEqual(dep.get_salario_total_mensual(), 6000 / 12.0)", "def test_total(self):\n\n self.assertEqual(self.basket_1.total(), 4.17)\n self.assertEqual(self.basket_2.total(), 6.01)", "def test_total_sales(self):\n order = create_order()\n self.assertEquals(Order.total_sales(), 2.5)", "def subtotal(self):\r\n return self.cantidad * self.precio", "def calcularSubtotal(self):", "def test_salary(self):\n self.assertEqual(self.test_manager.get_salary(), self.TEST_SALARY)", "def test__StickerCounts__total():\n animated = 1\n lottie = 2\n static = 4\n \n expected_value = animated + lottie + static\n \n sticker_counts = StickerCounts(\n animated = animated,\n lottie = lottie,\n static = static,\n )\n vampytest.assert_eq(sticker_counts.total, expected_value)", "def testGetTotalUsageUnitsPerMonth(self):\n selector = {\n 'apiUsageType': 'TOTAL_USAGE_API_UNITS_PER_MONTH'\n }\n self.assert_(isinstance(self.__class__.service.Get(selector), tuple))\n self.assertEqual(\n Utils.GetMethodCost(self.__class__.VERSION,\n self.__class__.service.__class__.__name__,\n 'get',\n client.GetLastOperations(),\n True),\n client.GetLastUnits())", "def test_sum(self):\n # Prepare arguments\n args = {'number_one': 1, 'number_two': 1}\n # Construct request\n r = requests.get(self.url, params=args)\n # Check that api result is equal to expected\n self.assertEqual(r.text, '2')", "def test__StickerCounts__normal_total():\n animated = 1\n static = 2\n \n expected_value = animated + static\n \n sticker_counts = StickerCounts(\n animated = animated,\n static = static,\n )\n vampytest.assert_eq(sticker_counts.normal_total, expected_value)", "def test_execute_sum_query(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, AzureInstanceTypeView)\n handler = AzureReportQueryHandler(query_params)\n\n filters = self.ten_day_filter\n for filt in handler._mapper.report_type_map.get(\"filter\"):\n qf = QueryFilter(**filt)\n filters.update({qf.composed_query_string(): qf.parameter})\n current_totals = self.get_totals_costs_by_time_scope(handler, filters)\n expected_cost_total = current_totals.get(\"cost_total\")\n self.assertIsNotNone(expected_cost_total)\n query_output = handler.execute_query()\n\n self.assertIsNotNone(query_output.get(\"data\"))\n self.assertIsNotNone(query_output.get(\"total\"))\n total = query_output.get(\"total\")\n self.assertIsNotNone(total.get(\"usage\", {}).get(\"value\"))\n self.assertEqual(total.get(\"usage\", {}).get(\"value\"), current_totals.get(\"usage\"))\n result_cost_total = total.get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertIsNotNone(result_cost_total)\n self.assertEqual(result_cost_total, expected_cost_total)", "def total(self):\n return self.sum.value", "def test_get_pages_total(self):\n additional_pages = self.spider._get_pages_total(self.fake_principal_index_page)\n self.assertEqual(34, additional_pages)", "def calcul_Salaire():\n salaireHoraire = 10 #definition du taux horraire\n nombreHeureTravail = 250 #definition du nombre d'heure de travail\n salaireMensuel = nombreHeureTravail * salaireHoraire #calcul du salaire mensuel\n if(nombreHeureTravail>160):\n salaireMensuel = salaireMensuel 
+ (1.25*salaireHoraire-salaireHoraire)*(nombreHeureTravail-160) #majoration a 25%\n if(nombreHeureTravail>200):\n salaireMensuel += (1.50*salaireHoraire-salaireHoraire)*(nombreHeureTravail-200) #majoration a 50%\n \n return salaireMensuel", "def test_get_total_amount_received(self):\n order = mommy.make('eshop.Order')\n mommy.make('eshop.OrderPayment', amount=100, order=order)\n mommy.make('eshop.OrderPayment', amount=220, order=order)\n self.assertEqual(order.get_total_amount_received(), 320)", "def total_profit(self):\n self.store_profit += self.sale_profit\n return self.store_profit", "def test_execute_sum_query_costs(self):\n url = \"?\"\n query_params = self.mocked_query_params(url, OCICostView)\n handler = OCIReportQueryHandler(query_params)\n current_totals = self.get_totals_costs_by_time_scope(handler, self.ten_day_filter)\n expected_cost_total = current_totals.get(\"cost_total\")\n self.assertIsNotNone(expected_cost_total)\n query_output = handler.execute_query()\n self.assertIsNotNone(query_output.get(\"data\"))\n self.assertIsNotNone(query_output.get(\"total\"))\n result_cost_total = query_output.get(\"total\").get(\"cost\", {}).get(\"total\", {}).get(\"value\")\n self.assertIsNotNone(result_cost_total)\n self.assertEqual(result_cost_total, expected_cost_total)", "def get_total_salary_and_loan_amounts(self):\n\n\t\ttotals = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tsum(principal_amount) as total_principal_amount,\n\t\t\t\tsum(interest_amount) as total_interest_amount,\n\t\t\t\tsum(total_loan_repayment) as total_loan_repayment,\n\t\t\t\tsum(rounded_total) as rounded_total\n\t\t\tfrom\n\t\t\t\t`tabSalary Slip` t1\n\t\t\twhere\n\t\t\t\tt1.docstatus = 1\n\t\t\tand\n\t\t\t\tstart_date >= %s\n\t\t\tand\n\t\t\t\tend_date <= %s\n\t\t\t\"\"\" % ('%s','%s'), (getdate(self.start_date), getdate(self.end_date)), as_dict=True)\n\t\treturn totals[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test aniadir empleado. This test checks that employees are correctly added to the branch's employee list.
def test_aniadir_empleado(self):
    # Create an Empleado mock
    emp1 = mock(Empleado)

    # Create the branch (sucursal)
    suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1")

    # Stub the behaviour
    when(emp1).get_ID().thenReturn(1)

    # Add the employee
    suc.aniadir_empleado(emp1)
    lista = suc.get_listaempleados()

    # Run the assertion
    self.assertEqual(lista[0].get_ID(), 1)
[ "def test_eliminar_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Eliminamos un empleado\n suc.eliminar_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_aniadir_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n\n # Creamos proveedor\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(pro1).get_ID().thenReturn(1)\n\n # Incluimos proveedor\n suc.aniadir_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_ajouterCreneau_mauvais_horaire(self):\n\t\td = [\"pas int\", 14, 18]\n\t\tf = [25, \"pas int\", 14]\n\t\ti = 0\n\t\tcible = Jour(15, LUNDI)\n\t\twhile i < len(f):\n\t\t\twith self.assertRaises(ValueError):\n\t\t\t\tcible.ajouterCreneau(d[i], f[i])\n\t\t\t#with\n\t\t\ti += 1\n\t\t#while", "def salasSinConectar(self):\n habitaciones = []\n for i in self.puertas:\n habitaciones.append(i.habitacion1.numero)\n habitaciones.append(i.habitacion2.numero)\n\n # Mirar todas la habitaciones\n for i in range(2, len(self.listaHabitaciones)):\n # Si no tienen las habitaciones en la lista implica que no tienen puerta y hay que generar la puerta\n if i not in habitaciones:\n habitacion1 = self.listaHabitaciones[i]\n posicion_puerta = random.uniform(0, 1)\n habitacion2 = self.listaHabitaciones[random.randint(2, len(self.listaHabitaciones) - 1)]\n\n while (not self.colisiona_puerta(habitacion1, posicion_puerta) and\n not self.colisiona_puerta(habitacion2, posicion_puerta)):\n posicion_puerta = random.uniform(0, 1)\n habitacion2 = self.listaHabitaciones[random.randint(2, len(self.listaHabitaciones) - 1)]\n\n self.puertas.append(Puerta(habitacion1, habitacion2, self.posicionPuerta(posicion_puerta)))", "def test_get_salario_total(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(emp1).get_salario().thenReturn(1500)\n when(emp2).get_salario().thenReturn(1500)\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Hacemos el test\n self.assertEqual(suc.get_salario_total(), 3000)", "def test_aniadir_producto(self):\n # Creamos mocks de Producto\n prod1 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(prod1).get_ID().thenReturn(1)\n\n # Incluimos producto\n suc.aniadir_producto(prod1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_equipo(self):\n u1 = Usuario.objects.create(username= 'juan',nombre = 'Isidro', apellido = 'Brizuela', password = 'isidro', cedula = 3841270)\n S3 = Sprint.objects.get(nombre='Sprint 3')\n E = Equipo()\n E.usuario = u1\n E.horas_sprint = 5\n E.save()\n S3.equipo.add(E)\n S3.save()\n\n print('Asignacion de equipo a Sprint ejecutada correctamente.')", "def test_inserer_plusieurs_element(self):\n\t\tcible = Jour(18)\n\t\toracle = [-254, 18, 56, 56, 256, 7852]\n\t\tfor i in [18, 256, 56, 7852, -254, 56]:\n\t\t\tcible.insererCreneau(i)\n\t\t#for\n\t\tself.assertEqual(cible.creneaux, oracle)", "def 
test_list_solicitudes(self):\n url = reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n }\n for i in range(4):\n self.client.post(url, data, format=\"json\")\n\n response = self.client.get(\n \"/api/seguimientos/solicitudes/list/\", format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(type(response.data[\"results\"]), ReturnList)\n self.assertEqual(len(response.data[\"results\"]), 4)", "def test_investidor_logado_com_investimentos(self):\n nizbel = User.objects.create_user('nizbel', 'nizbel@teste.com', 'nizbel')\n self.nizbel = nizbel.investidor \n \n # Cadastrar investimentos\n #CDB/RDB\n cdb_rdb_1 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 1', tipo='C', tipo_rendimento=2)\n HistoricoPorcentagemCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, porcentagem=Decimal(100))\n HistoricoCarenciaCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, carencia=Decimal(365))\n HistoricoVencimentoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, vencimento=Decimal(365))\n \n cdb_rdb_2 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 2', tipo='C', tipo_rendimento=2)\n HistoricoPorcentagemCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, porcentagem=Decimal(100))\n HistoricoCarenciaCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, carencia=Decimal(365))\n HistoricoVencimentoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, vencimento=Decimal(365))\n \n # CDB 1\n # Vence em 5 dias\n self.operacao_cdb_rdb_1 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_cdb_rdb_2 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_cdb_rdb_3 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_cdb_rdb_4 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # CDB 2\n # Vence em 4 dias\n self.operacao_cdb_rdb_5 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_cdb_rdb_6 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n # Vence em 6 dias\n self.operacao_cdb_rdb_7 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=359)), tipo_operacao='C')\n \n # CRI/CRA\n cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste 1', codigo_isin='BRCRITESTE1', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=370)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=360)),\n data_vencimento=(datetime.date.today() + 
datetime.timedelta(days=5)), investidor=self.nizbel)\n cri_cra_2 = CRI_CRA.objects.create(nome='CRI teste 3', codigo_isin='BRCRITESTE3', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=20)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=10)),\n data_vencimento=(datetime.date.today() + datetime.timedelta(days=355)), investidor=self.nizbel)\n \n # CRI 1\n # Vence em 5 dias\n self.operacao_cri_cra_1 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_1, preco_unitario=Decimal(1200), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=60)), tipo_operacao='C',\n taxa=0)\n # CRI 2\n # Vence em 355 dias\n self.operacao_cri_cra_2 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_2, preco_unitario=Decimal(1050), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=1)), tipo_operacao='C',\n taxa=0)\n \n # Debentures\n debenture_1 = Debenture.objects.create(codigo='TESTE91', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=370)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=360)), \n data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), incentivada=True, \n padrao_snd=True)\n HistoricoValorDebenture.objects.create(debenture=debenture_1, valor_nominal=1000, juros=35, premio=0, data=datetime.date.today())\n HistoricoValorDebenture.objects.create(debenture=debenture_1, valor_nominal=1000, juros=Decimal('34.3'), premio=0, \n data=datetime.date.today() - datetime.timedelta(days=1))\n \n debenture_2 = Debenture.objects.create(codigo='TESTE92', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=20)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=10)), \n data_vencimento=(datetime.date.today() + datetime.timedelta(days=355)), incentivada=True, \n padrao_snd=True)\n HistoricoValorDebenture.objects.create(debenture=debenture_2, valor_nominal=1000, juros=3, premio=0, data=datetime.date.today())\n HistoricoValorDebenture.objects.create(debenture=debenture_2, valor_nominal=1000, juros=Decimal('2.78'), premio=0, \n data=datetime.date.today() - datetime.timedelta(days=1))\n \n # Debenture 1\n # Vence em 5 dias\n self.operacao_deb_1 = OperacaoDebenture.objects.create(investidor=self.nizbel, debenture=debenture_1, preco_unitario=Decimal(1200),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=60)), taxa=0,\n tipo_operacao='C')\n # Debenture 2\n # Vence em 355 dias\n self.operacao_deb_2 = OperacaoDebenture.objects.create(investidor=self.nizbel, debenture=debenture_2, preco_unitario=Decimal(1050),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=1)), taxa=0,\n tipo_operacao='C')\n \n # LC\n lc_1 = LetraCambio.objects.create(investidor=self.nizbel, nome='LC teste 1', tipo_rendimento=2)\n HistoricoPorcentagemLetraCambio.objects.create(lc=lc_1, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCambio.objects.create(lc=lc_1, carencia=Decimal(365))\n HistoricoVencimentoLetraCambio.objects.create(lc=lc_1, vencimento=Decimal(365))\n \n lc_2 = LetraCambio.objects.create(investidor=self.nizbel, nome='LC teste 2', tipo_rendimento=2)\n HistoricoPorcentagemLetraCambio.objects.create(lc=lc_2, 
porcentagem=Decimal(100))\n HistoricoCarenciaLetraCambio.objects.create(lc=lc_2, carencia=Decimal(365))\n HistoricoVencimentoLetraCambio.objects.create(lc=lc_2, vencimento=Decimal(365))\n \n # LC 1\n # Vence em 5 dias\n self.operacao_lc_1 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_lc_2 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_lc_3 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_lc_4 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # LC 2\n # Vence em 4 dias\n self.operacao_lc_5 = OperacaoLetraCambio.objects.create(lc=lc_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_lc_6 = OperacaoLetraCambio.objects.create(lc=lc_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n \n # LCI/LCA\n lci_lca_1 = LetraCredito.objects.create(investidor=self.nizbel, nome='LCI teste 1', tipo_rendimento=2)\n HistoricoPorcentagemLetraCredito.objects.create(letra_credito=lci_lca_1, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCredito.objects.create(letra_credito=lci_lca_1, carencia=Decimal(365))\n HistoricoVencimentoLetraCredito.objects.create(letra_credito=lci_lca_1, vencimento=Decimal(365))\n \n lci_lca_2 = LetraCredito.objects.create(investidor=self.nizbel, nome='LCI teste 2', tipo_rendimento=2)\n HistoricoPorcentagemLetraCredito.objects.create(letra_credito=lci_lca_2, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCredito.objects.create(letra_credito=lci_lca_2, carencia=Decimal(365))\n HistoricoVencimentoLetraCredito.objects.create(letra_credito=lci_lca_2, vencimento=Decimal(365))\n \n # LCI 1\n # Vence em 5 dias\n self.operacao_lci_lca_1 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_lci_lca_2 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_lci_lca_3 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_lci_lca_4 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # LCI 2\n # Vence em 4 dias\n self.operacao_lci_lca_5 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n 
self.operacao_lci_lca_6 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n \n # Tesouro direto\n titulo_1 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n titulo_2 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() + datetime.timedelta(days=370)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n \n # Título 1\n # Vence em 5 dias\n self.operacao_titulo_1 = OperacaoTitulo.objects.create(investidor=self.nizbel, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=50)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_1, consolidada=True)\n \n # Título 2\n # Vence em 370 dias\n self.operacao_titulo_2 = OperacaoTitulo.objects.create(investidor=self.nizbel, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=50)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_2, consolidada=True)\n \n self.client.login(username='nizbel', password='nizbel')\n response = self.client.get(reverse('inicio:proximos_vencimentos'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n \n # Contexto\n self.assertIn('prox_vencimentos', response.context)\n self.assertEqual(len(response.context['prox_vencimentos']), 10)\n # Apenas os com vencimento mais recente deve estar na lista\n self.assertIn(self.operacao_cdb_rdb_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cdb_rdb_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cdb_rdb_7, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cri_cra_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_deb_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lc_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lc_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lci_lca_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lci_lca_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_titulo_1, response.context['prox_vencimentos'])", "def test_aniadir_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(inc1).get_id().thenReturn(1)\n\n # Incluimos incidencia\n suc.aniadir_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(lista[0].get_id(), 1)", "def test_create_solicitud_seguimiento_alum_invalidos(self):\n url = reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso3.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(SolicitudSeguimiento.objects.count(), 0)", "def test_inserer_1_element(self):\n\t\tcible = Jour(18)\n\t\tvaleur = 1254\n\t\toracle = [valeur]\n\t\tcible.insererCreneau(valeur)\n\t\t\n\t\tself.assertFalse(not cible.creneaux)\n\t\tself.assertEqual(cible.creneaux, oracle)", "def test_create_solicitud_seguimiento(self):\n url = 
reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(SolicitudSeguimiento.objects.count(), 1)\n self.assertEqual(\n response.data[\"estado\"][0][\"estado_solicitud\"], \"Pendiente\"\n )\n self.assertEqual(\n SolicitudSeguimiento.objects.get().motivo_solicitud,\n \"Motivo muy justificado\",\n )", "def test_eliminar_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n pro2 = mock(Proveedor)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos proveedores\n suc.aniadir_proveedor(pro1)\n suc.aniadir_proveedor(pro2)\n # Eliminamos un proveedor\n suc.eliminar_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminar_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n inc2 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos incidencias\n suc.aniadir_incidencia(inc1)\n suc.aniadir_incidencia(inc2)\n # Eliminamos una incidencia\n suc.eliminar_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_add_employee(self):\n empId = random.randint(100000, 9999999)\n fake = Faker()\n first_name = fake.first_name()\n last_name = fake.last_name()\n expected_job_title = 'QA Manager'\n expected_job_status = 'Full-time'\n\n driver = self.driver\n driver.find_element_by_id('txtUsername').send_keys('admin')\n driver.find_element_by_id('txtPassword').send_keys('Password')\n driver.find_element_by_id('btnLogin').click()\n\n welcome_text = get_welcome_message(driver)\n\n self.assertEqual('Welcome Admin', welcome_text)\n\n driver.find_element_by_id('menu_pim_viewPimModule').click()\n driver.find_element_by_id('btnAdd').click()\n driver.find_element_by_id('firstName').send_keys(first_name)\n driver.find_element_by_id('lastName').send_keys(last_name)\n driver.find_element_by_id('employeeId').clear()\n driver.find_element_by_id('employeeId').send_keys(empId)\n driver.find_element_by_id('btnSave').click()\n\n driver.find_element_by_xpath('//*[@id=\"sidenav\"]/li[6]/a').click()\n # driver.find_element_by_link_text('Job').click()\n driver.find_element_by_id('btnSave').click()\n driver.find_element(By.TAG_NAME,'iframe')\n\n Select(driver.find_element_by_id('job_job_title')).select_by_visible_text(expected_job_title)\n Select(driver.find_element_by_id('job_emp_status')).select_by_visible_text(expected_job_status)\n\n driver.find_element_by_id('btnSave').click()\n locator = (By.CSS_SELECTOR, '.message.success')\n self.wait.until(expected_conditions.presence_of_element_located(locator))\n\n\n driver.find_element_by_id('menu_pim_viewPimModule').click()\n driver.find_element_by_id('empsearch_id').send_keys(empId)\n driver.find_element_by_id('searchBtn').click()\n\n #Expected: 1 record back\n # self.assertTrue(len(driver.find_elements_by_xpath('//td[3]/a' ))== 1)\n\n #expected correct name and empID\n firstName = driver.find_element_by_xpath('//td[3]/a').text\n lastName = driver.find_element_by_xpath('//td[4]/a').text\n employeeId = driver.find_element_by_xpath('//td[2]/a').text\n job_title = driver.find_element_by_xpath('//td[5]').text\n emp_status = 
driver.find_element_by_xpath('//td[6]').text\n\n self.assertEqual(first_name,firstName)\n self.assertEqual(last_name,lastName)\n self.assertEqual(empId,int(employeeId))\n self.assertEqual(job_title,expected_job_title)\n self.assertEqual(emp_status, expected_job_status)", "def test_get_salario_total_mensual(self):\n dep = Departamento(\"Desarrollo de pruebas\", 1)\n i = 1\n while i <= 3:\n emock = mock(Empleado)\n when(emock).get_salario_mensual().thenReturn((i * 1000) / 12.0)\n dep.anyadir_empleado(emock)\n i += 1\n self.assertEqual(dep.get_salario_total_mensual(), 6000 / 12.0)", "def mostrar_todos(self):\r\n resgistros_estatus=self.obj_conexion.mostrar_registros()\r\n if resgistros_estatus:\r\n #llama a la funcion mostrar_registros de la clase conexion_equipos\r\n registros=self.obj_conexion.mostrar_registros()\r\n #si el estatus es true\r\n if registros:\r\n print(Fore.GREEN+str(registros)+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" No hay registros en la base de datos\"+Fore.RESET,Back.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" No hay registros en el base de datos, Debe agregar un registro primero\"+Fore.RESET,Back.RESET)\r\n opcion_valida=input(Fore.YELLOW+\" Desea crear un nuevo registro? (S/N): \"+Fore.RESET)\r\n if opcion_valida.upper() in [\"S\"]:\r\n self.registra()\r\n else:\r\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test aniadir producto. This test checks that products are correctly added to the branch's product list.
def test_aniadir_producto(self):
    # Create a Producto mock
    prod1 = mock(Producto)

    # Create the branch (sucursal)
    suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1")

    # Stub the behaviour
    when(prod1).get_ID().thenReturn(1)

    # Add the product
    suc.aniadir_producto(prod1)
    lista = suc.get_listaproductos()

    # Run the assertion
    self.assertEqual(lista[0].get_ID(), 1)
[ "def test_eliminar_producto(self):\n # Creamos mocks de Producto\n pro1 = mock(Producto)\n pro2 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos productos\n suc.aniadir_producto(pro1)\n suc.aniadir_producto(pro2)\n # Eliminamos un producto\n suc.eliminar_producto(pro1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_aniadir_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n\n # Creamos proveedor\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(pro1).get_ID().thenReturn(1)\n\n # Incluimos proveedor\n suc.aniadir_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_product(self):\n self.assertEqual(len(self.first_menu.search_product('tomato')), 50)\n self.assertEqual(\n len(self.second_menu.search_product('mango juice')), 50)\n self.first_menu.choose_product('tomatoes, raw', 100)\n self.assertAlmostEqual(self.first_menu.daily_calories, 18.0)\n self.assertAlmostEqual(self.first_menu.daily_proteins, 0.88)\n self.assertAlmostEqual(self.first_menu.daily_fats, 0.2)\n self.assertAlmostEqual(self.first_menu.daily_carbohydrates, 3.89)\n self.first_menu.choose_product('mango nectar', 150)\n self.assertAlmostEqual(self.first_menu.daily_calories, 94.5)\n self.assertAlmostEqual(self.first_menu.daily_proteins, 1.045)\n self.assertAlmostEqual(self.first_menu.daily_fats, 0.29)\n self.assertAlmostEqual(self.first_menu.daily_carbohydrates, 23.54)", "def test_product_creation(self):\n\n old_products = Product.objects.count()\n create_products(self.products['products'])\n new_products = Product.objects.count()\n self.assertNotEqual(old_products, new_products)", "def test_add_product(self, open_product_page):\n product_page = open_product_page\n old_names = product_page.get_product_names()\n product_page.add_product()\n assert product_page.title() == \"Products\"\n product_page.get_photo(\"add_name.png\")\n new_names = product_page.get_product_names()\n assert all([i in new_names for i in old_names])\n assert len(new_names) == len(old_names) + 1", "def test_eliminar_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n pro2 = mock(Proveedor)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos proveedores\n suc.aniadir_proveedor(pro1)\n suc.aniadir_proveedor(pro2)\n # Eliminamos un proveedor\n suc.eliminar_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_preco_produto() -> None:\n lista_de_preco = [49.90, 170.00, 32.50]\n preco_mais_barato = 32.50\n assert preco_produto_mais_barato(lista_de_preco) == preco_mais_barato", "def test_products_are_products(self):\r\n prd1 = prd.Product.objects.get(\r\n code='0000000000001',\r\n name='product 001',\r\n generic_name='product prd 001',\r\n brands='Brand of prd 001',\r\n stores='stores001',\r\n url='url001')\r\n\r\n prd2 = prd.Product.objects.get(\r\n code='0000000000002',\r\n name='product 002',\r\n generic_name='product prd 002',\r\n brands='Brand of prd 002',\r\n stores='stores002',\r\n url='url002')\r\n\r\n self.assertEqual(prd1.code, '0000000000001')\r\n self.assertEqual(prd2.code, '0000000000002')", "def test_can_add_multiple_items_of_product_to_cart(self):\n product_to_add = Product.objects.get(sku=add_demo_products.Command.SKU001)\n num_to_add = 3\n\n 
self.client.force_login(self.customer)\n\n # Add the items twice.\n response = self.client.post(\n reverse(\"add-to-cart\"),\n {\n 'pk': product_to_add.pk,\n 'num_items': num_to_add\n }\n )\n self.assertEqual(response.status_code, 200)\n\n response = self.client.post(\n reverse(\"add-to-cart\"),\n {\n 'pk': product_to_add.pk,\n 'num_items': num_to_add\n }\n )\n self.assertEqual(response.status_code, 200) \n\n # Check what's in the cart.\n cart = helpers.find_active_cart_for_user(self.customer)\n cart_item = cart.shopping_cart_items.select_related(\"product\").get(product=product_to_add)\n self.assertEqual(cart_item.num_items, 2*num_to_add)\n\n self.client.logout()", "def test_products_count_result(self):\n self.add_products()\n self.assertEqual(3, self.admin.products_count(self.company))", "def test_default_num_products(self):\r\n self.assertEqual(len(generate_products()), 30)", "def test_manager_can_add_new_product(self):\n SKU = \"sku\"\n NAME = \"name\"\n DESCRIPTION = \"description\"\n PRICE = \"price\"\n NUM_IN_STOCK = \"num_in_stock\"\n\n test_product_details = {\n SKU: \"SKU005\",\n NAME: \"testname\",\n DESCRIPTION: \"test description\",\n PRICE: decimal.Decimal(\"9.99\"),\n NUM_IN_STOCK: 123\n }\n\n # Create the new product.\n self.client.force_login(self.manager)\n response = self.client.post(\n reverse(\"product-management\"),\n test_product_details\n )\n # TODO: The standard HTTP status for \"created\" would be 201.\n self.assertEqual(response.status_code, 200)\n\n # Find the new product and check that the details match.\n product = Product.objects.get(sku=test_product_details[SKU])\n\n self.assertEqual(product.sku, test_product_details[SKU])\n self.assertEqual(product.name, test_product_details[NAME])\n self.assertEqual(product.description, test_product_details[DESCRIPTION])\n self.assertEqual(product.price, test_product_details[PRICE])\n self.assertEqual(product.num_in_stock, test_product_details[NUM_IN_STOCK])\n\n self.client.logout()", "def create_products():", "def test_aniadir_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(emp1).get_ID().thenReturn(1)\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_create_product(self):\n access_token = self.user_token_get()\n response = self.client().post('/api/v1/products', data=self.add_product,\n content_type='application/json',\n headers=dict(Authorization=\"Bearer \" + access_token),\n )\n self.assertEqual(response.status_code, 201)", "def test_added_product_exists(self):\n product = Product.objects.get(unitprice=4.1)\n self.assertEqual(product.productname, \"Hillo\")", "def test_add_product_to_order(self):\n # Add product to order\n url = \"/cart\"\n data = { \"product_id\": 1 }\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # Get cart and verify product was added\n url = \"/cart\"\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)\n response = self.client.get(url, None, format='json')\n json_response = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(json_response[\"id\"], 1)\n self.assertEqual(json_response[\"size\"], 1)\n 
self.assertEqual(len(json_response[\"lineitems\"]), 1)", "def test_custom_product(self):\n pm = config_get(\"PRODUCT\", \"PRODUCT_TYPES\")\n pm.update([\"product::ConfigurableProduct\", \"product::ProductVariation\", \"product::CustomProduct\", \"product::SubscriptionProduct\"])\n\n response = self.client.get(prefix + \"/\")\n self.assertContains(response, \"Computer\", count=1)\n response = self.client.get(prefix + \"/product/satchmo-computer/\")\n self.assertContains(response, \"Memory\", count=1)\n self.assertContains(response, \"Case\", count=1)\n self.assertContains(response, \"Monogram\", count=1)\n response = self.client.post(prefix + '/cart/add/', {\"productname\": \"satchmo-computer\",\n \"5\": \"1.5gb\",\n \"6\": \"mid\",\n \"custom_monogram\": \"CBM\",\n \"quantity\": 1\n })\n self.assertRedirects(response, prefix + '/cart/',\n status_code=302, target_status_code=200)\n response = self.client.get(prefix + '/cart/')\n self.assertContains(response, '/satchmo-computer/\">satchmo computer', status_code=200)\n self.assertContains(response, smart_str(\"%s168.00\" % config_value('SHOP', 'CURRENCY')), count=4)\n self.assertContains(response, smart_str(\"Monogram: CBM %s10.00\" % config_value('SHOP', 'CURRENCY')), count=1)\n self.assertContains(response, smart_str(\"Case - External Case: Mid %s10.00\" % config_value('SHOP', 'CURRENCY')), count=1)\n self.assertContains(response, smart_str(\"Memory - Internal RAM: 1.5 GB %s25.00\" % config_value('SHOP', 'CURRENCY')), count=1)\n response = self.client.post(url('satchmo_checkout-step1'), get_step1_post_data(self.US))\n self.assertRedirects(response, url('DUMMY_satchmo_checkout-step2'),\n status_code=302, target_status_code=200)\n data = {\n 'credit_type': 'Visa',\n 'credit_number': '4485079141095836',\n 'month_expires': '1',\n 'year_expires': '2012',\n 'ccv': '552',\n 'shipping': 'FlatRate'}\n response = self.client.post(url('DUMMY_satchmo_checkout-step2'), data)\n self.assertRedirects(response, url('DUMMY_satchmo_checkout-step3'),\n status_code=302, target_status_code=200)\n response = self.client.get(url('DUMMY_satchmo_checkout-step3'))\n self.assertContains(response, smart_str(\"satchmo computer - %s168.00\" % config_value('SHOP', 'CURRENCY')), count=1, status_code=200)\n response = self.client.post(url('DUMMY_satchmo_checkout-step3'), {'process': 'True'})\n self.assertRedirects(response, url('DUMMY_satchmo_checkout-success'),\n status_code=302, target_status_code=200)\n self.assertEqual(len(mail.outbox), 1)", "def test_payload_add_products_success(\n talpa_ecom_payment_provider: TalpaEComProvider,\n order_with_products: Order,\n default_talpa_product_accounting: List[TalpaProductAccounting],\n):\n payload = {}\n talpa_ecom_payment_provider.payload_add_products(\n payload,\n order_with_products,\n order_with_products.lease.application.language,\n )\n\n assert payload[\"priceNet\"] == rounded(\n order_with_products.total_pretax_price, as_string=True\n )\n assert payload[\"priceTotal\"] == rounded(\n order_with_products.total_price, as_string=True\n )\n assert payload[\"priceVat\"] == str(\n rounded(order_with_products.total_price)\n - rounded(order_with_products.total_pretax_price)\n )\n\n assert \"items\" in payload\n products = payload.get(\"items\")\n assert len(products) == 5 # 1 place product + 4 additional products\n # As there's no guaranteed order in nested dict, it's not possible\n # to check reliably for values, but at least assert that all keys are added\n for product in products:\n keys = set(product.keys()) - {\n 
\"productId\",\n \"quantity\",\n \"productName\",\n \"unit\",\n \"rowPriceNet\",\n \"rowPriceVat\",\n \"rowPriceTotal\",\n \"vatPercentage\",\n \"priceNet\",\n \"priceVat\",\n \"priceGross\",\n \"meta\",\n }\n assert len(keys) == 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test aniadir incidencia This test verifies that incidents are added correctly to the Sucursal's list of incidents.
def test_aniadir_incidencia(self):
    # Creamos mocks de Incidencia
    inc1 = mock(Incidencia)

    # Creamos sucursal
    suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1")

    # Simulamos comportamiento
    when(inc1).get_id().thenReturn(1)

    # Incluimos incidencia
    suc.aniadir_incidencia(inc1)
    lista = suc.get_listaincidencias()
    # Hacemos el test
    self.assertEqual(lista[0].get_id(), 1)
[ "def test_eliminar_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n inc2 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos incidencias\n suc.aniadir_incidencia(inc1)\n suc.aniadir_incidencia(inc2)\n # Eliminamos una incidencia\n suc.eliminar_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_aniadir_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(emp1).get_ID().thenReturn(1)\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_add_incident(self):\n alert = Alert.objects.get(pk=1)\n old_incidents = alert.incidents\n alert.add_incident()\n alert_updated = Alert.objects.get(pk=1)\n self.assertEqual(alert_updated.incidents, old_incidents + 1)", "def test_get_all_inicidents(self):\n\n # when empty should return 404\n response = self.app.get(\"/api/v1/incidents\")\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(result[\"status\"], 404)\n\n # add an incident\n self.add_incident(\"Corruption Case 1\")\n\n # when poulates should return status 200\n response = self.app.get(\"/api/v1/incidents\")\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result[\"status\"], 200)", "def test_aniadir_producto(self):\n # Creamos mocks de Producto\n prod1 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(prod1).get_ID().thenReturn(1)\n\n # Incluimos producto\n suc.aniadir_producto(prod1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_aniadir_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n\n # Creamos proveedor\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(pro1).get_ID().thenReturn(1)\n\n # Incluimos proveedor\n suc.aniadir_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_investidor_logado_com_investimentos_vencidos(self):\n vencido = User.objects.create_user('vencido', 'vencido@teste.com', 'vencido')\n self.vencido = vencido.investidor \n \n # Cadastrar investimentos\n # CRI/CRA\n cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste 1', codigo_isin='BRCRITESTE1', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=470)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=460)),\n data_vencimento=(datetime.date.today() - datetime.timedelta(days=95)), investidor=self.vencido)\n \n # CRI 1\n self.operacao_cri_cra_1 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_1, preco_unitario=Decimal(1200), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=160)), tipo_operacao='C',\n taxa=0)\n \n # Debentures\n debenture_1 = Debenture.objects.create(codigo='TESTE91', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=470)), 
valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=460)), \n data_vencimento=(datetime.date.today() - datetime.timedelta(days=95)), incentivada=True, \n padrao_snd=True)\n \n # Debenture 1\n self.operacao_deb_1 = OperacaoDebenture.objects.create(investidor=self.vencido, debenture=debenture_1, preco_unitario=Decimal(1200),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=160)), taxa=0,\n tipo_operacao='C')\n \n # Tesouro direto\n titulo_1 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() - datetime.timedelta(days=95)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n \n # Título 1\n self.operacao_titulo_1 = OperacaoTitulo.objects.create(investidor=self.vencido, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=150)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_1, consolidada=True)\n \n \n self.client.login(username='vencido', password='vencido')\n response = self.client.get(reverse('inicio:proximos_vencimentos'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n \n # Contexto\n self.assertIn('prox_vencimentos', response.context)\n self.assertEqual(response.context['prox_vencimentos'], [])", "def test_process_add_client_schedule(self):\n error = self.process_add_client_schedule()\n for err in error: assert err == 0", "def test_get_legisladores_comisiones_por_ids(self):\n response = self.client.get('/apirest/legisladores_comisiones/?id=1,2,3') \n self.assertEqual(response.status_code, self.CODIGO_EXITO)\n self.assertEqual(response.data[\"count\"], self.CANT_COMISION_IDS)", "def test_investidor_logado_com_investimentos(self):\n nizbel = User.objects.create_user('nizbel', 'nizbel@teste.com', 'nizbel')\n self.nizbel = nizbel.investidor \n \n # Cadastrar investimentos\n #CDB/RDB\n cdb_rdb_1 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 1', tipo='C', tipo_rendimento=2)\n HistoricoPorcentagemCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, porcentagem=Decimal(100))\n HistoricoCarenciaCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, carencia=Decimal(365))\n HistoricoVencimentoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, vencimento=Decimal(365))\n \n cdb_rdb_2 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 2', tipo='C', tipo_rendimento=2)\n HistoricoPorcentagemCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, porcentagem=Decimal(100))\n HistoricoCarenciaCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, carencia=Decimal(365))\n HistoricoVencimentoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, vencimento=Decimal(365))\n \n # CDB 1\n # Vence em 5 dias\n self.operacao_cdb_rdb_1 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_cdb_rdb_2 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_cdb_rdb_3 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_cdb_rdb_4 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n 
data=datetime.date.today(), tipo_operacao='C')\n \n # CDB 2\n # Vence em 4 dias\n self.operacao_cdb_rdb_5 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_cdb_rdb_6 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n # Vence em 6 dias\n self.operacao_cdb_rdb_7 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=359)), tipo_operacao='C')\n \n # CRI/CRA\n cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste 1', codigo_isin='BRCRITESTE1', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=370)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=360)),\n data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), investidor=self.nizbel)\n cri_cra_2 = CRI_CRA.objects.create(nome='CRI teste 3', codigo_isin='BRCRITESTE3', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=20)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=10)),\n data_vencimento=(datetime.date.today() + datetime.timedelta(days=355)), investidor=self.nizbel)\n \n # CRI 1\n # Vence em 5 dias\n self.operacao_cri_cra_1 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_1, preco_unitario=Decimal(1200), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=60)), tipo_operacao='C',\n taxa=0)\n # CRI 2\n # Vence em 355 dias\n self.operacao_cri_cra_2 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_2, preco_unitario=Decimal(1050), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=1)), tipo_operacao='C',\n taxa=0)\n \n # Debentures\n debenture_1 = Debenture.objects.create(codigo='TESTE91', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=370)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=360)), \n data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), incentivada=True, \n padrao_snd=True)\n HistoricoValorDebenture.objects.create(debenture=debenture_1, valor_nominal=1000, juros=35, premio=0, data=datetime.date.today())\n HistoricoValorDebenture.objects.create(debenture=debenture_1, valor_nominal=1000, juros=Decimal('34.3'), premio=0, \n data=datetime.date.today() - datetime.timedelta(days=1))\n \n debenture_2 = Debenture.objects.create(codigo='TESTE92', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=20)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=10)), \n data_vencimento=(datetime.date.today() + datetime.timedelta(days=355)), incentivada=True, \n padrao_snd=True)\n HistoricoValorDebenture.objects.create(debenture=debenture_2, valor_nominal=1000, juros=3, premio=0, data=datetime.date.today())\n HistoricoValorDebenture.objects.create(debenture=debenture_2, 
valor_nominal=1000, juros=Decimal('2.78'), premio=0, \n data=datetime.date.today() - datetime.timedelta(days=1))\n \n # Debenture 1\n # Vence em 5 dias\n self.operacao_deb_1 = OperacaoDebenture.objects.create(investidor=self.nizbel, debenture=debenture_1, preco_unitario=Decimal(1200),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=60)), taxa=0,\n tipo_operacao='C')\n # Debenture 2\n # Vence em 355 dias\n self.operacao_deb_2 = OperacaoDebenture.objects.create(investidor=self.nizbel, debenture=debenture_2, preco_unitario=Decimal(1050),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=1)), taxa=0,\n tipo_operacao='C')\n \n # LC\n lc_1 = LetraCambio.objects.create(investidor=self.nizbel, nome='LC teste 1', tipo_rendimento=2)\n HistoricoPorcentagemLetraCambio.objects.create(lc=lc_1, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCambio.objects.create(lc=lc_1, carencia=Decimal(365))\n HistoricoVencimentoLetraCambio.objects.create(lc=lc_1, vencimento=Decimal(365))\n \n lc_2 = LetraCambio.objects.create(investidor=self.nizbel, nome='LC teste 2', tipo_rendimento=2)\n HistoricoPorcentagemLetraCambio.objects.create(lc=lc_2, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCambio.objects.create(lc=lc_2, carencia=Decimal(365))\n HistoricoVencimentoLetraCambio.objects.create(lc=lc_2, vencimento=Decimal(365))\n \n # LC 1\n # Vence em 5 dias\n self.operacao_lc_1 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_lc_2 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_lc_3 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_lc_4 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # LC 2\n # Vence em 4 dias\n self.operacao_lc_5 = OperacaoLetraCambio.objects.create(lc=lc_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_lc_6 = OperacaoLetraCambio.objects.create(lc=lc_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n \n # LCI/LCA\n lci_lca_1 = LetraCredito.objects.create(investidor=self.nizbel, nome='LCI teste 1', tipo_rendimento=2)\n HistoricoPorcentagemLetraCredito.objects.create(letra_credito=lci_lca_1, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCredito.objects.create(letra_credito=lci_lca_1, carencia=Decimal(365))\n HistoricoVencimentoLetraCredito.objects.create(letra_credito=lci_lca_1, vencimento=Decimal(365))\n \n lci_lca_2 = LetraCredito.objects.create(investidor=self.nizbel, nome='LCI teste 2', tipo_rendimento=2)\n HistoricoPorcentagemLetraCredito.objects.create(letra_credito=lci_lca_2, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCredito.objects.create(letra_credito=lci_lca_2, carencia=Decimal(365))\n HistoricoVencimentoLetraCredito.objects.create(letra_credito=lci_lca_2, vencimento=Decimal(365))\n \n # LCI 1\n # Vence em 5 dias\n 
self.operacao_lci_lca_1 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_lci_lca_2 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_lci_lca_3 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_lci_lca_4 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # LCI 2\n # Vence em 4 dias\n self.operacao_lci_lca_5 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_lci_lca_6 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n \n # Tesouro direto\n titulo_1 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n titulo_2 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() + datetime.timedelta(days=370)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n \n # Título 1\n # Vence em 5 dias\n self.operacao_titulo_1 = OperacaoTitulo.objects.create(investidor=self.nizbel, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=50)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_1, consolidada=True)\n \n # Título 2\n # Vence em 370 dias\n self.operacao_titulo_2 = OperacaoTitulo.objects.create(investidor=self.nizbel, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=50)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_2, consolidada=True)\n \n self.client.login(username='nizbel', password='nizbel')\n response = self.client.get(reverse('inicio:proximos_vencimentos'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n \n # Contexto\n self.assertIn('prox_vencimentos', response.context)\n self.assertEqual(len(response.context['prox_vencimentos']), 10)\n # Apenas os com vencimento mais recente deve estar na lista\n self.assertIn(self.operacao_cdb_rdb_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cdb_rdb_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cdb_rdb_7, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cri_cra_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_deb_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lc_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lc_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lci_lca_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lci_lca_5, response.context['prox_vencimentos'])\n 
self.assertIn(self.operacao_titulo_1, response.context['prox_vencimentos'])", "def test_list_solicitudes(self):\n url = reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n }\n for i in range(4):\n self.client.post(url, data, format=\"json\")\n\n response = self.client.get(\n \"/api/seguimientos/solicitudes/list/\", format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(type(response.data[\"results\"]), ReturnList)\n self.assertEqual(len(response.data[\"results\"]), 4)", "def test_list_integrantes(self):\n url = reverse(\"seguimiento-create\")\n data = {\n \"anio_lectivo\": self.anio_lectivo.pk,\n \"nombre\": \"Primer Seguimiento\",\n \"descripcion\": \"La gran descripción de este seguimiento\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n \"fecha_cierre\": \"12/12/2020\",\n \"integrantes\": [\n {\"usuario\": self.user.pk, \"rol\": self.rol_pedagogo.pk},\n {\"usuario\": self.user_docente.pk, \"rol\": self.rol_profesor.pk},\n ],\n \"materias\": [self.materia.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n\n response = self.client.get(\n f\"/api/seguimientos/{response.data['id']}/integrantes/list/\",\n data,\n format=\"json\",\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(type(response.data), ReturnList)", "def test_process_add_schedule_client(self):\n error = self.process_add_schedule_client()\n for err in error: assert err == 0", "def test_add(self):\n r = self.client.get(reverse('makeReports:add-announ'))\n self.assertEquals(r.status_code,200)\n r = self.client.post(reverse('makeReports:add-announ'),{\n 'text':'All reports must be submitted this week.',\n 'expiration_month':2,\n 'expiration_day': 17,\n 'expiration_year':2020\n })\n num = Announcement.objects.filter(text='All reports must be submitted this week.',expiration=date(2020,2,17)).count()\n self.assertEquals(num,1)", "def test__parse_add_negocios_realizados(self):\n print(\"_parse_add_negocios_realizados\")\n\n id_test_cases = [0, 1, 2, 3, 4, 5, 6]\n for id_test in id_test_cases:\n in_case = hio.import_object_as_literal(\n os.path.join(\n path_data,\n f\"_parse_add_negocios_realizados_table_negocios_realizados_{id_test}.in\",\n )\n )\n out_case = hio.import_object_as_literal(\n os.path.join(\n path_data,\n f\"_parse_add_negocios_realizados_table_negocios_realizados_{id_test}.out\",\n )\n )\n out_test = pystock.parse_notas_corretagem._parse_add_negocios_realizados(\n in_case, out_case[0][-1]\n )\n self.assertListEqual(out_case, out_test)", "def test_create_solicitud_seguimiento_alum_invalidos(self):\n url = reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso3.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(SolicitudSeguimiento.objects.count(), 0)", "def test_create_seguimiento_con_integrantes(self):\n url = reverse(\"seguimiento-create\")\n data = {\n \"anio_lectivo\": self.anio_lectivo.pk,\n \"nombre\": \"Primer Seguimiento\",\n \"descripcion\": \"La gran descripción de este seguimiento\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n \"integrantes\": [\n {\"usuario\": self.user.pk, \"rol\": self.rol_pedagogo.pk},\n {\"usuario\": self.user_docente.pk, \"rol\": 
self.rol_profesor.pk},\n ],\n \"fecha_cierre\": \"12/12/2021\",\n \"materias\": [self.materia.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Seguimiento.objects.count(), 1)\n self.assertEqual(\n Seguimiento.objects.get().nombre, \"PRIMER SEGUIMIENTO\"\n )", "def test_create_solicitud_seguimiento(self):\n url = reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(SolicitudSeguimiento.objects.count(), 1)\n self.assertEqual(\n response.data[\"estado\"][0][\"estado_solicitud\"], \"Pendiente\"\n )\n self.assertEqual(\n SolicitudSeguimiento.objects.get().motivo_solicitud,\n \"Motivo muy justificado\",\n )", "def test_add_proper_red_flag(self):\n response = base.post_incident(self.credentials)\n self.assertEqual(response.status_code, 201)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test aniadir proveedor This test verifies that suppliers are added correctly to the Sucursal's list of suppliers.
def test_aniadir_proveedor(self):
    # Creamos mocks de Proveedor
    pro1 = mock(Proveedor)

    # Creamos proveedor
    suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1")

    # Simulamos comportamiento
    when(pro1).get_ID().thenReturn(1)

    # Incluimos proveedor
    suc.aniadir_proveedor(pro1)
    lista = suc.get_listaproveedores()
    # Hacemos el test
    self.assertEqual(lista[0].get_ID(), 1)
[ "def test_aniadir_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(emp1).get_ID().thenReturn(1)\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_eliminar_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n pro2 = mock(Proveedor)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos proveedores\n suc.aniadir_proveedor(pro1)\n suc.aniadir_proveedor(pro2)\n # Eliminamos un proveedor\n suc.eliminar_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_aniadir_producto(self):\n # Creamos mocks de Producto\n prod1 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(prod1).get_ID().thenReturn(1)\n\n # Incluimos producto\n suc.aniadir_producto(prod1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_eliminar_producto(self):\n # Creamos mocks de Producto\n pro1 = mock(Producto)\n pro2 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos productos\n suc.aniadir_producto(pro1)\n suc.aniadir_producto(pro2)\n # Eliminamos un producto\n suc.eliminar_producto(pro1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminar_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Eliminamos un empleado\n suc.eliminar_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_investidor_logado_com_investimentos_vencidos(self):\n vencido = User.objects.create_user('vencido', 'vencido@teste.com', 'vencido')\n self.vencido = vencido.investidor \n \n # Cadastrar investimentos\n # CRI/CRA\n cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste 1', codigo_isin='BRCRITESTE1', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=470)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=460)),\n data_vencimento=(datetime.date.today() - datetime.timedelta(days=95)), investidor=self.vencido)\n \n # CRI 1\n self.operacao_cri_cra_1 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_1, preco_unitario=Decimal(1200), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=160)), tipo_operacao='C',\n taxa=0)\n \n # Debentures\n debenture_1 = Debenture.objects.create(codigo='TESTE91', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=470)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=460)), \n data_vencimento=(datetime.date.today() - datetime.timedelta(days=95)), incentivada=True, \n padrao_snd=True)\n \n # Debenture 1\n self.operacao_deb_1 = OperacaoDebenture.objects.create(investidor=self.vencido, 
debenture=debenture_1, preco_unitario=Decimal(1200),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=160)), taxa=0,\n tipo_operacao='C')\n \n # Tesouro direto\n titulo_1 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() - datetime.timedelta(days=95)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n \n # Título 1\n self.operacao_titulo_1 = OperacaoTitulo.objects.create(investidor=self.vencido, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=150)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_1, consolidada=True)\n \n \n self.client.login(username='vencido', password='vencido')\n response = self.client.get(reverse('inicio:proximos_vencimentos'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n \n # Contexto\n self.assertIn('prox_vencimentos', response.context)\n self.assertEqual(response.context['prox_vencimentos'], [])", "def test_investidor_logado_com_investimentos(self):\n nizbel = User.objects.create_user('nizbel', 'nizbel@teste.com', 'nizbel')\n self.nizbel = nizbel.investidor \n \n # Cadastrar investimentos\n #CDB/RDB\n cdb_rdb_1 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 1', tipo='C', tipo_rendimento=2)\n HistoricoPorcentagemCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, porcentagem=Decimal(100))\n HistoricoCarenciaCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, carencia=Decimal(365))\n HistoricoVencimentoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, vencimento=Decimal(365))\n \n cdb_rdb_2 = CDB_RDB.objects.create(investidor=self.nizbel, nome='CDB teste 2', tipo='C', tipo_rendimento=2)\n HistoricoPorcentagemCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, porcentagem=Decimal(100))\n HistoricoCarenciaCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, carencia=Decimal(365))\n HistoricoVencimentoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, vencimento=Decimal(365))\n \n # CDB 1\n # Vence em 5 dias\n self.operacao_cdb_rdb_1 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_cdb_rdb_2 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_cdb_rdb_3 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_cdb_rdb_4 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # CDB 2\n # Vence em 4 dias\n self.operacao_cdb_rdb_5 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_cdb_rdb_6 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n # Vence em 6 dias\n self.operacao_cdb_rdb_7 = OperacaoCDB_RDB.objects.create(cdb_rdb=cdb_rdb_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - 
datetime.timedelta(days=359)), tipo_operacao='C')\n \n # CRI/CRA\n cri_cra_1 = CRI_CRA.objects.create(nome='CRI teste 1', codigo_isin='BRCRITESTE1', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=370)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=360)),\n data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), investidor=self.nizbel)\n cri_cra_2 = CRI_CRA.objects.create(nome='CRI teste 3', codigo_isin='BRCRITESTE3', tipo=CRI_CRA.TIPO_CRI, tipo_indexacao=CRI_CRA.TIPO_INDEXACAO_DI,\n porcentagem=Decimal(98), juros_adicional=0, data_emissao=(datetime.date.today() - datetime.timedelta(days=20)),\n valor_emissao=Decimal(1000), data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=10)),\n data_vencimento=(datetime.date.today() + datetime.timedelta(days=355)), investidor=self.nizbel)\n \n # CRI 1\n # Vence em 5 dias\n self.operacao_cri_cra_1 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_1, preco_unitario=Decimal(1200), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=60)), tipo_operacao='C',\n taxa=0)\n # CRI 2\n # Vence em 355 dias\n self.operacao_cri_cra_2 = OperacaoCRI_CRA.objects.create(cri_cra=cri_cra_2, preco_unitario=Decimal(1050), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=1)), tipo_operacao='C',\n taxa=0)\n \n # Debentures\n debenture_1 = Debenture.objects.create(codigo='TESTE91', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=370)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=360)), \n data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), incentivada=True, \n padrao_snd=True)\n HistoricoValorDebenture.objects.create(debenture=debenture_1, valor_nominal=1000, juros=35, premio=0, data=datetime.date.today())\n HistoricoValorDebenture.objects.create(debenture=debenture_1, valor_nominal=1000, juros=Decimal('34.3'), premio=0, \n data=datetime.date.today() - datetime.timedelta(days=1))\n \n debenture_2 = Debenture.objects.create(codigo='TESTE92', indice=Debenture.PREFIXADO, porcentagem=Decimal('6.5'), \n data_emissao=(datetime.date.today() - datetime.timedelta(days=20)), valor_emissao=Decimal(1000),\n data_inicio_rendimento=(datetime.date.today() - datetime.timedelta(days=10)), \n data_vencimento=(datetime.date.today() + datetime.timedelta(days=355)), incentivada=True, \n padrao_snd=True)\n HistoricoValorDebenture.objects.create(debenture=debenture_2, valor_nominal=1000, juros=3, premio=0, data=datetime.date.today())\n HistoricoValorDebenture.objects.create(debenture=debenture_2, valor_nominal=1000, juros=Decimal('2.78'), premio=0, \n data=datetime.date.today() - datetime.timedelta(days=1))\n \n # Debenture 1\n # Vence em 5 dias\n self.operacao_deb_1 = OperacaoDebenture.objects.create(investidor=self.nizbel, debenture=debenture_1, preco_unitario=Decimal(1200),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=60)), taxa=0,\n tipo_operacao='C')\n # Debenture 2\n # Vence em 355 dias\n self.operacao_deb_2 = OperacaoDebenture.objects.create(investidor=self.nizbel, debenture=debenture_2, preco_unitario=Decimal(1050),\n quantidade=1, data=(datetime.date.today() - datetime.timedelta(days=1)), taxa=0,\n tipo_operacao='C')\n \n # LC\n lc_1 = 
LetraCambio.objects.create(investidor=self.nizbel, nome='LC teste 1', tipo_rendimento=2)\n HistoricoPorcentagemLetraCambio.objects.create(lc=lc_1, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCambio.objects.create(lc=lc_1, carencia=Decimal(365))\n HistoricoVencimentoLetraCambio.objects.create(lc=lc_1, vencimento=Decimal(365))\n \n lc_2 = LetraCambio.objects.create(investidor=self.nizbel, nome='LC teste 2', tipo_rendimento=2)\n HistoricoPorcentagemLetraCambio.objects.create(lc=lc_2, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCambio.objects.create(lc=lc_2, carencia=Decimal(365))\n HistoricoVencimentoLetraCambio.objects.create(lc=lc_2, vencimento=Decimal(365))\n \n # LC 1\n # Vence em 5 dias\n self.operacao_lc_1 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_lc_2 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_lc_3 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_lc_4 = OperacaoLetraCambio.objects.create(lc=lc_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # LC 2\n # Vence em 4 dias\n self.operacao_lc_5 = OperacaoLetraCambio.objects.create(lc=lc_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_lc_6 = OperacaoLetraCambio.objects.create(lc=lc_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n \n # LCI/LCA\n lci_lca_1 = LetraCredito.objects.create(investidor=self.nizbel, nome='LCI teste 1', tipo_rendimento=2)\n HistoricoPorcentagemLetraCredito.objects.create(letra_credito=lci_lca_1, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCredito.objects.create(letra_credito=lci_lca_1, carencia=Decimal(365))\n HistoricoVencimentoLetraCredito.objects.create(letra_credito=lci_lca_1, vencimento=Decimal(365))\n \n lci_lca_2 = LetraCredito.objects.create(investidor=self.nizbel, nome='LCI teste 2', tipo_rendimento=2)\n HistoricoPorcentagemLetraCredito.objects.create(letra_credito=lci_lca_2, porcentagem=Decimal(100))\n HistoricoCarenciaLetraCredito.objects.create(letra_credito=lci_lca_2, carencia=Decimal(365))\n HistoricoVencimentoLetraCredito.objects.create(letra_credito=lci_lca_2, vencimento=Decimal(365))\n \n # LCI 1\n # Vence em 5 dias\n self.operacao_lci_lca_1 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=360)), tipo_operacao='C')\n # Vence em 10 dias\n self.operacao_lci_lca_2 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=355)), tipo_operacao='C')\n # Vence em 10 dias\n# self.operacao_lci_lca_3 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n# data=(datetime.date.today() - datetime.timedelta(days=355)), 
tipo_operacao='C')\n # Vence em 365 dias\n self.operacao_lci_lca_4 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_1, investidor=self.nizbel, quantidade=Decimal(1000), \n data=datetime.date.today(), tipo_operacao='C')\n \n # LCI 2\n # Vence em 4 dias\n self.operacao_lci_lca_5 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=361)), tipo_operacao='C')\n # Vence em 9 dias\n self.operacao_lci_lca_6 = OperacaoLetraCredito.objects.create(letra_credito=lci_lca_2, investidor=self.nizbel, quantidade=Decimal(1000), \n data=(datetime.date.today() - datetime.timedelta(days=356)), tipo_operacao='C')\n \n # Tesouro direto\n titulo_1 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() + datetime.timedelta(days=5)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n titulo_2 = Titulo.objects.create(tipo='LTN', data_vencimento=(datetime.date.today() + datetime.timedelta(days=370)), \n data_inicio=(datetime.date.today() - datetime.timedelta(days=725)))\n \n # Título 1\n # Vence em 5 dias\n self.operacao_titulo_1 = OperacaoTitulo.objects.create(investidor=self.nizbel, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=50)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_1, consolidada=True)\n \n # Título 2\n # Vence em 370 dias\n self.operacao_titulo_2 = OperacaoTitulo.objects.create(investidor=self.nizbel, preco_unitario=Decimal(700), quantidade=1, \n data=(datetime.date.today() - datetime.timedelta(days=50)), taxa_bvmf=0,\n taxa_custodia=0, tipo_operacao='C', titulo=titulo_2, consolidada=True)\n \n self.client.login(username='nizbel', password='nizbel')\n response = self.client.get(reverse('inicio:proximos_vencimentos'), {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 200)\n \n # Contexto\n self.assertIn('prox_vencimentos', response.context)\n self.assertEqual(len(response.context['prox_vencimentos']), 10)\n # Apenas os com vencimento mais recente deve estar na lista\n self.assertIn(self.operacao_cdb_rdb_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cdb_rdb_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cdb_rdb_7, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_cri_cra_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_deb_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lc_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lc_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lci_lca_1, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_lci_lca_5, response.context['prox_vencimentos'])\n self.assertIn(self.operacao_titulo_1, response.context['prox_vencimentos'])", "def test_eliminar_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n inc2 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos incidencias\n suc.aniadir_incidencia(inc1)\n suc.aniadir_incidencia(inc2)\n # Eliminamos una incidencia\n suc.eliminar_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_aniadir_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino 
Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(inc1).get_id().thenReturn(1)\n\n # Incluimos incidencia\n suc.aniadir_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(lista[0].get_id(), 1)", "def test_change_paos_tallentaja(self):\n # tallentaja_vakajarjestaja_1\n tester2_client = SetUpTestClient('tester2').client()\n # paakayttaja_vakajarjestaja_1\n tester_4_client = SetUpTestClient('tester4').client()\n # tallentaja_vakajarjestaja_2\n tester5_client = SetUpTestClient('tester5').client()\n # tallentaja_toimipaikka_5\n tester8_client = SetUpTestClient('tester8').client()\n\n resp = tester2_client.get('/api/v1/varhaiskasvatussuhteet/4/')\n vakasuhde_4 = resp.content\n\n resp = tester2_client.get('/api/v1/varhaiskasvatuspaatokset/4/')\n vakapaatos_4 = resp.content\n\n self._test_paos_get_put('/api/v1/varhaiskasvatussuhteet/4/', vakasuhde_4, edit_client_list=(tester2_client,),\n no_edit_client_list=(tester5_client, tester8_client,))\n self._test_paos_get_put('/api/v1/varhaiskasvatuspaatokset/4/', vakapaatos_4, edit_client_list=(tester2_client,),\n no_edit_client_list=(tester5_client, tester8_client,))\n\n # Change paos-tallentaja\n jarjestaja_organisaatio = Organisaatio.objects.get(organisaatio_oid='1.2.246.562.10.34683023489')\n tuottaja_organisaatio = Organisaatio.objects.get(organisaatio_oid='1.2.246.562.10.93957375488')\n paos_oikeus = PaosOikeus.objects.get(jarjestaja_kunta_organisaatio=jarjestaja_organisaatio,\n tuottaja_organisaatio=tuottaja_organisaatio)\n paos_oikeus_patch = {\n 'tallentaja_organisaatio_oid': tuottaja_organisaatio.organisaatio_oid\n }\n assert_status_code(tester_4_client.patch(f'/api/v1/paos-oikeudet/{paos_oikeus.id}/', paos_oikeus_patch),\n status.HTTP_200_OK)\n\n self._test_paos_get_put('/api/v1/varhaiskasvatussuhteet/4/', vakasuhde_4,\n edit_client_list=(tester5_client, tester8_client,),\n no_edit_client_list=(tester2_client,))\n self._test_paos_get_put('/api/v1/varhaiskasvatuspaatokset/4/', vakapaatos_4,\n edit_client_list=(tester5_client, tester8_client,),\n no_edit_client_list=(tester2_client,))\n\n # Disable the paos-link between the organizations\n paos_oikeus.delete()\n\n self._test_paos_get_put('/api/v1/varhaiskasvatussuhteet/4/', vakasuhde_4,\n no_edit_client_list=(tester2_client, tester5_client, tester8_client,))\n self._test_paos_get_put('/api/v1/varhaiskasvatuspaatokset/4/', vakapaatos_4,\n no_edit_client_list=(tester2_client, tester5_client, tester8_client,))", "def test_prospects_add_prospect(self):\n pass", "def salasSinConectar(self):\n habitaciones = []\n for i in self.puertas:\n habitaciones.append(i.habitacion1.numero)\n habitaciones.append(i.habitacion2.numero)\n\n # Mirar todas la habitaciones\n for i in range(2, len(self.listaHabitaciones)):\n # Si no tienen las habitaciones en la lista implica que no tienen puerta y hay que generar la puerta\n if i not in habitaciones:\n habitacion1 = self.listaHabitaciones[i]\n posicion_puerta = random.uniform(0, 1)\n habitacion2 = self.listaHabitaciones[random.randint(2, len(self.listaHabitaciones) - 1)]\n\n while (not self.colisiona_puerta(habitacion1, posicion_puerta) and\n not self.colisiona_puerta(habitacion2, posicion_puerta)):\n posicion_puerta = random.uniform(0, 1)\n habitacion2 = self.listaHabitaciones[random.randint(2, len(self.listaHabitaciones) - 1)]\n\n self.puertas.append(Puerta(habitacion1, habitacion2, self.posicionPuerta(posicion_puerta)))", "def test_create_seguimiento(self):\n url = reverse(\"seguimiento-create\")\n data = {\n 
\"anio_lectivo\": self.anio_lectivo.pk,\n \"nombre\": \"Primer Seguimiento\",\n \"descripcion\": \"La gran descripción de este seguimiento\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n \"integrantes\": [\n {\"usuario\": self.user.pk, \"rol\": self.rol_pedagogo.pk},\n {\"usuario\": self.user_docente.pk, \"rol\": self.rol_profesor.pk},\n ],\n \"fecha_cierre\": \"12/12/2021\",\n \"materias\": [self.materia.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Seguimiento.objects.count(), 1)\n self.assertEqual(\n Seguimiento.objects.get().nombre, \"PRIMER SEGUIMIENTO\"\n )", "def test_success_plan_duplication(self):\n\n self.plan_destrezas.objetivos.set(\n [self.objetivo_1, self.objetivo_2])\n self.plan_destrezas.objetivos_generales.set(\n [self.general_1, self.general_2])\n self.plan_destrezas.destrezas.set(\n [self.destreza_1, self.destreza_2])\n\n self.client.login(username='tester@tester.com',\n password='P455w0rd_testing',)\n url = reverse('plan_destrezas_duplicate',\n kwargs={'pk': self.plan_destrezas.pk})\n response = self.client.post(url, {}, follow=True)\n\n assert response.status_code == 200, 'Should return a success code'\n\n # Test success message\n messages = list(response.context.get('messages'))\n assert len(messages) == 1, 'Should return one message'\n assert messages[0].message == 'Plan de Destrezas duplicado '\\\n 'exitosamente.', 'Should return a success message'\n assert messages[0].tags == 'alert-success', \\\n 'Should return a success message'\n self.assertRedirects(response, reverse('plan_destrezas_list'))\n\n # Test plan de destreza\n plan_destrezas_new = PlanDestrezas.objects.last()\n\n assert self.plan_destrezas.pk != plan_destrezas_new.pk, \\\n 'Should be another instance'\n assert plan_destrezas_new.name == '{} (copia)'.format(\n self.plan_destrezas.name), 'Should have a new name'\n assert plan_destrezas_new.curso == self.plan_destrezas.curso, \\\n 'Should have the same property values'\n # Debe tener igual todos los campos many to many al original\n assert plan_destrezas_new.objetivos.first() == self.objetivo_1\n assert plan_destrezas_new.objetivos.last() == self.objetivo_2\n assert plan_destrezas_new.objetivos_generales.first() == self.general_1\n assert plan_destrezas_new.objetivos_generales.last() == self.general_2\n assert plan_destrezas_new.destrezas.first() == self.destreza_1\n assert plan_destrezas_new.destrezas.last() == self.destreza_2\n\n assert self.plan_destrezas.updated_at != \\\n plan_destrezas_new.updated_at, \\\n 'The updated_at field should not be copied'\n\n # Test second duplication\n\n request = RequestFactory().post('/', {})\n request.user = self.user\n request = add_middleware_to_request(request)\n\n PlanDestrezasDuplicateView.as_view()(\n request,\n pk=self.plan_destrezas.pk\n )\n\n plan_destrezas_new = PlanDestrezas.objects.last()\n assert plan_destrezas_new.name == '{} (copia 2)'.format(\n self.plan_destrezas.name)\n assert plan_destrezas_new.curso == self.plan_destrezas.curso\n\n # Test third duplication\n\n PlanDestrezasDuplicateView.as_view()(\n request,\n pk=self.plan_destrezas.pk\n )\n\n plan_destrezas_new = PlanDestrezas.objects.last()\n assert plan_destrezas_new.name == '{} (copia 3)'.format(\n self.plan_destrezas.name)\n assert plan_destrezas_new.curso == self.plan_destrezas.curso", "def test_create_seguimiento_con_integrantes(self):\n url = reverse(\"seguimiento-create\")\n data = {\n \"anio_lectivo\": 
self.anio_lectivo.pk,\n \"nombre\": \"Primer Seguimiento\",\n \"descripcion\": \"La gran descripción de este seguimiento\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n \"integrantes\": [\n {\"usuario\": self.user.pk, \"rol\": self.rol_pedagogo.pk},\n {\"usuario\": self.user_docente.pk, \"rol\": self.rol_profesor.pk},\n ],\n \"fecha_cierre\": \"12/12/2021\",\n \"materias\": [self.materia.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Seguimiento.objects.count(), 1)\n self.assertEqual(\n Seguimiento.objects.get().nombre, \"PRIMER SEGUIMIENTO\"\n )", "def test_preco_produto() -> None:\n lista_de_preco = [49.90, 170.00, 32.50]\n preco_mais_barato = 32.50\n assert preco_produto_mais_barato(lista_de_preco) == preco_mais_barato", "def test_add_donor_already_in_list():\n mail_room2.add_donor('Steve')\n assert len(mail_room2.list_of_donors) == 2\n mail_room2.add_donor(\"Steve\")\n assert len(mail_room2.list_of_donors) == 2", "def test_create_solicitud_seguimiento(self):\n url = reverse(\"solicitudes-create\")\n data = {\n \"motivo_solicitud\": \"Motivo muy justificado\",\n \"alumnos\": [self.alumno_curso1.pk, self.alumno_curso2.pk],\n }\n\n response = self.client.post(url, data, format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(SolicitudSeguimiento.objects.count(), 1)\n self.assertEqual(\n response.data[\"estado\"][0][\"estado_solicitud\"], \"Pendiente\"\n )\n self.assertEqual(\n SolicitudSeguimiento.objects.get().motivo_solicitud,\n \"Motivo muy justificado\",\n )", "def test_ajouterCreneau_mauvais_horaire(self):\n\t\td = [\"pas int\", 14, 18]\n\t\tf = [25, \"pas int\", 14]\n\t\ti = 0\n\t\tcible = Jour(15, LUNDI)\n\t\twhile i < len(f):\n\t\t\twith self.assertRaises(ValueError):\n\t\t\t\tcible.ajouterCreneau(d[i], f[i])\n\t\t\t#with\n\t\t\ti += 1\n\t\t#while" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test eliminar empleado. This test checks that employees are correctly removed from the Sucursal's list of employees.
def test_eliminar_empleado(self): # Creamos mocks de Empleado emp1 = mock(Empleado) emp2 = mock(Empleado) # Creamos sucursal suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1") # Incluimos empleados suc.aniadir_empleado(emp1) suc.aniadir_empleado(emp2) # Eliminamos un empleado suc.eliminar_empleado(emp1) lista = suc.get_listaempleados() # Hacemos el test self.assertEqual(len(lista), 1)
[ "def test_eliminar_actividad(self):\n c = Client()\n c.login(username='admin', password='admin1')\n #creamos un US para luego eliminar\n self.test_crear_actividad()\n #eliminacion de un us existente\n resp = c.get('/actividades/actividad_eliminar/1/')\n self.assertTrue(resp.status_code, 200)\n print ('\\n Se elimina logicamente el us creado del sistema')\n #eliminacion de un us inexistente, (ya se borro)\n #resp = c.get('/userstories/eliminaruserstory/100/')\n #self.assertTrue(resp.status_code, 404)\n #print ('\\n Error al querer eliminar un us que no existe en el sistema')", "def eliminar(self):\r\n #muestra los reistros actuales\r\n resgistros_estatus=self.obj_conexion.mostrar_registros()\r\n if resgistros_estatus:\r\n print(Fore.GREEN+\" Lista de registros actuales\"+Fore.RESET)\r\n print(Fore.LIGHTMAGENTA_EX+str(self.obj_conexion.mostrar_registros())+Fore.RESET)\r\n #pedirle al usuario que ingrese el nombre del equipo a eliminar\r\n nombre=input(Fore.YELLOW+\"Ingresa el nombre del equipo a eliminar: \"+Fore.RESET)\r\n #llama a la funcion eliminar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.eliminar_registro(nombre)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro eliminado correctamente\\n\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no eliminado, no se encontro coeincidencias con lo ingresado\"+Fore.RESET,Back.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" No hay registros en el base de datos, Debe agregar un registro primero\"+Fore.RESET,Back.RESET)\r\n opcion_valida=input(Fore.YELLOW+\" Desea crear un nuevo registro? (S/N): \"+Fore.RESET)\r\n if opcion_valida.upper() in [\"S\"]:\r\n self.registra()\r\n else:\r\n pass", "def test_eliminar_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n pro2 = mock(Proveedor)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos proveedores\n suc.aniadir_proveedor(pro1)\n suc.aniadir_proveedor(pro2)\n # Eliminamos un proveedor\n suc.eliminar_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_remove_emp_no(self):\r\n remove_emp_no(0)\r\n # does emp retain emp_number?\r\n self.assertEqual(self.emp_2.number, 1)\r\n # does Employee.all_emps reflect removal of said emp\r\n self.assertEqual(Employee.all_emps, [self.emp_2])", "def test_eliminacion(self):\n S2 = Sprint.objects.get(nombre= 'Sprint 2')\n S2.delete()\n\n print('Eliminacion de Sprints ejecutada correctamente.')", "def test_eliminar_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n inc2 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos incidencias\n suc.aniadir_incidencia(inc1)\n suc.aniadir_incidencia(inc2)\n # Eliminamos una incidencia\n suc.eliminar_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_delete_escalation(self):\n pass", "def test_eliminar_producto(self):\n # Creamos mocks de Producto\n pro1 = mock(Producto)\n pro2 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos productos\n suc.aniadir_producto(pro1)\n suc.aniadir_producto(pro2)\n # Eliminamos un producto\n suc.eliminar_producto(pro1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def pedir_etiqueta_a_eliminar():\r\n #Pedir el nombre de 
la etiqueta\r\n diccionario = guardar_en_diccionarios.guardar_en_diccionarios()\r\n etiqueta = input(\"Ingrese el nombre de la nota que desea eliminar: \")\r\n if etiqueta == \"\":\r\n etiqueta = \"Sin etiqueta\"\r\n etiqueta = validar_etiqueta(etiqueta)\r\n #Mostrarle al usuario las opciones a borrar\r\n for i,elem in enumerate(diccionario[etiqueta]):\r\n print(\"{}-{}\".format(i+1,elem))\r\n cantidad = len(diccionario[etiqueta])\r\n #Preguntar qué opciones quiere borrar e imprimirla\r\n texto_a_borrar = input(\"Ingrese el número de la opción que desea borrar: \")\r\n texto_a_borrar = validar_numero(texto_a_borrar,cantidad)\r\n print(\"[{}]\".format(etiqueta))\r\n print(\"-{}\".format(diccionario[etiqueta][int(texto_a_borrar)-1]))\r\n eliminar = input(\"Eliminar [s/n]: \")\r\n #Llamar función para que elimine el valor\r\n eliminar_etiqueta(etiqueta,texto_a_borrar,eliminar)", "def test_delete_checker_result(self):\n pass", "def delete(self):\r\n # Delete from the list first.\r\n self.remove_employee(self)\r\n #Is this the problem?\r\n del self", "def elimina(self):\n\tindex =self.ui.grilla.currentIndex()\n\tdata = self.ui.grilla.model()\n\tpeli = data.item(index.row(),0).peli\n\tiD = str(peli['id'])\n\tresp = QtGui.QMessageBox.question(self, \"Pregunta\",\"Desea realmente eliminar la pelicula seleccionada?\",QtGui.QMessageBox.Ok,QtGui.QMessageBox.No)\n if resp == QtGui.QMessageBox.Ok:\n Modelo_pelicula.borrar(iD);\n self.cargar_peliculas();", "def test_deleting_completed_todo(todoApp):\n # Get the completed todos\n completed_todos = todoApp.find_completed_todos()\n \n # Delete an completed todo from the list\n text = completed_todos[0].text\n todoApp.delete_todo(text)\n\n # Check the active todo count is not changed\n assert todoApp.count_active_todos() == '3 items left'\n \n # ASSERTION\n try:\n todo = todoApp.find_todo(text)\n except NoSuchElementException:\n return # Expected result\n \n assert False # Something wrong", "def eliminadorDeListas(nombres, nombresEliminar):\n for i in nombres:\n if i in nombresEliminar:\n nombres.remove(i)\n \n print(nombres)\n print(nombresEliminar)", "def test_aniadir_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(emp1).get_ID().thenReturn(1)\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_delete_success(self):\n self.client.login(username='tester@tester.com',\n password='P455w0rd_testing',)\n url = reverse('plan_destrezas_delete', kwargs={'pk':\n self.plan_destrezas.pk})\n response = self.client.post(url, {}, follow=True)\n\n assert response.status_code == 200, 'Should return a success code'\n messages = list(response.context.get('messages'))\n assert len(messages) == 1, 'Should return one message'\n assert messages[0].message == 'Plan de Destrezas eliminado '\\\n 'exitosamente.', 'Should return a success message'\n assert messages[0].tags == 'alert-success', \\\n 'Should return a success message'\n self.assertRedirects(response, reverse('plan_destrezas_list'))\n\n # The instances should no longer exist in database\n with pytest.raises(PlanDestrezas.DoesNotExist):\n PlanDestrezas.objects.get(pk=self.plan_destrezas.pk)", "def test_deleting_active_todo(todoApp):\n # Get the active todos\n active_todos = todoApp.find_active_todos()\n \n # Update an active todo from the list\n text = 
active_todos[0].text\n todoApp.delete_todo(text)\n\n # Check the active todo count\n assert todoApp.count_active_todos() == '2 items left'\n \n # ASSERTION\n try:\n todo = todoApp.find_todo(text)\n except NoSuchElementException:\n return # Expected result\n \n assert False # Something wrong", "def eliminar_pieza(self, cantidad_a_eliminar):\n pass", "def test_employee_was_deleted(self):\n delete_employee(2, productionDB=False)\n self.cur.execute('SELECT COUNT(*) FROM employees WHERE manager_id = 1')\n data = self.cur.fetchone()\n self.new_number_of_employees = data[0]\n self.assertEqual(self.new_number_of_employees + 1, self.old_number_of_employees, \"\"\"The number of accounts did \n not change\"\"\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
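The four Sucursal records in this section (empleado, producto, incidencia, proveedor) all exercise the same append/remove/list pattern, and the tests appear to rely on the mockito library's mock() and when() helpers. The Sucursal implementation itself is not part of this dump, so the sketch below is only a guess at a minimal class that would satisfy these tests; the attribute names and constructor parameters are assumptions inferred from the test code, not the real project.

# Hypothetical minimal Sucursal consistent with the tests in these records.
# Attribute and parameter names are guesses; only the method names come from the tests.
class Sucursal:
    def __init__(self, provincia, localidad, nombre):
        self.provincia = provincia
        self.localidad = localidad
        self.nombre = nombre
        self._empleados = []
        self._productos = []
        self._incidencias = []
        self._proveedores = []

    def aniadir_empleado(self, empleado):
        self._empleados.append(empleado)

    def eliminar_empleado(self, empleado):
        self._empleados.remove(empleado)

    def get_listaempleados(self):
        return self._empleados

    def aniadir_producto(self, producto):
        self._productos.append(producto)

    def eliminar_producto(self, producto):
        self._productos.remove(producto)

    def get_listaproductos(self):
        return self._productos

    def aniadir_incidencia(self, incidencia):
        self._incidencias.append(incidencia)

    def eliminar_incidencia(self, incidencia):
        self._incidencias.remove(incidencia)

    def get_listaincidencias(self):
        return self._incidencias

    def aniadir_proveedor(self, proveedor):
        self._proveedores.append(proveedor)

    def eliminar_proveedor(self, proveedor):
        self._proveedores.remove(proveedor)

    def get_listaproveedores(self):
        return self._proveedores

With a class like this, the assertion in the record above holds: after adding two mocks and removing one, len(suc.get_listaempleados()) is 1, and the producto, incidencia and proveedor records below check exactly the same behaviour on their own lists.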
Test eliminar producto. This test checks that products are correctly removed from the Sucursal's list of products.
def test_eliminar_producto(self): # Creamos mocks de Producto pro1 = mock(Producto) pro2 = mock(Producto) # Creamos sucursal suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1") # Incluimos productos suc.aniadir_producto(pro1) suc.aniadir_producto(pro2) # Eliminamos un producto suc.eliminar_producto(pro1) lista = suc.get_listaproductos() # Hacemos el test self.assertEqual(len(lista), 1)
[ "def test_eliminar_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n pro2 = mock(Proveedor)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos proveedores\n suc.aniadir_proveedor(pro1)\n suc.aniadir_proveedor(pro2)\n # Eliminamos un proveedor\n suc.eliminar_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminar_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Eliminamos un empleado\n suc.eliminar_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_delete_product(self, open_product_page):\n product_page = open_product_page\n count = product_page.get_all_quantity()\n product_page.delete_product(\"\")\n product_page.get_photo(\"del_name.png\")\n assert (count - 1) == product_page.get_all_quantity()", "def test_eliminacion(self):\n S2 = Sprint.objects.get(nombre= 'Sprint 2')\n S2.delete()\n\n print('Eliminacion de Sprints ejecutada correctamente.')", "def eliminar(self):\r\n #muestra los reistros actuales\r\n resgistros_estatus=self.obj_conexion.mostrar_registros()\r\n if resgistros_estatus:\r\n print(Fore.GREEN+\" Lista de registros actuales\"+Fore.RESET)\r\n print(Fore.LIGHTMAGENTA_EX+str(self.obj_conexion.mostrar_registros())+Fore.RESET)\r\n #pedirle al usuario que ingrese el nombre del equipo a eliminar\r\n nombre=input(Fore.YELLOW+\"Ingresa el nombre del equipo a eliminar: \"+Fore.RESET)\r\n #llama a la funcion eliminar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.eliminar_registro(nombre)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro eliminado correctamente\\n\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no eliminado, no se encontro coeincidencias con lo ingresado\"+Fore.RESET,Back.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" No hay registros en el base de datos, Debe agregar un registro primero\"+Fore.RESET,Back.RESET)\r\n opcion_valida=input(Fore.YELLOW+\" Desea crear un nuevo registro? 
(S/N): \"+Fore.RESET)\r\n if opcion_valida.upper() in [\"S\"]:\r\n self.registra()\r\n else:\r\n pass", "def test_eliminar_actividad(self):\n c = Client()\n c.login(username='admin', password='admin1')\n #creamos un US para luego eliminar\n self.test_crear_actividad()\n #eliminacion de un us existente\n resp = c.get('/actividades/actividad_eliminar/1/')\n self.assertTrue(resp.status_code, 200)\n print ('\\n Se elimina logicamente el us creado del sistema')\n #eliminacion de un us inexistente, (ya se borro)\n #resp = c.get('/userstories/eliminaruserstory/100/')\n #self.assertTrue(resp.status_code, 404)\n #print ('\\n Error al querer eliminar un us que no existe en el sistema')", "def test_delete_muveto_pmt_item(self):\n pass", "def test_eliminar_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n inc2 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos incidencias\n suc.aniadir_incidencia(inc1)\n suc.aniadir_incidencia(inc2)\n # Eliminamos una incidencia\n suc.eliminar_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def elimina(self):\n\tindex =self.ui.grilla.currentIndex()\n\tdata = self.ui.grilla.model()\n\tpeli = data.item(index.row(),0).peli\n\tiD = str(peli['id'])\n\tresp = QtGui.QMessageBox.question(self, \"Pregunta\",\"Desea realmente eliminar la pelicula seleccionada?\",QtGui.QMessageBox.Ok,QtGui.QMessageBox.No)\n if resp == QtGui.QMessageBox.Ok:\n Modelo_pelicula.borrar(iD);\n self.cargar_peliculas();", "def test_uninstall(self):\n self.installer.uninstallProducts(['rapido.plone'])\n self.assertFalse(self.installer.isProductInstalled('rapido.plone'))", "def delete_uncomplete_products(self, products):\n\n complete_products = []\n with FillingSquaresBar(\n \"Removing corrupted products...\",\n max=len(products), suffix=\"%(percent)d%%\") as bar:\n\n for list in products:\n for p in list:\n if (\n p.get(\"product_name_fr\")\n and p.get(\"brands\")\n and p.get(\"nutriscore_grade\")\n and p.get(\"url\")\n and p.get('image_front_url')\n and p.get(\"nutriscore_grade\") is not None\n ):\n complete_products.append(p)\n\n bar.next()\n bar.finish()\n self.get_categories(complete_products)", "def test_remove_invalid_products(self):\n with NamedTemporaryFile() as invalid_products_file, NamedTemporaryFile() as processed_file:\n remove_invalid_products(\n self.valid_parsed_xml, processed_file.name, invalid_products_file=invalid_products_file.name\n )\n\n # Make assertions of the processed xml\n with open(processed_file.name) as f:\n processed_xml = f.readlines()\n with open(self.invalid_products_removed_xml) as f:\n assertion_xml = f.readlines()\n self.assertEqual(len(processed_xml), len(assertion_xml))\n self.assertEqual(processed_xml, assertion_xml)\n\n # Make assertions of the invalid products\n with open(invalid_products_file.name) as f:\n invalid_products_xml = f.readlines()\n with open(self.invalid_products_xml) as f:\n assertion_xml = f.readlines()\n self.assertEqual(len(invalid_products_xml), len(assertion_xml))\n self.assertEqual(invalid_products_xml, assertion_xml)", "def test_delete_install_item(self):\n pass", "def test_aniadir_producto(self):\n # Creamos mocks de Producto\n prod1 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(prod1).get_ID().thenReturn(1)\n\n # Incluimos producto\n suc.aniadir_producto(prod1)\n lista = 
suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(lista[0].get_ID(), 1)", "def test_delete_product(self):\n instance = ProductFactory.create()\n url = reverse('catalog:product-detail', kwargs={'pk': instance.id})\n\n response = self.client.delete(url, format='json', **self.headers)\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def eliminar_pieza(self, cantidad_a_eliminar):\n pass", "def test_delete_muveto_current_change1t_item(self):\n pass", "def test_delete_order(self):\n\n Pizza.objects.create(\n flavour='Dessert',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00}\n )\n Pizza.objects.create(flavour='Vegan',\n prices={\"S\": 10.00, \"M\": 15.00, \"L\": 20.00})\n\n pizzas = Pizza.objects.all().order_by('-flavour')\n orders = create_orders(pizzas, self.user)\n\n url = detail_url(orders[0].uuid)\n res = self.client.delete(url)\n\n orders_state = Order.objects.all()\n\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(len(orders_state), 1)\n self.assertNotEqual(orders_state[0].uuid, orders[0].uuid)", "def remove_single(self, product, options=[]):\n item_index = self.__index__(product, options)\n if item_index != -1:\n if self._items_list[item_index].quantity <= 1:\n del self._items_list[item_index]\n else:\n self._items_list[item_index].quantity -= 1\n self.update_session()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test eliminar incidencia. This test checks that incidents are correctly removed from the Sucursal's list of incidents.
def test_eliminar_incidencia(self): # Creamos mocks de Incidencia inc1 = mock(Incidencia) inc2 = mock(Incidencia) # Creamos sucursal suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1") # Incluimos incidencias suc.aniadir_incidencia(inc1) suc.aniadir_incidencia(inc2) # Eliminamos una incidencia suc.eliminar_incidencia(inc1) lista = suc.get_listaincidencias() # Hacemos el test self.assertEqual(len(lista), 1)
[ "def test_eliminar_actividad(self):\n c = Client()\n c.login(username='admin', password='admin1')\n #creamos un US para luego eliminar\n self.test_crear_actividad()\n #eliminacion de un us existente\n resp = c.get('/actividades/actividad_eliminar/1/')\n self.assertTrue(resp.status_code, 200)\n print ('\\n Se elimina logicamente el us creado del sistema')\n #eliminacion de un us inexistente, (ya se borro)\n #resp = c.get('/userstories/eliminaruserstory/100/')\n #self.assertTrue(resp.status_code, 404)\n #print ('\\n Error al querer eliminar un us que no existe en el sistema')", "def test_eliminar_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Eliminamos un empleado\n suc.eliminar_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_delete_escalation(self):\n pass", "def test_eliminacion(self):\n S2 = Sprint.objects.get(nombre= 'Sprint 2')\n S2.delete()\n\n print('Eliminacion de Sprints ejecutada correctamente.')", "def eliminar(self):\r\n #muestra los reistros actuales\r\n resgistros_estatus=self.obj_conexion.mostrar_registros()\r\n if resgistros_estatus:\r\n print(Fore.GREEN+\" Lista de registros actuales\"+Fore.RESET)\r\n print(Fore.LIGHTMAGENTA_EX+str(self.obj_conexion.mostrar_registros())+Fore.RESET)\r\n #pedirle al usuario que ingrese el nombre del equipo a eliminar\r\n nombre=input(Fore.YELLOW+\"Ingresa el nombre del equipo a eliminar: \"+Fore.RESET)\r\n #llama a la funcion eliminar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.eliminar_registro(nombre)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro eliminado correctamente\\n\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no eliminado, no se encontro coeincidencias con lo ingresado\"+Fore.RESET,Back.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" No hay registros en el base de datos, Debe agregar un registro primero\"+Fore.RESET,Back.RESET)\r\n opcion_valida=input(Fore.YELLOW+\" Desea crear un nuevo registro? 
(S/N): \"+Fore.RESET)\r\n if opcion_valida.upper() in [\"S\"]:\r\n self.registra()\r\n else:\r\n pass", "def test_eliminar_producto(self):\n # Creamos mocks de Producto\n pro1 = mock(Producto)\n pro2 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos productos\n suc.aniadir_producto(pro1)\n suc.aniadir_producto(pro2)\n # Eliminamos un producto\n suc.eliminar_producto(pro1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminar_proveedor(self):\n # Creamos mocks de Proveedor\n pro1 = mock(Proveedor)\n pro2 = mock(Proveedor)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos proveedores\n suc.aniadir_proveedor(pro1)\n suc.aniadir_proveedor(pro2)\n # Eliminamos un proveedor\n suc.eliminar_proveedor(pro1)\n lista = suc.get_listaproveedores()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_delete_checker_result(self):\n pass", "def test_removeRes_back():\n Top1 = DXXXTopology('homoAA.itp','homoA.gro')\n \n for i in range(2,10):\n resID = 2\n Top1.removeRes(resID)\n assert len(Top1.atomlist) == 1\n assert len(Top1.bondlist) == 0\n assert len(Top1.anglist) == 0\n assert len(Top1.conlist) == 0\n assert len(Top1.dihlist) == 0\n assert Top1.atomlist[0].number == 1\n assert Top1.atomlist[0].resNo == 1\n assert Top1.atomlist[0].resname == 'ALA'", "def testDeleteRows(self):\n self.assertEqual(self.glp.getNumRows(), 904)\n rowCoef = self.glp.getRowCoef(800)\n self.glp.deleteRows([800])\n self.assertEqual(self.glp.getNumRows(), 903)\n # now we check if this can be undone\n self.glp.undo()\n self.assertEqual(self.glp.getNumRows(), 904)\n self.assertEqual(self.glp.getRowCoef(904), rowCoef)", "def eliminar_pieza(self, cantidad_a_eliminar):\n pass", "def test_removeRes_front():\n Top1 = DXXXTopology('homoAA.itp','homoA.gro')\n \n for i in range(1,9):\n resID = 1\n Top1.removeRes(resID)\n assert len(Top1.atomlist) == 1\n assert len(Top1.bondlist) == 0\n assert len(Top1.anglist) == 0\n assert len(Top1.conlist) == 0\n assert len(Top1.dihlist) == 0\n assert Top1.atomlist[0].number == 1\n assert Top1.atomlist[0].resNo == 1\n assert Top1.atomlist[0].resname == 'ALA'", "def test_delete_incident(self):\n # add an incident\n self.add_incident(\"Corruption Case 2\")\n\n response = self.app.delete(\n '/api/v1/incidents/1') # record created by test_create_incident\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n # print(\"JSON_BUMPZ::\", result[\"data\"])\n self.assertEqual(result[\"data\"][0][\"message\"],\n \"incident record has been deleted\")", "def test_delete_runs(self):\n pass", "def test_delete_incident(self):\n response = base.delete_incident(self.credentials, '1')\n self.assertEqual(response.status_code, 200)", "def eliminar(ids):\n\n [db_maestro(db_maestro[id.split('.')[1]].id == id.split('.')[0]).delete() for id in ids]", "def test_delete_specific_intervention(self):\n self.app.post(\"/api/v2/interventions\", headers=self.headers,\n data=json.dumps(self.redflag_data))\n response = self.app.delete(\n \"/api/v2/interventions/1\", headers=self.headers)\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result['data']['message'],\n 'Intervention record has been deleted')", "def test_delete_muveto_pmt_item(self):\n pass", "def eliminadorDeListas(nombres, nombresEliminar):\n for i in nombres:\n if i in nombresEliminar:\n nombres.remove(i)\n 
\n print(nombres)\n print(nombresEliminar)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test eliminar proveedor. This test checks that suppliers are correctly removed from the Sucursal's list of suppliers.
def test_eliminar_proveedor(self): # Creamos mocks de Proveedor pro1 = mock(Proveedor) pro2 = mock(Proveedor) # Creamos sucursal suc = Sucursal("Sevilla", "Pino Montano", "Sucursal1") # Incluimos proveedores suc.aniadir_proveedor(pro1) suc.aniadir_proveedor(pro2) # Eliminamos un proveedor suc.eliminar_proveedor(pro1) lista = suc.get_listaproveedores() # Hacemos el test self.assertEqual(len(lista), 1)
[ "def test_eliminar_empleado(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Eliminamos un empleado\n suc.eliminar_empleado(emp1)\n lista = suc.get_listaempleados()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminar_producto(self):\n # Creamos mocks de Producto\n pro1 = mock(Producto)\n pro2 = mock(Producto)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos productos\n suc.aniadir_producto(pro1)\n suc.aniadir_producto(pro2)\n # Eliminamos un producto\n suc.eliminar_producto(pro1)\n lista = suc.get_listaproductos()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminar_actividad(self):\n c = Client()\n c.login(username='admin', password='admin1')\n #creamos un US para luego eliminar\n self.test_crear_actividad()\n #eliminacion de un us existente\n resp = c.get('/actividades/actividad_eliminar/1/')\n self.assertTrue(resp.status_code, 200)\n print ('\\n Se elimina logicamente el us creado del sistema')\n #eliminacion de un us inexistente, (ya se borro)\n #resp = c.get('/userstories/eliminaruserstory/100/')\n #self.assertTrue(resp.status_code, 404)\n #print ('\\n Error al querer eliminar un us que no existe en el sistema')", "def test_eliminar_incidencia(self):\n # Creamos mocks de Incidencia\n inc1 = mock(Incidencia)\n inc2 = mock(Incidencia)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Incluimos incidencias\n suc.aniadir_incidencia(inc1)\n suc.aniadir_incidencia(inc2)\n # Eliminamos una incidencia\n suc.eliminar_incidencia(inc1)\n lista = suc.get_listaincidencias()\n # Hacemos el test\n self.assertEqual(len(lista), 1)", "def test_eliminacion(self):\n S2 = Sprint.objects.get(nombre= 'Sprint 2')\n S2.delete()\n\n print('Eliminacion de Sprints ejecutada correctamente.')", "def test_delete_escalation(self):\n pass", "def test_delete_checker_result(self):\n pass", "def test_delete_muveto_pmt_item(self):\n pass", "def test_delete_professor(self):\n url = reverse('institute-professors-detail', args=['IC', 'Pedro Rezende'])\n # non admin and unauthenticated user\n BaseAPITest.check_user_permissions(self, None, 'delete',\n status.HTTP_403_FORBIDDEN, url)\n\n # admin user\n BaseAPITest.check_user_permissions(self, 'admin', 'delete',\n status.HTTP_204_NO_CONTENT, url)\n\n # non admin and authenticated user\n BaseAPITest.check_user_permissions(self, 'user', 'delete',\n status.HTTP_403_FORBIDDEN, url)", "def eliminar(self):\r\n #muestra los reistros actuales\r\n resgistros_estatus=self.obj_conexion.mostrar_registros()\r\n if resgistros_estatus:\r\n print(Fore.GREEN+\" Lista de registros actuales\"+Fore.RESET)\r\n print(Fore.LIGHTMAGENTA_EX+str(self.obj_conexion.mostrar_registros())+Fore.RESET)\r\n #pedirle al usuario que ingrese el nombre del equipo a eliminar\r\n nombre=input(Fore.YELLOW+\"Ingresa el nombre del equipo a eliminar: \"+Fore.RESET)\r\n #llama a la funcion eliminar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.eliminar_registro(nombre)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro eliminado correctamente\\n\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no eliminado, no se encontro coeincidencias con lo ingresado\"+Fore.RESET,Back.RESET)\r\n else:\r\n 
print(Fore.WHITE,Back.RED+\" No hay registros en el base de datos, Debe agregar un registro primero\"+Fore.RESET,Back.RESET)\r\n opcion_valida=input(Fore.YELLOW+\" Desea crear un nuevo registro? (S/N): \"+Fore.RESET)\r\n if opcion_valida.upper() in [\"S\"]:\r\n self.registra()\r\n else:\r\n pass", "def test_delete_success(self):\n self.client.login(username='tester@tester.com',\n password='P455w0rd_testing',)\n url = reverse('plan_destrezas_delete', kwargs={'pk':\n self.plan_destrezas.pk})\n response = self.client.post(url, {}, follow=True)\n\n assert response.status_code == 200, 'Should return a success code'\n messages = list(response.context.get('messages'))\n assert len(messages) == 1, 'Should return one message'\n assert messages[0].message == 'Plan de Destrezas eliminado '\\\n 'exitosamente.', 'Should return a success message'\n assert messages[0].tags == 'alert-success', \\\n 'Should return a success message'\n self.assertRedirects(response, reverse('plan_destrezas_list'))\n\n # The instances should no longer exist in database\n with pytest.raises(PlanDestrezas.DoesNotExist):\n PlanDestrezas.objects.get(pk=self.plan_destrezas.pk)", "def test_delete_person(self):\n pass", "def test_vrfs_delete(self):\n pass", "def removeFromFavorites11(favoriteContactdelete,favorites,diccionarioMaestro):\n value = diccionarioMaestro.get('favorites')\n if value != 'None':\n listSplit = favoriteContactdelete.split(',')\n verifiedSplit = []\n invalidSplit = []\n iteration1 = 0\n for items in listSplit:\n if listSplit[iteration1] in diccionarioMaestro:\n verifiedSplit.append(listSplit[iteration1])\n else:\n invalidSplit.append(listSplit[iteration1])\n \n iteration1 = iteration1 + 1\n\n \n \n if len(invalidSplit) != 0:\n print('Contacto(s) → {} ← parece(n) no existir en la lista de contactos'.format(', '.join(invalidSplit)))\n print(\"Porfavor asegúrese de haber escrito el contacto correctamente y que el contacto si exista en el directorio.\")\n\n elif len(invalidSplit) == 0:\n iteration2 = 0\n \n for n in verifiedSplit:\n try:\n del diccionarioMaestro['favorites'][verifiedSplit[iteration2]]\n del favorites[verifiedSplit[iteration2]]\n iteration2 = iteration2 + 1\n except:\n print('Tu contacto no pudo ser eliminado por que no se encuentra en \"favoritos\".')\n print(', '.join(verifiedSplit))\n else:\n print('No tienes lista de favoritos.')", "def test_auth_delete_participant(self):\n pass", "def elimina(self):\n\tindex =self.ui.grilla.currentIndex()\n\tdata = self.ui.grilla.model()\n\tpeli = data.item(index.row(),0).peli\n\tiD = str(peli['id'])\n\tresp = QtGui.QMessageBox.question(self, \"Pregunta\",\"Desea realmente eliminar la pelicula seleccionada?\",QtGui.QMessageBox.Ok,QtGui.QMessageBox.No)\n if resp == QtGui.QMessageBox.Ok:\n Modelo_pelicula.borrar(iD);\n self.cargar_peliculas();", "def test_delete_participants_success(self):\n participants = self.get_participants(\n \"test_delete_participants_success\")\n added_tokens = self.api.token.add_participants(\n survey_id=self.survey_id, participant_data=participants)\n self.token_ids = [x[\"tid\"] for x in added_tokens]\n\n deleted = self.api.token.delete_participants(\n survey_id=self.survey_id, token_ids=self.token_ids)\n for token_id, token_result in deleted.items():\n self.assertIn(token_id, self.token_ids)\n self.assertEqual(\"Deleted\", token_result)", "def eliminadorDeListas(nombres, nombresEliminar):\n for i in nombres:\n if i in nombresEliminar:\n nombres.remove(i)\n \n print(nombres)\n print(nombresEliminar)", "def 
test_delete_decision_tree_using_delete(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Preprocess PGN files to get the board state, move and game winner as a pandas dataframe, and save it as a CSV file.
def preprocess_pgn_files(path_pgn_files, num_moves_database, train_val_split, path_save_csv_database): # create empty pandas dataframe to save the information df_train = pd.DataFrame({"board_state": pd.Series([], dtype='str'), "move": pd.Series([], dtype='str'), "game_winner": pd.Series([], dtype='int')}) df_val = pd.DataFrame({"board_state": pd.Series([], dtype='str'), "move": pd.Series([], dtype='str'), "game_winner": pd.Series([], dtype='int')}) # create counter for total number of moves counter_samples = 0 pbar = tqdm(total=num_moves_database, ascii=True) # find and iterate over all PGN files pgn_files = glob.glob(path_pgn_files + "/*.pgn") for path_pgn_file in pgn_files: pgn_file = open(path_pgn_file, encoding="ISO-8859-1") while True: game = chess.pgn.read_game(pgn_file) # no more games in the PGN file if game is None: break # iterate through all moves and play them on a board. game_winner = {"0-1": -1, "1-0": 1, "1/2-1/2": 0}[game.headers["Result"]] board = game.board() for move in game.main_line(): # get board state board_state = board.fen() # get move corresponding to this state as UCI standard move_uci = move.uci() # update board state board.push(move) # append information to pandas dataframe if random.uniform(0, 1) < train_val_split: df_train = df_train.append({"board_state": board_state, "move": move_uci, "game_winner": game_winner}, ignore_index=True) else: df_val = df_val.append({"board_state": board_state, "move": move_uci, "game_winner": game_winner}, ignore_index=True) # update move counter and progress bar counter_samples += 1 pbar.update() if num_moves_database is not None and counter_samples >= num_moves_database: # save pandas dataframe as dataframe df_train = df_train.sample(frac=1).reset_index(drop=True) df_val = df_val.sample(frac=1).reset_index(drop=True) df_train.to_csv(os.path.join(path_save_csv_database, "chess_train_database_" + str(counter_samples) + ".csv"), index=False) df_val.to_csv(os.path.join(path_save_csv_database, "chess_val_database_" + str(counter_samples) + ".csv"), index=False) return 1 # save pandas dataframe as dataframe df_train = df_train.sample(frac=1).reset_index(drop=True) df_val = df_val.sample(frac=1).reset_index(drop=True) df_train.to_csv(os.path.join(path_save_csv_database, "chess_train_database_" + str(counter_samples) + ".csv"), index=False) df_val.to_csv(os.path.join(path_save_csv_database, "chess_val_database_" + str(counter_samples) + ".csv"), index=False) return 0
[ "def read_pgn_file(inp_file):\n my_pgn_file = open(inp_file).readlines()\n with open('datasets/chess_games.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow([\"White rating\", \"Black rating\",\n \"White result\", \"Black result\", \"Victory by checkmate\", \"Victory by time\",\n \"Victory by resignation\", \"Opposite side castling\", \"Other development\"])\n i = 0\n for i in tqdm(range(len(my_pgn_file))):\n try:\n # 18th row is a row having the result of a game\n result = collect_result(my_pgn_file[i + 17])\n\n # 6 and 7 row contain info about players' ratings\n rating_list = [collect_rating(my_pgn_file[i + 5]), collect_rating(my_pgn_file[i + 6])]\n\n # other info on the way the game was played.\n game_data_list = collect_castling_mate_data(my_pgn_file[i + 19])\n line_data = rating_list + result + game_data_list\n writer.writerow(line_data)\n i += 1\n except (TypeError, ValueError) as e:\n # here, we iterate through lines till we get to the next game,\n # as each game starts with an empty line and the subsequent \"Event\" substring\n while i < len(my_pgn_file) and not my_pgn_file[i].startswith(\"[Event\"):\n i += 1\n except IndexError:\n break", "def extractPlayerGameLogData(self): \n\t\thtmlFiles = self.getHtmlFiles(self.sourceGameLogDir)\n\t\tfor htmlFile in htmlFiles:\n\t\t\tplayerName = splitext(basename(htmlFile))[0]\n\t\t\tsoup = BeautifulSoup(open(htmlFile))\n\t\t\t\n\t\t\t# Get both the regular season and playoff tables from the html\n\t\t\tregSeasStatsTab, poStatsTab = self.getGamelogStatsTables(soup)\t\n\t\t\t\n\t\t\t# Get the format of the regular season and playoff tables \n\t\t\tregSeasStatsForm = self.getTabFormat(regSeasStatsTab)\n\t\t\tpoStatsForm = self.getTabFormat(poStatsTab)\n\t\t\t\n\t\t\t# transform the column header data into python lists\n\t\t\tregSeasStatsHeader = self.utils.bsThResultSetToList(regSeasStatsForm)\n\t\t\tpoStatsHeader = self.utils.bsThResultSetToList(poStatsForm)\n\t\t\t\n\t\t\t# Get just the rows from the table that have meaningful data,\n\t\t\t# discarding embedded extra headers\n\t\t\tregSeasonCleanStats = self.extractStatsRows(regSeasStatsTab)\n\t\t\tpoCleanStats = self.extractStatsRows(poStatsTab)\n\t\t\t\n\t\t\t# turn the cleaned up data stats rows into a friendlier python list of lists\n\t\t\tregSeasStatList = self.utils.bsTrResultSetToList(regSeasonCleanStats)\n\t\t\tpoStatList = self.utils.bsTrResultSetToList(poCleanStats)\n\n\t\t\t# affix header to data\n\t\t\tregSeasStatList.insert(0, regSeasStatsHeader)\n\t\t\tpoStatList.insert(0, poStatsHeader)\n\n\t\t\t#print(regSeasStatList)\n\t\t\t#print(poStatsForm)\n\t\t\t#print(poStatList)\n\t\t\tself.writeListToFile(\n\t\t\t\tjoin(self.destDataDir, playerName) + '_reg', \n\t\t\t\tregSeasStatList\n\t\t\t)\n\n\t\t\tself.writeListToFile(\n\t\t\t\tjoin(self.destDataDir, playerName) + '_po', \n\t\t\t\tpoStatList\n\t\t\t)", "def parse_and_preprocess_game_log(file_name):\n df = pd.read_csv(file_name, delimiter='|')\n # Converting the columns that contains lists from str to list()\n for c in [\"snake_pos\", \"apple_pos\" ,\"snake_dir\"]:\n df[c] = df[c].fillna(\"[]\").apply(ast.literal_eval)\n # Once some values in the 'other' columns contains NaNs\n # We have to fill those with {}\n df['other'] = df['other'].fillna(\"{}\").apply(ast.literal_eval)\n\n # Converting list type columns to np.array\n list_cols = df.columns[[type(v) == list for v in df.iloc[0]]]\n df[list_cols] = df[list_cols].applymap(np.array)\n\n # The time between two consecutive apples, in the same game\n # 
For the first apple is measured starting from the first key press\n df[\"time_to_apple\"] = df.time_secs - df.time_secs.shift(1)\n df.loc[df.status == \"start\", \"time_to_apple\"] = 0\n\n return df", "def parse_ground_truth_file(filename):\n df = pandas.read_csv(filename, sep='[\\s\\t]+', names=['file_path', 'is_relevant'], engine='python')\n if platform.system() == 'Windows':\n df.file_path = df.file_path.map(lambda path: path.replace('/', '\\\\'))\n return df", "def read_csv_files():\n file_names = glob.glob(\"../data_users_moves/*.csv\")\n\n all_dfs = pd.DataFrame(columns=['Timestamp', 'id1', 'id2', 'label'])\n\n for file in file_names:\n print('Currently using file - ', file)\n\n df = pd.read_csv(file, header=None)\n df.columns = ['Timestamp', 'id1', 'id2']\n df['Timestamp'] = pd.to_datetime(df['Timestamp'], unit='s')\n df['date'] = [d.date() for d in df['Timestamp']]\n df['time'] = [d.time() for d in df['Timestamp']]\n # time_stamp = int(['Timestamp'])\n # formatted_time_stamp = datetime.utcfromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')\n # splitted_time_stamp = formatted_time_stamp.split()\n # df['date']= splitted_time_stamp[0]\n # df['time'] = splitted_time_stamp[1]\n\n x = 1\n if 'attack' in file:\n rel_type = 'attacks'\n\n elif 'trade' in file:\n rel_type = 'trades'\n else:\n rel_type = 'messages'\n\n df['label'] = rel_type\n\n all_dfs = pd.concat([all_dfs, df])\n G = nx.from_pandas_edgelist(df=all_dfs, source='id1', target='id2', edge_attr=True,\n create_using=nx.DiGraph(name='Travian_Graph'))\n\n # pos = nx.spring_layout(G, k=10)\n # nx.draw(G, pos, with_labels=True)\n labels = {e: G.edges[e]['label'] for e in G.edges}\n # nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n # G = nx.from_pandas_edgelist(edges, edge_attr=True)\n\n # plt.show()\n return G, all_dfs, labels", "def writePlayerCSV(self) -> None:\r\n with open(self.csv_location, \"w\", encoding=\"utf-16\") as file:\r\n for extracted_player in self._extracted_players:\r\n player_name = extracted_player\r\n print(self._extracted_players[extracted_player])\r\n assert len(self._extracted_players[extracted_player]) == 4 #ensures length is 5 to confirm the values can be unpacked\r\n player_long_name, player_position, player_rating, player_club = self._extracted_players[extracted_player]\r\n csv_format = re.compile(\r\n player_name + \",\" + player_long_name + \",\" + player_position + \",\" + player_rating + \",\" + player_club + \",\" + self._season + \"\\n\")\r\n file.write(csv_format.pattern) #Writes the compiled RegEx pattern with the values inserted\r", "def readPGN(file):\n pgn=open(file,encoding=\"utf-8-sig\")\n first_game=chess.pgn.read_game(pgn)\n pgn.close()\n if first_game is not None:\n board = first_game.board()\n # Iterate through all moves and play them on a board.\n for move in first_game.mainline_moves():\n board.push(move)\n return [board,first_game.headers[\"White\"],first_game.headers[\"Black\"],first_game.headers[\"Result\"]]\n else:\n return None", "def get_data_with_metadata(path_metadata= \"metadata.csv\",\n data_dir= 'scores/Bach_JohannSebastian', \n prior_counts= 1,\n n_times= 9, \n no_of_notes= 12, \n metaparam= ['filename','composer', 'no_notes', 'entropy', 'display_year',\n 'work_group', 'work catalogue','opus', 'no'],\n report_broken_files= False,\n store_csv_path = None):\n # metaparms need to contain filename for peace identification\n assert 'filename' in metaparam\n\n # dataframe from metadata.csv file \n df_metadata = pd.read_csv(path_metadata, error_bad_lines=False, sep = 
'\\t')\n\n # number of points in triangle\n data_points_per_scape = int(n_times * (n_times + 1) / 2)\n\n # to be converted to final dataframe \n data = []\n # folder walk\n for r, _, f in os.walk(data_dir):\n for file in f:\n try:\n peace = np.zeros((data_points_per_scape, no_of_notes + 2))\n # read the score into a pitch scape\n scape = rd.get_pitch_scape(os.path.join(r, file), prior_counts= prior_counts)\n times = np.linspace(scape.min_time, scape.max_time, n_times + 1)\n cnt = 0\n if pitch_scape.min_time < 0:\n continue\n for start, end in product(times, times):\n if start >= end:\n continue\n pitch_scape = scape[start, end]\n normalized_pitch_scape = pitch_scape / np.sum(pitch_scape) #.reshape(data_points_per_scape,1)\n peace[cnt,:no_of_notes] = normalized_pitch_scape\n peace[cnt,-2] = (end - start)\n peace[cnt,-1] = (end - start) / (scape.max_time - scape.min_time)\n cnt += 1\n \n # creating dictionary for data frame\n for i in range(peace.shape[0]): # iterate over scapes\n # dataframe for specific peace\n peace_dict = {}\n for j in range(no_of_notes): # iterate over notes\n # add pitch scape columns to peace dataframe \n peace_dict[str(j)] = peace[i][j]\n df_peace = df_metadata.loc[df_metadata['filename'] == file]\n # adding metadata parameters\n for param in metaparam:\n peace_dict[param] = df_peace[param].values[0]\n data.append(peace_dict)\n except Exception as e:\n if report_broken_files:\n print(os.path.join(r, file) + \": Error: \" + str(e))\n continue\n\n data = pd.DataFrame(data)\n # adding peace identification\n data['song_id'] = pd.factorize(data['filename'])[0]\n # removing NaNs from notes columns\n data = removeNaNs(data, no_of_notes)\n data = data.rename(columns={str(no_of_notes): \"time_window_absolute\", str(no_of_notes + 1): \"time_window_normalized\"})\n # save dataframe\n if store_csv_path is not None:\n data.to_csv(store_csv_path, float_format='%.15f')\n return data", "def create_pre_match_features(row):\n\n\tv=[] #vector to be populated\n\tv.append(row[\"tourney_date\"])\n\tv.append(row[\"tourney_name\"])\n\n\t#print(\"creating pre-match features for {} vs {}\".format(row[\"winner_name\"],row[\"loser_name\"]))\n\n\tdate=row[\"tourney_date\"]\n\tr=row[\"round\"]\n\tsur=row[\"surface\"]\n\n\t#We consider only matches of present year + past 5 years! So we need the year.\n\tyear=int(str(date)[0:4])\n\n\t#player1 (randomly assigned!)\n\tplayer1=random.choice([row[\"winner_name\"],row[\"loser_name\"]])\n\t#player2\n\tif player1==row[\"winner_name\"]:\n\t\tplayer2=row[\"loser_name\"]\n\telse:\n\t\tplayer2=row[\"winner_name\"]\n\n\tv.append(player1)\n\tv.append(player2)\n\n\t#rank difference\n\t#v.append(row[\"winner_rank\"]-row[\"loser_rank\"]) #Execution for matches.csv had this, but it's WRONG!!! It sets always the winner as player 1! 
\n\t# (Corrected directly in final_csv)\n\tif player1==row[\"winner_name\"]:\n\t\tv.append(row[\"winner_rank\"]-row[\"loser_rank\"])\n\telse:\n\t\tv.append(row[\"loser_rank\"]-row[\"winner_rank\"])\n\n\t#the function retrieve_player_stats should return a dataframe with the average stats of player against each common opponent with the other player\n\tavg_p1=retrieve_player_stats(player1,player2,date,r,sur,year)\n\tavg_p2=retrieve_player_stats(player2,player1,date,r,sur,year)\n\n\t#print(avg_p1)\n\n\t#print(avg_p2)\n\n\t#overall uncertainty of the data at disposal for the past matches of p1 and p2 against their common opponents\n\tif ((avg_p1.shape[0]>0) and (avg_p2.shape[0]>0)):\n\t\ts=0\n\t\t#uncertainty on the match\n\t\tfor i in range(avg_p1.shape[0]):\n\t\t\ts+=(avg_p1.iloc[i][\"data_amount\"]*avg_p2.iloc[i][\"data_amount\"])\n\t\t\t#print(\"Uncertainty for {}: {} x {} \".format(avg_p2.iloc[i][\"opponent\"],avg_p1.iloc[i][\"uncertainty\"],avg_p2.iloc[i][\"uncertainty\"]))\n\t\tu=1/s #u is the overall uncertainty of our feature vector for the match!\n\t\t#print(\"Overall uncertainty: {}\".format(u))\n\n\t\t#mean stats\n\t\tstats_p1=list(avg_p1.mean(axis=0,numeric_only=True)[0:13])\n\t\tstats_p2=list(avg_p2.mean(axis=0,numeric_only=True)[0:13])\n\n\t\t#WEIGHTED mean stats\n\t\t#we need to take mean value of each column to get average player performances against the list of common opponents\n\t\t#weight opponents by measure of data_amount at disposal?\n\t\t#No: this would make, for ex, a player look worse if he played lots of times against Novak Djokovic!\n\t\t#sum_unc_1=avg_p1[\"data_amount\"].sum()\n\t\t#avg_p1[\"weight\"]=avg_p1.apply(lambda row: (row[\"data_amount\"]/sum_unc_1),axis=1)\n\t\t#print(stats_p1)\n\n\t\tdiffs=list(np.subtract(stats_p1,stats_p2))\n\n\t\tv.extend(diffs)\n\n\t\tv.append(round(stats_p1[3]*stats_p1[4]-stats_p2[3]*stats_p2[4],4)) #complete\n\t\tv.append((stats_p1[3]-stats_p2[4])-(stats_p2[3]-stats_p1[4])) #serveadv\n\n\t\t#h2h\n\t\th2h_1=df[((df[\"winner_name\"]==player1) & (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r)) & (year-df[\"year\"]<=5))].shape[0]\n\t\th2h_2=df[((df[\"winner_name\"]==player2) & (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r)) & (year-df[\"year\"]<=5))].shape[0]\n\t\tif (h2h_1+h2h_2)>0:\n\t\t\tv.append(round((h2h_1/(h2h_1+h2h_2))-(h2h_2/(h2h_1+h2h_2)),4))\n\t\telse:\n\t\t\tv.append(0) #dummy value\n\n\t\t#fatigue feature\n\t\t# NOTE we don't have the data of each match, but only the starting date of the tournament;\n\t\t# therefore, the only thing that can be done is counting the num. 
of games played since the beginning of the tournament and give a % difference btw the 2 players.\n\t\t# This does not take into account the exact distance in days of previous matches from the current one, nor matches of the previous tournament,\n\t\t# but from our perspective it seems quite impossible to do differently.\n\t\ttourney_p1=df[(((df[\"winner_name\"]==player1) | (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]==date) & (df[\"round\"]<r)))]\n\t\tp1_games=tourney_p1[\"tot_games\"].sum()\n\t\ttourney_p2=df[(((df[\"winner_name\"]==player2) | (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]==date) & (df[\"round\"]<r)))]\n\t\tp2_games=tourney_p2[\"tot_games\"].sum()\n\n\t\tif np.isnan(p1_games):\n\t\t\tp1_games=0\n\t\tif np.isnan(p2_games):\n\t\t\tp2_games=0\n\n\t\tif p1_games==0 and p2_games==0:\n\t\t\tv.append(0) #no games played by either player, we put zero\n\t\telse:\n\t\t\tv.append(round((p1_games/(p1_games+p2_games))-(p2_games/(p1_games+p2_games)),4))\n\n\t\tv.append(u) #append uncertainty!\n\n\t\tif player1==row[\"winner_name\"]:\n\t\t\tv.append(0)\n\t\telse:\n\t\t\tv.append(1)\n\n\t\treturn v\n\telse:\n\t\treturn False", "def extractPlayerData(self):\n\t\thtmlFiles = self.getHtmlFiles(self.sourcePlayerDir)\n\t\tfor htmlFile in htmlFiles:\n\t\t\tsoup = BeautifulSoup(open(htmlFile))\n\t\t\tplayerName = splitext(basename(htmlFile))[0]\n\t\t\tposition = self.getPosition(soup)\n\n\t\t\tplayerPage = PlayerPage(playerName, position)\n\n\t\t\t# Get stat tables from the html\n\t\t\tplayerPage = self.getPlayerPageStats(soup, playerPage)\n\n\t\t\t\n\t\t\tfor statsType, statsTab in playerPage.statsTabs.items():\t\n\t\t\t\t# Get the format of the regular season and playoff tables \n\t\t\t\tif(statsType == 'Pass'):\n\t\t\t\t\ttabForm = self.getTabFormat(statsTab, statsType)\n\t\t\t\telse:\n\t\t\t\t\ttabForm = self.getTabFormat(statsTab)\n\n\t\t\t\t# transform the column header data into python lists\n\t\t\t\theader = self.utils.bsThResultSetToList(tabForm)\n\t\t\t\t\n\t\t\t\t# Get just the rows from the table that have meaningful data,\n\t\t\t\tcleanStats = self.extractStatsRows(statsTab)\n\n\t\t\t\t# turn the cleaned up data stats rows into a friendlier python list of lists\n\t\t\t\tstatsList = self.utils.bsTrResultSetToList(cleanStats)\n\t\t\t\t\n\t\t\t\t# affix header to data\n\t\t\t\tstatsList.insert(0, header)\n\n\t\t\t\tplayerPage.stats[statsType] = statsList\n\n\t\t\t\tfileName = join(self.destDataDir, playerPage.name) + \"_\" + playerPage.position + \"_\" + statsType\n\t\t\t\tprint(\"Writing \" + fileName)\n\t\t\t\tself.writeListToFile(fileName, statsList)", "def load_player_list(outname):\n print(\"Loading with label {}.\".format(outname))\n if path.isfile(outname + \".csv\"):\n ap = pd.read_csv(outname + \".csv\", index_col=[\"player\", \"team\", \"pos\"])\n else:\n logging.error(\"Could not find file %s.csv!\", outname)\n if path.isfile(outname + \"_picked.csv\"):\n pp = pd.read_csv(outname + \"_picked.csv\", index_col=[\"player\", \"team\", \"pos\"])\n else:\n logging.error(\"Could not find file %s_picked.csv!\", outname)\n return ap, pp", "def CreateNewStatsFile():\r\n # create a table of stats for all players\r\n columnNames = [\"FixtureSet\", \"Game\", \"Player\", \"Opponent\", \"Result\", \"Win\", \"Loss\", \"Draw\", \r\n \"PlayerScore\", \"OpponentScore\", \"CompleteCityScore\", \"CompleteRoadScore\", \r\n \"CompleteMonasteryScore\", \"IncompleteCityScore\", \"IncompleteRoadScore\", \r\n \"IncompleteMonasteryScore\", \"FarmScore\",\"MeeplesPlayed\", 
\"MeepleTurns\", \r\n \"MeepleFeatures\", \"Turns\", \"AvgTurnTime\"]\r\n dfStats = pd.DataFrame(columns = columnNames)\r\n \r\n # current list of files\r\n current_logs = os.listdir('logs')\r\n \r\n # get files of matching names\r\n matching = [file for file in current_logs if 'Match_Stats' in file]\r\n print(f'Matching: {matching}')\r\n\r\n # if no files exist so far\r\n if matching == []:\r\n new_file = 'logs/0_Match_Stats.csv'\r\n print(new_file)\r\n dfStats.to_csv(new_file, index=False) \r\n return new_file, dfStats\r\n \r\n # get file number of latest file \r\n highest_number = max([int(file.split('_')[0]) for file in matching])\r\n \r\n # new file number\r\n next_number = highest_number + 1\r\n new_file = 'logs/' + str(next_number) + '_Match_Stats.csv'\r\n print(new_file)\r\n \r\n # create new file\r\n dfStats.to_csv(new_file, index=False) \r\n return new_file, dfStats", "def load_and_preprocesss():\n #####################################\n # 1. Load Dataset #\n #####################################\n loadDataset = LoadDataset()\n review_list, rating_list, gender_list, location_list, age_list = loadDataset.load_file(dataset_dir + file_path)\n\n\n #####################################\n # 2. Data Pre-processing #\n #####################################\n dataPreprocessing = DataPreprocessing()\n\n labeled_gender_list = dataPreprocessing.label_gender(gender_list)\n labeled_age_list = dataPreprocessing.label_age(age_list)\n assert len(review_list) == len(rating_list) == len(labeled_age_list) == len(labeled_gender_list) == len(\n location_list)\n\n # Check if there exists a directory to save processed files\n if not os.path.exists(processed_langid_dir):\n os.mkdir(processed_langid_dir)\n\n # Form csv files and save\n form_csv(review_list, rating_list, labeled_gender_list, labeled_age_list, location_list,\n processed_langid_dir + csv_filename)\n\n print(\"Write to csv successfully!\\n\")\n\n\n #####################################\n # 3. 
Language Double Check #\n #####################################\n # Check if there exists a directory to save fasttext processed files\n if not os.path.exists(processed_fasttext_dir):\n os.mkdir(processed_fasttext_dir)\n\n for file in sorted(os.listdir(processed_langid_dir)):\n if file.endswith(\".csv\"):\n fasttext_language_detection(filename=os.path.join(processed_langid_dir, file),\n new_filename=os.path.join(processed_fasttext_dir, file))", "def swipe_tr():\n\t\"\"\" Copy files to dataSets_Results_ts from psychopy \"\"\"\n\n\ttotalDf = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t## init empty list\t\t\t\n\n\tfor file in os.listdir(thisDirPath + '\\\\R\\\\dataSets_Results_tr'):\t\t## loop throught dataSets_Results_ts dir\n\t\tresultsFile = pd.read_csv(thisDirPath + targetFile_tr + str(file)) ## read files\n\t\ttotalDf.append(resultsFile)\t\t\t\t\t\t\t\t\t\t\t## append the dfs to the empty list\n\n\ttotalDf_2 = pd.concat(totalDf, sort=False)\t\t\t\t\t\t\t\t## concatanate the dfs in one df\n\tpd.DataFrame(totalDf_2).to_csv(\"dataSetsFromPy\\\\tr_tot.csv\")\t\t\t## output csv to dataSetsFromPy - maybe adjust that", "def save_matches(self):\n\n self._df_matches = pd.DataFrame(self._matches)\n self._df_matches.to_csv(self.matches_filename, index=False)\n self._df_matches_pairwise = pd.DataFrame(self._extract_matches_pairwise())\n self._df_matches_pairwise.to_csv(self.matches_pairwise_filename, index=False)", "def matrix_p1p2(matrix):\r\n #print('Start converting data notation from winner/loser to player_1/player_2')\r\n # define a new matrix for trainning and re-arange the information for winner and loser as player 1 and player 2. For each pair, player_1_id < player_2_id.\r\n matrix_n = pd.DataFrame()\r\n \r\n # match information\r\n col_match = ['tourney_name', 'surface', 'draw_size', 'tourney_level', 'tourney_date','year', 'month', 'day', 'day_week',\r\n 'match_num', 'best_of', 'round', 'minutes']\r\n \r\n matrix_n[col_match] = matrix[col_match]\r\n \r\n # columns for winner and loser\r\n \r\n col_w = [item for item in matrix.columns if 'winner' in item] \r\n col_l = [item for item in matrix.columns if 'loser' in item] \r\n \r\n # new columns for player 1 and player 2\r\n col_p1 = [item.replace('winner', 'p1') for item in col_w] \r\n col_p2 = [item.replace('winner', 'p2') for item in col_w] \r\n \r\n # re-arange the columns based on p1 and p2\r\n matrix[['winner_id','loser_id']]=matrix[['winner_id','loser_id']].astype(np.float64)\r\n \r\n matrix_n[col_p1] = matrix.loc[matrix.winner_id<matrix.loser_id,col_w] \r\n matrix_n[col_p2] = matrix.loc[matrix.winner_id>matrix.loser_id,col_w] \r\n \r\n matrix_n['p1_win'] = matrix_n['p1_id'].map(lambda x: 1 if x>0 else 0, na_action = 'ignore').fillna(0)\r\n matrix_n['p2_win'] = matrix_n['p2_id'].map(lambda x: 1 if x>0 else 0, na_action = 'ignore').fillna(0)\r\n \r\n for i in range(len(col_p1)):\r\n matrix_n[col_p1[i]].fillna(matrix[matrix.winner_id>matrix.loser_id][col_l[i]],inplace = True)\r\n matrix_n[col_p2[i]].fillna(matrix[matrix.winner_id<matrix.loser_id][col_l[i]],inplace = True)\r\n \r\n # add information for the number of set won by each player\r\n matrix_n['p1_sets_win'] = 0.0\r\n matrix_n['p2_sets_win'] = 0.0\r\n \r\n for i in range(1,6):\r\n matrix_n['p1_sets_win'] = matrix_n['p1_sets_win'] + 1.0*(matrix_n['p1_set_'+str(i)]>matrix_n['p2_set_'+str(i)])\r\n matrix_n['p2_sets_win'] = matrix_n['p2_sets_win'] + 1.0*(matrix_n['p1_set_'+str(i)]<matrix_n['p2_set_'+str(i)])\r\n \r\n matrix_n[['p1_id','p2_id']].astype(np.int64)\r\n \r\n \r\n #print('Conversion 
finished')\r\n \r\n return matrix_n", "def process(self):\n self.preprocess()\n self.find_unique()\n self.remove_duplicates()\n self.data_without_duplicates = self.data_without_duplicates.drop(columns=['full_name', 'find_unique'])\n self.data_without_duplicates.to_csv(f\"{self.folder_to_save}/relateddata.csv\")", "def read_fixations_file(datafile, surface_label):\n\tdata = pd.read_csv(datafile, sep=\"\\t\")\n\tif not len(data.index):\n\t\tprint(\"\\n{0} appears to be empty\".format(datafile))\n\t\treturn None\n\n\t# Only keep fixations that were inside a surface\n\tdata = data.query(\"on_srf == True\")\n\t# If dataframe is empty, issue a warning message and simply return None\n\tif not len(data.index):\n\t\tprint(\"\\n{0} has no fixations on surface\".format(datafile))\n\t\treturn None\n\n\tdata = data.rename(columns={\n\t\t\"start_timestamp\":\"timestamp\",\n\t\t\"norm_pos_x\":\"x\",\n\t\t\"norm_pos_y\":\"y\",\n\t\t\"duration\":\"fixation_duration_ms\"\n\t\t}\n\t)\n\t# Go up one folder as pupil exports data to \"metrics_xxx\" subfolder\n\ttrial_folder = os.path.split(os.path.split(datafile)[0])[0]\n\n\t(folder, trial_no) = os.path.split(trial_folder)\n\tparticipant = os.path.split(folder)[1]\n\n\t# Parse the subject nr from the folder name\n\ttry:\n\t\tsubject_nr = int(re.findall(r'\\d+',participant)[0])\n\t\tdata.loc[:,\"subject_nr\"] = subject_nr\n\t\ttry:\n\t\t\tphase = int(re.findall(r'\\d+',participant)[1])\n\t\t\tdata.loc[:,\"phase\"] = phase\n\t\texcept:\n\t\t\tpass\n\n\texcept:\n\t\tprint(\"\\nFile {0}: Could not parse participant number from folder name\".format(participant))\n\t\tprint(data)\n\t\tsys.exit(0)\n\n\ttry:\n\t\tdata.loc[:,\"subject_file\"] = participant\n\t\tdata.loc[:,\"trial_no\"] = int(trial_no)\n\t\tdata.loc[:,\"surface_label\"] = surface_label\n\n\texcept Exception as e:\n\t\tprint(\"\\nError reading participant fixations file {}, trial {}: {} (empty data file or no fixations on surface?)\\n\".format(participant, trial_no, e))\n\t\treturn None\n\n\t# Set fixation indices\n\tdata.loc[:,\"fixation_index\"] = data.reset_index().index+1\n\t# Value is normally in seconds, Convert to ms\n\tdata.loc[:,\"fixation_duration_ms\"] = data[\"fixation_duration_ms\"].astype(float)*1000\n\n\tcolumns = [\"id\",\"subject_file\",\"subject_nr\",\"trial_no\",\"surface_label\",\\\n\t\"fixation_index\",\"fixation_duration_ms\",\"x\",\"y\",\"timestamp\"]\n\n\ttry:\n\t\tphase\n\t\tcolumns.append('phase')\n\texcept:\n\t\tpass\n\n\tdata = data.reindex(columns=columns)\n\treturn data", "def pd_to_petrel_points( df, file_name, columns = []):\n print('pd_to_petrel_points');\n tmp =df[ columns ]\n print('Exporting rows ', len(tmp))\n tmp.to_csv(file_name, sep=' ', index = False)\n print('done')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns percentage of col that is less than or equal to x.
def within(col, x): col = col.sort_values() number = 0 while col.iloc[number] <= x and number < len(col): number += 1 return number+1
[ "def percent(x):\n \"*** YOUR CODE HERE ***\"\n c=center(x)\n p=(100*abs(lower_bound(x)-c))/c\n return p", "def percent(x):\n \"*** YOUR CODE HERE ***\"\n a, b = lower_bound(x), upper_bound(x)\n w = (b - a) /2\n c = (b + a) /2\n return abs(w / c) * 100", "def percent(x):\n return width(x) / center(x) * 100", "def count_less_than(self, x: T) -> int:\n ans = 0\n for bucket in self._buckets:\n if bucket[-1] >= x:\n return ans + bisect_left(bucket, x)\n ans += len(bucket)\n return ans", "def outlier_percent(col):\n # Logic is to find the upper and lower limit and check how many fall into the range and calculate the percentage of those that don't.\n q1 = get_Q1(col)\n q3 = get_Q3(col)\n out_range = []\n iqr = abs(q3 - q1)\n lower_limit = abs(q1 - (1.5 * iqr))\n upper_limit = abs(q3 + (1.5 * iqr))\n # Handling error for 0 division.\n try:\n for i in col:\n if (\n i > upper_limit or i < lower_limit\n ): # Checking if it is out of the upper and lower limit.\n out_range.append(i)\n outlier_per = (len(out_range) / len(col)) * 100\n return outlier_per\n except ZeroDivisionError:\n return print(\"No outliers\")", "def count_less_than_or_equal_to(self, x: T) -> int:\n ans = 0\n for bucket in self._buckets:\n if bucket[-1] > x:\n return ans + bisect_right(bucket, x)\n ans += len(bucket)\n return ans", "def active_perc(X, threshold):\n thres_X = X >= threshold\n active_samples = np.sum(thres_X, axis=0)\n active_perc = active_samples / np.float(thres_X.shape[0])\n return(active_perc)", "def _percentageOfGrid(self, percent):\n numSpecialPoints = int((self.numRows * self.numCols) * percent)\n return numSpecialPoints", "def threshold_from_contamination(self, X):\n values = self.decision_function(X, raw_values=True)\n threshold = sp.stats.scoreatpercentile(\n values, 100. * (1. 
- self.contamination))\n return threshold", "def percent_clipper(x, percentiles):\n \n LOWERBOUND, UPPERBOUND = np.percentile(x, [percentiles[0], percentiles[1])\n \n return np.clip(x, LOWERBOUND, UPPERBOUND)", "def grid_score(self, threshold=0, percentile=75):\r\n array = self.get_array()\r\n masked_array = np.ma.masked_less_equal(array, threshold)\r\n values = masked_array.compressed()\r\n\r\n if len(values) == 0:\r\n return 0\r\n else:\r\n return np.percentile(values, percentile)", "def percentile_confidence_interval(x, percentile=68, confidence_level=0.95):\n sorted_x = np.sort(x)\n z = norm.ppf(confidence_level)\n if len(x) == 0:\n return 0, 0\n q = percentile / 100.\n\n j = np.max([0, int(len(x) * q - z * np.sqrt(len(x) * q * (1 - q)))])\n k = np.min([int(len(x) * q + z * np.sqrt(len(x) * q * (1 - q))), len(x) - 1])\n return sorted_x[j], sorted_x[k]", "def get_fraction_above_coverage_percentile(county, state, year, percentile=85):\n try:\n county_col = '%s, %s' % (county, state)\n fraction_col = '%s_fraction_above_%d' % (year, percentile)\n df = pd.read_csv(\n '%s/us_counties_fraction_above_2014_2016.csv' % data_folder)\n fraction = df[df.name == county_col].iloc[0][fraction_col]\n return float(fraction.item())\n except:\n return None", "def make_pct(self, X):\n return 100.0*X/self.ev.pop", "def get_marginal(x):\n px = np.array([np.sum(x==xi) \n for xi in np.unique(x)])/len(x)\n return px", "def threshold_decision(self, x, threshold):\n \n if (x >= threshold):\n return 1\n \n else:\n return 0", "def percents(x,y):\n one_percent = x/100\n result = y / one_percent\n return result", "def row_count_less_x(self, statement, x):\n\n count = self.row_count(statement)\n\n if count > int(x):\n raise AssertionError(\"Expected the count to be less than \\\n %s from %s but it was %d instead\" % (x, statement, count))", "def above_quantile_threshold(X, source_col=None, quantile_threshold=None, new_colname=None):\n # Create new column name if none specified\n if not new_colname:\n new_colname = source_col + '_above_' + str(quantile_threshold) + '_quantile'\n if not source_col:\n raise 'No source column to compute quantile threshold from specified'\n new_colname = source_col + '_above_' + str(quantile_threshold) + '_quantile'\n if not quantile_threshold:\n raise 'No source column to quantile threshold specified. Should be float in range 0-1, eg .75' \n \n # New column is array with 1 where source col is above specified quantile\n new_col = np.where(X[source_col] > X[source_col].quantile(quantile_threshold), 1, 0)\n return X.assign(**{new_colname: new_col})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the piece count difference, mobility (current player moves available) and potential mobility (amount of empty spaces around opponent pieces). Returns a weighted sum of the heuristics.
def calculate_utility(self, boardstate): #return self.mycount_difference(boardstate) #diff = self.mycount_difference(boardstate) legMovs = len(boardstate.calculate_legal_moves()) potMob = self.get_potential_mobility(boardstate) return legMovs + potMob
[ "def weighted_score(player, board):\n opp = othello.opponent(player)\n total = 0\n for sq in othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total", "def calculate_weight(self):\n\n\t\tweight = 0\n\t\tfor item in self.items:\n\t\t\tif item == \"Health Potions\" or item == \"Magic Potions\":\n\t\t\t\tweight += self.items[item]\n\n\t\tself.weight = weight", "def custom_heuristic(gameState):\r\n center_weight = 0.5\r\n lib_weight = 1.5\r\n own_loc = gameState.locs[self.player_id]\r\n opp_loc = gameState.locs[1- self.player_id]\r\n own_liberties = gameState.liberties(own_loc)\r\n opp_liberties = gameState.liberties(opp_loc)\r\n # Custom 1: distanceToCenter(own_loc)\r\n # Custom 2: len(own_liberties) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 3: len(own_liberties) - ( len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) ) \r\n # Custom 4: len(own_liberties) - ( lib_weight * len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 5: ( lib_weight * (len(own_liberties) / len(opp_liberties)) - ( center_weight * distanceToCenter(own_loc)) )\r\n return ( lib_weight * (len(own_liberties) / len(opp_liberties)) - (center_weight * distanceToCenter(own_loc)) )", "def get_potential_mobility(self, boardstate):\n potential_mobility = 0\n for space in boardstate._board:\n if space == opponent(self.mycolor):\n if space + 1 == Empty:\n potential_mobility += 1\n elif space - 1 == Empty:\n potential_mobility += 1\n elif space + 10 == Empty:\n potential_mobility += 1\n elif space - 10 == Empty:\n potential_mobility += 1\n elif space + 9 == Empty:\n potential_mobility += 1\n elif space + 11 == Empty:\n potential_mobility += 1\n elif space - 9 == Empty:\n potential_mobility += 1\n elif space - 11 == Empty:\n potential_mobility += 1\n return potential_mobility", "def __heuristic(self, game_map, square):\n if square.owner == 0 and square.strength > 0:\n return square.production / square.strength\n else:\n return sum(neighbor.strength\n for neighbor in game_map.neighbors(square)\n if neighbor.owner not in (0, self.bot_id))", "def calc_relative_weight(self):\n relative_weight = self.weight\n for agent in self.agents:\n if relative_weight > 0:\n relative_weight -= self.agents[agent]\n return relative_weight", "def total_weight(self) -> float:\r\n return sum([self.neighbours[x] for x in self.neighbours])", "def get_calc_weight(self):\n # TODO: Make sure piece weight is being imported correctly\n weight = db.run_sql(\"SELECT SUM(bl_inventories.quantity * parts.weight) FROM bl_inventories JOIN parts\"\n \" ON bl_inventories.piece_id = parts.id\"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return weight", "def calculate_weight_bonus(self):\n self.weight_bonus = round(self.skijumper.height /\n self.skijumper.weight, 2)\n self.jump_distance += self.weight_bonus\n self.estimate += self.weight_bonus\n pass", "def total_cost(self):\n return self.heuristic() + self.backward_cost", "def baseline_heuristic(gameState):\r\n own_loc = gameState.locs[self.player_id]\r\n opp_loc = gameState.locs[1 - self.player_id]\r\n own_liberties = gameState.liberties(own_loc)\r\n opp_liberties = gameState.liberties(opp_loc)\r\n return len(own_liberties) - len(opp_liberties)", "def compute_heuristic(self, global_board: np.ndarray, depth: int) -> float:\n return sum([self._precalc_boards[b] for b in global_board]) / depth", "def materialScore(board:chess.Board):\n score = 0\n for 
piece in wights:\n score += wights[piece] * getPieceCountDiff(board,piece)\n return score", "def weight_heuristic(board, weight_function):\r\n counter = 0\r\n x = board.player.get_x_coordinate()\r\n y = board.player.get_y_coordinate()\r\n board = board.current_board\r\n for i, row in enumerate(board):\r\n for j, col in enumerate(row):\r\n if board[i][j] != ' ' and board[i][j] != 'X':\r\n counter += weight_function(x, y, j, i)\r\n return counter", "def heuristic(state, heuristic_dict):\n h = 0\n for piece in state:\n h += heuristic_dict[piece]\n return h", "def calculate_score(self):\n\t\tlocation_score = 0\n\t\tfor location in self.locations:\n\t\t\tlocation_score += location.get_vps()\n\t\t\n\t\tlord_score = 0\n\t\tfor lord in self.lords:\n\t\t\tlord_score += lord.vps\n\t\t\t\n\t\taffiliate_score = 0\n\t\tfor affiliate_race in self.affiliates:\n\t\t\tif (affiliate_race):\n\t\t\t\taffiliate_score += min(affiliate_race)\n\t\t\t\n\t\tmonster_score = sum(self.monsters)\n\t\t\n\t\treturn location_score + lord_score + affiliate_score + monster_score", "def _numWorthless(self):\r\n self.inventoryManager.refreshInventory()\r\n inv = self.inventoryManager.inventory()\r\n n = inv.get(43, 0) + inv.get(44, 0) + inv.get(45, 0)\r\n return n", "def __calculate_board_heuristic(self, board: np.ndarray) -> int:\n diagonal_one, diagonal_two, opponent_diagonal_one, opponent_diagonal_two, winner_d, loser_d = self.calculate_diagonal(\n board)\n vertical_one, vertical_two, opponent_vertical_one, opponent_vertical_two, winner_v, loser_v = self.__calculate_vertical(\n board)\n horizontal_one, horizontal_two, opponent_horizontal_one, opponent_horizontal_two, winner_h, loser_h = self.__calculate_horizontal(\n board)\n\n winner = winner_d + winner_v + winner_h\n loser = loser_d + loser_v + loser_h\n my_two = vertical_two + diagonal_two + horizontal_two\n my_one = vertical_one + diagonal_one + horizontal_one\n opp_two = opponent_vertical_two + opponent_diagonal_two + opponent_horizontal_two\n opp_one = opponent_vertical_one + opponent_diagonal_one + opponent_horizontal_one\n\n heuristic = self._win * winner + self._lose * loser + self._alpha * my_two + self._beta * my_one - self._gamma * opp_two - self._delta * opp_one\n\n return heuristic", "def weight_check(self, program):\n Wp = self.weights.get(program, 0)\n Ws = 0\n if program in self.tower.keys():\n Ws_dict = {p:self.weight_check(p) for p in self.tower[program]}\n if self.complete: return \n print(Ws_dict)\n balanced = len(set(Ws_dict.values())) <= 1\n if not balanced:\n self.solution = self.corrected_weight(Ws_dict)\n self.complete = True\n return\n Ws = sum(Ws_dict.values())\n return Wp + Ws" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determine the potential mobility by determining how many opponent pieces have adjacent empty spaces.
def get_potential_mobility(self, boardstate): potential_mobility = 0 for space in boardstate._board: if space == opponent(self.mycolor): if space + 1 == Empty: potential_mobility += 1 elif space - 1 == Empty: potential_mobility += 1 elif space + 10 == Empty: potential_mobility += 1 elif space - 10 == Empty: potential_mobility += 1 elif space + 9 == Empty: potential_mobility += 1 elif space + 11 == Empty: potential_mobility += 1 elif space - 9 == Empty: potential_mobility += 1 elif space - 11 == Empty: potential_mobility += 1 return potential_mobility
[ "def check_if_strong_enough(MyMoves, middle_coord):\n\n ## GET ACTUAL COORDS/DISTANCE OF THE ENEMY\n value = MyMoves.myMatrix.matrix[MyMoves.myMap.my_id][0] ## 1 IS FOR HP MATRIX\n # v_enemy = MyCommon.get_section_with_padding(value, ship_coords, MyCommon.Constants.ATTACKING_RADIUS, 0)\n v_enemy = MyCommon.get_section_with_padding(value, middle_coord, MyCommon.Constants.ATTACKING_RADIUS, 0)\n\n value = MyMoves.myMatrix.ally_matrix\n # v_ally = MyCommon.get_section_with_padding(value, ship_coords, MyCommon.Constants.ATTACKING_RADIUS, 0)\n v_ally = MyCommon.get_section_with_padding(value, middle_coord, MyCommon.Constants.ATTACKING_RADIUS, 0)\n\n ## INSTEAD OF USING ABOVE, COUNT -1 AND 1 ONLY. SINCE ABOVE INCLUDES ENEMY MINING\n ## ONLY GRAB A SECTION (STRONG ENOUGH RADIUS) OF THE SECTION (ATTACKING RADIUS)\n ## INCLUDE DOCKED SHIPS WHEN CALCULATING ALLY POWER\n ## TO PREVENT ONE SHIP FROM BACKING OUT WHEN PROTECTING DOCKED SHIPS AGAINST 1 ENEMY SHIP\n # num_enemy_in_section = (v_enemy==-1).sum()\n # num_ally_in_section = (v_ally==1).sum()\n num_enemy_in_section = (v_enemy[\n MyCommon.Constants.ATTACKING_RADIUS - MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS + MyCommon.Constants.STRONG_ENOUGH_RADIUS + 1,\n MyCommon.Constants.ATTACKING_RADIUS - MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS + MyCommon.Constants.STRONG_ENOUGH_RADIUS + 1] == -1).sum() ## JUST GET A 7x7 matrix\n # num_ally_in_section = (v_ally[MyCommon.Constants.ATTACKING_RADIUS-MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS+MyCommon.Constants.STRONG_ENOUGH_RADIUS+1,\n # MyCommon.Constants.ATTACKING_RADIUS-MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS+MyCommon.Constants.STRONG_ENOUGH_RADIUS+1] == 1).sum() \\\n # + (v_ally[MyCommon.Constants.ATTACKING_RADIUS-MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS+MyCommon.Constants.STRONG_ENOUGH_RADIUS+1,\n # MyCommon.Constants.ATTACKING_RADIUS-MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS+MyCommon.Constants.STRONG_ENOUGH_RADIUS+1] == 0.75).sum()\n ## MATRIX ALLY CONTAINS SHIP ID NOW\n num_ally_in_section = (v_ally[\n MyCommon.Constants.ATTACKING_RADIUS - MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS + MyCommon.Constants.STRONG_ENOUGH_RADIUS + 1,\n MyCommon.Constants.ATTACKING_RADIUS - MyCommon.Constants.STRONG_ENOUGH_RADIUS:MyCommon.Constants.ATTACKING_RADIUS + MyCommon.Constants.STRONG_ENOUGH_RADIUS + 1] != -1).sum()\n\n strong_enough = num_ally_in_section > num_enemy_in_section\n\n return strong_enough, v_enemy", "def get_empty_spaces(self, boardstate):\n empty = 0\n for space in range(11, 90):\n if boardstate._board[space] == 0:\n empty += 1\n return empty", "def calculate_utility(self, boardstate):\n #return self.mycount_difference(boardstate)\n #diff = self.mycount_difference(boardstate)\n legMovs = len(boardstate.calculate_legal_moves())\n potMob = self.get_potential_mobility(boardstate)\n return legMovs + potMob", "def enemyOnBoard():\n zCount, fZCount, bCount, mCount, pCount = self.count_pieces()\n return zCount > 0 or fZCount > 0 or bCount > 0 or mCount > 0", "def nimm()->None:\n pile = INITIAL_STONES\n while pile > 0:\n for player in range(PLAYER_COUNT):\n player += 1\n pile = remove_stones(pile, player)\n if pile == 0:\n game_over(player)\n break", "def spaces_in_zone_one(self):\r\n element_offset = 0\r\n free_spaces = 0\r\n for row in range(5, 10):\r\n for element in 
range(9 + element_offset, 10):\r\n if self.board[row][element] == 0:\r\n free_spaces += 1\r\n element_offset -= 1\r\n return free_spaces", "def count_obstacles_in_my_elf_way_to_castle(game, elf):\n count = 0\n for portal in game.get_enemy_portals():\n if portal.distance(elf) + portal.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n portal.distance(elf) + portal.distance(game.get_enemy_castle()) > elf.distance(game.get_enemy_castle()) - game.portal_size:\n \n count += 2 # portals are harder to kill so i consider them as 2 (in comperisson it wont matter)\n \n for mana_fountain in game.get_enemy_mana_fountains():\n if mana_fountain.distance(elf) + mana_fountain.distance(game.get_enemy_castle()) < elf.distance(game.get_enemy_castle()) + game.portal_size or \\\n mana_fountain.distance(elf) + mana_fountain.distance(game.get_enemy_castle()) > elf.distance(game.get_enemy_castle()) - game.portal_size:\n \n count +=1\n\n return count", "def count_pieces(self):\n state = board2state(self.board)\n zombieCount, fZombieCount, bombCount, multCount, pumpCount = [0] * 5\n for token in state:\n if token == '1' or token == '8':\n zombieCount += 1\n elif token == '2':\n fZombieCount += 1\n elif token == '4' or token == '11':\n bombCount += 1\n elif token == '5' or token == '12':\n multCount += 1\n elif token == '6':\n pumpCount += 1\n return zombieCount, fZombieCount, bombCount, multCount, pumpCount", "def balanced(m):\n \"*** YOUR CODE HERE ***\"\n l, r = sides(m)[0], sides(m)[1]\n l_len, r_len = l[0], r[0]\n l_wei_or_mob, r_wei_or_mob = l[1], r[1]\n if is_weight(l_wei_or_mob) and is_weight(r_wei_or_mob): ##two weights\n return size(l_wei_or_mob) * l_len == size(r_wei_or_mob)*r_len\n elif is_weight(l_wei_or_mob) and not is_weight(r_wei_or_mob): ##lweight rmobile\n return size(l_wei_or_mob) * l_len == total_weight(r_wei_or_mob)*r_len and balanced(r_wei_or_mob)\n elif not is_weight(l_wei_or_mob) and is_weight(r_wei_or_mob): ##lmobile rweight\n return total_weight(l_wei_or_mob)*l_len == size(r_wei_or_mob)*r_len and balanced(l_wei_or_mob)\n elif not is_weight(l_wei_or_mob) and not is_weight(r_wei_or_mob): ##two mobiles\n return total_weight(l_wei_or_mob)*l_len == total_weight(r_wei_or_mob)*r_len and balanced(l_wei_or_mob) and balanced(r_wei_or_mob)", "def occupied(self):\n return self._hole or self._nutrient or self._obstacle", "def count_obstacles_in_enemy_elf_way_to_castle(game, elf):\n count = 0\n for portal in game.get_my_portals():\n if portal.distance(elf) + portal.distance(game.get_my_castle()) < elf.distance(game.get_my_castle()) + game.portal_size or \\\n portal.distance(elf) + portal.distance(game.get_my_castle()) > elf.distance(game.get_my_castle()) - game.portal_size:\n \n count += 2 # portals are harder to kill so i consider them as 2 (in comperisson it wont matter)\n \n for mana_fountain in game.get_my_mana_fountains():\n if mana_fountain.distance(elf) + mana_fountain.distance(game.get_my_castle()) < elf.distance(game.get_my_castle()) + game.portal_size or \\\n mana_fountain.distance(elf) + mana_fountain.distance(game.get_my_castle()) > elf.distance(game.get_my_castle()) - game.portal_size:\n \n count +=1\n \n return count", "def is_board_full():\n num_pieces = np.sum(np.abs(board))\n print(\"Number of pieces in board {}\".format(num_pieces))\n if num_pieces == 9:\n return True\n return False", "def is_diplomacy_full(self):\n return (len(self.track['reputation']) + len(self.track['diplomacy']) >= self.tile_max or\n len(self.track['diplomacy']) >= 
self.diplomacy_max)", "def calculate_full(self):\n self.full = len(self.nodes(type=Agent)) >= self.max_size", "def is_not_full(self):\n if len(self.players) >= self.max_players:\n return False\n\n return True", "def get_remaining_pegs(self):\n return len(list(filter(lambda x: x.has_piece(), itertools.chain(*self.board.content))))", "def count_pieces(self):\n white_pieces = (self.board == 1).sum()\n black_pieces = (self.board == -1).sum()\n return white_pieces, black_pieces", "def checkMatchStatistic(self):\n numOfNan = self.matches[self.matches['w_ace'].isnull() | self.matches['w_df'].isnull() |\n self.matches['w_svpt'].isnull() | self.matches['w_1stIn'].isnull() |\n self.matches['w_1stWon'].isnull() | self.matches['w_2ndWon'].isnull() |\n self.matches['w_SvGms'].isnull() | self.matches['w_bpSaved'].isnull() |\n self.matches['w_bpFaced'].isnull()].shape[0]\n\n numOfNan += self.matches[self.matches['l_ace'].isnull() | self.matches['l_df'].isnull() |\n self.matches['l_svpt'].isnull() | self.matches['l_1stIn'].isnull() |\n self.matches['l_1stWon'].isnull() | self.matches['l_2ndWon'].isnull() |\n self.matches['l_SvGms'].isnull() | self.matches['l_bpSaved'].isnull() |\n self.matches['l_bpFaced'].isnull()].shape[0]\n\n print(\"Sanity checking match statistic: \" + str(numOfNan))\n\n self.matches.dropna(\n subset=['w_ace', 'w_df', 'w_svpt', 'w_1stIn', 'w_1stWon', 'w_2ndWon', 'w_SvGms', 'w_bpSaved', 'w_bpFaced'],\n inplace=True)\n\n self.matches.dropna(\n subset=['l_ace', 'l_df', 'l_svpt', 'l_1stIn', 'l_1stWon', 'l_2ndWon', 'l_SvGms', 'l_bpSaved', 'l_bpFaced'],\n inplace=True)", "def test_total_neighbors(st: SpaceTime):\n # This is actually only true if the space_time is large enough. WHen it is small enough one node may be two different neighors reducing the total number of neighbors.\n for n in events(st):\n assert len(n.neighbors) >= 4" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the of empty spaces from the boardstate.
def get_empty_spaces(self, boardstate): empty = 0 for space in range(11, 90): if boardstate._board[space] == 0: empty += 1 return empty
[ "def _get_empty(self):\n empty_cells = []\n row_i = 0\n column_i = 0\n\n for row in self._grid:\n column_i = 0\n for column in row:\n if column == 0:\n empty_cells.append([row_i, column_i])\n column_i += 1\n row_i += 1\n\n return empty_cells", "def spaces_in_zone_one(self):\r\n element_offset = 0\r\n free_spaces = 0\r\n for row in range(5, 10):\r\n for element in range(9 + element_offset, 10):\r\n if self.board[row][element] == 0:\r\n free_spaces += 1\r\n element_offset -= 1\r\n return free_spaces", "def number_empty_spaces(self):\n return self._number_of_empty_spaces", "def get_blank_space_index(self):\n return self.initial_puzzle.get_blank_space_index()", "def empty_cells(self):\n return self.__empty_cells", "def get_moves(self):\n return self._game_board.get_empty_squares()", "def blank_space(brd):\n\n\tfor i in range(len(brd)): \t\t#length of the board is 9\n\t\tfor j in range(len(brd)):\n\t\t\tif brd[i][j] == 0:\n\t\t\t\treturn (i,j)\n\n\treturn 0", "def list_empty_positions(self, random_pawn=False):\n if random_pawn:\n return [i for i in range(len(self._brd))\n if self[i] == EMPTY and floor(i / 10) not in (2, 9)]\n return [i for i in range(len(self._brd)) if self[i] == EMPTY]", "def get_empty_cell(self):\n return ' ' * self.width", "def get_blank(self):\n if self.chainMode == ChainMode.CHARS:\n return \" \"\n elif self.chainMode == ChainMode.WORDS:\n return [\" \"]", "def empty_corner(self):\n return [square for square in range(0, len(self.board), 2) if not self.board[square]]", "def get_free_cells(self):\n free_cells = []\n for i in range(3):\n for j in range(3):\n if self[i, j] == \" \":\n free_cells.append((i, j))\n return free_cells", "def empty_square(self):\n size = len(self.grid)\n return [(x, y) for y in range(size) for x in range(size) if self.grid[x][y] == None]", "def get_empty_squares(board):\n empty_squares = [] # List of (row, col) coordinates that are empty in the grid.\n for x, y in np.ndindex((len(board), len(board))):\n tile = board[x][y]\n if tile == 0:\n empty_squares.append([x, y])\n return empty_squares", "def num_empty(state):\n return sum([row.count(0) for row in state])", "def getTile(board):\r\n position = []\r\n for row in range(len(board)):\r\n for col in range(len(board[row])):\r\n if board[row][col] == 0: #only adds empty spaces\r\n position.append((row, col))\r\n return position", "def get_spaces(self):\n pass", "def num_empty(self) -> int:\n return np.count_nonzero(self.state == EMPTY)", "def is_space_free(self, move):\r\n return self.board[move] == ' ' # Return True of empty, else return False.\r" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that fills up a square with a color in the grid, coressponding to arr position
def fill_rect(win, arr_pos, coordinates, sq_dimensions, color): color_dict = {"maroon": (128,0,0), "navyblue": (0,0,128), "green":(0,255,0), "black":(0,0,0), "white":(255,255,255)} x, y = coordinates[str(arr_pos)] sq_width, sq_height = sq_dimensions pygame.draw.rect(win, color_dict[color], (x+1, y+1, sq_width-2, sq_height-2))
[ "def color_position(row: int, col: int):\r\n self.__grid[block.y_pos+row][block.x_pos+col] = block.color", "def fill_grid(self, board):\r\n for x in range(CELLSIZE, FULLSIZE[0] + CELLSIZE, CELLSIZE):\r\n for y in range(CELLSIZE, FULLSIZE[0] + CELLSIZE, CELLSIZE):\r\n if str(board[(y // CELLSIZE - 1) % 9][(x // CELLSIZE - 1) % 9]) == \"0\":\r\n self.fill_number(\" \", x * 2, y * 2)\r\n else:\r\n self.fill_number(str(board[(y // CELLSIZE - 1) % 9][(x // CELLSIZE - 1) % 9]), x*2, y*2)", "def flood_fill(array, start_pos, fill_value):\n for row, col in flood_select(array, start_pos):\n array[row][col] = fill_value", "def rect(self, a, b):\n for row in self.grid[:b]:\n for i in range(a):\n row[i] = 1", "def draw_grid():\n for y in range(num_rows):\n for x in range(num_cols):\n led_matrix.point(x, y, curr_gen[y][x])", "def copy_color_area(self, arr, color):\n arr_color = np.where(np.all(arr == color, axis=-1))\n location_color = list(zip(arr_color[0], arr_color[1]))\n return location_color", "def __add_reference_grid(self):\n if self.compact:\n return\n center = self.size // 2\n ring_radius = 5 if self.compact else 7\n for x in range(-center, center + 1):\n for y in range(-center, center + 1):\n # skip finder pattern\n if -ring_radius <= x <= ring_radius and -ring_radius <= y <= ring_radius:\n continue\n # set pixel\n if x % 16 == 0 or y % 16 == 0:\n val = '#' if (x + y + 1) % 2 != 0 else ' '\n self.matrix[center + y][center + x] = val", "def brushsize(array, x_index, y_index, size, rgb_tuple):\n\n for new_pos in range(size):\n new_x_index_left = x_index - new_pos\n new_x_index_right = x_index + new_pos\n new_y_index_left = y_index - new_pos\n new_y_index_right = y_index + new_pos\n positions = [\n (\n new_x_index_left,\n y_index\n ),\n (\n new_x_index_right,\n y_index\n ),\n (\n new_y_index_left,\n x_index\n ),\n (\n new_y_index_right,\n x_index\n )\n ]\n\n for entry in positions:\n x = entry[0]\n y = entry[1]\n array[x, y] = rgb_tuple\n return array", "def fill(self, value): # type: (Color) -> None\n for row in self._matrix:\n row.fill(value)", "def fill(fn, grid, ngrid, voxel, volumetric, icell, j=2):\n def p_grid(index, grid, icell):\n \"\"\"Get point in grid unit from the specified voxel index and cell.\"\"\"\n return ((voxel.T * index).T.sum(axis=0) @ icell) * grid\n\n a = np.empty(ngrid)\n for k in product(range(ngrid[0]), range(ngrid[1]), range(ngrid[2])):\n a[k] = fn(p_grid(k, grid, icell), grid, volumetric)\n return a", "def fill_color(self, x, y, color):\r\n layout = copy.deepcopy(self._layout_matrix)\r\n fill_color = FillColor(layout)\r\n layout_matrix = fill_color.draw_shape(x, y, color)\r\n self._layout_matrix = copy.deepcopy(layout_matrix)\r\n return layout_matrix", "def draw_Square():\r\n t.down()\r\n t.color(\"purple\")\r\n t.fillcolor(\"purple\")\r\n t.begin_fill()\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.left(90)\r\n t.fd(boundary() * 2)\r\n t.end_fill()\r\n t.up", "def create_grid(grid):\r\n for t in range(4):\r\n grid.append([0,0,0,0])", "def grid(self):\n # Blank the grid\n for y in range(1, 21):\n self.addstr(y, 1, \" \" * 10)\n # Draw the new grid\n for x, column in enumerate(self.game.grid):\n for y, color in enumerate(column):\n y -= self.game.grid.top_buffer\n if y >= 0:\n self.pixel(x, y, color)\n\n # Finally refresh the screen\n self.refresh()", "def create_grid(grid):\r\n grid.append([0]*4)\r\n grid.append([0]*4)\r\n grid.append([0]*4)\r\n grid.append([0]*4)", "def fill(color):\n 
screen.surface.fill(color)", "def fill_cell(board, position):\n if not is_filled_at(board, position) and inside_board(board, position):\n board[position] = True", "def color_of(self, coord):\n\n return self.grid[int(coord[1] * self.unit_size), int(coord[0] * self.unit_size), :]", "def add(self):\n for i in range(len(self.current.shape)):\n for j in range(len(self.current.shape[i])):\n if self.current.shape[i][j]==1:\n self.grid[self.s[0]+i][self.s[1]+j]=self.current.number\n self.colors.append(self.current.color)\n # for row in self.grid:\n # print row" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the shape of images returned by next_batch_train
def get_images_shape(): return (self.batch_size, self.OUTPUT_SIZE, self.OUTPUT_SIZE, self.NUM_CHANNELS)
[ "def state_shape(self, batch_size):\n return [self.num_layers * self.num_dirs, batch_size, self.num_units],", "def image_shape(self):\n return self.mri_imgs[0].shape", "def state_shape(self, batch_size):\n return ([self.num_layers * self.num_dirs, batch_size, self.num_units],\n [self.num_layers * self.num_dirs, batch_size, self.num_units])", "def get_batch_size(input_nchw: torch.Tensor) -> int:\n return input_nchw.size(dim=0)", "def getNumpyArraySize(image):\n return image.shape", "def _train_batch_sizes(self):\n for device in device_lib.list_local_devices():\n # TODO(b/141475121): We need some way to check which batch sizes would\n # work using a public API.\n if tf.DeviceSpec.from_string(device.name).device_type == 'GPU':\n # Avoid OOM errors with larger batch sizes, which seem to cause errors\n # later on even if caught.\n #\n # TODO(allenl): Base this on device memory; memory limit information\n # during the test seems to exclude the amount TensorFlow has allocated,\n # which isn't useful.\n if 'K20' in device.physical_device_desc:\n return (16,)\n # Quardro P1000.\n if 'P1000' in device.physical_device_desc:\n return (16,)\n if 'P100' in device.physical_device_desc:\n return (16, 32, 64)\n\n if tf.DeviceSpec.from_string(device.name).device_type == 'TPU':\n return (32,)\n return (16, 32)", "def _get_batch_size(batch: Union[MappedTriples, Tuple[MappedTriples, torch.FloatTensor]]) -> int:\n raise NotImplementedError", "def get_image_shape(self) -> Tuple[int, int]:", "def compute_output_shape(observation_space, layers):\n # [None] adds a batch dimension to the random observation\n torch_obs = torch.tensor(observation_space.sample()[None])\n with torch.no_grad():\n sample = preprocess_obs(torch_obs, observation_space, normalize_images=True)\n for layer in layers:\n # forward prop to compute the right size\n sample = layer(sample)\n\n # make sure batch axis still matches\n assert sample.shape[0] == torch_obs.shape[0]\n\n # return everything else\n return sample.shape[1:]", "def get_batch_shape(self, x):\n return self.get_shape(x, sample=False, batch=True, event=False)", "def get_batch_size(self):\n pass", "def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n output_shape = None\n for p in self.processings:\n new_output_shape = p.infer_image_input_shape()\n if new_output_shape is not None:\n output_shape = new_output_shape\n\n return output_shape", "def get_batch_size(batch: Any) -> int:\n data_list, _ = tree_flatten(batch)\n for data in data_list:\n if isinstance(data, torch.Tensor):\n return data.size(0)\n raise RuntimeError('No tensor found in the batch')", "def infer_image_input_shape(self) -> Optional[Tuple[int, int]]:\n return self.output_shape", "def shape(self):\n image, _ = self.datasets[0][0]\n return image.unsqueeze(dim=0)", "def get_dimensions(batch: Sequence) -> List[int]:\n return list(map(max, _get_all_dimensions(batch)))", "def get_input_dim(self):\n if self.input_dim is None:\n if self.multiple_file:\n with open(self.paths[0], 'rb') as file:\n x = load(file)\n x = np.array(x)\n self.input_dim = len(x[0]) - 1\n return len(x[0]) - 1\n else:\n self.input_dim = len(self.test_paths[0])- 1\n return self.input_dim\n else:\n return self.input_dim", "def shape(self):\n return self._input.shape", "def batch_size(self):\n return len(self._edges[0])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the next batch for the test data with the requested batch_size or the current default. This function takes care of all the data augmentation techniques.
def next_batch_test(self, batch_size=None): # set the batch_size and output_size to class default if batch_size is None: batch_size = self.test_batch_size output_size = self.OUTPUT_SIZE input_size = self.INPUT_SIZE # create an array of indicies to retrieve idxs = self.test_idxs[self.test_cursor:self.test_cursor+batch_size] if self.test_cursor+batch_size >= self.test_idxs.size: idxs = np.append(idxs, self.test_idxs[:(self.test_cursor+batch_size - self.test_idxs.size)]) # retrieve the images and labels & apply data augmentation labels = np.tile(self.dataset_labels[idxs, :].reshape(-1), 10) images_raw = self.dataset_images[idxs, :, :, :].swapaxes(1, 3) images = np.concatenate((images_raw[:, 0:output_size, 0:output_size, :], images_raw[:, input_size-output_size:input_size+1, 0:output_size, :], images_raw[:, 0:output_size, input_size-output_size:input_size+1, :], images_raw[:, input_size-output_size:input_size+1, input_size-output_size:input_size+1, :], images_raw[:, (input_size-output_size+1)/2:input_size - (input_size - output_size + 1) / 2 + 1, (input_size-output_size+1)/2:input_size - (input_size - output_size + 1) / 2 + 1, :]), axis=0) images = np.concatenate((images, np.fliplr(images)), axis=0) # move the cursors self.test_cursor = (self.test_cursor + batch_size) % (self.NUM_TEST_ITEMS_PER_CLASS * self.NUM_CLASSES) return (255.0 - images, labels - 1)
[ "def get_batch(batch_size=None, test=False):\n examples = TEST_EXAMPLES if test else TRAIN_EXAMPLES\n batch_examples = random.sample(\n examples, batch_size) if batch_size else examples\n return batch_examples", "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(self._num_examples)\n np.random.shuffle(perm)\n self._data = self._data[perm]\n self._labels = self._labels[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._data[start:end], self._labels[start:end]", "def next_batch(self, batch_size):\n\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n # Finished epoch\n self._epochs_completed += 1\n # Shuffle the data\n perm = np.arange(int(self._num_examples))\n np.random.shuffle(perm)\n self._data = self._data[perm]\n self._label = self._label[perm]\n # Start next epoch\n start = 0\n self._index_in_epoch = batch_size\n assert batch_size <= self._num_examples\n end = self._index_in_epoch\n return self._data[start:end], self._label[start:end]", "def get_batch(self, batch_size):\n\n if batch_size < len(self.train_set) :\n img_indices = random.sample(range(len(self.train_set)),batch_size)\n else:\n img_indices = range(len(self.train_set))\n\n image_list = [self.train_set[i] for i in img_indices]\n return self.get_image_set(image_list,self.train_data_dir,self.train_truth_dir, self.train_postfix_len)", "def get_test_batch(batch_size=12):\n \n global original_size\n image_paths = glob(os.path.join(data_dir, 'images', '*.jpg'))\n image = imageio.imread(image_paths[0])\n original_size = (image.shape[1], image.shape[0])\n \n if shuffle:\n random.shuffle(image_paths)\n for i in range(0, len(image_paths), batch_size):\n images = []\n labels = []\n names = []\n for path in image_paths[i:i+batch_size]:\n image_name = os.path.basename(path)\n names.append(image_name)\n label_name = image_name[:-4] + '_train_id.png'\n label_path = os.path.join(data_dir, 'labels', label_name)\n label = imageio.imread(label_path)\n image = imageio.imread(path)\n if relabel:\n relabel_vehicles(label)\n relabel_pedestrian(label)\n relabel_background(label)\n if new_labels:\n new_label_20(label)\n new_label_30(label)\n if trim:\n image = image[trim_ind[0]:trim_ind[1]]\n new_label = np.zeros((original_size[1], original_size[0]), dtype=np.uint8)\n new_label[trim_ind[0]:trim_ind[1]] = label[trim_ind[0]:trim_ind[1]]\n label = new_label\n if reshape:\n image = cv2.resize(image, new_shape)\n if preprocess:\n image = image_preprocessing(image, denoise=denoise)\n label = one_hot_label(label, values)\n images.append(image)\n labels.append(label)\n\n images = np.array(images, dtype=np.uint8)\n labels = np.array(labels, dtype=np.uint8)\n yield images, labels, names", "def next_batch(self, batch_size: int) -> (np.array, np.array):\n next_index = self._last_batch_index + batch_size\n if next_index > self.N:\n batch_size = self.N - self._last_batch_index\n next_index = self.N\n\n label_slice = self._labels[self._last_batch_index:next_index]\n\n boxes_slice = np.zeros([batch_size,\n self._dataconfig.boxshape[0],\n self._dataconfig.boxshape[1],\n self._dataconfig.boxshape[2],\n self._dataconfig.num_props], dtype=self._dataconfig.dtype)\n\n # logger.debug(\"boxqueue size before batch 
retrieval: %d\", self._box_future_queue.qsize())\n\n for i in range(batch_size):\n # get future, retrieve result\n fut = self._box_future_queue.get()\n\n # store output data\n boxes_slice[i, :, :, :] = fut.result()\n\n # signal that we are done with this item\n self._box_future_queue.task_done()\n\n self._last_batch_index = next_index\n\n return label_slice, boxes_slice", "def next_batch(self, batch_size):\r\n # Get next batch of image (path) and labels\r\n paths = self.generate_empty_lst()\r\n for i in range(self.num_ch):\r\n paths[i] = self.images[i][self.pointer:self.pointer + batch_size]\r\n\r\n labels = self.labels[self.pointer:self.pointer + batch_size]\r\n\r\n # update pointer\r\n self.pointer += batch_size\r\n\r\n # Read images\r\n images = np.ndarray([self.num_ch, batch_size, self.scale_size[0], self.scale_size[1], 3])\r\n\r\n\r\n for i in range(self.num_ch):\r\n # img = cv2.imread(paths[i])\r\n for j in range(len(paths[0])):\r\n\r\n img = Image.open(paths[i][j])\r\n\r\n if self.histogram_eq:\r\n img = ImageOps.equalize(img)\r\n\r\n if self.horizontal_flip and np.random.random() < 0.5:\r\n img = ImageOps.mirror(img)\r\n\r\n img = img.convert(\"RGB\")\r\n img = img.resize((self.scale_size[0], self.scale_size[1]), resample=Image.LANCZOS)\r\n img = np.ndarray.astype(np.array(img), np.float32)\r\n img -= self.mean\r\n images[i][j] = img\r\n\r\n\r\n # Expand labels to one hot encoding\r\n one_hot_labels = np.zeros((batch_size, self.nb_classes))\r\n\r\n for i in range(len(labels)):\r\n one_hot_labels[i][labels[i]-1] = 1\r\n\r\n # return array of images and labels\r\n return images, one_hot_labels", "def next(self):\n batch = self.dataset.skip(self.batch_index).take(1)\n self._batch_index += 1\n return batch", "def get_origin_batch(self, batch_size, dtype=None):\n raise NotImplementedError()", "def next_batch(self):\n # If the batch queue is empty, print a warning\n if self.batch_queue.qsize() == 0:\n tf.logging.warning(\"Bucket queue size: %i, Input queue size: %i\", \\\n self.batch_queue.qsize(), self.example_queue.qsize())\n if self.single_pass and self.finished_reading:\n tf.logging.info(\"Finished reading dataset in single_pass mode.\")\n return None\n # get the next Batch\n batch = self.batch_queue.get()\n return batch", "def next_batch(train_images_dir, train_annotations_dir, image_filenames_list, random_crop=False,\n crop_width=246, crop_height=112, batch_size=5):\n batch_images = None\n batch_labels = None\n\n for image_counter, image_name in enumerate(image_filenames_list, 1):\n\n image_np, annotation_np = read_image_and_annotation(train_images_dir, train_annotations_dir, image_name.strip())\n\n if random_crop:\n image_np, annotation_np = random_crop(image_np, annotation_np, crop_width, crop_height)\n else:\n # if the random_crop option is False, then batch size is always 1 because\n # the dataset might have different image sizes\n batch_size = 1\n\n image_np = np.expand_dims(image_np, axis=0)\n annotation_np = np.expand_dims(annotation_np, axis=0)\n\n if batch_images is None:\n batch_images = image_np\n batch_labels = annotation_np\n else:\n batch_images = np.concatenate((batch_images, image_np), axis=0)\n batch_labels = np.concatenate((batch_labels, annotation_np), axis=0)\n\n if batch_images.shape[0] == batch_size:\n yield batch_images, batch_labels\n batch_images = None\n batch_labels = None", "def get_batch(batch_size, data):\n s_index = 0\n e_index = batch_size\n if isinstance(data, np.ndarray):\n while e_index < len(data):\n batch = data[s_index: e_index]\n temp = 
e_index\n e_index = e_index + batch_size\n s_index = temp\n yield batch\n elif (isinstance(data, tuple) or isinstance(data, list)) \\\n and isinstance(data[0], np.ndarray):\n while e_index < len(data[0]):\n batch = []\n for one in data:\n batch.append(one[s_index: e_index])\n temp = e_index\n e_index = e_index + batch_size\n s_index = temp\n yield batch\n else:\n print(\"check data type !!!\")\n sys.exit(1)", "def train_batches(self, batch_size):\n if not self.batch_size == batch_size:\n self.batches = generator(list(self.train), batch_size)\n return self.batches", "def get_random_batch(self, batch_size, verbose=False):\n global currently_loaded\n if currently_loaded != self:\n golois.load(self.path, verbose)\n currently_loaded = self\n\n input = np.empty((batch_size, 19, 19, 8), dtype=np.float32)\n policy_output = np.empty((batch_size, 361), dtype=np.float32)\n value_output = np.empty((batch_size,), dtype=np.float32)\n end = np.empty((batch_size, 19, 19, 2), dtype=np.float32)\n\n golois.get_random_batch(input, policy_output, value_output, end)\n\n return input, policy_output, value_output", "def next_batch(data_iter, data_loader, curr_epoch, device):\n try:\n data = data_iter.next()\n if len(data) == 2:\n inputs, targets = data\n perturbed_inputs = None\n elif len(data) == 3:\n inputs, targets, perturbed_inputs = data\n else:\n raise Exception(\"Data type not matched... Use STN dataset.\")\n\n except StopIteration:\n # Epoch finished.\n curr_epoch += 1\n data_iter = iter(data_loader)\n data = data_iter.next()\n if len(data) == 2:\n inputs, targets = data\n perturbed_inputs = None\n elif len(data) == 3:\n inputs, targets, perturbed_inputs = data\n else:\n raise Exception(\"Data type not matched.\")\n\n inputs, targets = inputs.to(device), targets.to(device)\n perturbed_inputs = perturbed_inputs if perturbed_inputs is None else perturbed_inputs.to(device)\n return inputs, perturbed_inputs, targets, data_iter, curr_epoch", "def sample(self, batch_size):\n\n batch = []\n\n # if getting a full batch from the latest retrieve point would \n # overflow the buffer, get the most recent elements and reset the \n # retrieve point\n if self.count - self.retrieve_point < batch_size:\n batch = self.buffer[self.count - batch_size:self.count]\n self.retrieve_point = 0\n else:\n if self.retrieve_point + batch_size > self.count:\n # SHUFFLE BUFFER\n self.retrieve_point = 0\n random.shuffle(self.buffer)\n batch = self.buffer[self.retrieve_point:self.retrieve_point + batch_size]\n self.retrieve_point += batch_size\n\n return batch", "def generate_batch_from_buffer(self, batch_size: int) -> BatchedProcessedInputs:\n pass", "def subsample_batchify_data(dataset, batch_size=None, q=None, with_replacement=False, rng_suite=strong_rng, return_mask=False):\n if batch_size is None and q is None:\n raise ValueError(\"Either batch_size or batch ratio q must be given\")\n if batch_size is not None and q is not None:\n raise ValueError(\"Only one of batch_size and batch ratio q must be given\")\n if not dataset:\n raise ValueError(\"The data set must not be empty\")\n\n num_records = example_count(dataset[0])\n for arr in dataset:\n if num_records != example_count(arr):\n raise ValueError(\"All arrays constituting the data set must have the same number of records\")\n\n if batch_size is None:\n batch_size = q_to_batch_size(q, num_records)\n\n @jax.jit\n def init(rng_key: rng_suite.PRNGState):\n \"\"\" Initializes the batchifier for a new epoch.\n\n :param rng_key: The base PRNG key the batchifier will use for 
randomness.\n :return: tuple consisting of: number of batches in the epoch,\n initialized state of the batchifier for the epoch\n \"\"\"\n return num_records // batch_size, rng_key\n\n @jax.jit\n def get_batch_with_replacement(i, batchifier_state):\n \"\"\" Fetches the next batch for the current epoch.\n\n :param i: The number of the batch in the epoch.\n :param batchifier_state: The initialized state returned by init.\n :return: batch or, if `return_mask` was `True`, tuple (batch, mask), where\n - batch is a tuple of arrays, each of length `batch_size` and containing\n the sampled elements from the arrays in `data` corresponding to the batch,\n - mask is a Boolean array of length `batch_size` indicating which elements in\n batch arrays correspond to batch elements (`True`) and which constitute padding (`False`).\n \"\"\"\n rng_key = batchifier_state\n batch_rng_key = rng_suite.fold_in(rng_key, i)\n ret_idx = rng_suite.randint(batch_rng_key, (batch_size,), 0, num_records)\n\n batch = tuple(jnp.take(a, ret_idx, axis=0) for a in dataset)\n if return_mask:\n mask = jnp.ones(batch_size, dtype=bool)\n return batch, mask\n return batch\n \n\n @jax.jit\n def get_batch_without_replacement(i, batchifier_state):\n \"\"\" Fetches the next batch for the current epoch.\n\n :param i: The number of the batch in the epoch.\n :param batchifier_state: The initialized state returned by init.\n :return: batch or, if `return_mask` was `True`, tuple (batch, mask), where\n - batch is a tuple of arrays, each of length `batch_size` and containing\n the sampled elements from the arrays in `data` corresponding to the batch,\n - mask is a Boolean array of length `batch_size` indicating which elements in\n batch arrays correspond to batch elements (`True`) and which constitute padding (`False`).\n \"\"\"\n rng_key = batchifier_state\n batch_rng_key = rng_suite.fold_in(rng_key, i)\n ret_idx = sample_from_array(batch_rng_key, jnp.arange(num_records), batch_size, 0, rng_suite=rng_suite)\n\n batch = tuple(jnp.take(a, ret_idx, axis=0) for a in dataset)\n if return_mask:\n mask = jnp.ones(batch_size, dtype=bool)\n return batch, mask\n return batch\n\n return init, get_batch_with_replacement if with_replacement else get_batch_without_replacement", "def test_total_example_less_batch_size():\n batch_size = num_examples * 2\n generator = batch_generator(batch_size)\n batch1_x, batch1_y = generator.next()\n batch2_x, batch2_y = generator.next()\n assert num_examples == batch1_x.shape[0]\n np.testing.assert_array_equal(batch1_x, batch2_x)\n np.testing.assert_array_equal(batch1_y, batch2_y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the controller that returns the torque for the desired state.
def qp_controller(current_state, desired_state, dt, dim=2): # torque PD controller values wheel_kp = 50.0 wheel_kd = 10.0 max_torque = 20.0 # cost on obtaining next state and velocity kp = 0.0 kd = 1.0 # half state length hl = len(current_state) / 2 mp = MathematicalProgram() x = mp.NewContinuousVariables(len(current_state), "x") u = mp.NewContinuousVariables(1, "u") force = mp.NewContinuousVariables(8, "force") # set the initial state set_initial_state(mp, x, current_state, dim) # enforce the dynamics with linearized theta state = x + get_nd_dynamics(x, u, force, dim, current_state[dim])*dt # stay on floor # add_floor_constraint(mp, state, dim) # for corner to ground # fix_corner_to_ground(mp, state, 0, -0.5, dim) # don't pull on ground dont_pull_on_ground(mp, force, dim) # bounded to not leave the ground # stay_on_ground(mp, state, dim) # only force when on ground complimentarity_constraint(mp, state, force, dim) # linearize theta to set this cost add_corner_cost(mp, state, 0, -0.5, dim, current_state[dim]) # unpack the states x_s = state[0] y = state[1] theta = state[dim] alpha = state[hl-1] xdot = state[0+hl] ydot = state[1+hl] theta_dot = state[dim+hl] alpha_dot = state[-1] # unpack the desired states x_des = desired_state[0] y_des = desired_state[1] theta_des = desired_state[dim] alpha_des = desired_state[hl-1] xdot_des = desired_state[0+hl] ydot_des = desired_state[1+hl] theta_dot_des = desired_state[dim+hl] alpha_dot_des = desired_state[-1] # current_pos = np.asarray([x_s,y,theta,alpha]) # des_pos = np.asarray([x_des,y_des,theta_des,alpha_des]) # pos_diff = current_pos - des_pos current_pos = np.asarray([x_s,y,theta,0]) des_pos = np.asarray([x_des,y_des,theta_des,0]) pos_diff = current_pos - des_pos # current_vel = np.asarray([xdot,ydot,theta_dot,alpha_dot]) # des_vel = np.asarray([xdot_des,ydot_des,theta_dot_des,alpha_dot_des]) # vel_diff = current_vel - des_vel current_vel = np.asarray([xdot,ydot,theta_dot,0]) des_vel = np.asarray([xdot_des,ydot_des,theta_dot_des,0]) vel_diff = current_vel - des_vel pos = pos_diff.dot(pos_diff) vel = vel_diff.dot(vel_diff) mp.AddQuadraticCost(kp*pos) mp.AddQuadraticCost(kd*vel) # torque PD controller input_torque = wheel_kp*(current_state[dim] - np.pi/4.0) + wheel_kd*current_state[dim+hl] input_torque = np.clip(input_torque, -max_torque, max_torque) mp.AddConstraint(u[0] == input_torque) sol = mp.Solve() # print(sol) my_torque = mp.GetSolution(u) my_force = mp.GetSolution(force) my_start = mp.GetSolution(x) return my_start, my_torque, my_force
[ "def torque(b):\n return length(b) * total_weight(contents(b))", "def calc_control(self, pose_goal):\n cmd_vel = Twist()\n\n # TODO: fill in your favorite controller\n\n return cmd_vel", "def run_controller(self):\n # Check if the goal has been set.\n if self.goal_pos is None or self.goal_ori is None:\n raise ValueError(\"Set goal first.\")\n\n # Default desired velocities and accelerations are zero.\n desired_vel_pos = np.asarray([0.0, 0.0, 0.0])\n desired_acc_pos = np.asarray([0.0, 0.0, 0.0])\n desired_vel_ori = np.asarray([0.0, 0.0, 0.0])\n desired_acc_ori = np.asarray([0.0, 0.0, 0.0])\n\n # Get interpolated goal for position\n if self.interpolator_pos is not None:\n desired_pos = self.interpolator_pos.get_interpolated_goal()\n else:\n desired_pos = np.array(self.goal_pos)\n\n # Get interpolated goal for orientation.\n if self.interpolator_ori is not None:\n\n desired_ori = T.quat2mat(self.interpolator_ori.get_interpolated_goal())\n ori_error = C.orientation_error(desired_ori, self.model.ee_ori_mat)\n else:\n desired_ori = np.array(self.goal_ori)\n\n ori_error = C.orientation_error(desired_ori, self.model.ee_ori_mat)\n\n # Calculate desired force, torque at ee using control law and error.\n position_error = desired_pos - self.model.ee_pos\n vel_pos_error = desired_vel_pos - self.model.ee_pos_vel\n desired_force = (\n np.multiply(np.array(position_error),\n np.array(self.kp[0:3])) + np.multiply(vel_pos_error, self.kv[0:3])) + desired_acc_pos\n\n vel_ori_error = desired_vel_ori - self.model.ee_ori_vel\n desired_torque = (\n np.multiply(np.array(ori_error),\n np.array(self.kp[3:])) + np.multiply(vel_ori_error, self.kv[3:])) + desired_acc_ori\n\n # Calculate Operational Space mass matrix and nullspace.\n lambda_full, lambda_pos, lambda_ori, nullspace_matrix = \\\n C.opspace_matrices(self.model.mass_matrix,\n self.model.J_full,\n self.model.J_pos,\n self.model.J_ori)\n\n self.nullspace_matrix = nullspace_matrix\n\n # If uncoupling position and orientation use separated lambdas.\n if self.uncoupling:\n decoupled_force = np.dot(lambda_pos, desired_force)\n decoupled_torque = np.dot(lambda_ori, desired_torque)\n decoupled_wrench = np.concatenate([decoupled_force, decoupled_torque])\n else:\n desired_wrench = np.concatenate([desired_force, desired_torque])\n decoupled_wrench = np.dot(lambda_full, desired_wrench)\n\n # Project torques that acheive goal into task space.\n self.torques = np.dot(self.model.J_full.T, decoupled_wrench) + self.model.torque_compensation\n\n return self.torques", "def wheels_state(self):\n state = p.getJointStates(bodyUniqueId=self._robot_id, jointIndices=[0, 1],\n physicsClientId=self._physics_client_id)\n velocities = (state[0][1], state[1][1])\n applied_torques = (state[0][3], state[1][3])\n return velocities, applied_torques", "def controller_lee(qd, t, model_drone):\n\n k_pi = model_drone.k_pi\n k_di = model_drone.k_di\n\n k_p = model_drone.k_p\n k_d = model_drone.k_d\n\n u = np.zeros(4)\n\n # Compute error in world frame where error = current - desired\n e_pos = (qd.pos - qd.pos_des)\n e_vel = (qd.vel - qd.vel_des)\n\n r_acc_des = qd.acc_des - k_di * e_vel - k_pi * e_pos\n r_acc_total = r_acc_des + np.array([0, 0, 1]) * model_drone.grav\n\n r_acc_mag = np.sqrt(np.sum(r_acc_total**2))\n r_acc_xymag = np.sqrt(np.sum(r_acc_total[:2]**2))\n\n # If drone is falling, emergency recover by limiting XY movement and raising Z\n if e_pos[-1] < -5:\n r_acc_total[:2] *= model_drone.maxXYaccel / r_acc_xymag\n\n # Limit max tilt angle\n tiltangle = np.arccos(r_acc_total[2] / 
r_acc_mag)\n scale_acc = 1\n if tiltangle > model_drone.maxangle:\n xy_mag_max = r_acc_total[2] * np.tan(model_drone.maxangle)\n scale_acc = xy_mag_max / r_acc_xymag\n r_acc_total[:2] = r_acc_total[:2] * scale_acc\n\n # Compute desired rotations\n a_psi = np.array([np.cos(qd.yaw_des), np.sin(qd.yaw_des), 0])\n b3_des = np.array(r_acc_total)\n b3_des /= np.sqrt(np.sum(b3_des**2))\n b2_des = np.cross(b3_des, a_psi)\n b2_des /= np.sqrt(np.sum(b2_des**2))\n b1_des = np.cross(b2_des, b3_des)\n b1_des /= np.sqrt(np.sum(b1_des**2))\n\n f_dot = model_drone.mass * scale_acc * k_pi * (-e_vel) # + qd.jrk_des\n f_mag = model_drone.mass * r_acc_mag\n b3_dot = np.cross(np.cross(b3_des, f_dot / f_mag), b3_des)\n a_psi_dot = np.array([-np.cos(qd.yaw_des) * qd.yawdot_des, -np.sin(qd.yaw_des) * qd.yawdot_des, 0])\n b1_dot = np.cross(np.cross(b1_des, (np.cross(a_psi_dot, b3_des) + np.cross(a_psi, b3_dot)) / np.sqrt(np.sum(np.cross(a_psi, b3_des)**2))), b1_des)\n b2_dot = np.cross(b3_dot, b1_des) + np.cross(b3_des, b1_dot)\n\n # Form rotation matrices\n R_des = np.vstack((b1_des, b2_des, b3_des)).T\n R_desdot = np.vstack((b1_dot, b2_dot, b3_dot)).T\n\n omega_hat = R_des.T @ R_desdot\n omega = np.array([omega_hat[2, 1], omega_hat[0, 2], omega_hat[1, 0]])\n\n # Calculate desired Euler angles\n euler_des = rot_to_eulerzxy(R_des)\n\n R = eulerzxy_to_rot(qd.euler)\n\n e_euler = 0.5 * vee_map(R_des.T @ R - R.T @ R_des)\n\n u[0] = model_drone.mass * np.sum(R[:, 2] * r_acc_total)\n u[1:] = model_drone.I @ (- k_p * e_euler - k_d * qd.omega)\n\n # Thrust\n F = model_drone.mass * np.sum(R[:, 2] * r_acc_total)\n\n # print('F = {0:2f}'.format(F))\n\n # Moment\n M = u[1:] # note: params.I has the moment of inertia\n\n # Output trpy and drpy as in hardware\n trpy = np.array([F, euler_des[0], euler_des[1], euler_des[2]])\n drpy = np.array([0, 0, 0, 0])\n\n # print(\"F: {0} XY: {1}\".format(F, r_acc_xymag))\n\n return F, M, trpy, drpy", "def servoTorque(self):\r\n if robot.alive:\r\n if self.motorButton.cget('text') == 'ON' and self.id_entry.get():\r\n self.motorButton.config(relief=\"sunken\", bg=\"tomato\",\r\n text=\"OFF\")\r\n robot.torqueServo(int(self.id_entry.get()), 1)\r\n\r\n elif self.motorButton.cget('text') == 'OFF':\r\n self.motorButton.config(relief=\"raised\", bg=\"green3\",\r\n text=\"ON\")\r\n robot.torqueServo(int(self.id_entry.get()), 0)\r\n else:\r\n messagebox.showerror(\"Comm Error\",\r\n \"Comm Port is not Connected..!!\")", "def controller_linear(qd, t, model_drone):\n \n k_pi = model_drone.k_pi\n k_di = model_drone.k_di\n \n k_p = model_drone.k_p\n k_d = model_drone.k_d\n \n u = np.zeros(4)\n\n # Compute error in world frame where error = current - desired\n e_pos = (qd.pos - qd.pos_des)\n e_vel = (qd.vel - qd.vel_des)\n\n r_acc_des = qd.acc_des - k_di * e_vel - k_pi * e_pos\n r_acc_total = r_acc_des + np.array([0, 0, 1]) * model_drone.grav\n\n # Limit max tilt angle\n tiltangle = np.arccos(r_acc_total[2] / np.sqrt(np.sum(r_acc_total**2)))\n if tiltangle > model_drone.maxangle:\n xy_mag = np.sqrt(np.sum(r_acc_total[:2]**2))\n xy_mag_max = r_acc_total[2] * np.tan(model_drone.maxangle)\n r_acc_total[:2] = r_acc_total[:2] / xy_mag * xy_mag_max\n\n # Compute desired rotations and Euler error\n psi_des = qd.yaw_des\n theta_des = (np.cos(psi_des) * r_acc_total[0] + np.sin(psi_des) * r_acc_total[1]) / model_drone.grav\n phi_des = (-np.cos(psi_des) * r_acc_total[1] + np.sin(psi_des) * r_acc_total[0]) / model_drone.grav\n euler_des = np.array([phi_des, theta_des, psi_des])\n \n e_euler = qd.euler - 
euler_des\n\n # Assume that drone is around hover point\n u[0] = r_acc_total[2] * model_drone.mass\n u[1:] = model_drone.I @ (- k_p * e_euler - k_d * qd.omega)\n\n # Thrust\n F = u[0]\n\n # print('F = {0:2f}'.format(F))\n \n # Moment\n M = u[1:] # note: params.I has the moment of inertia\n \n # Output trpy and drpy as in hardware\n trpy = np.array([F, phi_des, theta_des, psi_des])\n drpy = np.array([0, 0, 0, 0])\n \n return F, M, trpy, drpy", "def get_robot_state(self):", "def act(self, state, eps):\n dice = random.random()\n if (dice < eps):\n # Random act\n action = np.random.choice(self.action_space)\n else:\n # Follow policy\n #print(state.shape)\n if self.conv_mode:\n action = self.policy_net.predict(state[np.newaxis, :,:,:])\n else:\n action = self.policy_net.predict(state[np.newaxis, :])\n action = np.argmax(action, axis=1)[0]\n #print(\"The action: {}\".format(action))\n \n return action", "def play_cartpole_q_learning():\n Q = defaultdict(float)\n gamma = 0.99 # Discounting factor\n alpha = 0.5 # Soft update param\n\n env = gym.make('CartPole-v0')\n actions = env.action_space\n\n def update_Q(s, r, a, s_next, done):\n \"\"\"Updates the current q value.\n\n This learns the action value (Q-value) and estimates the next action\n using the Bellman equation, estimating the next action by adopting the\n best Q value instead of following the current policy.\n\n TODO: Document parameters.\n \"\"\"\n max_q_next = max([Q[s_next, action] for action in actions])\n # Do not include the next state's value if currently at the terminal state.\n Q[s, a] += alpha * (r + gamma * max_q_next * (1.0 - done) - Q[s, a])", "def build_state(self):\n\n # Collect data about the environment\n waypoint = self.planner.next_waypoint() # The next waypoint \n inputs = self.env.sense(self) # Visual input - intersection light and traffic\n for key, value in iter(inputs.items()):\n if value is None:\n inputs.update({key:'None'})\n deadline = self.env.get_deadline(self) # Remaining deadline\n\n ########### \n ## TO DO ##\n ###########\n \n # NOTE : you are not allowed to engineer features outside of the inputs available.\n # Because the aim of this project is to teach Reinforcement Learning, we have placed \n # constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.\n # With the hand-engineered features, this learning process gets entirely negated.\n \n # Set 'state' as a tuple of relevant data for the agent \n return self.build_index(inputs,waypoint)", "def _get_recent_torque(self, current_time):\n\n current_reading = self._get_readings(current_time)\n print(\"Current torques reading\", current_reading)\n self.torques = np.vstack((self.torques, current_reading))\n\n return self.total_torque", "def _network_2_robot_action(self, state):\n with torch.no_grad():\n if self.is_spike:\n state = self._state_2_state_spikes(state)\n if self.is_record:\n self.record_data.append(state)\n state = torch.Tensor(state).to(self.device)\n action = self.actor_net(state, 1).to('cpu')\n elif self.is_scale:\n state = self._state_2_scale_state(state)\n if self.is_record:\n self.record_data.append(state)\n state = torch.Tensor(state).to(self.device)\n action = self.actor_net(state).to('cpu')\n else:\n state = np.array(state).reshape((1, -1))\n if self.is_record:\n self.record_data.append(state)\n state = torch.Tensor(state).to(self.device)\n action = self.actor_net(state).to('cpu')\n action = action.numpy().squeeze()\n noise = np.random.randn(2) * 
self.action_rand\n action = noise + (1 - self.action_rand) * action\n action = np.clip(action, [0., 0.], [1., 1.])\n action = wheeled_network_2_robot_action_decoder(\n action, self.max_spd, self.min_spd\n )\n return action", "def get(self, timeout) -> TPModel:", "def simulate_controllers(client, startCTE, startHE, startDTP, \n taxi_controller=TaxiController(0.015, 0.008), \n takeoff_controller=TakeoffController(0.07, 0.035, 0.01, 0.01),\n climb_controller=ClimbController(0.001, 0.01),\n simSpeed=1.0):\n # Reset to the desired starting position\n client.sendDREF(\"sim/time/sim_speed\", simSpeed)\n xpc3_helper.reset(client, cteInit = startCTE, heInit = startHE, dtpInit = startDTP)\n xpc3_helper.sendBrake(client, 0)\n\n time.sleep(2) # 5 seconds to get terminal window out of the way\n client.pauseSim(False)\n\n time.sleep(0.001)\n init_elevation = client.getDREF(\"sim/flightmodel/position/elevation\")[0]\n dtp = startDTP\n startTime = client.getDREF(\"sim/time/zulu_time_sec\")[0]\n endTime = startTime\n \n # Lets start witht the taxi controller\n controller = taxi_controller\n\n print(\"Taxiing!\")\n\n while True:\n \n # Get relevant state variables\n speed = xpc3_helper.getSpeed(client)\n cte, dtp, he = xpc3_helper.getHomeState(client)\n lat, lon, el = coords(client)\n psi, theta, phi = angles(client)\n roll_speed, pitch_speed, yaw_speed = getSpins(client)\n vert_speed = getVertSpeed(client)\n \n # Store them in a state dictionary\n state = {\"speed\" : speed, \"cte\" : cte, \"he\" : he,\n \"lat\" : lat, \"lon\" : lon, \"el\" : el,\n \"psi\" : psi, \"theta\" : theta, \"phi\" : phi,\n \"roll_speed\" : roll_speed, \"pitch_speed\" : pitch_speed, \"yaw_speed\" : yaw_speed,\n \"vert_speed\" : vert_speed}\n \n \n # print(state)\n \n # Set the controller here if you need to\n \n # If we are taxiing and we reach the center of the runway, lets take off!\n if controller == taxi_controller and abs(state[\"he\"]) < 1 and abs(state[\"cte\"]) < 1:\n print(\"Taking off!\")\n controller = takeoff_controller\n \n if controller == takeoff_controller and abs(state[\"speed\"]) > 30:\n print(\"Climbing!\")\n controller = climb_controller\n \n \n\n # Get and send the controls from our controller\n ctrl = controller.control(client, state)\n send_controls(client, ctrl)\n \n # Wait for next timestep\n while endTime - startTime < 1:\n time.sleep(0.01)\n endTime = client.getDREF(\"sim/time/zulu_time_sec\")[0]\n \n\n # Set things for next round\n time.sleep(0.01)\n startTime = client.getDREF(\"sim/time/zulu_time_sec\")[0]\n endTime = startTime\n \n time.sleep(0.001)\n\n client.pauseSim(True)", "def R(self,state):\n return self.reward[state]", "def _get_recent_torque(self, current_time):\n current_reading = self._get_readings(current_time)\n self.torques = np.vstack((self.torques, current_reading))\n return self.total_torque", "def reach_target_consistantly(controllers: list[PenaltyController]) -> cas.MX:\n\n nx = controllers[-1].states[\"q\"].cx_start.shape[0]\n\n q_sym = cas.MX.sym(\"q_sym\", nx)\n qdot_sym = cas.MX.sym(\"qdot_sym\", nx)\n if \"cholesky_cov\" in controllers[0].stochastic_variables.keys():\n cov_sym = cas.MX.sym(\"cov\", controllers[0].stochastic_variables[\"cholesky_cov\"].cx_start.shape[0])\n cov_sym_dict = {\"cholesky_cov\": cov_sym}\n cov_sym_dict[\"cholesky_cov\"].cx_start = cov_sym\n l_cov_matrix = (\n controllers[0]\n .stochastic_variables[\"cholesky_cov\"]\n .reshape_to_cholesky_matrix(\n cov_sym_dict,\n controllers[0].states.cx_start.shape[0],\n Node.START,\n \"cholesky_cov\",\n )\n )\n 
cov_matrix = l_cov_matrix @ l_cov_matrix.T\n else:\n cov_sym = cas.MX.sym(\"cov\", controllers[0].stochastic_variables[\"cov\"].cx_start.shape[0])\n cov_sym_dict = {\"cov\": cov_sym}\n cov_sym_dict[\"cov\"].cx_start = cov_sym\n cov_matrix = (\n controllers[0]\n .stochastic_variables[\"cov\"]\n .reshape_to_matrix(\n cov_sym_dict,\n controllers[0].states.cx_start.shape[0],\n controllers[0].states.cx_start.shape[0],\n Node.START,\n \"cov\",\n )\n )\n\n hand_pos = controllers[0].model.markers(q_sym)[2][:2]\n hand_vel = controllers[0].model.marker_velocities(q_sym, qdot_sym)[2][:2]\n\n jac_marker_q = cas.jacobian(hand_pos, q_sym)\n jac_marker_qdot = cas.jacobian(hand_vel, cas.vertcat(q_sym, qdot_sym))\n\n cov_matrix_q = cov_matrix[:2, :2]\n cov_matrix_qdot = cov_matrix[:4, :4]\n\n pos_constraint = jac_marker_q @ cov_matrix_q @ jac_marker_q.T\n vel_constraint = jac_marker_qdot @ cov_matrix_qdot @ jac_marker_qdot.T\n\n out = cas.vertcat(pos_constraint[0, 0], pos_constraint[1, 1], vel_constraint[0, 0], vel_constraint[1, 1])\n\n fun = cas.Function(\"reach_target_consistantly\", [q_sym, qdot_sym, cov_sym], [out])\n val = fun(\n controllers[-1].states[\"q\"].cx_start,\n controllers[-1].states[\"qdot\"].cx_start,\n (\n controllers[-1].stochastic_variables[\"cholesky_cov\"].cx_start\n if \"cholesky_cov\" in controllers[-1].stochastic_variables.keys()\n else controllers[-1].stochastic_variables[\"cov\"].cx_start\n ),\n )\n # Since the stochastic variables are defined with ns+1, the cx_start actually refers to the last node (when using node=Node.END)\n\n return val", "def performAction(self, action):\n self.t += 1\n # Map the action integer to a torque and displacement.\n assert round(action[0]) == action[0]\n\n if self.only_steer:\n T = 2 * (action[0] / 4.0 - 1.0)\n d = 0.\n else:\n # -1 for action in {0, 1, 2}, 0 for action in {3, 4, 5}, 1 for\n # action in {6, 7, 8}\n torque_selector = np.floor(action[0] / 3.0) - 1.0\n T = 2 * torque_selector\n # Random number in [-1, 1]:\n p = 2.0 * np.random.rand() - 1.0\n # -1 for action in {0, 3, 6}, 0 for action in {1, 4, 7}, 1 for\n # action in {2, 5, 8}\n disp_selector = action[0] % 3 - 1.0\n d = 0.02 * disp_selector + self._butt_disturbance_amplitude * p\n super(BalanceTask, self).performAction([T, d])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a known_good.json file and extract its git url + revisions from it.
def parse_known_good_file(good_data): result = {} SITE_MAP = {'github': 'https://github.com'} deps = json.loads(good_data) assert 'commits' in deps for dep in deps['commits']: name = dep['name'] site = dep['site'] site_url = SITE_MAP.get(site) assert site_url, 'Unknown site value: %s' % site subrepo = dep['subrepo'] revision = dep['commit'] result[str(name)] = '{0}/{1}@{2}'.format(site_url, subrepo, revision) return result
[ "def FindURLSInJSON(json_file, gs_urls):\n output = subprocess.check_output(['svn', 'cat', json_file])\n json_content = json.loads(output)\n for dict_type in ['actual-results']:\n for result_type in json_content[dict_type]:\n if json_content[dict_type][result_type]:\n for result in json_content[dict_type][result_type].keys():\n hash_type, hash_value = json_content[dict_type][result_type][result]\n gs_url = FileNameToGSURL(result, hash_type, str(hash_value))\n if gs_urls.get(gs_url):\n gs_urls[gs_url].append(json_file)\n else:\n gs_urls[gs_url] = [json_file]", "def get_build_info(json_file):\n try:\n fp = open(json_file, 'rU')\n text = fp.read()\n fp.close()\n text = re.sub(COMMENT_RE, '', text, flags=re.M)\n build_info = json.loads(text)\n except Exception as e:\n print \"Error in {0}:\\n{1}\".format(json_file, str(e))\n return None\n if not check_build_info(build_info):\n return None\n # if necessary add a value for \"visualizer_url\"\n if \"visualizer_url\" not in build_info:\n build_info[\"visualizer_url\"] = DEFALUT_VIZ_URL\n # merge DEFAULT_VIZ_CONFIG with the supplied \"default_viz_config\"\n config = DEFAULT_VIZ_CONFIG\n config[\"editCodeBaseURL\"] = build_info[\"visualizer_url\"]\n config.update(build_info.get(\"default_viz_config\", {}))\n build_info[\"default_viz_config\"] = config\n # update all the \n for key, value in build_info.iteritems():\n if key.endswith('.html'):\n for py_key, py_dict in value[1].iteritems():\n update_dict(py_dict, build_info.get(\"default_viz_config\", {}))\n return build_info", "def parse_got_revision(gclient_output, got_revision_mapping, use_svn_revs):\n properties = {}\n solutions_output = {\n # Make sure path always ends with a single slash.\n '%s/' % path.rstrip('/') : solution_output for path, solution_output\n in gclient_output['solutions'].iteritems()\n }\n for dir_name, property_name in got_revision_mapping.iteritems():\n # Make sure dir_name always ends with a single slash.\n dir_name = '%s/' % dir_name.rstrip('/')\n if dir_name not in solutions_output:\n continue\n solution_output = solutions_output[dir_name]\n if solution_output.get('scm') is None:\n # This is an ignored DEPS, so the output got_revision should be 'None'.\n git_revision = revision = commit_position = None\n else:\n # Since we are using .DEPS.git, everything had better be git.\n assert solution_output.get('scm') == 'git'\n git_revision = git('rev-parse', 'HEAD', cwd=dir_name).strip()\n if use_svn_revs:\n revision = get_svn_rev(git_revision, dir_name)\n if not revision:\n revision = git_revision\n else:\n revision = git_revision\n commit_position = get_commit_position(dir_name)\n\n properties[property_name] = revision\n if revision != git_revision:\n properties['%s_git' % property_name] = git_revision\n if commit_position:\n properties['%s_cp' % property_name] = commit_position\n\n return properties", "def load_revisions(file_path: str):\n with open(file_path, \"r\") as f_reader:\n revisions = json.load(f_reader)\n\n for name in revisions.keys():\n print(name)\n counter = 0\n revisions_tobe_deleted = []\n for revId in revisions[name].keys():\n content = revisions[name][revId][\"content\"]\n if len(content) == 0:\n revisions_tobe_deleted.append(revId)\n counter += 1\n else:\n revisions[name][revId][\"content\"] = revision_preprocessing(content)\n for revId in revisions_tobe_deleted:\n del revisions[name][revId]\n print(f\"{counter} revisions have been deleted\")\n \n return revisions", "def tutorial_branches(config, url):\n if url.startswith('file://') or url == '':\n # no 
branches for file urls or missing urls\n return []\n try:\n jsontxt = urllib2.urlopen(url)\n except urllib2.HTTPError:\n return []\n branches = json.load(jsontxt)\n jsontxt.close()\n return [b['name'] for b in branches]", "def urls_from_json(self, json_file_):\n\n try:\n json_data_ = helper.READ_JSON_FILE(json_file_)\n urls_ = {}\n for entry_ in json_data_:\n urls_[entry_['attachment']['href']] = entry_['created_time']\n\n return urls_\n except Exception:\n self._logger.log_error(\"Error parsing JSON\", traceback.format_exc())\n temp_ = {}\n return temp_", "def ParseJsonFileWithComments(path):\n prog = re.compile(r'\\s*#.*')\n lines = osutils.ReadFile(path).splitlines()\n lines = ['' if prog.match(line) else line for line in lines]\n parsed_contents = json.loads('\\n'.join(lines))\n return parsed_contents", "def parse_json_links_index(out_dir=OUTPUT_DIR):\n index_path = os.path.join(out_dir, 'index.json')\n if os.path.exists(index_path):\n with open(index_path, 'r', encoding='utf-8') as f:\n links = json.load(f)['links']\n check_links_structure(links)\n return links\n\n return []", "def parse_gitmodules(raw):\n\n result = {}\n locals_ = {}\n\n def reset():\n locals_.clear()\n\n def add_result():\n if locals_.get('added'):\n return\n\n path = locals_.get('path')\n url = locals_.get('url')\n\n if (path is None or url is None):\n return\n result[path] = url\n locals_['added'] = True\n\n for line in raw.splitlines():\n if not line.strip():\n continue \n\n if line.startswith('[submodule '):\n reset()\n continue\n\n try:\n name, value = line.split('=', 1)\n except:\n # too few values?\n continue\n locals_[name.strip()] = value.strip()\n add_result()\n\n return result", "def read_data(data_file):\n with open(data_file) as json_data:\n all_data = json.load(json_data)\n urls = all_data[\"urls\"]\n holdings = all_data[\"holdings\"]\n return urls, holdings", "def parse_libs():\n\n _ = []\n\n with open('./version.json', 'r', encoding='utf-8') as f:\n file_data = json.loads(f.read())\n\n for lib in file_data['libraries']:\n _.append(lib['downloads']['artifact']['path'])\n\n return(_)", "def _parseJSON(self):\r\n #print self.filename + \"\\n\"\r\n tempMap = json.load(open(self.filename))\r\n self.version = tempMap[\"version\"]\r\n self.data = tempMap[\"Material library\"]", "def get_changed_prs(self) -> List:\n from syapse_gitdata.pull import PullRequest\n pull_requests = []\n with open('syapse_gitdata/output.json', 'r') as file_read:\n written_data = json.load(file_read)\n LOG.info('File Loaded Successfully')\n pr_dict = {}\n for pr in written_data:\n pr_dict.update({pr['url'] : pr})\n for pr in requests.get(self._pr_url, headers=self._header).json():\n if pr['url'] not in pr_dict.keys():\n req = PullRequest(pr['url'],self._header)\n req.parse_json()\n pull_requests.append(req)\n elif pr['updated_at'] != pr_dict[pr['url']]['updated']:\n req = PullRequest(pr['url'],self._header)\n req.parse_json()\n pull_requests.append(req)\n file_read.seek(0)\n return pull_requests", "def get_from_vegref(file_name_path):\n\n open_data = json.load(open(file_name_path))\n from_vegrefs = []\n\n for feature in open_data[\"features\"]:\n properties = feature[\"properties\"]\n from_vegrefs.append(str(properties[\"from_vegref\"]))\n return convert_vegref(from_vegrefs)", "def load_links():\n # if .hn doesn't exist, return empty list\n if not os.path.isfile(HN_PATH):\n return []\n # otherwise, load it up\n hn_links = json.load(open(HN_PATH, 'r'))\n return hn_links", "def get_repos():\n try:\n with open(\"repos.json\") as 
data_file: \n repos = json.load(data_file)\n return repos\n except:\n print \"Error loading repos.json\"\n sys.exit()", "def FindURLs(url):\n gs_urls = {}\n for json_file in FindJSONFiles(url):\n print 'Looking for checksums in %s' % json_file\n FindURLSInJSON(json_file, gs_urls)\n return gs_urls", "def _read_json_with_comments(json_path):\n file_lines = []\n with open(json_path) as json_file:\n file_lines = json_file.readlines()\n lines_without_comments = []\n for line in file_lines:\n if line.strip()[0:2] != '//' and len(line.strip()) > 0:\n lines_without_comments.append(line)\n file_content_without_comments = ''.join(lines_without_comments)\n json_data = {}\n try:\n json_data = json.loads(file_content_without_comments)\n except Exception:\n print('There was an error reading file {}'.format(json_path))\n print(traceback.format_exc())\n return json_data", "def parse_recipe(url, k_base):\n if k_base == None:\n k_base = kb.KnowledgeBase()\n k_base.load()\n\n step_list = []\n if validate_url(url) is None:\n return\n\n system_type = platform.system()\n # url = \"http://allrecipes.com/recipe/219173/simple-beef-pot-roast/\"\n if system_type == 'Windows':\n fn = os.path.join(os.path.dirname(__file__), 'RecipeParser/bin/parse_recipe')\n try:\n recipe_json = subprocess.check_output(['php.exe', fn, url, \"json\"], stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as problem:\n print problem.output\n print problem.returncode\n recipe_json = None\n\n else:\n fn = os.path.join(os.path.dirname(__file__), 'RecipeParser/bin/parse_recipe')\n # print fn\n try:\n recipe_json = subprocess.check_output([fn, url, \"json\"])\n except subprocess.CalledProcessError as problem:\n print problem.output\n print problem.returncode\n recipe_json = None\n\n # sometimes the PHP parse_recipe is too verbose. this corrects that issue.\n recipe_json = recipe_json.rpartition('}')\n recipe_json = recipe_json[0] + recipe_json[1]\n\n print recipe_json\n parsed_json = json.loads(recipe_json)\n print parsed_json\n\n # clean up the ingredients formatting\n if parsed_json['ingredients'][0] is not None:\n parsed_json['ingredients'] = parsed_json['ingredients'][0]['list']\n print parsed_json['ingredients']\n\n if parsed_json['instructions'][0]['list'] is not None:\n parsed_json['instructions'] = parsed_json['instructions'][0]['list']\n for step in parsed_json['instructions']:\n for sent in find_sentences(step):\n step_list.append(util.sanitize_step(util.handle_fractions(sent.encode(encoding='ascii', errors='ignore'))))\n\n new_title = util.sanitize_step(parsed_json['title']).encode('ascii','ignore')\n new_recipe = recipe.Recipe(new_title, parsed_json['yield'], parsed_json['ingredients'], step_list, parsed_json['photo_url'])\n new_recipe.tools = find_cooking_tools(new_recipe.instructions, k_base)\n new_recipe.methods = find_cooking_methods(new_recipe.instructions, k_base)\n\n for i in range(len(new_recipe.ingredients)):\n new_recipe.ingredients[i] = util.sanitize_step(util.handle_fractions(new_recipe.ingredients[i].encode(encoding='utf-8', errors='ignore')))\n\n for i in range(len(new_recipe.instructions)):\n new_recipe.instructions[i] = util.sanitize_step(new_recipe.instructions[i])\n\n # find_temps(new_recipe.instructions, k_base)\n # print new_recipe.title, new_recipe.ingredients, new_recipe.instructions\n # print parsed_json['title']\n return new_recipe" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a .gitmodules file to extract a { name -> url } map from it.
def parse_git_submodules(gitmodules_data): gitmodules_data = gitmodules_data.decode("utf-8") result = {} # NOTE: configparser.ConfigParser() doesn't seem to like the file # (i.e. read_string() always returns None), so do the parsing # manually here. section_name = None in_submodule_section = False submodule_name = None submodule_prefix = 'submodule "' urls = {} branches = {} for line in gitmodules_data.splitlines(): if line.startswith('['): section_name = line[1:-1] is_submodule_section = section_name.startswith(submodule_prefix) if is_submodule_section: submodule_name = section_name[len(submodule_prefix):-1] elif is_submodule_section: key, _, value = line.strip().partition('=') if not value: continue key = key.strip() value = value.strip() if key == 'url': urls[submodule_name] = value elif key == 'branch': branches[submodule_name] = value result = {} for submodule, url in urls.items(): branch = branches.get(submodule) if not branch: branch = get_git_remote_ref(url, 'heads/master') result[submodule] = '%s@%s' % (url, branch) return result
[ "def parse_gitmodules(raw):\n\n result = {}\n locals_ = {}\n\n def reset():\n locals_.clear()\n\n def add_result():\n if locals_.get('added'):\n return\n\n path = locals_.get('path')\n url = locals_.get('url')\n\n if (path is None or url is None):\n return\n result[path] = url\n locals_['added'] = True\n\n for line in raw.splitlines():\n if not line.strip():\n continue \n\n if line.startswith('[submodule '):\n reset()\n continue\n\n try:\n name, value = line.split('=', 1)\n except:\n # too few values?\n continue\n locals_[name.strip()] = value.strip()\n add_result()\n\n return result", "def parse_gitmodule(path):\n rel_path_subm = []\n regex = r\"^path = \"\n with open(os.path.join(path, \".gitmodules\")) as f:\n for line in f:\n line = line.strip()\n match = re.search(regex, line)\n if match:\n rel_path_subm.append(re.sub(regex, '', line))\n rel_path_subm = [os.path.join(path, elem) for elem in rel_path_subm]\n return rel_path_subm", "def get_submodules_config() -> Dict[str,Dict[str,str]]:\n gitmodules_fn = os.path.join(get_gitwdir(), '.gitmodules')\n gitmodules_data = run_cmd(['git', 'config', '--list', '--file', gitmodules_fn])\n prefix = 'submodule.'\n config: Dict[str, Dict[str,str]] = {}\n for line in gitmodules_data.splitlines():\n if not line.startswith(prefix):\n continue\n splitted = line.split('=', maxsplit=1)\n if len(splitted) != 2:\n continue\n section, val = splitted\n # remove \"submodule.\" prefix\n section = section[len(prefix):]\n # split section into module name and variable\n splitted = section.rsplit('.', maxsplit=1)\n if len(splitted) != 2:\n continue\n module_name, var = splitted\n if module_name not in config:\n config[module_name] = {}\n config[module_name][var] = val\n\n return config", "def parse_mod(self, modfile):\n with open(modfile, 'rt') as filehandle:\n for line in filehandle:\n if line.startswith(\"+\") or line.startswith(\"-\"):\n enable, name, version, path = self.parse_mod_entry(line)\n yield modTuple(enable == \"+\", name, version, path, modfile)", "def moduleInfo(moduleName=\"string\", version=bool, definition=bool, listModules=bool, path=bool):\n pass", "def _get_modules_file(self, data, src):\n pass", "def get_module_sources(parsed_lockfile: dict, include_devel: bool = True) -> list:\n sources = []\n hash_re = re.compile(r\"(sha1|sha224|sha384|sha256|sha512|md5):([a-f0-9]+)\")\n for section, packages in parsed_lockfile.items():\n if section == \"package\":\n for package in packages:\n if (\n package[\"category\"] == \"dev\"\n and include_devel\n and not package[\"optional\"]\n or package[\"category\"] == \"main\"\n and not package[\"optional\"]\n ):\n # Check for old metadata format (poetry version < 1.0.0b2)\n if \"hashes\" in parsed_lockfile[\"metadata\"]:\n hashes = parsed_lockfile[\"metadata\"][\"hashes\"][package[\"name\"]]\n # Else new metadata format\n else:\n hashes = []\n for package_name in parsed_lockfile[\"metadata\"][\"files\"]:\n if package_name == package[\"name\"]:\n package_files = parsed_lockfile[\"metadata\"][\"files\"][\n package[\"name\"]\n ]\n num_files = len(package_files)\n for num in range(num_files):\n match = hash_re.search(package_files[num][\"hash\"])\n if match:\n hashes.append(match.group(2))\n url, hash = get_pypi_source(\n package[\"name\"], package[\"version\"], hashes\n )\n source = {\"type\": \"file\", \"url\": url, \"sha256\": hash}\n sources.append(source)\n return sources", "def get_metadata(module_path):\n matches = re.finditer(\n r\"^__(\\w+?)__ *= *'(.*?)'$\",\n read(module_path),\n re.MULTILINE)\n return 
dict(\n (match.group(1), match.group(2).decode('unicode_escape'))\n for match in matches)", "def get_symbols_in_submodule(name):\n symbols = {}\n for k, v in _API_SYMBOLS.items():\n if k.startswith(name):\n symbols[k] = v\n return symbols", "def submodules(self) -> Iterable[str]:\n exit_code, stdout, _ = self.run(\n \"git\",\n \"config\",\n \"--null\",\n \"--file\",\n \".gitmodules\",\n \"--get-regexp\",\n # Get only the path key of each submodule.\n r\"^submodule\\..*\\.path$\",\n record=False,\n )\n if exit_code != 0:\n # The command fails if the project doesn't have submodules (the .gitmodules file doesn't exist).\n return []\n\n keys_and_values = stdout.split(\"\\0\")\n for key_and_value in keys_and_values:\n try:\n key, value = key_and_value.split(\"\\n\", maxsplit=1)\n except ValueError:\n # This should never happen, but we log a warning just in case\n # Git doesn't return the expected format.\n log.warning(\"Wrong key and value format.\", key_and_value=key_and_value)\n continue\n\n if key.endswith(\".path\"):\n yield value\n else:\n # This should never happen, but we log a warning just in case the regex is wrong.\n log.warning(\"Unexpected key extracted fom .gitmodules.\", key=key)", "def _get_mod_info_from_mbs(mod_id):\n mod_info = {}\n url = MBS_URL + '/' + mod_id\n response = json.loads(requests.get(url).text)\n mod_info['state_name'] = response.get(\"state_name\")\n mod_info['scmurl'] = response.get(\"scmurl\")\n mod_info['koji_tag'] = response.get(\"koji_tag\")\n mod_info['rpms'] = response.get(\"tasks\").get(\"rpms\")\n return mod_info", "def read_module_leaders():\n MODULE_LEADER_COL = 'module leader'\n MODULE_CODE_COL = 'module'\n\n # lookups for data\n leader2modules = defaultdict(list)\n module2leader = defaultdict(str)\n\n # file with data\n leader2module_file = os.path.join(INPUT_DIR, MODULE_TO_MODULE_LEADER_FILE)\n with open(leader2module_file, 'r') as input_file:\n reader = csv.DictReader(input_file)\n # loop through and populate lookups\n for row in reader:\n leader2modules[row[MODULE_LEADER_COL]].append(row[MODULE_CODE_COL])\n module2leader[row[MODULE_CODE_COL]] = row[MODULE_LEADER_COL]\n\n return leader2modules, module2leader", "def parse_known_good_file(good_data):\n result = {}\n SITE_MAP = {'github': 'https://github.com'}\n deps = json.loads(good_data)\n assert 'commits' in deps\n for dep in deps['commits']:\n name = dep['name']\n site = dep['site']\n site_url = SITE_MAP.get(site)\n assert site_url, 'Unknown site value: %s' % site\n subrepo = dep['subrepo']\n revision = dep['commit']\n result[str(name)] = '{0}/{1}@{2}'.format(site_url, subrepo, revision)\n return result", "def get_module_info(self, mod_name):\n module_info = self.name_to_module_info.get(mod_name)\n # Android's build system will automatically adding 2nd arch bitness\n # string at the end of the module name which will make atest could not\n # find the matched module. 
Rescan the module-info with the matched module\n # name without bitness.\n if not module_info:\n for _, mod_info in self.name_to_module_info.items():\n if mod_name == mod_info.get(constants.MODULE_NAME, ''):\n return mod_info\n return module_info", "def parse_git_url(url):\n match = re.match(git_re, url)\n if not match:\n raise ValueError(\"bad git URL: %s\" % url)\n\n # initial parse\n scheme, user, hostname, port, path = match.groups()\n\n # special handling for ~ paths (they're never absolute)\n if path.startswith(\"/~\"):\n path = path[1:]\n\n if port is not None:\n try:\n port = int(port)\n except ValueError:\n raise ValueError(\"bad port in git url: %s\" % url)\n\n return (scheme, user, hostname, port, path)", "def _parse_and_return_modules(resolved_model_dir):\n repos = pyang.FileRepository(resolved_model_dir, False)\n ctx = pyang.Context(repos)\n\n statements.add_validation_fun(\n 'reference_3', ['deviation'],\n _add_i_deviation)\n statements.add_validation_fun(\n 'reference_3', ['deviation'],\n _add_d_info)\n statements.add_validation_fun(\n 'reference_3', ['deviate'],\n _remove_d_info)\n\n\n filenames = []\n\n #(name, rev, handle)\n # where handle is (format, absfilename)\n for (_, _, (_, filename)) in repos.get_modules_and_revisions(ctx):\n filenames.append(filename)\n\n modules = []\n\n r = re.compile(r\"^(.*?)(\\@(\\d{4}-\\d{2}-\\d{2}))?\\.(yang|yin)$\")\n for filename in filenames:\n f = filename\n if filename.startswith('file://'):\n f = filename[len('file://') - 1:]\n try:\n fd = open(f)\n text = fd.read()\n except IOError as ex:\n err_msg = \"error %s: %s\\n\" % (filename, str(ex))\n logger.error(err_msg)\n raise YdkGenException(err_msg)\n\n m = r.search(filename)\n ctx.yin_module_map = {}\n if m is not None:\n (name, _dummy, rev, _) = m.groups()\n name = os.path.basename(name)\n logger.debug(\n 'Parsing file %s format %s name %s revision %s', filename, format, name, rev)\n module = ctx.add_module(filename, text, format, name, rev,\n expect_failure_error=False)\n else:\n module = ctx.add_module(filename, text)\n if module is None:\n raise YdkGenException('Could not add module ')\n else:\n modules.append(module)\n\n # all the module have been added so get the context to validate\n # call prevalidate before this and post validate after\n ctx.validate()\n\n def keyfun(e):\n if e[0].ref == filenames[0]:\n return 0\n else:\n return 1\n\n ctx.errors.sort(key=lambda e: (e[0].ref, e[0].line))\n if len(filenames) > 0:\n # first print error for the first filename given\n ctx.errors.sort(key=keyfun)\n\n error_messages = []\n for (epos, etag, eargs) in ctx.errors:\n\n elevel = error.err_level(etag)\n if error.is_warning(elevel):\n logger.warning('%s: %s\\n' %\n (str(epos), error.err_to_str(etag, eargs)))\n else:\n err_msg = '%s: %s\\n' % (str(epos), error.err_to_str(etag, eargs))\n logger.error(err_msg)\n error_messages.append(err_msg)\n\n if len(error_messages) > 0:\n err_msg = '\\n'.join(error_messages)\n raise YdkGenException(err_msg)\n\n return [m for m in modules if m.keyword == 'module']", "def fetch_dags_modules() -> dict:\n\n # Try to get value from env variable first, saving costs from GC secret usage\n dags_modules_str = EnvironmentVariablesBackend().get_variable(AirflowVars.DAGS_MODULE_NAMES)\n if not dags_modules_str:\n dags_modules_str = Variable.get(AirflowVars.DAGS_MODULE_NAMES)\n logging.info(f\"dags_modules str: {dags_modules_str}\")\n dags_modules_ = json.loads(dags_modules_str)\n logging.info(f\"dags_modules: {dags_modules_}\")\n return dags_modules_", "def 
get_git_info(fn: pathlib.Path) -> dict[str, Any]:\n if git is None:\n raise RuntimeError(\"gitpython not installed\")\n repo = git.Repo(find_git_root(fn))\n urls = [url for remote in repo.remotes for url in remote.urls]\n repo_slugs = [_to_repo_slug(url) for url in urls]\n head_sha = repo.head.commit.hexsha\n if repo.git is not None:\n try:\n desc = repo.git.describe(\"--contains\", head_sha)\n except git.GitCommandError:\n desc = repo.git.describe(\"--always\", \"--tags\")\n else:\n desc = \"unknown\"\n\n return {\n \"describe\": desc or \"unknown\",\n \"sha\": head_sha,\n \"repo_slug\": repo_slugs[0] if repo_slugs else None,\n \"repo_slugs\": repo_slugs,\n \"doc_urls\": [_to_doc_url(url) for url in urls],\n \"repo_urls\": [_to_http_url(url) for url in urls],\n \"tree_urls\": [_to_tree_url(url, head_sha) for url in urls],\n \"repo\": repo,\n }", "def readModule(self, name=None):\n\n import json\n filedir = os.path.join(DATA_PATH,'module.json')\n with open( filedir ) as configfile:\n data = json.load(configfile)\n\n modulenames = data.keys()\n if name is None:\n\n return modulenames\n\n if name in modulenames:\n moduleDict = data[name]\n self.moduletype = name\n\n radfile = moduleDict['modulefile']\n self.x = moduleDict['x'] # width of module.\n self.y = moduleDict['y'] # length of module.\n self.z = moduleDict['z']\n self.bifi = moduleDict['bifi'] # panel bifaciality. Not used yet\n if 'scenex' in moduleDict:\n self.scenex = moduleDict['scenex']\n else:\n self.scenex = moduleDict['x']\n if 'sceney' in moduleDict:\n self.sceney = moduleDict['sceney']\n else:\n self.sceney = moduleDict['y']\n if 'offsetfromaxis' in moduleDict:\n self.offsetfromaxis = moduleDict['offsetfromaxis']\n else:\n self.offsetfromaxis = 0\n #\n #create new .RAD file\n if not os.path.isfile(radfile):\n # py2 and 3 compatible: binary write, encode text first\n with open(radfile, 'wb') as f:\n f.write(moduleDict['text'].encode('ascii'))\n #if not os.path.isfile(radfile):\n # raise Exception('Error: module file not found {}'.format(radfile))mod\n self.modulefile = radfile\n\n return moduleDict\n else:\n print('Error: module name {} doesnt exist'.format(name))\n return {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
extract_indices(indices, start_index = 0, stepsize = 1, length = 2) returns all indices in indices that are not contained in the series generated by start_index and stepsize.
def extract_indices(indices, start_index = 0, stepsize = 1, length = 2): samples = np.arange(start_index, length, stepsize).astype('int') return np.setdiff1d(indices, samples)
[ "def process_start_indices(start_indices: Union[int, Iterable[int]],\n max_length: int) -> List[int]:\n if isinstance(start_indices, Number):\n start_indices = range(int(start_indices))\n\n start_indices = np.array(start_indices, dtype=int)\n\n # check, whether index set is not too big\n start_indices = [start_index for start_index in start_indices if\n start_index < max_length]\n\n return start_indices", "def where_not(indx, size):\n return (numpy.setdiff1d(numpy.arange(0,size), indx[0]),)", "def selected_indices(total_number_of_indices, desired_number_of_indices=None):\n\n if desired_number_of_indices is None or desired_number_of_indices >= \\\n total_number_of_indices or desired_number_of_indices < 0:\n return range(total_number_of_indices)\n increase = float(total_number_of_indices) / \\\n float(desired_number_of_indices)\n # generate a regular quasi-random index list\n return [int((i + .5) * increase) for i in range(desired_number_of_indices)]", "def split(xs: Collection, indices: List[int]) -> List[Collection]:\n\n return [\n xs[start:stop]\n for start, stop in zip(\n itertools.chain([None], indices),\n itertools.chain(indices, [None]),\n )\n ]", "def _deduplicate_indexed_slices(values, indices):\n unique_indices, new_index_positions = array_ops.unique(indices)\n summed_values = math_ops.unsorted_segment_sum(\n values, new_index_positions,\n array_ops.shape(unique_indices)[0])\n return (summed_values, unique_indices)", "def split_indices(self, indices):\n out_ind = [[] for _ in range(self.num_patitions)]\n for key in indices:\n part = self.get_partition_index(key)\n ind = self.mapping_to_partition[part][key]\n out_ind[part].append(ind)\n return out_ind", "def slice(self, indices):\r\n for index in xrange(*indices): #loop over range of indices\r\n yield self[index]", "def _get_indices(self, n_indices):\n raise NotImplementedError", "def filter_list_by_index(lst, indices):\r\n if not type(lst) == type(list()):\r\n raise Exception(\"Expected argument type - list but received: \" + str(type(lst)))\r\n \r\n # Make hashable for speed \r\n count = len(lst)\r\n\r\n return [lst[i] \r\n for i in indices\r\n if (i < 0 and abs(i) <= count) or (i >= 0 and i <= count)]", "def remove_all_at(seq, remove_indices):\n if not isinstance(seq, (list, tuple)):\n raise TypeError(\"param 'seq' must be a list or tuple\")\n\n if not isinstance(remove_indices, (list, tuple, set)):\n raise TypeError(\"param 'remove_indices' must be a list, tuple, or set\")\n\n for index in remove_indices:\n if not isinstance(index, int) or index < 0:\n raise ValueError(\n \"param 'remove_indices' must contain only positive integers\"\n )\n\n index = 0\n while index < len(seq):\n if not index in remove_indices:\n yield seq[index]\n index = index + 1", "def without(iterable, remove_indices):\n\tif not hasattr(remove_indices, '__iter__'):\n\t\tremove_indices = {remove_indices}\n\telse:\n\t\tremove_indices = set(remove_indices)\n\tfor k, item in enumerate(iterable):\n\t\tif k in remove_indices:\n\t\t\tcontinue\n\t\tyield item", "def split_by_idxs(seq, idxs):\n\tlast = 0\n\tfor idx in idxs:\n\t\tif not (-len(seq) <= idx < len(seq)):\n\t\t\traise KeyError(f'Idx {idx} is out-of-bounds')\n\t\tyield seq[last:idx]\n\t\tlast = idx\n\tyield seq[last:]", "def get_unique_indices(all_deltas, indices=np.array([0]), threshold=0):\n \n n, J = all_deltas[0].shape\n \n triu=np.triu_indices(J, k=1)\n \n for i, delta in enumerate(all_deltas):\n temp_diff = pairwise_distances(delta.T)\n candidates = np.array([i for i in range(J) if i not in 
indices])\n \n new_indices = []\n \n j = 0\n \n while j < len(candidates):\n candidates[j], temp_diff[:, indices]\n if np.sum(temp_diff[candidates[j], indices] < threshold) == 1:\n indices = np.concatenate((indices, [candidates[j]]))\n \n j+=1\n \n return np.sort(indices)", "def split_dataset_by_indices():", "def indices():\n return [1.0, 3.0, 1.0, 3.0, 1.0]", "def split_into_chunks(alist, indices):\n\n ret = []\n alist_c = copy.deepcopy(alist)\n if np.sum(indices) != len(alist):\n print('Error')\n\n for i in indices:\n ret += [alist_c[:i]]\n del alist_c[:i]\n\n return ret", "def unique_indices(y, column1, column2, n_indices):\n n_samples = len(y[:, 0])\n # np.delete(y, column1, 1)==1).any(axis)1) returns one if one element or more on the current line is equal to one\n # Which we don't want\n mask1 = (y[:, column1] == 1) & np.logical_not((np.delete(y, [column1], 1)==1).any(axis=1))\n mask2 = (y[:, column2] == 1) & np.logical_not((np.delete(y, [column2], 1)==1).any(axis=1))\n # We want a mask of indices, not of Booleans\n mask1 = np.arange(n_samples)[mask1==True]\n mask2 = np.arange(n_samples)[mask2==True]\n indices = np.concatenate((mask1[:n_indices], mask2[:n_indices]))\n return indices", "def select_indices(tensor, indices):\n return tensor.gather(1, indices.unsqueeze(1)).squeeze()", "def remove_short_idx(dataX, dataY, idxs, length):\n\tnew_X = []\n\tnew_Y = []\n\tnew_idxs = []\n\t\n\tfor x in list(range(len(dataX))):\n\t\tif len(dataX[x]) >= length:\n\t\t\tnew_X.append(dataX[x][:length])\n\t\t\tnew_Y.append(dataY[x][:length])\n\t\t\tnew_idxs.append(idxs[x])\n\t\n\tx,y = to_numpy_tensors(new_X, new_Y)\n\treturn x, y, np.array(new_idxs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
backprop_square(ancestor, mode = 'pos')
def backprop_square(ancestor, mode = 'pos'): series = ancestor.series positions = np.arange(0,series.size) #the positions which are not prooven to be squares if mode == 'pos': positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'square' and e[3] == '+')] for element in ancestor.positive_tests[positive_indices]: positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size) indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain squares ancestor.series[indices] = series[indices]**2 else: positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'square' and e[3] == '-')] for element in ancestor.positive_tests[positive_indices]: positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size) indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain squares ancestor.series[indices] = -series[indices]**2 pass
[ "def backprop_cube(ancestor, mode = 'pos'):\n series = ancestor.series\n positions = np.arange(0,series.size) #the positions which are not prooven to be cubes\n if mode == 'pos':\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'cube' and e[3] == '+')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain cubes\n ancestor.series[indices] = series[indices]**3\n else:\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'cube' and e[3] == '-')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain cubes\n ancestor.series[indices] = -series[indices]**3\n pass", "def backprop_prime(ancestor, mode = 'pos'):\n series = ancestor.series\n positions = np.arange(0,series.size) #the positions which are not prooven to be prime\n if mode == 'pos':\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'prime' and e[3] == '+')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain primes\n ancestor.series[indices] = get_prime(series[indices])\n else:\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'prime' and e[3] == '-')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain primes\n ancestor.series[indices] = -get_prime(series[indices])\n pass", "def backpropogate(self, node, result):\n node.update_stats(result)\n if node.is_root():\n return\n self.backpropogate(node.parent, result)", "def arithmetic_co(parent1, parent2, max_points=25): \n\n\n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1\n \n alpha = uniform(0,1) # select a random alpha between 0 and 1\n \n #print(idx1,idx2,idx3)\n #print(alpha)\n \n point1 = parent1[idx1][idx2][idx3] * alpha + (1 - alpha) * parent2[idx1][idx2][idx3] # new value for the weight on offspring 1\n point2 = parent2[idx1][idx2][idx3] * alpha + (1 - alpha) * parent1[idx1][idx2][idx3] # new value for the weight on offspring 2\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n return offspring1, offspring2", "def blend_co(parent1,parent2,max_points=25,alpha=0.01): \n \n \n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = randint(1,len(parent1[idx1][idx2])) 
- 1\n \n #print('indexes:', idx1, idx2, idx3) \n \n gamma = (1. + 2. * alpha) * random.random() - alpha # generating a random gamma\n \n x1 = offspring1[idx1][idx2][idx3] # saving the value of point 1\n x2 = offspring2[idx1][idx2][idx3] # saving the value of point 2\n \n #print('x1:',x1)\n #print('x2:',x2)\n \n point1 = (1. - gamma) * x1 + gamma * x2 # new value for point 1\n point2 = gamma * x1 + (1. - gamma) * x2 # new value for point 2\n \n #print('point1:', point1)\n #print('point2:', point2)\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n #print('\\n')\n \n return offspring1, offspring2", "def compute_belief(self, node, tree):\n if not node.has_children(): # is leaf\n product = node.initial_potentials\n else:\n product = sum([tree.get_edge_by_nodes(node, child).up_msg for child in node.get_children()])\n if node.is_root(): # no potential for root\n return product\n else:\n return project_kbest(product + node.potentials) if self.approximate else product + node.potentials", "def _apply_crossover(self, pop):\n # Probabilities here should strictly be divided by the overall sum, but\n # instead a random number is generated in U(0, sum).\n # Probabilities associated to each individual\n inv_ranks = [1.0 / (indiv[1] + 1) for indiv in pop]\n # Total probability\n total_prob = sum(inv_ranks)\n # Each element is (p_i, i),\n # where i is an individual and p_i its probability\n decorated_pop = zip(inv_ranks, pop)\n \n children_pop = []\n for _ in range(self._pop_size):\n # Random number for the selection of the first parent pval1\n # The selected individual is the first for which the accumulated\n # probability is >= the random number\n pval1 = self._random.uniform(0, total_prob)\n acc_prob = 0\n idx1 = 0\n while decorated_pop[idx1][0] + acc_prob < pval1:\n acc_prob += decorated_pop[idx1][0]\n idx1 += 1\n # The probability accumulator contains the accumulated probability\n # up to and including idx1 - 1\n # Remove the part corresponding to the individual at idx1 from the\n # total and generate the new random number in the new uniform range\n pval2 = self._random.uniform(0, total_prob - decorated_pop[idx1][0])\n\n # pval2 > accumulator means that we can skip the individuals before\n # idx1, and idx1 itself, as we are considering it removed (and is\n # not included in the total probability). We start from idx1 + 1,\n # with the existing value in the accumulator, which is equivalent to\n # the approach used for idx1 with the individual at idx1 removed,\n # but idx2 will still refer to the index of the selected individual\n # without actually removing the one at idx1\n if pval2 > acc_prob:\n idx2 = idx1 + 1\n # The new selected individual will appear before idx1, so we can\n # repeat exactly the same process as for idx1\n else:\n acc_prob = 0\n idx2 = 0\n # The loop is the same for both conditions, with the updated index\n # and accumulator\n while decorated_pop[idx2][0] + acc_prob < pval2:\n acc_prob += 1\n idx2 += 1\n\n # Add the result of the crossover to the list of children\n # Pass just the individual, not the fitness (the final [0])\n parent1 = decorated_pop[idx1][1][0]\n parent2 = decorated_pop[idx2][1][0]\n children_pop.append(self._crossover(parent1, parent2))\n children_pop.append(self._crossover(parent2, parent1))\n return children_pop", "def polyFlipEdge():\n pass", "def nf(outerop, innerop, btree) :\n #if btree[0] == \"forall\" :\n # error(\"Cannot normalize a forall assertion. 
Sorry\")\n # answer = [] # sorry, won't try to format a universal...\n #else :\n nf1 = nfOf(outerop, innerop, nnfOf(btree))\n answer = removeOpposites(removeDuplicates(flatten(outerop, innerop, nf1)))\n return answer", "def compute_max_belief(self, node, tree):\n if not node.has_children(): # is leaf\n product = node.initial_potentials\n else:\n product = sum([tree.get_edge_by_nodes(node, child).max_up_msg for child in node.get_children()])\n if node.is_root(): # no potential for root\n # if len(node.get_children()) == 2:\n # print([tree.get_edge_by_nodes(node, child).up_msg for child in node.get_children()])\n return product\n else:\n return product + node.potentials", "def weights_swap_co(parent1, parent2, max_swaps=25):\n \n \n number_co_points = randint(1,max_swaps) # number of crossover points\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points): # performed number_co_points times\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1 # matrix index\n idx2 = randint(1,len(parent1[idx1])) - 1 # array index\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1 # weight index\n \n swap1 = parent1[idx1][idx2][idx3] \n swap2 = parent2[idx1][idx2][idx3] \n \n offspring1[idx1][idx2][idx3] = swap2 # swapping value 1 with value 2\n offspring2[idx1][idx2][idx3] = swap1 # swapping value 2 with value 1\n \n return offspring1, offspring2", "def crossover(parents, fitness, population):\n _, nchannels, h, w = population.shape\n fitness_pairs = fitness[parents.long()].view(-1, 2)\n prob = fitness_pairs[:, 0] / fitness_pairs.sum(1)\n parental_bernoulli = td.Bernoulli(prob)\n inherit_mask = parental_bernoulli.sample_n(nchannels * h * w) # [N-1, nchannels * h * w]\n inherit_mask = inherit_mask.view(-1, nchannels, h, w)\n parent_features = population[parents.long()]\n children = torch.cuda.FloatTensor(inherit_mask.shape)\n children = where(inherit_mask, parent_features[::2], parent_features[1::2])\n return children", "def contract_squares(state, svd_option=None):\n from .peps import PEPS\n tn = state.grid\n new_tn = np.empty((int((state.nrow + 1) / 2), state.ncol), dtype=object)\n for ((i, j), a), b in zip(np.ndenumerate(tn[:-1:2,:]), tn[1::2,:].flat):\n new_tn[i,j] = sites.contract_x(a, b)\n if svd_option is not None and j > 0 and new_tn.shape != (1, 2):\n new_tn[i,j-1], new_tn[i,j] = sites.reduce_y(new_tn[i,j-1], new_tn[i,j], svd_option)\n # append the left edge if nrow/ncol is odd\n if state.nrow % 2 == 1:\n for i, a in enumerate(tn[-1]):\n new_tn[-1,i] = a.copy()\n # base case\n if new_tn.shape == (1, 1):\n return new_tn[0,0].item() if new_tn[0,0].size == 1 else new_tn[0,0]\n # alternate the neighboring relationship and contract recursively\n return contract_squares(PEPS(new_tn, state.backend).rotate(), svd_option)", "def test_crossover_function(l):\n g1 = graphs.RandomGNP(20, .5)\n g2 = graphs.RandomGNP(20, .5)\n child_graph = l(g1, g2)\n assert child_graph.order() == 20", "def susceptible(g, agent, belief):\n############ Changes ############\n q = g.nodes[agent]['q'] # probability of not getting adopted\n try:\n if nx.shortest_path_length(g.nodes[agent]['M'], *belief) <= 2:\n q *= triangle_sensitivity\n except (nx.NetworkXNoPath, nx.NodeNotFound):\n # no path exists between the nodes\n pass\n\n familiarity = sum([v for k,v in g.nodes[agent]['M'].degree(belief)])\n q *= familiarity_sensitivity**familiarity\n\n adopt = np.random.binomial(1, p=1-q) == 1\n#################\n return adopt", "def get_parent_snap(f):\n 
parent = f.ancestor\n while parent != f:\n f = parent\n parent = f.ancestor\n \n return parent", "def crossover(self):\n\n # here we'll restrict the breeding to the fittest N/2 parents,\n # but other variations exist\n p = list(self.parents)\n p.sort()\n p = p[0:self.N//2]\n random.shuffle(p)\n\n children = []\n\n for n in range(len(p)//2):\n p1 = p.pop()\n p2 = p.pop()\n\n c1 = Chromosome(p1.n_params, p1.fit_func)\n c2 = Chromosome(p1.n_params, p1.fit_func)\n\n c1.r[0:p1.n_params//2] = p1.r[0:p1.n_params//2]\n c1.r[p1.n_params//2:] = p2.r[p1.n_params//2:]\n\n children.append(c1)\n\n c2.r[0:p1.n_params//2] = p2.r[0:p1.n_params//2]\n c2.r[p1.n_params//2:] = p1.r[p1.n_params//2:]\n\n children.append(c2)\n\n # we now have the new generation\n self.generation += 1\n\n self.population = self.parents + children\n self.population.sort()", "def flatness(a,b, alpha, mu): \n flatness = 1/(b-a) * (-np.sqrt(mu)*(b**2-a**2)+ alpha/3*(b**3-a**3))\n \n return flatness", "def forest(x,y):\n for a in range (0, y):\n for b in range (0,x):\n tree()\n print \"\\n\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
backprop_cube(ancestor, mode = 'pos')
def backprop_cube(ancestor, mode = 'pos'): series = ancestor.series positions = np.arange(0,series.size) #the positions which are not prooven to be cubes if mode == 'pos': positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'cube' and e[3] == '+')] for element in ancestor.positive_tests[positive_indices]: positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size) indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain cubes ancestor.series[indices] = series[indices]**3 else: positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'cube' and e[3] == '-')] for element in ancestor.positive_tests[positive_indices]: positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size) indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain cubes ancestor.series[indices] = -series[indices]**3 pass
[ "def backprop_square(ancestor, mode = 'pos'):\n series = ancestor.series\n positions = np.arange(0,series.size) #the positions which are not prooven to be squares\n if mode == 'pos':\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'square' and e[3] == '+')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain squares\n ancestor.series[indices] = series[indices]**2\n else:\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'square' and e[3] == '-')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain squares\n ancestor.series[indices] = -series[indices]**2\n pass", "def backprop_prime(ancestor, mode = 'pos'):\n series = ancestor.series\n positions = np.arange(0,series.size) #the positions which are not prooven to be prime\n if mode == 'pos':\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'prime' and e[3] == '+')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain primes\n ancestor.series[indices] = get_prime(series[indices])\n else:\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'prime' and e[3] == '-')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain primes\n ancestor.series[indices] = -get_prime(series[indices])\n pass", "def blend_co(parent1,parent2,max_points=25,alpha=0.01): \n \n \n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1\n \n #print('indexes:', idx1, idx2, idx3) \n \n gamma = (1. + 2. * alpha) * random.random() - alpha # generating a random gamma\n \n x1 = offspring1[idx1][idx2][idx3] # saving the value of point 1\n x2 = offspring2[idx1][idx2][idx3] # saving the value of point 2\n \n #print('x1:',x1)\n #print('x2:',x2)\n \n point1 = (1. - gamma) * x1 + gamma * x2 # new value for point 1\n point2 = gamma * x1 + (1. 
- gamma) * x2 # new value for point 2\n \n #print('point1:', point1)\n #print('point2:', point2)\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n #print('\\n')\n \n return offspring1, offspring2", "def cube():\n vtype = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('a_color', np.float32, 4)]\n # Vertices positions\n v = [[1, 1, 1], [-1, 1, 1], [-1, -1, 1], [1, -1, 1],\n [1, -1, -1], [1, 1, -1], [-1, 1, -1], [-1, -1, -1]]\n # Face Normals\n n = [[0, 0, 1], [1, 0, 0], [0, 1, 0],\n [-1, 0, 1], [0, -1, 0], [0, 0, -1]]\n # Vertice colors\n colors = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],\n [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1]]\n\n V = np.array([(v[0], n[0], colors[0]), (v[1], n[0], colors[1]),\n (v[2], n[0], colors[2]), (v[3], n[0], colors[3]),\n (v[0], n[1], colors[0]), (v[3], n[1], colors[3]),\n (v[4], n[1], colors[4]), (v[5], n[1], colors[5]),\n (v[0], n[2], colors[0]), (v[5], n[2], colors[5]),\n (v[6], n[2], colors[6]), (v[1], n[2], colors[1]),\n (v[1], n[3], colors[1]), (v[6], n[3], colors[6]),\n (v[7], n[3], colors[7]), (v[2], n[3], colors[2]),\n (v[7], n[4], colors[7]), (v[4], n[4], colors[4]),\n (v[3], n[4], colors[3]), (v[2], n[4], colors[2]),\n (v[4], n[5], colors[4]), (v[7], n[5], colors[7]),\n (v[6], n[5], colors[6]), (v[5], n[5], colors[5])],\n dtype=vtype)\n I1 = np.resize(np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32), 6 * (2 * 3))\n I1 += np.repeat(4 * np.arange(2 * 3, dtype=np.uint32), 6)\n\n I2 = np.resize(\n np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.uint32), 6 * (2 * 4))\n I2 += np.repeat(4 * np.arange(6, dtype=np.uint32), 8)\n\n return V, I1, I2", "def backpropogate(self, node, result):\n node.update_stats(result)\n if node.is_root():\n return\n self.backpropogate(node.parent, result)", "def arithmetic_co(parent1, parent2, max_points=25): \n\n\n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1\n \n alpha = uniform(0,1) # select a random alpha between 0 and 1\n \n #print(idx1,idx2,idx3)\n #print(alpha)\n \n point1 = parent1[idx1][idx2][idx3] * alpha + (1 - alpha) * parent2[idx1][idx2][idx3] # new value for the weight on offspring 1\n point2 = parent2[idx1][idx2][idx3] * alpha + (1 - alpha) * parent1[idx1][idx2][idx3] # new value for the weight on offspring 2\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n return offspring1, offspring2", "def crossover(parents, fitness, population):\n _, nchannels, h, w = population.shape\n fitness_pairs = fitness[parents.long()].view(-1, 2)\n prob = fitness_pairs[:, 0] / fitness_pairs.sum(1)\n parental_bernoulli = td.Bernoulli(prob)\n inherit_mask = parental_bernoulli.sample_n(nchannels * h * w) # [N-1, nchannels * h * w]\n inherit_mask = inherit_mask.view(-1, nchannels, h, w)\n parent_features = population[parents.long()]\n children = torch.cuda.FloatTensor(inherit_mask.shape)\n children = where(inherit_mask, parent_features[::2], parent_features[1::2])\n return children", "def get_out_vertex(self):", "def test_backprop(self):\n keys = torch.tensor(\n [\n [0, 0, 2.1, 0, 0],\n [0, 0, 2.2, 0, 0],\n [0, 0, 1.9, 0, 0],\n # [1, 0, 0.0, 0, 0],\n # [0, 1, 0.0, 0, 0],\n # [0, 0, 0.0, 1, 0],\n # 
[0, 0, 0.0, 0, 1],\n ]\n )\n values = torch.tensor([2.1, 2.3, 2.]).unsqueeze(1)\n N, key_size = keys.shape\n assert len(keys) == len(values) == N\n\n # filll DND\n dnd = DND(key_size, torch.device(\"cpu\"), max_size=N, knn_no=3)\n for k, v in zip(keys, values):\n dnd.write(k, v, update_rule=lambda old, new: new)\n\n self.assertEqual(len(dnd), N) # the DND should be of size N\n\n # high dimensional new state\n obs = torch.tensor([[0, 0, 1.0, 2.0, 1.0, 0, 0]])\n params = torch.randn(key_size, obs.shape[1])\n params.requires_grad_(True)\n\n h = obs @ params.t()\n dnd.rebuild_tree()\n val = dnd.lookup(h)\n\n h.register_hook(lambda g: print(\"h hook: \", g))\n val.register_hook(lambda g: print(\"v hook: \", g))\n\n # print(\"h: \", h)\n # print(\"h.grad? \", h.requires_grad)\n # print(\"h.grad: \", h.grad)\n # print(\"val: \", val)\n # loss = (val - torch.tensor([0.0])).pow(2).squeeze()\n # print(\"loss: \", loss)\n # loss.backward()\n # print(\"h.grad: \", h.grad)\n # print(\"w.grad: \", params.grad)\n raise NotImplementedError(\"Implement DND backprop test!\")", "def testReproducibleBackpropToBoxes(self):\n self._testReproducibleBackprop(test_image_not_boxes=False)", "def calculate_cube():\n # initial values for edge_length, volume and area\n edge_length = 1\n volume = 1\n area = 6\n while volume != area:\n edge_length = edge_length + 1\n volume = edge_length ** 3\n area = 6 * edge_length ** 2\n print(f\"Number of bricks: {volume}\")\n print(f\"Dimensions: {edge_length} x {edge_length} x {edge_length}\")", "def polyFlipEdge():\n pass", "def draw_cube(p):\n p.set('linecolor', 'g')\n p.vector(0, 1)\n p.vector(1, 0)\n p.vector(0, -1)\n p.vector(-1, 0)\n p.draw()\n p.set('linecolor', 'b')", "def calculate_change_mesh(self):", "def transfer_operators(self):\n coarse = self\n fine = self.child\n\n\n all_tris = np.arange(fine.topology.P2).reshape(coarse.topology.P2, 4)\n central_tris = all_tris[:,0]\n corner_tris = all_tris[:,1:]\n #first, compute contribution to transfer matrices from the central refined triangle\n\n coarse_dual = coarse.dual\n fine_dual = fine.dual[central_tris]\n face_edge_mid = util.gather(fine.topology.FV[0::4], fine.primal)\n\n fine_edge_normal = [np.cross(face_edge_mid[:,i-2,:], face_edge_mid[:,i-1,:]) for i in range(3)]\n fine_edge_mid = [(face_edge_mid[:,i-2,:] + face_edge_mid[:,i-1,:])/2 for i in range(3)]\n fine_edge_dual = [np.cross(fine_edge_mid[i], fine_edge_normal[i]) for i in range(3)]\n fine_edge_normal = np.array(fine_edge_normal)\n fine_edge_mid = np.array(fine_edge_mid)\n fine_edge_dual = np.array(fine_edge_dual)\n\n coarse_areas = [triangle_area_from_corners(coarse_dual, face_edge_mid[:,i-2,:], face_edge_mid[:,i-1,:]) for i in range(3)]\n fine_areas = [triangle_area_from_corners(fine_dual , face_edge_mid[:,i-2,:], face_edge_mid[:,i-1,:]) for i in range(3)]\n fine_areas = [(fine_areas[i-2]+fine_areas[i-1])/2 for i in range(3)]\n coarse_areas = np.array(coarse_areas)\n fine_areas = np.array(fine_areas)\n\n #normal of edge midpoints to coarse dual\n interior_normal = np.array([np.cross(face_edge_mid[:,i,:], coarse_dual) for i in range(3)])\n\n #the 0-3 index of the overlapping domains\n #biggest of the subtris formed with the coarse dual vertex seems to work; but cant prove why it is so...\n touching = np.argmax(coarse_areas, axis=0)\n## print touching\n## print fine_areas\n## print coarse_areas\n\n #indexing arrays\n I = np.arange(len(touching))\n m = touching #middle pair\n l = touching-1 #left-rotated pair\n r = touching-2 #right-rotated pair\n\n #compute sliver 
triangles\n sliver_r = triangle_area_from_normals(\n +fine_edge_normal[l, I],\n +fine_edge_dual [l, I],\n +interior_normal [r, I])\n sliver_l = triangle_area_from_normals(\n +fine_edge_normal[r, I],\n -fine_edge_dual [r, I],\n -interior_normal [l, I])\n\n## print 'slivers'\n## print sliver_l\n## print sliver_r\n\n assert(np.all(sliver_l>-1e-10))\n assert(np.all(sliver_r>-1e-10))\n\n\n #assemble area contributions of the middle triangle\n areas = np.empty((len(touching),3,3)) #coarsetris x coarsevert x finevert\n #the non-overlapping parts\n areas[I,l,l] = 0\n areas[I,r,r] = 0\n #triangular slivers disjoint from the m,m intersection\n areas[I,r,l] = sliver_l\n areas[I,l,r] = sliver_r\n #subset of coarse tri bounding sliver\n areas[I,r,m] = coarse_areas[r,I] - sliver_l\n areas[I,l,m] = coarse_areas[l,I] - sliver_r\n #subset of fine tri bounding sliver\n areas[I,m,l] = fine_areas[l,I] - sliver_l\n areas[I,m,r] = fine_areas[r,I] - sliver_r\n #square middle region; may compute as fine or caorse minus its flanking parts\n areas[I,m,m] = coarse_areas[m,I] - areas[I,m,l] - areas[I,m,r]\n\n #we may get numerical negativity for 2x2x2 symmetry, with equilateral fundemantal domain,\n #or high subdivision levels. or is error at high subdivision due to failing of touching logic?\n assert(np.all(areas > -1e-10))\n\n #areas maps between coarse vertices and fine edge vertices.\n #add mapping for coarse to fine vertices too\n\n #need to grab coarsetri x 3coarsevert x 3finevert arrays of coarse and fine vertices\n fine_vertex = np.repeat( fine .topology.FV[0::4, None, :], 3, axis=1)\n coarse_vertex = np.repeat( coarse.topology.FV[: , : , None], 3, axis=2)\n\n def coo_matrix(data, row, col):\n \"\"\"construct a coo_matrix from data and index arrays\"\"\"\n return util.coo_matrix(\n (data.ravel(),(row.ravel(), col.ravel())),\n shape=(coarse.topology.D2, fine.topology.D2))\n\n center_transfer = coo_matrix(areas, coarse_vertex, fine_vertex)\n\n\n #add corner triangle contributions; this is relatively easy\n #coarsetri x 3coarsevert x 3finevert\n corner_vertex = util.gather(corner_tris, fine.topology.FV)\n corner_dual = util.gather(corner_tris, fine.dual)\n corner_primal = util.gather(corner_vertex, fine.primal)\n\n #coarsetri x 3coarsevert x 3finevert\n corner_areas = triangle_areas_around_center(corner_dual, corner_primal)\n #construct matrix\n corner_transfer = coo_matrix(corner_areas, coarse_vertex, corner_vertex)\n self.transfer = util.csr_matrix(center_transfer + corner_transfer)\n\n #calc normalizations\n self.coarse_area = self.transfer * np.ones(fine .topology.D2)\n self.fine_area = self.transfer.T * np.ones(coarse.topology.D2)\n\n self.f = np.sqrt( self.fine_area)[:,None]\n self.c = np.sqrt( self.coarse_area)[:,None]\n\n #test for consistency with metric calculations\n assert(np.allclose(self.coarse_area, coarse.D2P0, 1e-10))\n assert(np.allclose(self.fine_area , fine .D2P0, 1e-10))", "def test_compose_front(self):\n # UnitaryChannel evolution\n chan1 = SuperOp(self.sopX)\n chan2 = SuperOp(self.sopY)\n chan = chan1.compose(chan2, front=True)\n targ = SuperOp(self.sopZ)\n self.assertEqual(chan, targ)\n\n # 50% depolarizing channel\n chan1 = SuperOp(self.depol_sop(0.5))\n chan = chan1.compose(chan1, front=True)\n targ = SuperOp(self.depol_sop(0.75))\n self.assertEqual(chan, targ)\n\n # Random superoperator\n mat1 = self.rand_matrix(4, 4)\n mat2 = self.rand_matrix(4, 4)\n chan1 = SuperOp(mat1)\n chan2 = SuperOp(mat2)\n targ = SuperOp(np.dot(mat2, mat1))\n self.assertEqual(chan2.compose(chan1, front=True), 
targ)\n targ = SuperOp(np.dot(mat1, mat2))\n self.assertEqual(chan1.compose(chan2, front=True), targ)\n\n # Compose different dimensions\n chan1 = SuperOp(self.rand_matrix(16, 4))\n chan2 = SuperOp(self.rand_matrix(4, 16))\n chan = chan1.compose(chan2, front=True)\n self.assertEqual(chan.dim, (4, 4))\n chan = chan2.compose(chan1, front=True)\n self.assertEqual(chan.dim, (2, 2))", "def my_cube (x):\n return (x**3)", "def crossover(self):\n\n # here we'll restrict the breeding to the fittest N/2 parents,\n # but other variations exist\n p = list(self.parents)\n p.sort()\n p = p[0:self.N//2]\n random.shuffle(p)\n\n children = []\n\n for n in range(len(p)//2):\n p1 = p.pop()\n p2 = p.pop()\n\n c1 = Chromosome(p1.n_params, p1.fit_func)\n c2 = Chromosome(p1.n_params, p1.fit_func)\n\n c1.r[0:p1.n_params//2] = p1.r[0:p1.n_params//2]\n c1.r[p1.n_params//2:] = p2.r[p1.n_params//2:]\n\n children.append(c1)\n\n c2.r[0:p1.n_params//2] = p2.r[0:p1.n_params//2]\n c2.r[p1.n_params//2:] = p1.r[p1.n_params//2:]\n\n children.append(c2)\n\n # we now have the new generation\n self.generation += 1\n\n self.population = self.parents + children\n self.population.sort()", "def decayVertex ( mcp ) :\n return LoKi.MCVertices.decayVertex ( mcp )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
backprop_prime(ancestor, mode = 'pos')
def backprop_prime(ancestor, mode = 'pos'): series = ancestor.series positions = np.arange(0,series.size) #the positions which are not prooven to be prime if mode == 'pos': positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'prime' and e[3] == '+')] for element in ancestor.positive_tests[positive_indices]: positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size) indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain primes ancestor.series[indices] = get_prime(series[indices]) else: positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'prime' and e[3] == '-')] for element in ancestor.positive_tests[positive_indices]: positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size) indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain primes ancestor.series[indices] = -get_prime(series[indices]) pass
[ "def backprop_square(ancestor, mode = 'pos'):\n series = ancestor.series\n positions = np.arange(0,series.size) #the positions which are not prooven to be squares\n if mode == 'pos':\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'square' and e[3] == '+')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain squares\n ancestor.series[indices] = series[indices]**2\n else:\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'square' and e[3] == '-')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain squares\n ancestor.series[indices] = -series[indices]**2\n pass", "def backpropogate(self, node, result):\n node.update_stats(result)\n if node.is_root():\n return\n self.backpropogate(node.parent, result)", "def backprop_cube(ancestor, mode = 'pos'):\n series = ancestor.series\n positions = np.arange(0,series.size) #the positions which are not prooven to be cubes\n if mode == 'pos':\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'cube' and e[3] == '+')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain cubes\n ancestor.series[indices] = series[indices]**3\n else:\n positive_indices = [i for i, e in enumerate(ancestor.positive_tests) if (e[0] == 'cube' and e[3] == '-')]\n for element in ancestor.positive_tests[positive_indices]:\n positions = extract_indices(positions, start_index = element[1], stepsize = element[2], length = series.size)\n indices = np.setdiff1d(np.arange(0,series.size),positions) #the positions which contain cubes\n ancestor.series[indices] = -series[indices]**3\n pass", "def lnprior(p):\n return 0", "def _apply_crossover(self, pop):\n # Probabilities here should strictly be divided by the overall sum, but\n # instead a random number is generated in U(0, sum).\n # Probabilities associated to each individual\n inv_ranks = [1.0 / (indiv[1] + 1) for indiv in pop]\n # Total probability\n total_prob = sum(inv_ranks)\n # Each element is (p_i, i),\n # where i is an individual and p_i its probability\n decorated_pop = zip(inv_ranks, pop)\n \n children_pop = []\n for _ in range(self._pop_size):\n # Random number for the selection of the first parent pval1\n # The selected individual is the first for which the accumulated\n # probability is >= the random number\n pval1 = self._random.uniform(0, total_prob)\n acc_prob = 0\n idx1 = 0\n while decorated_pop[idx1][0] + acc_prob < pval1:\n acc_prob += decorated_pop[idx1][0]\n idx1 += 1\n # The probability accumulator contains the accumulated probability\n # up to and including idx1 - 1\n # Remove the part corresponding to the individual at idx1 from the\n # total and generate the new random number in the new uniform range\n pval2 = self._random.uniform(0, total_prob - decorated_pop[idx1][0])\n\n # pval2 > accumulator means that we can skip the individuals before\n # idx1, and idx1 itself, as we are considering it 
removed (and is\n # not included in the total probability). We start from idx1 + 1,\n # with the existing value in the accumulator, which is equivalent to\n # the approach used for idx1 with the individual at idx1 removed,\n # but idx2 will still refer to the index of the selected individual\n # without actually removing the one at idx1\n if pval2 > acc_prob:\n idx2 = idx1 + 1\n # The new selected individual will appear before idx1, so we can\n # repeat exactly the same process as for idx1\n else:\n acc_prob = 0\n idx2 = 0\n # The loop is the same for both conditions, with the updated index\n # and accumulator\n while decorated_pop[idx2][0] + acc_prob < pval2:\n acc_prob += 1\n idx2 += 1\n\n # Add the result of the crossover to the list of children\n # Pass just the individual, not the fitness (the final [0])\n parent1 = decorated_pop[idx1][1][0]\n parent2 = decorated_pop[idx2][1][0]\n children_pop.append(self._crossover(parent1, parent2))\n children_pop.append(self._crossover(parent2, parent1))\n return children_pop", "def crossover(parents, fitness, population):\n _, nchannels, h, w = population.shape\n fitness_pairs = fitness[parents.long()].view(-1, 2)\n prob = fitness_pairs[:, 0] / fitness_pairs.sum(1)\n parental_bernoulli = td.Bernoulli(prob)\n inherit_mask = parental_bernoulli.sample_n(nchannels * h * w) # [N-1, nchannels * h * w]\n inherit_mask = inherit_mask.view(-1, nchannels, h, w)\n parent_features = population[parents.long()]\n children = torch.cuda.FloatTensor(inherit_mask.shape)\n children = where(inherit_mask, parent_features[::2], parent_features[1::2])\n return children", "def arithmetic_co(parent1, parent2, max_points=25): \n\n\n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1\n \n alpha = uniform(0,1) # select a random alpha between 0 and 1\n \n #print(idx1,idx2,idx3)\n #print(alpha)\n \n point1 = parent1[idx1][idx2][idx3] * alpha + (1 - alpha) * parent2[idx1][idx2][idx3] # new value for the weight on offspring 1\n point2 = parent2[idx1][idx2][idx3] * alpha + (1 - alpha) * parent1[idx1][idx2][idx3] # new value for the weight on offspring 2\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n return offspring1, offspring2", "def compute_belief(self, node, tree):\n if not node.has_children(): # is leaf\n product = node.initial_potentials\n else:\n product = sum([tree.get_edge_by_nodes(node, child).up_msg for child in node.get_children()])\n if node.is_root(): # no potential for root\n return product\n else:\n return project_kbest(product + node.potentials) if self.approximate else product + node.potentials", "def relative_prunner(self,candidates):\n thr = np.log(self.args.rel_prunning) + candidates[0]._lprob\n filtered = [x for x in candidates if x._lprob >= thr ]\n if len(filtered)==0:\n pdb.set_trace()\n\n return filtered", "def step(parents: be.Population, fitness: be.Fitness) -> tuple:\n recombination_schema = 'edge-3' # Other possible options are: 'pmx', 'order' or 'cycle'\n mutation_schema = 'inversion' # Other possible options are: 'swap', 'insert' or 'scramble'\n mutation_probability = 0.3 \n mutation_possible_events = 3\n ranking_selection_schema = 'tournament' # Other possible options for ranking selection are: 'sus' or 
'roulette'\n tournament_k = 2\n tournament_w = 1\n tournament_replacement = False\n elite_size = 0.1 # Select the 10% of the best individuals for the next generation\n annihilation_size = 0.1 # Remove the 10% of the least-fitted individuals\n\n # -- ALGORITHM STEPS -- #\n\n # Generate offspring (offspring size == parents size)\n offspring = be.recombination(population=parents, n=len(parents), schema=recombination_schema)\n\n # Mutate offspring\n be.mutation(population=offspring, probability=mutation_probability,\n possible_events=mutation_possible_events, schema=mutation_schema)\n\n # Evaluate offspring fitness\n be.evaluate(population=offspring, fitness_function=fitness)\n\n # Merge offspring and parents\n parents_offspring = be.merge_populations(parents, offspring)\n\n # Select elite population\n elite = be.survivor_selection(population=parents_offspring, schema='elitism', select=elite_size)\n\n # Annihilate least-fitted individuals\n parents_offspring = be.survivor_selection(\n population=parents_offspring, schema='annihilation', annihilate=annihilation_size)\n\n # Apply ranking selection (by selecting a population with a similar size to the parents minus the size of the elite)\n next_generation = be.ranking_selection(\n population=parents_offspring, n=len(parents) - len(elite), schema=ranking_selection_schema,\n w=tournament_w, k=tournament_k, replacement=tournament_replacement)\n\n # Adding the elite to the next generation population\n next_generation = be.merge_populations(next_generation, elite)\n\n # Create the population report\n report.create_report(population=next_generation, population_name='Basic GA population', increment_generation=True)\n\n # If we only wanted to return the first solution found, we could return an EarlyStopping object, which will indicate\n # to the algorithm that the execution is finished\n for individual in next_generation:\n if individual.fitness[0] == np.inf:\n return next_generation, be.EarlyStopping(individual)\n\n return next_generation, None", "def discontinuite_absolue(values, feature, parent):\n return max(values[0],values[1]) - min(values[0],values[1])", "def crossOver(self, parents, nbChildren):\n xdim = self.numParameters\n # assert xdim == parents[0][0].shape[0]\n children = []\n diff = 0\n for i in range(nbChildren):\n if xdim < 2:\n children.append(choice(parents))\n else:\n res = zeros(xdim)\n point = choice(range(xdim-1))\n if not self.tournament:\n p1 = choice(parents)\n p2 = choice(parents)\n c = (p1 - p2).all()\n print p1.shape\n diff += where(c, 1, 0)\n else:\n p1, p2 = parents[i]\n print 'p1', p1.shape\n print 'p2', p2.shape\n print self._allGenerations[0][0][0].shape\n res[:point] = p1[:point]\n res[point:] = p2[point:]\n children.append(res)\n assert diff < nbChildren\n print diff / float(nbChildren)\n print array(children).shape\n return children", "def greedy_policy(current_state: tuple, eps: float):\n prob = [eps / actions_set_len] * actions_set_len\n arg_min_index = np.where(state_action_values[current_state] == max(\n state_action_values[current_state]))[0]\n prob[np.random.choice(arg_min_index)] = 1 - eps + eps / actions_set_len\n return prob", "def blend_co(parent1,parent2,max_points=25,alpha=0.01): \n \n \n number_co_points = randint(1,max_points)\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points):\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1\n idx2 = randint(1,len(parent1[idx1])) - 1\n idx3 = 
randint(1,len(parent1[idx1][idx2])) - 1\n \n #print('indexes:', idx1, idx2, idx3) \n \n gamma = (1. + 2. * alpha) * random.random() - alpha # generating a random gamma\n \n x1 = offspring1[idx1][idx2][idx3] # saving the value of point 1\n x2 = offspring2[idx1][idx2][idx3] # saving the value of point 2\n \n #print('x1:',x1)\n #print('x2:',x2)\n \n point1 = (1. - gamma) * x1 + gamma * x2 # new value for point 1\n point2 = gamma * x1 + (1. - gamma) * x2 # new value for point 2\n \n #print('point1:', point1)\n #print('point2:', point2)\n \n offspring1[idx1][idx2][idx3] = point1 # updating\n offspring2[idx1][idx2][idx3] = point2 # updating\n \n #print('\\n')\n \n return offspring1, offspring2", "def crossover(self):\n\n # here we'll restrict the breeding to the fittest N/2 parents,\n # but other variations exist\n p = list(self.parents)\n p.sort()\n p = p[0:self.N//2]\n random.shuffle(p)\n\n children = []\n\n for n in range(len(p)//2):\n p1 = p.pop()\n p2 = p.pop()\n\n c1 = Chromosome(p1.n_params, p1.fit_func)\n c2 = Chromosome(p1.n_params, p1.fit_func)\n\n c1.r[0:p1.n_params//2] = p1.r[0:p1.n_params//2]\n c1.r[p1.n_params//2:] = p2.r[p1.n_params//2:]\n\n children.append(c1)\n\n c2.r[0:p1.n_params//2] = p2.r[0:p1.n_params//2]\n c2.r[p1.n_params//2:] = p1.r[p1.n_params//2:]\n\n children.append(c2)\n\n # we now have the new generation\n self.generation += 1\n\n self.population = self.parents + children\n self.population.sort()", "def replacement_parents_better(population, parents, offspring):\n for i in range(len(offspring)):\n p = parents[i]\n if p.fitness.values > offspring[i].fitness.values:\n idx = np.where((population == p).all(axis=1))[0]\n if len(idx) > 0:\n population[i] = offspring[i]\n\n return population", "def discontinuite_relative(values, feature, parent):\n return max(float(values[0]),float(values[1]))/min(float(values[0]),float(values[1]))", "def weights_swap_co(parent1, parent2, max_swaps=25):\n \n \n number_co_points = randint(1,max_swaps) # number of crossover points\n \n offspring1 = parent1.copy()\n offspring2 = parent2.copy()\n \n for i in range(number_co_points): # performed number_co_points times\n \n # randomly get a weight index to perform the crossover\n idx1 = randint(1,len(parent1)) - 1 # matrix index\n idx2 = randint(1,len(parent1[idx1])) - 1 # array index\n idx3 = randint(1,len(parent1[idx1][idx2])) - 1 # weight index\n \n swap1 = parent1[idx1][idx2][idx3] \n swap2 = parent2[idx1][idx2][idx3] \n \n offspring1[idx1][idx2][idx3] = swap2 # swapping value 1 with value 2\n offspring2[idx1][idx2][idx3] = swap1 # swapping value 2 with value 1\n \n return offspring1, offspring2", "def joint_probability(people, one_gene, two_genes, have_trait):\n #--Initialize probability for this 'round' (specific situation), to be modified...\n probability = 1.0\n\n #--Loop all people in pop. 
data:\n for person in people:\n #\n #--Get Person's number of genes\n gene_num = gene_count(person, one_gene, two_genes)\n #\n #--Get whether that Person has trait exhibited or not:\n if person in have_trait: # check list\n has_trait = True # hearing impairment expressed\n else:\n has_trait = False # no hearing impairment\n #\n #--Parent data (check, could be None):\n mom = people[person]['mother']\n dad = people[person]['father']\n\n\n #--Unconditional probability: for person IF NOT parent data:\n if dad is None and mom is None:\n probability *= PROBS[\"trait\"][gene_num][has_trait] * PROBS[\"gene\"][gene_num]\n #\n # O R :\n #\n #--Conditional probability: for person IF parent data available (child):\n else:\n #--Get mom & dad's num of genes:\n mom_genes = gene_count(mom, one_gene, two_genes)\n dad_genes = gene_count(dad, one_gene, two_genes)\n\n #\n # Child gets probability from ONE of the following paths:\n #\n\n #--Child has 0 copies, 1 way to get (not mom AND not dad)\n if gene_num == 0:\n probability *= inherit(mom_genes, False) * inherit(dad_genes, False)\n\n #--Child has 1 copy, 2 ways to get (mom not dad, OR dad not mom):\n elif gene_num == 1:\n probability *= inherit(mom_genes, True) * inherit(dad_genes, False) + inherit(mom_genes, False) * inherit(dad_genes, True)\n\n #--Child has 2 copies, 1 way to get (mom AND dad):\n elif gene_num == 2:\n probability *= inherit(mom_genes, True) * inherit(dad_genes, True)\n\n #--Lastly, the probability of child having the trait expressed or not with their given genes:\n probability *= PROBS[\"trait\"][gene_num][has_trait]\n #\n #\n #print(f\">>>>>>> {probability}\")\n return probability" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify that a block actually has the correct hash when submitted by a different miner
def verify_block(self, block): sha = hasher.sha256('a') sha.update( str(block.block_id) + str(block.miner_id) + str(block.timestamp) + str(block.data) + str(block.previous_hash)) verify_hashed = sha.hexdigest() if verify_hashed != block.hash: print("Miner ({}) could not verify the previous generated block.", self.mid) return 0. return 1.
[ "def test_last_block_hash():\n # Setup\n mock_ledger = Ledger('monies')\n mock_miner = Miner('Mock', mock_ledger)\n mock_header = {'hdr': 'foo'}\n mock_ledger.block_count = 1\n mock_ledger.all_transactions.append(mock_header)\n mock_message = mock_ledger.all_transactions[0]['hdr']\n mock_nonce_attempts = 1000000\n mock_difficulty = 4\n mock_hash = cryptohash(mock_message, mock_nonce_attempts, mock_difficulty)\n # Assert\n assert mock_miner.last_block_hash() == mock_hash", "def block_hash(self, block):\n return block._hash.hexdigest() + \"\\n\"", "def verify_proof_of_work(self) -> bool:\n block_dict = copy.deepcopy(self.__dict__)\n block_dict['transactions'] = [str(tx) for tx in block_dict['transactions']]\n incoming_hash = block_dict.pop('hash') # remove hash from object to verify the rest of the contents\n verify_hash = hashlib.sha256(json.dumps(block_dict).encode()).hexdigest() # recompute hash value of contents\n return verify_hash == incoming_hash", "def test_get_xrp__ripple_block_details_by_block_hash(self):\n pass", "def verify_block(self, block, data):\n existing_data = self.read_block(block)\n print(\"Verifying...\", end='')\n if existing_data != data:\n print(\" VERIFICATION ERROR!\", end='\\n\\n')\n raise VerificationError\n print(\" VERIFICATION OK\", end='\\n\\n')", "def __eq__(self, block: 'Block'):\n return self.hash == block.hash", "def hash(self, block):\r\n block_string = json.dumps(block, sort_keys=True).encode()\r\n return hashlib.sha256(block_string).hexdigest()", "def test_signature():\n blockchain = Blockchain()\n blockchain.read_metadata()\n blockchain.read_address_pool_data()\n blockchain.read_genesis_data()\n block = blockchain._blocks[0]\n blockchain.verify_transaction('Eric Chen', block.transactions[0])", "def validate_pow(self, block):\n compareStr='0'\n for idx in range(self.difficulty - 1):\n compareStr += '0'\n return block.getHeaderHash()[:self.difficulty] == compareStr and block.previousBlockHash == self.blockchain[-1].hash", "def validate(self, block, parent):\n if not self.check_hash(block) == block.hash_val:\n # block's stored hash matches\n return False\n\n if (block.hash_val[:self.difficulty] !=\n \"\".join([\"0\" for _ in range(self.difficulty)])):\n # block's hash has the required number of zerores\n return False\n\n if parent is not None:\n # checks for non-genesis blocks (parent required)\n if block.timestamp < parent.timestamp:\n # block must have been created after its parent\n return False\n\n if parent.hash_val != block.parent_hash:\n # block's stored hash of its parent should match the parent\n # block's hash\n # n.b. 
the parent's hash is verified to be valid of its stored\n # hash since it is part of the chain, thus `validate` approved\n # it before\n return False\n\n if block.index != parent.index+1:\n # block should immediately follow its parent in the chain\n return False\n\n return True", "def _check_hash_parse(self, spec):\n # full hash\n self.check_parse(str(spec), \"/\" + spec.dag_hash())\n\n # partial hash\n self.check_parse(str(spec), \"/ \" + spec.dag_hash()[:5])\n\n # name + hash\n self.check_parse(str(spec), spec.name + \"/\" + spec.dag_hash())\n\n # name + version + space + partial hash\n self.check_parse(\n str(spec), spec.name + \"@\" + str(spec.version) + \" /\" + spec.dag_hash()[:6]\n )", "def block_hash_send(self, f, block):\n if block:\n blockhash = self.block_hash(block)\n else:\n # Send invalid block hash to force retransmit.\n blockhash = \"0\"*40+\"\\n\"\n self._write(f, blockhash)\n return blockhash", "def validate_file_md5_hash(file, original_hash):\n\n if get_file_md5_hash(file) == original_hash:\n return True\n\n return False", "def test_hash():\n \n # Create a Dealer\n dealer = Dealer(p256, n_participants, s_secrets, access_structures)\n\n # test hash function - it should be repeatable for the same Dealer object\n hash1 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n hash2 = common.hash(b'BYTESEQUENCE', dealer.hash_len, dealer.hash_aes_nonce)\n assert_equal(hash1, hash2)", "def validate_block_to_commit(self, block_hash: bytes):\n assert isinstance(block_hash, bytes)\n\n node: 'PrecommitDataManager.Node' = self._precommit_data_mapper.get(block_hash)\n if node is None:\n raise InvalidParamsException(\n f'No precommit data: block_hash={bytes_to_hex(block_hash)}')\n\n block = node.block\n prev_block = self._root.block\n\n if block.height == prev_block.height + 1 \\\n and (block.height == 0 or node.block.prev_hash == prev_block.hash):\n return\n\n raise InvalidParamsException(\n f'Invalid precommit block: prev_block({prev_block}) block({block})')", "def test_list_xrp__ripple_transactions_by_block_hash(self):\n pass", "def _verify_hash(filename: Path, access_calculated_hash: str) -> None:\n calculated_hash = FileAPI.calculate_hash(filename)\n if access_calculated_hash != calculated_hash:\n raise ValueError(\n f\"access log contains hash {access_calculated_hash} but calculated hash of {filename} is {calculated_hash}\"\n )", "def test_block_creation(self):\n CommonTestCases.admin_token_assert_equal(\n self,\n create_block_query,\n create_block_response\n )", "def _validate_random_hashes(self):\n if not os.path.exists(self.src_path) or os.path.isdir(self.src_path) or self.maintype == 'image':\n # Images are converted, we don't have to fear TOCTOU\n return True\n for start_pos, hashed_src in self.random_hashes:\n with open(self.dst_path, 'rb') as f:\n f.seek(start_pos)\n hashed = hashlib.sha256(f.read(self.block_length)).hexdigest()\n if hashed != hashed_src:\n # Something fucked up happened\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create and return an AthenaHook.
def hook(self) -> AthenaHook: return AthenaHook(self.aws_conn_id, log_query=self.log_query)
[ "def hook(self) -> DynamoDBHook:\n return DynamoDBHook(self.aws_conn_id, region_name=self.region_name)", "def add_webhook(self, **kwargs):\r\n allowed = ['scaling_group', 'policy', 'name', 'metadata']\r\n self._check_args(kwargs, allowed)\r\n webhook_id = str(self.webhook_counter.next())\r\n webhook = FakeWebHook(webhook_id, **kwargs)\r\n self.webhooks[webhook_id] = webhook\r\n return webhook", "def create(self, webhook):\n raise NotImplementedError('create webhook is not implemented')", "def get_hook(self) -> IndeximaHook:\n return self._hook", "def create_webhook(self, account_id, webhook):\n response = self.client.post(f'/{account_id}/webhooks', data=webhook.to_json())\n return Response(response, Webhook)", "def add_hook(self, hook):\n h = hook.hash\n self.hooks[h] = hook", "def webhook_create(self, full_name, hook_url, events=None):\n if events is None:\n events = self.WEBHOOKS\n data = {\n 'name': 'web',\n 'active': True,\n 'events': events,\n 'config': {\n 'url': hook_url,\n 'content_type': 'json',\n 'secret': self.webhooks_secret\n }\n }\n response = self.session.post(\n self.API_URL + '/repos/{}/hooks'.format(full_name),\n data=json.dumps(data),\n headers=self._get_headers()\n )\n if response.status_code == 201:\n return response.json()\n return None", "def store_hook(self, hook: str, hook_function: HookFunction, route: Route):\n raise NotImplementedError", "async def create_hook() -> bool:\n await self.__entity_created__()\n return True", "def hooked(fn):\n\n @functools.wraps(fn)\n def hooked_inner(*args, **kwargs):\n hs = HookedState(\n key=fn.func_name,\n fn=fn,\n args=args,\n kwargs=kwargs\n )\n return hs()\n\n return hooked_inner", "def _create_hook_manager() -> PluginManager:\n manager = PluginManager(HOOK_NAMESPACE)\n manager.trace.root.setwriter(logger.debug)\n manager.enable_tracing()\n manager.add_hookspecs(NodeSpecs)\n manager.add_hookspecs(PipelineSpecs)\n manager.add_hookspecs(DataCatalogSpecs)\n manager.add_hookspecs(DatasetSpecs)\n manager.add_hookspecs(KedroContextSpecs)\n return manager", "def hook(func: Callable):\n parameters, return_annotation = _extract_params(func, extract_return=True)\n return Hook(str(func), parameters, return_annotation)", "def generateS3Hook(aws_conn_id:str,**kwargs) -> S3Hook:\n return S3Hook(aws_conn_id)", "def hook(self):\n path = '{}:{}'.format(self.hook_cls.module, self.hook_cls.classname)\n cls = import_object(path)\n return cls", "def build_test_hooks(cfg_filename, log_period, num_warmup=4):\n assert log_period > num_warmup\n return TestHook(cfg_filename, log_period, num_warmup)", "def defineHook (self, hook):\n self._hooks.append (hook)", "def test__Webhook__precreate__2():\n webhook_id = 202302050056\n webhook = Webhook.precreate(webhook_id)\n \n test_webhook = Webhook.precreate(webhook_id)\n vampytest.assert_is(webhook, test_webhook)", "def logging_hook():\n\n def decorator(cls):\n \"\"\"\n decorates the given class and registers an instance\n of it into available logging hooks.\n\n :param type cls: logging hook class.\n\n :returns: logging hook class.\n :rtype: type\n \"\"\"\n\n instance = cls()\n logging_services.register_hook(instance)\n\n return cls\n\n return decorator", "def _before(hook):\n return wraps(hook)(hooks.before(hook))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
There are no movie projections on the given date
def test_show_movie_projections_valid_movie_invalid_date(self): user_input = "show movie projections 1 2014-12-31" expected_output = """Projections for movie 'The Hunger Games: Catching Fire' on date 2014-12-31:""" output = StringIO() try: sys.stdin = StringIO(user_input) sys.stdout = output read_spell(self.cinema) self.assertTrue(expected_output in output.getvalue()) finally: sys.stdin = sys.__stdin__ sys.stdout = sys.__stdout__
[ "def is_projection_empty(self):\n ret_val = self._is_projection_empty()\n return ret_val", "def _is_null_date(fecha):\n return year(fecha) == YEAR_NULL_TERADATA", "def check_missing_dates(dates, data_location):\n suffix = proximus_mobility_suffix()\n full_list = []\n for f in os.listdir(data_location):\n date = f[:-len(suffix)][-8:]\n full_list.append(date)\n missing = set(dates).difference(set(full_list))\n return missing", "def test_make_prediction_no_dates(self):\n current_date = self.expiryInference.expiryDetector.current_date\n\n result = self.expiryInference.make_prediction(None, current_date, None)\n self.assertEqual(result, \"No expiration date found\")", "def check_season():\n # Get current time\n now = time.localtime()\n # element 1 of the now tuple is the month\n if now[1] in (7, 8):\n return False\n else:\n return True", "def test_make_prediction_no_future_dates(self):\n current_date = self.expiryInference.expiryDetector.current_date\n dates = [datetime(2010, 9, 20), datetime(2018, 10, 20), datetime(2013, 3, 17)]\n\n result = self.expiryInference.make_prediction(dates, current_date, None)\n self.assertEqual(result, \"No future expiration date found\")", "def ISO_date_filter(metadata_with_variants):\n print(\"Filtering metadata with variants for entries with a collection day specified.\")\n pattern=\"\\d{4}-\\d{2}-\\d{2}\"\n #Re.search() returns None if there is no match, and an re.match object if there is a match.\n #pd.notna() will return True for the rows that match the expression. \n standard_date=metadata_with_variants[metadata_with_variants.date.apply(lambda x: pd.notna(re.search(pattern,x)))]\n #Report the number of non-standard dates removed\n n_removed=len(metadata_with_variants)-len(standard_date)\n print(\"Excluded {} entries with no defined collection day. Entries remaining: {}\".format(n_removed,len(standard_date)))\n return standard_date", "def check_season():\n # Get current time\n now = datetime.now()\n if now.month in (7, 8):\n return False\n else:\n return True", "def years_in_existence(self):\n return self.franchise.cosmos.year-self.franchise.founded", "def filter_mb_df_dates(mb_df):\n \n mb_df = mb_df[mb_df['release_date'].str[-4:].map(lambda x: int(x)) >= 2010]\n mb_df.drop('release_date', axis = 1, inplace = True)\n \n return mb_df", "def projectionExists(self):\n \n # Go through all matrices and check if the parameters coincide with what we want\n files = glob.glob('matrices/transformationMatrices*.npz')\n for f in files:\n out = np.load(f)\n heights = out['arr_1']\n nStars = out['arr_2']\n nZernike = out['arr_3']\n fov = out['arr_4']\n DTel = out['arr_5']\n ind = np.where(np.in1d(heights, self.heights))[0]\n if (len(ind) == self.nHeight):\n if (nStars == self.nStars and nZernike >= self.nZernike and \n fov == self.fov and DTel == self.DTel):\n self.M = out['arr_0'][0:self.nZernike,0:self.nZernike,ind,:]\n\n # We have found a dataset with the matrices we want. 
Read it.\n if (self.verbose):\n print(\"Projection matrix exists : {0}\".format(f))\n print(\" - Zernike modes: {0}\".format(self.nZernike))\n print(\" - Number of heights : {0} -> {1} km\".format(self.nHeight, self.heights * 1e-3))\n print(\" - FOV: {0} arcsec\".format(206265.*self.fov))\n print(\" - Number of stars : {0}\".format(self.nStars))\n self.MComputed = True\n self.stackProjection()\n return True\n \n return False", "def married(self):\n return not self.divorce_date.split()", "def exclude_date(self, date):\n all_files = self.files # make a copy \n new_files = []\n for fi in all_files: # get data for every file \n hdu_temp = fits.open(f\"{self.data_dir}/{fi}\")\n hdu = hdu_temp[0]\n d = (hdu.header[\"DATE\"][0:10]).replace(\"-\",\"\")\n hdu_temp.close()\n \n if not(date in d): # if file is NOT from the input date \n new_files.append(fi)\n\n if len(new_files) == 0:\n raise NoDataError(\"After exclusion, RawData object would have \"+\n \"no remaining data\") \n \n self.__files = new_files\n self.__dates_init() # rebuild list/dict of dates\n self.__filter_init() # rebuild list/dict of filters ", "def test_valid_date_episode_by_date(self):\n show = Show(show_id=1)\n episodes = show.episodes_by_date('2013-07-01')\n for episode in episodes:\n self.assertEqual('2013-07-01', episode['airdate'])", "def test_no_project_showproj(self):\n self._test_non_admin_operation(ccdlib.OP_SHOWPROJ)", "def test_user_date_not_present(self):\n self.add_expected()\n wrong_date = datetime.now()\n # make the date year 1, month 1, day 1\n wrong_date = wrong_date.replace(1, 1, 1)\n # given the user_list\n self.user.transactions = self.expected_list\n user_history = self.user.get_records_by_date(wrong_date, False)\n # there should be no records\n assert user_history == self.create_transaction()\n user_history = self.user.get_records_by_date(wrong_date, True)\n # there should be no records\n assert user_history == self.create_transaction()", "def precipitation():\r\n # Query all past 12 month\r\n one_year_ago = '2016-08-23'\r\n results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= one_year_ago).order_by(Measurement.date)\r\n\r\n # Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.\r\n results_list = []\r\n for date, prcp in results:\r\n results_list.append({date: prcp})\r\n \r\n return jsonify(results_list)", "def test_find_tracks_with_spc_date_minus1(self):\n\n these_track_indices = best_tracks_smart_io._find_tracks_with_spc_date(\n STORM_OBJECT_TABLE, STORM_TRACK_TABLE, spc_date_unix_sec=-1)\n self.assertTrue(numpy.array_equal(\n these_track_indices, TRACK_INDICES_FOR_SPC_DATE_MINUS1))", "def precipitation():\n session = Session(engine)\n \n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date\n\n last_date = dt.datetime.strptime(last_date, \"%Y-%m-%d\")\n\n first_date = last_date - timedelta(days=365)\n\n last_year_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= first_date).all()\n return jsonify(last_year_data)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read and append errors to the log
def read_errors(self): self.output.append(str(self.process.readAllStandardError()))
[ "def print_errors(self):\n if not os.path.exists(self.errlog):\n print('Error file have been not created!')\n return\n\n with open(self.errlog, 'r') as fid:\n print(fid.read())", "def _log_errors(errors):\n # NOTE: DataCiteError is a tuple with the errors on the first\n errors = json.loads(errors.args[0])[\"errors\"]\n for error in errors:\n field = error[\"source\"]\n reason = error[\"title\"]\n logging.warning(f\"Error in {field}: {reason}\")", "def log_error(err, data):\n with open(ERROR_FILE,'a', encoding=\"utf-8\") as fo:\n fo.write('Error: {} \\nData:{}\\n\\n'.format(err,data))", "def log_error(e):\r\n\tprint(e)", "def _read_error(err, name): # pragma: no cover\n\n err.info((\"testcases_content\",\n \"_read_error\",\n \"read_error\"),\n \"File could not be read: %s\" % name,\n \"\"\"A File in the archive could not be read. This may be\n due to corruption or because the path name is too\n long.\"\"\",\n name)", "def errReceived(self, data):\n self.log.error(data)", "def write_scan_error(msg):\n with open(os.path.join('persist', 'scan_err.log'), 'a') as scan_errors_file:\n scan_errors_file.write(msg + '\\n')", "def log_error(self, error_dict):\n error_file = os.path.join(self.run_dir, 'errors.json')\n errors = []\n if os.path.exists(error_file):\n with open(error_file, 'r') as f:\n errors = json.load(f)\n errors.append(error_dict)\n with open(error_file, 'w') as f:\n json.dump(errors, f, indent=4)", "def log_error(self, error, save=False):\n self.log[\"errors\"].append(f\"{error}\")\n if save:\n self.save_log()", "def find_exception_from_logs_and_save(self, start_time, stop_time, name_prefix=\"\", copy_location=\"\"):\n self.run_folder = get_config_value(\"reporting_folder_run\")\n self.report_folder = get_config_value(\"reporting_folder\")\n error_log_file = open(self.report_folder + os.sep + \"error_logs.txt\", \"w\")\n error_log_file.write(\"\\nLOG START TIME: \" + start_time + \"\\n\")\n has_error = False\n for log_file in strings.ss_all_logs:\n log_file_name = log_file.split(\"/\")[-1]\n try:\n log_content = get_file_content(os.path.join(self.run_folder, log_file_name))\n except:\n continue\n\n for line in log_content:\n if \"] ERROR\" in line.upper():\n has_error = True\n print(log_file_name + \": \" + line)\n error_log_file.write(log_file_name + \": \" + line)\n elif \".EXCEPTION\" in line.upper():\n has_error = True\n error_log_file.write(log_file_name + \": \" + line)\n elif \"HTTPERROR\" in line.upper():\n has_error = True\n error_log_file.write(log_file_name + \": \" + line)\n\n error_log_file.write(\"\\nLOG STOP TIME: \" + stop_time)\n error_log_file.close()\n if has_error:\n self.warning(\"Error log has errors\")\n for log_file in strings.ss_all_logs:\n log_file_name = log_file.split(\"/\")[-1]\n copy_location = copy_location.split(\"error_logs.txt\")[0]\n print(copy_location)\n try:\n if not os.path.exists(copy_location):\n os.makedirs(copy_location)\n command = \"sudo cp \" + self.run_folder + os.sep + log_file_name + \" \" + copy_location + name_prefix + \"_\" + log_file_name\n\n self.run_bash_command(command, False)\n except AssertionError:\n self.warning(\"Could not copy file \" + log_file_name)\n\n return has_error", "def get_errors(self, queue_id):\n try:\n errorlog = self._get_stderr_path(queue_id)\n except ValueError, e:\n errors = str(e)\n else:\n if os.path.exists(errorlog):\n err_f = open(errorlog, 'r')\n errors = err_f.read()\n err_f.close()\n \n\terrors += \"\\nReturned exit_status %d\"%self._check_job_return_status(queue_id) \n\n return errors", "def 
errors(logs_dir, worker):\n root_log_dir = logs_dir\n worker_log_dir = logs_dir + \"/workers/\"\n if worker is None:\n worker = \"all\"\n\n if worker == \"all\":\n files = []\n directories = []\n for (_, _, filenames) in walk(root_log_dir):\n for file in filenames:\n if file.endswith(\".err\"):\n print_log(file, root_log_dir)\n break\n\n files = []\n directories = []\n for (dirpath, dirnames, filenames) in walk(worker_log_dir):\n directories.extend(dirnames)\n break\n\n for directory in directories:\n specific_worker_log_dir = worker_log_dir + directory\n for (_, _, filenames) in walk(specific_worker_log_dir):\n files.extend(filenames)\n for file in [file for file in filenames if \"collection\" in file and file.endswith(\".err\")]:\n print_log(file, specific_worker_log_dir)\n break\n else:\n files = []\n specific_worker_log_dir = worker_log_dir + \"/\" + worker + \"/\"\n for (_, _, filenames) in walk(specific_worker_log_dir):\n files.extend(filenames)\n for file in [file for file in filenames if \"collection\" in file and file.endswith(\".err\")]:\n print_log(file, specific_worker_log_dir)\n break", "def read_errors(self):\n result = \"\"\n\n while not self.stderr_queue.empty():\n # There is no potential for race conditions here because this is the only place\n # where we read from the stderr queue.\n result += _read_line(self.stderr_queue, None, \"utf-8\", \"replace\", False)\n\n return result", "def test_stderr(self):\n tmp_file = os.path.join(tmp_dir_path,'tmp_log')\n saved_stderr = sys.stderr\n tmp_stderr = os.path.join(tmp_dir_path,'tmp_stderr')\n with open(tmp_stderr,'w') as sys.stderr:\n with EppLogger(tmp_file, prepend=False) as epp_logger:\n print('stderr nosetest', file=sys.stderr)\n sys.stderr = saved_stderr\n with open(tmp_stderr,'r') as stderr:\n stream_lines = stderr.readlines()\n assert 'stderr nosetest' in stream_lines[-1]\n\n with open(tmp_file,'r') as log_file:\n log_lines = log_file.readlines()\n assert 'stderr nosetest' in log_lines[-1]", "def reduce_log():\n _logger = logging.getLogger('werkzeug')\n _logger.setLevel(logging.ERROR)", "def set_error_logger():\n\n\n today = datetime.today().strftime(DATE_FORMAT)\n filename = ERROR_FILENAME.format(today)\n\n if not os.path.exists(DIRECTORY_ERROR):\n os.makedirs(DIRECTORY_ERROR)\n\n logging.basicConfig(\n filename=filename, level=logging.DEBUG,\n format=FORMAT_LOGGING,\n datefmt=DATE_TIME_FORMAT\n )\n logging.debug(DATA_ERROR_LOG)", "def _readStdErr(self, *dumArgs):\n self.logWdg.addOutput(self.subProc.stderr.read(), severity=RO.Constants.sevError)\n if self.subProc.poll() is not None:\n self._cleanup()", "def cmd_error(self):\n self.log.setLevel(logging.ERROR)\n self.log.error('Switching to ERROR threshold')", "def check_eval_log(self) -> None:\n\n error = 0\n if not os.path.exists('eval.log'):\n self.log.error('Evaluation failure: eval.log not found')\n else:\n log_msgs: Set[str] = set()\n with open('eval.log', 'r', errors='replace') as filep:\n for line in filep:\n if line.find('ERROR') != -1:\n msg = line[line.find(':') + 2:-1]\n if msg not in log_msgs:\n self.log.error(msg)\n log_msgs.add(msg)\n error += 1\n if error > 0:\n self.log.error(\n 'The default point encounters %d errors. See %s/evaluate for details', error,\n self.args.work_dir)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the URI for a route named name.
def reverse(name, *args, **kwargs): return webapp2.uri_for(name, *args, **kwargs)
[ "def url_for(self, name: str, **kwargs) -> str:\n route = self._router.get_route_or_404(name)\n return route.url(**kwargs)", "def route(self, name, params={}, full=False):\n from config import application\n if full:\n route = application.URL + self._get_named_route(name, params)\n else:\n try:\n route = self._get_named_route(name, params)\n except KeyError:\n params = {}\n params.update(self.url_params)\n route = self._get_named_route(name, params)\n\n if not route:\n raise RouteException(\"Route with the name of '{}' was not found.\".format(name))\n\n return route", "def route_path(route_name, request=None, **kwargs):\n if not request:\n request = DummyRequest()\n\n return pyramid.url.route_path(route_name, request, **kwargs)", "def _url(route):\n return \"%s%s\" % (c['base_address'], route)", "def _get_named_route(self, name, params):\n web_routes = self.container.make('WebRoutes')\n\n for route in web_routes:\n if route.named_route == name:\n return self.compile_route_to_url(route.route_url, params)\n\n raise RouteException(\"Could not find the route with the name of '{}'\".format(name))", "def _build_url(self, route):\n return \"{0}/{1}\".format(self.base_url, route)", "def fireplace_route(path, name=None):\n kwargs = {}\n if name:\n kwargs['name'] = name\n return url('^%s$' % path, views.commonplace, {'repo': 'fireplace'},\n **kwargs)", "def route_name(self, fmt=\"{self.route_short_name}-{self.route_long_name}\"):\n if not self.is_cached_data_valid('_route_name'):\n log.warn(\"query route name\")\n ret_val = self.route_long_name\n if self.route_long_name and self.route_short_name:\n ret_val = self.route_short_name + \"-\" + self.route_long_name\n elif self.route_long_name is None:\n ret_val = self.route_short_name\n self._route_name = ret_val\n self.update_cached_data('_route_name')\n\n return self._route_name", "def get_url_from_viewname(self, viewname):\n\n return reverse(\n viewname,\n kwargs={\n 'newsletter_slug': self.newsletter.slug,\n 'action': self.action\n }\n )", "def get_matching_route_path(scope: Dict[Any, Any], routes: List[Route], route_name: Optional[str] = None) -> str:\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount) and route.routes:\n child_route_name = get_matching_route_path(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path", "def find_route_by_name(route, deployment, app):\n unique_name = generate_name(route, deployment)\n for i, route in enumerate(app.router.routes):\n if route.name == unique_name:\n return route, i\n return None, -1", "def route(self, address):\n return self._routing_table[address][0]", "def uri_for(reference):\n return reference._uri", "def get_uri(host, port):\n if port:\n return '%s:%s' % (host, port)\n return host", "def rule_path(rule_name: str, args: dict = None) -> str:\n return _rules.path(rule_name, args)", "def get_uri(self, request):", "def reverse_url(self, name, *args):\r\n if name in self.named_handlers:\r\n return self.named_handlers[name].reverse(*args)\r\n raise KeyError(\"%s not found in named urls\" % name)", "def build_uri(self, request):\n return request.build_absolute_uri(self.path)", "def route_host_name(self) -> Optional[str]:\n return pulumi.get(self, \"route_host_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Custom Jinja2 factory method to insert our own extensions.
def jinja2_factory(app): config = { 'globals': {'reverse': reverse}, 'filters': {} } return jinja2.Jinja2(app, config)
[ "def instantiate_extensions(self, template):\n return [ ext_cls(template) for ext_cls in self.extensions ]", "def jinja_engine():\n\treturn JinjaEngine()", "def register_template_extensions(\n cls,\n exts_fn: Callable[[CompileCtx], Dict[str, Any]]\n ) -> None:\n assert not cls._template_extensions_frozen\n CompileCtx._template_extensions_fns.append(exts_fn)", "def generate_extensions(render_math):\n # We need jsmath to get pretty plain-text latex in docstrings\n extensions = []\n if sphinx.__version__ < \"1.1\" or not render_math:\n extensions = ['sphinx.ext.jsmath']\n else:\n extensions = ['sphinx.ext.mathjax']\n\n # For scipy and matplotlib docstrings, which need this extension to\n # be rendered correctly (see Spyder Issue #1138)\n extensions.append('sphinx.ext.autosummary')\n\n # Plots\n try:\n # TODO: Add an option to avoid importing mpl every time\n import matplotlib # analysis:ignore\n extensions.append('plot_directive')\n except ImportError:\n pass\n\n return extensions", "def load_extension(import_path: str) -> type[jinja2.ext.Extension]:\n with reraise(TemplateExtensionNotFoundError(import_path)):\n extension = import_object(import_path)\n\n if not (\n isinstance(extension, type) and issubclass(extension, jinja2.ext.Extension)\n ):\n raise TemplateExtensionTypeError(import_path, str(type(extension)))\n\n return extension", "def create_jinja_environment(self):\n return super(CTFdFlask, self).create_jinja_environment()", "def jinja_loader(self):\n return ModuleTemplateLoader(\n self.database_name, searchpath=self.template_folder,\n )", "def passthrough_engine():\n\treturn JinjaEngine()", "def set_jinja2_options(self, **kw):\n\t\tglobal jinja_env\n\t\tjinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), **kw)", "def __init__(self, **kwargs: Any) -> None:\n template_loader = jinja2.FileSystemLoader(searchpath=str(pathlib.Path(__file__).parent / \"jinja\" / \"templates\"))\n self.template_parser = jinja2.Environment(\n loader=template_loader,\n lstrip_blocks=True,\n autoescape=True,\n **kwargs,\n )", "def _process_jinja(self,\n spec: PosixPath,\n options: CombinedOptions) -> str:\n self.logger.debug('Using jinja mode')\n data = yaml.safe_load(open(spec, encoding=\"utf8\"))\n additional = options.get('additional_json_path')\n if additional:\n if not additional.exists():\n self._warning(f'Additional swagger spec file {additional} is missing. 
Skipping')\n else:\n add = yaml.safe_load(open(additional, encoding=\"utf8\"))\n data = {**add, **data}\n if options.is_default('template') and not Path(options['template']).exists():\n copyfile(\n resource_filename(\n __name__,\n 'template/' + self.defaults['template']\n ),\n options['template']\n )\n return self._to_md(data, options['template'])", "def get_template_ext(self) -> str:", "def register_extension(extension):\n if not extension in markdown_extensions:\n markdown_extensions.append(extension)", "def templateFilter(func):\n jinja2_env.filters[func.__name__] = func", "def add_plim_renderer(config, extension, mako_settings_prefix='mako.', preprocessor='plim.preprocessor'):\r\n renderer_factory = MakoRendererFactory()\r\n config.add_renderer(extension, renderer_factory)\r\n\r\n def register():\r\n settings = copy.copy(config.registry.settings)\r\n settings['{prefix}preprocessor'.format(prefix=mako_settings_prefix)] = preprocessor\r\n\r\n opts = parse_options_from_settings(settings, mako_settings_prefix, config.maybe_dotted)\r\n lookup = PkgResourceTemplateLookup(**opts)\r\n\r\n renderer_factory.lookup = lookup\r\n\r\n # read about config.action() at\r\n # http://docs.pylonsproject.org/projects/pyramid/en/latest/narr/extconfig.html#using-config-action-in-a-directive\r\n config.action(('plim-renderer', extension), register)", "def expandTemplate(*args):\n ret = generate(args[0], args[1:])\n return ret", "def extend_template_exceptions(more_exceptions: '', klass=None):\n if klass is None:\n from synctree.templates import DefaultTemplate\n klass = DefaultTemplate\n return f\"{klass._exceptions} {more_exceptions}\"", "def inject_js(js):\n if 'injected_js' not in g:\n g.injected_js = []\n g.injected_js.append(Markup(js))", "def configure_extension(name: str, path: str):\n if isinstance(path, str):\n path = [path]\n return Extension(\n name,\n path,\n language='c++',\n include_dirs=INCLUDE_DIRS,\n libraries=LIBS,\n library_dirs=LIB_DIRS,\n extra_compile_args=COMPILE_ARGS,\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a secure cookie session for flash messages. Django and Flask have a similar implementation. If you do not use flash messages, then no secure cookie is written. To add a flash message self.flash.add_flash('Foobar!') To get all flash messages messages = [value for value, level in self.flash.get_flashes()] It is fine that the flash messages are visible in a secure cookie because the user will see them in the next response any way.
def flash(self):
    # Need to supply a name to avoid using the same default cookie name
    return self.session_store.get_session(
        name='gordon', backend='securecookie')
[ "def messages(self, clear=True):\n flashes = self.controller.session.get('__flash', None) or {}\n if clear:\n self.controller.session['__flash'] = {}\n return flashes", "def set_secure_cookie(self, name, value):\n secure_val = make_secure_val(value)\n self.response.set_cookie(name, secure_val)", "def joined_flash(*args, **kwargs):\n \n return join(request.session.flash, *args, **kwargs)", "def secure_cookie():\n return request.environ['wsgi.url_scheme'] == 'https'", "def common_flashes(flash):\r\n flashes = {\r\n \"not_logged\":[\"You are not logged in. Please login or signup and try again\",'alert-danger'],\r\n \"not_authorized\":[\"You Are Not Authorized To View That Account\",'alert-danger'],\r\n \"fb_deleted\":[\"Feedback Has Been Deleted\",\"alert-success\"],\r\n \"missing_user\": [\"Account Not In The System, Please Create An Account\",\"alert-warning\"],\r\n \"password_reset\": [\"Your Password Has Successfully Been Reset, Try Logging In Again\",\"alert-success\"]\r\n }\r\n message = flashes[flash] \r\n return message", "def flash(self):\n\n pass", "def get_flash(self, key):\n try:\n return self.flash_data()[key]\n except KeyError:\n return None", "def persist_apple_session(request, response):\n patch_vary_headers(response, (\"Cookie\",))\n request.apple_login_session.save()\n kwargs = {}\n if django.VERSION >= (2, 1):\n samesite = getattr(settings, \"SESSION_COOKIE_SAMESITE\", None)\n if samesite:\n kwargs[\"samesite\"] = samesite\n response.set_cookie(\n APPLE_SESSION_COOKIE_NAME,\n request.apple_login_session.session_key,\n max_age=None,\n expires=None,\n domain=settings.SESSION_COOKIE_DOMAIN,\n # The cookie is only needed on this endpoint\n path=urlparse(response.url).path,\n secure=True,\n httponly=None,\n **kwargs\n )", "def success(message):\n content = {'type': 'success', 'content': message} \n flash(content)", "def set_cookie_secure(f):\r\n @wraps(f)\r\n def wrapped(self, *args, **kwargs):\r\n # Default to secure=True unless:\r\n # - feature disabled or\r\n # - secure=* defined in set_cookie call or\r\n # - this is not an HTTPS request.\r\n if (getattr(settings, 'COOKIES_SECURE', True) and\r\n 'secure' not in kwargs and\r\n os.environ.get('HTTPS', 'off') == 'on'):\r\n kwargs['secure'] = True\r\n\r\n # Set httponly flag unless feature disabled. 
Defaults to httponly=True\r\n # unless httponly=* was defined in set_cookie call.\r\n if (getattr(settings, 'COOKIES_HTTPONLY', True) and\r\n 'httponly' not in kwargs):\r\n kwargs['httponly'] = True\r\n\r\n return f(self, *args, **kwargs)\r\n\r\n return wrapped", "def make_secure_cookie(user_id):\n secure_id = hmac.new(secret, user_id).hexdigest()\n return \"%s|%s\" % (user_id, secure_id)", "def add_message_to_session(request, message):\n i = 0\n\n if 'messages' in request.session:\n while str(i) in request.session['messages']:\n i += 1\n else:\n request.session['messages'] = dict()\n\n request.session.modified = True\n request.session['messages'][i] = message\n return request", "def write_cookie(self):\n c = SimpleCookie()\n sid = self.generate_sid()\n while sid in self.sessions:\n sid = self.generate_sid()\n c['sid'] = sid\n self.send_header('Set-Cookie', c.output(header=''))\n return sid", "def process_response(self, request, response):\n try:\n accessed = request.session.accessed\n modified = request.session.modified\n empty = request.session.is_empty()\n except AttributeError:\n pass\n else:\n # First check if we need to delete this cookie.\n # The session should be deleted only if the session is entirely empty\n if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:\n response.delete_cookie(\n settings.SESSION_COOKIE_NAME, domain=settings.SESSION_COOKIE_DOMAIN)\n else:\n if accessed:\n patch_vary_headers(response, ('Cookie',))\n if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:\n if request.session.get_expire_at_browser_close():\n max_age = None\n expires = None\n else:\n max_age = request.session.get_expiry_age()\n expires_time = time.time() + max_age\n expires = cookie_date(expires_time)\n # Save the session data and refresh the client cookie.\n # Skip session save for 500 responses, refs #3881.\n if response.status_code != 500:\n try:\n request.session.save()\n # except UpdateError:\n except Exception:\n # The user is now logged out; redirecting to same\n # page will result in a redirect to the login page\n # if required.\n return redirect(request.path)\n cookie_path = self._get_cookie_path(request)\n logger.info(\n \"step:cas-7.4:set cookie-path to %s\" % cookie_path)\n\n response.set_cookie(\n settings.SESSION_COOKIE_NAME,\n request.session.session_key, max_age=max_age,\n expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,\n path=cookie_path,\n # path=\"/\",\n secure=settings.SESSION_COOKIE_SECURE or None,\n httponly=settings.SESSION_COOKIE_HTTPONLY or None,\n )\n logger.info(\"Create session %s for path: %s\" % (\n request.session.session_key, cookie_path))\n\n if response.has_header('set-cookie'):\n logger.info(\n \"step:cas-7.4: Set-Cookie response Header set to: %s\" % response['Set-Cookie'])\n return response", "def save(self,django_response=None):\n Loader.capi.cppcms_capi_session_save(self.d)\n self.check()\n if django_response:\n ck = self.cookies()\n for c in ck:\n key=c.name()\n value=c.value()\n max_age = None\n if(c.max_age_defined()):\n max_age = c.max_age()\n expires=None\n if(c.expires_defined()):\n expires=datetime.utcfromtimestamp(c.expires())\n path=None\n if c.path()!='':\n path=c.path()\n domain=None\n if c.domain()!='':\n domain=c.domain()\n secure=None\n if c.is_secure():\n secure=True\n django_response.set_cookie(key, value, max_age, None, path, domain, secure)\n #django_response['Set-Cookie']=c.header_content()", "def process_response(self, request, response):\n try:\n accessed = request.web_session.accessed\n modified = 
request.web_session.modified\n except AttributeError:\n pass\n else:\n if accessed:\n patch_vary_headers(response, ('Cookie',))\n if modified or settings.SESSION_SAVE_EVERY_REQUEST:\n if request.web_session.get_expire_at_browser_close():\n max_age = None\n expires = None\n else:\n max_age = request.web_session.get_expiry_age()\n expires_time = time.time() + max_age\n expires = cookie_date(expires_time)\n # Save the session data and refresh the client cookie.\n # Skip session save for 500 responses, refs #3881.\n if response.status_code != 500:\n request.web_session.save()\n response.set_cookie(settings.SESSION_COOKIE_NAME,\n request.web_session.session_key, max_age=max_age,\n expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,\n path=settings.SESSION_COOKIE_PATH,\n secure=settings.SESSION_COOKIE_SECURE or None,\n httponly=settings.SESSION_COOKIE_HTTPONLY or None)\n return response", "def get_messages(request):\r\n messages = request.session.get('messages', [])\r\n request.session['messages'] = []\r\n return messages", "def check_secure_cookie(self, name):\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)", "def show_session():\n if CONFIGURATION.DEBUG:\n try:\n return '%s' % (session['user'])\n except KeyError:\n return 'Not logged in'\n else:\n redirect(url_for('.root'))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Will check to see if the user has a role specified in roles for this object. It needs to find the object, and it does this in two ways. If you specify a param, it will look in the arguments passed to the decorated function for a param named whatever you pass to param in this ctor. If that fails, you can specify get_from, which is the string name of another object that will be in check()'s kwargs. This object must have a property named whatever is passed to param in this ctor.
def __init__(self, roles, param=None, get_from=None):
    self.param = param
    self.roles = roles
    self.get_from = None
    if get_from:
        self.get_from = isinstance(get_from, (list, tuple)) and get_from or [get_from]
[ "def _existing_only(func):\n\n @wraps(func)\n def _check_existence(db, entity, role=None, *, rolename=None):\n if isinstance(role, str):\n rolename = role\n if rolename is not None:\n # if given as a str, lookup role by name\n role = orm.Role.find(db, rolename)\n if role is None:\n raise ValueError(f\"Role {rolename} does not exist\")\n\n return func(db, entity, role)\n\n return _check_existence", "def user_is(role, user=None):\n def wrapper(func):\n @wraps(func)\n def inner(*args, **kwargs):\n from .models import Role\n desired_role = Role.query.filter_by(\n name=role).first()\n if not user:\n try:\n from flast.ext.login import current_user as user\n except ImportError:\n raise ImportError(\n 'User argument not passed and Flask-Login current_user could not be imported.')\n if desired_role in user.roles:\n return func(*args, **kwargs)\n else:\n # Make this do someting way better.\n return \"You do not have access\"\n return inner\n return wrapper", "def validate_role(context, param, value):\n role = context.obj.api.role_by_name(value)\n if role:\n return role\n else:\n raise click.BadParameter(\"\\\"%s\\\" was not found\" % value)", "def get_role(obj, role_name):\n for role in obj.roles:\n if role.name == role_name:\n return role\n return None", "def _role_is_present(self, role):\n if role in self._present:\n return True\n if role in self._not_present:\n return False\n if self.actor is not None:\n if role not in self.obj.__roles__:\n self._not_present.add(role)\n return False\n # granted_via says a role may be granted by a secondary object that sits\n # in a relationship between the current object and the actor. The secondary\n # could be a direct attribute of the current object, or could be inside a\n # list or query relationship. _roles_via_relationship will check.\n # The related object may grant roles in one of three ways:\n # 1. By its mere existence (default).\n # 2. By offering roles via an `offered_roles` property (see `RoleGrantABC`).\n # 3. By being a `RoleMixin` instance that has a `roles_for` method.\n if 'granted_via' in self.obj.__roles__[role]:\n for relattr, actor_attr in self.obj.__roles__[role][\n 'granted_via'\n ].items():\n offer_map = self.obj.__relationship_role_offer_map__.get(relattr)\n if (relattr, actor_attr) not in self._scanned_granted_via:\n relationship = self.obj._get_relationship(relattr)\n if relationship is not None:\n # Optimization: does the same relationship grant other roles\n # via the same actor_attr? Gather those roles and check all\n # of them together. However, we will use a single role\n # offer map and not consult the one specified on the other\n # roles. They are expected to be identical. 
This is\n # guaranteed if the offer map was specified using\n # `with_roles(grants_via=)` but not if specified directly\n # in `__roles__[role]['granted_via']`.\n possible_roles = {role}\n for arole, actions in self.obj.__roles__.items():\n if (\n arole != role\n and 'granted_via' in actions\n and relattr in actions['granted_via']\n and _attrs_equal(\n actions['granted_via'][relattr], actor_attr\n )\n ):\n possible_roles.add(arole)\n\n granted_roles = _roles_via_relationship(\n self.actor,\n relationship,\n actor_attr,\n possible_roles,\n offer_map,\n )\n self._present.update(granted_roles)\n self._scanned_granted_via.add((relattr, actor_attr))\n if role in granted_roles:\n return True\n # granted_by says a role is granted by the actor being present in a\n # relationship\n if 'granted_by' in self.obj.__roles__[role]:\n for relattr in self.obj.__roles__[role]['granted_by']:\n if relattr not in self._scanned_granted_by:\n relationship = self.obj._get_relationship(relattr)\n is_present = _actor_in_relationship(self.actor, relationship)\n if is_present:\n self._present.add(role)\n # Optimization: does this relationship grant other roles?\n # Get them rightaway. Don't query again later.\n for arole, actions in self.obj.__roles__.items():\n if (\n arole != role\n and 'granted_by' in actions\n and relattr in actions['granted_by']\n ):\n self._present.add(arole)\n return True\n self._scanned_granted_by.add(relattr)\n self._not_present.add(role)\n return False", "def require_role(role):\n\n def make_wrapper(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if not role in g.roles:\n raise Forbidden('RBAC Forbidden')\n\n return fn(*args, **kwargs)\n\n return wrapper\n return make_wrapper", "def user_has_role_backend(*args, **kwargs):\n return user_has_role(*args, **kwargs)", "def isMember(*args, **kwargs):\n \n pass", "def check_member(self, role_id, user_id):\n return self.check_relationship(\n address_function=addresser.make_role_members_address,\n role_id=role_id,\n user_id=user_id,\n )", "def _roles_via_relationship(actor, relationship, actor_attr, roles, offer_map):\n relobj = None # Role-granting object found via the relationship\n\n # There is no actor_attr. Check if the relationship is a RoleMixin and call\n # roles_for to get offered roles, then remap using the offer map.\n if actor_attr is None:\n if isinstance(relationship, RoleMixin):\n offered_roles = relationship.roles_for(actor)\n if offer_map:\n offered_roles = set(\n chain.from_iterable(\n offer_map[role] for role in offered_roles if role in offer_map\n )\n )\n return offered_roles\n raise TypeError(\n f\"{relationship!r} is not a RoleMixin and no actor attribute was specified\"\n )\n\n # We have a relationship. If it's a collection, find the item in it that relates\n # to the actor.\n if isinstance(relationship, (AppenderMixin, Query)):\n # Query-like relationship. Run a query. It is possible to have multiple matches\n # for the actor, so use .first()\n # TODO: Consider retrieving all and consolidating roles from across them in case\n # the objects are RoleGrantABC. This is not a current requirement and so is not\n # currently supported; using the .first() object is sufficient\n if isinstance(actor_attr, QueryableAttribute):\n relobj = relationship.filter(operator.eq(actor_attr, actor)).first()\n else:\n relobj = relationship.filter_by(**{actor_attr: actor}).first()\n elif isinstance(relationship, abc.Iterable):\n # List-like object. Scan through it looking for item related to actor.\n # Note: strings are also collections. 
Checking for abc.Iterable is only safe\n # here because of the unlikeliness of a string relationship. If that becomes\n # necessary in future, add `and not isinstance(relationship, str)`\n for relitem in relationship:\n if getattr(relitem, actor_attr) == actor:\n relobj = relitem\n break\n\n # Not any sort of collection. May be a scalar relationship\n elif getattr(relationship, actor_attr) == actor:\n relobj = relationship\n if not relobj:\n # Didn't find a relationship object. Actor gets no roles\n return ()\n\n # We have a related object. Get roles from it\n if isinstance(relobj, RoleGrantABC):\n # If this object grants roles, get them. It may not grant the one we're looking\n # for and that's okay. Grab the others\n offered_roles = relobj.offered_roles\n # But if we have an offer_map, remap the roles and only keep the ones\n # specified in the map\n if offer_map:\n offered_roles = set(\n chain.from_iterable(\n offer_map[role] for role in offered_roles if role in offer_map\n )\n )\n return offered_roles\n # Not a role granting object. Implies that the default roles are granted\n # by its very existence.\n return roles", "def init_role(role): # -> None:\n ...", "def cmd_role_get(self, args):\n role_id = args[0]\n self._get_obj(role_id, 'role')", "def role_id_arg(f):\n return click.argument(\"role_id\")(f)", "async def whohas(self, ctx, *, role: str):\n\n role = await helpers.role_by_substring(ctx, role)\n\n members_with_role = []\n for member in ctx.guild.members:\n if role in member.roles:\n members_with_role.append(member.mention)\n if not members_with_role:\n await ctx.send(\"Nobody has that role. :<\")\n else:\n embed = discord.Embed(title=f\"Members with {role.name}: {len(members_with_role)}\")\n embed.description = \", \".join(members_with_role[:30])\n if len(members_with_role) > 30:\n embed.set_footer(text=f\"...and {len(members_with_role)-30} others.\")\n await ctx.send(embed=embed)", "async def searchin(self, ctx, *args):\n\t\tif len(args)<2:\n\t\t\tmessage = await ctx.send('Invalid syntax for the command.')\n\t\t\tself.bot.set_answer(ctx.message.id, message)\n\t\t\treturn\n\n\t\trole = ' '.join(args[:-1]).lower()\n\t\tname = args[-1].lower()\n\n\t\tfor r in ctx.guild.roles:\n\t\t\tif r.name.lower()==role:\n\t\t\t\trole = r\n\t\t\t\tbreak\n\t\telse:\n\t\t\tmessage = await ctx.send(\"Error: I haven't found the role on this server.\")\n\t\t\tself.bot.set_answer(ctx.message.id, message)\n\t\t\treturn\n\n\t\tmembers = []\n\t\tfor m in ctx.guild.members:\n\t\t\tif role not in m.roles:\n\t\t\t\tcontinue\n\t\t\tif m.nick is not None and name in m.nick.lower() or name in m.name.lower():\n\t\t\t\tmembers.append(f\"{self.get_emoji(m.status.value)} {m.display_name}\")\n\n\t\tembed = discord.Embed()\n\t\tembed.title = f\"Members with the role `{role}`:\"\n\t\tembed.description = \"\\n\".join(members)\n\t\tmessage = await ctx.send(embed=embed)\n\t\tself.bot.set_answer(ctx.message.id, message)", "def access_for(self, roles=None, actor=None, anchors=(), datasets=None):\n if roles is None:\n roles = self.roles_for(actor=actor, anchors=anchors)\n elif actor is not None or anchors:\n raise TypeError(\n 'If roles are specified, actor/anchors must not be specified'\n )\n return RoleAccessProxy(\n self, roles=roles, actor=actor, anchors=anchors, datasets=datasets\n )", "def CheckOwnership(*args, **kwargs):\n pass", "def event_creator_role_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n try:\n if current_user.user_role >= 2:\n return f(*args, **kwargs)\n else:\n flash(\"You need to be a event 
creator for this action.\")\n return jsonify(Error=\"Event creator role required \"),401\n except AttributeError as e:\n return jsonify(Error=\"You need to Log in: \"),403\n return wrap", "def test_read_role(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A function that produces a list of unique pixel values for a set of images
def extract_pixel_vals(ref_img_list):
    from scipy import misc
    import numpy as np
    imRef = []
    for ref in range(len(ref_img_list)):
        tmpRef = misc.imread(ref_img_list[ref])
        for i in range(tmpRef.shape[0]):
            for j in range(tmpRef.shape[1]):
                imRef.append(tuple(tmpRef[i,j,:]))
    test = set(imRef)
    return test
[ "def unique(kernels):\n r, s = list(), set()\n for kernel in kernels:\n if isinstance(kernel.length, list):\n key = tuple(kernel.length) + (kernel.scheme,)\n else:\n key = (kernel.length, kernel.scheme)\n if key not in s:\n s.add(key)\n r.append(kernel)\n return r", "def load_image_values(location):\n im = Image.open(location)\n im = im.resize((y_dim, x_dim))\n lst = np.array(im)\n lst = (lst / 128) - 1\n return lst", "def img_sets():\n return [\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train',\n 'tvmonitor']", "def get_image_uuids(ibs, gid_list):\n image_uuid_list = ibs.db.get(IMAGE_TABLE, ('image_uuid',), gid_list)\n return image_uuid_list", "def unique(array):\n\tpass", "def unique(inputs):\n return tf.unique(inputs)", "def unique_rows(a):\n return np.array(list(set(tuple(p) for p in a)))", "def getAllUniqueImgLinks(baseUrl):\n\tgetBodyLinks = getAllImages(baseUrl) \n\t#remove all pound sign elements\n\tsubList = removeFromList(getBodyLinks, '#')\t\n\t#remove all non base url links\n\t#mainList = removeNonSubUrlLinks(subList)\n\tmainList = subList\n\t#remove all duplicates\n\tfinalList = list(set(mainList))\t\t\n\t#Strip out similar duplicates by removing all / from urls that need it\n\tmasterList = set(map(lambda url: url.rstrip('/'), finalList))\n\tlistOfLinks = list(masterList)\n\treturn listOfLinks", "def unique_factvalues(raw_facts):\n factvalues = set([])\n for fact in raw_facts:\n factvalues.add(fact.value)\n return factvalues", "def getRed(img):\n indices = np.where(np.all(img == (200/255, 72/255, 72/255), axis=-1))\n return list(zip(indices[0], indices[1]))", "def unbatchify(data):\n images_ = []\n labels_ = []\n\n for image, label in data.unbatch().as_numpy_iterator():\n images_.append(image)\n labels_.append(unique_breeds[np.argmax(label)])\n return images_, labels_", "def _filterImgIds(self, imgIds):\n imgIds = imgIds if isinstance(imgIds, list) else [imgIds]\n if len(imgIds) == 0:\n return self.imgIds\n imgIdsSet = set(imgIds)\n refexpImgsSet = set(self.imgIds)\n notRefexpImages = imgIdsSet - refexpImgsSet\n if len(notRefexpImages) > 0:\n warnings.warn('Images ' + str(notRefexpImages) + ' are not part of the GoogleRefexp dataset and will be ignored from the answer.', RuntimeWarning)\n imgIds = imgIdsSet.intersection(refexpImgsSet)\n return list(imgIds)", "def unique(arr):\n arr = arr.cpu().numpy()\n arr_ = np.ascontiguousarray(arr).view(np.dtype((np.void, arr.dtype.itemsize*arr.shape[1])))\n _, idxs = np.unique(arr_, return_index=True)\n if torch.cuda.is_available():\n return torch.LongTensor(np.sort(idxs)).cuda()\n return torch.LongTensor(np.sort(idxs))", "def getUniqueGenes(raw_counts, communities):\n # Sum each community's genecounts, and stack up those gene profile vectors\n profiles = np.concatenate([np.sum(raw_counts[communities == i], axis=0, keepdims=True) for i in\n np.unique(communities)], axis=0)\n\n binary = np.zeros_like(profiles)\n binary[profiles != 0] = 1\n\n # Only 1 - sum(everything) + 1 > 0\n uniques = binary - np.sum(binary, axis=0) + binary\n uniques[uniques < 0] = 0\n\n return uniques", "def equalize_images(images):\n images_equalized = np.zeros(shape=images.shape, dtype=\"uint8\")\n for i, image in enumerate(images):\n image_equalized = cv.equalizeHist(image)\n image_equalized = np.expand_dims(image_equalized, axis=-1)\n images_equalized[i] = image_equalized\n return images_equalized", "def unique(self):\n if 
self.unique_values and self.name in self.unique_values.keys():\n return np.array(self.unique_values[self.name])\n else:\n return super().unique()", "def pixel_values(roi):\r\n pixel = []\r\n mask = roi.getMask() # polygon rois are defined by a mask\r\n box = roi.getBounds()\r\n boxLeft = box.x\r\n boxRight = boxLeft + box.width\r\n boxTop = box.y\r\n boxBottom = boxTop + box.height\r\n for v in range (boxTop, boxBottom):\r\n for u in range (boxLeft, boxRight):\r\n if mask.getPixel(u - boxLeft, v - boxTop) > 0:\r\n pixel.append(imp.getProcessor().getPixel(u,v))\r\n return pixel", "def get_unique_elements(self, field: str) -> list:\n return self.properties.distinct(field)", "def unique_elements(a_list):\n result = []\n for elem in a_list:\n if not elem in result:\n result.append(elem)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Codec defaults to Latin1 / ISO 8859-1
def test_default(self):
    self.assertEqual(Codec.default(), Latin1Codec())
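An illustrative sketch of the behaviour the test above asserts: a Codec.default() that returns a Latin-1 (ISO 8859-1) codec. The class bodies are assumptions; only the names Codec and Latin1Codec come from the test, and value-based __eq__ is assumed so assertEqual can pass.

class Latin1Codec(object):
    encoding = "latin-1"  # a.k.a. ISO 8859-1

    def decode(self, data):
        return data.decode(self.encoding)

    def encode(self, text):
        return text.encode(self.encoding)

    def __eq__(self, other):
        # Equality by type, so assertEqual(Codec.default(), Latin1Codec()) holds.
        return isinstance(other, Latin1Codec)


class Codec(object):
    @staticmethod
    def default():
        return Latin1Codec()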
[ "def get_data_encoding():", "def __init__(self, encoding):\n self.trans = {}\n for char in 'ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ':\n self.trans[char] = 'A'\n for char in 'ȀǞ':\n self.trans[char] = 'Ä'\n self.trans['Ǻ'] = 'Å'\n self.trans['Ä'] = 'Ae'\n self.trans['Å'] = 'Aa'\n for char in 'àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ':\n self.trans[char] = 'a'\n for char in 'ȁǟ':\n self.trans[char] = 'ä'\n self.trans['ǻ'] = 'å'\n self.trans['ä'] = 'ae'\n self.trans['å'] = 'aa'\n for char in 'ḂḄḆƁƂ':\n self.trans[char] = 'B'\n for char in 'ḃḅḇƀɓƃ':\n self.trans[char] = 'b'\n for char in 'ĆĈĊÇČƇ':\n self.trans[char] = 'C'\n for char in 'ćĉċçčƈȼ':\n self.trans[char] = 'c'\n self.trans['Ḉ'] = 'Ç'\n self.trans['ḉ'] = 'ç'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ĎḊḌḎḐḒĐƉƊƋ':\n self.trans[char] = 'D'\n for char in 'ďḋḍḏḑḓđɖɗƌ':\n self.trans[char] = 'd'\n for char in 'ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ':\n self.trans[char] = 'E'\n for char in 'ỀẾỄỆỂ':\n self.trans[char] = 'Ê'\n for char in 'èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ':\n self.trans[char] = 'e'\n for char in 'ềếễệể':\n self.trans[char] = 'ê'\n for char in 'ḞƑ':\n self.trans[char] = 'F'\n for char in 'ḟƒ':\n self.trans[char] = 'f'\n for char in 'ǴḠĞĠĢǦǤƓ':\n self.trans[char] = 'G'\n for char in 'ǵḡğġģǧǥɠ':\n self.trans[char] = 'g'\n self.trans['Ĝ'] = 'Gx'\n self.trans['ĝ'] = 'gx'\n for char in 'ḢḤḦȞḨḪH̱ĦǶ':\n self.trans[char] = 'H'\n for char in 'ḣḥḧȟḩḫ̱ẖħƕ':\n self.trans[char] = 'h'\n for char in 'IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ':\n self.trans[char] = 'I'\n for char in 'ıìȉíîĩḭïḯīĭȋįǐiịỉɨ':\n self.trans[char] = 'i'\n for char in 'ĴJ':\n self.trans[char] = 'J'\n for char in 'ɟĵ̌ǰ':\n self.trans[char] = 'j'\n for char in 'ḰǨĶḲḴƘ':\n self.trans[char] = 'K'\n for char in 'ḱǩķḳḵƙ':\n self.trans[char] = 'k'\n for char in 'ĹĻĽḶḸḺḼȽŁ':\n self.trans[char] = 'L'\n for char in 'ĺļľḷḹḻḽƚłɫ':\n self.trans[char] = 'l'\n for char in 'ḾṀṂ':\n self.trans[char] = 'M'\n for char in 'ḿṁṃɱ':\n self.trans[char] = 'm'\n for char in 'ǸŃÑŅŇṄṆṈṊŊƝɲȠ':\n self.trans[char] = 'N'\n for char in 'ǹńñņňṅṇṉṋŋɲƞ':\n self.trans[char] = 'n'\n for char in 'ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ':\n self.trans[char] = 'O'\n for char in 'òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ':\n self.trans[char] = 'o'\n for char in 'ȌŐȪ':\n self.trans[char] = 'Ö'\n for char in 'ȍőȫ':\n self.trans[char] = 'ö'\n for char in 'ỒỐỖỘỔȎ':\n self.trans[char] = 'Ô'\n for char in 'ồốỗộổȏ':\n self.trans[char] = 'ô'\n for char in 'ṔṖƤ':\n self.trans[char] = 'P'\n for char in 'ṕṗƥ':\n self.trans[char] = 'p'\n self.trans['ᵽ'] = 'q'\n for char in 'ȐŔŖŘȒṘṚṜṞ':\n self.trans[char] = 'R'\n for char in 'ȑŕŗřȓṙṛṝṟɽ':\n self.trans[char] = 'r'\n for char in 'ŚṤŞȘŠṦṠṢṨ':\n self.trans[char] = 'S'\n for char in 'śṥşșšṧṡṣṩȿ':\n self.trans[char] = 's'\n self.trans['Ŝ'] = 'Sx'\n self.trans['ŝ'] = 'sx'\n for char in 'ŢȚŤṪṬṮṰŦƬƮ':\n self.trans[char] = 'T'\n for char in 'ţțťṫṭṯṱŧȾƭʈ':\n self.trans[char] = 't'\n for char in 'ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ':\n self.trans[char] = 'U'\n for char in 'ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ':\n self.trans[char] = 'u'\n for char in 'ȔŰǛǗǕǙ':\n self.trans[char] = 'Ü'\n for char in 'ȕűǜǘǖǚ':\n self.trans[char] = 'ü'\n self.trans['Û'] = 'Ux'\n self.trans['û'] = 'ux'\n self.trans['Ȗ'] = 'Û'\n self.trans['ȗ'] = 'û'\n self.trans['Ừ'] = 'Ù'\n self.trans['ừ'] = 'ù'\n self.trans['Ứ'] = 'Ú'\n self.trans['ứ'] = 'ú'\n for char in 'ṼṾ':\n self.trans[char] = 'V'\n for char in 'ṽṿ':\n self.trans[char] = 'v'\n for char in 'ẀẂŴẄẆẈ':\n self.trans[char] = 'W'\n for char in 'ẁẃŵẅẇẉ':\n self.trans[char] = 'w'\n for char in 'ẊẌ':\n self.trans[char] = 'X'\n for char in 'ẋẍ':\n self.trans[char] = 
'x'\n for char in 'ỲÝŶŸỸȲẎỴỶƳ':\n self.trans[char] = 'Y'\n for char in 'ỳýŷÿỹȳẏỵỷƴ':\n self.trans[char] = 'y'\n for char in 'ŹẐŻẒŽẔƵȤ':\n self.trans[char] = 'Z'\n for char in 'źẑżẓžẕƶȥ':\n self.trans[char] = 'z'\n self.trans['ɀ'] = 'zv'\n\n # Latin: extended Latin alphabet\n self.trans['ɑ'] = 'a'\n for char in 'ÆǼǢ':\n self.trans[char] = 'AE'\n for char in 'æǽǣ':\n self.trans[char] = 'ae'\n self.trans['Ð'] = 'Dh'\n self.trans['ð'] = 'dh'\n for char in 'ƎƏƐ':\n self.trans[char] = 'E'\n for char in 'ǝəɛ':\n self.trans[char] = 'e'\n for char in 'ƔƢ':\n self.trans[char] = 'G'\n for char in 'ᵷɣƣᵹ':\n self.trans[char] = 'g'\n self.trans['Ƅ'] = 'H'\n self.trans['ƅ'] = 'h'\n self.trans['Ƕ'] = 'Wh'\n self.trans['ƕ'] = 'wh'\n self.trans['Ɩ'] = 'I'\n self.trans['ɩ'] = 'i'\n self.trans['Ŋ'] = 'Ng'\n self.trans['ŋ'] = 'ng'\n self.trans['Œ'] = 'OE'\n self.trans['œ'] = 'oe'\n self.trans['Ɔ'] = 'O'\n self.trans['ɔ'] = 'o'\n self.trans['Ȣ'] = 'Ou'\n self.trans['ȣ'] = 'ou'\n self.trans['Ƽ'] = 'Q'\n for char in 'ĸƽ':\n self.trans[char] = 'q'\n self.trans['ȹ'] = 'qp'\n self.trans[''] = 'r'\n self.trans['ſ'] = 's'\n self.trans['ß'] = 'ss'\n self.trans['Ʃ'] = 'Sh'\n for char in 'ʃᶋ':\n self.trans[char] = 'sh'\n self.trans['Ʉ'] = 'U'\n self.trans['ʉ'] = 'u'\n self.trans['Ʌ'] = 'V'\n self.trans['ʌ'] = 'v'\n for char in 'ƜǷ':\n self.trans[char] = 'W'\n for char in 'ɯƿ':\n self.trans[char] = 'w'\n self.trans['Ȝ'] = 'Y'\n self.trans['ȝ'] = 'y'\n self.trans['IJ'] = 'IJ'\n self.trans['ij'] = 'ij'\n self.trans['Ƨ'] = 'Z'\n for char in 'ʮƨ':\n self.trans[char] = 'z'\n self.trans['Ʒ'] = 'Zh'\n self.trans['ʒ'] = 'zh'\n self.trans['Ǯ'] = 'Dzh'\n self.trans['ǯ'] = 'dzh'\n for char in 'ƸƹʔˀɁɂ':\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in 'Cʗǃ':\n self.trans[char] = '!'\n\n # Punctuation and typography\n for char in '«»“”„¨':\n self.trans[char] = u'\"'\n for char in '‘’′':\n self.trans[char] = u\"'\"\n self.trans['•'] = '*'\n self.trans['@'] = '(at)'\n self.trans['¤'] = '$'\n self.trans['¢'] = 'c'\n self.trans['€'] = 'E'\n self.trans['£'] = 'L'\n self.trans['¥'] = 'yen'\n self.trans['†'] = '+'\n self.trans['‡'] = '++'\n self.trans['°'] = ':'\n self.trans['¡'] = '!'\n self.trans['¿'] = '?'\n self.trans['‰'] = 'o/oo'\n self.trans['‱'] = 'o/ooo'\n for char in '¶§':\n self.trans[char] = '>'\n self.trans['…'] = '...'\n for char in '‒–—―':\n self.trans[char] = '-'\n self.trans['·'] = ' '\n self.trans['¦'] = '|'\n self.trans['⁂'] = '***'\n self.trans['◊'] = '<>'\n self.trans['‽'] = '?!'\n self.trans['؟'] = ';-)'\n self.trans['¹'] = '1'\n self.trans['²'] = '2'\n self.trans['³'] = '3'\n\n # Cyrillic\n self.trans.update({'А': 'A', 'а': 'a', 'Б': 'B', 'б': 'b',\n 'В': 'V', 'в': 'v', 'Г': 'G', 'г': 'g',\n 'Д': 'D', 'д': 'd', 'Е': 'E', 'е': 'e',\n 'Ж': 'Zh', 'ж': 'zh', 'З': 'Z', 'з': 'z',\n 'И': 'I', 'и': 'i', 'Й': 'J', 'й': 'j',\n 'К': 'K', 'к': 'k', 'Л': 'L', 'л': 'l',\n 'М': 'M', 'м': 'm', 'Н': 'N', 'н': 'n',\n 'О': 'O', 'о': 'o', 'П': 'P', 'п': 'p',\n 'Р': 'R', 'р': 'r', 'С': 'S', 'с': 's',\n 'Т': 'T', 'т': 't', 'У': 'U', 'у': 'u',\n 'Ф': 'F', 'ф': 'f', 'х': 'kh', 'Ц': 'C',\n 'ц': 'c', 'Ч': 'Ch', 'ч': 'ch', 'Ш': 'Sh',\n 'ш': 'sh', 'Щ': 'Shch', 'щ': 'shch', 'Ь': \"'\",\n 'ь': \"'\", 'Ъ': '\"', 'ъ': '\"', 'Ю': 'Yu',\n 'ю': 'yu', 'Я': 'Ya', 'я': 'ya', 'Х': 'Kh',\n 'Χ': 'Kh'})\n\n # Additional Cyrillic letters, most occuring in only a few languages\n self.trans.update({\n 'Ы': 'Y', 'ы': 'y', 'Ё': 'Ë', 'ё': 'ë',\n 'Э': 'È', 'Ѐ': 'È', 'э': 'è', 'ѐ': 'è',\n 'І': 'I', 'і': 'i', 'Ї': 'Ji', 'ї': 
'ji',\n 'Є': 'Je', 'є': 'je', 'Ґ': 'G', 'Ҝ': 'G',\n 'ґ': 'g', 'ҝ': 'g', 'Ђ': 'Dj', 'ђ': 'dj',\n 'Љ': 'Lj', 'љ': 'lj',\n 'Њ': 'Nj', 'њ': 'nj', 'Ћ': 'Cj', 'ћ': 'cj',\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n 'Ќ': 'Kj', 'ќ': 'kj', 'Ӣ': 'Ii', 'ӣ': 'ii',\n 'Ҳ': 'H', 'ҳ': 'h',\n 'Ҷ': 'Dz', 'ҷ': 'dz', 'Ө': 'Ô', 'Ӫ': 'Ô',\n 'ө': 'ô', 'ӫ': 'ô', 'Ү': 'Y', 'ү': 'y', 'Һ': 'H',\n 'һ': 'h', 'Ә': 'AE', 'Ӕ': 'AE', 'ә': 'ae',\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n 'ѝ': 'ì', 'Ѝ': 'Ì', 'Ӑ': 'A', 'ă': 'a', 'Ӓ': 'Ä',\n 'Ҽ': 'Ts', 'Ҿ': 'Ts', 'ҽ': 'ts', 'ҿ': 'ts',\n 'Ҙ': 'Dh', 'ҙ': 'dh', 'Ӏ': '', 'ӏ': '', 'Ӆ': 'L',\n 'ӆ': 'l', 'Ӎ': 'M', 'ӎ': 'm', 'Ӧ': 'Ö', 'ӧ': 'ö',\n 'Ҩ': 'u', 'ҩ': 'u', 'Ҧ': 'Ph', 'ҧ': 'ph', 'Ҏ': 'R',\n 'ҏ': 'r', 'Ҫ': 'Th', 'ҫ': 'th', 'Ҭ': 'T', 'ҭ': 't',\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n 'ӹ': 'u', 'Ҵ': 'Tts', 'ҵ': 'tts', 'Ӵ': 'Ch', 'ӵ': 'ch'})\n\n for char in 'ЈӤҊ':\n self.trans[char] = 'J'\n for char in 'јӥҋ':\n self.trans[char] = 'j'\n for char in 'ЏӁӜҶ':\n self.trans[char] = 'Dzh'\n for char in 'џӂӝҷ':\n self.trans[char] = 'dzh'\n for char in 'ЅӞӠӋҸ':\n self.trans[char] = 'Dz'\n for char in 'ѕӟӡӌҹ':\n self.trans[char] = 'dz'\n for char in 'ҒӶҔ':\n self.trans[char] = 'G'\n for char in 'ғӷҕ':\n self.trans[char] = 'g'\n for char in 'ҚҞҠӃ':\n self.trans[char] = 'Q'\n for char in 'қҟҡӄ':\n self.trans[char] = 'q'\n for char in 'ҢҤӉӇ':\n self.trans[char] = 'Ng'\n for char in 'ңҥӊӈ':\n self.trans[char] = 'ng'\n for char in 'ӖѢҌ':\n self.trans[char] = 'E'\n for char in 'ӗѣҍ':\n self.trans[char] = 'e'\n for char in 'ӲӰҮ':\n self.trans[char] = 'Ü'\n for char in 'ӳӱү':\n self.trans[char] = 'ü'\n\n # Archaic Cyrillic letters\n self.trans.update({\n 'Ѹ': 'Ou', 'ѹ': 'ou', 'Ѡ': 'O', 'Ѻ': 'O', 'ѡ': 'o',\n 'ѻ': 'o', 'Ѿ': 'Ot', 'ѿ': 'ot', 'Ѣ': 'E', 'ѣ': 'e',\n 'Ѥ': 'Ei', 'Ѧ': 'Ei', 'ѥ': 'ei', 'ѧ': 'ei', 'Ѫ': 'Ai',\n 'ѫ': 'ai', 'Ѯ': 'X', 'ѯ': 'x', 'Ѱ': 'Ps', 'ѱ': 'ps',\n 'Ѳ': 'Th', 'ѳ': 'th', 'Ѵ': 'Ü', 'Ѷ': 'Ü', 'ѵ': 'ü'})\n\n # Hebrew alphabet\n for char in 'אע':\n self.trans[char] = u\"'\"\n self.trans['ב'] = 'b'\n self.trans['ג'] = 'g'\n self.trans['ד'] = 'd'\n self.trans['ה'] = 'h'\n self.trans['ו'] = 'v'\n self.trans['ז'] = 'z'\n self.trans['ח'] = 'kh'\n self.trans['ט'] = 't'\n self.trans['י'] = 'y'\n for char in 'ךכ':\n self.trans[char] = 'k'\n self.trans['ל'] = 'l'\n for char in 'םמ':\n self.trans[char] = 'm'\n for char in 'ןנ':\n self.trans[char] = 'n'\n self.trans['ס'] = 's'\n for char in 'ףפ':\n self.trans[char] = 'ph'\n for char in 'ץצ':\n self.trans[char] = 'ts'\n self.trans['ק'] = 'q'\n self.trans['ר'] = 'r'\n self.trans['ש'] = 'sh'\n self.trans['ת'] = 'th'\n\n # Arab alphabet\n for char in 'اﺍﺎ':\n self.trans[char] = 'a'\n for char in 'بﺏﺐﺒﺑ':\n self.trans[char] = 'b'\n for char in 'تﺕﺖﺘﺗ':\n self.trans[char] = 't'\n for char in 'ثﺙﺚﺜﺛ':\n self.trans[char] = 'th'\n for char in 'جﺝﺞﺠﺟ':\n self.trans[char] = 'g'\n for char in 'حﺡﺢﺤﺣ':\n self.trans[char] = 'h'\n for char in 'خﺥﺦﺨﺧ':\n self.trans[char] = 'kh'\n for char in 'دﺩﺪ':\n self.trans[char] = 'd'\n for char in 'ذﺫﺬ':\n self.trans[char] = 'dh'\n for char in 'رﺭﺮ':\n self.trans[char] = 'r'\n for char in 'زﺯﺰ':\n self.trans[char] = 'z'\n for char in 'سﺱﺲﺴﺳ':\n self.trans[char] = 's'\n for char in 'شﺵﺶﺸﺷ':\n self.trans[char] = 'sh'\n for char in 'صﺹﺺﺼﺻ':\n self.trans[char] = 's'\n for char in 'ضﺽﺾﻀﺿ':\n self.trans[char] = 'd'\n for char in 'طﻁﻂﻄﻃ':\n self.trans[char] = 't'\n for char in 'ظﻅﻆﻈﻇ':\n self.trans[char] = 'z'\n for char in 'عﻉﻊﻌﻋ':\n self.trans[char] = u\"'\"\n for char in 
'غﻍﻎﻐﻏ':\n self.trans[char] = 'gh'\n for char in 'فﻑﻒﻔﻓ':\n self.trans[char] = 'f'\n for char in 'قﻕﻖﻘﻗ':\n self.trans[char] = 'q'\n for char in 'كﻙﻚﻜﻛک':\n self.trans[char] = 'k'\n for char in 'لﻝﻞﻠﻟ':\n self.trans[char] = 'l'\n for char in 'مﻡﻢﻤﻣ':\n self.trans[char] = 'm'\n for char in 'نﻥﻦﻨﻧ':\n self.trans[char] = 'n'\n for char in 'هﻩﻪﻬﻫ':\n self.trans[char] = 'h'\n for char in 'وﻭﻮ':\n self.trans[char] = 'w'\n for char in 'یيﻱﻲﻴﻳ':\n self.trans[char] = 'y'\n # Arabic - additional letters, modified letters and ligatures\n self.trans['ﺀ'] = \"'\"\n for char in 'آﺁﺂ':\n self.trans[char] = u\"'a\"\n for char in 'ةﺓﺔ':\n self.trans[char] = 'th'\n for char in 'ىﻯﻰ':\n self.trans[char] = 'á'\n for char in 'یﯼﯽﯿﯾ':\n self.trans[char] = 'y'\n self.trans['؟'] = '?'\n # Arabic - ligatures\n for char in 'ﻻﻼ':\n self.trans[char] = 'la'\n self.trans['ﷲ'] = 'llah'\n for char in 'إأ':\n self.trans[char] = u\"a'\"\n self.trans['ؤ'] = \"w'\"\n self.trans['ئ'] = \"y'\"\n for char in '◌◌':\n self.trans[char] = \"\" # indicates absence of vowels\n # Arabic vowels\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'i'\n self.trans['◌'] = 'a'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'ay'\n self.trans['◌'] = 'u'\n self.trans['◌'] = 'iy'\n # Arab numerals\n for char in '٠۰':\n self.trans[char] = '0'\n for char in '١۱':\n self.trans[char] = '1'\n for char in '٢۲':\n self.trans[char] = '2'\n for char in '٣۳':\n self.trans[char] = '3'\n for char in '٤۴':\n self.trans[char] = '4'\n for char in '٥۵':\n self.trans[char] = '5'\n for char in '٦۶':\n self.trans[char] = '6'\n for char in '٧۷':\n self.trans[char] = '7'\n for char in '٨۸':\n self.trans[char] = '8'\n for char in '٩۹':\n self.trans[char] = '9'\n # Perso-Arabic\n for char in 'پﭙﭙپ':\n self.trans[char] = 'p'\n for char in 'چچچچ':\n self.trans[char] = 'ch'\n for char in 'ژژ':\n self.trans[char] = 'zh'\n for char in 'گﮔﮕﮓ':\n self.trans[char] = 'g'\n\n # Greek\n self.trans.update({\n 'Α': 'A', 'α': 'a', 'Β': 'B', 'β': 'b', 'Γ': 'G',\n 'γ': 'g', 'Δ': 'D', 'δ': 'd', 'Ε': 'E', 'ε': 'e',\n 'Ζ': 'Z', 'ζ': 'z', 'Η': 'I', 'η': 'i', 'θ': 'th',\n 'Θ': 'Th', 'Ι': 'I', 'ι': 'i', 'Κ': 'K', 'κ': 'k',\n 'Λ': 'L', 'λ': 'l', 'Μ': 'M', 'μ': 'm', 'Ν': 'N',\n 'ν': 'n', 'Ξ': 'X', 'ξ': 'x', 'Ο': 'O', 'ο': 'o',\n 'Π': 'P', 'π': 'p', 'Ρ': 'R', 'ρ': 'r', 'Σ': 'S',\n 'σ': 's', 'ς': 's', 'Τ': 'T', 'τ': 't', 'Υ': 'Y',\n 'υ': 'y', 'Φ': 'F', 'φ': 'f', 'Ψ': 'Ps', 'ψ': 'ps',\n 'Ω': 'O', 'ω': 'o', 'ϗ': '&', 'Ϛ': 'St', 'ϛ': 'st',\n 'Ϙ': 'Q', 'Ϟ': 'Q', 'ϙ': 'q', 'ϟ': 'q', 'Ϻ': 'S',\n 'ϻ': 's', 'Ϡ': 'Ss', 'ϡ': 'ss', 'Ϸ': 'Sh', 'ϸ': 'sh',\n '·': ':', 'Ά': 'Á', 'ά': 'á', 'Έ': 'É', 'Ή': 'É',\n 'έ': 'é', 'ή': 'é', 'Ί': 'Í', 'ί': 'í', 'Ϊ': 'Ï',\n 'ϊ': 'ï', 'ΐ': 'ï', 'Ό': 'Ó', 'ό': 'ó', 'Ύ': 'Ý',\n 'ύ': 'ý', 'Ϋ': 'Y', 'ϋ': 'ÿ', 'ΰ': 'ÿ', 'Ώ': 'Ó',\n 'ώ': 'ó'})\n\n # Japanese (katakana and hiragana)\n for char in 'アァあ':\n self.trans[char] = 'a'\n for char in 'イィい':\n self.trans[char] = 'i'\n for char in 'ウう':\n self.trans[char] = 'u'\n for char in 'エェえ':\n self.trans[char] = 'e'\n for char in 'オォお':\n self.trans[char] = 'o'\n for char in 'ャや':\n self.trans[char] = 'ya'\n for char in 'ュゆ':\n self.trans[char] = 'yu'\n for char in 'ョよ':\n self.trans[char] = 'yo'\n for char in 'カか':\n self.trans[char] = 'ka'\n for char in 'キき':\n self.trans[char] = 'ki'\n for char in 'クく':\n self.trans[char] = 'ku'\n for char in 'ケけ':\n self.trans[char] = 'ke'\n for char in 'コこ':\n self.trans[char] = 'ko'\n for char in 'サさ':\n self.trans[char] = 'sa'\n for char in 'シし':\n self.trans[char] = 'shi'\n 
for char in 'スす':\n self.trans[char] = 'su'\n for char in 'セせ':\n self.trans[char] = 'se'\n for char in 'ソそ':\n self.trans[char] = 'so'\n for char in 'タた':\n self.trans[char] = 'ta'\n for char in 'チち':\n self.trans[char] = 'chi'\n for char in 'ツつ':\n self.trans[char] = 'tsu'\n for char in 'テて':\n self.trans[char] = 'te'\n for char in 'トと':\n self.trans[char] = 'to'\n for char in 'ナな':\n self.trans[char] = 'na'\n for char in 'ニに':\n self.trans[char] = 'ni'\n for char in 'ヌぬ':\n self.trans[char] = 'nu'\n for char in 'ネね':\n self.trans[char] = 'ne'\n for char in 'ノの':\n self.trans[char] = 'no'\n for char in 'ハは':\n self.trans[char] = 'ha'\n for char in 'ヒひ':\n self.trans[char] = 'hi'\n for char in 'フふ':\n self.trans[char] = 'fu'\n for char in 'ヘへ':\n self.trans[char] = 'he'\n for char in 'ホほ':\n self.trans[char] = 'ho'\n for char in 'マま':\n self.trans[char] = 'ma'\n for char in 'ミみ':\n self.trans[char] = 'mi'\n for char in 'ムむ':\n self.trans[char] = 'mu'\n for char in 'メめ':\n self.trans[char] = 'me'\n for char in 'モも':\n self.trans[char] = 'mo'\n for char in 'ラら':\n self.trans[char] = 'ra'\n for char in 'リり':\n self.trans[char] = 'ri'\n for char in 'ルる':\n self.trans[char] = 'ru'\n for char in 'レれ':\n self.trans[char] = 're'\n for char in 'ロろ':\n self.trans[char] = 'ro'\n for char in 'ワわ':\n self.trans[char] = 'wa'\n for char in 'ヰゐ':\n self.trans[char] = 'wi'\n for char in 'ヱゑ':\n self.trans[char] = 'we'\n for char in 'ヲを':\n self.trans[char] = 'wo'\n for char in 'ンん':\n self.trans[char] = 'n'\n for char in 'ガが':\n self.trans[char] = 'ga'\n for char in 'ギぎ':\n self.trans[char] = 'gi'\n for char in 'グぐ':\n self.trans[char] = 'gu'\n for char in 'ゲげ':\n self.trans[char] = 'ge'\n for char in 'ゴご':\n self.trans[char] = 'go'\n for char in 'ザざ':\n self.trans[char] = 'za'\n for char in 'ジじ':\n self.trans[char] = 'ji'\n for char in 'ズず':\n self.trans[char] = 'zu'\n for char in 'ゼぜ':\n self.trans[char] = 'ze'\n for char in 'ゾぞ':\n self.trans[char] = 'zo'\n for char in 'ダだ':\n self.trans[char] = 'da'\n for char in 'ヂぢ':\n self.trans[char] = 'dji'\n for char in 'ヅづ':\n self.trans[char] = 'dzu'\n for char in 'デで':\n self.trans[char] = 'de'\n for char in 'ドど':\n self.trans[char] = 'do'\n for char in 'バば':\n self.trans[char] = 'ba'\n for char in 'ビび':\n self.trans[char] = 'bi'\n for char in 'ブぶ':\n self.trans[char] = 'bu'\n for char in 'ベべ':\n self.trans[char] = 'be'\n for char in 'ボぼ':\n self.trans[char] = 'bo'\n for char in 'パぱ':\n self.trans[char] = 'pa'\n for char in 'ピぴ':\n self.trans[char] = 'pi'\n for char in 'プぷ':\n self.trans[char] = 'pu'\n for char in 'ペぺ':\n self.trans[char] = 'pe'\n for char in 'ポぽ':\n self.trans[char] = 'po'\n for char in 'ヴゔ':\n self.trans[char] = 'vu'\n self.trans['ヷ'] = 'va'\n self.trans['ヸ'] = 'vi'\n self.trans['ヹ'] = 've'\n self.trans['ヺ'] = 'vo'\n\n # Japanese and Chinese punctuation and typography\n for char in '・·':\n self.trans[char] = ' '\n for char in '〃『』《》':\n self.trans[char] = u'\"'\n for char in '「」〈〉〘〙〚〛':\n self.trans[char] = u\"'\"\n for char in '(〔':\n self.trans[char] = '('\n for char in ')〕':\n self.trans[char] = ')'\n for char in '[【〖':\n self.trans[char] = '['\n for char in ']】〗':\n self.trans[char] = ']'\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. 
'\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in '•◦':\n self.trans[char] = '_'\n for char in '※*':\n self.trans[char] = '*'\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in ',、':\n self.trans[char] = ','\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in 'ეჱ':\n self.trans[char] = 'e'\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in 'ყ':\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in 'წ':\n self.trans[char] = u\"ts'\"\n for char in 'ჭ':\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in 'पप':\n self.trans[char] = 'p'\n self.trans['अ'] = 'a'\n for char in 'आा':\n self.trans[char] = 'aa'\n self.trans['प'] = 'pa'\n for char in 'इि':\n self.trans[char] = 'i'\n for char in 'ईी':\n self.trans[char] = 'ii'\n for char in 'उु':\n self.trans[char] = 'u'\n for char in 'ऊू':\n self.trans[char] = 'uu'\n for char in 'एे':\n self.trans[char] = 'e'\n for char in 'ऐै':\n self.trans[char] = 'ai'\n for char in 'ओो':\n self.trans[char] = 'o'\n for char in 'औौ':\n self.trans[char] = 'au'\n for char in 'ऋृर':\n self.trans[char] = 'r'\n for char in 'ॠॄ':\n self.trans[char] = 'rr'\n for char in 'ऌॢल':\n self.trans[char] = 'l'\n for char in 'ॡॣ':\n self.trans[char] = 'll'\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in 'टत':\n self.trans[char] = 't'\n for char in 'ठथ':\n self.trans[char] = 'th'\n for char in 'डद':\n self.trans[char] = 'd'\n for char in 'ढध':\n self.trans[char] = 'dh'\n for char in 'णन':\n self.trans[char] = 'n'\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in 'षस':\n self.trans[char] = 's'\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in 'क़':\n self.trans[char] = 'q'\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in 'डढ':\n self.trans[char] = 'r'\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in 'ख्':\n self.trans[char] = 'khn'\n self.trans['त'] = 'tn'\n for char in 'द्':\n self.trans[char] = 'dn'\n self.trans['श'] = 'cn'\n for char in 'ह्':\n self.trans[char] = 'fn'\n for char in 'अँ':\n self.trans[char] = 'm'\n for char in '॒॑':\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n 
self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in 'Տ':\n self.trans[char] = u\"T'\"\n for char in 'տ':\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in 'க்':\n self.trans[char] = 'k'\n for char in 'ஙண்ந்ன்':\n self.trans[char] = 'n'\n self.trans['ச'] = 'c'\n for char in 'ஞ்':\n self.trans[char] = 'ñ'\n for char in 'ட்':\n self.trans[char] = 'th'\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in 'ம்':\n self.trans[char] = 'm'\n for char in 'ய்':\n self.trans[char] = 'y'\n for char in 'ர்ழ்ற':\n self.trans[char] = 'r'\n for char in 'ல்ள':\n self.trans[char] = 'l'\n for char in 'வ்':\n self.trans[char] = 'v'\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in 'க்ஷ':\n self.trans[char] = 'x'\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in 'আা':\n self.trans[char] = 'a'\n for char in 'ইিঈী':\n self.trans[char] = 'i'\n for char in 'উুঊূ':\n self.trans[char] = 'u'\n for char in 'ঋৃ':\n self.trans[char] = 'ri'\n for char in 'এেয়':\n self.trans[char] = 'e'\n for char in 'ঐৈ':\n self.trans[char] = 'oi'\n for char in 'ওো':\n self.trans[char] = 'o'\n for 
char in 'ঔৌ':\n self.trans[char] = 'ou'\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in 'টত':\n self.trans[char] = 't'\n for char in 'ঠথ':\n self.trans[char] = 'th'\n for char in 'ডদ':\n self.trans[char] = 'd'\n for char in 'ঢধ':\n self.trans[char] = 'dh'\n for char in 'ণন':\n self.trans[char] = 'n'\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in 'য়':\n self.trans[char] = '-'\n for char in 'ড়':\n self.trans[char] = 'r'\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in 'ขฃคฅฆ':\n self.trans[char] = 'kh'\n self.trans['ง'] = 'ng'\n for char in 'จฉชฌ':\n self.trans[char] = 'ch'\n for char in 'ซศษส':\n self.trans[char] = 's'\n for char in 'ญย':\n self.trans[char] = 'y'\n for char in 'ฎด':\n self.trans[char] = 'd'\n for char in 'ฏต':\n self.trans[char] = 't'\n for char in 'ฐฑฒถทธ':\n self.trans[char] = 'th'\n for char in 'ณน':\n self.trans[char] = 'n'\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in 'ผพภ':\n self.trans[char] = 'ph'\n for char in 'ฝฟ':\n self.trans[char] = 'f'\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in 'ลฬ':\n self.trans[char] = 'l'\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in 'หฮ':\n self.trans[char] = 'h'\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in 'อวโิ':\n self.trans[char] = 'o'\n for char in 'ะัา':\n self.trans[char] = 'a'\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in 'เ็':\n self.trans[char] = 'e'\n self.trans['แ'] = 'ae'\n for char in 'ใไ':\n self.trans[char] = 'ai'\n for char in '่้๊๋็์':\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 
'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans['ಅ'] = 'a'\n for char in 'ಆಾ':\n self.trans[char] = 'aa'\n for char in 'ಇಿ':\n self.trans[char] = 'i'\n for char in 'ಈೀ':\n self.trans[char] = 'ii'\n for char in 'ಉು':\n self.trans[char] = 'u'\n for char in 'ಊೂ':\n self.trans[char] = 'uu'\n for char in 'ಋೂ':\n self.trans[char] = u\"r'\"\n for char in 'ಎೆ':\n self.trans[char] = 'e'\n for char in 'ಏೇ':\n self.trans[char] = 'ee'\n for char in 'ಐೈ':\n self.trans[char] = 'ai'\n for char in 'ಒೊ':\n self.trans[char] = 'o'\n for char in 'ಓೋ':\n self.trans[char] = 'oo'\n for char in 'ಔೌ':\n self.trans[char] = 'au'\n self.trans['ಂ'] = \"m'\"\n self.trans['ಃ'] = \"h'\"\n self.trans['ಕ'] = 'k'\n self.trans['ಖ'] = 'kh'\n self.trans['ಗ'] = 'g'\n self.trans['ಘ'] = 'gh'\n self.trans['ಙ'] = 'ng'\n self.trans['ಚ'] = 'c'\n self.trans['ಛ'] = 'ch'\n self.trans['ಜ'] = 'j'\n self.trans['ಝ'] = 'ny'\n self.trans['ಟ'] = 'tt'\n self.trans['ಠ'] = 'tth'\n self.trans['ಡ'] = 'dd'\n self.trans['ಢ'] = 'ddh'\n self.trans['ಣ'] = 'nn'\n self.trans['ತ'] = 't'\n self.trans['ಥ'] = 'th'\n self.trans['ದ'] = 'd'\n self.trans['ಧ'] = 'dh'\n self.trans['ನ'] = 'n'\n self.trans['ಪ'] = 'p'\n self.trans['ಫ'] = 'ph'\n self.trans['ಬ'] = 'b'\n self.trans['ಭ'] = 'bh'\n self.trans['ಮ'] = 'm'\n self.trans['ಯ'] = 'y'\n self.trans['ರ'] = 'r'\n self.trans['ಲ'] = 'l'\n self.trans['ವ'] = 'v'\n self.trans['ಶ'] = 'sh'\n self.trans['ಷ'] = 'ss'\n self.trans['ಸ'] = 's'\n self.trans['ಹ'] = 'h'\n self.trans['ಳ'] = 'll'\n self.trans['೦'] = '0'\n self.trans['೧'] = '1'\n self.trans['೨'] = '2'\n self.trans['೩'] = '3'\n self.trans['೪'] = '4'\n self.trans['೫'] = '5'\n self.trans['೬'] = '6'\n self.trans['೭'] = '7'\n self.trans['೮'] = '8'\n self.trans['೯'] = '9'\n # Telugu\n self.trans['అ'] = 'a'\n for char in 'ఆా':\n self.trans[char] = 'aa'\n for char in 'ఇి':\n self.trans[char] = 'i'\n for char in 'ఈీ':\n self.trans[char] = 'ii'\n for char in 'ఉు':\n self.trans[char] = 'u'\n for char in 'ఊూ':\n self.trans[char] = 'uu'\n for char in 'ఋృ':\n self.trans[char] = \"r'\"\n for char in 'ౠౄ':\n self.trans[char] = 'r\"'\n self.trans['ఌ'] = \"l'\"\n self.trans['ౡ'] = 'l\"'\n for char in 'ఎె':\n self.trans[char] = 'e'\n for char in 'ఏే':\n self.trans[char] = 'ee'\n for char in 'ఐై':\n self.trans[char] = 'ai'\n for char in 'ఒొ':\n self.trans[char] = 'o'\n for char in 'ఓో':\n self.trans[char] = 'oo'\n for char in 'ఔౌ':\n self.trans[char] = 'au'\n self.trans['ం'] = \"'\"\n self.trans['ః'] = '\"'\n self.trans['క'] = 'k'\n self.trans['ఖ'] = 'kh'\n self.trans['గ'] = 'g'\n self.trans['ఘ'] = 'gh'\n self.trans['ఙ'] = 'ng'\n self.trans['చ'] = 'ts'\n self.trans['ఛ'] = 'tsh'\n self.trans['జ'] = 'j'\n self.trans['ఝ'] = 'jh'\n self.trans['ఞ'] = 'ñ'\n for char in 'టత':\n self.trans[char] = 't'\n for char in 'ఠథ':\n self.trans[char] = 'th'\n for char in 'డద':\n self.trans[char] = 'd'\n for char in 'ఢధ':\n self.trans[char] = 'dh'\n for char in 'ణన':\n self.trans[char] = 'n'\n self.trans['ప'] = 'p'\n self.trans['ఫ'] = 'ph'\n self.trans['బ'] = 'b'\n self.trans['భ'] = 'bh'\n self.trans['మ'] = 'm'\n self.trans['య'] = 'y'\n for char in 'రఱ':\n self.trans[char] = 'r'\n for char in 'లళ':\n self.trans[char] = 'l'\n self.trans['వ'] = 'v'\n self.trans['శ'] = 'sh'\n for char in 'షస':\n 
self.trans[char] = 's'\n self.trans['హ'] = 'h'\n self.trans['్'] = \"\"\n for char in 'ంఁ':\n self.trans[char] = '^'\n self.trans['ః'] = '-'\n self.trans['౦'] = '0'\n self.trans['౧'] = '1'\n self.trans['౨'] = '2'\n self.trans['౩'] = '3'\n self.trans['౪'] = '4'\n self.trans['౫'] = '5'\n self.trans['౬'] = '6'\n self.trans['౭'] = '7'\n self.trans['౮'] = '8'\n self.trans['౯'] = '9'\n self.trans['౹'] = '1/4'\n self.trans['౺'] = '1/2'\n self.trans['౻'] = '3/4'\n self.trans['౼'] = '1/16'\n self.trans['౽'] = '1/8'\n self.trans['౾'] = '3/16'\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans['ກ'] = 'k'\n for char in 'ຂຄ':\n self.trans[char] = 'kh'\n self.trans['ງ'] = 'ng'\n self.trans['ຈ'] = 'ch'\n for char in 'ສຊ':\n self.trans[char] = 's'\n self.trans['ຍ'] = 'ny'\n self.trans['ດ'] = 'd'\n self.trans['ຕ'] = 't'\n for char in 'ຖທ':\n self.trans[char] = 'th'\n self.trans['ນ'] = 'n'\n self.trans['ບ'] = 'b'\n self.trans['ປ'] = 'p'\n for char in 'ຜພ':\n self.trans[char] = 'ph'\n for char in 'ຝຟ':\n self.trans[char] = 'f'\n for char in 'ມໝ':\n self.trans[char] = 'm'\n self.trans['ຢ'] = 'y'\n for char in 'ຣຼ':\n self.trans[char] = 'r'\n for char in 'ລຼ':\n self.trans[char] = 'l'\n self.trans['ວ'] = 'v'\n self.trans['ຮ'] = 'h'\n self.trans['ອ'] = \"'\"\n for char in 'ະັ':\n self.trans[char] = 'a'\n self.trans['ິ'] = 'i'\n self.trans['ຶ'] = 'ue'\n self.trans['ຸ'] = 'u'\n self.trans['ເ'] = 'é'\n self.trans['ແ'] = 'è'\n for char in 'ໂົາໍ':\n self.trans[char] = 'o'\n self.trans['ຽ'] = 'ia'\n self.trans['ເຶ'] = 'uea'\n self.trans['ຍ'] = 'i'\n for char in 'ໄໃ':\n self.trans[char] = 'ai'\n self.trans['ຳ'] = 'am'\n self.trans['າ'] = 'aa'\n self.trans['ີ'] = 'ii'\n self.trans['ື'] = 'yy'\n self.trans['ູ'] = 'uu'\n self.trans['ເ'] = 'e'\n self.trans['ແ'] = 'ei'\n self.trans['໐'] = '0'\n self.trans['໑'] = '1'\n self.trans['໒'] = '2'\n self.trans['໓'] = '3'\n self.trans['໔'] = '4'\n self.trans['໕'] = '5'\n self.trans['໖'] = '6'\n self.trans['໗'] = '7'\n self.trans['໘'] = '8'\n self.trans['໙'] = '9'\n # Chinese -- note: incomplete\n for char in '埃挨哎唉哀皑癌蔼矮艾碍爱隘':\n self.trans[char] = 'ai'\n for char in '鞍氨安俺按暗岸胺案':\n self.trans[char] = 'an'\n for char in '肮昂盎':\n self.trans[char] = 'ang'\n for char in '凹敖熬翱袄傲奥懊澳':\n self.trans[char] = 'ao'\n for char in '芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸':\n self.trans[char] = 'ba'\n for char in '白柏百摆佰败拜稗':\n self.trans[char] = 'bai'\n for char in '斑班搬扳般颁板版扮拌伴瓣半办绊':\n self.trans[char] = 'ban'\n for char in '邦帮梆榜膀绑棒磅蚌镑傍谤':\n self.trans[char] = 'bang'\n for char in '苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆':\n self.trans[char] = 'bao'\n for char in '杯碑悲卑北辈背贝钡倍狈备惫焙被':\n self.trans[char] = 'bei'\n for char in '奔苯本笨':\n self.trans[char] = 'ben'\n for char in '崩绷甭泵蹦迸':\n self.trans[char] = 'beng'\n for char in '逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛':\n self.trans[char] = 'bi'\n for char in '鞭边编贬扁便变卞辨辩辫遍':\n self.trans[char] = 'bian'\n for char in '标彪膘表':\n self.trans[char] = 'biao'\n for char in '鳖憋别瘪':\n self.trans[char] = 'bie'\n for char in '彬斌濒滨宾摈':\n self.trans[char] = 'bin'\n for char in '兵冰柄丙秉饼炳病并':\n self.trans[char] = 'bing'\n for char in '玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳':\n self.trans[char] = 'bo'\n for char in '哺补埠不布步簿部怖':\n self.trans[char] = 'bu'\n for char in '猜裁材才财睬踩采彩菜蔡':\n self.trans[char] = 'cai'\n for char in '餐参蚕残惭惨灿':\n self.trans[char] = 'can'\n for char in '苍舱仓沧藏':\n self.trans[char] = 'cang'\n for char in '操糙槽曹草':\n self.trans[char] = 'cao'\n for char in '厕策侧册测':\n self.trans[char] = 'ce'\n for char in '层蹭':\n self.trans[char] = 'ceng'\n for char in 
'插叉茬茶查碴搽察岔差诧':\n self.trans[char] = 'cha'\n for char in '拆柴豺':\n self.trans[char] = 'chai'\n for char in '搀掺蝉馋谗缠铲产阐颤':\n self.trans[char] = 'chan'\n for char in '昌猖场尝常长偿肠厂敞畅唱倡':\n self.trans[char] = 'chang'\n for char in '超抄钞朝嘲潮巢吵炒':\n self.trans[char] = 'chao'\n for char in '车扯撤掣彻澈':\n self.trans[char] = 'che'\n for char in '郴臣辰尘晨忱沉陈趁衬':\n self.trans[char] = 'chen'\n for char in '撑称城橙成呈乘程惩澄诚承逞骋秤':\n self.trans[char] = 'cheng'\n for char in '吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽':\n self.trans[char] = 'chi'\n for char in '充冲虫崇宠':\n self.trans[char] = 'chong'\n for char in '抽酬畴踌稠愁筹仇绸瞅丑臭':\n self.trans[char] = 'chou'\n for char in '初出橱厨躇锄雏滁除楚储矗搐触处':\n self.trans[char] = 'chu'\n self.trans['揣'] = 'chuai'\n for char in '川穿椽传船喘串':\n self.trans[char] = 'chuan'\n for char in '疮窗幢床闯创':\n self.trans[char] = 'chuang'\n for char in '吹炊捶锤垂':\n self.trans[char] = 'chui'\n for char in '春椿醇唇淳纯蠢':\n self.trans[char] = 'chun'\n for char in '戳绰':\n self.trans[char] = 'chuo'\n for char in '疵茨磁雌辞慈瓷词此刺赐次':\n self.trans[char] = 'ci'\n for char in '聪葱囱匆从丛':\n self.trans[char] = 'cong'\n self.trans['凑'] = 'cou'\n for char in '粗醋簇促':\n self.trans[char] = 'cu'\n for char in '蹿篡窜':\n self.trans[char] = 'cuan'\n for char in '摧崔催脆瘁粹淬翠':\n self.trans[char] = 'cui'\n for char in '村存寸':\n self.trans[char] = 'cun'\n for char in '磋撮搓措挫错':\n self.trans[char] = 'cuo'\n for char in '搭达答瘩打大':\n self.trans[char] = 'da'\n for char in '呆歹傣戴带殆代贷袋待逮怠':\n self.trans[char] = 'dai'\n for char in '耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋':\n self.trans[char] = 'dan'\n for char in '当挡党荡档':\n self.trans[char] = 'dang'\n for char in '刀捣蹈倒岛祷导到稻悼道盗':\n self.trans[char] = 'dao'\n for char in '德得的':\n self.trans[char] = 'de'\n for char in '蹬灯登等瞪凳邓':\n self.trans[char] = 'deng'\n for char in '堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔':\n self.trans[char] = 'di'\n for char in '颠掂滇碘点典靛垫电佃甸店惦奠淀殿':\n self.trans[char] = 'dian'\n for char in '碉叼雕凋刁掉吊钓调':\n self.trans[char] = 'diao'\n for char in '跌爹碟蝶迭谍叠':\n self.trans[char] = 'die'\n for char in '丁盯叮钉顶鼎锭定订':\n self.trans[char] = 'ding'\n self.trans['丢'] = 'diu'\n for char in '东冬董懂动栋侗恫冻洞':\n self.trans[char] = 'dong'\n for char in '兜抖斗陡豆逗痘':\n self.trans[char] = 'dou'\n for char in '都督毒犊独读堵睹赌杜镀肚度渡妒':\n self.trans[char] = 'du'\n for char in '端短锻段断缎':\n self.trans[char] = 'duan'\n for char in '堆兑队对':\n self.trans[char] = 'dui'\n for char in '墩吨蹲敦顿囤钝盾遁':\n self.trans[char] = 'dun'\n for char in '掇哆多夺垛躲朵跺舵剁惰堕':\n self.trans[char] = 'duo'\n for char in '蛾峨鹅俄额讹娥恶厄扼遏鄂饿':\n self.trans[char] = 'e'\n for char in '恩嗯':\n self.trans[char] = 'en'\n for char in '而儿耳尔饵洱二贰':\n self.trans[char] = 'er'\n for char in '发罚筏伐乏阀法珐':\n self.trans[char] = 'fa'\n for char in '藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛':\n self.trans[char] = 'fan'\n for char in '坊芳方肪房防妨仿访纺放':\n self.trans[char] = 'fang'\n for char in '菲非啡飞肥匪诽吠肺废沸费':\n self.trans[char] = 'fei'\n for char in '芬酚吩氛分纷坟焚汾粉奋份忿愤粪':\n self.trans[char] = 'fen'\n for char in '丰封枫蜂峰锋风疯烽逢冯缝讽奉凤':\n self.trans[char] = 'feng'\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in ('夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋'\n '复傅付阜父腹负富讣附妇缚咐'):\n self.trans[char] = 'fu'\n for char in '噶嘎':\n self.trans[char] = 'ga'\n for char in '该改概钙盖溉':\n self.trans[char] = 'gai'\n for char in '干甘杆柑竿肝赶感秆敢赣':\n self.trans[char] = 'gan'\n for char in '冈刚钢缸肛纲岗港杠':\n self.trans[char] = 'gang'\n for char in '篙皋高膏羔糕搞镐稿告':\n self.trans[char] = 'gao'\n for char in '哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各':\n self.trans[char] = 'ge'\n self.trans['给'] = 'gei'\n for char in '根跟':\n self.trans[char] = 'gen'\n for char in '耕更庚羹埂耿梗':\n self.trans[char] = 'geng'\n for char in '工攻功恭龚供躬公宫弓巩汞拱贡共':\n self.trans[char] = 
'gong'\n for char in '钩勾沟苟狗垢构购够':\n self.trans[char] = 'gou'\n for char in '辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇':\n self.trans[char] = 'gu'\n for char in '刮瓜剐寡挂褂':\n self.trans[char] = 'gua'\n for char in '乖拐怪':\n self.trans[char] = 'guai'\n for char in '棺关官冠观管馆罐惯灌贯':\n self.trans[char] = 'guan'\n for char in '光广逛':\n self.trans[char] = 'guang'\n for char in '瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽':\n self.trans[char] = 'gui'\n for char in '辊滚棍':\n self.trans[char] = 'gun'\n for char in '锅郭国果裹过':\n self.trans[char] = 'guo'\n self.trans['哈'] = 'ha'\n for char in '骸孩海氦亥害骇':\n self.trans[char] = 'hai'\n for char in '酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉':\n self.trans[char] = 'han'\n for char in '夯杭航':\n self.trans[char] = 'hang'\n for char in '壕嚎豪毫郝好耗号浩':\n self.trans[char] = 'hao'\n for char in '呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺':\n self.trans[char] = 'he'\n for char in '嘿黑':\n self.trans[char] = 'hei'\n for char in '痕很狠恨':\n self.trans[char] = 'hen'\n for char in '哼亨横衡恒':\n self.trans[char] = 'heng'\n for char in '轰哄烘虹鸿洪宏弘红':\n self.trans[char] = 'hong'\n for char in '喉侯猴吼厚候后':\n self.trans[char] = 'hou'\n for char in '呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户':\n self.trans[char] = 'hu'\n for char in '花哗华猾滑画划化话':\n self.trans[char] = 'hua'\n for char in '槐徊怀淮坏':\n self.trans[char] = 'huai'\n for char in '欢环桓还缓换患唤痪豢焕涣宦幻':\n self.trans[char] = 'huan'\n for char in '荒慌黄磺蝗簧皇凰惶煌晃幌恍谎':\n self.trans[char] = 'huang'\n for char in '灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘':\n self.trans[char] = 'hui'\n for char in '荤昏婚魂浑混':\n self.trans[char] = 'hun'\n for char in '豁活伙火获或惑霍货祸':\n self.trans[char] = 'huo'\n for char in ('击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几'\n '脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪'):\n self.trans[char] = 'ji'\n for char in '嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁':\n self.trans[char] = 'jia'\n for char in ('歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健'\n '舰剑饯渐溅涧建'):\n self.trans[char] = 'jian'\n for char in '僵姜将浆江疆蒋桨奖讲匠酱降':\n self.trans[char] = 'jiang'\n for char in '蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖':\n self.trans[char] = 'jiao'\n for char in '揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届':\n self.trans[char] = 'jie'\n for char in '巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲':\n self.trans[char] = 'jin'\n for char in '荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净':\n self.trans[char] = 'jing'\n for char in '囧炯窘':\n self.trans[char] = 'jiong'\n for char in '揪究纠玖韭久灸九酒厩救旧臼舅咎就疚':\n self.trans[char] = 'jiu'\n for char in '鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧':\n self.trans[char] = 'ju'\n for char in '捐鹃娟倦眷卷绢':\n self.trans[char] = 'juan'\n for char in '撅攫抉掘倔爵觉决诀绝':\n self.trans[char] = 'jue'\n for char in '均菌钧军君峻俊竣浚郡骏':\n self.trans[char] = 'jun'\n for char in '喀咖卡咯':\n self.trans[char] = 'ka'\n for char in '开揩楷凯慨':\n self.trans[char] = 'kai'\n for char in '刊堪勘坎砍看':\n self.trans[char] = 'kan'\n for char in '康慷糠扛抗亢炕':\n self.trans[char] = 'kang'\n for char in '考拷烤靠':\n self.trans[char] = 'kao'\n for char in '坷苛柯棵磕颗科壳咳可渴克刻客课':\n self.trans[char] = 'ke'\n for char in '肯啃垦恳':\n self.trans[char] = 'ken'\n for char in '坑吭':\n self.trans[char] = 'keng'\n for char in '空恐孔控':\n self.trans[char] = 'kong'\n for char in '抠口扣寇':\n self.trans[char] = 'kou'\n for char in '枯哭窟苦酷库裤':\n self.trans[char] = 'ku'\n for char in '夸垮挎跨胯':\n self.trans[char] = 'kua'\n for char in '块筷侩快':\n self.trans[char] = 'kuai'\n for char in '宽款':\n self.trans[char] = 'kuan'\n for char in '匡筐狂框矿眶旷况':\n self.trans[char] = 'kuang'\n for char in '亏盔岿窥葵奎魁傀馈愧溃':\n self.trans[char] = 'kui'\n for char in '坤昆捆困':\n self.trans[char] = 'kun'\n for char in '括扩廓阔':\n self.trans[char] = 'kuo'\n for char in '垃拉喇蜡腊辣啦':\n self.trans[char] = 'la'\n for char in '莱来赖':\n self.trans[char] = 'lai'\n for char in '蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥':\n self.trans[char] = 'lan'\n for char in 
'琅榔狼廊郎朗浪':\n self.trans[char] = 'lang'\n for char in '捞劳牢老佬姥酪烙涝':\n self.trans[char] = 'lao'\n for char in '勒乐':\n self.trans[char] = 'le'\n for char in '雷镭蕾磊累儡垒擂肋类泪':\n self.trans[char] = 'lei'\n for char in '棱楞冷':\n self.trans[char] = 'leng'\n for char in ('厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力'\n '璃哩'):\n self.trans[char] = 'li'\n self.trans['俩'] = 'lia'\n for char in '联莲连镰廉怜涟帘敛脸链恋炼练':\n self.trans[char] = 'lian'\n for char in '粮凉梁粱良两辆量晾亮谅':\n self.trans[char] = 'liang'\n for char in '撩聊僚疗燎寥辽潦了撂镣廖料':\n self.trans[char] = 'liao'\n for char in '列裂烈劣猎':\n self.trans[char] = 'lie'\n for char in '琳林磷霖临邻鳞淋凛赁吝拎':\n self.trans[char] = 'lin'\n for char in '玲菱零龄铃伶羚凌灵陵岭领另令':\n self.trans[char] = 'ling'\n for char in '溜琉榴硫馏留刘瘤流柳六':\n self.trans[char] = 'liu'\n for char in '龙聋咙笼窿隆垄拢陇':\n self.trans[char] = 'long'\n for char in '楼娄搂篓漏陋':\n self.trans[char] = 'lou'\n for char in '芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸':\n self.trans[char] = 'lu'\n for char in '峦挛孪滦卵乱':\n self.trans[char] = 'luan'\n for char in '掠略':\n self.trans[char] = 'lue'\n for char in '抡轮伦仑沦纶论':\n self.trans[char] = 'lun'\n for char in '萝螺罗逻锣箩骡裸落洛骆络漯':\n self.trans[char] = 'luo'\n for char in '驴吕铝侣旅履屡缕虑氯律率滤绿':\n self.trans[char] = 'lv'\n for char in '妈麻玛码蚂马骂嘛吗':\n self.trans[char] = 'ma'\n for char in '埋买麦卖迈脉':\n self.trans[char] = 'mai'\n for char in '瞒馒蛮满蔓曼慢漫谩':\n self.trans[char] = 'man'\n for char in '芒茫盲氓忙莽':\n self.trans[char] = 'mang'\n for char in '猫茅锚毛矛铆卯茂冒帽貌贸':\n self.trans[char] = 'mao'\n self.trans['么'] = 'me'\n for char in '玫枚梅酶霉煤没眉媒镁每美昧寐妹媚':\n self.trans[char] = 'mei'\n for char in '门闷们':\n self.trans[char] = 'men'\n for char in '萌蒙檬盟锰猛梦孟':\n self.trans[char] = 'meng'\n for char in '眯醚靡糜迷谜弥米秘觅泌蜜密幂':\n self.trans[char] = 'mi'\n for char in '棉眠绵冕免勉娩缅面':\n self.trans[char] = 'mian'\n for char in '苗描瞄藐秒渺庙妙':\n self.trans[char] = 'miao'\n for char in '蔑灭':\n self.trans[char] = 'mie'\n for char in '民抿皿敏悯闽':\n self.trans[char] = 'min'\n for char in '明螟鸣铭名命':\n self.trans[char] = 'ming'\n self.trans['谬'] = 'miu'\n for char in '摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌':\n self.trans[char] = 'mo'\n for char in '谋牟某':\n self.trans[char] = 'mou'\n for char in '拇牡亩姆母墓暮幕募慕木目睦牧穆':\n self.trans[char] = 'mu'\n for char in '拿哪呐钠那娜纳':\n self.trans[char] = 'na'\n for char in '氖乃奶耐奈':\n self.trans[char] = 'nai'\n for char in '南男难':\n self.trans[char] = 'nan'\n self.trans['囊'] = 'nang'\n for char in '挠脑恼闹淖':\n self.trans[char] = 'nao'\n self.trans['呢'] = 'ne'\n for char in '馁内':\n self.trans[char] = 'nei'\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in '妮霓倪泥尼拟你匿腻逆溺':\n self.trans[char] = 'ni'\n for char in '蔫拈年碾撵捻念':\n self.trans[char] = 'nian'\n for char in '娘酿':\n self.trans[char] = 'niang'\n for char in '鸟尿':\n self.trans[char] = 'niao'\n for char in '捏聂孽啮镊镍涅':\n self.trans[char] = 'nie'\n self.trans['您'] = 'nin'\n for char in '柠狞凝宁拧泞':\n self.trans[char] = 'ning'\n for char in '牛扭钮纽':\n self.trans[char] = 'niu'\n for char in '脓浓农弄':\n self.trans[char] = 'nong'\n for char in '奴努怒':\n self.trans[char] = 'nu'\n self.trans['暖'] = 'nuan'\n for char in '虐疟':\n self.trans[char] = 'nue'\n for char in '挪懦糯诺':\n self.trans[char] = 'nuo'\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in '欧鸥殴藕呕偶沤':\n self.trans[char] = 'ou'\n for char in '啪趴爬帕怕琶':\n self.trans[char] = 'pa'\n for char in '拍排牌徘湃派':\n self.trans[char] = 'pai'\n for char in '攀潘盘磐盼畔判叛':\n self.trans[char] = 'pan'\n for char in '乓庞旁耪胖':\n self.trans[char] = 'pang'\n for char in '抛咆刨炮袍跑泡':\n self.trans[char] = 'pao'\n for char in '呸胚培裴赔陪配佩沛':\n self.trans[char] = 'pei'\n for char in '喷盆':\n 
self.trans[char] = 'pen'\n for char in '砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰':\n self.trans[char] = 'peng'\n for char in '坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬':\n self.trans[char] = 'pi'\n for char in '篇偏片骗':\n self.trans[char] = 'pian'\n for char in '飘漂瓢票':\n self.trans[char] = 'piao'\n for char in '撇瞥':\n self.trans[char] = 'pie'\n for char in '拼频贫品聘':\n self.trans[char] = 'pin'\n for char in '乒坪苹萍平凭瓶评屏':\n self.trans[char] = 'ping'\n for char in '坡泼颇婆破魄迫粕剖':\n self.trans[char] = 'po'\n for char in '扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮':\n self.trans[char] = 'pu'\n for char in ('期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄'\n '弃汽泣讫'):\n self.trans[char] = 'qi'\n for char in '掐恰洽':\n self.trans[char] = 'qia'\n for char in '牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉':\n self.trans[char] = 'qian'\n for char in '枪呛腔羌墙蔷强抢':\n self.trans[char] = 'qiang'\n for char in '橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍':\n self.trans[char] = 'qiao'\n for char in '切茄且怯窃':\n self.trans[char] = 'qie'\n for char in '钦侵亲秦琴勤芹擒禽寝沁':\n self.trans[char] = 'qin'\n for char in '青轻氢倾卿清擎晴氰情顷请庆':\n self.trans[char] = 'qing'\n for char in '琼穷':\n self.trans[char] = 'qiong'\n for char in '秋丘邱球求囚酋泅':\n self.trans[char] = 'qiu'\n for char in '趋区蛆曲躯屈驱渠取娶龋趣去':\n self.trans[char] = 'qu'\n for char in '圈颧权醛泉全痊拳犬券劝':\n self.trans[char] = 'quan'\n for char in '缺炔瘸却鹊榷确雀':\n self.trans[char] = 'que'\n for char in '裙群':\n self.trans[char] = 'qun'\n for char in '然燃冉染':\n self.trans[char] = 'ran'\n for char in '瓤壤攘嚷让':\n self.trans[char] = 'rang'\n for char in '饶扰绕':\n self.trans[char] = 'rao'\n for char in '惹热':\n self.trans[char] = 're'\n for char in '壬仁人忍韧任认刃妊纫':\n self.trans[char] = 'ren'\n for char in '扔仍':\n self.trans[char] = 'reng'\n self.trans['日'] = 'ri'\n for char in '戎茸蓉荣融熔溶容绒冗':\n self.trans[char] = 'rong'\n for char in '揉柔肉':\n self.trans[char] = 'rou'\n for char in '茹蠕儒孺如辱乳汝入褥':\n self.trans[char] = 'ru'\n for char in '软阮':\n self.trans[char] = 'ruan'\n for char in '蕊瑞锐':\n self.trans[char] = 'rui'\n for char in '闰润':\n self.trans[char] = 'run'\n for char in '若弱':\n self.trans[char] = 'ruo'\n for char in '撒洒萨':\n self.trans[char] = 'sa'\n for char in '腮鳃塞赛':\n self.trans[char] = 'sai'\n for char in '三叁伞散':\n self.trans[char] = 'san'\n for char in '桑嗓丧':\n self.trans[char] = 'sang'\n for char in '搔骚扫嫂':\n self.trans[char] = 'sao'\n for char in '瑟色涩':\n self.trans[char] = 'se'\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in '莎砂杀刹沙纱傻啥煞':\n self.trans[char] = 'sha'\n for char in '筛晒':\n self.trans[char] = 'shai'\n for char in '珊苫杉山删煽衫闪陕擅赡膳善汕扇缮':\n self.trans[char] = 'shan'\n for char in '墒伤商赏晌上尚裳':\n self.trans[char] = 'shang'\n for char in '梢捎稍烧芍勺韶少哨邵绍':\n self.trans[char] = 'shao'\n for char in '奢赊蛇舌舍赦摄射慑涉社设':\n self.trans[char] = 'she'\n for char in '砷申呻伸身深娠绅神沈审婶甚肾慎渗':\n self.trans[char] = 'shen'\n for char in '声生甥牲升绳省盛剩胜圣':\n self.trans[char] = 'sheng'\n for char in ('师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝'\n '势是嗜噬适仕侍释饰氏市恃室视试'):\n self.trans[char] = 'shi'\n for char in '收手首守寿授售受瘦兽':\n self.trans[char] = 'shou'\n for char in (\n '蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕'):\n self.trans[char] = 'shu'\n for char in '刷耍':\n self.trans[char] = 'shua'\n for char in '摔衰甩帅':\n self.trans[char] = 'shuai'\n for char in '栓拴':\n self.trans[char] = 'shuan'\n for char in '霜双爽':\n self.trans[char] = 'shuang'\n for char in '谁水睡税':\n self.trans[char] = 'shui'\n for char in '吮瞬顺舜':\n self.trans[char] = 'shun'\n for char in '说硕朔烁':\n self.trans[char] = 'shuo'\n for char in '斯撕嘶思私司丝死肆寺嗣四伺似饲巳':\n self.trans[char] = 'si'\n for char in '松耸怂颂送宋讼诵':\n self.trans[char] = 'song'\n for char in '搜艘擞':\n self.trans[char] = 'sou'\n for char in 
'嗽苏酥俗素速粟僳塑溯宿诉肃':\n self.trans[char] = 'su'\n for char in '酸蒜算':\n self.trans[char] = 'suan'\n for char in '虽隋随绥髓碎岁穗遂隧祟':\n self.trans[char] = 'sui'\n for char in '孙损笋':\n self.trans[char] = 'sun'\n for char in '蓑梭唆缩琐索锁所':\n self.trans[char] = 'suo'\n for char in '塌他它她塔獭挞蹋踏':\n self.trans[char] = 'ta'\n for char in '胎苔抬台泰酞太态汰':\n self.trans[char] = 'tai'\n for char in '坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭':\n self.trans[char] = 'tan'\n for char in '汤塘搪堂棠膛唐糖倘躺淌趟烫':\n self.trans[char] = 'tang'\n for char in '掏涛滔绦萄桃逃淘陶讨套':\n self.trans[char] = 'tao'\n self.trans['特'] = 'te'\n for char in '藤腾疼誊':\n self.trans[char] = 'teng'\n for char in '梯剔踢锑提题蹄啼体替嚏惕涕剃屉':\n self.trans[char] = 'ti'\n for char in '兲天添填田甜恬舔腆':\n self.trans[char] = 'tian'\n for char in '挑条迢眺跳':\n self.trans[char] = 'tiao'\n for char in '贴铁帖':\n self.trans[char] = 'tie'\n for char in '厅听烃汀廷停亭庭挺艇':\n self.trans[char] = 'ting'\n for char in '通桐酮瞳同铜彤童桶捅筒统痛':\n self.trans[char] = 'tong'\n for char in '偷投头透':\n self.trans[char] = 'tou'\n for char in '凸秃突图徒途涂屠土吐兔':\n self.trans[char] = 'tu'\n for char in '湍团':\n self.trans[char] = 'tuan'\n for char in '推颓腿蜕褪退':\n self.trans[char] = 'tui'\n for char in '吞屯臀':\n self.trans[char] = 'tun'\n for char in '拖托脱鸵陀驮驼椭妥拓唾':\n self.trans[char] = 'tuo'\n for char in '挖哇蛙洼娃瓦袜':\n self.trans[char] = 'wa'\n for char in '歪外':\n self.trans[char] = 'wai'\n for char in '豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞':\n self.trans[char] = 'wan'\n for char in '汪王亡枉网往旺望忘妄':\n self.trans[char] = 'wang'\n for char in '威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫':\n self.trans[char] = 'wei'\n for char in '瘟温蚊文闻纹吻稳紊问':\n self.trans[char] = 'wen'\n for char in '嗡翁瓮':\n self.trans[char] = 'weng'\n for char in '挝蜗涡窝我斡卧握沃':\n self.trans[char] = 'wo'\n for char in '巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误':\n self.trans[char] = 'wu'\n for char in ('昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系'\n '隙戏细'):\n self.trans[char] = 'xi'\n for char in '瞎虾匣霞辖暇峡侠狭下厦夏吓':\n self.trans[char] = 'xia'\n for char in '掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线':\n self.trans[char] = 'xian'\n for char in '相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象':\n self.trans[char] = 'xiang'\n for char in '萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效':\n self.trans[char] = 'xiao'\n for char in '楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑':\n self.trans[char] = 'xie'\n for char in '薪芯锌欣辛新忻心信衅':\n self.trans[char] = 'xin'\n for char in '星腥猩惺兴刑型形邢行醒幸杏性姓':\n self.trans[char] = 'xing'\n for char in '兄凶胸匈汹雄熊':\n self.trans[char] = 'xiong'\n for char in '休修羞朽嗅锈秀袖绣':\n self.trans[char] = 'xiu'\n for char in '墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续':\n self.trans[char] = 'xu'\n for char in '轩喧宣悬旋玄选癣眩绚':\n self.trans[char] = 'xuan'\n for char in '靴薛学穴雪血':\n self.trans[char] = 'xue'\n for char in '勋熏循旬询寻驯巡殉汛训讯逊迅':\n self.trans[char] = 'xun'\n for char in '压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶':\n self.trans[char] = 'ya'\n for char in '焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验':\n self.trans[char] = 'yan'\n for char in '殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾':\n self.trans[char] = 'yang'\n for char in '邀腰妖瑶摇尧遥窑谣姚咬舀药要耀':\n self.trans[char] = 'yao'\n for char in '椰噎耶爷野冶也页掖业叶曳腋夜液':\n self.trans[char] = 'ye'\n for char in ('一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿'\n '役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎'):\n self.trans[char] = 'yi'\n for char in '茵荫因殷音阴姻吟银淫寅饮尹引隐印':\n self.trans[char] = 'yin'\n for char in '英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映':\n self.trans[char] = 'ying'\n self.trans['哟'] = 'yo'\n for char in '拥佣臃痈庸雍踊蛹咏泳涌永恿勇用':\n self.trans[char] = 'yong'\n for char in '幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂':\n self.trans[char] = 'you'\n for char in ('淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻'\n '峪御愈欲狱育誉浴寓裕预豫驭'):\n self.trans[char] = 'yu'\n for char in '鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院':\n self.trans[char] = 'yuan'\n for char in '曰约越跃钥岳粤月悦阅':\n self.trans[char] = 'yue'\n 
for char in '耘云郧匀陨允运蕴酝晕韵孕':\n self.trans[char] = 'yun'\n for char in '匝砸杂':\n self.trans[char] = 'za'\n for char in '栽哉灾宰载再在':\n self.trans[char] = 'zai'\n for char in '咱攒暂赞':\n self.trans[char] = 'zan'\n for char in '赃脏葬':\n self.trans[char] = 'zang'\n for char in '遭糟凿藻枣早澡蚤躁噪造皂灶燥':\n self.trans[char] = 'zao'\n for char in '责择则泽':\n self.trans[char] = 'ze'\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in '增憎曾赠':\n self.trans[char] = 'zeng'\n for char in '扎喳渣札轧铡闸眨栅榨咋乍炸诈':\n self.trans[char] = 'zha'\n for char in '摘斋宅窄债寨':\n self.trans[char] = 'zhai'\n for char in '瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽':\n self.trans[char] = 'zhan'\n for char in '樟章彰漳张掌涨杖丈帐账仗胀瘴障':\n self.trans[char] = 'zhang'\n for char in '招昭找沼赵照罩兆肇召':\n self.trans[char] = 'zhao'\n for char in '遮折哲蛰辙者锗蔗这浙':\n self.trans[char] = 'zhe'\n for char in '珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳':\n self.trans[char] = 'zhen'\n for char in '蒸挣睁征狰争怔整拯正政帧症郑证':\n self.trans[char] = 'zheng'\n for char in ('芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置'\n '帜峙制智秩稚质炙痔滞治窒'):\n self.trans[char] = 'zhi'\n for char in '中盅忠钟衷终种肿重仲众':\n self.trans[char] = 'zhong'\n for char in '舟周州洲诌粥轴肘帚咒皱宙昼骤':\n self.trans[char] = 'zhou'\n for char in '珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻':\n self.trans[char] = 'zhu'\n for char in '抓爪':\n self.trans[char] = 'zhua'\n self.trans['拽'] = 'zhuai'\n for char in '专砖转撰赚篆':\n self.trans[char] = 'zhuan'\n for char in '桩庄装妆撞壮状':\n self.trans[char] = 'zhuang'\n for char in '椎锥追赘坠缀':\n self.trans[char] = 'zhui'\n for char in '谆准':\n self.trans[char] = 'zhun'\n for char in '捉拙卓桌琢茁酌啄着灼浊':\n self.trans[char] = 'zhuo'\n for char in '兹咨资姿滋淄孜紫仔籽滓子自渍字':\n self.trans[char] = 'zi'\n for char in '鬃棕踪宗综总纵':\n self.trans[char] = 'zong'\n for char in '邹走奏揍':\n self.trans[char] = 'zou'\n for char in '租足卒族祖诅阻组':\n self.trans[char] = 'zu'\n for char in '钻纂':\n self.trans[char] = 'zuan'\n for char in '嘴醉最罪':\n self.trans[char] = 'zui'\n for char in '尊遵':\n self.trans[char] = 'zun'\n for char in '昨左佐柞做作坐座':\n self.trans[char] = 'zuo'\n # from:\n # https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans['ଂ'] = 'anusvara'\n self.trans['ઇ'] = 'i'\n self.trans['എ'] = 'e'\n self.trans['ગ'] = 'ga'\n self.trans['ਜ'] = 'ja'\n self.trans['ഞ'] = 'nya'\n self.trans['ଢ'] = 'ddha'\n self.trans['ધ'] = 'dha'\n self.trans['ਬ'] = 'ba'\n self.trans['മ'] = 'ma'\n self.trans['ଲ'] = 'la'\n self.trans['ષ'] = 'ssa'\n self.trans['਼'] = 'nukta'\n self.trans['ാ'] = 'aa'\n self.trans['ୂ'] = 'uu'\n self.trans['ે'] = 'e'\n self.trans['ੌ'] = 'au'\n self.trans['ൎ'] = 'reph'\n self.trans['ੜ'] = 'rra'\n self.trans['՞'] = '?'\n self.trans['ୢ'] = 'l'\n self.trans['૧'] = '1'\n self.trans['੬'] = '6'\n self.trans['൮'] = '8'\n self.trans['୲'] = 'quarter'\n self.trans['ൾ'] = 'll'\n self.trans['ਇ'] = 'i'\n self.trans['ഉ'] = 'u'\n self.trans['ઌ'] = 'l'\n self.trans['ਗ'] = 'ga'\n self.trans['ങ'] = 'nga'\n self.trans['ଝ'] = 'jha'\n self.trans['જ'] = 'ja'\n self.trans['؟'] = '?'\n self.trans['ਧ'] = 'dha'\n self.trans['ഩ'] = 'nnna'\n self.trans['ଭ'] = 'bha'\n self.trans['બ'] = 'ba'\n self.trans['ഹ'] = 'ha'\n self.trans['ଽ'] = 'avagraha'\n self.trans['઼'] = 'nukta'\n self.trans['ੇ'] = 'ee'\n self.trans['୍'] = 'virama'\n self.trans['ૌ'] = 'au'\n self.trans['੧'] = '1'\n self.trans['൩'] = '3'\n self.trans['୭'] = '7'\n self.trans['૬'] = '6'\n self.trans['൹'] = 'mark'\n self.trans['ਖ਼'] = 'khha'\n self.trans['ਂ'] = 'bindi'\n self.trans['ഈ'] = 'ii'\n self.trans['ઍ'] = 'e'\n self.trans['ଌ'] = 'l'\n self.trans['ഘ'] = 'gha'\n self.trans['ઝ'] = 'jha'\n self.trans['ଡ଼'] = 'rra'\n self.trans['ਢ'] = 'ddha'\n 
self.trans['ന'] = 'na'\n self.trans['ભ'] = 'bha'\n self.trans['ବ'] = 'ba'\n self.trans['ਲ'] = 'la'\n self.trans['സ'] = 'sa'\n self.trans['ઽ'] = 'avagraha'\n self.trans['଼'] = 'nukta'\n self.trans['ੂ'] = 'uu'\n self.trans['ൈ'] = 'ai'\n self.trans['્'] = 'virama'\n self.trans['ୌ'] = 'au'\n self.trans['൨'] = '2'\n self.trans['૭'] = '7'\n self.trans['୬'] = '6'\n self.trans['ੲ'] = 'iri'\n self.trans['ഃ'] = 'visarga'\n self.trans['ં'] = 'anusvara'\n self.trans['ଇ'] = 'i'\n self.trans['ഓ'] = 'oo'\n self.trans['ଗ'] = 'ga'\n self.trans['ਝ'] = 'jha'\n self.trans['?'] = '?'\n self.trans['ണ'] = 'nna'\n self.trans['ઢ'] = 'ddha'\n self.trans['ଧ'] = 'dha'\n self.trans['ਭ'] = 'bha'\n self.trans['ള'] = 'lla'\n self.trans['લ'] = 'la'\n self.trans['ଷ'] = 'ssa'\n self.trans['ൃ'] = 'r'\n self.trans['ૂ'] = 'uu'\n self.trans['େ'] = 'e'\n self.trans['੍'] = 'virama'\n self.trans['ୗ'] = 'mark'\n self.trans['ൣ'] = 'll'\n self.trans['ૢ'] = 'l'\n self.trans['୧'] = '1'\n self.trans['੭'] = '7'\n self.trans['൳'] = '1/4'\n self.trans['୷'] = 'sixteenths'\n self.trans['ଆ'] = 'aa'\n self.trans['ઋ'] = 'r'\n self.trans['ഊ'] = 'uu'\n self.trans['ਐ'] = 'ai'\n self.trans['ଖ'] = 'kha'\n self.trans['છ'] = 'cha'\n self.trans['ച'] = 'ca'\n self.trans['ਠ'] = 'ttha'\n self.trans['ଦ'] = 'da'\n self.trans['ફ'] = 'pha'\n self.trans['പ'] = 'pa'\n self.trans['ਰ'] = 'ra'\n self.trans['ଶ'] = 'sha'\n self.trans['ഺ'] = 'ttta'\n self.trans['ੀ'] = 'ii'\n self.trans['ો'] = 'o'\n self.trans['ൊ'] = 'o'\n self.trans['ୖ'] = 'mark'\n self.trans['୦'] = '0'\n self.trans['૫'] = '5'\n self.trans['൪'] = '4'\n self.trans['ੰ'] = 'tippi'\n self.trans['୶'] = 'eighth'\n self.trans['ൺ'] = 'nn'\n self.trans['ଁ'] = 'candrabindu'\n self.trans['അ'] = 'a'\n self.trans['ઐ'] = 'ai'\n self.trans['ക'] = 'ka'\n self.trans['ਸ਼'] = 'sha'\n self.trans['ਛ'] = 'cha'\n self.trans['ଡ'] = 'dda'\n self.trans['ઠ'] = 'ttha'\n self.trans['ഥ'] = 'tha'\n self.trans['ਫ'] = 'pha'\n self.trans['ર'] = 'ra'\n self.trans['വ'] = 'va'\n self.trans['ୁ'] = 'u'\n self.trans['ી'] = 'ii'\n self.trans['ੋ'] = 'oo'\n self.trans['ૐ'] = 'om'\n self.trans['ୡ'] = 'll'\n self.trans['ૠ'] = 'rr'\n self.trans['੫'] = '5'\n self.trans['ୱ'] = 'wa'\n self.trans['૰'] = 'sign'\n self.trans['൵'] = 'quarters'\n self.trans['ਫ਼'] = 'fa'\n self.trans['ઁ'] = 'candrabindu'\n self.trans['ਆ'] = 'aa'\n self.trans['ઑ'] = 'o'\n self.trans['ଐ'] = 'ai'\n self.trans['ഔ'] = 'au'\n self.trans['ਖ'] = 'kha'\n self.trans['ડ'] = 'dda'\n self.trans['ଠ'] = 'ttha'\n self.trans['ത'] = 'ta'\n self.trans['ਦ'] = 'da'\n self.trans['ର'] = 'ra'\n self.trans['ഴ'] = 'llla'\n self.trans['ુ'] = 'u'\n self.trans['ୀ'] = 'ii'\n self.trans['ൄ'] = 'rr'\n self.trans['ૡ'] = 'll'\n self.trans['ୠ'] = 'rr'\n self.trans['੦'] = '0'\n self.trans['૱'] = 'sign'\n self.trans['୰'] = 'isshar'\n self.trans['൴'] = '1/2'\n self.trans['ਁ'] = 'bindi'\n self.trans['આ'] = 'aa'\n self.trans['ଋ'] = 'r'\n self.trans['ഏ'] = 'ee'\n self.trans['ખ'] = 'kha'\n self.trans['ଛ'] = 'cha'\n self.trans['ട'] = 'tta'\n self.trans['ਡ'] = 'dda'\n self.trans['દ'] = 'da'\n self.trans['ଫ'] = 'pha'\n self.trans['യ'] = 'ya'\n self.trans['શ'] = 'sha'\n self.trans['ി'] = 'i'\n self.trans['ੁ'] = 'u'\n self.trans['ୋ'] = 'o'\n self.trans['ੑ'] = 'udaat'\n self.trans['૦'] = '0'\n self.trans['୫'] = '5'\n self.trans['൯'] = '9'\n self.trans['ੱ'] = 'addak'\n self.trans['ൿ'] = 'k'\n self.trans['ആ'] = 'aa'\n self.trans['ଊ'] = 'uu'\n self.trans['એ'] = 'e'\n self.trans['ਔ'] = 'au'\n self.trans['ഖ'] = 'kha'\n self.trans['ଚ'] = 'ca'\n self.trans['ટ'] = 'tta'\n self.trans['ਤ'] = 'ta'\n self.trans['ദ'] = 'da'\n 
self.trans['ପ'] = 'pa'\n self.trans['ય'] = 'ya'\n self.trans['ശ'] = 'sha'\n self.trans['િ'] = 'i'\n self.trans['െ'] = 'e'\n self.trans['൦'] = '0'\n self.trans['୪'] = '4'\n self.trans['૯'] = '9'\n self.trans['ੴ'] = 'onkar'\n self.trans['ଅ'] = 'a'\n self.trans['ਏ'] = 'ee'\n self.trans['କ'] = 'ka'\n self.trans['ઔ'] = 'au'\n self.trans['ਟ'] = 'tta'\n self.trans['ഡ'] = 'dda'\n self.trans['ଥ'] = 'tha'\n self.trans['ત'] = 'ta'\n self.trans['ਯ'] = 'ya'\n self.trans['റ'] = 'rra'\n self.trans['ଵ'] = 'va'\n self.trans['ਿ'] = 'i'\n self.trans['ു'] = 'u'\n self.trans['ૄ'] = 'rr'\n self.trans['ൡ'] = 'll'\n self.trans['੯'] = '9'\n self.trans['൱'] = '100'\n self.trans['୵'] = 'sixteenth'\n self.trans['અ'] = 'a'\n self.trans['ਊ'] = 'uu'\n self.trans['ഐ'] = 'ai'\n self.trans['ક'] = 'ka'\n self.trans['ଔ'] = 'au'\n self.trans['ਚ'] = 'ca'\n self.trans['ഠ'] = 'ttha'\n self.trans['થ'] = 'tha'\n self.trans['ତ'] = 'ta'\n self.trans['ਪ'] = 'pa'\n self.trans['ര'] = 'ra'\n self.trans['વ'] = 'va'\n self.trans['ീ'] = 'ii'\n self.trans['ૅ'] = 'e'\n self.trans['ୄ'] = 'rr'\n self.trans['ൠ'] = 'rr'\n self.trans['ਜ਼'] = 'za'\n self.trans['੪'] = '4'\n self.trans['൰'] = '10'\n self.trans['୴'] = 'quarters'\n self.trans['ਅ'] = 'a'\n self.trans['ഋ'] = 'r'\n self.trans['ઊ'] = 'uu'\n self.trans['ଏ'] = 'e'\n self.trans['ਕ'] = 'ka'\n self.trans['ഛ'] = 'cha'\n self.trans['ચ'] = 'ca'\n self.trans['ଟ'] = 'tta'\n self.trans['ਥ'] = 'tha'\n self.trans['ഫ'] = 'pha'\n self.trans['પ'] = 'pa'\n self.trans['ଯ'] = 'ya'\n self.trans['ਵ'] = 'va'\n self.trans['ି'] = 'i'\n self.trans['ോ'] = 'oo'\n self.trans['ୟ'] = 'yya'\n self.trans['൫'] = '5'\n self.trans['૪'] = '4'\n self.trans['୯'] = '9'\n self.trans['ੵ'] = 'yakash'\n self.trans['ൻ'] = 'n'\n self.trans['ઃ'] = 'visarga'\n self.trans['ം'] = 'anusvara'\n self.trans['ਈ'] = 'ii'\n self.trans['ઓ'] = 'o'\n self.trans['ഒ'] = 'o'\n self.trans['ਘ'] = 'gha'\n self.trans['ଞ'] = 'nya'\n self.trans['ણ'] = 'nna'\n self.trans['ഢ'] = 'ddha'\n self.trans['ਲ਼'] = 'lla'\n self.trans['ਨ'] = 'na'\n self.trans['ମ'] = 'ma'\n self.trans['ળ'] = 'lla'\n self.trans['ല'] = 'la'\n self.trans['ਸ'] = 'sa'\n self.trans['¿'] = '?'\n self.trans['ା'] = 'aa'\n self.trans['ૃ'] = 'r'\n self.trans['ൂ'] = 'uu'\n self.trans['ੈ'] = 'ai'\n self.trans['ૣ'] = 'll'\n self.trans['ൢ'] = 'l'\n self.trans['੨'] = '2'\n self.trans['୮'] = '8'\n self.trans['൲'] = '1000'\n self.trans['ਃ'] = 'visarga'\n self.trans['ଉ'] = 'u'\n self.trans['ઈ'] = 'ii'\n self.trans['ਓ'] = 'oo'\n self.trans['ଙ'] = 'nga'\n self.trans['ઘ'] = 'gha'\n self.trans['ഝ'] = 'jha'\n self.trans['ਣ'] = 'nna'\n self.trans['ન'] = 'na'\n self.trans['ഭ'] = 'bha'\n self.trans['ଜ'] = 'ja'\n self.trans['ହ'] = 'ha'\n self.trans['સ'] = 'sa'\n self.trans['ഽ'] = 'avagraha'\n self.trans['ૈ'] = 'ai'\n self.trans['്'] = 'virama'\n self.trans['୩'] = '3'\n self.trans['૨'] = '2'\n self.trans['൭'] = '7'\n self.trans['ੳ'] = 'ura'\n self.trans['ൽ'] = 'l'\n self.trans['ઉ'] = 'u'\n self.trans['ଈ'] = 'ii'\n self.trans['ഌ'] = 'l'\n self.trans['ઙ'] = 'nga'\n self.trans['ଘ'] = 'gha'\n self.trans['ജ'] = 'ja'\n self.trans['ਞ'] = 'nya'\n self.trans['ନ'] = 'na'\n self.trans['ബ'] = 'ba'\n self.trans['ਮ'] = 'ma'\n self.trans['હ'] = 'ha'\n self.trans['ସ'] = 'sa'\n self.trans['ਾ'] = 'aa'\n self.trans['ૉ'] = 'o'\n self.trans['ୈ'] = 'ai'\n self.trans['ൌ'] = 'au'\n self.trans['૩'] = '3'\n self.trans['୨'] = '2'\n self.trans['൬'] = '6'\n self.trans['੮'] = '8'\n self.trans['ർ'] = 'rr'\n self.trans['ଃ'] = 'visarga'\n self.trans['ഇ'] = 'i'\n self.trans['ਉ'] = 'u'\n self.trans['ଓ'] = 'o'\n self.trans['ഗ'] = 'ga'\n 
self.trans['ਙ'] = 'nga'\n self.trans['ઞ'] = 'nya'\n self.trans['ଣ'] = 'nna'\n self.trans['ധ'] = 'dha'\n self.trans['મ'] = 'ma'\n self.trans['ଳ'] = 'lla'\n self.trans['ഷ'] = 'ssa'\n self.trans['ਹ'] = 'ha'\n self.trans['ਗ਼'] = 'ghha'\n self.trans['ા'] = 'aa'\n self.trans['ୃ'] = 'r'\n self.trans['േ'] = 'ee'\n self.trans['ൗ'] = 'mark'\n self.trans['ଢ଼'] = 'rha'\n self.trans['ୣ'] = 'll'\n self.trans['൧'] = '1'\n self.trans['੩'] = '3'\n self.trans['૮'] = '8'\n self.trans['୳'] = 'half'\n for char in self.trans:\n value = self.trans[char]\n if value == '?':\n continue\n while (value.encode(encoding, 'replace').decode(encoding) == '?'\n and value in self.trans):\n assert value != self.trans[value], \\\n '{!r} == self.trans[{!r}]!'.format(value, value)\n value = self.trans[value]\n self.trans[char] = value", "def unknown_charset(self, charset):\n return 'latin1'", "def setdefaultencoding(name):\n\tpass", "def getdefaultencoding():\n\tpass", "def test_encoding_ascii(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon-sample-1000.csv'))\n self.assertEqual(guessed_encoding.lower(), u'ascii')", "def GetEncoding(self):\n return self.GetStringSelection()", "def encodeString():\n pass", "def getISO639_1Code(self): #$NON-NLS-1$\r", "def test_encoding_amazon_de_reviews_is_utf8(self):\n guessed_encoding = detect_encoding(\n os.path.join(TEST_DATA_DIR, 'amazon_de_reviews_200.csv'))\n self.assertEqual(guessed_encoding.lower(), u'utf-8')", "def __get_encoding(self):\r\n if self.__encoding is not None:\r\n return self.__encoding\r\n return 'utf-8'", "def get_encoding(self, filename):\n # Read encoding from shapefile cpg and cst file.\n filepath = self.get_path(filename)\n encoding = None\n for extension in ['.cpg', '.cst']:\n encoding_filepath = os.path.splitext(filepath)[0] + extension\n try:\n with open(encoding_filepath) as encoding_file:\n encoding = encoding_file.read().strip()\n logger.debug(\"%s file reported %s encoding: %s\"\n % (extension, encoding, filename))\n break\n except IOError:\n continue\n\n if not encoding or encoding.lower() == \"system\":\n # No encoding found. Fall back to LATIN1.\n encoding = \"LATIN1\"\n logger.debug(\"Assuming %s attribute encoding: %s\"\n % (encoding, filename))\n\n return encoding", "def get_encoding_string(self):\n\t\treturn driver_h.FORMAT[self.encoding]", "def _unicode_encode(self, value):\n splits = self.high_codepoints_re.split(value)\n enc_value = b''\n str_len = 0\n for s in splits:\n if self.high_codepoints_re.match(s):\n str_len += 2\n enc_value += self._encode_to_surrogate_pair(s)\n else:\n str_len += len(s)\n enc_value += s.encode('utf-8')\n return str_len, enc_value", "def canonical_charset( charset ):\n # It would be nice to use Python's codecs modules for this, but\n # there is no fixed public interface to it's alias mappings.\n if not charset:\n return charset\n uc = charset.upper()\n uccon = character_set_aliases.get( uc, uc )\n return uccon", "def normalize_codec_name(chardet_name):\n\n python_name = chardet_name.lower().replace('iso-', 'iso').replace('-', '_')\n python_name = codecs.lookup(python_name).name\n\n # Since chardet only recognized all GB-based target_encoding as 'gb2312', the decoding will fail when the text file\n # contains certain special charaters. 
To make it more special-character-tolerant, we should\n # upgrade the target_encoding to 'gb18030', which is a character set larger than gb2312.\n if python_name == 'gb2312':\n return 'gb18030'\n\n return python_name", "def get_encoding(str):\n lookup = ('utf_8', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213',\n 'shift_jis', 'shift_jis_2004','shift_jisx0213',\n 'iso2022jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_3',\n 'iso2022_jp_ext','latin_1', 'ascii')\n for encoding in lookup:\n try:\n str = str.decode(encoding)\n return encoding\n except:\n pass\n return None", "def cencode(text):\n return _encode(text)[0]", "def convert(self, s):\r\n if self.input_codec <> self.output_codec:\r\n return unicode(s, self.input_codec).encode(self.output_codec)\r\n else:\r\n return s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Codec can be retrieved by magic id3v2.3 encoding number
def test_get_by_magic_number(self): self.assertEqual(Codec.get(0), Latin1Codec()) self.assertEqual(Codec.get(1), UTF16Codec()) self.assertEqual(Codec.get(2), UTF16BECodec()) self.assertEqual(Codec.get(3), UTF8Codec())
[ "def get_data_encoding():", "def codecTag(self):\n codec_t = None\n if 'codec_tag_string' in self.__dict__:\n codec_t = self.__dict__['codec_tag_string']\n return codec_t", "def get_id_from_enc(encoding_list_tuple, encoding):\n for i in encoding_list_tuple:\n if i[0] == list(encoding):\n return i[1]\n return None", "def get_in_codec(file: str)-> str:\n # https://github.com/Ch00k/ffmpy/blob/master/docs/examples.rst\n p_command = \"-v error -select_streams a:0 -show_entries stream=codec_name -of default=noprint_wrappers=1:nokey=1\"\n probe = ffmpy.FFprobe(inputs={file: p_command})\n return (probe.run(stdout=subprocess.PIPE))[0].strip()", "def get_encoding_string(self):\n\t\treturn driver_h.FORMAT[self.encoding]", "def getIdBy3LetterCode(ISO3):\n return countriesBy3LetterCode[ISO3].id", "def get3LetterCodeById(id):\n return countriesById[int(id)].ISO3", "def get_encoding(self, idx: int):\r\n return np.load(self.path_to_encodings / (str(idx) + '.npz'))['arr_0']", "def _apienc(self):\n return self.parameters.read.apiencoding.get()", "def lookup_codec(space, encoding):\n assert not (space.config.translating and not we_are_translated()), \\\n \"lookup_codec() should not be called during translation\"\n state = space.fromcache(CodecState)\n normalized_encoding = encoding.replace(\" \", \"-\").lower()\n w_result = state.get_codec_from_cache(normalized_encoding)\n if w_result is not None:\n return w_result\n return _lookup_codec_loop(space, encoding, normalized_encoding)", "def _readId3v1(self):\n self.file.seek(-128, 2)\n tag = self.file.read(128)\n if len(tag) != 128:\n return\n if tag[0:3] != 'TAG':\n return\n self.header = _Header()\n self.header.majorVersion = 1\n self.header.revision = 0\n\n self._addV1Frame('v1title', tag[3:33])\n self._addV1Frame('v1performer', tag[33:63])\n self._addV1Frame('v1album', tag[63:93])\n self._addV1Frame('v1year', tag[93:97])\n self._addV1Frame('v1comment', tag[97:127])\n self._addV1Frame('v1genre', tag[127])\n if tag[125] == '\\0' and tag[126] != '\\0':\n #if _c: _coverage('id3v1.1')\n self.header.revision = 1\n self._addV1Frame('v1track', str(ord(tag[126])))\n else:\n #if _c: _coverage('id3v1.0')\n pass\n return", "def register_codec():\n def inner_register(encoding):\n if encoding != 'cly':\n return None\n return (_encode, _decode, _CodecStreamReader, _CodecStreamWriter)\n return codecs.register(inner_register)", "def get_codec(name: str) -> Union[JsonCodec, StructCodec]:\n if name == 'json':\n return JsonCodec()\n elif name == 'struct':\n return StructCodec()\n else:\n raise KeyError(name)", "def getLibIdVal(value):\n return (int(value) & 0xFF000) >> 12", "def get_encoding(byte_string):\n return detect(byte_string)['encoding']", "def get_codec_rank(self) -> int:\n return Release.get_codec_rank_static(self.get_release_codec_setting())", "def get_codec(chash):\n\n buffer = multihash.from_hex_string(chash.lstrip('0x'))\n return multicodec.get_codec(buffer)", "def normalize_codec_name(chardet_name):\n\n python_name = chardet_name.lower().replace('iso-', 'iso').replace('-', '_')\n python_name = codecs.lookup(python_name).name\n\n # Since chardet only recognized all GB-based target_encoding as 'gb2312', the decoding will fail when the text file\n # contains certain special charaters. 
To make it more special-character-tolerant, we should\n # upgrade the target_encoding to 'gb18030', which is a character set larger than gb2312.\n if python_name == 'gb2312':\n return 'gb18030'\n\n return python_name", "def _encode_field_id(field_header: FieldHeader) -> bytes:\n type_code = field_header.type_code\n field_code = field_header.field_code\n\n if not 0 < field_code <= 255 or not 0 < type_code <= 255:\n raise XRPLBinaryCodecException(\"Codes must be nonzero and fit in 1 byte.\")\n\n if type_code < 16 and field_code < 16:\n # high 4 bits is the type_code\n # low 4 bits is the field code\n combined_code = (type_code << 4) | field_code\n return _uint8_to_bytes(combined_code)\n if type_code >= 16 and field_code < 16:\n # first 4 bits are zeroes\n # next 4 bits is field code\n # next byte is type code\n byte1 = _uint8_to_bytes(field_code)\n byte2 = _uint8_to_bytes(type_code)\n return byte1 + byte2\n if type_code < 16 and field_code >= 16:\n # first 4 bits is type code\n # next 4 bits are zeroes\n # next byte is field code\n byte1 = _uint8_to_bytes(type_code << 4)\n byte2 = _uint8_to_bytes(field_code)\n return byte1 + byte2\n else: # both are >= 16\n # first byte is all zeroes\n # second byte is type code\n # third byte is field code\n byte2 = _uint8_to_bytes(type_code)\n byte3 = _uint8_to_bytes(field_code)\n return bytes(1) + byte2 + byte3" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the turnRestrictOff override, return control to the Xset flag.
def _resetRestrict(self): self.__tmpRemoveRestrict = False
[ "def control_points_off(self):\n self.object.GripsOn = False\n sc.doc.Views.Redraw()", "def eval_off(target):\n target.power.off()", "def toggle_recharge_off(self):\n self.will_recharge = False", "def undefine(self):\n ret = libvirtmod.virNWFilterUndefine(self._o)\n if ret == -1: raise libvirtError ('virNWFilterUndefine() failed')\n return ret", "def unflag(self):\n self._flagged = False", "def turn_off(self):\n self.status = False", "def clear_global_fpr_filter_set_override(self):\n return self._request('DELETE', 'rest/defaults/fprfilterset')", "def dip_reset(force):\n if force:\n settings.reset()", "def unsetter(xconfig):\n infile = xconfig.run['infile']\n dlist = list(fsutil.xattr_get(infile))\n logthis(\"Removing overrides:\\n\", suffix=print_r(dlist), loglevel=LL.VERBOSE)\n fsutil.xattr_del(infile, dlist)\n logthis(\"Overrides cleared.\", ccode=C.GRN, loglevel=LL.INFO)\n return 0", "def off(self):\n self._system_on = False", "def TurnOff(self, flag_name):\n flag = self.flags.get(flag_name)\n if flag is None:\n return\n\n flag.TurnOff()", "def off(self):\n sdk.SetEMCCDGain(0)\n self._switch = False", "def turn_off_gripper(self):\n try:\n turn_off_gripper_service = rospy.ServiceProxy('/pickbot/gripper/control', VacuumGripperControl)\n enable = False\n turn_off_gripper_service(enable)\n except rospy.ServiceException as e:\n rospy.loginfo(\"Turn off Gripper service call failed: {0}\".format(e))", "def turn_away_mode_off(self) -> None:\n raise NotImplementedError()", "def setNonExpert(self):\n self._expert = False\n if self._active and self.run_type_set:\n self.enable()\n else:\n self.disable()", "def turn_off(self):\n self.data.switch_off()", "def turn_off(self) -> None:\n self.delayed_turn_on = None\n\n if not Network.is_someone_home():\n super().turn_off()", "def remove_heater_override():\n global heater_override \n heater_override = False", "def set_spotlight_off(self):\n return self._set_spotlight_properties({\"enabled\": False})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restore all XSPEC contexts to singleprocess execution.
def reset(self): self.leven = 1 self.error = 1 msgStr = "All parallel contexts are now reset to single-process \ execution." print(msgStr) if Xset.log is not None: print(msgStr,file=Xset.log)
[ "def teardown_workflow(self):\n del self.sca_preproc", "def resetContexts(parts):\n\tfor part in parts:\n\t\tfor item in part:\n\t\t\tresetContext(item)", "def reset_context():\n global _model\n _model = None", "def reset(self):\n try:\n for module in self.modules:\n if hasattr(module, 'terminate'): \n module.terminate(self)\n except:\n print >> sys.stderr, \"No running modules found, resetting..\"\n\n self.reset_db()\n self.setup()", "def tear_down_simple_feature_test(ns):\n #import shutil\n #shutil.rmtree(ns.temp_store)\n ns.session.close()", "def reset_env(self) -> None:\n self._obs = self.env.reset()\n if not self._multi_env:\n self._obs = self._make_batch(self._obs)\n if self.preprocess_fn:\n self._obs = self.preprocess_fn(obs=self._obs).get('obs', self._obs)\n self._act = self._rew = self._done = self._info = None\n if self._multi_env:\n self.reward = np.zeros(self.env_num)\n self.length = np.zeros(self.env_num)\n else:\n self.reward, self.length = 0, 0\n for b in self._cached_buf:\n b.reset()", "def tearDown(self):\n super().tearDown()\n self.reset_environment()", "def test_clear_context_with_init_app():\n # @TODO: Bring over the tail end of test_multiple_apps to test this\n pytest.fail()", "def tear_down_parallel_feature_test(ns):\n tear_down_simple_feature_test(ns)", "def no_envs():\n # Remove the original variables from `os.environ`.\n # Store the original `os.environ`.\n os_environ_original = os.environ.copy()\n modified_environ = {key: value for key, value in os.environ.items()\n if key not in VARIABLES_MAP.keys()}\n os.environ = modified_environ\n yield\n # Restore the original `os.environ`.\n os.environ = os_environ_original", "def stacked(context):\r\n context.save()\r\n try:\r\n yield\r\n finally:\r\n context.restore()", "def test_restore_run(self):\n pass", "def reset(self):\n self.state = self.env.reset()", "def test_api_context_restore(self) -> None:\n\n self._API_CONTEXT.save(self._TMP_FILE_PATH_FULL)\n api_context_restored = ApiContext.restore(self._TMP_FILE_PATH_FULL)\n\n os.remove(self._TMP_FILE_PATH_FULL)\n\n self.assertEqual(api_context_restored, self._API_CONTEXT)", "def keep_system_active():\n if os.name == 'nt':\n windll.kernel32.SetThreadExecutionState(ES_CONTINUOUS | ES_SYSTEM_REQUIRED)", "def clean_context() -> t.Generator[contextlib.ExitStack, None, None]:\n stack = contextlib.ExitStack()\n yield stack\n stack.close()", "def test_reset_treatment(self):\n\tprint()\n\tprint('*** TEST TRE - test_reset_treatment')\n\n\t# Consultation\n\tself.consultation_ids.unlink()\n\n\t# Recos\n\tself.service_ids.unlink()\n\t#self.service_all_ids.unlink()\t# Dep\n\n\t# Procedures\n\tself.procedure_ids.unlink()\n\tself.session_ids.unlink()\n\tself.control_ids.unlink()\n\n\t# Alta\n\tself.treatment_closed = False\n\n\t# Orders - Do not keep them !\n\tfor order in self.order_ids:\n\t\torder.remove_myself_force()", "def restore(self, fileName):\n if isinstance(fileName, str):\n savedFile = open(fileName,'r')\n # check for PyXspec indicator in first line\n isPyXspecSave = savedFile.readline().startswith('#PyXspec')\n warningMsg =\"\\n***Warning: The file sent to Xset.restore(): \"+fileName\n warningMsg += \"\\n is not detected to be a file generated from Xset.save().\"\n warningMsg += \"\\n Xset.restore() usage is only intended for Xset.save() output.\"\n warningMsg += \"\\n General XSPEC/Tcl scripts may not fully execute in PyXspec.\\n\\n\"\n # reset back to start of file\n savedFile.seek(0)\n processingModelCmd = False\n try:\n for line in savedFile:\n if line.strip() and 
line.lstrip()[0] != '#':\n startNextCmd = False\n lineArgs = line.split()\n if processingModelCmd:\n isNum = False\n try:\n float(lineArgs[0])\n isNum = True\n except Exception:\n pass\n if (isNum or lineArgs[0][0] == '='or lineArgs[0][0] == '/'): \n fullModelCmd.append('&')\n fullModelCmd += lineArgs\n else:\n # Finished gathering args for previous model command\n _pyXspec.doXspecCmd(fullModelCmd)\n processingModelCmd = False\n startNextCmd = True\n else:\n startNextCmd = True\n\n if startNextCmd:\n if lineArgs[0] in 'model':\n processingModelCmd = True\n fullModelCmd = lineArgs\n elif lineArgs[0] in 'data':\n _pyXspec.dataCmd(lineArgs[1:])\n elif (lineArgs[0] in 'newpar' or lineArgs[0] in 'rnewpar'):\n isRespPar = 1 if lineArgs[0][0] == 'r' else 0\n singleStrArgs = str()\n for arg in lineArgs[1:]:\n singleStrArgs += arg\n singleStrArgs += ' '\n singleStrArgs = singleStrArgs.rstrip() \n _pyXspec.newparCmd(singleStrArgs, isRespPar)\n else:\n _pyXspec.doXspecCmd(lineArgs)\n # Check if we've reached the end with a model command still pending.\n if processingModelCmd:\n _pyXspec.doXspecCmd(fullModelCmd)\n if not isPyXspecSave:\n print(warningMsg)\n if Xset.log is not None:\n print(warningMsg,file=Xset.log)\n except Exception:\n savedFile.close()\n if not isPyXspecSave:\n print(warningMsg)\n if Xset.log is not None:\n print(warningMsg,file=Xset.log)\n raise\n \n savedFile.close()\n else:\n raise Exception(\"Error: restore argument must be a file name string.\")", "def restore_all(self):\n for r in self.saved:\n self.restore(r)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a key,value pair of strings to XSPEC's internal database. This database provides a way to pass string values to certain model functions which are hardcoded to search for "key". (See the XSPEC manual description for the "xset" command for a table showing model/key usage.) If the key,value pair already exists, it will be replaced with the new entries.
def addModelString(self, key, value): if isinstance(key,str) and isinstance(value,str): # User should not have entered whitespace in key or value, # but use split() to be sure. modStringArgs = ["xset"] modStringArgs += key.split() modStringArgs += value.split() _pyXspec.doXspecCmd(modStringArgs) else: raise Exception("Error: addModelString requires 2 string arguments.")
[ "def __setitem__(self, key, value):\n query = self.store.update().where(self.store.c.key == key).values(value=value)\n result = self.conn.execute(query)\n if result.rowcount == 0:\n query = self.store.insert().values(key=key, value=value)\n result = self.conn.execute(query)", "def add(self, key, value):\n self.__dataset[key] = value", "def add(self, k: str, v: str):\n\n if k in self.__cache_dict and self.__cache_dict[k][1]:\n self.__change_set.add(k)\n elif k in self.__delete_set:\n self.__delete_set.remove(k)\n self.__change_set.add(k)\n self.__cache_dict[k] = (v, False)", "def set(self, key: str, value: str) -> None:\n new_val = String(value)\n check_type(new_val, DataType.STR)\n self.storage[key] = new_val", "def sadd(self, key: str, *args) -> None:\n cur_val = self.__get_key(key)\n if cur_val is None:\n self.storage[key] = Set(set(args))\n return\n\n check_type(cur_val, DataType.SET)\n\n # Add the values to the set\n self.storage[key].data.update(args)", "def add_entry_to_database(value_fields_dict):\n session = Session()\n try:\n new_entry = Catalog(value_fields_dict)\n session.add(new_entry)\n session.commit()\n except (IntegrityError, UniqueViolation):\n #There is already an entry in the database\n pass\n finally:\n session.close()", "def add(self, item, key=lambda x: x):\n item_key = key(item)\n assert isinstance(item_key, str)\n\n table_item = self.table.get(item_key)\n\n table_hash = None\n if table_item is None:\n item_hash = hashlib.sha256(key(item).encode('utf-8') + self.salt).hexdigest()\n else:\n item_hash = table_item[0]\n table_item = (item_hash, item)\n self.table[key(item)] = table_item\n self.set.add(table_item)", "def add(self, key, value):\n\n assert isinstance(key, bytes_type)\n assert isinstance(value, bytes_type)\n\n dbfile = self.dbfile\n pos = dbfile.tell()\n dbfile.write(_lengths.pack(len(key), len(value)))\n dbfile.write(key)\n dbfile.write(value)\n\n # Get hash value for the key\n h = self.hashfn(key)\n # Add hash and on-disk position to appropriate bucket\n self.buckets[h & 255].append((h, pos))", "def store(key: str, value: object, catname: str=''):", "def append(self, key, value):\r\n return self.execute_command(\"APPEND\", key, value)", "def store(self, key, value):\n self._store.session[key] = value\n self.commit()", "def __setitem__(self, key, value):\n k = self._lowerOrReturn(key)\n self.data[k] = (key, value)", "def safe_insert(key, value, my_dict):\r\n return", "def add_kv_store(self, key, value):\n data = {\n 'operation': 'STORE',\n 'key': key,\n 'value': value\n }\n return self.post(self.make_url(\"/useragent-kv\"), data=to_json(data),\n headers=self.default_headers).text", "def put(self, k: str, v):\n pass", "def set(self, key, data):\n self._data[key] = data\n self._data.commit()", "def set(self, key, value):\n try:\n logging.debug(\n 'INSERT {{ task: {task}, _key: \"{key}\" }} INTO {collection}'\n .format(\n collection=self.collection, key=key, task=value\n )\n )\n self.db.AQLQuery(\n 'INSERT {{ task: {task}, _key: \"{key}\" }} INTO {collection}'\n .format(\n collection=self.collection, key=key, task=value\n )\n )\n except AQLQueryError as aql_err:\n logging.error(aql_err)\n except Exception as err:\n logging.error(err)", "def setitem(self, key, value):", "def addData(self, table, key, value):\n raise NotImplementedException()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close XSPEC's current log file.
def closeLog(self): _pyXspec.closeLog()
[ "def log_close(self):\n self._logfile.close()", "def close_log(self):\n if self._logFile is not None:\n self._logFile.close()\n self._logFile = None", "def closeLog() :\n global messageLog\n messageLog.close()", "def close_file():\n lib_close.close_file()", "def close(self):\n \n self.__fh.close()", "def close(self):\n self._output_fh.close()", "def closeScript() :\n global scriptLog\n file = scriptLog.name\n scriptLog.close()\n return file", "def close_failure_file():\n logger = logging.getLogger('failures')\n if not logger.handlers:\n return\n handler = logger.handlers[0]\n logger.removeHandler(handler)\n handler.close()\n\n file_path = handler.baseFilename\n gz_path = handler.baseFilename + '.gz'\n logger.error('Compressing log report to {}'.format(gz_path))\n\n with ExitStack() as stack:\n fin = stack.enter_context(open(file_path, 'rb'))\n fout = stack.enter_context(gzip.open(gz_path, 'wb'))\n while True:\n b = fin.read(100 * 1024)\n if not len(b):\n break\n fout.write(b)\n fout.flush()\n\n os.remove(file_path)", "def _close_file(self):\n self._iostream.close()\n self._fileobj.close()", "def remove_file_logger():\n h = get_current_logfile_handler()\n if h is not None:\n h.close()\n nox_logger.removeHandler(h)", "def disableFileLogging(self):\r\n if self.logFile != None:\r\n self.logFile.close()\r\n self.logFile = None", "def closeFile():\n\n\tglobal statusFileHandler\n\tstatusFileHandler.close()\n\t# NOTE - using print() instead of printWithTime(), as file handler\n\t# closed on previous statement.\n\tprint (\"Server status file : CLOSED.\")", "def close_file(self):\n self.hdf.close()", "def close(self) -> None:\n utils.remove_handlers_from_logger(self.logger)\n self.metrics.close()", "def close (self):\r\n\r\n self.file.close ()\r\n self.file = None\r\n self.filename = None\r\n self.current_line = None", "def Close(self):\n label = self._UniqueLabel()\n self._WriteCode('(%s), @%s, 0;JMP' % (label, label))\n self.file.close()", "def close(self):\n print((\"Closing exodus file: \" + self.fileName))\n errorInt = EXODUS_LIB.ex_close(self.fileId)\n if errorInt != 0:\n raise Exception(\n \"ERROR: Closing file \" +\n self.fileName +\n \" had problems.\")", "def close_file(self):\n self.file_handler.close()", "def close(self):\n self._fileobj.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restore the data/model configuration and settings. This will restore the data, models, and settings from a previous PyXspec session, as saved in file generated by the Xset.save() function.
def restore(self, fileName): if isinstance(fileName, str): savedFile = open(fileName,'r') # check for PyXspec indicator in first line isPyXspecSave = savedFile.readline().startswith('#PyXspec') warningMsg ="\n***Warning: The file sent to Xset.restore(): "+fileName warningMsg += "\n is not detected to be a file generated from Xset.save()." warningMsg += "\n Xset.restore() usage is only intended for Xset.save() output." warningMsg += "\n General XSPEC/Tcl scripts may not fully execute in PyXspec.\n\n" # reset back to start of file savedFile.seek(0) processingModelCmd = False try: for line in savedFile: if line.strip() and line.lstrip()[0] != '#': startNextCmd = False lineArgs = line.split() if processingModelCmd: isNum = False try: float(lineArgs[0]) isNum = True except Exception: pass if (isNum or lineArgs[0][0] == '='or lineArgs[0][0] == '/'): fullModelCmd.append('&') fullModelCmd += lineArgs else: # Finished gathering args for previous model command _pyXspec.doXspecCmd(fullModelCmd) processingModelCmd = False startNextCmd = True else: startNextCmd = True if startNextCmd: if lineArgs[0] in 'model': processingModelCmd = True fullModelCmd = lineArgs elif lineArgs[0] in 'data': _pyXspec.dataCmd(lineArgs[1:]) elif (lineArgs[0] in 'newpar' or lineArgs[0] in 'rnewpar'): isRespPar = 1 if lineArgs[0][0] == 'r' else 0 singleStrArgs = str() for arg in lineArgs[1:]: singleStrArgs += arg singleStrArgs += ' ' singleStrArgs = singleStrArgs.rstrip() _pyXspec.newparCmd(singleStrArgs, isRespPar) else: _pyXspec.doXspecCmd(lineArgs) # Check if we've reached the end with a model command still pending. if processingModelCmd: _pyXspec.doXspecCmd(fullModelCmd) if not isPyXspecSave: print(warningMsg) if Xset.log is not None: print(warningMsg,file=Xset.log) except Exception: savedFile.close() if not isPyXspecSave: print(warningMsg) if Xset.log is not None: print(warningMsg,file=Xset.log) raise savedFile.close() else: raise Exception("Error: restore argument must be a file name string.")
[ "def btnRestoreClicked(self):\n pyzo.resetConfig()\n shutil.copyfile(self.backup_file, self.conf_file)\n pyzo.main.restart()", "def _restore(self):\n\n # check restore\n if not self.restore:\n return\n\n # restore\n settings = self._settings\n settings.data_format = self.data_format\n settings.byte_order = self.byte_order\n self.restore = False", "def restore_config():\n global sg_kwargs\n sg_kwargs = kwargs_backup.copy()\n backup_filepath.replace(Package.config_filepath)\n print(\"\\n ⓘ Original config.json restored.\")", "def restore_all(self):\n for r in self.saved:\n self.restore(r)", "def test_save_restore(self):\n with tempfile.TemporaryDirectory(prefix=\"phd_\") as d:\n tempdir = pathlib.Path(d)\n\n model_to_file = self.model_class(**self.model_init_opts)\n model_to_file.init(0, self.atomizer)\n model_to_file.save(tempdir / \"model\")\n\n model_from_file = self.model_class(**self.model_init_opts)\n model_from_file.restore(tempdir / \"model\")\n # We can't test that restoring the model from file actually does anything,\n # since we don't have __eq__ operator implemented for models.", "def restore_state(self):\n self._restore_input()\n self._restore_output()", "def autoSaveRestore(): \n try:\n epics.autosave.restore_pvs(\"auto_settings.sav\")\n except:\n pass\n while True: \n time.sleep(AUTOSAVE_SLEEP_TIME)\n try:\n epics.autosave.save_pvs(\"auto_settings.req\", \"auto_settings.sav\")\n except:\n pass", "def reset(self):\n\n self.model.load_state_dict(self.retrieve(\"model\"))\n self.optimizer.load_state_dict(self.retrieve(\"optimizer\"))\n self.model.to(self.model_device)", "def load_and_restore_model(self, session):\n if self.settings_object.model_path:\n pt(\"Restoring model...\", self.settings_object.model_path)\n try:\n # TODO (@gabvaztor) Do Generic possibles models\n model_possible_1 = self.settings_object.model_path + \"model\" + Dictionary.string_ckpt_extension\n model_possible_2 = model_possible_1 + Dictionary.string_meta_extension\n model_possible_3 = model_possible_1 + Dictionary.string_ckpt_extension\n model_possible_4 = model_possible_3 + Dictionary.string_meta_extension\n possibles_models = [model_possible_1, model_possible_2, model_possible_3, model_possible_4]\n model = [x for x in possibles_models if file_exists_in_path_or_create_path(x)]\n if model:\n saver = tf.train.import_meta_graph(model[0])\n # Restore variables from disk.\n saver.restore(session, model_possible_1)\n pt(\"Model restored without problems\")\n else:\n if self.ask_to_continue_creating_model_without_exist:\n response = recurrent_ask_to_continue_without_load_model()\n if not response:\n raise Exception()\n else:\n pt(\"The model won't load because it doesn't exist\",\n \"You chose 'continue_creating_model_without_exist\")\n except Exception as e:\n pt(Errors.error, e)\n raise Exception(Errors.error + \" \" + Errors.can_not_restore_model)", "def restore(self, model_dir, meta_graph=None, model_name=None, random_seed=None):\n model_dir, model_path = get_model_dir_and_path(model_dir, model_name)\n self.model_path = model_path\n\n frozen_graph_file = '%s.pb' % model_path\n if os.path.exists(frozen_graph_file):\n print('Loading from frozen_graph', frozen_graph_file, file=sys.stderr)\n frozen_map_file = '%s.map' % model_path\n return self.load_graph(frozen_graph_file, self.frozen_graph_name, frozen_map_file=frozen_map_file)\n\n if meta_graph is None:\n meta_graph = '%s.meta' % model_path\n assert os.path.exists(meta_graph), 'no pb and meta_graph: %s' % model_path\n 
##https://github.com/tensorflow/tensorflow/issues/4603\n #https://stackoverflow.com/questions/37649060/tensorflow-restoring-a-graph-and-model-then-running-evaluation-on-a-single-imag\n with self.sess.graph.as_default():\n timer = gezi.Timer(f'Restoring {model_path}', print_fn=logging.info)\n saver = tf.compat.v1.train.import_meta_graph(meta_graph)\n saver.restore(self.sess, model_path)\n timer.print()\n try:\n self.sess.run(tf.compat.v1.tables_initializer())\n except Exception:\n pass\n\n if random_seed is not None:\n tf.compat.v1.set_random_seed(random_seed)\n\n #---so maybe do not use num_epochs or not save num_epochs variable!!!! can set but input producer not use, stop by my flow loop\n #---TODO not work remove can run but hang FIXME add predictor + exact_predictor during train will face\n #@gauravsindhwani , can you still run the code successfully after you remove these two collections since they are actually part of the graph. \n #I try your way but find the program is stuck after restoring.\"\n #https://github.com/tensorflow/tensorflow/issues/9747\n #tf.get_default_graph().clear_collection(\"queue_runners\")\n #tf.get_default_graph().clear_collection(\"local_variables\")\n #--for num_epochs not 0\n #tf.get_default_graph().clear_collection(\"local_variables\")\n #self.sess.run(tf.local_variables_initializer())\n\n #https://stackoverflow.com/questions/44251666/how-to-initialize-tensorflow-variable-that-wasnt-saved-other-than-with-tf-globa\n #melt.initialize_uninitialized_vars(self.sess)\n\n return self.sess", "def __restore_device(self):\n if not self.__configure_ao or self.__saved_ao is None:\n return\n\n if not self._xbee.is_remote():\n xb = self._xbee\n else:\n xb = self._xbee.get_local_xbee_device()\n\n try:\n xb.set_api_output_mode_value(self.__saved_ao[0])\n except XBeeException as e:\n self._error = \"Could not restore XBee after ZDO: \" + str(e)", "def restore(self):\n\n # Restore the sets\n try:\n self.mr.master_atoms_mapped.discard(self.mr.last_mapped[1])\n self.mr.sub_atoms_mapped.discard(self.mr.last_mapped[0])\n self.mr.atom_mapping.discard(self.mr.last_mapped)\n except IndexError:\n # happens if there was no last added atom\n pass\n # Reset the last mapped\n try:\n self.mr.last_mapped = self.mapping_stack.pop()\n except IndexError:\n # Happens if there is no backup\n pass", "def restore_variables(self):\n doc = c4d.documents.GetActiveDocument()\n dtu_dict = json.loads(doc[self.unique_id][1000])\n self.store_dtu(dtu_dict)\n self.find_skeleton(self.import_name)\n self.find_skeleton_name()\n self.find_body(self.import_name)\n self.find_body_name()\n self.find_children(self.skeleton)", "def _restore_saved_model(self):\n latest_checkpoint = tf.train.latest_checkpoint(self._checkpoint_dir)\n if latest_checkpoint is None:\n self._step = 0\n else:\n self._saver.restore(self._sess, latest_checkpoint)\n self._step = int(latest_checkpoint.split('-')[-1])", "def restore_settings(self):\n\n #Se ejecuta la función correspondiente de cada Frame.\n self.__sharing_function_frame.restore_settings()\n self.__algorithm_frame.restore_settings()", "def restore_defaults(self):\n\n pass", "def restore(self , model_dir , model_prefix):\n self.saver.restore(self.sess , os.path.join(model_dir , model_prefix))\n self.logger.info('Model restored from {}, with prefix {}'.format(model_dir , model_prefix))", "def test_reset(self):\n def create_model():\n with mn.model() as m:\n mn.variable('X', 1) \n return m \n\n m = create_model() \n m.validate_and_set('X', '', 2)\n m.reset()\n recording2 = 
m.recording()\n\n m.validate_and_set('X', '', 2)\n m.reset(reset_external_vars=False)\n recording3 = m.recording()\n\n m2 = create_model()\n m2.replay(recording2)\n self.assertEqual(m2['X'][''], 1)\n\n m3 = create_model()\n m3.replay(recording3)\n self.assertEqual(m3['X'][''], 2)", "def test_save_and_restore(self):\n engine = self.e\n scene = self.scene\n\n # Save visualization.\n f = StringIO()\n f.name = abspath('test.mv2') # We simulate a file.\n engine.save_visualization(f)\n f.seek(0) # So we can read this saved data.\n\n # Remove existing scene.\n \n engine.close_scene(scene)\n\n # Load visualization\n engine.load_visualization(f)\n self.scene = engine.current_scene\n s = self.scene\n self.check()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
wavelet decomposition of a signal into scales. This function uses Haar wavelets for demonstration purposes.
def simpleWaveDec(signal, nb_scales):
    # Haar Wavelets filters for decomposition and reconstruction
    ld = [1, 1]
    hd = [-1, 1]

    # transformation
    C = []
    A = signal  # approximation
    for i in range(nb_scales):
        A, D = waveSingleDec(A, ld, hd)
        # get the coefficients
        C.append(D)

    C.append(A)
    return C
[ "def scalogram(filename, savename):\n\n #signal reading\n (rate,signal) = wav.read(filename)\n\n #ignore other bands for primary treatment\n if signal.shape[1] > 1:\n signal = signal[:,0]\n\n #clip the signal\n max_energy = max(energy)\n start_frame = 0\n for k in range(len(energy)):\n if energy[k] >= max_energy*0.01:\n start_frame = k\n break\n\n end_frame = start_frame\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.001:\n end_frame = k\n break\n\n if(end_frame == start_frame):\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.01:\n end_frame = k\n break\n\n samples_per_frame = rate * 0.01\n signal = signal[start_frame*samples_per_frame:end_frame*samples_per_frame]\n\n\n wavelet=DOG4\n maxscale=10\n notes=100\n scaling='log'#\"log\" #or \"linear\"\n plotpower2d=True\n\n Ns=1024\n #limits of analysis\n Nlo=0 \n Nhi=Ns\n\n # Wavelet transform\n cw=wavelet(signal,maxscale,notes,scaling=scaling)\n scales=cw.getscales() \n cwt=cw.getdata()\n # power spectrum\n pwr=cw.getpower()\n scalespec=np.sum(pwr,axis=1)/scales # calculate scale spectrum\n # scales\n y=cw.fourierwl*scales\n x=np.arange(Nlo*1.0,Nhi*1.0,1.0)\n \n #mpl.tight_layout()\n mpl.axis('off')\n fig=mpl.figure(1)\n\n # 2-d coefficient plot\n plotcwt=np.clip(np.fabs(cwt.real), 0., 1000.)\n if plotpower2d: plotcwt=pwr\n im=mpl.imshow(plotcwt,cmap=mpl.cm.jet,extent=[x[0],x[-1],y[-1],y[0]],aspect='auto')\n mpl.ylim(y[0],y[-1])\n theposition=mpl.gca().get_position()\n\n mpl.tight_layout()\n mpl.savefig(savename)", "def _plot_wavelet(datas):\n \n # Declare a starlet object (and performs the transform)\n Sw = scarlet.Starlet(datas, lvl=5, direct=True)\n # This is the starlet transform as an array\n w = Sw.coefficients\n # The inverse starlet transform of w (new object otherwise, the tranform is not used)\n iw = Sw.image\n\n # TODO: Clean this code up using plt.subplots()\n # The wavelet transform of the first slice of images in pictures\n lvl = w.shape[1]\n plt.figure(figsize=(lvl*5+5,5))\n plt.suptitle('Wavelet coefficients')\n for i in range(lvl):\n plt.subplot(1, lvl, i+1)\n plt.title('scale' + str(i+1))\n plt.imshow(w[0,i], cmap='inferno')\n plt.colorbar()\n plt.show()\n\n # Making sure we recover the original image\n plt.figure(figsize=(30,10))\n plt.subplot(131)\n plt.title('Original image', fontsize=20)\n plt.imshow(datas[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(132)\n plt.title('Starlet-reconstructed image', fontsize=20)\n plt.imshow(iw[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(133)\n plt.title('Absolute difference', fontsize=20)\n plt.imshow((np.abs(iw[0]-datas[0])), cmap='inferno')\n plt.colorbar()\n plt.show()\n \n return", "def approximate_structural_wavelet_embedding(self):\n self.G.estimate_lmax()\n self.heat_filter = pygsp.filters.Heat(self.G, tau=[self.heat_coefficient])\n self.chebyshev = pygsp.filters.approximations.compute_cheby_coeff(self.heat_filter, m=self.approximation)\n self.approximate_wavelet_calculator()", "def plot_signal_decomp(data, w, title):\n w = pywt.Wavelet(w)\n a = data\n ca = []\n cd = []\n for i in range(6):\n (a, d) = pywt.dwt(a, w, mode)\n ca.append(a)\n cd.append(d)\n\n rec_a = []\n rec_d = []\n\n for i, coeff in enumerate(ca):\n coeff_list = [coeff, None] + [None] * i\n rec_a.append(pywt.waverec(coeff_list, w))\n\n for i, coeff in enumerate(cd):\n coeff_list = [None, coeff] + [None] * i\n rec_d.append(pywt.waverec(coeff_list, w))\n\n fig = plt.figure()\n ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)\n ax_main.set_title(title)\n 
ax_main.plot(data)\n ax_main.set_xlim(0, len(data) - 1)\n\n for i, y in enumerate(rec_a):\n ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)\n ax.plot(y, 'r')\n ax.set_xlim(0, len(y) - 1)\n ax.set_ylabel(\"A%d\" % (i + 1))\n\n for i, y in enumerate(rec_d):\n ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)\n ax.plot(y, 'g')\n ax.set_xlim(0, len(y) - 1)\n ax.set_ylabel(\"D%d\" % (i + 1))", "def cwt(\n x ,\n# frequencies = np.exp(np.arange( -5.5 , 0.0 , 0.01 )) ,\n frequencies = np.exp(np.arange( -2.5 , 0.0 , 0.01 )) ,\n wavelet = cauchy,\n Q = 10.\n):\n\n\n N_x = len(x)\n N_pad = closest_anti_prime( N_x + 120 ) - N_x\n N = N_x + N_pad # data length including padding\n\n X = np.fft.fft( np.concatenate(( x , np.zeros(N_pad) )) )\t# fft of padded input data\n w = np.arange( 0 , N/2 ) * 2./N \n # TODO check if frequency scaling is correct ( either Nyquist or zero included or both ? )\n\n WT = [] \t# the resulting transform\n\n\n for f in frequencies:\n a = 1.0 / f\n WT.append( np.fft.ifft( np.concatenate((X[:N/2] * wavelet(a*w,Q) , np.zeros(N/2))) )[:N_x] ) # <-- this makes real w'lets progressive, FIXME\n\n return [ np.array(WT) , frequencies ]\n # TODO make this a class behaving like the actual transform with freq and wlet as memebers", "def test_28_using_fluxscale_to_transfer_the_amplitude_solutions():\n\tcasalog.origin(\"test_28_using_fluxscale_to_transfer_the_amplitude_solutions\")\n\tcasalog.post(\"starting\")\n\n\tflux2 = fluxscale(vis='G192_flagged_6s.ms', caltable='calG192.G2', \\\n\t fluxtable='calG192.F2', reference='0')", "def scale_spec(wave, flux, ivar, sn, wave_ref, flux_ref, ivar_ref, mask=None, mask_ref=None, scale_method='auto', min_good=0.05,\n ref_percentile=70.0, maxiters=5, sigrej=3, max_median_factor=10.0,\n npoly=None, hand_scale=None, sn_min_polyscale=2.0, sn_min_medscale=0.5, debug=False, show=False):\n\n if mask is None:\n mask = ivar > 0.0\n if mask_ref is None:\n mask_ref = ivar_ref > 0.0\n\n\n # Interpolate the reference spectrum onto the wavelengths of the spectrum that will be rescaled\n flux_ref_int, ivar_ref_int, mask_ref_int = interp_spec(wave, wave_ref, flux_ref, ivar_ref, mask_ref)\n\n # estimates the SNR of each spectrum and the stacked mean SNR\n #rms_sn, weights = sn_weights(wave, flux, ivar, mask, sn_smooth_npix)\n #sn = np.sqrt(np.mean(rms_sn**2))\n\n if scale_method == 'auto':\n if sn > sn_min_polyscale:\n method_used = 'poly'\n elif ((sn <= sn_min_polyscale) and (sn > sn_min_medscale)):\n method_used = 'median'\n else:\n method_used = 'none'\n else:\n method_used = scale_method\n\n # Estimate the scale factor\n if method_used == 'poly':\n # Decide on the order of the polynomial rescaling\n if npoly is None:\n if sn > 25.0:\n npoly = 5 # quintic, Is this stable?\n elif sn > 8.0:\n npoly = 3 # cubic\n elif sn >= 5.0:\n npoly = 2 # quadratic\n else:\n npoly = 1 # linear\n scale, fit_tuple, flux_scale, ivar_scale, outmask = solve_poly_ratio(\n wave, flux, ivar, flux_ref_int, ivar_ref_int, npoly,mask=mask, mask_ref=mask_ref_int,\n ref_percentile=ref_percentile, debug=debug)\n elif method_used == 'median':\n # Median ratio (reference to spectrum)\n med_scale = robust_median_ratio(flux, ivar, flux_ref_int, ivar_ref_int,ref_percentile=ref_percentile,min_good=min_good,\n mask=mask, mask_ref=mask_ref_int, maxiters=maxiters,\n max_factor=max_median_factor,sigrej=sigrej)\n # Apply\n flux_scale = flux * med_scale\n ivar_scale = ivar * 1.0/med_scale**2\n scale = np.full_like(flux,med_scale)\n elif method_used == 'hand':\n # Input?\n if hand_scale is None:\n 
msgs.error(\"Need to provide hand_scale parameter, single value\")\n flux_scale = flux * hand_scale\n ivar_scale = ivar * 1.0 / hand_scale ** 2\n scale = np.full(flux.size, hand_scale)\n elif method_used == 'none':\n flux_scale = flux.copy()\n ivar_scale = ivar.copy()\n scale = np.ones_like(flux)\n else:\n msgs.error(\"Scale method not recognized! Check documentation for available options\")\n # Finish\n if show:\n scale_spec_qa(wave, flux, ivar, wave_ref, flux_ref, ivar_ref, scale, method_used, mask = mask, mask_ref=mask_ref,\n title='Scaling Applied to the Data')\n\n return flux_scale, ivar_scale, scale, method_used", "def fan_trans3D_scale(filename,nx, ny, part='coh', zmin = 0, zmax= 0, partSum =False):\n\n HDU = fits.open(filename)\n cube = HDU[0].data\n header = HDU[0].header\n reso = header ['CDELT2']*60\n M = nb_scale((nx,ny))\n N = header['NAXIS3']\n\n coherent_tot = []\n gaussian_tot = []\n\n\n for i in range(N) :\n q = []\n q= [2.0]*M\n print(\"data number\",i)\n wt, S11a, wave_k, S1a, q = fan_trans(cube[i,:,:], reso=reso, angular=False,q=q,apodize = 0.98, arrdim = np.array([nx,ny]))\n\n coherent = wt[(M+zmin):(2*M-zmax),:,:]\n Gaussian = wt[(2*M+zmin):(3*M-zmax),:,:] \n\n if partSum : \n coherent = np.sum(wt[(M+zmin):(2*M-zmax),:,:],axis=0)\n Gaussian = np.sum(wt[(2*M+zmin):(3*M-zmax),:,:],axis=0)\n\n coherent_tot.append( coherent )\n gaussian_tot.append( Gaussian )\n\n if part == 'coh' or part == 'all' :\n np.save(\"cohScale.npy\", coherent_tot)\n if part == 'gau' or part == 'all' :\n np.save(\"gauScale.npy\", gaussian_tot)", "def wavelet_maps_to_real(self,wav_analysis_maps,output_maps_prefix,\n\t\tn_quads=1000):\n\t\t# Make the wavelet dict we'll feed into the reconstruction script.\n\t\ttarget_fwhm = wav_analysis_maps['target_fwhm']\n\t\tscale_int = wav_analysis_maps['scale_int']\n\t\tj_min = wav_analysis_maps['j_min']\n\t\tj_max = wav_analysis_maps['j_max']\n\t\toutput_nside = wav_analysis_maps['output_nside']\n\t\twavelet_dict = {'scale_int':scale_int,\n\t\t\t'band_lim':wav_analysis_maps['band_lim'],'j_max':j_max,\n\t\t\t'j_min':j_min,'original_nside':output_nside,\n\t\t\t'target_fwhm':target_fwhm}\n\t\tanalysis_type = wav_analysis_maps['analysis_type']\n\t\tm_level = wav_analysis_maps['m_level']\n\n\t\t# Check that the right type of map dict was passed in.\n\t\tif analysis_type != 'hgmca':\n\t\t\traise ValueError('A non-hgmca wav_analysis_maps was passed in.')\n\n\t\t# Get the analysis level for each coefficient\n\t\twav_level = self.get_analysis_level(scale_int,j_min,j_max,m_level,\n\t\t\toutput_nside)\n\t\twav_j_ind = np.zeros(2+j_max-j_min)\n\t\twav_j_ind[1:] = np.arange(j_min,j_max+1)\n\n\t\t# Iterate through the levels\n\t\tfor level in range(m_level+1):\n\t\t\t# If no wavelet scales should be analyzed at this level\n\t\t\t# continue\n\t\t\tif np.sum(wav_level==level) == 0:\n\t\t\t\tcontinue\n\t\t\t# Which scales belong at this level\n\t\t\tlevel_j_ind = wav_j_ind[wav_level==level]\n\t\t\t# Get the number of patches for a given level.\n\t\t\tn_patches = level_to_npatches(level)\n\n\t\t\t# Keep track of how many pixels into the level we've\n\t\t\t# gone so far.\n\t\t\toffset = 0\n\t\t\tfor j in level_j_ind:\n\t\t\t\t# Now deal with scaling or wavelet coefficient\n\t\t\t\tif j == 0:\n\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j_min,\n\t\t\t\t\t\toutput_nside)\n\t\t\t\t\tpath = output_maps_prefix+'_scaling.fits'\n\t\t\t\t\twavelet_dict.update({'scale_map':{'path':path,\n\t\t\t\t\t\t'nside':nside}})\n\t\t\t\telse:\n\t\t\t\t\tnside = 
wavelets_base.get_max_nside(scale_int,j+1,\n\t\t\t\t\t\toutput_nside)\n\t\t\t\t\tpath = output_maps_prefix+'_wav_%d.fits'%(j)\n\t\t\t\t\twavelet_dict.update({'wav_%d_map'%(j):{'path':path,\n\t\t\t\t\t\t'nside':nside}})\n\t\t\t\tn_pix = hp.nside2npix(nside)\n\t\t\t\tn_pix_patch = n_pix//n_patches\n\n\t\t\t\t# Allocate the array we'll use to write the wavelets\n\t\t\t\twav_coeff = np.zeros(n_pix)\n\n\t\t\t\t# Now grab the data from each patch\n\t\t\t\tfor patch in range(n_patches):\n\t\t\t\t\twav_coeff[patch*n_pix_patch:(patch+1)*n_pix_patch] = (\n\t\t\t\t\t\twav_analysis_maps[str(level)][patch,\n\t\t\t\t\t\toffset:offset+n_pix_patch])\n\t\t\t\toffset += n_pix_patch\n\n\t\t\t\t# Write the map and point the dictionary to the path\n\t\t\t\thp.write_map(path,wav_coeff,dtype=np.float64,\n\t\t\t\t\toverwrite=True,nest=True)\n\n\t\treturn self.s2dw_wavelet_inverse_transform(wavelet_dict,np.min(\n\t\t\ttarget_fwhm),n_quads=n_quads)", "def test_16_bandpass_calibrator_gain_amplitudes_scaling():\n\tcasalog.origin(\"test_16_bandpass_calibrator_gain_amplitudes_scaling\")\n\tcasalog.post(\"starting\")\n\n\tflux1 = fluxscale(vis='G192_flagged_6s.ms', caltable='calG192.G1', \\\n\t fluxtable='calG192.F1', reference='0', \\\n\t transfer='3', listfile='3C84.fluxinfo', fitorder=1)", "def mkcube(hdus, refcoord, pixres=0.05, pixwidth=3,\\\n wavebound=(4.045,4.06), wavebinfact=1,\\\n imsize=(500,500), widthsmooth=1.0,\\\n outname='cube.fits'):\n\n #~~~ Construct cube WCS/header\n #~~~ First get wavelength step of all spectra, and calculate wavelength grid of cube ~~~~\n wcsslist = [WCS(hdu).sub(['spectral']) for hdu in hdus]\n # print(wcsslist[0].wcs.pc)\n \n wavedeltspec = [((ws.wcs.pc[0]*u.dimensionless_unscaled)*u.Unit(ws.world_axis_units[0])).to('um').value for ws in wcsslist]\n wavedelt = wavebinfact * np.mean(wavedeltspec) # convert to microns\n print(\"WAVE DELTA FOR CUBE\",wavedelt)\n wavecube = np.arange(wavebound[0], wavebound[1] + wavedelt, wavedelt ) #this is wavelength grid of output cube\n wavedim = np.size(wavecube )\n z = np.arange(wavedim)\n # print(wavecube )\n #print(wave_cube[0])\n \n #~~~ Set reference coordinate (central coords)\n# if (refcoord==None) | (type(refcoord)!=SkyCoord):\n# refcoords = [SkyCoord(ra=h.header['crval2'],dec=h.header['crval3'],unit=(u.deg,u.deg),frame=h.header['radesys']) for h in hdu]\n# raref,decref = (np.mean([ref.icrs.ra.degree for ref in refcoords]), np.mean([ref.icrs.dec.degree for ref in refcoords]))\n# else:\n raref,decref=(refcoord.icrs.ra.degree,refcoord.icrs.dec.degree)\n print(\"Central coordinates of cube: \",raref,decref) \n \n #~~~ Set up cube WCS. 
Define cube shape and enter WCS info in header.\n shapecube = [wavedim, imsize[1], imsize[0]]\n hducube= fits.PrimaryHDU( data=np.zeros(shapecube) )\n \n # create cube celestial and spectral axes\n hdrkeys=[('RADESYS','ICRS'),('EQUINOX',2000.0),\\\n ('CTYPE1','RA---TAN'),('CUNIT1','deg'),('CRPIX1',shapecube[2]/2.),('CRVAL1',(raref,'deg')),('CDELT1',(-pixres/3600.,'deg')),\\\n ('CTYPE2','DEC--TAN'),('CUNIT2','deg'),('CRPIX2',shapecube[1]/2.),('CRVAL2',(decref,'deg')),('CDELT2',(pixres/3600.,'deg')),\\\n ('CTYPE3','WAVE'),('CUNIT3','um'),('CRPIX3',(z +1.0)[wavedim//2]),('CRVAL3',(wavecube[wavedim//2],'um')),('CDELT3',(np.diff(wavecube)[0],'um')),\\\n ('SPECSYS','HELIOCEN'),('RESTWAV',wcsslist[0].wcs.restwav),('VHELIO',(hdus[0].header['VHELIO'],'km/s'))]#,\\\n # ('CD1_1',(-pixres/3600.,'deg')), ('CD2_2',(pixres/3600.,'deg')),('CD3_3',(np.diff(wave cube)[0].value,'um')) ]\n for hk in hdrkeys:\n hducube.header[hk[0]]=hk[1] #.set(hk[0],hk[1])\n \n wcscube = WCS(hducube)\n #wcscube.fix()\n wcscube.printwcs()\n wcsccube=wcscube.celestial\n\n #scube=SpectralCube.read(\"temp.fits\").with_spectral_unit('um',velocity_convention='optical')\n cubestack=[]\n fpstack=[]\n #cubedata=np.zeros(shapecube) # to be output cube\n #cubemask=np.zeros(shapecube) # to be number of contributions to each pix\n iim=0\n for h in hdus:\n #w=WCS(h)\n #spec = np.resize(h.data,shape=[,h.data.shape[1],h.data.shape[2]])\n \n #~~ First smooth and rebin the spectrum ~~\n #~~~ \n spdata=h.data.copy() #np.transpose(h.data)\n print(\"Spectrum shape: \",spdata.shape)\n # define original spectral WCS\n spwcss_orig = WCS(h).sub(['spectral'])\n spwavedim=spdata.shape[0]\n zsp=np.arange(spwavedim)\n spwave=spwcss_orig.pixel_to_world_values(zsp+1)\n spwave=spwave*u.Unit(spwcss_orig.world_axis_units[0])\n \n #~~ Calculate statistics of background away from line\n #ibg = np.where( (spwave.to(u.um).value>wavebound[1]) | (spwave.to(u.um).value<wavebound[0]))\n #spbg = np.median(spdata[:,:,ibg].ravel())\n #spbgrms = np.sqrt( np.mean( spdata[:,:,ibg]**2 ) )\n #print(\"Background counts\",spbg)\n #spdata-=spbg\n \n #~~ smooth using 2D gaussian, by the re-bin factor in the wavelength direction and by stddev=1.0pix~0.155 arcsec in the spatial\n spsmooth=convolve(spdata[:,:,0].T,Gaussian2DKernel(x_stddev=wavebinfact/2.3548,y_stddev=1.0),normalize_kernel=True)\n\n #~~ Define spectrum as a Spectrum1D object for easy re-sampling/spectral smoothing\n sp=Spectrum1D(flux=spsmooth*u.dimensionless_unscaled,spectral_axis=spwave,\\\n velocity_convention='optical',rest_value=h.header['restwav']*u.m)\n \n #~~ Smooth, resample in spectral direction\n sampler = SplineInterpolatedResampler() \n# sprebin = sampler(sp, wavecube*u.um)\n splst=[]\n for spi in sp:\n# spsm = gaussian_smooth(spi, stddev=float(wavebinfact)/2.3548)\n sprb = sampler(spi, wavecube*u.um)\n splst.append(sprb.flux.value)\n sprebindat=np.array(splst)\n \n sprebin = Spectrum1D(flux=np.array(splst)*u.dimensionless_unscaled,spectral_axis=wavecube*u.um,\\\n velocity_convention='optical',rest_value=h.header['restwav']*u.m)\n\n sp3data=np.transpose(np.array([sprebin.flux.value.T]*pixwidth),axes=(1,2,0))/np.float(pixwidth)\n # extend along slit width by assuming exp\n \n # print(sp3data.shape)\n #print(sprebin.flux.value.shape,sp3data.shape)\n #sp3data = np.concatenate([[sprebin.flux.value.T]]*pixwidth,axis=2)/float(pixwidth)\n #print(sp3data.shape)\n #break\n \n # print(sprebin.flux.value)\n# plt.figure()\n# plt.plot(sprebin.wavelength.value,sprebin.flux.value.median(0))\n# plt.show()\n\n #~~ Now 
grow spectrum along slit width so that its extent is > 1 pixel (~slit width). Assume constant slit profile.\n #sp3d=np.concatenate([[sprebin.flux.value.T]]*3,axis=0).T \n #sp3d/=float(pixwidth) # divide by pixel width, to conserve total flux (is this true?)\n #print(sp3d.shape)\n \n #~~ Get RA,DEC of all pixels in a given wavelength slice (same mapping across wavelength)\n spwcsc = WCS(h).celestial\n # adjust wcs to reflect new size of dimension along the slit width\n #spwcsc.wcs.ndim=pixwidth,\n spwcsc.wcs.crpix=float(pixwidth)/2.0 + 0.5,spwcsc.wcs.crpix[1]\n spwcsc.printwcs()\n spchdr=spwcsc.to_header()\n spchdr['naxis1']=sp3data.shape[2]\n spchdr['naxis2']=sp3data.shape[1]\n spchdr['naxis3']=sp3data.shape[0]\n sp3wcsc=WCS(spchdr).celestial\n \n# slitcoord=SkyCoord(ra=spwcsc.wcs.crval[0]*u.deg,dec=spwcsc.wcs.crval[1]*u.deg,\\\n# frame='icrs',unit=(u.deg,u.deg))\n# slitpa=np.arctan( spwcsc.wcs.pc[0,1]/spwcsc.wcs.pc[1,1] )\n# pscale=proj_plane_pixel_scales(spwcsc)*3600.\n #print(pscale)\n# sp3wcs=WCS(fits.PrimaryHDU(sp3data))\n# sp3wcs.wcs.equinox=2000.0\n# sp3wcs.wcs.radesys='ICRS'\n# sp3wcss=wcscube.spectral(['spectral'])\n# sp3wcs.wcs.ctype='RA---TAN','DEC--TAN',sp3wcss.wcs.ctype[0]\n# sp3wcs.wcs.cunit='deg','deg',sp3wcss.wcs.cunit[0]\n# spwcs3.wcs.crval=slitcoord.icrs.ra.degree,slitcoord.icrs.dec.degree,sp3wcss.wcs.crval[0]\n# spwcs3.wcs.crpix=pixcoord[0],pixcoord[1],wcss.wcs.crpix[0]\n# cdelt=pixscale/3600.\n# wcs3.wcs.cd=[[spwcsc.wcs., cdelt*np.sin( slitpa*np.pi/180.), 0. ],\n# [cdelt*np.sin( slitpa*np.pi/180.), cdelt*np.cos( slitpa*np.pi/180.), 0. ],\n# [0., 0., scdelt ]]\n# sp3hdu = sregister(fits.PrimaryHDU(sp3data,header=h.header),\\\n# # slitcoord,slitpa,spwcsc.wcs.crpix,pscale[0],velosys=0.,outname=None)\n# # sp3wcs=WCS(sp3hdu)\n# # sp3wcs.printwcs()\n \n \n# spreproj = reproject_interp((sp3data, spwcsc.to_header()), shape_out=shapecube,\\\n# order='bicubic',\\\n# return_footprint=False)\n# independent_celestial_slices=True)\n \n# print(spreproj.shape)\n# cubestack.append(spreproj)\n \n# xsp=np.arange(spdata.shape[2])\n# ysp=np.arange(spdata.shape[1])\n# zsp=np.arange(spdata.shape[0]) # redefine z grid to reflect rebinning\n# ixsp,iysp = np.meshgrid(xsp,ysp)\n# xxcube,yycube = astropy.wcs.utils.pixel_to_pixel(spwcsc,wcsccube,ixsp,iysp,0)\n# ixcube=np.round(xxcube).astype(np.int)\n# iycube=np.round(yycube).astype(np.int)\n \n # loop through wave slices, set the data for this image cube, and zero-valued pixels to nan\n cubedata_i = np.zeros(shapecube)\n# fp_i = np.zeros(shapecube)\n for iz in range(cubedata_i.shape[0]):\n spslice=sp3data[iz,:,:]\n # cubedata_i[iz,:,:] = reproject_interp((spslice,spwcsc),output_projection=wcscube.celestial,\\\n # shape_out=cubedata_i.shape[1:], order='bicubic',return_footprint=False)\n # cubedata_i[iz,:,:] = reproject_adaptive((spslice,spwcsc),output_projection=wcscube.celestial,\\\n # shape_out=cubedata_i.shape[1:], order='bilinear',return_footprint=False)\n cubedata_i[iz,:,:] = reproject_adaptive((spslice,spwcsc),output_projection=wcscube.celestial,\\\n shape_out=cubedata_i.shape[1:],order='bilinear',return_footprint=False)\n \n # cubedata_i[iz,:,:] = reproject_adaptive((spslice,spwcsc),output_projection=wcscube.celestial,\\\n # shape_out=cubedata_i.shape[1:], order='bilinear',return_footprint=False)\n \n #cubedata_i[iz,:,:][iycube,ixcube] = sp3data[iz,:,:][iysp,ixsp]\n cubedata_i[np.where(cubedata_i==0.)]=np.nan\n cubestack.append(cubedata_i)\n# footprint.append\n \n cubestack=np.array(cubestack)\n cubestackmask=np.isnan(cubestack)\n 
cubestackma=np.ma.masked_array(cubestack,mask=cubestackmask)\n cubedata=np.nanmean(cubestack,axis=0)\n cubedata[np.where(cubedata==0.)]=np.nan\n # cubedata=np.ma.mean(cubestackma,axis=0)\n #cubedata.data[np.where(cubedata.data==0.)]=np.nan\n # cubedata=np.nanmean(cubestack,axis=0)\n #print(cubedata.shape)\n fits.writeto(outname[:-5]+\"_exact.fits\",cubedata,header=hducube.header,overwrite=True)\n print(\"REPLACING NANS\")\n spatkern=Gaussian2DKernel(x_stddev=1.0,x_size=11,y_size=11)\n #spatkern=Gaussian2DKernel(x_stddev=1.0)\n #kwargs={'kind':'slinear'}\n cubeinterp=np.zeros(cubedata.shape)\n for k in range(cubedata.shape[0]):\n if (k+1)%5==0:\n print('Coadding spectral slice %i/%i'%(k+1,cubeinterp.shape[0]))\n cubeinterp[k,:,:] = interpolate_replace_nans(cubedata[k,:,:],spatkern,\\\n boundary='extend') \n# cubeinterp[k,:,:] = convolve(cubedata[k,:,:],spatkern, nan_treatment='fill', boundary=None,\\\n# fill_value=np.nan, normalize_kernel=True)\n# print(cubeinterp.shape)\n fits.writeto(outname,data=cubeinterp,header=hducube.header,overwrite=True)\n\n \n return fits.PrimaryHDU(cubeinterp,header=wcscube.to_header())", "def verticalBandPass(data, z, m_min, m_max, return_sig=True):\n\n # invert wavelengths since fft returns on frequency grid (1/lamda)\n m1 = 1/m_min\n m2 = 1/m_max\n\n\n # get spectra of each vertical cast\n dz = np.nanmean(np.gradient(np.squeeze(z)))\n spectra, mx, kx = SpectrumGenerator_vertical(data, dz, data.shape[0])\n\n # Normalize Power\n power = np.abs(scipy.fftpack.fftshift(spectra, axes=0))\n power = power/len(mx)\n\n\n # Filter on shifted spectrum\n midpoint = int(len(mx)/2)\n pos_half = mx[1:midpoint+1]\n neg_half = np.flipud(-pos_half)\n mxShift = np.hstack((neg_half, pos_half))\n\n mask1 = np.logical_and(np.abs(mxShift)>=m2, np.abs(mxShift)<=m1)\n bandpass1 = np.logical_not(mask1)\n\n filtShift = scipy.fftpack.fftshift(spectra)\n filtShift[bandpass1,:] = 0\n powerFilt = np.abs(filtShift)\n powerFilt = 2*powerFilt/len(mx)\n\n\n\n # create band bass filters using min and max lamdas\n mask = np.logical_and(mx>=m2, mx<=m1)\n bandpass = np.logical_not(mask)\n\n # Apply filter be turning all non desired values to zero\n\n filtered = spectra[:]\n filtered[bandpass,:] = 0\n\n\n # shift mx grid\n midpoint = int(len(mx)/2)\n pos_half = mx[1:midpoint+1]\n neg_half = np.flipud(-pos_half)\n mxShift = np.hstack((neg_half, pos_half))\n\n\n # retur wavnumber and wavelength grids along with the spectra and filter\n return mx, kx, spectra, bandpass, filtShift, power, mxShift, powerFilt", "def waveletPacket(self, packlevel):\n Energy = []\n Flatness = []\n self.maxWPE = []\n for clipindex in xrange(len(self.cutclip)):\n tempE = []\n tempF = []\n wp= pywt.WaveletPacket(data=self.cutclip[clipindex,:],wavelet='db1',mode='symmetric', maxlevel = packlevel)\n for i in xrange(packlevel+1):\n for index, node in enumerate(wp.get_level(i)):\n d = wp[node.path].data\n E = np.log(np.sqrt(np.sum(d ** 2)))\n F = np.exp(np.mean(np.log(np.abs(d)))) / np.mean(np.abs(d))\n tempE.append(E)\n tempF.append(F)\n maxnumE = float(max(tempE))\n temp = list(np.array(tempE) / maxnumE) # this function will deliminate the effect of the amplitude\n self.maxWPE.append(maxnumE)\n Energy.append(temp)\n Flatness.append(tempF)\n self.maxWPE = np.array(self.maxWPE)\n self.WPE = np.matrix(Energy)\n self.WPE = self.checkmatrix(self.WPE)\n self.WPF = np.matrix(Flatness)\n self.WPF = self.checkmatrix(self.WPF)", "def wavelet_transform_delta(self):\n wavelet_func = self.wavelet.frequency # wavelet as f(w_k, s)\n\n WK, S = 
jnp.meshgrid(self.w_k, self.scales)\n\n # compute Y_ over all s, w_k and sum over k\n norm = (2 * jnp.pi * S / self.dt) ** .5\n W_d = (1 / self.n) * jnp.sum(norm * wavelet_func(WK, S.T), axis=1)\n # N.B This W_d is 1D (defined only at n=0)\n return W_d", "def UBVRIraw(lambdaScale, flux):\r\n \r\n filters = filterSet()\r\n\r\n numBands = len(filters)\r\n #var numLambdaFilt\r\n\r\n bandFlux = [0.0 for i in range(numBands)]\r\n\r\n\r\n #var deltaLam, newY, product;\r\n\r\n for ib in range(numBands):\r\n\r\n bandFlux[ib] = 0.0 #//initialization\r\n numLambdaFilt = len(filters[ib][0])\r\n #//console.log(\"ib \" + ib + \" numLambdaFilt \" + numLambdaFilt);\r\n #//wavelength loop is over photometric filter data wavelengths\r\n\r\n for il in range(1, numLambdaFilt):\r\n\r\n #//In this case - interpolate model SED onto wavelength grid of given photometric filter data\r\n\r\n deltaLam = filters[ib][0][il] - filters[ib][0][il - 1] #//nm\r\n #//deltaLam = 1.0e-7 * deltaLam; //cm\r\n #//console.log(\"ib: \" + ib + \" il: \" + il + \" filters[ib][0][il] \" + filters[ib][0][il] + \" deltaLam: \" + deltaLam + \" filters[ib][1][il] \" + filters[ib][1][il]);\r\n\r\n #//hand log flux (row 1) to interpolation routine: \r\n newY = ToolBox.interpol(lambdaScale, flux[1], filters[ib][0][il])\r\n #// linearize interpolated flux: - fluxes add *linearly*\r\n newY = math.exp(newY)\r\n\r\n product = filters[ib][1][il] * newY\r\n #if (ib == 2):\r\n # //console.log(\"Photometry: il: \" + il + \" newY: \" + newY + \" filterLamb: \" + filters[ib][0][il] + \" filterTrans: \" + filters[ib][1][il] + \" product \" + product);\r\n \r\n #//System.out.println(\"Photometry: filtertrans: \" + filters[ib][1][il] + \" product: \" + product + \" deltaLam: \" + deltaLam);\r\n #//Rectangular picket integration\r\n bandFlux[ib] = bandFlux[ib] + (product * deltaLam)\r\n #//console.log(\"Photometry: ib: \" + ib + \" deltaLam \" + deltaLam + \" bandFlux: \" + bandFlux[ib]);\r\n\r\n #} //il loop - lambdas\r\n #//console.log(\"Photometry: ib: \" + ib + \" bandFlux: \" + bandFlux[ib], \" product \" + product + \" deltaLam \" + deltaLam);\r\n\r\n #} //ib loop - bands\r\n\r\n #var raw;\r\n\r\n return bandFlux", "def convert_to_spec1d(wl_angstrom,fl_counts,er_counts,resample=True,resample_kind='FluxConservingSpectRes',\n resample_fill_value=np.nan,resample_upsample_factor=1.):\n # Filter the NaNs\n mask = np.ma.masked_invalid(fl_counts)\n wl_angstrom_in = wl_angstrom[~mask.mask] \n fl_counts_in = fl_counts[~mask.mask]\n er_counts_in = er_counts[~mask.mask] # should this not have units attached?\n\n # Resample if needed\n if resample and (resample_kind is not 'FluxConservingSpectRes'):\n wl_angstrom, fl_counts = resample_to_median_sampling(wl_angstrom_in,fl_counts_in,kind=resample_kind,\n fill_value=resample_fill_value,\n upsample_factor=resample_upsample_factor)\n _, er_counts = resample_to_median_sampling(wl_angstrom_in,er_counts_in,kind=resample_kind,\n fill_value=resample_fill_value,\n upsample_factor=resample_upsample_factor)\n elif resample and (resample_kind is 'FluxConservingSpectRes'):\n wl_angstrom, fl_counts, er_counts = resample_to_median_sampling(wl_angstrom_in,fl_counts_in,e=er_counts_in,kind=resample_kind,\n fill_value=resample_fill_value,\n upsample_factor=resample_upsample_factor)\n else:\n wl_angstrom = wl_angstrom_in\n fl_counts = fl_counts_in\n er_counts = er_counts_in\n\n # Create a spec1d object\n out = specutils.Spectrum1D(spectral_axis=wl_angstrom*u.AA,\n flux=fl_counts*u.count,\n 
uncertainty=StdDevUncertainty(er_counts))\n return(out)", "def rem_blaze(norm_spec, wave_sol, wav_ref, sun_spec, s_n):\n\n flat_list = []\n pix_to_wav = wave_sol[1]-wave_sol[0]\n wav_min = min(wave_sol)\n wav_max = max(wave_sol)\n sun_min = np.argmin(abs(wav_ref - wav_min))\n sun_max = np.argmin(abs(wav_ref - wav_max))\n sun_ref = sun_spec[sun_min-100:sun_max+100]\n\n tck = interpolate.splrep(wav_ref[sun_min-100:sun_max+100], sun_ref)\n for l in range(len(norm_spec)):\n new_sun = interpolate.splev(wave_sol, tck)\n correlation = signal.correlate(new_sun, norm_spec[l], mode = 'full')\n # plt.plot(correlation)\n # plt.show()\n xcorr = np.arange(-len(norm_spec[l])+1, len(norm_spec[l]))\n x = np.argmax(correlation)\n n = 10\n centroid = np.sum(xcorr[x-n:x+n+1]*correlation[x-n:x+n+1])/np.sum(correlation[x-n:x+n+1])\n if abs(centroid) > 1:\n shift = int(centroid)\n new_sun = interpolate.splev(wave_sol+pix_to_wav*shift, tck) #HERE\n else:\n shift = 0\n if l == np.argmax(s_n) and s_n[l] > 20:\n true_wav = wave_sol+pix_to_wav*shift #HERE\n\n curve = norm_spec[l]/(new_sun)\n\n n = 3\n for i in range(n, len(curve)-n):\n curve[i] = np.mean(curve[i-n:i+n])\n\n xval = np.linspace(sun_min, sun_max, len(curve))\n coef = np.polyfit(xval, curve, 5)\n curve_fit = np.poly1d(coef)\n fit = curve_fit(xval)\n flat_list.append(norm_spec[l]/fit)\n # plt.plot((wave_sol - pix_to_wav*centroid), norm_spec[l]/fit)\n # plt.show()\n\n try:\n true_wav\n except:\n true_wav = wave_sol\n\n return flat_list, true_wav", "def coef_shrinkage_1D(cube, \n baseline,\n channel,\n polarization,\n wavelet,\n n,\n threshold,\n tfix,\n ttype):\n slice = cube[baseline,channel,:,polarization]\n \n # Decomposition\n coefs = pywt.wavedec(slice, wavelet,level =n)\n\n # Theshold\n if threshold == 'fixed':\n denoised = coefs\n for i,coef in enumerate(coefs):\n denoised[i] = pywt.threshold(coef,tfix,ttype)\n else:\n logger.warning('No other wavelet thresholds have been impleted yet')\n return\n\n # Resynthesis\n return pywt.waverec(denoised,wavelet)[:slice.shape[0]]", "def compute_tke_spectrum_1d(u, lx, ly, lz, smooth):\n nx = len(u[:, 0, 0])\n ny = len(u[0, :, 0])\n nz = len(u[0, 0, :])\n\n nt = nx * ny * nz\n n = max(nx, ny, nz) # int(np.round(np.power(nt,1.0/3.0)))\n\n uh = fftn(u) / nt\n\n # tkeh = zeros((nx, ny, nz))\n tkeh = 0.5 * (uh * conj(uh)).real\n\n length = max(lx, ly, lz)\n\n knorm = 2.0 * pi / length\n\n kxmax = nx / 2\n kymax = ny / 2\n kzmax = nz / 2\n\n wave_numbers = knorm * arange(0, n)\n tke_spectrum = zeros(len(wave_numbers))\n\n for kx in range(nx):\n rkx = kx\n if kx > kxmax:\n rkx = rkx - nx\n for ky in range(ny):\n rky = ky\n if ky > kymax:\n rky = rky - ny\n for kz in range(nz):\n rkz = kz\n if kz > kzmax:\n rkz = rkz - nz\n rk = sqrt(rkx * rkx + rky * rky + rkz * rkz)\n k = int(np.round(rk))\n #print('k = ', k)\n tke_spectrum[k] = tke_spectrum[k] + tkeh[kx, ky, kz]\n\n tke_spectrum = tke_spectrum / knorm\n\n if smooth:\n tkespecsmooth = movingaverage(tke_spectrum, 5) # smooth the spectrum\n tkespecsmooth[0:4] = tke_spectrum[0:4] # get the first 4 values from the original data\n tke_spectrum = tkespecsmooth\n\n knyquist = knorm * min(nx, ny, nz) / 2\n\n return knyquist, wave_numbers, tke_spectrum" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
simple wavelet reconstruction function for a 1D signal
def simpleWaveRec(C):
    ld = np.array([1, 1])
    hd = np.array([-1, 1])
    lr = ld/2
    hr = -hd/2

    A = C[-1]
    for scale in reversed(C[:-1]):
        A = waveSingleRec(A, scale, lr, hr)

    return A
[ "def cwt(\n x ,\n# frequencies = np.exp(np.arange( -5.5 , 0.0 , 0.01 )) ,\n frequencies = np.exp(np.arange( -2.5 , 0.0 , 0.01 )) ,\n wavelet = cauchy,\n Q = 10.\n):\n\n\n N_x = len(x)\n N_pad = closest_anti_prime( N_x + 120 ) - N_x\n N = N_x + N_pad # data length including padding\n\n X = np.fft.fft( np.concatenate(( x , np.zeros(N_pad) )) )\t# fft of padded input data\n w = np.arange( 0 , N/2 ) * 2./N \n # TODO check if frequency scaling is correct ( either Nyquist or zero included or both ? )\n\n WT = [] \t# the resulting transform\n\n\n for f in frequencies:\n a = 1.0 / f\n WT.append( np.fft.ifft( np.concatenate((X[:N/2] * wavelet(a*w,Q) , np.zeros(N/2))) )[:N_x] ) # <-- this makes real w'lets progressive, FIXME\n\n return [ np.array(WT) , frequencies ]\n # TODO make this a class behaving like the actual transform with freq and wlet as memebers", "def wiener(x):\n x_array = tensor_to_array(x)\n x_filtered = signal.wiener(x_array)\n \n return array_to_tensor(x_filtered)", "def compress_spectral_1D(x, y_len):\n assert y_len >= 3\n xfft = fft(x, norm=\"ortho\")\n if y_len % 2 == 1:\n n = (y_len - 1) // 2\n # TODO", "def make_wave_from_array(qupulse_template, name='pulse'):\n return {'name': name, 'wave': qupulse_template,\n 'type': DataTypes.RAW_DATA}", "def getComplexNMF1DTemplates(S, W, H, p = 2, audioParams = None):\n K = W.shape[2]\n #Step 1: Compute the masked matrices raised to the power p\n AsSum = np.zeros(S.shape)\n As = []\n for k in range(K):\n Hk = np.array(H)\n Hk[0:k, :] = 0\n Hk[k+1::, :] = 0\n As.append(multiplyConv1D(W, Hk)**p)\n AsSum += As[-1]\n #Step 2: Average masked portions of the spectrogram to come up with\n #complex-valued templates\n Ss = []\n Ratios = []\n AllPow = np.abs(np.sum(S*np.conj(S), 0))\n AllPow[AllPow == 0] = 1\n for k in range(K):\n Ss.append(S*As[k]/AsSum)\n Pow = np.abs(np.sum(Ss[k]*np.conj(Ss[k]), 0))\n Ratios.append(Pow/AllPow)\n #Step 4: Save components if user requested\n if audioParams:\n from SpectrogramTools import iSTFT\n [winSize, hopSize] = [audioParams['winSize'], audioParams['hopSize']]\n [Fs, fileprefix] = [audioParams['Fs'], audioParams['fileprefix']]\n import matplotlib.pyplot as plt\n from scipy.io import wavfile\n X = np.array([])\n for k in range(K):\n thisS = np.array(Ss[k])\n thisS[:, Ratios[k] < 0.05] = 0\n Xk = iSTFT(thisS, winSize, hopSize)\n if k == 0:\n X = Xk\n else:\n X += Xk\n wavfile.write(\"%s_%i.wav\"%(fileprefix, k), Fs, Xk)\n plt.clf()\n plt.plot(Ratios[k])\n plt.title(\"Ratio, %.3g Above 0.05\"%(np.sum(Ratios[k] > 0.05)/float(Ratios[k].size)))\n plt.savefig(\"%s_%iPower.svg\"%(fileprefix, k), bbox_inches = 'tight')\n wavfile.write(\"%sNMF.wav\"%fileprefix, Fs, X)\n return (Ss, Ratios)", "def filter_bank_1D(wname):\n\t# returns analysis and synthesis filters concat-ed\n\tfb = torch.tensor(pywt.Wavelet(wname).filter_bank).float()\n\twa, ws = fb[:2,:], fb[2:,:]\n\treturn wa, ws", "def dwt1d(signal, wavelet=\"haar\", levels=1, npdtype=np.float32):\n # Prepare signal for tf. 
Turn into 32bit floats for GPU computation, and\n # expand dims to make it into a 3d tensor so tf.nn.conv1d is happy\n signal = signal.astype(npdtype)\n signal = np.expand_dims(signal, 0)\n signal = np.expand_dims(signal, -1)\n\n # Construct and compute TF graph\n return _construct_and_compute_graph(\n signal,\n tfw.nodes.dwt1d,\n _parse_wavelet(wavelet),\n levels\n )", "def reconstruct_waveform(hp, mel, n_iter=32):\n #denormalized = denormalize(mel)\n #amp_mel = db_to_amp(denormalized)\n # transpose\n mel = mel.T\n\n # de-noramlize\n mel = (np.clip(mel, 0, 1) * hp.max_db) - hp.max_db + hp.ref_db\n\n # to amplitude\n mel = np.power(10.0, mel * 0.05)\n\n S = librosa.feature.inverse.mel_to_stft(\n mel, power=1, sr=hp.sr,\n n_fft=hp.n_fft)#, fmin=hp.fmin)\n wav = librosa.core.griffinlim(\n S, n_iter=n_iter,\n hop_length=hp.hop_length, win_length=hp.win_length)\n\n # de-preemphasis\n wav = signal.lfilter([1], [1, -hp.preemphasis], wav)\n return wav", "def simpleWaveDec(signal, nb_scales):\n # Haar Wavelets filters for decomposition and reconstruction\n ld = [1, 1]\n hd = [-1, 1]\n\n # transformation\n C = []\n A = signal # approximation\n for i in range(nb_scales):\n A, D = waveSingleDec(A, ld, hd)\n # get the coefficients\n C.append(D)\n\n C.append(A)\n return C", "def test_initialize_new_wavefunction():\n nele = 3\n m_s = -1\n norb = 4\n wfn = fqe.get_wavefunction(nele, m_s, norb)\n assert isinstance(wfn, wavefunction.Wavefunction)", "def syntheticSeismogram(\n d, rho, v, wavf, wavA=1.0, usingT=True, wavtyp=\"RICKER\", dt=0.0001, dmax=200\n):\n\n v, rho, d = (\n np.array(v, dtype=float),\n np.array(rho, dtype=float),\n np.array(d, dtype=float),\n )\n usingT = np.array(usingT, dtype=bool)\n\n _, t = getTimeDepth(d, v, dmax)\n rseries, R = getReflectivity(d, rho, v)\n\n # time for reflectivity series\n tref = t[1:-1]\n\n # create time vector\n t = np.arange(t.min(), t.max(), dt)\n\n # make wavelet\n twav = np.arange(-2.0 / np.min(wavf), 2.0 / np.min(wavf), dt)\n\n # Get source wavelet\n wav = {\"RICKER\": getRicker, \"ORMSBY\": getOrmsby, \"KLAUDER\": getKlauder}[wavtyp](\n wavf, twav\n )\n wav = wavA * wav\n\n rseriesconv = np.zeros(len(t))\n for i in range(len(tref)):\n index = np.abs(t - tref[i]).argmin()\n rseriesconv[index] = rseries[i]\n\n # Do the convolution\n seis = np.convolve(wav, rseriesconv)\n tseis = np.min(twav) + dt * np.arange(len(seis))\n index = np.logical_and(tseis >= 0, tseis <= np.max(t))\n tseis = tseis[index]\n seis = seis[index]\n\n return tseis, seis, twav, wav, tref, rseries", "def m1prior(self,width,x,components):\n\t\tpbspl=(self.kcordict['default']['Bpbspl']*(self.wave[1]-self.wave[0])*self.fluxfactor['default']['B'])[np.newaxis,:]\n\t\tint1d = interp1d(self.phase,components[1],axis=0,assume_sorted=True)\n\t\tm1flux = np.sum(pbspl*int1d([0]), axis=1)\n\t\tbStdFlux=(10**((self.m0guess-27.5)/-2.5) )\n\t\tresidual = (m1flux) / (width*bStdFlux)\n\t\t#This derivative is constant, and never needs to be recalculated, so I store it in a hidden attribute\n\t\ttry:\n\t\t\tfluxDeriv= self.__m1priorfluxderiv__.copy()\n\t\texcept:\n\t\t\tfluxDeriv= np.zeros((pbspl.shape[0],self.npar))\n\t\t\tfor i in range(self.im1.size):\n\t\t\t\twaverange=self.waveknotloc[[i%(self.waveknotloc.size-self.bsorder-1),i%(self.waveknotloc.size-self.bsorder-1)+self.bsorder+1]]\n\t\t\t\tphaserange=self.phaseknotloc[[i//(self.waveknotloc.size-self.bsorder-1),i//(self.waveknotloc.size-self.bsorder-1)+self.bsorder+1]]\n\t\t\t\t#Check if this filter is inside values affected by changes in knot 
i\n\t\t\t\tminlam=min([np.min(self.kcordict['default'][filt+'wave'][self.kcordict['default'][filt+'tp'] > 0.01]) for filt in ['B']]) \n\t\t\t\tmaxlam=max([np.max(self.kcordict['default'][filt+'wave'][self.kcordict['default'][filt+'tp'] > 0.01]) for filt in ['B']]) \n\t\t\t\tif waverange[0] > maxlam or waverange[1] < minlam:\n\t\t\t\t\tpass\n\t\t\t\tif (0>=phaserange[0] ) & (0<=phaserange[1]):\n\t\t\t\t\t#Bisplev with only this knot set to one, all others zero, modulated by passband and color law, multiplied by flux factor, scale factor, dwave, redshift, and x0\n\t\t\t\t\t#Integrate only over wavelengths within the relevant range\n\t\t\t\t\tinbounds=(self.wave>waverange[0]) & (self.wave<waverange[1])\n\t\t\t\t\tderivInterp = interp1d(self.phase,self.regularizationDerivs[0][:,inbounds,i],axis=0,kind=self.interpMethod,bounds_error=False,fill_value=\"extrapolate\",assume_sorted=True)\n\t\t\t\t\tfluxDeriv[:,self.im1[i]] = np.sum( pbspl[:,inbounds]* derivInterp([0]),axis=1) \n\t\t\tself.__m1priorfluxderiv__=fluxDeriv.copy()\n\t\t\n\t\tjacobian=fluxDeriv/ (bStdFlux* width)\n\t\t\n\t\treturn residual,m1flux/bStdFlux,jacobian", "def UBVRIraw(lambdaScale, flux):\r\n \r\n filters = filterSet()\r\n\r\n numBands = len(filters)\r\n #var numLambdaFilt\r\n\r\n bandFlux = [0.0 for i in range(numBands)]\r\n\r\n\r\n #var deltaLam, newY, product;\r\n\r\n for ib in range(numBands):\r\n\r\n bandFlux[ib] = 0.0 #//initialization\r\n numLambdaFilt = len(filters[ib][0])\r\n #//console.log(\"ib \" + ib + \" numLambdaFilt \" + numLambdaFilt);\r\n #//wavelength loop is over photometric filter data wavelengths\r\n\r\n for il in range(1, numLambdaFilt):\r\n\r\n #//In this case - interpolate model SED onto wavelength grid of given photometric filter data\r\n\r\n deltaLam = filters[ib][0][il] - filters[ib][0][il - 1] #//nm\r\n #//deltaLam = 1.0e-7 * deltaLam; //cm\r\n #//console.log(\"ib: \" + ib + \" il: \" + il + \" filters[ib][0][il] \" + filters[ib][0][il] + \" deltaLam: \" + deltaLam + \" filters[ib][1][il] \" + filters[ib][1][il]);\r\n\r\n #//hand log flux (row 1) to interpolation routine: \r\n newY = ToolBox.interpol(lambdaScale, flux[1], filters[ib][0][il])\r\n #// linearize interpolated flux: - fluxes add *linearly*\r\n newY = math.exp(newY)\r\n\r\n product = filters[ib][1][il] * newY\r\n #if (ib == 2):\r\n # //console.log(\"Photometry: il: \" + il + \" newY: \" + newY + \" filterLamb: \" + filters[ib][0][il] + \" filterTrans: \" + filters[ib][1][il] + \" product \" + product);\r\n \r\n #//System.out.println(\"Photometry: filtertrans: \" + filters[ib][1][il] + \" product: \" + product + \" deltaLam: \" + deltaLam);\r\n #//Rectangular picket integration\r\n bandFlux[ib] = bandFlux[ib] + (product * deltaLam)\r\n #//console.log(\"Photometry: ib: \" + ib + \" deltaLam \" + deltaLam + \" bandFlux: \" + bandFlux[ib]);\r\n\r\n #} //il loop - lambdas\r\n #//console.log(\"Photometry: ib: \" + ib + \" bandFlux: \" + bandFlux[ib], \" product \" + product + \" deltaLam \" + deltaLam);\r\n\r\n #} //ib loop - bands\r\n\r\n #var raw;\r\n\r\n return bandFlux", "def debugInitializeWeights(fan_out, fan_in):\n W = np.zeros( (fan_out, 1 + fan_in) )\n W = np.sin( range(1, W.size+1) ).reshape(W.shape)/10\n return W", "def get_waveform(filepath: str, resample_rate: int = None) -> Tuple[torch.Tensor, int]:\n effects = [[\"remix\", \"1\"]]\n if resample_rate:\n effects.extend(\n [\n [\"lowpass\", f\"{resample_rate // 2}\"],\n [\"rate\", f\"{resample_rate}\"],\n ]\n )\n return torchaudio.sox_effects.apply_effects_file(filepath, 
effects=effects)", "def simpleImageRec(C):\n # The Haar wavelet is used\n ld = np.array([1, 1])\n hd = np.array([-1, 1])\n lr = ld/2\n hr = -hd/2\n\n A = C[-1]\n for scale in reversed(C[:-1]):\n A = recWave2D(A, scale[0], scale[1], scale[2], lr, hr)\n\n return A", "def restframe_flux(x, z):\n return x * (1.0 + z)", "def __init__(self, spectrum):\n self.spectrum = spectrum\n self.scales = wave.autoscales(N=spectrum.shape[0], dt=1, dj=0.25, wf='dog', p=2)\n self.freq0 = 0\n self.wSize = 5 if len(self.scales) > 5 else len(self.scales) - 1\n self._transformation = wave.cwt(spectrum, dt=1, scales=self.scales, wf='dog', p=2)\n self._rec = None", "def scalogram(filename, savename):\n\n #signal reading\n (rate,signal) = wav.read(filename)\n\n #ignore other bands for primary treatment\n if signal.shape[1] > 1:\n signal = signal[:,0]\n\n #clip the signal\n max_energy = max(energy)\n start_frame = 0\n for k in range(len(energy)):\n if energy[k] >= max_energy*0.01:\n start_frame = k\n break\n\n end_frame = start_frame\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.001:\n end_frame = k\n break\n\n if(end_frame == start_frame):\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.01:\n end_frame = k\n break\n\n samples_per_frame = rate * 0.01\n signal = signal[start_frame*samples_per_frame:end_frame*samples_per_frame]\n\n\n wavelet=DOG4\n maxscale=10\n notes=100\n scaling='log'#\"log\" #or \"linear\"\n plotpower2d=True\n\n Ns=1024\n #limits of analysis\n Nlo=0 \n Nhi=Ns\n\n # Wavelet transform\n cw=wavelet(signal,maxscale,notes,scaling=scaling)\n scales=cw.getscales() \n cwt=cw.getdata()\n # power spectrum\n pwr=cw.getpower()\n scalespec=np.sum(pwr,axis=1)/scales # calculate scale spectrum\n # scales\n y=cw.fourierwl*scales\n x=np.arange(Nlo*1.0,Nhi*1.0,1.0)\n \n #mpl.tight_layout()\n mpl.axis('off')\n fig=mpl.figure(1)\n\n # 2-d coefficient plot\n plotcwt=np.clip(np.fabs(cwt.real), 0., 1000.)\n if plotpower2d: plotcwt=pwr\n im=mpl.imshow(plotcwt,cmap=mpl.cm.jet,extent=[x[0],x[-1],y[-1],y[0]],aspect='auto')\n mpl.ylim(y[0],y[-1])\n theposition=mpl.gca().get_position()\n\n mpl.tight_layout()\n mpl.savefig(savename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
wavelet decomposition of a 2D image into four new images. The image is supposed to be square, and its size is a power of 2 in the x and y dimensions.
def decWave2D(image, ld, hd):
    # Decomposition on rows
    sx, sy = image.shape

    LrA = np.zeros((sx, int(sy/2)))
    HrA = np.zeros((sx, int(sy/2)))

    for i in range(sx):
        A, D = waveSingleDec(image[i, :], ld, hd)
        LrA[i, :] = A
        HrA[i, :] = D

    # Decomposition on cols
    LcLrA = np.zeros((int(sx/2), int(sy/2)))
    HcLrA = np.zeros((int(sx/2), int(sy/2)))
    LcHrA = np.zeros((int(sx/2), int(sy/2)))
    HcHrA = np.zeros((int(sx/2), int(sy/2)))

    for j in range(int(sy/2)):
        A, D = waveSingleDec(LrA[:, j], ld, hd)
        LcLrA[:, j] = A
        HcLrA[:, j] = D

        A, D = waveSingleDec(HrA[:, j], ld, hd)
        LcHrA[:, j] = A
        HcHrA[:, j] = D

    return LcLrA, HcLrA, LcHrA, HcHrA
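decWave2D applies the same one-level split along rows and then along columns, so a square image whose side is a power of 2 yields four quarter-size sub-images (one approximation and three detail images). A short usage sketch, again assuming the waveSingleDec helper sketched earlier; the 4x4 test image is illustrative.

import numpy as np

img = np.arange(16, dtype=float).reshape(4, 4)
LcLr, HcLr, LcHr, HcHr = decWave2D(img, [1, 1], [-1, 1])
print(LcLr.shape, HcLr.shape, LcHr.shape, HcHr.shape)  # four (2, 2) sub-images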
[ "def multiband_starlet_transform(image, scales=None, generation=2, convolve2D=None):\n assert len(image.shape) == 3, f\"Image should be 3D (bands, height, width), got shape {len(image.shape)}\"\n assert generation in (1, 2), f\"generation should be 1 or 2, got {generation}\"\n scales = get_scales(image.shape, scales)\n\n wavelets = np.empty((scales+1,)+image.shape, dtype=image.dtype)\n for b, image in enumerate(image):\n wavelets[:, b] = starlet_transform(image, scales=scales, generation=generation, convolve2D=convolve2D)\n return wavelets", "def _plot_wavelet(datas):\n \n # Declare a starlet object (and performs the transform)\n Sw = scarlet.Starlet(datas, lvl=5, direct=True)\n # This is the starlet transform as an array\n w = Sw.coefficients\n # The inverse starlet transform of w (new object otherwise, the tranform is not used)\n iw = Sw.image\n\n # TODO: Clean this code up using plt.subplots()\n # The wavelet transform of the first slice of images in pictures\n lvl = w.shape[1]\n plt.figure(figsize=(lvl*5+5,5))\n plt.suptitle('Wavelet coefficients')\n for i in range(lvl):\n plt.subplot(1, lvl, i+1)\n plt.title('scale' + str(i+1))\n plt.imshow(w[0,i], cmap='inferno')\n plt.colorbar()\n plt.show()\n\n # Making sure we recover the original image\n plt.figure(figsize=(30,10))\n plt.subplot(131)\n plt.title('Original image', fontsize=20)\n plt.imshow(datas[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(132)\n plt.title('Starlet-reconstructed image', fontsize=20)\n plt.imshow(iw[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(133)\n plt.title('Absolute difference', fontsize=20)\n plt.imshow((np.abs(iw[0]-datas[0])), cmap='inferno')\n plt.colorbar()\n plt.show()\n \n return", "def multiband_starlet_reconstruction(starlets, generation=2, convolve2D=None):\n scales, bands, width, height = starlets.shape\n result = np.array((bands, width, height), dtype=starlets.dtype)\n for band in bands:\n result[:, band] = starlet_reconstruction(\n starlets[:, band],\n generation=generation,\n convolve2D=convolve2D\n )\n return result", "def wavedec(img, wavelet, level=None):\n return pywt.wavedec2(img, wavelet, mode='symmetric', level=level, axes=(-2, -1))", "def starlet_transform(image, scales=None, generation=2, convolve2D=None):\n assert len(image.shape) == 2, f\"Image should be 2D, got {len(image.shape)}\"\n assert generation in (1, 2), f\"generation should be 1 or 2, got {generation}\"\n\n scales = get_scales(image.shape, scales)\n c = image\n if convolve2D is None:\n convolve2D = bspline_convolve\n\n ## wavelet set of coefficients.\n starlet = np.zeros((scales + 1,) + image.shape)\n for j in range(scales):\n gen1 = convolve2D(c, j)\n\n if generation == 2:\n gen2 = convolve2D(gen1, j)\n starlet[j] = c - gen2\n else:\n starlet[j] = c - gen1\n\n c = gen1\n\n starlet[-1] = c\n return starlet", "def gen_powphase2d_old(si, phiF, rF, inner, outer, dx, dy, xW, yW, normfres=True, debug=True):\r\n # specified diffraction and refraction scales\r\n ld = rF / phiF \r\n lr = rF * phiF \r\n\r\n nx = int(xW/dx)\r\n ny = nx\r\n if debug: print 'targeted number of x,y samples = ', nx,ny\n \n #print \"nx\", nx\n #print \"ny\", ny\n \r\n xvec = (arange(0.,nx)-nx/2+1)*dx\r\n yvec = (arange(0.,ny)-ny/2+1)*dy\r\n\r\n dqx = 2.*pi / xW \r\n dqy = 2.*pi / yW\r\n qmaxx = (2.*pi) / (2.*dx)\r\n qmaxy = (2.*pi) / (2.*dy)\r\n\r\n nqx = 2*int(qmaxx/dqx)\r\n nqy = 2*int(qmaxy/dqy)\r\n if debug: print 'targeted number of q samples = ', nqx, nqy \r\n if nqx != nx: \r\n print \"Forcing nqx = nx = \", nx\r\n nqx = nx\r\n if nqy != 
ny: \r\n print \"Forcing nqy = ny = \", ny\r\n nqy = ny\r\n qxvec = (arange(0.,nqx)-nqx/2+1)*dqx\r\n qxvec = roll(qxvec,nqx/2+1)\r\n qyvec = (arange(0.,nqy)-nqy/2+1)*dqy\r\n qyvec = roll(qyvec,nqy/2+1)\r\n\r\n qin = 2.*pi / inner\r\n qout = 2.*pi / outer\r\n qshape = zeros((nqx, nqy))\r\n \r\n for i, qxi in enumerate(qxvec):\r\n for j, qyj in enumerate(qyvec):\r\n qsq = qxi**2 + qyj**2\r\n qshape[i,j] = (qout**2 + qsq)**(-si/4.) \r\n #qshape[i,j] = (qout**2 + qsq)**(-si/4.) * exp(-(qsq/(2.*qin**2))) \r\n npoints = size(qshape)\r\n\r\n if debug:\r\n print si, inner, outer, dx, npoints\r\n print dqx, dqy, qin, qout\r\n\r\n xformr=randn(nqx, nqy)*qshape\r\n xformi=randn(nqx, nqy)*qshape\r\n xform = xformr + 1j*xformi\r\n spectrum=real(xform*conj(xform))\r\n xseries = real(ifft2(xform))\r\n\r\n if normfres:\r\n frindx = int(rF/dx)\r\n x1dcut = xseries[0,:]\r\n var_fres_in = var(x1dcut[0:size(x1dcut)-frindx]-x1dcut[frindx:])\r\n xseries_norm = xseries * rF / sqrt(var_fres_in) \r\n xn1dcut = xseries_norm[0,:]\r\n var_fres_out = var(xn1dcut[0:size(xn1dcut)-frindx]-xn1dcut[frindx:])\r\n #var_fres_out = var(xseries_norm[0:size(xseries_norm)-frindx]-xseries_norm[frindx:])\r\n print \"index of fresnel scale = \", frindx\r\n print var_fres_in, var_fres_out\r\n\r\n return xvec, yvec, xseries, xseries_norm, qxvec, qyvec, qshape", "def deInterleaveImage(image, N, conserve_flux=False,suppress_warnings=False):\n from .image import Image\n from .position import _PositionD\n from .wcs import JacobianWCS, PixelScale\n if isinstance(N,int):\n n1,n2 = N,N\n else:\n try:\n n1,n2 = N\n except (TypeError, ValueError):\n raise TypeError(\"N must be an integer or a tuple of two integers\") from None\n\n if not isinstance(image, Image):\n raise TypeError(\"image must be an instance of galsim.Image\")\n\n y_size,x_size = image.array.shape\n if x_size%n1 or y_size%n2:\n raise GalSimIncompatibleValuesError(\n \"The value of N is incompatible with the dimensions of the image to be deinterleaved\",\n N=N, image=image)\n\n im_list, offsets = [], []\n for i in range(n1):\n for j in range(n2):\n # The tricky part - going from array indices to Image coordinates (x,y)\n # DX[i'] = -(i+0.5)/n+0.5 = -i/n + 0.5*(n-1)/n\n # i = -n DX[i'] + 0.5*(n-1)\n dx,dy = -(i+0.5)/n1+0.5,-(j+0.5)/n2+0.5\n offset = _PositionD(dx,dy)\n img_arr = image.array[j::n2,i::n1].copy()\n img = Image(img_arr)\n if conserve_flux is True:\n img *= n1*n2\n im_list.append(img)\n offsets.append(offset)\n\n wcs = image.wcs\n if wcs is not None and wcs._isUniform:\n jac = wcs.jacobian()\n for img in im_list:\n img_wcs = JacobianWCS(jac.dudx*n1,jac.dudy*n2,jac.dvdx*n1,jac.dvdy*n2)\n ## Since pixel scale WCS is not equal to its jacobian, checking if img_wcs is a pixel\n ## scale\n img_wcs_decomp = img_wcs.getDecomposition()\n if img_wcs_decomp[1].g==0:\n img.wcs = PixelScale(img_wcs_decomp[0])\n else:\n img.wcs = img_wcs\n ## Preserve the origin so that the interleaved image has the same bounds as the image\n ## that is being deinterleaved.\n img.setOrigin(image.origin)\n\n elif suppress_warnings is False:\n galsim_warn(\"Individual images could not be assigned a WCS automatically.\")\n\n return im_list, offsets", "def splitImage(numRows, numCols, image, piece_dims=(32,32,3)):\n piece_height, piece_width, piece_depth = piece_dims\n # large_width, large_height, large_depth = numRows * piece_height, numCols * piece_width, piece_depth\n # # resized_img = np.array(scipy.misc.imresize(image, (large_width, large_height, large_depth), interp='nearest'))\n # resized_img = 
np.array(resize(image, (large_width, large_height, large_depth), \n # preserve_range=True, mode='reflect')).astype(dtype=np.uint8)\n resized_img = image\n updated_pieced_dims = (piece_height + JIGGLE_ROOM, piece_width + JIGGLE_ROOM, piece_depth)\n #print(np.shape(image))\n hsplits = np.array(np.split(resized_img, numCols, axis=1))\n vsplits = np.array(np.split(hsplits, numRows, axis=1)) # Not 1 since we introduce one more dim.\n split_images = vsplits.reshape(-1, *updated_pieced_dims)\n #jiggled_imgs = []\n #for image in split_images:\n # x_start = np.random.randint(0, JIGGLE_ROOM, 1)[0]\n # y_start = np.random.randint(0, JIGGLE_ROOM, 1)[0]\n # jiggled_imgs.append(image[x_start:(x_start + piece_height), y_start:(y_start + piece_width) , :])\n #gc.collect()\n return split_images #jiggled_imgs", "def main_image_filter2d():\n img = cv2.imread(IMAGE_GRAY)\n images = [(n, cv2.filter2D(img, -1, np.ones((n,n),np.float32)/(n*n))) for n in [3,10,20,100]]\n show_images(images)", "def expand(image):\n # per the instructions, use 0.4 for the kernel generation\n kernel = generating_kernel(0.4)\n\n # make a new array double the size, assign initial values\n output = np.zeros((image.shape[0] * 2, image.shape[1] * 2))\n output[:output.shape[0]:2, :output.shape[1]:2] = image\n\n # use convolve2d to fill in rest\n # multiply by 4 per instructions to scale back up\n output = scipy.signal.convolve2d(output, kernel, 'same') * 4\n return output", "def make2Dimage(coeffs, res, px=[64, 64], phs=[0., 0.]):\n start_time = time.time()\n\n #start from a regular Cartesian grid\n lrange = np.linspace(-1.*px[0]*res/2., px[0]*res/2., num=px[0], endpoint=True)/(np.pi/2.) #m range (-1,1)\n mrange = np.linspace(-1.*px[1]*res/2., px[1]*res/2., num=px[1], endpoint=True)/(np.pi/2.) #l range (-1,1)\n xx,yy = np.meshgrid(lrange, mrange)\n #xx,yy = np.meshgrid(np.linspace(-1., 1., num=px[0]), np.linspace(-1., 1., num=px[1])) #Full hemisphere, no FoV control\n img = np.zeros(xx.shape, dtype='complex')\n \n #convert to polar positions\n r = np.sqrt(xx**2. + yy**2.)\n phi = np.arctan2(yy, xx)\n\n # zero out undefined regions of the image where r>0\n # overly tedious steps for something that should be much easier to do\n rflat = r.flatten()\n phiflat = phi.flatten()\n maxRcond = r.flatten() > 1\n idx = np.argwhere(maxRcond)\n rflat[idx] = 0.\n phiflat[idx] = 0.\n r = np.reshape(rflat, r.shape)\n phi = np.reshape(phiflat, phi.shape)\n\n #convert to unit sphere coordinates\n thetap = np.arccos(r) - np.pi/2. 
#north pole is at 0 in spherical coordinates\n phip = np.pi - phi #azimuth range [-pi, pi] -> [2pi, 0]\n\n #Determine the theta, phi coordinates for a hemisphere at the snapshot zenith\n X, Y, Z = util.sph2cart(thetap, phip)\n ra = phs[0]\n raRotation = np.array([[np.cos(ra), -1.*np.sin(ra), 0.],\n [np.sin(ra), np.cos(ra), 0.],\n [ 0., 0., 1.]]) #rotate about the z-axis\n dec = np.pi - phs[1] #adjust relative to the north pole at -pi/2\n print 'dec', dec, 'phs', phs[1]\n # TODO: might need to do a transpose to apply the inverse rotation\n decRotation = np.array([[1.,0.,0.],\n [0., np.cos(dec), -1.*np.sin(dec)],\n [0., np.sin(dec), np.cos(dec)]]) #rotate about the x-axis\n XYZ = np.vstack((X.flatten(), Y.flatten(), Z.flatten()))\n rotMatrix = np.dot(decRotation, raRotation)\n XYZ0 = np.dot(rotMatrix, XYZ) #order of rotation is important\n #XYZ0 = np.dot(np.dot(raRotation, decRotation), XYZ) #order of rotation is important\n r0, phi0, theta0 = util.cart2sph(XYZ0[0,:], XYZ0[1,:], XYZ0[2,:])\n r0 = r0.reshape(thetap.shape) #not used, should all be nearly 1\n phi0 = phi0.reshape(thetap.shape) #rotated phi values\n theta0 = theta0.reshape(thetap.shape) #rotated theta values\n\n lmax = coeffs.shape[0]\n print 'L:',\n for l in np.arange(lmax):\n print l,\n sys.stdout.flush()\n for m in np.arange(-1*l, l+1):\n img += coeffs[l, l+m] * Ylm.Ylm(l, m, phi0, theta0) #TODO: a slow call\n print 'done'\n\n print 'Run time: %f s'%(time.time() - start_time)\n\n return np.ma.array(img, mask=maxRcond)", "def convolve(self, img):", "def generate2DGuassian(image, wsize, sigma):", "def wiener_filter(img: np.ndarray, mask_size: tuple) -> np.ndarray:\n data = np.array(img, dtype=np.float64)\n data = data.ravel()\n res = wiener(data, mask_size[0])\n return res.reshape(img.shape).astype(np.uint8)", "def resample(self, shape_matrix:tuple = (256, 256, 1024), shape_physic=(700, 700, 2000)) -> np.ndarray:\n spacing = self.nifti_img.GetSpacing()\n origin = self.nifti_img.GetOrigin()\n direction = self.nifti_img.GetDirection()\n size = self.nifti_img.GetSize()\n #target spacing, and size\n spacing_x = shape_physic[0]/shape_matrix[0] #mm\n spacing_y = shape_physic[1]/shape_matrix[1] #mm \n spacing_z = shape_physic[2]/shape_matrix[2] #mm\n\n true_x = size[0] * spacing[0] #mm\n true_y = size[1] * spacing[1] #mm \n true_z = size[2] * spacing[2] #mm\n\n new_size_x = int((true_x * shape_matrix[0]) / shape_physic[0]) #pixel\n new_size_y = int((true_y * shape_matrix[1]) / shape_physic[1]) #pixel\n new_size_z = int((true_z * shape_matrix[2]) / shape_physic[2]) #pixel\n\n #applied transformation\n transformation = sitk.ResampleImageFilter()\n transformation.SetOutputDirection(direction)\n transformation.SetOutputOrigin(origin)\n transformation.SetSize((new_size_x, new_size_y, new_size_z))\n transformation.SetOutputSpacing((spacing_x, spacing_y, spacing_z))\n transformation.SetInterpolator(sitk.sitkLinear)\n new_img = transformation.Execute(self.nifti_img) \n result = sitk.GetArrayFromImage(new_img) #[z,y,x]\n center = [int(shape_matrix[2]/2), int(shape_matrix[1]/2), int(shape_matrix[1]/2)]\n z = int(result.shape[0]/2)\n y = int(result.shape[1]/2)\n x = int(result.shape[2]/2)\n sommet_x = center[2] - x \n sommet_y = center[1] - y \n sommet_z = center[0] - z\n new_array = np.zeros((shape_matrix[2], shape_matrix[1], shape_matrix[0]))\n if result.shape[1] != shape_matrix[1] : \n new_array[sommet_z:sommet_z+result.shape[0], sommet_y:sommet_y + result.shape[1], sommet_x:sommet_x + result.shape[2]] = result\n else : \n 
new_array[sommet_z:sommet_z+result.shape[0],0:shape_matrix[1], 0:shape_matrix[0]] = result\n return new_array", "def im_split_z(data,dim,depth):\r\n\r\n\tn_tot = len(data[0][:,0]) #number of events\r\n\r\n\t#define lattice to insert energies into to creta images\r\n\tx_ref = np.linspace(x_min,x_max,dim)\r\n\ty_ref = np.linspace(y_min,y_max,dim)\r\n\tz_ref = np.linspace(z_min,z_max,depth)\r\n\r\n\th=0 #count events\r\n\tim_array = np.zeros(n_tot,dim,dim,depth)\r\n\r\n\twhile h<n_tot: #loop over events\r\n\r\n\t\tim_tab = np.zeros((dim,dim,depth)) #array to contain constructed image\r\n\r\n\t\t#remove empty hits\r\n\t\tind_nul = np.where(data[3][h,:]==0)[0]\r\n\t\tx_tab = data[0][h,:ind_nul]\r\n\t\ty_tab = data[1][h,:ind_nul]\r\n\t\tz_tab = data[2][h,:ind_nul]\r\n\t\te_tab = data[3][h,:ind_nul]\r\n\r\n\t\tfor i in range(len(e_tab)): #loop over hits within event\r\n\r\n\t\t\tx_coord = np.argmin(np.abs(x_ref-x_tab[i]))\r\n\t\t\ty_coord = np.argmin(np.abs(y_ref-y_tab[i]))\r\n\t\t\tz_coord = np.argmin(np.abs(z_ref-z_tab[i]))\r\n\r\n\t\t\tim_array[h,x_coord,y_coord,z_coord] += e_tab[i]\r\n\r\n\r\n\t\t#show a few plots of actual event vs events converted to picture\r\n\t\t\"\"\"\r\n\t\tif h<5:\r\n\t\t\tdata_actual = [x_tab,y_tab,z_tab,e_tab]\r\n\t\t\tdata_comp = im_array[h,:,:,:]\r\n\t\t\tplot_scatter(data_actual,data_comp,depth)\r\n\t\t\"\"\"\r\n\t\th+=1 #next event\r\n\r\n\t\tif h % 10000 == 0:\r\n\t\t\tprint('{}% completed'.format(h/n_tot*100))\r\n\r\n\treturn im_array", "def morph(img1, img2, img1_pts, img2_pts, triangles, warp_frac, dissolve_frac):\n interpolated = interpolate_pointclouds(img1_pts, img2_pts, warp_frac)\n\n# plt.triplot(interpolated[:,0], interpolated[:,1], triangles)\n# plt.plot(interpolated[:,0], interpolated[:,1], 'o')\n# plt.imshow(img1*0.5 + img2*0.5)\n# plt.axis('equal')\n# plt.show()\n\n triangles_avg = create_triangles(interpolated, triangles)\n triangles_img1 = create_triangles(img1_pts, triangles)\n triangles_img2 = create_triangles(img2_pts, triangles)\n\n transformations_1toavg = calc_transform_all(triangles_img1, triangles_avg)\n transformations_2toavg = calc_transform_all(triangles_img2, triangles_avg)\n\n intermed_img = np.zeros_like(img1)\n last_triangle_index = 0 #save last triangle because the next pixel is likely also in it\n transformation_1toavg = transformations_1toavg[last_triangle_index]\n transformation_2toavg = transformations_2toavg[last_triangle_index]\n\n y_max = intermed_img.shape[0]\n x_max = intermed_img.shape[1]\n\n #create an array with all possible array indices (cartesian product)\n img_indices = np.array(np.meshgrid(np.arange(0, x_max), np.arange(0, y_max))).T.reshape(-1,2)\n #for each triangle, find all containing pixels and fill according to dissolve_frac\n for t in range(len(triangles_avg)):\n triangle = path.Path(triangles_avg[t])\n transformation_1toavg = transformations_1toavg[t]\n transformation_2toavg = transformations_2toavg[t]\n\n bool_contained = np.array(triangle.contains_points(img_indices))\n contained = img_indices[bool_contained]\n for i in contained:\n x, y = (i[0], i[1])\n orig_px_1 = np.zeros((3,))\n orig_px_2 = np.zeros((3,))\n\n if(dissolve_frac != 1): #if we are taking pixels from img1\n orig_px_1 = calc_origin_pixel_color(img1, [x,y], transformation_1toavg)\n if(dissolve_frac != 0): #if we are taking pixels from img2\n orig_px_2 = calc_origin_pixel_color(img2, [x,y], transformation_2toavg)\n #no dissolving (for hybrid images)\n if(dissolve_frac == -1):\n intermed_img[y,x,:] = orig_px_1 + orig_px_2\n else:\n 
intermed_img[y,x,:] = (1-dissolve_frac) * orig_px_1 + dissolve_frac * orig_px_2\n #uncomment for greenscreen effect (pixels with value 0,255,0 are ignored\n if(np.isclose(np.array(orig_px_1), np.array([0,1,0]), atol=0.15)).all():\n intermed_img[y,x,:] = orig_px_2\n elif(np.isclose(np.array(orig_px_2), np.array([0,1,0]), atol=0.15)).all():\n intermed_img[y,x,:] = orig_px_1\n return intermed_img", "def reconstructAll(img,h,step) :\n \n allPatch=getAllPatch(img,h,step)\n\n while len(allPatch[\"noisyPatch\"])>0 :\n \n patchTarget=list(allPatch[\"noisyPatch\"].items())[0]\n _,newPatch = approximePatch(patchTarget,allPatch[\"goodPatch\"])\n img = newPatchInImage(img,newPatch)\n del allPatch[\"noisyPatch\"][newPatch[0]]\n return img", "def imstep(width, height): \n img = np.zeros( (height, width, 1), dtype=np.float32 )\n img[:, -width//2:] = 1\n return vipy.image.Image(array=img)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
wavelet reconstruction of an image described by the wavelet coefficients C
def simpleImageRec(C): # The Haar wavelet is used ld = np.array([1, 1]) hd = np.array([-1, 1]) lr = ld/2 hr = -hd/2 A = C[-1] for scale in reversed(C[:-1]): A = recWave2D(A, scale[0], scale[1], scale[2], lr, hr) return A
[ "def wavelet_coeffs(data,srate,freqs,nco=2):\n\n # Generate the scaled wavelets for each frequency\n Ws = list()\n for k,f in enumerate(freqs):\n\n sigma_tf = nco/(2*np.pi*f) #f: frequency in Hz\n\n t = np.arange(0,5*sigma_tf,1/srate)\n t = np.r_[-t[::-1],t[1:]]\n\n osc = np.exp(2.*1j*np.pi*f*t)\n gauss = np.exp(-t**2/(2.*sigma_tf**2))\n W_tf = osc*gauss\n W_tf /= sqrt(.5)*linalg.norm(W_tf.ravel())\n Ws.append(W_tf)\n\n # Do the actual transform (e.g., convolve the signal with the wavelets)\n # Caveat: This is currently only the single-job version of cwt !!!\n isVector = False\n if data.ndim==1:\n data = np.array([data,data])\n isVector = True\n\n if data.ndim==3:\n channels = data.shape[2]\n coeffs = list()\n for c in range(channels):\n dummy = tfr.cwt(data[:,:,c],Ws)\n coeffs.append(dummy)\n else:\n coeffs = tfr.cwt(data,Ws)\n\n if isVector:\n coeffs = np.squeeze(coeffs[0,:,:])\n print(\"Removed dummy dimension from coeffs!\")\n\n return coeffs,Ws", "def reconstruct(self):\n self.decompose()\n self.recon_full = np.sum(val * self.zernikel(i, self.grid_rho, self.grid_phi)*self.grid_mask for (i, val) in enumerate(self.coeffs))\n self.res_full = (abs(self.img) - abs(self.recon_full)) * self.grid_mask\n \n # If a threshold is given, truncate the coeffs and create truncated reconstructions\n if self.threshold:\n self.truncate()\n self.recon_trunc = np.sum(val * self.zernikel(i, self.grid_rho, self.grid_phi)*self.grid_mask for (i, val) in enumerate(self.coeffs_trunc))\n self.res_trunc = (abs(self.img) - abs(self.recon_trunc)) * self.grid_mask\n self.diff_full_trunc = (self.recon_full - self.recon_trunc) * self.grid_mask", "def simpleWaveRec(C):\n ld = np.array([1, 1])\n hd = np.array([-1, 1])\n lr = ld/2\n hr = -hd/2\n\n A = C[-1]\n for scale in reversed(C[:-1]):\n A = waveSingleRec(A, scale, lr, hr)\n return A", "def coil_combine_cmrr_sequential(chain):\n block = chain._block\n set = chain._block.set\n dataset = chain._dataset\n raw = chain.raw\n\n ncoils = raw.shape[1]\n nfids = raw.shape[2]\n dim0 = raw.shape[3]\n acqdim0 = dim0\n xaxis = range(dim0)\n\n flag_norm_to_sum = False # default for now\n\n dat_comb = np.ndarray([nfids,dim0], dtype=np.complex128)\n\n all_weight = np.ndarray([nfids,ncoils], dtype=np.float)\n all_phases = np.ndarray([nfids,ncoils], dtype=np.complex)\n\n for i in range(nfids):\n\n # determine weighting and phz for each coil\n # zero-order phase correction\n # correct for phase based on 1st point in 1st wref fid\n\n # for each average, calc phase and weights to correct for coil geometry\n chans = []\n weight = []\n phases = []\n \n for j in range(ncoils):\n chan = chain.raw[0,j,i,:].copy()\n \n magn = np.abs(chan[0])\n phas = np.conjugate(chan[0])/magn # normalized complex conj to cancel phase \n chan = phas * chan # Note. 
applying phase here NOT below as in Siemens\n \n # amplitude of zero order phased fid in time domain\n # using 9th order polynomial fit (based on Uzay's script)\n coeffs = np.polyfit(xaxis, np.absolute(chan), 9)\n \n weight.append(coeffs[-1]) # last entry is amplitude - zero order coeff\n phases.append(phas)\n chans.append(chan)\n \n # normalize weighting function based on spectro data \n tmp = np.sum([val*val for val in weight]) # sum squared values \n if tmp == 0.0: tmp = 1.0\n if flag_norm_to_sum:\n # sum of sensitivities\n lamda = np.sum(weight) / tmp \n else:\n # sqrt of sum of squared sensitivities\n lamda = 1.0 / np.sqrt(tmp)\n\n weight = [val*lamda for val in weight]\n\n all_weight[i,:] = weight\n all_phases[i,:] = phases\n \n # apply weighting ... phase corrections done above\n for j,chan in enumerate(chans):\n chans[j] = chan * weight[j]\n \n # sum corrected FIDs from each coil into one combined FID\n dat_comb[i,:] = np.sum(chans, axis=0) \n\n print_combine_stats(all_weight, all_phases, method='CMRR_Sequential')\n \n return normalize_shape(dat_comb), all_weight, all_phases", "def cwt(\n x ,\n# frequencies = np.exp(np.arange( -5.5 , 0.0 , 0.01 )) ,\n frequencies = np.exp(np.arange( -2.5 , 0.0 , 0.01 )) ,\n wavelet = cauchy,\n Q = 10.\n):\n\n\n N_x = len(x)\n N_pad = closest_anti_prime( N_x + 120 ) - N_x\n N = N_x + N_pad # data length including padding\n\n X = np.fft.fft( np.concatenate(( x , np.zeros(N_pad) )) )\t# fft of padded input data\n w = np.arange( 0 , N/2 ) * 2./N \n # TODO check if frequency scaling is correct ( either Nyquist or zero included or both ? )\n\n WT = [] \t# the resulting transform\n\n\n for f in frequencies:\n a = 1.0 / f\n WT.append( np.fft.ifft( np.concatenate((X[:N/2] * wavelet(a*w,Q) , np.zeros(N/2))) )[:N_x] ) # <-- this makes real w'lets progressive, FIXME\n\n return [ np.array(WT) , frequencies ]\n # TODO make this a class behaving like the actual transform with freq and wlet as memebers", "def _plot_wavelet(datas):\n \n # Declare a starlet object (and performs the transform)\n Sw = scarlet.Starlet(datas, lvl=5, direct=True)\n # This is the starlet transform as an array\n w = Sw.coefficients\n # The inverse starlet transform of w (new object otherwise, the tranform is not used)\n iw = Sw.image\n\n # TODO: Clean this code up using plt.subplots()\n # The wavelet transform of the first slice of images in pictures\n lvl = w.shape[1]\n plt.figure(figsize=(lvl*5+5,5))\n plt.suptitle('Wavelet coefficients')\n for i in range(lvl):\n plt.subplot(1, lvl, i+1)\n plt.title('scale' + str(i+1))\n plt.imshow(w[0,i], cmap='inferno')\n plt.colorbar()\n plt.show()\n\n # Making sure we recover the original image\n plt.figure(figsize=(30,10))\n plt.subplot(131)\n plt.title('Original image', fontsize=20)\n plt.imshow(datas[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(132)\n plt.title('Starlet-reconstructed image', fontsize=20)\n plt.imshow(iw[0], cmap='inferno')\n plt.colorbar()\n plt.subplot(133)\n plt.title('Absolute difference', fontsize=20)\n plt.imshow((np.abs(iw[0]-datas[0])), cmap='inferno')\n plt.colorbar()\n plt.show()\n \n return", "def starlet_transform(image, scales=None, generation=2, convolve2D=None):\n assert len(image.shape) == 2, f\"Image should be 2D, got {len(image.shape)}\"\n assert generation in (1, 2), f\"generation should be 1 or 2, got {generation}\"\n\n scales = get_scales(image.shape, scales)\n c = image\n if convolve2D is None:\n convolve2D = bspline_convolve\n\n ## wavelet set of coefficients.\n starlet = np.zeros((scales + 1,) + 
image.shape)\n for j in range(scales):\n gen1 = convolve2D(c, j)\n\n if generation == 2:\n gen2 = convolve2D(gen1, j)\n starlet[j] = c - gen2\n else:\n starlet[j] = c - gen1\n\n c = gen1\n\n starlet[-1] = c\n return starlet", "def wavedec(img, wavelet, level=None):\n return pywt.wavedec2(img, wavelet, mode='symmetric', level=level, axes=(-2, -1))", "def makeimage(img_lam,dirname='./',incl=0.,PA=0.,npix=512,sizeau=1000.,\\\n img_name=None,dpc=10.0,**kwargs):\n import os,subprocess,shutil\n delete_image = False\n if img_name is None:\n delete_image = True\n img_name = 'temp_image'\n if os.path.exists(dirname+os.sep+img_name):\n os.unlink(dirname+os.sep+img_name)\n \n incl_str = '{:.0f}'.format(round(incl))\n PA_str = '{:.0f}'.format(round(PA-90.))\n npix_str = '{:.0f}'.format(round(npix))\n szau_str = '{:.0f}'.format(round(sizeau))\n wl_str = '{:.4f}'.format(img_lam*1e4)\n dpc_str = '{:.4f}'.format(dpc)\n #\n # create image at that wavelength\n #\n params = [item for kv in kwargs.iteritems() for item in kv]\n subprocess.call(['nice','radmc3d','image','lambda',wl_str,\\\n 'incl',incl_str,'posang',PA_str,'npix',npix_str,\\\n 'sizeau',szau_str,'dpc',dpc_str]+params,cwd=dirname)\n if os.path.exists(dirname+os.sep+'image.fits'):\n os.unlink(dirname+os.sep+'image.fits')\n radmcimage_to_fits(dirname+os.sep+'image.out',\\\n dirname+os.sep+'image.fits',dpc)\n #\n # read in image\n #\n im=readimage(filename=dirname+os.sep+'image.out')\n #\n # delete if necessary\n #\n os.unlink(dirname+os.sep+'image.out')\n if delete_image:\n os.unlink(dirname+os.sep+'image.fits')\n else:\n shutil.move(dirname+os.sep+'image.fits',\\\n dirname+os.sep+img_name+'.fits')\n #\n # return result\n #\n return im", "def scalogram(filename, savename):\n\n #signal reading\n (rate,signal) = wav.read(filename)\n\n #ignore other bands for primary treatment\n if signal.shape[1] > 1:\n signal = signal[:,0]\n\n #clip the signal\n max_energy = max(energy)\n start_frame = 0\n for k in range(len(energy)):\n if energy[k] >= max_energy*0.01:\n start_frame = k\n break\n\n end_frame = start_frame\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.001:\n end_frame = k\n break\n\n if(end_frame == start_frame):\n for k in range(start_frame,len(energy)):\n if energy[k] < max_energy*0.01:\n end_frame = k\n break\n\n samples_per_frame = rate * 0.01\n signal = signal[start_frame*samples_per_frame:end_frame*samples_per_frame]\n\n\n wavelet=DOG4\n maxscale=10\n notes=100\n scaling='log'#\"log\" #or \"linear\"\n plotpower2d=True\n\n Ns=1024\n #limits of analysis\n Nlo=0 \n Nhi=Ns\n\n # Wavelet transform\n cw=wavelet(signal,maxscale,notes,scaling=scaling)\n scales=cw.getscales() \n cwt=cw.getdata()\n # power spectrum\n pwr=cw.getpower()\n scalespec=np.sum(pwr,axis=1)/scales # calculate scale spectrum\n # scales\n y=cw.fourierwl*scales\n x=np.arange(Nlo*1.0,Nhi*1.0,1.0)\n \n #mpl.tight_layout()\n mpl.axis('off')\n fig=mpl.figure(1)\n\n # 2-d coefficient plot\n plotcwt=np.clip(np.fabs(cwt.real), 0., 1000.)\n if plotpower2d: plotcwt=pwr\n im=mpl.imshow(plotcwt,cmap=mpl.cm.jet,extent=[x[0],x[-1],y[-1],y[0]],aspect='auto')\n mpl.ylim(y[0],y[-1])\n theposition=mpl.gca().get_position()\n\n mpl.tight_layout()\n mpl.savefig(savename)", "def mkcube(hdus, refcoord, pixres=0.05, pixwidth=3,\\\n wavebound=(4.045,4.06), wavebinfact=1,\\\n imsize=(500,500), widthsmooth=1.0,\\\n outname='cube.fits'):\n\n #~~~ Construct cube WCS/header\n #~~~ First get wavelength step of all spectra, and calculate wavelength grid of cube ~~~~\n wcsslist = 
[WCS(hdu).sub(['spectral']) for hdu in hdus]\n # print(wcsslist[0].wcs.pc)\n \n wavedeltspec = [((ws.wcs.pc[0]*u.dimensionless_unscaled)*u.Unit(ws.world_axis_units[0])).to('um').value for ws in wcsslist]\n wavedelt = wavebinfact * np.mean(wavedeltspec) # convert to microns\n print(\"WAVE DELTA FOR CUBE\",wavedelt)\n wavecube = np.arange(wavebound[0], wavebound[1] + wavedelt, wavedelt ) #this is wavelength grid of output cube\n wavedim = np.size(wavecube )\n z = np.arange(wavedim)\n # print(wavecube )\n #print(wave_cube[0])\n \n #~~~ Set reference coordinate (central coords)\n# if (refcoord==None) | (type(refcoord)!=SkyCoord):\n# refcoords = [SkyCoord(ra=h.header['crval2'],dec=h.header['crval3'],unit=(u.deg,u.deg),frame=h.header['radesys']) for h in hdu]\n# raref,decref = (np.mean([ref.icrs.ra.degree for ref in refcoords]), np.mean([ref.icrs.dec.degree for ref in refcoords]))\n# else:\n raref,decref=(refcoord.icrs.ra.degree,refcoord.icrs.dec.degree)\n print(\"Central coordinates of cube: \",raref,decref) \n \n #~~~ Set up cube WCS. Define cube shape and enter WCS info in header.\n shapecube = [wavedim, imsize[1], imsize[0]]\n hducube= fits.PrimaryHDU( data=np.zeros(shapecube) )\n \n # create cube celestial and spectral axes\n hdrkeys=[('RADESYS','ICRS'),('EQUINOX',2000.0),\\\n ('CTYPE1','RA---TAN'),('CUNIT1','deg'),('CRPIX1',shapecube[2]/2.),('CRVAL1',(raref,'deg')),('CDELT1',(-pixres/3600.,'deg')),\\\n ('CTYPE2','DEC--TAN'),('CUNIT2','deg'),('CRPIX2',shapecube[1]/2.),('CRVAL2',(decref,'deg')),('CDELT2',(pixres/3600.,'deg')),\\\n ('CTYPE3','WAVE'),('CUNIT3','um'),('CRPIX3',(z +1.0)[wavedim//2]),('CRVAL3',(wavecube[wavedim//2],'um')),('CDELT3',(np.diff(wavecube)[0],'um')),\\\n ('SPECSYS','HELIOCEN'),('RESTWAV',wcsslist[0].wcs.restwav),('VHELIO',(hdus[0].header['VHELIO'],'km/s'))]#,\\\n # ('CD1_1',(-pixres/3600.,'deg')), ('CD2_2',(pixres/3600.,'deg')),('CD3_3',(np.diff(wave cube)[0].value,'um')) ]\n for hk in hdrkeys:\n hducube.header[hk[0]]=hk[1] #.set(hk[0],hk[1])\n \n wcscube = WCS(hducube)\n #wcscube.fix()\n wcscube.printwcs()\n wcsccube=wcscube.celestial\n\n #scube=SpectralCube.read(\"temp.fits\").with_spectral_unit('um',velocity_convention='optical')\n cubestack=[]\n fpstack=[]\n #cubedata=np.zeros(shapecube) # to be output cube\n #cubemask=np.zeros(shapecube) # to be number of contributions to each pix\n iim=0\n for h in hdus:\n #w=WCS(h)\n #spec = np.resize(h.data,shape=[,h.data.shape[1],h.data.shape[2]])\n \n #~~ First smooth and rebin the spectrum ~~\n #~~~ \n spdata=h.data.copy() #np.transpose(h.data)\n print(\"Spectrum shape: \",spdata.shape)\n # define original spectral WCS\n spwcss_orig = WCS(h).sub(['spectral'])\n spwavedim=spdata.shape[0]\n zsp=np.arange(spwavedim)\n spwave=spwcss_orig.pixel_to_world_values(zsp+1)\n spwave=spwave*u.Unit(spwcss_orig.world_axis_units[0])\n \n #~~ Calculate statistics of background away from line\n #ibg = np.where( (spwave.to(u.um).value>wavebound[1]) | (spwave.to(u.um).value<wavebound[0]))\n #spbg = np.median(spdata[:,:,ibg].ravel())\n #spbgrms = np.sqrt( np.mean( spdata[:,:,ibg]**2 ) )\n #print(\"Background counts\",spbg)\n #spdata-=spbg\n \n #~~ smooth using 2D gaussian, by the re-bin factor in the wavelength direction and by stddev=1.0pix~0.155 arcsec in the spatial\n spsmooth=convolve(spdata[:,:,0].T,Gaussian2DKernel(x_stddev=wavebinfact/2.3548,y_stddev=1.0),normalize_kernel=True)\n\n #~~ Define spectrum as a Spectrum1D object for easy re-sampling/spectral smoothing\n 
sp=Spectrum1D(flux=spsmooth*u.dimensionless_unscaled,spectral_axis=spwave,\\\n velocity_convention='optical',rest_value=h.header['restwav']*u.m)\n \n #~~ Smooth, resample in spectral direction\n sampler = SplineInterpolatedResampler() \n# sprebin = sampler(sp, wavecube*u.um)\n splst=[]\n for spi in sp:\n# spsm = gaussian_smooth(spi, stddev=float(wavebinfact)/2.3548)\n sprb = sampler(spi, wavecube*u.um)\n splst.append(sprb.flux.value)\n sprebindat=np.array(splst)\n \n sprebin = Spectrum1D(flux=np.array(splst)*u.dimensionless_unscaled,spectral_axis=wavecube*u.um,\\\n velocity_convention='optical',rest_value=h.header['restwav']*u.m)\n\n sp3data=np.transpose(np.array([sprebin.flux.value.T]*pixwidth),axes=(1,2,0))/np.float(pixwidth)\n # extend along slit width by assuming exp\n \n # print(sp3data.shape)\n #print(sprebin.flux.value.shape,sp3data.shape)\n #sp3data = np.concatenate([[sprebin.flux.value.T]]*pixwidth,axis=2)/float(pixwidth)\n #print(sp3data.shape)\n #break\n \n # print(sprebin.flux.value)\n# plt.figure()\n# plt.plot(sprebin.wavelength.value,sprebin.flux.value.median(0))\n# plt.show()\n\n #~~ Now grow spectrum along slit width so that its extent is > 1 pixel (~slit width). Assume constant slit profile.\n #sp3d=np.concatenate([[sprebin.flux.value.T]]*3,axis=0).T \n #sp3d/=float(pixwidth) # divide by pixel width, to conserve total flux (is this true?)\n #print(sp3d.shape)\n \n #~~ Get RA,DEC of all pixels in a given wavelength slice (same mapping across wavelength)\n spwcsc = WCS(h).celestial\n # adjust wcs to reflect new size of dimension along the slit width\n #spwcsc.wcs.ndim=pixwidth,\n spwcsc.wcs.crpix=float(pixwidth)/2.0 + 0.5,spwcsc.wcs.crpix[1]\n spwcsc.printwcs()\n spchdr=spwcsc.to_header()\n spchdr['naxis1']=sp3data.shape[2]\n spchdr['naxis2']=sp3data.shape[1]\n spchdr['naxis3']=sp3data.shape[0]\n sp3wcsc=WCS(spchdr).celestial\n \n# slitcoord=SkyCoord(ra=spwcsc.wcs.crval[0]*u.deg,dec=spwcsc.wcs.crval[1]*u.deg,\\\n# frame='icrs',unit=(u.deg,u.deg))\n# slitpa=np.arctan( spwcsc.wcs.pc[0,1]/spwcsc.wcs.pc[1,1] )\n# pscale=proj_plane_pixel_scales(spwcsc)*3600.\n #print(pscale)\n# sp3wcs=WCS(fits.PrimaryHDU(sp3data))\n# sp3wcs.wcs.equinox=2000.0\n# sp3wcs.wcs.radesys='ICRS'\n# sp3wcss=wcscube.spectral(['spectral'])\n# sp3wcs.wcs.ctype='RA---TAN','DEC--TAN',sp3wcss.wcs.ctype[0]\n# sp3wcs.wcs.cunit='deg','deg',sp3wcss.wcs.cunit[0]\n# spwcs3.wcs.crval=slitcoord.icrs.ra.degree,slitcoord.icrs.dec.degree,sp3wcss.wcs.crval[0]\n# spwcs3.wcs.crpix=pixcoord[0],pixcoord[1],wcss.wcs.crpix[0]\n# cdelt=pixscale/3600.\n# wcs3.wcs.cd=[[spwcsc.wcs., cdelt*np.sin( slitpa*np.pi/180.), 0. ],\n# [cdelt*np.sin( slitpa*np.pi/180.), cdelt*np.cos( slitpa*np.pi/180.), 0. 
],\n# [0., 0., scdelt ]]\n# sp3hdu = sregister(fits.PrimaryHDU(sp3data,header=h.header),\\\n# # slitcoord,slitpa,spwcsc.wcs.crpix,pscale[0],velosys=0.,outname=None)\n# # sp3wcs=WCS(sp3hdu)\n# # sp3wcs.printwcs()\n \n \n# spreproj = reproject_interp((sp3data, spwcsc.to_header()), shape_out=shapecube,\\\n# order='bicubic',\\\n# return_footprint=False)\n# independent_celestial_slices=True)\n \n# print(spreproj.shape)\n# cubestack.append(spreproj)\n \n# xsp=np.arange(spdata.shape[2])\n# ysp=np.arange(spdata.shape[1])\n# zsp=np.arange(spdata.shape[0]) # redefine z grid to reflect rebinning\n# ixsp,iysp = np.meshgrid(xsp,ysp)\n# xxcube,yycube = astropy.wcs.utils.pixel_to_pixel(spwcsc,wcsccube,ixsp,iysp,0)\n# ixcube=np.round(xxcube).astype(np.int)\n# iycube=np.round(yycube).astype(np.int)\n \n # loop through wave slices, set the data for this image cube, and zero-valued pixels to nan\n cubedata_i = np.zeros(shapecube)\n# fp_i = np.zeros(shapecube)\n for iz in range(cubedata_i.shape[0]):\n spslice=sp3data[iz,:,:]\n # cubedata_i[iz,:,:] = reproject_interp((spslice,spwcsc),output_projection=wcscube.celestial,\\\n # shape_out=cubedata_i.shape[1:], order='bicubic',return_footprint=False)\n # cubedata_i[iz,:,:] = reproject_adaptive((spslice,spwcsc),output_projection=wcscube.celestial,\\\n # shape_out=cubedata_i.shape[1:], order='bilinear',return_footprint=False)\n cubedata_i[iz,:,:] = reproject_adaptive((spslice,spwcsc),output_projection=wcscube.celestial,\\\n shape_out=cubedata_i.shape[1:],order='bilinear',return_footprint=False)\n \n # cubedata_i[iz,:,:] = reproject_adaptive((spslice,spwcsc),output_projection=wcscube.celestial,\\\n # shape_out=cubedata_i.shape[1:], order='bilinear',return_footprint=False)\n \n #cubedata_i[iz,:,:][iycube,ixcube] = sp3data[iz,:,:][iysp,ixsp]\n cubedata_i[np.where(cubedata_i==0.)]=np.nan\n cubestack.append(cubedata_i)\n# footprint.append\n \n cubestack=np.array(cubestack)\n cubestackmask=np.isnan(cubestack)\n cubestackma=np.ma.masked_array(cubestack,mask=cubestackmask)\n cubedata=np.nanmean(cubestack,axis=0)\n cubedata[np.where(cubedata==0.)]=np.nan\n # cubedata=np.ma.mean(cubestackma,axis=0)\n #cubedata.data[np.where(cubedata.data==0.)]=np.nan\n # cubedata=np.nanmean(cubestack,axis=0)\n #print(cubedata.shape)\n fits.writeto(outname[:-5]+\"_exact.fits\",cubedata,header=hducube.header,overwrite=True)\n print(\"REPLACING NANS\")\n spatkern=Gaussian2DKernel(x_stddev=1.0,x_size=11,y_size=11)\n #spatkern=Gaussian2DKernel(x_stddev=1.0)\n #kwargs={'kind':'slinear'}\n cubeinterp=np.zeros(cubedata.shape)\n for k in range(cubedata.shape[0]):\n if (k+1)%5==0:\n print('Coadding spectral slice %i/%i'%(k+1,cubeinterp.shape[0]))\n cubeinterp[k,:,:] = interpolate_replace_nans(cubedata[k,:,:],spatkern,\\\n boundary='extend') \n# cubeinterp[k,:,:] = convolve(cubedata[k,:,:],spatkern, nan_treatment='fill', boundary=None,\\\n# fill_value=np.nan, normalize_kernel=True)\n# print(cubeinterp.shape)\n fits.writeto(outname,data=cubeinterp,header=hducube.header,overwrite=True)\n\n \n return fits.PrimaryHDU(cubeinterp,header=wcscube.to_header())", "def generate_cube_resample(outfile, frame_wcs, slits, fluximg, ivarimg, raimg, decimg, waveimg, slitimg, gpm,\n grid_nspat=5, grid_specsep=20,\n overwrite=False, output_wcs=None, blaze_wave=None, blaze_spec=None, fluxcal=False,\n sensfunc=None, specname=None, debug=False):\n # Set the output_wcs if it's not already set\n if output_wcs is None:\n output_wcs = frame_wcs\n # Check that grid_nspat is an odd number\n if grid_nspat % 2 == 0:\n 
msgs.warn(f\"grid_nspat must be an odd number. Using grid_nspat={grid_nspat+1} instead\")\n grid_nspat += 1\n debug = False\n # Get the grid spacing along the spatial direction\n frm_cd_spat = np.sqrt(frame_wcs.wcs.cd[1, 1] ** 2 + frame_wcs.wcs.cd[0, 1] ** 2)\n out_cd_spat = np.sqrt(output_wcs.wcs.cd[1, 1] ** 2 + output_wcs.wcs.cd[0, 1] ** 2)\n slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True))))\n nvox_spat = int(np.ceil(slitlength*frm_cd_spat/out_cd_spat))\n crd_vox_spat = out_cd_spat * (np.arange(nvox_spat+1) - (nvox_spat+1)// 2) # +1 to get bin edges\n # Get the grid spacing along the spectral direction\n out_cr_wave = output_wcs.wcs.crval[2]\n out_cd_wave = output_wcs.wcs.cd[2, 2]\n nvox_wave = int(np.ceil((np.max(waveimg)-out_cr_wave)/out_cd_wave))\n crd_vox_spec = out_cr_wave + out_cd_wave * np.arange(nvox_wave+1) # +1 to get bin edges\n vox_shape = (nvox_wave+1, nvox_spat+1)\n\n # Detector spectal/spatial pixels and number of slices\n nspec, nspat, nslice = slits.nspec, slits.nspat, slits.spat_id.size\n\n # Generate the output datacube\n datcube = np.zeros((nslice, nvox_spat, nvox_wave), dtype=float)\n varcube = np.zeros((nslice, nvox_spat, nvox_wave), dtype=float)\n\n # Transform the voxel geometry to detector pixels\n grid_nspec = 1 + nspec // grid_specsep\n xgrid = np.zeros((grid_nspec, grid_nspat), dtype=int)\n ygridt = np.zeros(grid_nspec, dtype=int)\n ygridt[-1] = nspec - 1\n ygridt[1:-1] = (nspec % grid_specsep + 2 * grid_specsep) // 2 + np.arange(grid_nspec - 2) * grid_specsep\n ygrid = ygridt[:, np.newaxis].repeat(grid_nspat, axis=1)\n ra0, dec0 = np.zeros(nslice), np.zeros(nslice)\n offsimg = np.zeros_like(waveimg)\n varimgsq = utils.inverse(ivarimg ** 2)\n for sl, spat_id in enumerate(slits.spat_id):\n msgs.info(f\"Calculating voxel geometry for slit {spat_id}\")\n # Calculate RA and Dec of central traces\n wsl = np.where(slitimg == spat_id)\n this_ra, this_dec, this_wave = raimg[wsl], decimg[wsl], waveimg[wsl]\n _, spat_posn, _ = frame_wcs.wcs_world2pix(this_ra, this_dec, this_wave*1.0E-10, 0)\n asrt = np.argsort(spat_posn)\n ra0[sl] = np.interp(0.0, spat_posn[asrt], this_ra[asrt])\n dec0[sl] = np.interp(0.0, spat_posn[asrt], this_dec[asrt])\n # Generate the offsets\n cosdec = np.cos(dec0[sl] * np.pi / 180.0)\n diff_ra, diff_dec = (this_ra - ra0[sl]) * cosdec, this_dec - dec0[sl]\n msgs.bug(\"There is sometimes a sign error that needs to be resolved here...\")\n msgs.error(\"Use another algorithm for the time being...\")\n if np.max(diff_ra)-np.min(diff_ra) > np.max(diff_dec)-np.min(diff_dec):\n sgn = np.sign(diff_ra)\n else:\n sgn = np.sign(diff_dec)\n offsimg[wsl] = -sgn * np.sqrt(diff_ra**2 + diff_dec**2)\n # Update the xgrid values for this slice\n for yy in range(grid_nspec):\n wsl = np.where(slitimg == spat_id)\n allind = wsl[1][np.where(wsl[0] == ygridt[yy])]\n xgrid[yy, 0] = np.min(allind)\n xgrid[yy, -1] = np.max(allind)\n numpix = xgrid[yy, -1] - xgrid[yy, 0]\n sep = numpix // (grid_nspat - 1)\n xgrid[yy, 1:-1] = xgrid[yy, 0] + (numpix % sep + 2 * sep) // 2 + np.arange(grid_nspat - 2) * sep\n # Extract offset + wavelength information and estimate transform\n grid_coord = (ygrid.flatten(), xgrid.flatten())\n grid_offs = offsimg[grid_coord]\n grid_wave = waveimg[grid_coord]\n src = np.column_stack((grid_wave, grid_offs))\n dst = np.column_stack(grid_coord).astype(float)\n # Transform the voxel coordinates to detector coordinates\n evalpos = np.column_stack((crd_vox_spec[:,np.newaxis].repeat(crd_vox_spat.size, 
axis=1).flatten(),\n crd_vox_spat[np.newaxis,:].repeat(crd_vox_spec.size, axis=0).flatten()))\n # tform = LinearNDInterpolator(src, dst, rescale=True)\n # crd_det_tmp = tform(evalpos)\n\n src_off = np.min(src, axis=0)\n src_scl = np.max(src-src_off, axis=0)\n dst_off = np.min(dst, axis=0)\n dst_scl = np.max(dst-dst_off, axis=0)\n tform = RBFInterpolator((src-src_off)/src_scl, (dst-dst_off)/dst_scl, smoothing=0.01)\n crd_det = dst_off + dst_scl * tform((evalpos-src_off)/src_scl)\n if debug:\n plt.plot(crd_det[:, 0], crd_det[:, 1], 'rx')\n #plt.plot(crd_det_tmp[:, 0], crd_det_tmp[:, 1], 'bx')\n plt.plot(np.arange(slits.left_init.shape[0]), slits.left_init[:, 0], 'k-')\n plt.plot(np.arange(slits.right_init.shape[0]), slits.right_init[:, 0], 'k-')\n plt.show()\n\n # Calculate an \"offsets\" image, which indicates the offset in arcsec from (RA_0, DEC_0)\n # Create two splines of the offsets image: (1) offset predicts RA; (2) offset predicts Dec.\n # Use these splines to calculate the RA and DEC of the voxels, combine this with the output wavelength grid.\n # Generate all RA, DEC, WAVELENGTH triples (i.e. find the RA,DEC pairs along constant wavelength, for all wavelengths)\n # Use the WCS (which contains the astrometric transform) to go from world to pix\n # i.e. need to invert this:\n # world_ra, world_dec, _ = wcs.wcs_pix2world(slitID, evalpos, tilts[onslit_init]*(nspec-1), 0)\n # This gives us the x,y detector positions of the voxel geometry\n from shapely.geometry import Polygon, box as shapelyBox\n from shapely.strtree import STRtree\n\n crd_det_spec, crd_det_spat = crd_det[:, 0].reshape(vox_shape), crd_det[:, 1].reshape(vox_shape)\n # Generate a list of all detector pixels in this slice\n detpix_polys = []\n pix_spec, pix_spat = np.where(slitimg == spat_id)\n for ss in range(pix_spat.size):\n detpix_polys.append(shapely.geometry.box(pix_spat[ss], pix_spec[ss], pix_spat[ss]+1, pix_spec[ss]+1))\n # Create a Sort-Tile-Recursive tree of the detector pixels to quickly query overlapping voxels\n detgeom = shapely.strtree.STRtree(detpix_polys)\n # Loop through all voxels for this slice and calculate the overlapping area\n for wv in range(nvox_wave):\n for sp in range(nvox_spat):\n # Generate the voxel coordinates in detector pixel space (points must be counter-clockwise)\n voxel_geom = shapely.geometry.Polygon([(crd_det_spat[wv, sp], crd_det_spec[wv, sp]),\n (crd_det_spat[wv, sp+1], crd_det_spec[wv, sp]),\n (crd_det_spat[wv, sp+1], crd_det_spec[wv+1, sp]),\n (crd_det_spat[wv, sp], crd_det_spec[wv+1, sp]),\n (crd_det_spat[wv, sp], crd_det_spec[wv, sp])])\n # Find overlapping detector pixels\n result = detgeom.query(voxel_geom)\n # Sum all overlapping flux-weighted areas\n this_flx = 0\n this_var = 0\n this_area = 0\n for pp in range(len(result)):\n area = voxel_geom.intersection(result[pp]).area\n pix_spat = int(min(result[pp].exterior.coords[0][0], result[pp].exterior.coords[2][0]))\n pix_spec = int(min(result[pp].exterior.coords[0][1], result[pp].exterior.coords[2][1]))\n if ivarimg[pix_spec, pix_spat] != 0.0:\n this_flx += area * fluximg[pix_spec, pix_spat]\n this_var += area**2 * varimgsq[pix_spec, pix_spat]\n this_area += area\n # Fill in the datacube\n this_area = 1 if this_area == 0 else this_area\n datcube[sl, sp, wv] = this_flx / this_area\n varcube[sl, sp, wv] = this_var / this_area**2\n\n # Generate a header\n hdr = output_wcs.to_header()\n\n # Add the unit of flux to the header\n if fluxcal:\n hdr['FLUXUNIT'] = (PYPEIT_FLUX_SCALE, \"Flux units -- erg/s/cm^2/Angstrom/arcsec^2\")\n 
else:\n hdr['FLUXUNIT'] = (1, \"Flux units -- counts/s/Angstrom/arcsec^2\")\n\n # Save the final datacube\n msgs.info(\"Saving datacube as: {0:s}\".format(outfile))\n final_cube = DataCube(datcube.T, varcube.T, specname, blaze_wave, blaze_spec, sensfunc=sensfunc, fluxed=fluxcal)\n final_cube.to_file(outfile, hdr=hdr, overwrite=overwrite)", "def coefficients(self, coeffs):\n self._coeffs = coeffs\n self._image = starlet_reconstruction(self.coefficients, self.generation, self.convolve2D)", "def multiband_starlet_transform(image, scales=None, generation=2, convolve2D=None):\n assert len(image.shape) == 3, f\"Image should be 3D (bands, height, width), got shape {len(image.shape)}\"\n assert generation in (1, 2), f\"generation should be 1 or 2, got {generation}\"\n scales = get_scales(image.shape, scales)\n\n wavelets = np.empty((scales+1,)+image.shape, dtype=image.dtype)\n for b, image in enumerate(image):\n wavelets[:, b] = starlet_transform(image, scales=scales, generation=generation, convolve2D=convolve2D)\n return wavelets", "def getComplexNMF1DTemplates(S, W, H, p = 2, audioParams = None):\n K = W.shape[2]\n #Step 1: Compute the masked matrices raised to the power p\n AsSum = np.zeros(S.shape)\n As = []\n for k in range(K):\n Hk = np.array(H)\n Hk[0:k, :] = 0\n Hk[k+1::, :] = 0\n As.append(multiplyConv1D(W, Hk)**p)\n AsSum += As[-1]\n #Step 2: Average masked portions of the spectrogram to come up with\n #complex-valued templates\n Ss = []\n Ratios = []\n AllPow = np.abs(np.sum(S*np.conj(S), 0))\n AllPow[AllPow == 0] = 1\n for k in range(K):\n Ss.append(S*As[k]/AsSum)\n Pow = np.abs(np.sum(Ss[k]*np.conj(Ss[k]), 0))\n Ratios.append(Pow/AllPow)\n #Step 4: Save components if user requested\n if audioParams:\n from SpectrogramTools import iSTFT\n [winSize, hopSize] = [audioParams['winSize'], audioParams['hopSize']]\n [Fs, fileprefix] = [audioParams['Fs'], audioParams['fileprefix']]\n import matplotlib.pyplot as plt\n from scipy.io import wavfile\n X = np.array([])\n for k in range(K):\n thisS = np.array(Ss[k])\n thisS[:, Ratios[k] < 0.05] = 0\n Xk = iSTFT(thisS, winSize, hopSize)\n if k == 0:\n X = Xk\n else:\n X += Xk\n wavfile.write(\"%s_%i.wav\"%(fileprefix, k), Fs, Xk)\n plt.clf()\n plt.plot(Ratios[k])\n plt.title(\"Ratio, %.3g Above 0.05\"%(np.sum(Ratios[k] > 0.05)/float(Ratios[k].size)))\n plt.savefig(\"%s_%iPower.svg\"%(fileprefix, k), bbox_inches = 'tight')\n wavfile.write(\"%sNMF.wav\"%fileprefix, Fs, X)\n return (Ss, Ratios)", "def convolve(self, img):", "def coef_shrinkage_1D(cube, \n baseline,\n channel,\n polarization,\n wavelet,\n n,\n threshold,\n tfix,\n ttype):\n slice = cube[baseline,channel,:,polarization]\n \n # Decomposition\n coefs = pywt.wavedec(slice, wavelet,level =n)\n\n # Theshold\n if threshold == 'fixed':\n denoised = coefs\n for i,coef in enumerate(coefs):\n denoised[i] = pywt.threshold(coef,tfix,ttype)\n else:\n logger.warning('No other wavelet thresholds have been impleted yet')\n return\n\n # Resynthesis\n return pywt.waverec(denoised,wavelet)[:slice.shape[0]]", "def cwave_filters(filters):\n\n\tf = h5py.File(dir_file+'filters_w.hdf5', 'r')\n\tnbands = len(filters)\n\n\tif nbands>1:\n\t\tcwaves = np.zeros(nbands)\n\t\tfor bb in range(0,nbands):\n\t\t\tstr_temp = 'cw_%s' % filters[bb]\n\t\t\tcwaves[bb] = f[filters[bb]].attrs[str_temp]\n\telse:\n\t\tstr_temp = 'cw_%s' % filters\n\t\tcwaves = f[filters].attrs[str_temp]\n\tf.close()\n\n\treturn cwaves", "def wavelet_maps_to_real(self,wav_analysis_maps,output_maps_prefix,\n\t\tn_quads=1000):\n\t\t# Make the wavelet 
dict we'll feed into the reconstruction script.\n\t\ttarget_fwhm = wav_analysis_maps['target_fwhm']\n\t\tscale_int = wav_analysis_maps['scale_int']\n\t\tj_min = wav_analysis_maps['j_min']\n\t\tj_max = wav_analysis_maps['j_max']\n\t\toutput_nside = wav_analysis_maps['output_nside']\n\t\twavelet_dict = {'scale_int':scale_int,\n\t\t\t'band_lim':wav_analysis_maps['band_lim'],'j_max':j_max,\n\t\t\t'j_min':j_min,'original_nside':output_nside,\n\t\t\t'target_fwhm':target_fwhm}\n\t\tanalysis_type = wav_analysis_maps['analysis_type']\n\t\tm_level = wav_analysis_maps['m_level']\n\n\t\t# Check that the right type of map dict was passed in.\n\t\tif analysis_type != 'hgmca':\n\t\t\traise ValueError('A non-hgmca wav_analysis_maps was passed in.')\n\n\t\t# Get the analysis level for each coefficient\n\t\twav_level = self.get_analysis_level(scale_int,j_min,j_max,m_level,\n\t\t\toutput_nside)\n\t\twav_j_ind = np.zeros(2+j_max-j_min)\n\t\twav_j_ind[1:] = np.arange(j_min,j_max+1)\n\n\t\t# Iterate through the levels\n\t\tfor level in range(m_level+1):\n\t\t\t# If no wavelet scales should be analyzed at this level\n\t\t\t# continue\n\t\t\tif np.sum(wav_level==level) == 0:\n\t\t\t\tcontinue\n\t\t\t# Which scales belong at this level\n\t\t\tlevel_j_ind = wav_j_ind[wav_level==level]\n\t\t\t# Get the number of patches for a given level.\n\t\t\tn_patches = level_to_npatches(level)\n\n\t\t\t# Keep track of how many pixels into the level we've\n\t\t\t# gone so far.\n\t\t\toffset = 0\n\t\t\tfor j in level_j_ind:\n\t\t\t\t# Now deal with scaling or wavelet coefficient\n\t\t\t\tif j == 0:\n\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j_min,\n\t\t\t\t\t\toutput_nside)\n\t\t\t\t\tpath = output_maps_prefix+'_scaling.fits'\n\t\t\t\t\twavelet_dict.update({'scale_map':{'path':path,\n\t\t\t\t\t\t'nside':nside}})\n\t\t\t\telse:\n\t\t\t\t\tnside = wavelets_base.get_max_nside(scale_int,j+1,\n\t\t\t\t\t\toutput_nside)\n\t\t\t\t\tpath = output_maps_prefix+'_wav_%d.fits'%(j)\n\t\t\t\t\twavelet_dict.update({'wav_%d_map'%(j):{'path':path,\n\t\t\t\t\t\t'nside':nside}})\n\t\t\t\tn_pix = hp.nside2npix(nside)\n\t\t\t\tn_pix_patch = n_pix//n_patches\n\n\t\t\t\t# Allocate the array we'll use to write the wavelets\n\t\t\t\twav_coeff = np.zeros(n_pix)\n\n\t\t\t\t# Now grab the data from each patch\n\t\t\t\tfor patch in range(n_patches):\n\t\t\t\t\twav_coeff[patch*n_pix_patch:(patch+1)*n_pix_patch] = (\n\t\t\t\t\t\twav_analysis_maps[str(level)][patch,\n\t\t\t\t\t\toffset:offset+n_pix_patch])\n\t\t\t\toffset += n_pix_patch\n\n\t\t\t\t# Write the map and point the dictionary to the path\n\t\t\t\thp.write_map(path,wav_coeff,dtype=np.float64,\n\t\t\t\t\toverwrite=True,nest=True)\n\n\t\treturn self.s2dw_wavelet_inverse_transform(wavelet_dict,np.min(\n\t\t\ttarget_fwhm),n_quads=n_quads)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Utility function to perform reverse enumerate of a list; returns a zip object of (index, element) pairs
def reversedEnumerate(l): return zip(range(len(l)-1, -1, -1), l[::-1])
[ "def reversed_enumerate(seq):\r\n return izip(reversed(xrange(len(seq))), reversed(seq))", "def reverse_enumerate(iterable):\n\t# Lifted from http://galvanist.com/post/53478841501/python-reverse-enumerate\n\treturn itertools.izip(reversed(xrange(len(iterable))), reversed(iterable))\n\t# Alternative python3 version:\n\t# return zip(reversed(range(len(iterable))), reversed(iterable))", "def reverse_enumerate(iterable):\n return izip(reversed(range(len(iterable))), reversed(iterable))", "def rev_enumerate(seq):\n cnt = 0\n seq = reverse(seq)\n for i in seq:\n yield len(seq)-cnt-1, i\n cnt += 1", "def test_reversed_enumeration(self):\n test_list = range(10)\n expected = [\n (0, 9), (-1, 8), (-2, 7), (-3, 6), (-4, 5),\n (-5, 4), (-6, 3), (-7, 2), (-8, 1), (-9, 0)\n ]\n result = [l for l in reverse_enumerate(test_list)]\n self.assertEquals(expected, result)", "def reverse(iterator):\n for i in iterator:\n yield from reverse(iterator)\n yield i", "def unzip(iter):\n v, _ = zip(*iter)\n return v", "def test_reversed_enumeration_option_params(self):\n test_list = range(10)\n expected = [\n (9, 9), (8, 8), (7, 7), (6, 6), (5, 5),\n (4, 4), (3, 3), (2, 2), (1, 1), (0, 0)\n ]\n result = [l for l in reverse_enumerate(test_list, 9)]\n self.assertEquals(expected, result)", "def elements_reversed(seq):\n return seq[::-1]", "def _do_reverse_IterRankDifferentiaZip(\n self: \"HereditaryStratumOrderedStoreTree\",\n # deposition ranks might not be stored in strata\n get_rank_at_column_index: typing.Optional[typing.Callable] = None,\n start_column_index: int = 0,\n ) -> typing.Iterator[typing.Tuple[int, int]]:\n for reverse_column_idx, node in enumerate(self._GetAscendingIter()):\n column_idx = self.GetNumStrataRetained() - 1 - reverse_column_idx\n if column_idx >= start_column_index:\n rank: int\n if get_rank_at_column_index is None:\n rank = node.stratum.GetDepositionRank()\n assert rank is not None\n else:\n rank = get_rank_at_column_index(column_idx)\n assert rank is not None\n yield (rank, node.stratum.GetDifferentia())\n else:\n break", "def reverse_list(l):\n\n return l[::-1]", "def reverselist(lista):\n return list(reversed(lista))", "def unzip(l):\n return list(map(list, zip(*l)))", "def reverseState(self, stateList):\n flipped = []\n for item in stateList:\n item = item * -1\n flipped.append(item)\n return tuple(flipped)", "def get_reverse(self):\r\n\r\n\t\treturn MotifList([motif.get_reverse() for motif in self])", "def reverse_list(a_list):\n reverse = a_list[::-1]\n\n return reverse", "def _reverse_inputs_and_indices(encoded_sentence_forward, output_index_list_forward):\n if len(encoded_sentence_forward) >= 2: # sentence should at least have start, end characters\n start_sentence_value = first(encoded_sentence_forward)\n end_sentence_value = last(encoded_sentence_forward)\n encoded_sentence_length = len(encoded_sentence_forward)\n\n # Reverse all character codes in the sentence without affecting the first and last elements\n # (those are special start_sentence_value and end_sentence_value)\n encoded_sentence_back = [start_sentence_value]\n encoded_sentence_back.extend(encoded_sentence_forward[-2:0:-1]) # skip start and end\n encoded_sentence_back.append(end_sentence_value)\n else:\n encoded_sentence_back = []\n\n # compute backward output indices\n if len(output_index_list_forward) == 0:\n locations_before_tokens = []\n else:\n locations_before_tokens = [0] + output_index_list_forward[:-1]\n output_indices_back = [encoded_sentence_length - x - 1 for x in locations_before_tokens]\n return 
encoded_sentence_back, output_indices_back", "def sort_reverse(list_of_integers):", "def reverse(L):\r\n return L[::-1]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to read in start and end simulation time and return a `ClockStructClass` object
def read_clock_paramaters(SimStartTime,SimEndTime,OffSeason=False): # extract data and put into numpy datetime format SimStartTime = pd.to_datetime(SimStartTime) SimEndTime = pd.to_datetime(SimEndTime) # create object ClockStruct = ClockStructClass() # add variables ClockStruct.SimulationStartDate = SimStartTime ClockStruct.SimulationEndDate = SimEndTime ClockStruct.nSteps = (SimEndTime - SimStartTime).days + 1 ClockStruct.TimeSpan = pd.date_range(freq='D',start=SimStartTime,end=SimEndTime) ClockStruct.StepStartTime = ClockStruct.TimeSpan[0] ClockStruct.StepEndTime = ClockStruct.TimeSpan[1] ClockStruct.SimOffSeason = OffSeason return ClockStruct
[ "def GetTime(self):\n\t\tseconds = self.bcd2dec( self.readRegister(SEC) &(~START_32KHZ)) # mask out ST bit\n\t\tminutes = self.bcd2dec( self.readRegister(MIN))\n\t\t\n\t\thour_t = self.readRegister(HOUR)\n\t\tif((hour_t & HOUR_12) == HOUR_12):\n\t\t\t(hour_t & 0x1F) \n\t\telse:\n\t\t\t(hour_t & 0x3F)\n\t\t\t\n\t\thours = self.bcd2dec(hour_t)\t\t\n\t\tweekday = self.bcd2dec( self.readRegister(DAY) & ~(OSCRUN|PWRFAIL|VBATEN))\n\t\tdate = self.bcd2dec( self.readRegister(DATE)) \n\t\tmonth = self.bcd2dec( self.readRegister(MNTH) & ~(LPYR)) \n\t\tyear = self.bcd2dec( self.readRegister(YEAR))\n\t\t\n\t\trtc_time=RTCC_Struct(seconds,minutes,hours,weekday,date,month,year)\n\t\t\n\t\treturn rtc_time", "def __read(self):\n data = os.read(self.__src, 8)\n code, time, timeHigh, five, cmd, zeroes = struct.unpack('=bHBBBh', data)\n time = self.__fixInputTime(time + (timeHigh << 16))\n return time, cmd", "def calculateStartTimeAndEndTimes():\n\n utcOffset = get_utc_offset()\n workingStart = config[\"workingStart\"]\n workingFinish = config[\"workingFinish\"]\n utc_now = utc.localize(datetime.utcnow())\n workingStart = workingStart.split(\":\")\n workingFinish = workingFinish.split(\":\")\n workStart = utc_now.replace(\n hour=int(workingStart[0]), minute=int(workingStart[1]), second=0, microsecond=0,\n )\n workFinish = utc_now.replace(\n hour=int(workingFinish[0]),\n minute=int(workingFinish[1]),\n second=0,\n microsecond=0,\n )\n utcDelta = timedelta(minutes=utcOffset)\n workStart = workStart - utcDelta\n workFinish = workFinish - utcDelta\n\n sunriseDateTime, sunsetDateTime = getSunriseSunsetDateTimes()\n\n return max(workStart, sunriseDateTime), min(workFinish, sunsetDateTime)", "def initialize_clock(self, simulated_start_time, simulated_stop_time=None, speed=None):\n try:\n parsed_start_time = utils.parse_timestamp_string(simulated_start_time)\n except ValueError:\n _log.debug('Failed to parse simulated_start_time {}'.format(simulated_start_time))\n return 'Invalid simulated_start_time'\n\n if simulated_stop_time:\n try:\n parsed_stop_time = utils.parse_timestamp_string(simulated_stop_time)\n except ValueError:\n _log.debug('Failed to parse simulated_stop_time {}'.format(simulated_stop_time))\n return 'Invalid simulated_stop_time'\n else:\n parsed_stop_time = None\n\n if speed is not None:\n try:\n parsed_speed = float(speed)\n except ValueError:\n _log.debug('Failed to parse speed {}'.format(speed))\n return 'Invalid speed'\n if speed <= 0.0:\n _log.debug('Asked to initialize with a zero or negative speed')\n return 'Asked to initialize with a zero or negative speed'\n else:\n parsed_speed = 1.0\n\n if parsed_stop_time and (parsed_stop_time < parsed_start_time):\n _log.debug('Asked to initialize with out-of-order start/stop times')\n return 'simulated_stop_time is earlier than simulated_start_time'\n\n self.actual_start_time = utils.get_aware_utc_now()\n self.simulated_start_time = parsed_start_time\n self.simulated_stop_time = parsed_stop_time\n self.speed = parsed_speed\n _log.debug('Initializing clock at {} to start at: {}'.format(self.actual_start_time, self.simulated_start_time))\n _log.debug('Initializing clock to stop at: {}'.format(self.simulated_stop_time))\n _log.debug('Initializing clock to run at: {} times normal'.format(self.speed))\n return 'Simulation started at {}'.format(self.actual_start_time)", "def _get_recording_start_time(self) -> float:\n recording_start_time = 0.0\n if self.sync_message_file is not None:\n with open(self.sync_message_file, \"r\") as f:\n sync_strs = 
f.read()\n sync_lines = sync_strs.split(\"\\n\")\n for line in sync_lines:\n if \"Start Time\" in line:\n tokens = line.split(\":\")\n start_time = int(tokens[-1])\n sample_rate = int(tokens[0].split(\"@\")[-1].strip().split()[0])\n recording_start_time = start_time / float(sample_rate)\n return recording_start_time", "def get_start_end_info_from_xml(self, raw_xml):\n\n xml_root = ElementTree.fromstring(raw_xml)\n\n time_start_list = xml_root.findall('.//Attribute[@Name=\"time_coverage_start\"]')\n if len(time_start_list) > 0:\n if len(time_start_list) > 1:\n print(\"Encountered more than 1 time_coverage_start tag. Using 1st value.\")\n start = self.get_time_coverage_xml(time_start_list[0])\n else:\n time_start_list = xml_root.findall('.//Attribute[@Name=\"Scene Start time\"]')\n if len(time_start_list) > 1:\n print(\"Encountered more than 1 Scene Start time tag. Using 1st value.\")\n start_str = self.get_time_coverage_xml(time_start_list[0])\n start = self.get_goci_time(start_str)\n\n time_end_list = xml_root.findall('.//Attribute[@Name=\"time_coverage_end\"]')\n if len(time_end_list) > 0:\n if len(time_end_list) > 1:\n print(\"Encountered more than 1 time_coverage_end tag. Using 1st value.\")\n stop = self.get_time_coverage_xml(time_end_list[0])\n else:\n time_end_list = xml_root.findall('.//Attribute[@Name=\"Scene end time\"]')\n if len(time_end_list) > 1:\n print(\"Encountered more than 1 Scene end time tag. Using 1st value.\")\n stop_str = self.get_time_coverage_xml(time_end_list[0])\n stop = self.get_goci_time(stop_str)\n return start, stop", "def read_time(self, timing_type: str) -> float:\n time_filename = f\"{self.hash}.time.{timing_type}\"\n with self.cache_fs.open(time_filename, \"r\") as fid:\n return float(fid.read())", "def get_monitor_start_time():\n \n # read the 8th of December data as a list of strings\n# f = open('../data_p_beam/2_second/20171208.csv')\n# lines = f.readlines()\n# f.close()\n \n # !!! 
temporarily changing this to a run closer to the start of where\n # proper data was first collected\n filename = 'T071217_0001.txt'\n f = open('../data_ucn/monitor_detector/' + filename)\n lines = f.readlines()\n f.close()\n \n date_time = filename[1:3].zfill(2) + \\\n '.12.2017 ' + \\\n lines[26][15:23]\n\n pattern = '%d.%m.%Y %H:%M:%S'\n start_time = int(time.mktime(time.strptime(date_time, pattern)))\n \n return start_time", "def extract_exact_ck_times(observStart, observEnd, targetFrame):\n times = []\n\n FILESIZ = 128;\n TYPESIZ = 32;\n SOURCESIZ = 128;\n\n currentTime = observStart\n\n count = spice.ktotal(\"ck\")\n if (count > 1):\n msg = \"Unable to get exact CK record times when more than 1 CK is loaded, Aborting\"\n raise Exception(msg)\n\n _, _, _, handle = spice.kdata(0, \"ck\", FILESIZ, TYPESIZ, SOURCESIZ)\n spice.dafbfs(handle)\n found = spice.daffna()\n spCode = int(targetFrame / 1000) * 1000\n\n while found:\n observationSpansToNextSegment = False\n summary = spice.dafgs()\n dc, ic = spice.dafus(summary, 2, 6)\n\n # Don't read type 5 ck here\n if ic[2] == 5:\n break\n\n if (ic[0] == spCode and ic[2] == 3):\n segStartEt = spice.sct2e(int(spCode/1000), dc[0])\n segStopEt = spice.sct2e(int(spCode/1000), dc[1])\n\n if (currentTime >= segStartEt and currentTime <= segStopEt):\n # Check for a gap in the time coverage by making sure the time span of the observation\n # does not cross a segment unless the next segment starts where the current one ends\n if (observationSpansToNextSegment and currentTime > segStartEt):\n msg = \"Observation crosses segment boundary--unable to interpolate pointing\"\n raise Exception(msg)\n if (observEnd > segStopEt):\n observationSpansToNextSegment = True\n\n dovelocity = ic[3]\n end = ic[5]\n val = spice.dafgda(handle, int(end - 1), int(end))\n # int nints = (int) val[0];\n ninstances = int(val[1])\n numvel = dovelocity * 3\n quatnoff = ic[4] + (4 + numvel) * ninstances - 1\n # int nrdir = (int) (( ninstances - 1 ) / DIRSIZ); /* sclkdp directory records */\n sclkdp1off = int(quatnoff + 1)\n sclkdpnoff = int(sclkdp1off + ninstances - 1)\n # int start1off = sclkdpnoff + nrdir + 1;\n # int startnoff = start1off + nints - 1;\n sclkSpCode = int(spCode / 1000)\n\n sclkdp = spice.dafgda(handle, sclkdp1off, sclkdpnoff)\n\n instance = 0\n et = spice.sct2e(sclkSpCode, sclkdp[0])\n\n while (instance < (ninstances - 1) and et < currentTime):\n instance = instance + 1\n et = spice.sct2e(sclkSpCode, sclkdp[instance])\n\n if (instance > 0):\n instance = instance - 1\n et = spice.sct2e(sclkSpCode, sclkdp[instance])\n\n while (instance < (ninstances - 1) and et < observEnd):\n times.append(et)\n instance = instance + 1\n et = spice.sct2e(sclkSpCode, sclkdp[instance])\n times.append(et)\n\n if not observationSpansToNextSegment:\n break\n else:\n currentTime = segStopEt\n spice.dafcs(handle) # Continue search in daf last searched\n found = spice.daffna() # Find next forward array in current daf\n\n return times", "def get_changing_times2(recfile):\n times = recfile[0][1]\n startings = [t[0] for t in times]\n endings = [t[1] for t in times]\n return startings, endings", "def timeClockStatus(self, clock):\n\n\t\tclockHex = hex(clock)[2:]\n\t\tresp = StatusResponse(self.port.command(\"802 \" + clockHex))\n\n\t\tschedule = string.atoi(resp.results[1], 16)\n\t\tnextType = resp.results[2]\n\n\t\tminsSinceMidnight = string.atoi(resp.results[3], 16)\n\t\tnextTime = datetime.time(minsSinceMidnight/60, minsSinceMidnight%60)\n\n\t\tnextScript = string.atoi(resp.results[4], 
16)\n\n\t\treturn (schedule, nextType, nextTime, nextScript)", "def time(self):\n return ((self['clock']['initial'] + 40 * self['clock']['increment'])\n / 60)", "def _get_device_start_time(self):\n result = self.shell(command='date \\\"+%Y%m%d %H:%M:%S\\\"').response()\n #result = result+'.000'\n epoch = float(time.mktime(time.strptime(result, '%Y%m%d %H:%M:%S')))\n self._device_start_time = epoch", "def build_clock():\n return html.Div([\n daq.LEDDisplay(id='clock', value='00:00:00',\n size=24, color='#333333')\n ], className='navbar-2')", "def get_time_and_direction(par_file):\n with open(par_file, 'r') as f:\n for l in f.readlines():\n if 'start_time' in l:\n start_time = l.strip().split()[1]\n if 'heading' in l:\n heading = l.strip().split()[1]\n heading = float(heading)\n start_time = float(start_time)\n if heading > -180 and heading < -90:\n direction = 'DES'\n else:\n direction = 'ASC'\n\n return start_time, direction", "def get_start_end_info(info):\n starttime = None\n stoptime = None\n startdate = None\n stopdate = None\n for line in info[0].decode(\"utf-8\").splitlines():\n if line.find(\"Start_Time\") != -1:\n starttime = line.split('=')[1]\n if line.find(\"End_Time\") != -1:\n stoptime = line.split('=')[1]\n if line.find(\"Start_Date\") != -1:\n startdate = line.split('=')[1]\n if line.find(\"End_Date\") != -1:\n stopdate = line.split('=')[1]\n return starttime, startdate, stoptime, stopdate", "def testClockInSameTime(self):\r\n self.login_user(self.user)\r\n entry1_data = {\r\n 'user': self.user,\r\n 'start_time': self.now,\r\n 'project': self.project,\r\n 'activity': self.devl_activity,\r\n }\r\n entry1 = factories.Entry(**entry1_data)\r\n entry1_data.update({\r\n 'st_str': self.now.strftime('%H:%M:%S')\r\n })\r\n data = self.clock_in_form\r\n data.update({\r\n 'start_time_0': entry1.start_time.strftime('%m/%d/%Y'),\r\n 'start_time_1': entry1.start_time.strftime('%H:%M:%S'),\r\n })\r\n #This clock in attempt should be blocked by entry1 (same start time)\r\n response = self.client.post(self.url, data)\r\n self.assertFormError(response, 'form', None, \\\r\n 'Please enter a valid start time')\r\n self.assertFormError(response, 'form', 'start_time', \\\r\n 'The start time is on or before the current entry: ' + \\\r\n '%(project)s - %(activity)s starting at %(st_str)s' % entry1_data)", "def readactspindlespeed(self):\n\t\tst=self._req_rdsingle(1,1,0x25)\n\t\treturn self._decode8(st['data']) if st['len']==8 else None", "def getTimeIntervalOffering(self):\n codec16 = QtCore.QTextCodec.codecForName(\"UTF-16\")\n start = unicode(codec16.fromUnicode(self.lblStartTime.text()), 'UTF-16')\n end = unicode(codec16.fromUnicode(self.lblEndTime.text()), 'UTF-16')\n #print \"SOS:234\", type(start), start, end\n return (start, end)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finalise soil and crop parameters including planting and harvest dates, and save to new object ParamStruct
def read_model_parameters(ClockStruct,Soil,Crop,weather_df):
    # create ParamStruct object
    ParamStruct = ParamStructClass()

    Soil.fill_nan()

    # Assign Soil object to ParamStruct
    ParamStruct.Soil = Soil

    while Soil.zSoil < Crop.Zmax+0.1:
        for i in Soil.profile.index[::-1]:
            if Soil.profile.loc[i,"dz"] < 0.25:
                Soil.profile.loc[i,"dz"] += 0.1
                Soil.fill_nan()
                break

    ###########
    # crop
    ###########

    # if isinstance(Crop, Iterable):
    #     CropList=list(Crop)
    # else:
    #     CropList = [Crop]

    # # assign variables to paramstruct
    # ParamStruct.NCrops = len(CropList)
    # if ParamStruct.NCrops > 1:
    #     ParamStruct.SpecifiedPlantCalander = 'Y'
    # else:
    #     ParamStruct.SpecifiedPlantCalander = 'N'

    # # add crop list to ParamStruct
    # ParamStruct.CropList = CropList

    ############################
    # plant and harvest times
    ############################

    # # find planting and harvest dates
    # # check if there is more than 1 crop or multiple plant dates in sim year
    # if ParamStruct.SpecifiedPlantCalander == "Y":
    #     # if here than crop rotation occours during same period
    #     # create variables from dataframe
    #     PlantingDates = pd.to_datetime(planting_dates)
    #     HarvestDates = pd.to_datetime(harvest_dates)

    #     if (ParamStruct.NCrops > 1):
    #         CropChoices = [crop.Name for crop in ParamStruct.CropList]

    #     assert len(CropChoices) == len(PlantingDates) == len(HarvestDates)

    #elif ParamStruct.NCrops == 1:

    # Only one crop type considered during simulation - i.e. no rotations
    # either within or between years
    CropList = [Crop]
    ParamStruct.CropList = CropList
    ParamStruct.NCrops=1

    # Get start and end years for full simulation
    SimStartDate = ClockStruct.SimulationStartDate
    SimEndDate = ClockStruct.SimulationEndDate

    # extract the years and months of these dates
    start_end_years = pd.DatetimeIndex([SimStartDate,SimEndDate]).year
    start_end_months = pd.DatetimeIndex([SimStartDate,SimEndDate]).month

    if Crop.HarvestDate==None:
        Crop = compute_crop_calander(Crop,ClockStruct,weather_df)
        mature = int(Crop.MaturityCD+30)
        plant = pd.to_datetime("1990/"+Crop.PlantingDate)
        harv = plant + np.timedelta64(mature,'D')
        new_harvest_date = str(harv.month)+'/'+str(harv.day)
        Crop.HarvestDate=new_harvest_date

    # check if crop growing season runs over calander year
    # Planting and harvest dates are in days/months format so just add arbitrary year
    singleYear = pd.to_datetime("1990/"+Crop.PlantingDate) < pd.to_datetime("1990/"+Crop.HarvestDate)
    if singleYear:
        # if normal year
        # specify the planting and harvest years as normal
        plant_years = list(range(start_end_years[0],start_end_years[1]+1))
        harvest_years = plant_years
    else:
        # if it takes over a year then the plant year finishes 1 year before end of sim
        # and harvest year starts 1 year after sim start
        if pd.to_datetime(str(start_end_years[1]+2)+'/'+Crop.HarvestDate) < SimEndDate:
            # specify shifted planting and harvest years
            plant_years = list(range(start_end_years[0],start_end_years[1]+1))
            harvest_years = list(range(start_end_years[0]+1,start_end_years[1]+2))
        else:
            plant_years = list(range(start_end_years[0],start_end_years[1]))
            harvest_years = list(range(start_end_years[0]+1,start_end_years[1]+1))

    # Correct for partial first growing season (may occur when simulating
    # off-season soil water balance)
    if pd.to_datetime(str(plant_years[0])+"/"+Crop.PlantingDate) < ClockStruct.SimulationStartDate:
        # shift everything by 1 year
        plant_years = plant_years[1:]
        harvest_years = harvest_years[1:]

    # ensure number of planting and harvest years are the same
    assert len(plant_years) == len(harvest_years)

    # create lists to hold variables
    PlantingDates = []
    HarvestDates = []
    CropChoices = []

    # save full harvest/planting dates and crop choices to lists
    for i in range(len(plant_years)):
        PlantingDates.append(str(plant_years[i]) + "/" + ParamStruct.CropList[0].PlantingDate)
        HarvestDates.append(str(harvest_years[i]) + "/" + ParamStruct.CropList[0].HarvestDate)
        CropChoices.append(ParamStruct.CropList[0].Name)

    # save crop choices
    ParamStruct.CropChoices = list(CropChoices)

    # save clock paramaters
    ClockStruct.PlantingDates = pd.to_datetime(PlantingDates)
    ClockStruct.HarvestDates = pd.to_datetime(HarvestDates)
    ClockStruct.nSeasons = len(PlantingDates)

    # Initialise growing season counter
    if pd.to_datetime(ClockStruct.StepStartTime) == ClockStruct.PlantingDates[0]:
        ClockStruct.SeasonCounter = 0
    else:
        ClockStruct.SeasonCounter = -1

    # return the FileLocations object as i have added some elements
    return ClockStruct, ParamStruct
[ "def init_coupled_parameters(self):\n params=NamedObjects(scenario=self,cast_value=cast_to_parameter)\n # All of the current known options:\n # params['Tau']=1\n # params['TauFlow']=1\n # params['Velocity']=1\n if self.model.mdu.get_bool('physics','Salinity'):\n params['salinity']=1 \n if self.model.mdu.get_bool('physics','Temperature'):\n params['temp']=1 \n params['vwind']=1\n #params['winddir']=1\n #params['rain']=1\n return params", "def save_calibration_parameters(self):\n if self.plant_db.tmp_dir is None:\n directory = self.dir\n else:\n directory = self.plant_db.tmp_dir\n with open(directory + self.parameters_file, 'w') as oututfile:\n json.dump(self.calibration_params, oututfile)", "def parameters_updated(self):\n self.calculate_variables()\n termination = self.detect_termination()\n if termination is None:\n self.request_estimation()\n self.monitor_progress()\n else:\n self.callback.plp_terminated(termination)", "def century_params_to_new_model_params(\n pft_param_path, animal_param_path, site_param_path):\n CENTURY_DIR = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Century46_PC_Jan-2014\"\n TEMPLATE_HIST = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input/regional_properties/Worldclim_precip/empty_2014_2015/0_hist.sch\"\n TEMPLATE_SCH = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input/regional_properties/Worldclim_precip/empty_2014_2015/0.sch\"\n TEMPLATE_100 = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input/regional_properties/Worldclim_precip/empty_2014_2015/0.100\"\n new_model_args = {\n 'template_level': 'GLP',\n 'fix_file': \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/Forage_model/model_inputs/Laikipia_RPM/Century_inputs/drytrpfi.100\",\n 'grass_type': 'C4',\n 'herbivore_csv': \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/Forage_model/model_inputs/herd_avg_uncalibrated.csv\"\n }\n # parameter table containing only necessary parameters\n parameter_table = pandas.read_csv(\n \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/GK_doc/Century_parameter_table.csv\")\n parameters_to_keep = parameter_table['Century parameter name'].tolist()\n crop_params = os.path.join(CENTURY_DIR, 'crop.100')\n\n # get crop from TEMPLATE_HIST and TEMPLATE_SCH\n first_month = set()\n senescence_month = set()\n last_month = set()\n crop_list = set()\n with open(TEMPLATE_HIST, 'r') as hist_sch:\n for line in hist_sch:\n if ' CROP' in line:\n crop_line = next(hist_sch)\n crop_list.add(crop_line[:10].strip())\n if ' FRST' in line:\n first_month.add(line[7:10].strip())\n if ' SENM' in line:\n senescence_month.add(line[7:10].strip())\n if ' LAST' in line:\n last_month.add(line[7:10].strip())\n with open(TEMPLATE_SCH, 'r') as hist_sch:\n for line in hist_sch:\n if ' CROP' in line:\n crop_line = next(hist_sch)\n crop_list.add(crop_line[:10].strip())\n if ' FRST' in line:\n first_month.add(line[7:10].strip())\n if ' SENM' in line:\n senescence_month.add(line[7:10].strip())\n if ' LAST' in line:\n last_month.add(line[7:10].strip())\n # ensure that crop (e.g. 
GCD_G) is same between hist and extend schedule\n assert len(crop_list) == 1, \"We can only handle one PFT for old model\"\n # ensure that the file contains only one schedule to begin and end\n # growth\n assert len(first_month) == 1, \"More than one starting month found\"\n assert len(last_month) == 1, \"More than one ending month found\"\n PFT_label = list(crop_list)[0]\n\n # collect parameters from all Century sources\n master_param_dict = {}\n # get crop parameters from crop.100 file in CENTURY_DIR\n with open(crop_params, 'r') as cparam:\n for line in cparam:\n if line.startswith('{} '.format(PFT_label)):\n while 'MXDDHRV' not in line:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n line = next(cparam)\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n # get grazing effect parameters from graz.100 file\n graz_file = os.path.join(CENTURY_DIR, 'graz.100')\n with open(graz_file, 'r') as grazparams:\n for line in grazparams:\n if line.startswith(new_model_args['template_level']):\n line = next(grazparams)\n while 'FECLIG' not in line:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n line = next(grazparams)\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n # get site parameters from TEMPLATE_100\n with open(TEMPLATE_100, 'r') as siteparam:\n for line in siteparam:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n # get fixed parameters from new_model_args['fix_file']\n with open(new_model_args['fix_file'], 'r') as siteparam:\n for line in siteparam:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n\n def century_to_rp(century_label):\n \"\"\"Convert Century name to rangeland production name.\"\"\"\n rp = re.sub(r\"\\(\", \"_\", century_label)\n rp = re.sub(r\",\", \"_\", rp)\n rp = re.sub(r\"\\)\", \"\", rp)\n return rp\n\n # apportion parameters to PFT and site tables\n PFT_param_dict = {'PFT': 1}\n pft_params = parameter_table[\n parameter_table['Property of'] == 'PFT']['Century parameter name']\n for label in pft_params:\n PFT_param_dict[label] = master_param_dict[label]\n site_param_dict = {'site': 1}\n site_params = parameter_table[\n parameter_table['Property of'] == 'site']['Century parameter name']\n for label in site_params:\n site_param_dict[label] = master_param_dict[label]\n animal_param_dict = {'animal_id': 1}\n animal_params = parameter_table[\n parameter_table['Property of'] == 'animal']['Century parameter name']\n for label in animal_params:\n animal_param_dict[label] = master_param_dict[label]\n\n # add to grass csv to make PFT trait table\n PFT_param_dict['growth_months'] = (\n [','.join([str(m) for m in range(\n int(list(first_month)[0]), int(list(last_month)[0]) + 1)])])\n if senescence_month:\n PFT_param_dict['senescence_month'] = (\n ','.join([str(m) for m in list(senescence_month)]))\n if new_model_args['grass_type'] == 'C3':\n PFT_param_dict['species_factor'] = 0\n else:\n 
PFT_param_dict['species_factor'] = 0.16\n pft_df = pandas.DataFrame(PFT_param_dict, index=[0])\n col_rename_dict = {c: century_to_rp(c) for c in pft_df.columns.values}\n pft_df.rename(index=int, columns=col_rename_dict, inplace=True)\n pft_df.to_csv(pft_param_path, index=False)\n # TODO: add new PFT parameters:\n # digestibility_slope\n # digestibility_intercept\n\n # add to herbivore csv to make animal parameter table\n animal_beta_df = pandas.read_csv(new_model_args['herbivore_csv'])\n animal_df = pandas.DataFrame(animal_param_dict, index=[0])\n col_rename_dict = {c: century_to_rp(c) for c in animal_df.columns.values}\n animal_df.rename(index=int, columns=col_rename_dict, inplace=True)\n merged_animal_df = pandas.concat(\n [animal_beta_df, animal_df], axis=1, sort=False)\n merged_animal_df.to_csv(animal_param_path, index=False)\n # TODO: add parameter 'grzeff'\n\n # make site parameter table\n site_df = pandas.DataFrame(site_param_dict, index=[0])\n col_rename_dict = {c: century_to_rp(c) for c in site_df.columns.values}\n site_df.rename(index=int, columns=col_rename_dict, inplace=True)\n site_df.to_csv(site_param_path, index=False)", "def save_params(self) -> None:\n self._lib.save_params(self._device_handle)", "def clear_future_param(self):\n self.future_param_dict = dict()\n self.future_phase_dict = dict()\n last_phase = list(self.phase_dict.items())[-1][0]\n self.phase_dict[last_phase][\"end_date\"] = None\n return self", "def requestNewParameters(self):\n res = self.getNewParameters(self.calibrationID, self.currentStep)\n if res['OK']:\n returnValue = res['Value']\n # FIXME calibrationRun state will be updated number of worker times while only one time is enough\n if returnValue is not None:\n self.currentPhase = returnValue['currentPhase']\n self.currentStage = returnValue['currentStage']\n self.currentStep = returnValue['currentStep']\n return res", "def _init_param(self): \n \n param={}\n param['y'] = self.x_oris if self.FixedOris else self.x \n param['u'] = self.t\n for s in ['y','u']: param['N'+s] = len(param[s])\n param['sigma_d'] = self.sigma_d\n param['sigma_0'] = self.sigma_0\n param['m_0'] = np.log10(self.I_0)\n param['ell_0'] = self.ell_0\n param['tau_0'] = self.tau_0\n param['d'] = self.data.ravel() # d vector Nxt\n self._param = param\n # we set non-computed attributes to None\n # that way, we know if these attributess are already computed or need to be computed\n keys = ['MAP','samples','samples_stats', 'I_MAP', 'logZ', \n '_theta_MAP','_H_MAP','_H_MAP_D','_H_MAP_R', '_thetas']\n for key in keys: setattr(self, key, None)", "def __handle_new_params(self):\n if self.__test_type == igf.TEST_TYPES.RANDOM:\n sec_param_text = \",\".join([\"L\" + \"=\" + str(self.__L),\n \"D\" + \"=\" + str(self.__D),\n \"K\" + \"=\" + str(self.__K)])\n else:\n sec_param_text = \",\".join([\"L\" + \"=\" + str(self.__L),\n \"D\" + \"=\" +\n str(self.__num_levels),\n \"K\" + \"=\" + str(self.__K)])\n # Only update the params if there have been changes to it:\n if sec_param_text != self.__latest_params:\n self.__latest_params = sec_param_text\n # find the security parameter id:\n self.__sec_param_id = self.__resultsdb.get_next_params_id()\n # write the security parameter to the results database:\n self.__resultsdb.add_row(\n t2s.PARAM_TABLENAME,\n {t2s.PARAM_PID: self.__sec_param_id,\n t2s.PARAM_TESTNAME: self.__test_name,\n t2s.PARAM_K: self.__K,\n t2s.PARAM_D: self.__D,\n t2s.PARAM_L: self.__L})\n # write the security parameter to a params file:\n sec_param_file_name = 
os.path.join(self.__params_dir_name,\n str(self.__sec_param_id)\n + \".keyparams\")\n sec_param_file = self.__fho.get_file_object(sec_param_file_name,\n 'w')\n sec_param_file.write(sec_param_text)\n self.__fho.close_file_object(sec_param_file)\n # write the params location to the test file:\n self.__test_file.write(\n \"\".join([\"KEY\\n\",\n self.__get_testfile_path(sec_param_file_name), \"\\n\"]))", "def fillParamDict(self, allParams=False):\n #print \"QID: fillParamDict() called.\"\n s = self.readSettings()\n #print \" settings structure read.\"\n if allParams:\n p = []\n for x in lib('enums', 'QCam_Param').keys():\n p.append(x)\n for x in lib('enums', 'QCam_ParamS32').keys():\n p.append(x)\n for x in lib('enums', 'QCam_Param64').keys():\n p.append(x)\n else:\n p = externalParams\n #for x in lib('enums', 'QCam_Param'):\n for x in p:\n if x == 'ringSize':\n self.paramAttrs[x] = [(2,100), True, True, []]\n continue\n x = self.translateToCamera(x)\n if x not in ['qprmS32AbsoluteOffset', 'qprmS32RegulatedCoolingTemp', 'qprm64Exposure']:\n try:\n if self.call(lib.GetParam, byref(s), getattr(lib, x))() == 0:\n try: ###first try to get a SparseTable\n table = (c_ulong * 32)()\n r = self.call(lib.GetParamSparseTable, byref(s), getattr(lib, x), table, c_long(32))\n self.paramAttrs[self.translateToUser(x)] = [list(r[2])[:r[3]], True, True, []]\n except QCamFunctionError as err: ###if sparse table doesn't work try getting a RangeTable\n if err.value == 1: \n min = self.call(lib.GetParamMin, byref(s), getattr(lib, x))[2]\n max = self.call(lib.GetParamMax, byref(s), getattr(lib, x))[2]\n self.paramAttrs[self.translateToUser(x)] = [(min, max), True, True, []]\n else: raise \n except QCamFunctionError as err:\n if err.value == 1: pass \n else: raise\n #for x in lib('enums', 'QCam_ParamS32'):\n elif x not in ['qprm64Exposure']:\n try:\n if self.call(lib.GetParamS32, byref(s), getattr(lib, x))() == 0:\n try:\n table = (c_long * 32)()\n r = self.call(lib.GetParamSparseTableS32, byref(s), getattr(lib, x), table, c_long(32))\n self.paramAttrs[self.translateToUser(x)] = [list(r[2])[:r[3]], True, True, []]\n except QCamFunctionError as err:\n if err.value == 1:\n min = self.call(lib.GetParamS32Min, byref(s), getattr(lib, x))[2]\n max = self.call(lib.GetParamS32Max, byref(s), getattr(lib, x))[2]\n self.paramAttrs[self.translateToUser(x)] = [(min, max), True, True, []]\n else: raise\n except QCamFunctionError as err:\n if err.value == 1: pass\n else: raise\n #for x in lib('enums', 'QCam_Param64'):\n elif x not in ['qprmS32AbsoluteOffset', 'qprmS32RegulatedCoolingTemp']:\n try:\n if self.call(lib.GetParam64, byref(s), getattr(lib, x))() == 0:\n try:\n table = (c_ulonglong * 32)()\n r = self.call(lib.GetParamSparseTable64, byref(s), getattr(lib, x), table, c_long(32))\n self.paramAttrs[self.translateToUser(x)] = [list(r[2])[:r[3]], True, True, []]\n except QCamFunctionError as err:\n if err.value == 1: ## qerrNotSupported\n min = self.call(lib.GetParam64Min, byref(s), getattr(lib, x))[2]\n max = self.call(lib.GetParam64Max, byref(s), getattr(lib, x))[2]\n self.paramAttrs[self.translateToUser(x)] = [(min, max), True, True, []]\n else: raise\n except QCamFunctionError as err:\n if err.value == 1: pass\n else: raise\n #print \" parameters are retrieved.\"\n #self.paramAttrs.pop('qprmExposure')\n #self.paramAttrs.pop('qprmOffset')\n ### Replace qcam enum numbers with qcam strings\n #for x in self.paramAttrs: \n # if type(self.paramAttrs[x]) == type([]):\n # if x in self.paramEnums: ## x is the name of the 
parameter\n # #print \"Param: \", x, self.paramAttrs[x]\n # for i in range(len(self.paramAttrs[x])): ## i is the index\n # a = self.paramAttrs[x][i] ## a is the value\n # for b in lib('enums', self.paramEnums[x]): # b is the name of the parameter option\n # if lib('enums', self.paramEnums[x])[b] == a:\n # self.paramAttrs[x][i] = self.translateToUser(b)\n for x in self.paramAttrs:\n if type(self.paramAttrs[x][0]) != tuple:\n self.paramAttrs[x][0] = self.getNameFromEnum(x, self.paramAttrs[x][0])\n else:\n self.paramAttrs[x][0] = self.convertUnitsToAcq4(x, self.paramAttrs[x][0])\n \n ## rearrange trigger names\n trigNames = ['Normal', 'Strobe', 'Bulb']\n for n in self.paramAttrs['triggerMode'][0]:\n if n not in trigNames:\n trigNames.append(n)\n self.paramAttrs['triggerMode'][0] = trigNames\n \n \n \n return self.paramAttrs", "def parameter_refresh(self):\n\n self.saft_source.parameter_refresh(self.bead_library, self.cross_library)\n\n # Update Association site matrices\n if self.eos_dict[\"flag_assoc\"]:\n assoc_output = Aassoc.calc_assoc_matrices(\n self.beads,\n self.bead_library,\n self.eos_dict[\"molecular_composition\"],\n sitenames=self.eos_dict[\"sitenames\"],\n cross_library=self.cross_library,\n nk=self.eos_dict[\"nk\"],\n )\n self.eos_dict.update(assoc_output)", "def saveParameters(self):\n\n name = 'Hyteresis_Measurement_Parameters.txt'\n file = open(name, 'w') # Trying to create a new file or open one\n file.write(\"Voltage: {} V\\n\".format(str(Parameters['Voltage'])))\n file.write(\"Loops: {} \\n\".format(str(LoopParams['Loops'])))\n file.write(\"Measurementpoints: {} \\n\".format(\n str(LoopParams['MeasurementPoints'])))\n file.write(\"Set Fluenz: {} \\n\".format(\n str(MeasParams['Fluence'])))\n file.write(\"TimeZero: {} \\n\".format(\n str(MeasParams['timeZero'])))\n file.write(\"Pump-Angle: {} \\n\".format(\n str(MeasParams['angle'])))\n file.write(\"Samplename: {} \\n\".format(\n str(MeasParams['sampleName'])))\n\n if not self.Stage_ReadFromFile:\n file.write(\"StartPoint: {} ps\\n\".format(\n str(StageParams_ps['StartPoint'])))\n file.write(\"End Point: {} ps\\n\".format(\n str(StageParams_ps['EndPoint'])))\n file.write(\"Stepwidth: {} ps\\n\".format(\n str(StageParams_ps['StepWidth'])))\n file.write(\"Stage Velocity: {} \\n\".format(\n str(Stage_SpeedParams['Velocity'])))\n file.write(\"Stage Acceleration: {} \\n\".format(\n str(Stage_SpeedParams['Acceleration'])))\n\n if self.Stage_ReadFromFile:\n file.write(\"Start \\t Stop \\t Stepwidth ps\\n\")\n for idx, val in enumerate(self.saveVector):\n entry = ' '.join(str(e) for e in self.saveVector[idx])\n file.write(\"{}\\n\".format(entry))\n\n if self.Hysteresis_Check.isChecked():\n file.write(\"StartPoint: {} ps\\n\".format(\n str(HysteresisParameters['Stepwidth'])))\n file.write(\"Amplitude: {} ps\\n\".format(\n str(HysteresisParameters['Amplitude'])))\n file.write(\"@StageDelay\")\n for idx, val in enumerate(self.hystDelayVector_ps):\n entry = ' '.join(str(val))\n file.write(\"{}\\n\".format(entry))\n\n file.close()", "def save(self):\n LOGGER.info('saving parameters: {} ...'.format(self._param_file))\n np.save(self._param_file, self._parameters, allow_pickle=True, fix_imports=True)", "def __init__(self):\n\n VT = ValueTypes()\n self.fork_params = {'camera_offset': Param(0.06, VT.number, 'Your camera offset to use in lane_planner.py', live=True),\n 'dynamic_follow': Param('auto', str, 'Can be: (\\'traffic\\', \\'relaxed\\', \\'roadtrip\\'): Left to right increases in following distance.\\n'\n 'All profiles support 
dynamic follow so you\\'ll get your preferred distance while\\n'\n 'retaining the smoothness and safety of dynamic follow!'),\n 'global_df_mod': Param(1.0, VT.number, 'The multiplier for the current distance used by dynamic follow. The range is limited from 0.85 to 2.5\\n'\n 'Smaller values will get you closer, larger will get you farther\\n'\n 'This is multiplied by any profile that\\'s active. Set to 1. to disable', live=True),\n 'min_TR': Param(0.9, VT.number, 'The minimum allowed following distance in seconds. Default is 0.9 seconds.\\n'\n 'The range is limited from 0.85 to 1.6.', live=True),\n 'alca_nudge_required': Param(True, bool, 'Whether to wait for applied torque to the wheel (nudge) before making lane changes. '\n 'If False, lane change will occur IMMEDIATELY after signaling'),\n 'alca_min_speed': Param(25.0, VT.number, 'The minimum speed allowed for an automatic lane change (in MPH)'),\n 'steer_ratio': Param(None, VT.none_or_number, '(Can be: None, or a float) If you enter None, openpilot will use the learned sR.\\n'\n 'If you use a float/int, openpilot will use that steer ratio instead', live=True),\n 'lane_speed_alerts': Param('silent', str, 'Can be: (\\'off\\', \\'silent\\', \\'audible\\')\\n'\n 'Whether you want openpilot to alert you of faster-traveling adjacent lanes'),\n 'upload_on_hotspot': Param(False, bool, 'If False, openpilot will not upload driving data while connected to your phone\\'s hotspot'),\n 'enable_long_derivative': Param(False, bool, 'If you have longitudinal overshooting, enable this! This enables derivative-based\\n'\n 'integral wind-down to help reduce overshooting within the long PID loop'),\n 'disengage_on_gas': Param(False, bool, 'Whether you want openpilot to disengage on gas input or not'),\n 'update_behavior': Param('auto', str, 'Can be: (\\'off\\', \\'alert\\', \\'auto\\') without quotes\\n'\n 'off will never update, alert shows an alert on-screen\\n'\n 'auto will reboot the device when an update is seen'),\n 'dynamic_gas': Param(True, bool, 'Whether to use dynamic gas if your car is supported'),\n 'hide_auto_df_alerts': Param(False, bool, 'Hides the alert that shows what profile the model has chosen'),\n 'log_auto_df': Param(False, bool, 'Logs dynamic follow data for auto-df'),\n 'dynamic_camera_offset': Param(False, bool, 'Whether to automatically keep away from oncoming traffic.\\n'\n 'Works from 35 to ~60 mph (requires radar)'),\n 'dynamic_camera_offset_time': Param(3.5, VT.number, 'How long to keep away from oncoming traffic in seconds after losing lead'),\n 'support_white_panda': Param(False, bool, 'Enable this to allow engagement with the deprecated white panda.\\n'\n 'localizer might not work correctly'),\n 'slowdown_for_curves': Param(True, bool, 'Whether your car will slow down for curves using the old planner code from 0.5/0.6'),\n 'steer_fault_fix': Param(True, bool, live=True),\n\n 'prius_use_pid': Param(False, bool, 'This enables the PID lateral controller with new a experimental derivative tune\\nFalse: stock INDI, True: TSS2-tuned PID'),\n 'use_lqr': Param(False, bool, 'Enable this to use LQR as your lateral controller over default with any car'),\n 'corollaTSS2_use_indi': Param(False, bool, 'Enable this to use INDI for lat with your TSS2 Corolla'),\n 'rav4TSS2_use_indi': Param(False, bool, 'Enable this to use INDI for lat with your TSS2 RAV4'),\n 'standstill_hack': Param(False, bool, 'Some cars support stop and go, you just need to enable this')}\n\n self._params_file = '/data/op_params.json'\n self._backup_file = 
'/data/op_params_corrupt.json'\n self._last_read_time = sec_since_boot()\n self.read_frequency = 3 # max frequency to read with self.get(...) (sec)\n self._to_delete = ['steer_rate_fix'] # a list of unused params you want to delete\n self._run_init() # restores, reads, and updates params", "def save_calibration_data_to_env(self):\n self.json_calibration_data = self.calibration_params\n self.cparams.parameters = self.calibration_params\n self.cparams.save_to_env_var('calibration')", "def _params_validate_and_generate(self) -> None:\n # default params\n if \"d\" not in self.params:\n self.params[\"d\"] = 3\n\n # calculated params\n self.params[\"T\"] = -1 # -1 until a stabilizer round is added!\n self.params[\"num_readout\"] = -1 # -1 until a logical readout is performed!\n self.params[\n \"num_lattice_readout\"\n ] = -1 # -1 until a lattice readout is performed!\n self.params[\"num_data\"] = self.params[\"d\"]\n self.params[\"num_syn\"] = self.params[\"d\"] - 1", "def reset_params(self):\n pass", "def save_params(self):\n self.autoencoder.save_parameters('/Users/wenqin/Documents/GitHub/grade-12-assignments-wenqinYe/Culminating/parameters/encoder')", "def aggregate_params_and_data(yaml_fp):\r\n\r\n config = param_parser.load(yaml_fp, validate=False)\r\n\r\n # -------------Get data/params from get_data/params ----------------\r\n\r\n # handling of legacy param names, formatted as:\r\n # [old name which is still supported, new name]\r\n legacy_conversions = tuple([\r\n ['sd_date', 'c_reduction_date'],\r\n ['DATA_FOLDER', 'data_folder'],\r\n ['CITY', 'city'],\r\n ])\r\n for conversion in legacy_conversions:\r\n old_name = conversion[0]\r\n new_name = conversion[1]\r\n if new_name not in config:\r\n assert old_name in config, \"config YAML has no field \" + \\\r\n \"`{}` (formerly known as `{}`)\".format(new_name, old_name)\r\n config[new_name] = config[old_name]\r\n\r\n # get demographics, school calendar, and transmission data from Excel files\r\n AgeGroupDict, metro_pop, school_calendar, \\\r\n time_begin, FallStartDate, Phi, symp_h_ratio_overall, \\\r\n symp_h_ratio, hosp_f_ratio = SEIR_get_data(config=config)\r\n\r\n config.update({\r\n \"AgeGroupDict\": AgeGroupDict,\r\n 'metro_pop': metro_pop,\r\n 'school_calendar': school_calendar,\r\n 'time_begin': time_begin,\r\n 'FallStartDate': FallStartDate,\r\n 'phi': Phi,\r\n #initial_state': config['initial_state'],\r\n 'initial_i': config['I0'],\r\n 'symp_h_ratio_overall': symp_h_ratio_overall,\r\n 'symp_h_ratio': symp_h_ratio,\r\n 'hosp_f_ratio': hosp_f_ratio\r\n })\r\n\r\n # -------------Get initial state of model --------------------------\r\n ## -- get initial state of compartments\r\n # todo: SEIR model should take a new arg \"init_type\" that explicitly states whether to initialize every compartment or just infected\r\n # todo: currently the type of initialization is inferred from the instance type of \"initial_i\" -- that is sure to break at some point\r\n init_state = InitialModelState(config['total_time'], config['interval_per_day'], config['n_age'], config['n_risk'],\r\n config['I0'], metro_pop)\r\n compartments = init_state.initialize()\r\n # todo: more graceful and transparent override of user config specified start date\r\n # todo: perhaps in param_parser we can check that time_begin_sim is None if a I0 is a file path\r\n if init_state.start_day:\r\n print('Start date as specified in the config file is overridden by initialization from a deterministic solution.')\r\n print('The new start date is {}'.format(init_state.start_day))\r\n 
date_begin = init_state.start_day\r\n config['time_begin_sim'] = datetime.strftime(date_begin, '%Y%m%d') # return datetime to its expected string format\r\n # todo: we should re-save this config to reflect the updated start time\r\n\r\n # ------------- Update config with revised initial conditions -------\r\n config['initial_state'] = compartments\r\n config['t_offset'] = init_state.offset\r\n\r\n return config" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to compute additional parameters needed to define crop phenological calendar
def compute_crop_calander(crop,ClockStruct,weather_df):

    if len(ClockStruct.PlantingDates)==0:
        plant_year = pd.DatetimeIndex([ClockStruct.SimulationStartDate]).year[0]
        if pd.to_datetime(str(plant_year)+"/"+crop.PlantingDate) < ClockStruct.SimulationStartDate:
            pl_date = str(plant_year+1) + '/' + crop.PlantingDate
        else:
            pl_date = str(plant_year) + '/' + crop.PlantingDate
    else:
        pl_date=ClockStruct.PlantingDates[0]

    #Define crop calendar mode
    Mode = crop.CalendarType

    #Calculate variables %%
    if Mode == 1:
        # Growth in calendar days

        # Time from sowing to end of vegatative growth period
        if crop.Determinant == 1:
            crop.CanopyDevEnd = round(crop.HIstart+(crop.Flowering/2))
        else:
            crop.CanopyDevEnd = crop.Senescence

        # Time from sowing to 10% canopy cover (non-stressed conditions)
        crop.Canopy10Pct = round(crop.Emergence+(np.log(0.1/crop.CC0)/crop.CGC))

        # Time from sowing to maximum canopy cover (non-stressed conditions)
        crop.MaxCanopy = round( crop.Emergence +( np.log( (0.25*crop.CCx*crop.CCx/crop.CC0) /(crop.CCx-(0.98*crop.CCx)) ) /crop.CGC ) )

        # Time from sowing to end of yield formation
        crop.HIend = crop.HIstart+crop.YldForm

        # Duplicate calendar values (needed to minimise if statements when switching between GDD and CD runs)
        crop.EmergenceCD = crop.Emergence
        crop.Canopy10PctCD = crop.Canopy10Pct
        crop.MaxRootingCD = crop.MaxRooting
        crop.SenescenceCD = crop.Senescence
        crop.MaturityCD = crop.Maturity
        crop.MaxCanopyCD = crop.MaxCanopy
        crop.CanopyDevEndCD = crop.CanopyDevEnd
        crop.HIstartCD = crop.HIstart
        crop.HIendCD = crop.HIend
        crop.YldFormCD = crop.YldForm

        if crop.CropType == 3:
            crop.FloweringEnd = crop.HIstart+crop.Flowering
            crop.FloweringEndCD = crop.FloweringEnd
            crop.FloweringCD = crop.Flowering
        else:
            crop.FloweringEnd = -999
            crop.FloweringEndCD = -999
            crop.FloweringCD = -999

        # Check if converting crop calendar to GDD mode
        if crop.SwitchGDD == 1:
            # # Extract weather data for first growing season that crop is planted
            # for i,n in enumerate(ParamStruct.CropChoices):
            #     if n == crop.Name:
            #         idx = i
            #         break
            #     else:
            #         idx = -1

            # assert idx > -1

            date_range = pd.date_range(pl_date,ClockStruct.TimeSpan[-1])
            wdf = weather_df.copy(); wdf.index = wdf.Date

            wdf = wdf.loc[date_range]
            Tmin = wdf.MinTemp
            Tmax = wdf.MaxTemp

            # Calculate GDD's
            if crop.GDDmethod == 1:
                Tmean = (Tmax+Tmin)/2
                Tmean = Tmean.clip(lower=crop.Tbase,upper=crop.Tupp)
                GDD = Tmean-crop.Tbase
            elif crop.GDDmethod == 2:
                Tmax = Tmax.clip(lower=crop.Tbase,upper=crop.Tupp)
                Tmin = Tmin.clip(lower=crop.Tbase,upper=crop.Tupp)
                Tmean = (Tmax+Tmin)/2
                GDD = Tmean-crop.Tbase
            elif crop.GDDmethod == 3:
                Tmax = Tmax.clip(lower=crop.Tbase,upper=crop.Tupp)
                Tmin = Tmin.clip(upper=crop.Tupp)
                Tmean = (Tmax+Tmin)/2
                Tmean = Tmean.clip(lower=crop.Tbase)
                GDD = Tmean-crop.Tbase

            GDDcum = np.cumsum(GDD)

            # Find GDD equivalent for each crop calendar variable
            # 1. GDD's from sowing to emergence
            crop.Emergence = GDDcum.iloc[int(crop.EmergenceCD)]
            # 2. GDD's from sowing to 10# canopy cover
            crop.Canopy10Pct = GDDcum.iloc[int(crop.Canopy10PctCD)]
            # 3. GDD's from sowing to maximum rooting
            crop.MaxRooting = GDDcum.iloc[int(crop.MaxRootingCD)]
            # 4. GDD's from sowing to maximum canopy cover
            crop.MaxCanopy = GDDcum.iloc[int(crop.MaxCanopyCD)]
            # 5. GDD's from sowing to end of vegetative growth
            crop.CanopyDevEnd = GDDcum.iloc[int(crop.CanopyDevEndCD)]
            # 6. GDD's from sowing to senescence
            crop.Senescence = GDDcum.iloc[int(crop.SenescenceCD)]
            # 7. GDD's from sowing to maturity
            crop.Maturity = GDDcum.iloc[int(crop.MaturityCD)]
            # 8. GDD's from sowing to start of yield formation
            crop.HIstart = GDDcum.iloc[int(crop.HIstartCD)]
            # 9. GDD's from sowing to start of yield formation
            crop.HIend = GDDcum.iloc[int(crop.HIendCD)]
            # 10. Duration of yield formation (GDD's)
            crop.YldForm = crop.HIend-crop.HIstart

            # 11. Duration of flowering (GDD's) - (fruit/grain crops only)
            if crop.CropType == 3:
                # GDD's from sowing to end of flowering
                crop.FloweringEnd = GDDcum.iloc[int(crop.FloweringEndCD)]
                # Duration of flowering (GDD's)
                crop.Flowering = crop.FloweringEnd-crop.HIstart

            # Convert CGC to GDD mode
            crop.CGC_CD = crop.CGC
            crop.CGC = (np.log((((0.98*crop.CCx)-crop.CCx)*crop.CC0) /(-0.25*(crop.CCx**2))) )/(-(crop.MaxCanopy-crop.Emergence))

            # Convert CDC to GDD mode
            crop.CDC_CD = crop.CDC
            tCD = crop.MaturityCD-crop.SenescenceCD
            if tCD <= 0:
                tCD = 1

            CCi = crop.CCx*(1-0.05*(np.exp((crop.CDC_CD/crop.CCx)*tCD)-1))
            if CCi < 0:
                CCi = 0

            tGDD = crop.Maturity-crop.Senescence
            if tGDD <= 0:
                tGDD = 5

            crop.CDC = (crop.CCx/tGDD)*np.log(1+((1-CCi/crop.CCx)/0.05))

            # Set calendar type to GDD mode
            crop.CalendarType = 2

    elif Mode ==2:
        # Growth in growing degree days

        # Time from sowing to end of vegatative growth period
        if crop.Determinant == 1:
            crop.CanopyDevEnd = round(crop.HIstart+(crop.Flowering/2))
        else:
            crop.CanopyDevEnd = crop.Senescence

        # Time from sowing to 10# canopy cover (non-stressed conditions)
        crop.Canopy10Pct = round(crop.Emergence+(np.log(0.1/crop.CC0)/crop.CGC))

        # Time from sowing to maximum canopy cover (non-stressed conditions)
        crop.MaxCanopy = round(crop.Emergence+(np.log((0.25*crop.CCx*crop.CCx/crop.CC0) /(crop.CCx-(0.98*crop.CCx)))/crop.CGC))

        # Time from sowing to end of yield formation
        crop.HIend = crop.HIstart+crop.YldForm

        # Time from sowing to end of flowering (if fruit/grain crop)
        if crop.CropType == 3:
            crop.FloweringEnd = crop.HIstart+crop.Flowering

        # Extract weather data for first growing season that crop is planted
        # for i,n in enumerate(ParamStruct.CropChoices):
        #     if n == crop.Name:
        #         idx = i
        #         break
        #     else:
        #         idx = -1

        # assert idx> -1

        date_range = pd.date_range(pl_date,ClockStruct.TimeSpan[-1])
        wdf = weather_df.copy(); wdf.index = wdf.Date

        wdf = wdf.loc[date_range]
        Tmin = wdf.MinTemp
        Tmax = wdf.MaxTemp

        # Calculate GDD's
        if crop.GDDmethod == 1:
            Tmean = (Tmax+Tmin)/2
            Tmean = Tmean.clip(lower=crop.Tbase,upper=crop.Tupp)
            GDD = Tmean-crop.Tbase
        elif crop.GDDmethod == 2:
            Tmax = Tmax.clip(lower=crop.Tbase,upper=crop.Tupp)
            Tmin = Tmin.clip(lower=crop.Tbase,upper=crop.Tupp)
            Tmean = (Tmax+Tmin)/2
            GDD = Tmean-crop.Tbase
        elif crop.GDDmethod == 3:
            Tmax = Tmax.clip(lower=crop.Tbase,upper=crop.Tupp)
            Tmin = Tmin.clip(upper=crop.Tupp)
            Tmean = (Tmax+Tmin)/2
            Tmean = Tmean.clip(lower=crop.Tbase)
            GDD = Tmean-crop.Tbase

        GDDcum = np.cumsum(GDD).reset_index(drop=True)

        assert GDDcum.values[-1] > crop.Maturity, f"not enough growing degree days in simulation ({GDDcum.values[-1]}) to reach maturity ({crop.Maturity})"

        crop.MaturityCD = (GDDcum>crop.Maturity).idxmax()+1

        assert crop.MaturityCD < 365, "crop will take longer than 1 year to mature"

        # 1. GDD's from sowing to maximum canopy cover
        crop.MaxCanopyCD = (GDDcum>crop.MaxCanopy).idxmax()+1
        # 2. GDD's from sowing to end of vegetative growth
        crop.CanopyDevEndCD = (GDDcum>crop.CanopyDevEnd).idxmax()+1
        # 3. Calendar days from sowing to start of yield formation
        crop.HIstartCD = (GDDcum>crop.HIstart).idxmax()+1
        # 4. Calendar days from sowing to end of yield formation
        crop.HIendCD = (GDDcum>crop.HIend).idxmax()+1
        # 5. Duration of yield formation in calendar days
        crop.YldFormCD = crop.HIendCD-crop.HIstartCD

        if crop.CropType == 3:
            # 1. Calendar days from sowing to end of flowering
            FloweringEnd = (GDDcum>crop.FloweringEnd).idxmax()+1
            # 2. Duration of flowering in calendar days
            crop.FloweringCD = FloweringEnd-crop.HIstartCD
        else:
            crop.FloweringCD = -999

    return crop
[ "def process_epidemic_parameters(self):", "def update_roi_params(self,roi_pnts,**kwargs):\n self.cal_factor = kwargs.get('cal_factor',0.0212) \n \n #width values\n self.width_value = self.configs.get('width_default',150) #current value for filament width\n\n \n #roi\n self.roi_x1 ,self.roi_y1,self.roi_x2,self.roi_y2 = roi_pnts\n self.roi_pnt1 = (self.roi_x1,self.roi_y1)\n self.roi_pnt2 = (self.roi_x2,self.roi_y2)\n self.roi_height = self.roi_y2 - self.roi_y1\n self.roi_width = self.roi_x2 - self.roi_x1\n \n self.w_roi_middle = int((self.roi_width)/2) #middle of roi rectangle, approx middle of filament\n self.w_roi_middle_left = int(self.w_roi_middle/2) # middle of left half\n self.w_roi_middle_right = int(self.w_roi_middle + self.w_roi_middle/2) #middle of right half\n\n self.w_vline_left_roi = self.w_vline_left_border #x-position of vline in roi\n self.w_vline_right_roi = self.roi_width - self.w_vline_right_border #x-position of vline right \n \n self.width_left_pnt = self.w_vline_left_roi #min point - left edge\n self.width_right_pnt = self.w_vline_right_roi #max point - right edge\n \n# print(self.w_vline_left_roi,self.w_detection_half_width)\n #assume left and right edge are in these sub-roi areas\n self.left_left = self.w_vline_left_roi - self.w_detection_half_width\n self.left_right = self.w_vline_left_roi + self.w_detection_half_width\n roi_left = self.roi[:,self.left_left:self.left_right]\n \n self.right_left = self.w_vline_right_roi - self.w_detection_half_width\n self.right_right = self.w_vline_right_roi + self.w_detection_half_width\n roi_right = self.roi[:,self.right_left : self.right_right]\n \n self.w_vline_left_roix = roi_left.shape[1] - self.w_detection_half_width\n self.w_vline_right_roix = self.w_detection_half_width\n \n self.w_zoom_left = (roi_left.shape[1]*self.w_zf, roi_left.shape[0]*self.w_zf)\n self.w_zoom_right= (roi_right.shape[1]*self.w_zf, roi_right.shape[0]*self.w_zf)", "def get_pres(model, region):\n if INCLUDE_SHOCK: \n ind0=0\n else:\n ind0 = 8*2 # exclude first two days\n if model.lower()==\"nicam\":\n if region.lower()==\"twp\":\n p = xr.open_dataset(ap.TWP_NICAM_P)[\"ms_pres\"][ind0:]\n elif region.lower()==\"shl\":\n p = xr.open_dataset(ap.SHL_NICAM_P)[\"ms_pres\"][ind0:]\n elif region.lower()==\"nau\":\n p = xr.open_dataset(ap.NAU_NICAM_P)[\"ms_pres\"][ind0:]\n elif model.lower()==\"fv3\":\n if region.lower()==\"twp\":\n p = xr.open_dataset(ap.TWP_FV3_P)[\"pres\"][ind0:]\n elif region.lower()==\"shl\":\n p = xr.open_dataset(ap.SHL_FV3_P)[\"pres\"][ind0:]\n elif region.lower()==\"nau\":\n p = xr.open_dataset(ap.NAU_FV3_P)[\"pres\"][ind0:]\n elif model.lower()==\"icon\":\n if region.lower()==\"twp\":\n p = xr.open_dataset(ap.TWP_ICON_P)[\"NEW\"][ind0:]\n elif region.lower()==\"shl\":\n p = xr.open_dataset(ap.SHL_ICON_P)[\"NEW\"][ind0:]\n elif region.lower()==\"nau\":\n p = xr.open_dataset(ap.NAU_ICON_P)[\"P\"][ind0:]\n else: assert Exception(\"region not valid, try SHL, NAU, or TWP\")\n elif model.lower()==\"sam\":\n if region.lower()==\"twp\":\n p = ((xr.open_dataset(ap.TWP_SAM_P)[\"p\"])*100)[ind0:,:]\n elif region.lower()==\"shl\":\n p = ((xr.open_dataset(ap.SHL_SAM_P)[\"p\"])*100)[ind0:,:]\n elif region.lower()==\"nau\":\n p = ((xr.open_dataset(ap.NAU_SAM_P)[\"p\"])*100)[ind0:,:]\n else:\n raise Exception(\"try valid region (SHL, NAU, TWP)\")\n else: raise Exception(\"invalide model: model = SAM, ICON, FV3, NICAM\")\n print(\"\\t returned pressure with shape\", p.shape)\n return p", "def setupDocVariables(self):\n # gloden mean\n 
ScCalendar.setupDocVariables(self)\n self.gmean = self.height - self.goldenMean(self.height) + self.margint\n # calendar size\n self.calHeight = self.height - self.gmean + self.margint\n # rows and cols\n self.rowSize = self.gmean / 8\n self.colSize = self.width / 7", "def _populate_wavecal(params):\n s = params['s']\n w = params['w']\n wave_cal = params['wavecal']\n spat_cal = params['spatcal']\n order_mask = params['order_mask']\n dlnw = params['dlnw']\n nw2 = params['nw2']\n plate_scale = params['pltscale']\n order_numbers = params['order_numbers']\n\n # start with order mask set to zero\n order_mask[:] = 0\n\n for i in range(1, params['norders'] + 1):\n if params['order_idx'] != -1:\n if i != params['order']:\n continue\n idx = params['order_idx']\n else:\n idx = i\n\n wnoi = params['wnoc'] + params['dw'] * (\n idx - (params['norders'] + 1) / 2)\n bottom = params['ob'][idx - 1]\n top = params['ot'][idx - 1]\n start = params['os'][idx - 1]\n stop = params['oe'][idx - 1]\n\n if params['order_idx'] == -1:\n in_range = ((s >= bottom) & (s <= top)\n & (w >= start) & (w <= stop))\n if np.sum(in_range) == 0: # pragma: no cover\n continue\n\n wave_cal[in_range] = wnoi * np.exp(\n dlnw * (w[in_range] - nw2 + 0.5))\n if params['crossdisp']:\n spat_cal[in_range] = (top - s[in_range]) * plate_scale\n else:\n spat_cal[in_range] = (s[in_range] - bottom) * plate_scale\n order_mask[in_range] = order_numbers[i - 1]\n else:\n in_range = (w >= start) & (w <= stop)\n if np.sum(in_range) == 0: # pragma: no cover\n continue\n params['wavemap'] = wnoi * np.exp(dlnw * (w[in_range] - nw2 + 0.5))", "def getSeasonalProbabilities(probability_collection, year, band_names, reduce_method='median', season_list = [['winter',-1,12,1,0,2,'end'],['spring',0,3,1,0,5,'end'],['summer',0,6,1,0,8,'end'],['fall',0,9,1,0,11,'end']], include_difference=True, year_difference=1, image_name='season_probs_{}'):\n season_changes = []\n year = int(year)\n for season_definition in season_list:\n season_name = season_definition[0]\n season_name = season_name.lower()\n \n season_start_year_position = season_definition[1]\n season_start_month = season_definition[2]\n season_start_day = season_definition[3]\n season_end_year_position = season_definition[4]\n season_end_month = season_definition[5]\n season_end_day = season_definition[6]\n \n season_start_year_firstYear = year+season_start_year_position\n season_end_year_firstYear = year+season_end_year_position\n \n if include_difference:\n season_start_year_secondYear = year+season_start_year_position+year_difference\n season_end_year_secondYear = year+season_end_year_position+year_difference\n \n if season_start_day == 'end':\n season_firstYear_start_day = calendar.monthrange(season_start_year_firstYear, int(season_start_month))[1]\n if include_difference:\n season_secondYear_start_day = calendar.monthrange(season_end_year_firstYear, int(season_start_month))[1]\n \n else:\n season_firstYear_start_day = season_start_day\n if include_difference:\n season_secondYear_start_day = season_start_day\n \n if season_end_day == 'end':\n season_firstYear_end_day = calendar.monthrange(season_end_year_firstYear, int(season_end_month))[1]\n if include_difference:\n season_secondYear_end_day = calendar.monthrange(season_start_year_secondYear, int(season_end_month))[1]\n \n else:\n season_firstYear_end_day = season_end_day\n if include_difference:\n season_secondYear_end_day = season_end_day\n \n season_firstYear_start = '{}-{}-{}'.format(season_start_year_firstYear, season_start_month, 
season_firstYear_start_day)\n season_firstYear_end = '{}-{}-{}'.format(season_end_year_firstYear, season_end_month, season_firstYear_end_day)\n \n if include_difference:\n season_secondYear_start = '{}-{}-{}'.format(season_start_year_secondYear, season_start_month, season_secondYear_start_day)\n season_secondYear_end = '{}-{}-{}'.format(season_end_year_secondYear, season_end_month, season_secondYear_end_day) \n \n if reduce_method=='mean':\n season_image = probability_collection.filterDate(season_firstYear_start,season_firstYear_end).reduce(ee.Reducer.mean()).rename(band_names)\n if include_difference:\n diff_image = getTemporalProbabilityDifference(probability_collection, season_firstYear_start, \n season_firstYear_end, season_secondYear_start, season_secondYear_end, reduce_method='mean').rename(band_names)\n else:\n season_image = probability_collection.filterDate(season_firstYear_start,season_firstYear_end).reduce(ee.Reducer.median()).rename(band_names)\n if include_difference:\n diff_image = getTemporalProbabilityDifference(probability_collection, season_firstYear_start, \n season_firstYear_end, season_secondYear_start, season_secondYear_end, reduce_method='median').rename(band_names)\n \n season_image = season_image.set('system:index','{}_start'.format(season_name))\n \n season_changes.append(season_image)\n \n if include_difference:\n diff_image = diff_image.set('system:index','{}_difference'.format(season_name))\n season_changes.append(diff_image) \n \n season_changes = ee.ImageCollection(season_changes) \n season_changes = season_changes.toBands()\n season_changes = season_changes.set('system:index',image_name.format(year))\n season_changes = season_changes.set('system:time_start',ee.Date(season_firstYear_start))\n season_changes = season_changes.set('system:time_end',ee.Date(season_firstYear_end))\n return season_changes", "def get_calibration_params(self):\r\n profile = self.pipeline.get_active_profile()\r\n\r\n color_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))\r\n color_intrinsics = color_profile.get_intrinsics()\r\n\r\n depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))\r\n depth_intrinsics = depth_profile.get_intrinsics()\r\n\r\n depth_to_color_extrinsics = depth_profile.get_extrinsics_to(color_profile)\r\n \r\n calibration_params = {'color intrinsics': color_intrinsics,\r\n 'depth intrinsics': depth_intrinsics,\r\n 'depth to color extrinsics': depth_to_color_extrinsics}\r\n\r\n calibration_data = {}\r\n for name, params in calibration_params.items():\r\n calibration_data[name] = {attr:str(getattr(params, attr)) if attr == 'model' else getattr(params, attr)\r\n for attr in dir(params) if not attr.startswith('__')}\r\n \r\n return calibration_data", "def compare_for_season( start_year = 1958,\n end_year = 1974,\n the_months = None,\n period_str = \"djf\"):\n #b, lons2d, lats2d = draw_regions.get_basemap_and_coords(llcrnrlat=40.0, llcrnrlon=-145, urcrnrlon=-10)\n b, lons2d, lats2d = draw_regions.get_basemap_and_coords()\n lons2d[lons2d > 180] -= 360\n x, y = b(lons2d, lats2d)\n\n\n cru = CRUDataManager()\n cru_data = cru.get_mean(start_year,end_year, months = the_months)\n cru_data_interp = cru.interpolate_data_to(cru_data, lons2d, lats2d)\n\n\n temp_levels = np.arange(-40, 40, 5)\n diff_levels = np.arange(-10, 12, 2)\n gs = gridspec.GridSpec(3,2)\n #plot_utils.apply_plot_params(width_pt=None, height_cm=20, width_cm=20, font_size=12)\n fig = plt.figure()\n coast_line_width = 0.25\n axes_list = []\n\n\n\n #plot CRU data\n ax = 
fig.add_subplot(gs[0,:])\n axes_list.append(ax)\n cru_data_interp = maskoceans(lons2d, lats2d, cru_data_interp)\n img = b.contourf(x, y, cru_data_interp, ax = ax, levels = temp_levels)\n ax.set_title(\"CRU\")\n plot_utils.draw_colorbar(fig, img, ax = ax)\n\n\n #era40 driven\n file_path = None\n era40_folder = \"data/CORDEX/na/era40_1\"\n file_prefix = \"dm\"\n for file_name in os.listdir(era40_folder):\n if period_str.upper() in file_name and file_name.startswith(file_prefix):\n file_path = os.path.join(era40_folder, file_name)\n break\n #get the temperature\n rpn_obj = RPN(file_path)\n t2m_era40 = rpn_obj.get_first_record_for_name_and_level(varname=\"TT\",\n level=1, level_kind=level_kinds.HYBRID)\n t2m_era40 = maskoceans(lons2d, lats2d, t2m_era40)\n ax = fig.add_subplot(gs[1,0])\n axes_list.append(ax)\n img = b.contourf(x, y, t2m_era40, ax = ax, levels = temp_levels)\n ax.set_title(\"ERA40 driven 1 (1958-1961)\")\n plot_utils.draw_colorbar(fig, img, ax = ax)\n rpn_obj.close()\n\n #era40 - cru\n ax = fig.add_subplot(gs[1,1])\n axes_list.append(ax)\n img = b.contourf(x, y, t2m_era40 - cru_data_interp, ax = ax, levels = diff_levels)\n ax.set_title(\"ERA40 driven 1 - CRU\")\n plot_utils.draw_colorbar(fig, img, ax = ax)\n\n\n plot_e2_data = False\n if plot_e2_data:\n ##get and plot E2 data\n file_path = None\n e2_folder = \"data/CORDEX/na/e2\"\n prefix = \"dm\"\n #get file path\n for file_name in os.listdir(e2_folder):\n if file_name.endswith(period_str) and file_name.startswith(prefix):\n file_path = os.path.join(e2_folder, file_name)\n break\n pass\n #get the temperature\n rpn_obj = RPN(file_path)\n t2m = rpn_obj.get_first_record_for_name_and_level(varname=\"TT\",\n level=1, level_kind=level_kinds.HYBRID)\n t2m = maskoceans(lons2d, lats2d, t2m)\n ax = fig.add_subplot(gs[2,0])\n axes_list.append(ax)\n img = b.contourf(x, y, t2m, ax = ax, levels = temp_levels)\n ax.set_title(\"E2, GCM driven\")\n plot_utils.draw_colorbar(fig, img, ax = ax)\n\n #e2 - cru\n ax = fig.add_subplot(gs[2,1])\n axes_list.append(ax)\n img = b.contourf(x, y, t2m - cru_data_interp, ax = ax, levels = diff_levels)\n ax.set_title(\"E2, GCM driven - CRU\")\n plot_utils.draw_colorbar(fig, img, ax = ax)\n\n\n ####Draw common elements\n pf_kinds = draw_regions.get_permafrost_mask(lons2d, lats2d)\n for the_ax in axes_list:\n b.drawcoastlines(ax = the_ax, linewidth = coast_line_width)\n b.contour(x, y, pf_kinds, ax = the_ax, colors = \"k\")\n\n\n\n gs.tight_layout(fig, h_pad = 5, w_pad = 5, pad=2)\n fig.suptitle(period_str.upper(), y = 0.03, x = 0.5)\n fig.savefig(\"temperature_validation_{0}.png\".format(period_str))\n\n\n\n\n\n fig = plt.figure()\n ax = plt.gca()\n img = b.contourf(x, y, t2m_era40 - cru_data_interp, ax = ax, levels = diff_levels)\n ax.set_title(\"ERA40 driven 1 - CRU\")\n plot_utils.draw_colorbar(fig, img, ax = ax)\n b.drawcoastlines(ax = ax, linewidth = coast_line_width)\n b.contour(x, y, pf_kinds, ax = ax, colors = \"k\")\n fig.savefig(\"temperature_diff_{0}.png\".format(period_str))\n\n pass", "def get_hyperparameters(year):", "def make_RA_window(window):\n\n from datetime import datetime\n\n window_before=window[0]*15 # hours to convert to degrees\n window_after=window[1]*15\n \n RA_center = datetime.now().timetuple().tm_yday /365 * 360 + 90 \n # Jan 1 has local sidereal time at midnight of ~6 hrs\n\n RA_lower=RA_center-window_before\n if RA_lower<0: RA_lower=RA_lower+360\n\n RA_upper=RA_center+window_after\n if RA_upper>360: RA_upper=RA_upper-360\n\n# print(\"****\",RA_lower, RA_upper, RA_center)\n return 
RA_lower, RA_upper", "def range_process( instrument, raw, max_range, constants\n ,rs_cal, rs_Cxx, corr_adjusts ,processing_defaults):\n\n\n \n\n assert(rs_Cxx is not None)\n rs = hau.Time_Z_Group(like=raw)\n\n if 0:\n import matplotlib.pylab as plt\n \n mol = np.nanmean(raw.molecular_counts,0)\n wfov = np.nanmean(raw.molecular_wfov_counts,0)\n bin_vec = 7.5 * np.arange(len(wfov))\n mol = mol - np.nanmean(mol[0:40])\n wfov = wfov - np.nanmean(wfov[0:40])\n mol *= (bin_vec-45*7.5)**2\n wfov *= (bin_vec-45*7.5)**2\n wfov *= np.exp(-2*bin_vec *1e-5)\n #wfov = wfov - bin_vec*wfov[900]/(900 *0.0001)\n wfov *= mol[900]/wfov[900]\n plt.figure(99999)\n plt.plot(bin_vec,wfov,'c',bin_vec,mol,'r')\n ax=plt.gca()\n ax.set_yscale('log')\n plt.grid(True)\n plt.show()\n print j\n #copy corrected raw into rs \n for field in ['transmitted_1064_energy','transmitted_energy','seeded_shots','molecular_counts'\n ,'combined_lo_counts','combined_hi_counts','cross_pol_counts',\n 'combined_wfov_counts','molecular_wfov_counts',\n 'molecular_i2a_counts','combined_1064_counts','telescope_pointing']:\n if hasattr(raw,field):\n setattr(rs,field,getattr(raw,field).copy())\n setattr(rs,'raw_'+field,getattr(raw,field).copy())\n \n # compute bin number of laser pulse\n [dark_interval_end_time, laser_pulse_time, cal_pulse_end_time] = \\\n constants['apd_pulse_timing']\n bin_duration = constants['binwidth']\n s_bin = int(laser_pulse_time / bin_duration) # laser pulse bin number\n #dark_interval_end_bin = int(dark_interval_end_time / bin_duration)- 1\n\n nalts = raw.molecular_counts.shape[1]\n\n #save geo corrected raw counts as 'var_xxx' in rs so that they get averaged without\n #other range processing for use in compute_photon_statistics. We also multiply\n #by the square of the geocorrection to account for the geocorrection in the\n #signal used compute_phothon_statistics()\n if processing_defaults.enabled('compute_stats'):\n ones_array = np.ones(raw.molecular_counts.shape)\n # bin 0 of geo_correction is defined as occurring at the laser pulse\n geocorr = ones_array.copy()\n geocorr[:,s_bin:] = rs_cal.geo.data[:nalts-s_bin, 1] * ones_array[:,s_bin:]\n \n for field in ('molecular_counts','combined_lo_counts'\n ,'combined_hi_counts','cross_pol_counts','combined_wfov_counts'\n ,'molecular_wfov_counts','molecular_i2a_counts','combined_1064_counts'):\n if hasattr(raw,field):\n setattr(rs,'var_raw_'+field,getattr(raw,field)*geocorr*geocorr) \n \n #counts arrays are the average number of counts in a data raw acquistion interval\n #of raw.times[2]-raw.times[1] while seeded_shots is the total number of laser pulses\n #the acquisition interval prior to preaveraging in preprocess_raw.py\n\n #note: this does not compensate for the pileup correction--in very high count areas this\n #will under estimate the varience because actual counts are multipled by a pileup correction\n #in the preprocess_raw.py routine \n\n #counts have been pileup corrected in preprocessing\n #do dark correction for all channels\n \n s_time =datetime.utcnow()\n dark_count_correction(instrument,raw,rs,rs_Cxx,corr_adjusts,processing_defaults,constants)\n print 'time for dark correction = ',datetime.utcnow() - s_time\n\n # gain correction for nadir pointing in airborne operation\n # this is expected to be a very small correction with little\n # impact on signal statitics\n if 'installation' in constants and constants['installation'] == 'airborne' \\\n and constants['nadir_comb_gain_adjustment'] != 1.0:\n print 'Apply nadir gain adjustment'\n print 'nadir gain adj= ', 
constants['nadir_comb_gain_adjustment']\n ix = np.arange(rs.telescope_pointing.shape[0])\n indices = ix[rs.telescope_pointing[:] < 0.1]\n nadir_gain_adj = constants['nadir_comb_gain_adjustment']\n rs.combined_lo_counts[indices, :] *= nadir_gain_adj\n rs.combined_hi_counts[indices, :] *= nadir_gain_adj\n \n #np.set_printoptions(threshold='nan')\n \n #do baseline correction\n rs = baseline_correction(rs,rs_cal,nalts,corr_adjusts,constants)\n \n # correct for differential geometry between 1064 and 532 nm channels\n rs = diff_1064_532_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(67)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b')\n ax=plt.gca()\n ax.set_xscale('log') \n #do combined-molecular differential geo correction if available\n rs = diff_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(68)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b')\n ax=plt.gca()\n ax.set_xscale('log') \n \n # Matt Add: do cross polarization differential geometry correction\n rs = diff_cp_geometry_correction(rs,rs_cal,nalts,processing_defaults\n ,corr_adjusts)\n \n # do i2a differential geo correction if present and relavent to instrument\n if hasattr(rs,'molecular_i2a_counts') and corr_adjusts['i2a_dif_geo_corr'] > 0:\n rs = i2a_diff_geo_correction(rs,rs_cal,corr_adjusts)\n\n #create combined_counts from combined_hi and combined_lo profiles\n rs = merge_combined_hi_and_lo(rs,constants)\n if 0:\n import matplotlib.pylab as plt\n plt.figure(69)\n plt.plot(np.nanmean(rs.combined_hi_counts,0),np.arange(len(rs.combined_hi_counts[0,:])),'r'\n ,np.nanmean(rs.molecular_counts,0),np.arange(len(rs.molecular_counts[0,:])),'b'\n ,np.nanmean(rs.combined_lo_counts,0),np.arange(len(rs.combined_lo_counts[0,:])),'c'\n ,np.nanmean(rs.cross_pol_counts,0),np.arange(len(rs.cross_pol_counts[0,:])),'g'\n ,np.nanmean(rs.combined_counts,0),np.arange(len(rs.combined_counts[0,:])),'k')\n ax=plt.gca()\n ax.set_xscale('log')\n #plt.show()\n\n print 'cp/mol'\n \"\"\"\n if processing_defaults.enabled('wfov_geo_corr') and hasattr(rs,'molecular_wfov_counts'):\n #do geometry correction after adjusting geo_corr with wide-field-of-view data.\n geo_corr = rs_cal.geo.data[:4000,1]\n s_bin = np.int(constants['apd_pulse_timing'][1]/constants['binwidth'])\n wfov_ratios = np.zeros(rs.molecular_wfov_counts.shape[1])\n wfov_ratios[:-s_bin] = nanmean(rs.molecular_wfov_counts[:,s_bin:],0)\\\n / nanmean(rs.molecular_counts[:,s_bin:],0) \n wfov_geometry_correction(rs,wfov_ratios,geo_corr,processing_defaults,constants,corr_adjusts)\n \"\"\"\n #does wfov corr exist?\n if processing_defaults.enabled('wfov_corr') and hasattr(rs,'molecular_wfov_counts')\\\n and hasattr(rs_cal,'geo')\\\n and hasattr(rs_cal.geo,'wfov_mol_ratio'):\n \n \n #add pre-trigger bins to wfov_mol_ratio array provided in geofile_default_file\n #and add to structure for use in extinction processing\n calibration_wfov_mol_ratio = np.zeros(rs.molecular_counts.shape[1])\n calibration_wfov_mol_ratio[s_bin:] = \\\n rs_cal.geo.wfov_mol_ratio[:(rs.molecular_counts.shape[1]-s_bin)]\n rs.calibration_wfov_mol_ratio = hau.Z_Array(calibration_wfov_mol_ratio)\n \n # do the normal geometric 
correction on the following variables\n select = ['molecular_counts','combined_lo_counts','combined_hi_counts'\n ,'molecular_i2a_counts','combined_1064_counts','molecular_wfov_counts'\n ,'combined_counts','cross_pol_counts']\n rs = lu.geometry_correction(select,rs,rs_cal,nalts,s_bin,corr_adjusts['geo_corr'])\n \n #mask close range bin counts\n first_bin_to_process = processing_defaults.get_value('first_bin_to_process','bin_number')\n for field in ['combined_hi_counts','combined_lo_counts','combined_wfov_counts','molecular_wfov_counts'\n 'molecular_i2a_counts','molecular_counts','cross_pol_counts','combined_counts'\\\n 'combined_1064_counts']:\n if hasattr(rs,field):\n getattr(rs,field)[:, :(s_bin+first_bin_to_process)] = np.NaN\n \n return rs", "def set_calibration_input_params(self):\n self.cparams.parameters['blur'] = self.calibration_params['blur']\n self.cparams.parameters['morph'] = self.calibration_params['morph']\n self.cparams.parameters['H'] = self.calibration_params['H']\n self.cparams.parameters['S'] = self.calibration_params['S']\n self.cparams.parameters['V'] = self.calibration_params['V']", "def setupDocVariables(self):\n # gloden mean\n ScCalendar.setupDocVariables(self)\n self.gmean = self.width - self.goldenMean(self.width) + self.marginl\n # calendar size = gmean\n # rows and cols\n self.rowSize = self.height / 8", "def calibration_times(file, Npts): \r\n unit=parse_units(file) #determine measurement system (CGS or SI)\r\n\r\n string='PauseRvrsl' #Pause at reversal field (new file format, -1 if not available)\r\n tr0=parse_header(file,string)\r\n \r\n string='PauseNtl' #Pause at reversal field (old file format, -1 if not available)\r\n tr1=parse_header(file,string)\r\n\r\n tr=np.max((tr0,tr1)) #select Pause value depending on file format\r\n \r\n string='Averaging time' #Measurement averaging time \r\n tau=parse_header(file,string)\r\n\r\n string='PauseCal' #Pause at calibration point\r\n tcal=parse_header(file,string)\r\n\r\n string='PauseSat' #Pause at saturation field\r\n ts=parse_header(file,string)\r\n\r\n string='SlewRate' #Field slewrate\r\n alpha=parse_header(file,string)\r\n\r\n string='HSat' #Satuation field\r\n Hs=parse_header(file,string)\r\n\r\n string='Hb2' #upper Hb value for the FORC box\r\n Hb2=parse_header(file,string)\r\n\r\n string='Hb1' #lower Hb value for the FORC box\r\n Hb1=parse_header(file,string)\r\n\r\n string='Hc2' #upper Hc value for the FORC box (n.b. 
Hc1 is assumed to be 0)\r\n Hc2=parse_header(file,string)\r\n\r\n string='NForc' # Numer of measured FORCs (new file format, -1 if not available)\r\n N0=parse_header(file,string)\r\n\r\n string='NCrv' # Numer of measured FORCs (old file format, -1 if not available)\r\n N1=parse_header(file,string)\r\n\r\n N=np.max((N0,N1)) #select Number of FORCs depending on file format\r\n\r\n if unit=='Cgs':\r\n alpha=alpha/1E4 #convert from Oe to T\r\n Hs=Hs/1E4 #convert from Oe to T\r\n Hb2=Hb2/1E4 #convert from Oe to T\r\n Hb1=Hb1/1E4 #convert from Oe to T\r\n \r\n dH = (Hc2-Hb1+Hb2)/N #estimated field spacing\r\n \r\n #now following Elgi's estimate of the measurement time\r\n nc2 = Hc2/dH\r\n Dt1 = tr + tau + tcal + ts + 2.*(Hs-Hb2-dH)/alpha\r\n Dt2 = tr + tau + (Hc2-Hb2-dH)/alpha\r\n\r\n Npts=int(Npts)\r\n tcal_k=np.zeros(Npts)\r\n \r\n for k in range(1,Npts+1):\r\n if k<=1+nc2:\r\n tcal_k[k-1]=k*Dt1-Dt2+dH/alpha*k**2+(tau-dH/alpha)*(k-1)**2\r\n else:\r\n tcal_k[k-1]=k*Dt1-Dt2+dH/alpha*k**2+(tau-dH/alpha)*((k-1)*(1+nc2)-nc2)\r\n\r\n return tcal_k", "def make_crds_parameter_dict(self):\n\n parameters = {}\n parameters['INSTRUME'] = self.instrument.upper()\n parameters['DETECTOR'] = self.detector.upper()\n parameters['READPATT'] = self.read_pattern.upper()\n parameters['SUBARRAY'] = self.subarray.upper()\n parameters['DATE-OBS'] = datetime.date.today().isoformat()\n current_date = datetime.datetime.now()\n parameters['TIME-OBS'] = current_date.time().isoformat()\n\n return parameters", "def _date_params(self, start_year):\n end_date = self.config.data_date.shift(days=+1)\n params = {\n 'startDate': arrow.get(start_year, 1, 1).format('YYYY-MM-DD'),\n 'endDate': end_date.format('YYYY-MM-DD')\n }\n return params", "def century_params_to_new_model_params(\n pft_param_path, animal_param_path, site_param_path):\n CENTURY_DIR = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Century46_PC_Jan-2014\"\n TEMPLATE_HIST = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input/regional_properties/Worldclim_precip/empty_2014_2015/0_hist.sch\"\n TEMPLATE_SCH = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input/regional_properties/Worldclim_precip/empty_2014_2015/0.sch\"\n TEMPLATE_100 = \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/Kenya/input/regional_properties/Worldclim_precip/empty_2014_2015/0.100\"\n new_model_args = {\n 'template_level': 'GLP',\n 'fix_file': \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/Forage_model/model_inputs/Laikipia_RPM/Century_inputs/drytrpfi.100\",\n 'grass_type': 'C4',\n 'herbivore_csv': \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/Forage_model/model_inputs/herd_avg_uncalibrated.csv\"\n }\n # parameter table containing only necessary parameters\n parameter_table = pandas.read_csv(\n \"C:/Users/ginge/Dropbox/NatCap_backup/Forage_model/CENTURY4.6/GK_doc/Century_parameter_table.csv\")\n parameters_to_keep = parameter_table['Century parameter name'].tolist()\n crop_params = os.path.join(CENTURY_DIR, 'crop.100')\n\n # get crop from TEMPLATE_HIST and TEMPLATE_SCH\n first_month = set()\n senescence_month = set()\n last_month = set()\n crop_list = set()\n with open(TEMPLATE_HIST, 'r') as hist_sch:\n for line in hist_sch:\n if ' CROP' in line:\n crop_line = next(hist_sch)\n crop_list.add(crop_line[:10].strip())\n if ' FRST' in line:\n first_month.add(line[7:10].strip())\n if ' SENM' in line:\n senescence_month.add(line[7:10].strip())\n if ' LAST' in line:\n last_month.add(line[7:10].strip())\n with 
open(TEMPLATE_SCH, 'r') as hist_sch:\n for line in hist_sch:\n if ' CROP' in line:\n crop_line = next(hist_sch)\n crop_list.add(crop_line[:10].strip())\n if ' FRST' in line:\n first_month.add(line[7:10].strip())\n if ' SENM' in line:\n senescence_month.add(line[7:10].strip())\n if ' LAST' in line:\n last_month.add(line[7:10].strip())\n # ensure that crop (e.g. GCD_G) is same between hist and extend schedule\n assert len(crop_list) == 1, \"We can only handle one PFT for old model\"\n # ensure that the file contains only one schedule to begin and end\n # growth\n assert len(first_month) == 1, \"More than one starting month found\"\n assert len(last_month) == 1, \"More than one ending month found\"\n PFT_label = list(crop_list)[0]\n\n # collect parameters from all Century sources\n master_param_dict = {}\n # get crop parameters from crop.100 file in CENTURY_DIR\n with open(crop_params, 'r') as cparam:\n for line in cparam:\n if line.startswith('{} '.format(PFT_label)):\n while 'MXDDHRV' not in line:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n line = next(cparam)\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n # get grazing effect parameters from graz.100 file\n graz_file = os.path.join(CENTURY_DIR, 'graz.100')\n with open(graz_file, 'r') as grazparams:\n for line in grazparams:\n if line.startswith(new_model_args['template_level']):\n line = next(grazparams)\n while 'FECLIG' not in line:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n line = next(grazparams)\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n # get site parameters from TEMPLATE_100\n with open(TEMPLATE_100, 'r') as siteparam:\n for line in siteparam:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n # get fixed parameters from new_model_args['fix_file']\n with open(new_model_args['fix_file'], 'r') as siteparam:\n for line in siteparam:\n label = re.sub(r\"\\'\", \"\", line[13:].strip()).lower()\n if label in parameters_to_keep:\n value = float(line[:13].strip())\n master_param_dict[label] = value\n\n def century_to_rp(century_label):\n \"\"\"Convert Century name to rangeland production name.\"\"\"\n rp = re.sub(r\"\\(\", \"_\", century_label)\n rp = re.sub(r\",\", \"_\", rp)\n rp = re.sub(r\"\\)\", \"\", rp)\n return rp\n\n # apportion parameters to PFT and site tables\n PFT_param_dict = {'PFT': 1}\n pft_params = parameter_table[\n parameter_table['Property of'] == 'PFT']['Century parameter name']\n for label in pft_params:\n PFT_param_dict[label] = master_param_dict[label]\n site_param_dict = {'site': 1}\n site_params = parameter_table[\n parameter_table['Property of'] == 'site']['Century parameter name']\n for label in site_params:\n site_param_dict[label] = master_param_dict[label]\n animal_param_dict = {'animal_id': 1}\n animal_params = parameter_table[\n parameter_table['Property of'] == 'animal']['Century parameter name']\n for label in animal_params:\n animal_param_dict[label] = master_param_dict[label]\n\n # add to grass csv to make PFT 
trait table\n PFT_param_dict['growth_months'] = (\n [','.join([str(m) for m in range(\n int(list(first_month)[0]), int(list(last_month)[0]) + 1)])])\n if senescence_month:\n PFT_param_dict['senescence_month'] = (\n ','.join([str(m) for m in list(senescence_month)]))\n if new_model_args['grass_type'] == 'C3':\n PFT_param_dict['species_factor'] = 0\n else:\n PFT_param_dict['species_factor'] = 0.16\n pft_df = pandas.DataFrame(PFT_param_dict, index=[0])\n col_rename_dict = {c: century_to_rp(c) for c in pft_df.columns.values}\n pft_df.rename(index=int, columns=col_rename_dict, inplace=True)\n pft_df.to_csv(pft_param_path, index=False)\n # TODO: add new PFT parameters:\n # digestibility_slope\n # digestibility_intercept\n\n # add to herbivore csv to make animal parameter table\n animal_beta_df = pandas.read_csv(new_model_args['herbivore_csv'])\n animal_df = pandas.DataFrame(animal_param_dict, index=[0])\n col_rename_dict = {c: century_to_rp(c) for c in animal_df.columns.values}\n animal_df.rename(index=int, columns=col_rename_dict, inplace=True)\n merged_animal_df = pandas.concat(\n [animal_beta_df, animal_df], axis=1, sort=False)\n merged_animal_df.to_csv(animal_param_path, index=False)\n # TODO: add parameter 'grzeff'\n\n # make site parameter table\n site_df = pandas.DataFrame(site_param_dict, index=[0])\n col_rename_dict = {c: century_to_rp(c) for c in site_df.columns.values}\n site_df.rename(index=int, columns=col_rename_dict, inplace=True)\n site_df.to_csv(site_param_path, index=False)", "def read_model_parameters(ClockStruct,Soil,Crop,weather_df):\n # create ParamStruct object\n ParamStruct = ParamStructClass()\n\n Soil.fill_nan()\n\n # Assign Soil object to ParamStruct\n ParamStruct.Soil = Soil\n\n\n\n while Soil.zSoil < Crop.Zmax+0.1:\n for i in Soil.profile.index[::-1]:\n if Soil.profile.loc[i,\"dz\"] < 0.25:\n Soil.profile.loc[i,\"dz\"] += 0.1\n Soil.fill_nan()\n break\n\n ###########\n # crop\n ###########\n\n\n\n\n\n\n# if isinstance(Crop, Iterable):\n# CropList=list(Crop)\n# else:\n# CropList = [Crop]\n\n\n# # assign variables to paramstruct\n# ParamStruct.NCrops = len(CropList)\n# if ParamStruct.NCrops > 1:\n# ParamStruct.SpecifiedPlantCalander = 'Y'\n# else:\n# ParamStruct.SpecifiedPlantCalander = 'N'\n\n\n\n\n# # add crop list to ParamStruct\n# ParamStruct.CropList = CropList\n\n ############################\n # plant and harvest times\n ############################\n\n\n# # find planting and harvest dates\n# # check if there is more than 1 crop or multiple plant dates in sim year\n# if ParamStruct.SpecifiedPlantCalander == \"Y\":\n# # if here than crop rotation occours during same period\n\n# # create variables from dataframe\n# PlantingDates = pd.to_datetime(planting_dates)\n# HarvestDates = pd.to_datetime(harvest_dates)\n\n\n# if (ParamStruct.NCrops > 1):\n\n# CropChoices = [crop.Name for crop in ParamStruct.CropList]\n\n\n# assert len(CropChoices) == len(PlantingDates) == len(HarvestDates)\n\n\n\n\n #elif ParamStruct.NCrops == 1:\n # Only one crop type considered during simulation - i.e. 
no rotations\n # either within or between years\n\n CropList = [Crop]\n ParamStruct.CropList = CropList\n ParamStruct.NCrops=1\n\n # Get start and end years for full simulation\n SimStartDate = ClockStruct.SimulationStartDate\n SimEndDate = ClockStruct.SimulationEndDate\n\n # extract the years and months of these dates\n start_end_years = pd.DatetimeIndex([SimStartDate,SimEndDate]).year\n start_end_months = pd.DatetimeIndex([SimStartDate,SimEndDate]).month\n\n\n if Crop.HarvestDate==None:\n Crop = compute_crop_calander(Crop,ClockStruct,weather_df)\n mature = int(Crop.MaturityCD+30)\n plant = pd.to_datetime(\"1990/\"+Crop.PlantingDate)\n harv = plant + np.timedelta64(mature,'D')\n new_harvest_date = str(harv.month)+'/'+str(harv.day)\n Crop.HarvestDate=new_harvest_date\n\n\n\n # check if crop growing season runs over calander year\n # Planting and harvest dates are in days/months format so just add arbitrary year\n singleYear = pd.to_datetime(\"1990/\"+Crop.PlantingDate) < pd.to_datetime(\"1990/\"+Crop.HarvestDate)\n if singleYear:\n # if normal year\n\n # specify the planting and harvest years as normal\n plant_years = list(range(start_end_years[0],start_end_years[1]+1))\n harvest_years = plant_years\n else:\n # if it takes over a year then the plant year finishes 1 year before end of sim\n # and harvest year starts 1 year after sim start\n\n if pd.to_datetime(str(start_end_years[1]+2)+'/'+Crop.HarvestDate) < SimEndDate:\n\n # specify shifted planting and harvest years\n plant_years = list(range(start_end_years[0],start_end_years[1]+1))\n harvest_years = list(range(start_end_years[0]+1,start_end_years[1]+2))\n else:\n\n plant_years = list(range(start_end_years[0],start_end_years[1]))\n harvest_years = list(range(start_end_years[0]+1,start_end_years[1]+1))\n\n\n\n # Correct for partial first growing season (may occur when simulating\n # off-season soil water balance)\n if pd.to_datetime(str(plant_years[0])+\"/\"+Crop.PlantingDate) < ClockStruct.SimulationStartDate:\n # shift everything by 1 year\n plant_years = plant_years[1:]\n harvest_years = harvest_years[1:]\n\n\n\n # ensure number of planting and harvest years are the same\n assert len(plant_years) == len(harvest_years)\n\n # create lists to hold variables\n PlantingDates = []\n HarvestDates = []\n CropChoices = []\n\n # save full harvest/planting dates and crop choices to lists\n for i in range(len(plant_years)):\n PlantingDates.append(str(plant_years[i]) + \"/\" + ParamStruct.CropList[0].PlantingDate )\n HarvestDates.append(str(harvest_years[i]) + \"/\" + ParamStruct.CropList[0].HarvestDate )\n CropChoices.append( ParamStruct.CropList[0].Name )\n\n\n # save crop choices\n ParamStruct.CropChoices = list(CropChoices)\n\n\n # save clock paramaters\n ClockStruct.PlantingDates = pd.to_datetime(PlantingDates)\n ClockStruct.HarvestDates = pd.to_datetime(HarvestDates)\n ClockStruct.nSeasons = len(PlantingDates)\n\n # Initialise growing season counter\n if pd.to_datetime(ClockStruct.StepStartTime) == ClockStruct.PlantingDates[0]:\n ClockStruct.SeasonCounter = 0\n else:\n ClockStruct.SeasonCounter = -1\n\n # return the FileLocations object as i have added some elements\n return ClockStruct, ParamStruct", "def get_static_projection_prompt(time_bin):\n info = get_static_projection_info_prompt(time_bin) \n N_locations = info['N_locations']\n N_axial = info['N_axial']\n N_azimuthal = info['N_azimuthal']\n descriptor = [ {'name':'time_bin', 'type':'uint', 'value':time_bin }, \n {'name':'time_start', 'type':'uint', 'value':None }, \n 
{'name':'time_end', 'type':'uint', 'value':None }, \n {'name':'N_counts', 'type':'uint', 'value':None }, \n {'name':'N_locations', 'type':'uint', 'value':None }, \n {'name':'compression_ratio', 'type':'float', 'value':None },\n {'name':'listmode_loss', 'type':'float', 'value':None },\n {'name':'N_axial', 'type':'uint', 'value':None }, \n {'name':'N_azimuthal', 'type':'uint', 'value':None }, \n {'name':'angles_axial', 'type':'float', 'value':None, 'dtype':float32, 'size':(1,10000) }, \n {'name':'angles_azimuthal', 'type':'float', 'value':None, 'dtype':float32, 'size':(1,10000) }, \n {'name':'size_u', 'type':'float', 'value':None }, \n {'name':'size_v', 'type':'float', 'value':None }, \n {'name':'N_u', 'type':'uint', 'value':None }, \n {'name':'N_v', 'type':'uint', 'value':None }, \n {'name':'offsets', 'type':'array', 'value':None, 'dtype':int32, 'size':(N_azimuthal,N_axial), 'order':'F'}, \n {'name':'counts', 'type':'array', 'value':None, 'dtype':float32, 'size':(N_locations), 'order':'F'}, \n {'name':'locations', 'type':'array', 'value':None, 'dtype':uint16, 'size':(3,N_locations), 'order':'F'}, ] \n r = call_c_function( mMR_c.get_static_projection_prompt, descriptor )\n if not r.status == petlink.status_success(): \n raise ErrorInCFunction(\"The execution of 'get_static_projection_prompt' was unsuccessful.\",r.status,'mMR_c.get_static_projection_prompt')\n return r.dictionary" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function to create soil profile class to store soil info. It's much faster to access the info when it's in a class compared to a dataframe
def create_soil_profile(ParamStruct):
    Profile = SoilProfileClass(int(ParamStruct.Soil.profile.shape[0]))
    pdf = ParamStruct.Soil.profile.astype('float64')

    Profile.dz = pdf.dz.values
    Profile.dzsum = pdf.dzsum.values
    Profile.zBot = pdf.zBot.values
    Profile.zTop = pdf.zTop.values
    Profile.zMid = pdf.zMid.values

    Profile.Comp = np.int64(pdf.Comp.values)
    Profile.Layer = np.int64(pdf.Layer.values)
    #Profile.Layer_dz = pdf.Layer_dz.values

    Profile.th_wp = pdf.th_wp.values
    Profile.th_fc = pdf.th_fc.values
    Profile.th_s = pdf.th_s.values

    Profile.Ksat = pdf.Ksat.values
    Profile.Penetrability = pdf.penetrability.values
    Profile.th_dry = pdf.th_dry.values
    Profile.tau = pdf.tau.values
    Profile.th_fc_Adj = pdf.th_fc_Adj.values

    if ParamStruct.WaterTable == 1:
        Profile.aCR = pdf.aCR.values
        Profile.bCR = pdf.bCR.values

    ParamStruct.Soil.Profile = Profile

    return ParamStruct
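The speed rationale stated in the query — plain ndarray attributes on a class are much cheaper to read inside a tight simulation loop than repeated DataFrame indexing — can be illustrated with a minimal, hypothetical micro-benchmark. SimpleProfile, the field names and the sizes below are made up for illustration; they are not part of this dataset row or of the function above:

import time
import numpy as np
import pandas as pd

n = 12  # number of soil compartments (arbitrary)
pdf = pd.DataFrame({"dz": np.full(n, 0.1), "th_fc": np.linspace(0.2, 0.3, n)})

class SimpleProfile:
    def __init__(self, df):
        # store plain numpy arrays, mirroring what create_soil_profile does
        self.dz = df.dz.values
        self.th_fc = df.th_fc.values

profile = SimpleProfile(pdf)

t0 = time.perf_counter()
for _ in range(10_000):
    for i in range(n):
        _ = pdf.loc[i, "th_fc"]      # DataFrame lookup on every access
t1 = time.perf_counter()
for _ in range(10_000):
    for i in range(n):
        _ = profile.th_fc[i]         # attribute + ndarray indexing
t2 = time.perf_counter()
print(f"DataFrame access: {t1 - t0:.3f}s, class/ndarray access: {t2 - t1:.3f}s")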
[ "def __init__(self):\n self.profiles = namedtuple('profiles',fake.profile().keys())\n self.group = namedtuple('group',['profiles'])\n\n for i in range(10000):\n p1 = self.profiles(**fake.profile())\n if(i==0):\n self.profiles_nt = self.group(p1)\n else:\n self.profiles_nt += self.group(p1)", "def prof_obj(professors):\r\n plist = [(prof(row.profID, row.Name, row.years_of_exp)) for index, row in professors.iterrows()]\r\n return plist", "def extract_profile(self):\n\n log.info(\"Extract profile from account page\")\n\n if not hasattr(\"self\", \"soup_profile\"):\n self.get_account_soup()\n soup = self.soup_profile\n\n self.account[\"profile\"][\"first_name\"] = self.from_soup_get_profile_first_name(soup)\n self.account[\"profile\"][\"last_name\"] = self.from_soup_get_profile_last_name(soup)\n self.account[\"profile\"][\"user_name\"] = self.from_soup_get_profile_user_name(soup)\n self.account[\"profile\"][\"date_of_birth\"] = self.from_soup_get_profile_date_of_birth(soup)\n self.account[\"profile\"][\"gender\"] = self.from_soup_get_profile_gender(soup)\n self.account[\"profile\"][\"phone\"] = self.from_soup_get_profile_phone_number(soup)\n self.account[\"profile\"][\"email\"] = self.from_soup_get_profile_email(soup)\n self.account[\"profile\"][\"member_since\"] = self.from_soup_get_profile_member_since(soup)\n self.account[\"profile\"][\"bike_angel_since\"] = self.from_soup_get_profile_bike_angel_since(soup)\n\n self.account[\"trips\"][\"lifetime\"] = self.from_soup_get_lifetime_stats(soup)\n\n self.account[\"my_statistics\"][\"number_of_trips\"] = self.from_soup_get_lifetime_stats_number_of_trips(soup)\n self.account[\"my_statistics\"][\"total_usage_time\"] = self.from_soup_get_lifetime_stats_total_usage_time(soup)\n self.account[\"my_statistics\"][\"distance_traveled\"] = self.from_soup_get_lifetime_stats_distance_traveled(soup)\n self.account[\"my_statistics\"][\"gas_saved\"] = self.from_soup_get_lifetime_stats_gas_saved(soup)\n self.account[\"my_statistics\"][\"co2_reduced\"] = self.from_soup_get_lifetime_stats_co2_reduced(soup)\n\n self.account[\"last_trip\"][\"date\"] = self.from_soup_get_last_trip_dates(soup)\n self.account[\"last_trip\"][\"station\"] = self.from_soup_get_last_trip_stations(soup)\n self.account[\"last_trip\"][\"trip_time\"] = self.from_soup_get_last_trip_time(soup)\n\n self.account[\"bike_key\"][\"number\"] = self.from_soup_get_bike_key_number(soup)\n self.account[\"bike_key\"][\"status\"] = self.from_soup_get_bike_key_status(soup)\n\n self.account[\"membership_status\"][\"current\"][\"type\"] = self.from_soup_get_membership_current_type(soup)\n self.account[\"membership_status\"][\"current\"][\"status\"] = self.from_soup_get_membership_current_status(soup)\n self.account[\"membership_status\"][\"current\"][\"expiration\"] = self.from_soup_get_membership_current_expiration(\n soup\n )\n\n self.account[\"membership_status\"][\"next\"][\"type\"] = self.from_soup_get_membership_next_type(soup)\n self.account[\"membership_status\"][\"next\"][\"status\"] = self.from_soup_get_membership_next_status(soup)\n self.account[\"membership_status\"][\"next\"][\"start\"] = self.from_soup_get_membership_next_start(soup)\n self.account[\"membership_status\"][\"next\"][\"expiration\"] = self.from_soup_get_membership_next_expiration(soup)\n\n self.account[\"billing_summary\"][\"next_billing_date\"] = self.from_soup_get_billing_summary_next_billing_date(\n soup\n )\n self.account[\"billing_summary\"][\"current_balance\"] = self.from_soup_get_billing_summary_current_balance(soup)\n\n 
self.account[\"billing_information\"][\"postal_code\"] = self.from_soup_get_billing_info_postal_code(soup)\n\n if self.ba:\n # these should work because try/except but we'll be safe\n log.info(\"Extracting bikeangels from profile\")\n self.account[\"my_statistics\"][\"bike_angels_current\"] = self.from_soup_get_ba_points_current(soup)\n self.account[\"my_statistics\"][\"bike_angels_annual\"] = self.from_soup_get_ba_points_annual(soup)\n self.account[\"my_statistics\"][\"bike_angels_lifetime\"] = self.from_soup_get_ba_points_lifetime(soup)\n\n self.account[\"last_trip\"][\"bike_angels_points\"] = self.from_soup_get_last_trip_bike_angels_points(soup)\n\n log.debug(self.account)\n return self.account", "def calculate_random_profile_info(no=10000,\n convert_nmd_tuple = True):\n no = no\n from faker import Faker\n import collections\n from collections import namedtuple\n from datetime import date\n \n import pandas as pd\n \n Faker.seed(0)\n fake = Faker()\n \n dlist = []\n faker_profile = namedtuple('faker_profile',['largest_blood_type',\n 'mean_current_location',\n 'oldest_person_age',\n 'average_age']) ## defining the namedtuple here with default assignment\n #fake_profile = fake.profiles()\n #profile_info_nt = faker_profile(0,0,0,0) ## initializing the object\n faker_profile_dictionary = {} ## declaring this\n convert_nmd_tuple = convert_nmd_tuple\n for _ in range(no):\n profile_dictonary = {}\n profile_dictonary.update(fake.profile(fields=['job','blood_group','current_location','birthdate']))\n profile_dictonary['age'] = (date.today() - profile_dictonary['birthdate']).days\n lat, long = profile_dictonary['current_location']\n profile_dictonary['lat'] = float(lat)\n profile_dictonary['long'] = float(long)\n dlist.append(profile_dictonary)\n \n profile_df = pd.DataFrame.from_dict(dlist) ## converting into dataframe\n \n def calculate_stats():\n \"\"\"\n this function will refer the namedtuple earlier and faker data to calculate stats\n -------\n calculate_stats.\n \"\"\"\n nonlocal convert_nmd_tuple\n \n oldest_person_age_days = max(profile_df['age'])\n avg_age_days = profile_df['age'].mean()\n mean_current_location = profile_df['lat'].mean(),profile_df['long'].mean()\n largest_blood_type = profile_df['blood_group'].value_counts().idxmax()\n profile_info_summary = faker_profile(largest_blood_type,\n mean_current_location,\n oldest_person_age_days,\n avg_age_days)\n # print(f\"Faker profile info return from Closure:{faker_profile}\")\n \n faker_profile_dictionary_temp = profile_info_summary._asdict()\n faker_profile_dictionary=dict(faker_profile_dictionary_temp)\n \n if (convert_nmd_tuple):\n return profile_info_summary\n else:\n return faker_profile_dictionary\n \n return calculate_stats", "def simple_profile(self):\r\n\r\n\t\treturn {\"username\":self.generator.user_name(),\r\n\t\t\t\"name\":self.generator.name(),\r\n\t\t\t\"sex\": self.random_element([\"M\",\"F\"]),\r\n\t\t\t\"address\":self.generator.address(),\r\n\t\t\t\"mail\":self.generator.free_email(),\r\n\r\n\t\t\t#\"password\":self.generator.password()\r\n\t\t\t\"birthdate\":self.generator.date(),\r\n\r\n\t\t}", "def _getStudentData(profile):\n if not profile.student_info:\n return None\n else:\n school_id = profile.student_info.school_name\n school_country = profile.student_info.school_country\n expected_graduation = profile.student_info.expected_graduation\n\n if isinstance(profile, GSoCProfile):\n properties = {\n 'number_of_proposals': profile.student_info.number_of_proposals,\n 'number_of_projects': 
profile.student_info.number_of_projects,\n 'number_of_passed_evaluations':\n profile.student_info.passed_evaluations,\n 'number_of_failed_evaluations':\n profile.student_info.failed_evaluations,\n 'project_for_orgs': [ndb.Key.from_old_key(org_key) for org_key\n in profile.student_info.project_for_orgs]\n }\n\n if profile.student_info.tax_form:\n properties['tax_form'] = profile.student_info.getTaxFormKey()\n\n if profile.student_info.enrollment_form:\n properties['enrollment_form'] = (\n profile.student_info.getEnrollmentFormKey())\n\n degree = _degreeToEnum(profile)\n major = profile.student_info.major\n properties['education'] = education_model.Education(\n school_id=school_id, school_country=school_country,\n expected_graduation=expected_graduation, major=major, degree=degree)\n\n return profile_model.StudentData(**properties)\n else:\n properties = {\n 'number_of_completed_tasks':\n profile.student_info.number_of_completed_tasks,\n }\n\n if profile.student_info.consent_form:\n properties['consent_form'] = (\n profile.student_info.consent_form.key())\n properties['is_consent_form_verified'] = (\n profile.student_info.consent_form_verified)\n\n if profile.student_info.student_id_form:\n properties['enrollment_form'] = (\n profile.student_info.student_id_form.key())\n properties['is_enrollment_form_verified'] = (\n profile.student_info.student_id_form_verified)\n\n if profile.student_info.winner_for:\n properties['winner_for'] = ndb.Key.from_old_key(\n profile.student_info.winner_for.key())\n\n grade = profile.student_info.grade\n properties['education'] = education_model.Education(\n school_id=school_id, school_country=school_country,\n expected_graduation=expected_graduation, grade=grade)\n\n return profile_model.StudentData(**properties)", "def __init__(self):\n this = _coin.new_SoNurbsProfile()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def profile(self,fields=[]):\r\n\t\td={\r\n\t\t\"job\":self.generator.job(),\r\n\t\t\"company\":self.generator.company(),\r\n\t\t\"ssn\":self.generator.ssn(),\r\n\t\t\"residence\":self.generator.address(),\r\n\t\t\"current_location\":(self.generator.latitude(),self.generator.longitude()),\r\n\t\t\"blood_group\":\"\".join(self.random_element(list(itertools.product([\"A\",\"B\",\"AB\",\"0\"],[\"+\",\"-\"]))))\r\n\t\t}\r\n\r\n\t\td[\"website\"]=[self.generator.url() for i in range(1,self.random_int(2,5))]\r\n\t\td= dict(d,**self.generator.simple_profile())\r\n\t\t#field selection\r\n\t\tif len(fields)>0:\r\n\t\t\td=dict((k,v) for (k,v) in d.items() if k in fields)\r\n\r\n\t\treturn d", "def get_profile():\n # Get the netCDF file\n nc = test_sbm.make_ctd_file()\n\n # Create profile object\n profile = ambient.Profile(nc, chem_names='all')\n \n # Add crossflow\n z = profile.interp_ds.coords['z'].values\n ua = np.zeros(len(z))\n for i in range(len(z)):\n ua[i] = 0.15\n\n # Add this crossflow profile to the Profile dataset\n data = np.vstack((z, ua)).transpose()\n symbols = ['z', 'ua']\n units = ['m', 'm/s']\n comments = ['measured', 'synthetic']\n profile.append(data, symbols, units, comments, 0)\n \n # Close the netCDF dataset\n profile.close_nc()\n \n # Return a profile object\n return profile", "def calculate_stats():\n nonlocal convert_nmd_tuple\n \n oldest_person_age_days = max(profile_df['age'])\n avg_age_days = profile_df['age'].mean()\n mean_current_location = profile_df['lat'].mean(),profile_df['long'].mean()\n largest_blood_type = profile_df['blood_group'].value_counts().idxmax()\n profile_info_summary = 
faker_profile(largest_blood_type,\n mean_current_location,\n oldest_person_age_days,\n avg_age_days)\n # print(f\"Faker profile info return from Closure:{faker_profile}\")\n \n faker_profile_dictionary_temp = profile_info_summary._asdict()\n faker_profile_dictionary=dict(faker_profile_dictionary_temp)\n \n if (convert_nmd_tuple):\n return profile_info_summary\n else:\n return faker_profile_dictionary", "def create_profile(self):\n\n self.intensity_profile.create_profile(self.points, self._data)", "def _build_profile(self):\n self.setDriver('GV7', 4)\n # This writes all the profile data files and returns our config info.\n wrote_profile = False\n try:\n config_data = write_profile(LOGGER,self.hubs)\n wrote_profile = True\n except (Exception) as err:\n self.l_error('build_profile','write_profile failed: {}'.format(err), exc_info=True)\n self.setDriver('GV7', 7)\n cdata = deepcopy(self.polyConfig['customData'])\n if wrote_profile:\n cdata['profile_version'] = self.serverdata['profile_version']\n self.saveCustomData(cdata)\n # Reload the config we just generated.\n self.load_config()\n #\n # Upload the profile\n #\n st = self.install_profile()\n if not self.first_run:\n self.restart_hubs()\n return st", "def __init__(self):\n this = _coin.new_SoLinearProfile()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def _process_profile(self, item):\n a_profile = nodes.Profile()\n logger = logging.getLogger(self.__class__.__name__)\n\n a_profile.set_name(item[\"@name\"])\n\n if \"@base\" in item:\n a_profile.set_base(item[\"@base\"])\n\n if \"@extends\" in item:\n a_profile.set_extends(item[\"@extends\"])\n\n if \"@minVersion\" in item:\n a_profile.set_min_version(item[\"@minVersion\"])\n\n if \"description\" in item:\n a_profile.set_description(item[\"description\"])\n\n logger.debug(\"Processing Profile: \\\"{}\\\"\".format(a_profile.get_name()))\n\n if \"object\" in item:\n if isinstance(item[\"object\"], list):\n for object_item in item[\"object\"]:\n a_profile.add_profile_object(self._process_profile_object(object_item))\n else:\n a_profile.add_profile_object(self._process_profile_object(item[\"object\"]))\n\n if \"parameter\" in item:\n if isinstance(item[\"parameter\"], list):\n for parameter_item in item[\"parameter\"]:\n a_profile.add_profile_parameter(self._process_profile_parameter(parameter_item))\n else:\n a_profile.add_profile_parameter(self._process_profile_parameter(item[\"parameter\"]))\n\n return a_profile", "def _getProfileFromUser(self):\r\n\r\n # Getting and Verifying current user\r\n user = getUser()\r\n \r\n # get the user_id (email) \r\n user_id = getUserId(user)\r\n\r\n # Creating a profile key. 
\r\n p_key = ndb.Key(Profile, user_id)\r\n \r\n # Using the profile key to get a profile Object\r\n profile = p_key.get()\r\n\r\n # create new Profile if not there\r\n if not profile:\r\n \r\n profile=Profile(\r\n key=p_key,\r\n displayName=user.nickname(),\r\n mainEmail=user.email(),\r\n teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),)\r\n \r\n profile.put()\r\n \r\n return profile", "def dataset_profiler():\n global d\n if not os.path.exists(os.path.join(d.results_folder, d.name, \"strategy-filtering\")):\n os.mkdir(os.path.join(d.results_folder, d.name, \"strategy-filtering\"))\n dp_folder_path = os.path.join(d.results_folder, d.name, \"strategy-filtering\", \"dataset-profiling\")\n if not os.path.exists(dp_folder_path):\n os.mkdir(dp_folder_path)\n for attribute in d.dataframe.columns.tolist():\n characters_dictionary = {}\n values_dictionary = {}\n for value in d.dataframe[attribute]:\n for character in list(set(list(value))):\n if character not in characters_dictionary:\n characters_dictionary[character] = 0.0\n characters_dictionary[character] += 1.0\n # for term in list(set(nltk.word_tokenize(value) + [value])):\n if value not in values_dictionary:\n values_dictionary[value] = 0.0\n values_dictionary[value] += 1.0\n column_profile = {\n \"characters\": {ch: characters_dictionary[ch] / d.dataframe.shape[0] for ch in characters_dictionary},\n \"values\": {v: values_dictionary[v] / d.dataframe.shape[0] for v in values_dictionary},\n }\n pickle.dump(column_profile, open(os.path.join(dp_folder_path, attribute + \".dictionary\"), \"wb\"))\n print(\"The {} dataset is profiled.\").format(d.name)", "def gather_profiling_info(self, extrainfo=None):\n # Add metadata\n info_dict = {}\n info_dict[\"time\"] = self.runtime\n info_dict[\"created-at\"] = time.time()\n if extrainfo:\n info_dict.update(extrainfo)\n\n # get the information from the timing script\n with self.runner_temp.in_dir():\n if os.path.exists(cf.STDERR_FILE):\n with open(cf.STDERR_FILE) as stderr_file:\n stderr = stderr_file.read()\n time_str = stderr.splitlines()[-1]\n time_dat = json.loads(time_str)\n info_dict.update(time_dat)\n\n logger.debug(\"Caching profile information\")\n self.info_dict = info_dict", "def __init__(self, name=\"\", description=\"\", homepage=\"\", accesspoint=\"\", memento_compliance=\"\", timegate=\"\", timemap=\"\", established=\"\", profile_updated=\"\", **kwargs):\n print(\"{0} => Initializing the profile for {1}\".format(time.strftime(\"%Y-%m-%d %H:%M:%S\"), name))\n self.about = {\n \"name\": name,\n \"description\": description,\n \"homepage\": homepage,\n \"accesspoint\": accesspoint,\n \"memento_compliance\": memento_compliance,\n \"timegate\": timegate,\n \"timemap\": timemap,\n \"established\": established,\n \"profile_updated\": profile_updated\n }\n self.__dict__[\"about\"].update(kwargs)\n self.stats = {}\n setattr(self, \"@context\", \"https://oduwsdl.github.io/contexts/archiveprofile.jsonld\")\n setattr(self, \"@id\", homepage)", "def init(self, state: 'SoState') -> \"void\":\n return _coin.SoProfileElement_init(self, state)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
b-perturbations are now calculated and stored in the nncomp_df dataframe. If CLEAR calculates a b-perturbation that is infeasible, the details of that b-perturbation are recorded in the missing_log_df dataframe. CLEAR classifies a b-perturbation as infeasible if it lies outside the 'feasibility range' it calculates for each feature.
def Calculate_Perturbations(explainer, results_df, multiClassBoundary_df, multi_index=None):
    print("\n Calculating b-counterfactuals \n")
    nncomp_df = pd.DataFrame(columns=['observation', 'multi_class', 'feature', 'orgFeatValue', 'orgAiProb',
                                      'actPerturbedFeatValue', 'AiProbWithActPerturbation', 'estPerturbedFeatValue',
                                      'errorPerturbation', 'regProbWithActPerturbation', 'errorRegProbActPerturb',
                                      'orgClass'])
    bPerturb = CLEARPerturbation()
    bPerturb.nncomp_idx = 1
    missing_log_df = pd.DataFrame(columns=['observation', 'feature', 'reason', 'perturbation'])
    first_obs = CLEAR_settings.first_obs
    last_obs = CLEAR_settings.last_obs + 1
    for i in range(first_obs, last_obs):
        s1 = pd.Series(results_df.local_data[i], explainer.feature_list)
        s2 = pd.DataFrame(columns=explainer.feature_list)
        s2 = s2.append(s1, ignore_index=True)
        x = symbols('x')
        if results_df.loc[i, 'features'][0] == '1':
            results_df.loc[i, 'features'].remove('1')
            results_df.loc[i, 'weights'] = results_df.loc[i, 'weights'].tolist()
            results_df.loc[i, 'weights'].pop(0)
        bPerturb.raw_eqn = results_df.loc[i, 'features'].copy()
        bPerturb.raw_weights = results_df.loc[i, 'weights']
        bPerturb.raw_data = results_df.loc[i, 'local_data'].tolist()
        features_processed = 0
        # the next 3 lines ensures that the same code can be used irrespective of whether the regression has
        # been forced through the data point to be explained
        for j in range(0, len(explainer.feature_list)):
            features_processed += 1
            bPerturb.target_feature_weight = 0
            bPerturb.target_feature = explainer.feature_list[j]
            old_value = s2.iloc[0, j]
            # set target probability for b-perturbation
            if len(explainer.class_labels) > 2:
                bPerturb.target_prob = multiClassBoundary_df.loc[i, bPerturb.target_feature + '_prob']
                if bPerturb.target_prob == 10000:
                    continue
            else:
                bPerturb.target_prob = CLEAR_settings.binary_decision_boundary
            # establish if b-perturbation exists
            if bPerturb.target_feature in explainer.numeric_features:
                temp_df = explainer.sensit_df[(explainer.sensit_df['observation'] == i) &
                                              (explainer.sensit_df['feature'] == bPerturb.target_feature)]
                temp_df = temp_df['probability'].agg(['min', 'max'])
                if not (temp_df['min'] <= bPerturb.target_prob) & (temp_df['max'] > bPerturb.target_prob):
                    continue
            elif bPerturb.target_feature in explainer.cat_features:
                # CLEAR only considers b-perturbations from target feature = 1, that change its class
                if old_value != 1:
                    continue
                temp = [x for x in explainer.cat_features if ((x[:3] == bPerturb.target_feature[:3]) and
                                                              (x != bPerturb.target_feature))]
                temp_df = explainer.catSensit_df[(explainer.catSensit_df['observation'] == i) &
                                                 (explainer.catSensit_df['feature'].isin(temp))]
                if bPerturb.target_prob >= results_df.loc[i, 'nn_forecast']:
                    if all(bPerturb.target_prob >= temp_df['probability']):
                        continue
                elif all(bPerturb.target_prob < temp_df['probability']):
                    continue
            # establish if feature is in equation
            if not any(bPerturb.target_feature in s for s in bPerturb.raw_eqn):
                if missing_log_df.empty:
                    idx = 0
                else:
                    idx = missing_log_df.index.max() + 1
                missing_log_df.loc[idx, 'observation'] = i
                missing_log_df.loc[idx, 'feature'] = bPerturb.target_feature
                missing_log_df.loc[idx, 'reason'] = 'not in raw equation'
                continue
            # If target feature is numeric then create equation string
            if bPerturb.target_feature in explainer.numeric_features:
                str_eqn, bPerturb.target_feature_weight = generateString(explainer, results_df, i, bPerturb)
                # Solve the equation and check if there is a solution with a 'feasible' value
                solution = []
                eqn_roots = solve(str_eqn, x)
                for k in eqn_roots:
                    if k.is_real:
                        solution.append(k)
                    elif k == eqn_roots[len(eqn_roots) - 1]:
                        if missing_log_df.empty:
                            idx = 0
                        else:
                            idx = missing_log_df.index.max() + 1
                        missing_log_df.loc[idx, 'feature'] = bPerturb.target_feature
                        missing_log_df.loc[idx, 'observation'] = i
                        missing_log_df.loc[idx, 'reason'] = 'value not real'
                        estPerturbedFeatValue = None
                        continue
                # get minimum perturbation
                if len(solution) > 0:
                    temp2 = []
                    for y in solution:
                        if explainer.feature_min[bPerturb.target_feature] <= y <= explainer.feature_max[bPerturb.target_feature]:
                            temp2.append(y)
                    if len(temp2) > 0:
                        valid_roots = temp2 - old_value
                        estPerturbedFeatValue = min(valid_roots, key=abs)
                        estPerturbedFeatValue = estPerturbedFeatValue + old_value
                    else:
                        # if roots are all infeasible, take root nearest to feasibility range
                        lowest_root = 999
                        for y in solution:
                            k = min(abs(explainer.feature_min[bPerturb.target_feature] - y),
                                    abs(explainer.feature_max[bPerturb.target_feature] - y))
                            if k < lowest_root:
                                lowest_root = k
                                estPerturbedFeatValue = y
                        if lowest_root == 999:
                            continue  # i.e. go to next feature in bPerturb.raw_eqn
                else:
                    continue
                # update the observation to be explained (whose feature values are in s2)
                estPerturbedFeatValue = np.float64(estPerturbedFeatValue)
                s2.iloc[0, j] = estPerturbedFeatValue
                if multi_index is None:
                    temp = 'N\A'
                else:
                    temp = explainer.class_labels[multi_index]
                bPerturb.nncomp_idx += 1
                nncomp_df.loc[bPerturb.nncomp_idx, 'observation'] = i
                nncomp_df.loc[bPerturb.nncomp_idx, 'multi_class'] = temp
                nncomp_df.loc[bPerturb.nncomp_idx, 'feature'] = bPerturb.target_feature
                nncomp_df.loc[bPerturb.nncomp_idx, 'orgFeatValue'] = old_value
                nncomp_df.loc[bPerturb.nncomp_idx, 'orgAiProb'] = results_df.loc[i, 'nn_forecast']
                nncomp_df.loc[bPerturb.nncomp_idx, 'estPerturbedFeatValue'] = estPerturbedFeatValue
                AiProbWithActPerturbation = explainer.counterf_rows_df.prediction[
                    (explainer.counterf_rows_df['feature'] == bPerturb.target_feature) &
                    (explainer.counterf_rows_df['observation'] == i)].iloc[0]
                nncomp_df.loc[bPerturb.nncomp_idx, 'AiProbWithActPerturbation'] = AiProbWithActPerturbation
                nncomp_df.loc[bPerturb.nncomp_idx, 'orgClass'] = results_df.loc[i, 'regression_class']
                s2.iloc[0, j] = old_value
                # estimate estPerturbedFeatValue corresponding to the decision boundary
                if len(explainer.class_labels) > 2:
                    boundary_val = multiClassBoundary_df.loc[i, bPerturb.target_feature + '_val']
                else:
                    boundary_val = CLEAR_regression.numeric_counterfactual(explainer, bPerturb.target_feature,
                                                                           old_value, i)
                nncomp_df.loc[bPerturb.nncomp_idx, 'actPerturbedFeatValue'] = boundary_val
                nncomp_df.loc[bPerturb.nncomp_idx, 'errorPerturbation'] = abs(estPerturbedFeatValue - boundary_val)
                str_eqn = str_eqn.replace('x', str(boundary_val))
                bPerturb.wTx = simplify(str_eqn)
                if CLEAR_settings.regression_type == 'multiple':
                    regProbWithActPerturbation = bPerturb.wTx + bPerturb.target_prob
                else:
                    regProbWithActPerturbation = 1 / (1 + exp(-bPerturb.wTx))
                nncomp_df.loc[bPerturb.nncomp_idx, 'regProbWithActPerturbation'] = regProbWithActPerturbation
                nncomp_df.loc[bPerturb.nncomp_idx, 'errorRegProbActPerturb'] = \
                    abs(regProbWithActPerturbation - AiProbWithActPerturbation)
            elif (bPerturb.target_feature in explainer.cat_features):
                # Create equation string
                obsData_df = pd.DataFrame(columns=explainer.feature_list)
                obsData_df.loc[0] = results_df.loc[i, 'local_data']
                dummy_trap = False
                counterfactualDummies = CLEAR_regression.getCounterfactualDummies(explainer,
                                                                                  results_df.loc[i, 'nn_forecast'],
                                                                                  obsData_df, i, dummy_trap)
                y = [x for x in counterfactualDummies if
                     (x.startswith(bPerturb.target_feature[:3]) and x != bPerturb.target_feature)]
                for k in y:
                    bPerturb.adj_raw_data = list(bPerturb.raw_data)
                    bPerturb.adj_raw_data[explainer.feature_list.index(k)] = 1
                    str_eqn, bPerturb.target_feature_weight = generateString(explainer, results_df, i, bPerturb)
                    str_eqn = str_eqn.replace('x', '0')
                    bPerturb.wTx = simplify(str_eqn)
                    nncomp_df = catUpdateNncomp_df(explainer, nncomp_df, bPerturb, multi_index, i, results_df, k)
    nncomp_df.observation = nncomp_df.observation.astype(int)
    nncomp_df.reset_index(inplace=True, drop=True)
    """ Determines the actual values of the AI decision boundary for numeric features. This will then be used
        for determining the fidelity errors of the CLEAR perturbations. """
    return nncomp_df, missing_log_df
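For the numeric-feature branch above, the core of the calculation is: solve the local regression equation for the target feature value that reaches the target probability, then keep only roots inside the feature's feasibility range. Below is a minimal sketch of that step with sympy, using made-up weights, a made-up feasibility range and a made-up current value; it does not reproduce CLEAR's own objects (generateString, the explainer, CLEAR_settings):

from sympy import symbols, solve

x = symbols('x')

# hypothetical local regression, already rearranged to w.T x - target_prob = 0,
# with the target feature replaced by the unknown x (all other features held fixed)
str_eqn = "0.8*x - 0.2*x**2 - 0.35"

feature_min, feature_max = 0.0, 3.0   # feasibility range derived for this feature (made up)
old_value = 0.4                       # observation's current value of the feature (made up)

real_roots = [r for r in solve(str_eqn, x) if r.is_real]
feasible = [r for r in real_roots if feature_min <= r <= feature_max]

if feasible:
    # b-perturbation estimate = feasible root closest to the original value
    estPerturbedFeatValue = min(feasible, key=lambda r: abs(r - old_value))
    print("estimated b-perturbation:", estPerturbedFeatValue)
else:
    # no feasible root: CLEAR would treat the b-perturbation as infeasible
    # and record the reason in missing_log_df
    print("infeasible for this feature")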
[ "def fit(self):\n # if self.verbose == 1:\n # print ('The list of all perturbation with its probability: \\n')\n # for perturb in range(len(self.p_list)):\n # print('%s perturbation with probability of: %s \\n' %(self.p_list[perturb], self.p_prob[perturb]))\n #p_current, error_vec_current ,error_vec_normal_current = self.minus_log_prob_neuron(self.neuron) # log probability of the current neuron\n p_current, error_vec_current ,error_vec_normal_current = self.kl_distance(self.neuron) # log probability of the current neuron\n acc = 0\n for i in range(self.ite):\n if(self.verbose ==1):\n #p_current, er , error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, er , error_vec_normal_current = self.kl_distance(self.neuron)\n #print('feature of current is: \\n %s' %(self.neuron.features)+ '\\n')\n print('\\n and its probability is: %s' %p_current)\n per = self.select_proposal() # MCMC index\n p_sym, details = self.do_MCMC(per)\n #p_proposal, error_vec_proposal, error_vec_normal_proposal = self.minus_log_prob_neuron(self.neuron)\n p_proposal, error_vec_proposal, error_vec_normal_proposal = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n #print('feature of proposal is: \\n %s' %(self.neuron.features))\n print('\\n and its probability is: %s' %p_proposal)\n a = min(1, p_sym * np.exp(p_current - p_proposal)) # Metropolis choice, notice that the values are minus log probability\n B = self.accept_proposal(a) # the boolean of acceptance\n if(B):\n p_current = p_proposal\n error_vec_current = error_vec_proposal\n error_vec_normal_current = error_vec_normal_proposal\n self.trend[:,i] = error_vec_proposal\n self.trend_normal[:,i] = error_vec_normal_proposal\n acc = acc + 1\n else:\n self.undo_MCMC(per, details)\n self.trend[:,i] = error_vec_current\n self.trend_normal[:,i] = error_vec_normal_current\n if len(self.neuron.nodes_list) == self.neuron.n_soma:\n self.neuron = self.initial_neuron(int(self.n_node/self.initial_seg),self.initial_seg)\n #p_current, error_vec_current, error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, error_vec_current, error_vec_normal_current = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n print ('\\n')\n print('Selected perturbation = ' + per)\n print('the p of acceptance was %s and it was %s that it`s been accepted.'%(a,B))\n print ('\\n')\n if(np.remainder(i,100)==0):\n self.evo.append(deepcopy(self.neuron))\n self.neuron.set_nodes_values()\n print acc", "def addUnknownLikelihood(self, bn):\n \n person_cpt_list = []\n \n counter = self.i_labels.index(self.unknown_var)\n\n # DONT USE THIS (0.5 FOR UNKNOWN, 0.5/(1-num_people) FOR THE REST)! IT MAKES FAR (FALSE ALARM RATE) WORSE\n # gives higher false positive! 
use the other one instead\n# li_f_unnorm = [ self.applyWeight(0.5/(len(self.i_labels)-1),self.weights[0]) for x in range(0, len(self.i_labels))]\n# li_f_unnorm[counter] = self.applyWeight(0.5, self.weights[0])\n\n # P(F|I) same way as the likelihoods for other states \n li_f_unnorm = [self.applyWeight((1 - self.face_recognition_rate)/(len(self.i_labels)-1),self.weights[0]) for x in range(0, len(self.i_labels))]\n li_f_unnorm[counter] = self.applyWeight(self.face_recognition_rate, self.weights[0])\n li_f = self.normaliseSum(li_f_unnorm)\n bn.cpt(self.F)[{'I':self.unknown_var}] = li_f[:]\n person_cpt_list.append(li_f[:])\n \n # P(G|I) : Equally likely to be male or female\n li_g = [0.5, 0.5]\n bn.cpt(self.G)[{'I':self.unknown_var}] = li_g[:]\n person_cpt_list.append(li_g[:])\n \n # P(A|I) : Uniform distribution for unknown age\n li_a = self.uniformDistribution(self.age_min, self.age_max)\n bn.cpt(self.A)[{'I':self.unknown_var}] = li_a[:]\n person_cpt_list.append(li_a[:])\n \n # P(H|I) : Uniform distribution for unknown height\n li_h = self.uniformDistribution(self.height_min, self.height_max)\n bn.cpt(self.H)[{'I':self.unknown_var}] = li_h[:]\n person_cpt_list.append(li_h[:])\n \n # P(T|I) : Uniform distribution for any time \n li_t = self.uniformDistribution(self.time_min, self.time_max)\n bn.cpt(self.T)[{'I':self.unknown_var}] = li_t[:]\n person_cpt_list.append(li_t[:])\n \n self.cpt_matrix.append(person_cpt_list)", "def _compute_perturbed_reliability(self, data, labels,\n weights, bias, perturbation):\n weights_perturbed = weights + perturbation * tf.random.normal(\n weights.shape, seed=test_util.test_seed())\n logits_perturbed = tf.matmul(data, weights_perturbed)\n logits_perturbed += tf.expand_dims(bias, 0)\n\n _, _, reliability = tfp.stats.brier_decomposition(\n labels=labels, logits=logits_perturbed)\n\n return float(reliability)", "def _prepare_ligand_BC(self):\n if self.data['BC'].protocol == []:\n\n # Set up the force field\n params_o = self.system.paramsFromAlpha(1.0, 'BC', site=False)\n self.system.setParams(params_o)\n\n # Get starting configurations\n basename = os.path.basename(self.args.FNs['score'])\n basename = basename[:basename.find('.')]\n dirname = os.path.dirname(self.args.FNs['score'])\n minimizedB_FN = os.path.join(dirname, basename + '_minB.nc')\n if os.path.isfile(minimizedB_FN):\n from netCDF4 import Dataset\n dock6_nc = Dataset(minimizedB_FN, 'r')\n minimizedConfigurations = [\n dock6_nc.variables['confs'][n][self.top.inv_prmtop_atom_order_L, :]\n for n in range(dock6_nc.variables['confs'].shape[0])\n ]\n Es = dict([(key, dock6_nc.variables[key][:])\n for key in dock6_nc.variables.keys() if key != 'confs'])\n dock6_nc.close()\n else:\n (minimizedConfigurations, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n\n from netCDF4 import Dataset\n dock6_nc = Dataset(minimizedB_FN, 'w')\n dock6_nc.createDimension('n_confs', len(minimizedConfigurations))\n dock6_nc.createDimension('n_atoms', minimizedConfigurations[0].shape[0])\n dock6_nc.createDimension('n_cartesian', 3)\n dock6_nc.createDimension('one', 1)\n dock6_nc.createVariable('confs', 'f8', ('n_confs', 'n_atoms', 'n_cartesian'))\n for n in range(len(minimizedConfigurations)):\n dock6_nc.variables['confs'][n] = minimizedConfigurations[n][self.top.prmtop_atom_order_L, :]\n for key in Es.keys():\n dock6_nc.createVariable(key, 'f8', ('one', 'n_confs'))\n dock6_nc.variables[key][:] = Es[key]\n dock6_nc.close()\n\n # initializes smart darting for BC\n # and sets the universe to the lowest energy 
configuration\n self.iterator.initializeSmartDartingConfigurations(\n minimizedConfigurations, 'BC', self.log, self.data)\n if len(minimizedConfigurations) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, minimizedConfigurations[-1]))\n self.data['BC'].confs['starting_poses'] = minimizedConfigurations\n\n # Ramp the temperature from 0 to the desired starting temperature using HMC\n self._ramp_T(params_o['T'], normalize=True)\n\n # Run at starting temperature\n seeds = [np.copy(self.top.universe.configuration().array) \\\n for n in range(self.args.params['BC']['seeds_per_state'])]\n else:\n seeds = None\n return seeds", "def calculate_free_energy_mbar(energy, count, bias = None, verbose = False): \n num_configurations = energy.shape[0]\n \n ## mbar_loss object to calcualte loss and grad\n loss_model = mbar_loss(energy, count, bias)\n optimizer = optim.LBFGS(loss_model.parameters(), max_iter = 10, tolerance_change=1e-5)\n\n ## calcualte loss and grad\n previous_loss = loss_model()\n previous_loss.backward()\n previous_loss = previous_loss.item()\n grad_max = torch.max(torch.abs(loss_model.bias.grad)).item()\n\n ## minimize loss using L-BFGS-B\n if verbose:\n print(\"start loss: {:>7.5f}, start grad: {:>7.5f}\".format(previous_loss, grad_max)) \n for i in range(30):\n def closure():\n optimizer.zero_grad()\n loss = loss_model()\n loss.backward() \n return loss\n optimizer.step(closure)\n loss = loss_model().item()\n grad_max = torch.max(torch.abs(loss_model.bias.grad)).item()\n \n if verbose:\n print(\"step: {:>4d}, loss:{:>7.5f}, grad: {:>7.5f}\".format(i, loss, grad_max))\n \n ## stop criterion for L-BFGS-B\n ## this is added because the optim.LBFGS often returns nan values\n ## when it runs too many iterations.\n if np.abs(loss-previous_loss) <= 1e-4 or grad_max <= 1e-4:\n break\n previous_loss = loss\n\n ## using the bias energies to calculate free energies for states with\n ## nonzero samples\n bias = loss_model.bias.data\n tmp = -torch.log(count/num_configurations)\n F = tmp - bias\n\n # # ## normalize free energyies of states with nonzero samples\n # F = F - torch.min(F,0)[0]\n # prob = torch.exp(-F)\n # prob = prob / prob.sum(-1, keepdim = True)\n # F_nz = - torch.log(prob)\n\n # ## update bias energy based on normalized F for states with\n # ## nonzero samples\n # bias = -torch.log(count/num_configurations) - F_nz\n\n return F, bias", "def bic(self):\n bics = []\n bics_bool = []\n for i, chain in enumerate(self.parent.chains):\n p, n_data, n_free = chain.posterior, chain.num_eff_data_points, chain.num_free_params\n if p is None or n_data is None or n_free is None:\n bics_bool.append(False)\n missing = \"\"\n if p is None:\n missing += \"posterior, \"\n if n_data is None:\n missing += \"num_eff_data_points, \"\n if n_free is None:\n missing += \"num_free_params, \"\n\n self._logger.warning(\"You need to set %s for chain %s to get the BIC\" % (missing[:-2], chain.name))\n else:\n bics_bool.append(True)\n bics.append(n_free * np.log(n_data) - 2 * np.max(p))\n if len(bics) > 0:\n bics -= np.min(bics)\n bics_fin = []\n i = 0\n for b in bics_bool:\n if not b:\n bics_fin.append(None)\n else:\n bics_fin.append(bics[i])\n i += 1\n return bics_fin", "def check_convergence(complete_data, missing_data, prev_prob, new_prob, threshold, log_likelihood_list, iterations):\n total_log_likelihood_prev_prob, total_log_likelihood_new_prob = 0, 0\n\n # for each complete data row (G, W, H) in the given dataset calculate log(P(G,W,H))\n for data in complete_data:\n g, w, h = 
data\n # P(G,W,H) = P(G) * P(W|G) * P(H|G) using probabilities from the previous probability tables\n prob_data_using_prev_prob = prev_prob[(g, ' ', ' ')] * prev_prob[(g, w, ' ')] * prev_prob[(g, ' ', h)]\n # P(G,W,H) = P(G) * P(W|G) * P(H|G) using probabilities from the new probability tables\n prob_data_using_new_prob = new_prob[(g, ' ', ' ')] * new_prob[(g, w, ' ')] * new_prob[(g, ' ', h)]\n # add log(P(G,W,H)) to the total log likelihoods\n total_log_likelihood_prev_prob += math.log(prob_data_using_prev_prob) * complete_data[data]\n total_log_likelihood_new_prob += math.log(prob_data_using_new_prob) * complete_data[data]\n\n # for each missing data row ('-', W, H) in the given dataset calculate log(P('-',W,H))\n for data in missing_data:\n w = data[1]\n h = data[2]\n prob_data_using_prev_prob, prob_data_using_new_prob = 0, 0\n\n # for G=0 and G=1\n for i in range(2):\n g = str(i)\n # P(W,H) = sum_of_G P(G) * P(W|G) * P(H|G) using probabilities from the previous probability tables\n prob_data_using_prev_prob += prev_prob[(g, ' ', ' ')] * prev_prob[(g, w, ' ')] * prev_prob[(g, ' ', h)]\n # P(W,H) = sum_of_G P(G) * P(W|G) * P(H|G) using probabilities from the new probability tables\n prob_data_using_new_prob += new_prob[(g, ' ', ' ')] * new_prob[(g, w, ' ')] * new_prob[(g, ' ', h)]\n # add log(P(W,H)) to the total log likelihoods\n total_log_likelihood_prev_prob += math.log(prob_data_using_prev_prob) * missing_data[data]\n total_log_likelihood_new_prob += math.log(prob_data_using_new_prob) * missing_data[data]\n\n # if on iteration#1 add the log likelihood of iteration#0 to the log likelihood list\n if iterations == 1:\n log_likelihood_list.append(total_log_likelihood_prev_prob)\n\n # add the new log likelihood of current iteration to the log likelihood list\n log_likelihood_list.append(total_log_likelihood_new_prob)\n\n # calculate change in log likelihood\n difference = abs(total_log_likelihood_prev_prob - total_log_likelihood_new_prob)\n\n return difference <= threshold, log_likelihood_list", "def about_integral(branch_df, known_opt_value):\n\n total_time = branch_df.iloc[-1]['elapsed']\n # opt = float(miplib_df.loc[miplib_df['Name'] == inst_name]['Objective']) # best known objective\n\n # copy part of branch_df\n use_cols = ['elapsed', 'best_integer', 'best_bound']\n copy_branch_df = branch_df[use_cols].copy()\n copy_branch_df['inc_changes'] = abs(copy_branch_df['best_integer'].diff(1))\n copy_branch_df['inc_bool'] = copy_branch_df['inc_changes'] != 0\n copy_branch_df['bb_changes'] = abs(copy_branch_df['best_bound'].diff(1))\n copy_branch_df['bb_bool'] = copy_branch_df['bb_changes'] != 0\n\n # compute primal integral pi, if value of opt is known\n if known_opt_value:\n primal_dict = OrderedDict() # {t_i: p(t_i)} for t_i with incumbent change\n primal_dict[0] = 1\n for idx, row in copy_branch_df.loc[copy_branch_df['inc_bool'] != 0].iterrows():\n primal_dict[row['elapsed']] = primal_gap(known_opt_value, row['best_integer'])\n primal_dict[total_time] = None\n\n times = list(primal_dict.keys())\n integrals = list(primal_dict.values())\n\n pi = 0\n for i in range(len(times) - 1):\n pi += integrals[i] * (times[i + 1] - times[i])\n else:\n pi = None\n\n # compute primal-dual integral pdi\n pd_dict = OrderedDict() # {t_i: pd(t_i)} for t_i with incumbent change\n pd_dict[0] = 1\n for idx, row in copy_branch_df.loc[(copy_branch_df['inc_bool'] != 0) | (copy_branch_df['bb_bool'] != 0)].iterrows():\n pd_dict[row['elapsed']] = primal_dual_gap(row['best_integer'], row['best_bound'])\n 
pd_dict[total_time] = None\n\n pd_times = list(pd_dict.keys())\n pd_integrals = list(pd_dict.values())\n\n pdi = 0\n for i in range(len(pd_times) - 1):\n pdi += pd_integrals[i] * (pd_times[i + 1] - pd_times[i])\n\n integral_list = [pi, pdi]\n\n if len(integral_list) != 2:\n print(\"***len(integral_list): {}\".format(len(integral_list)))\n\n return integral_list, len(integral_list)", "def bc_interpolation(teff, current_logg, fe_h, ebv, filter = 'K', debug=False, show_todo=False):\r\n\r\n # First find which of the precomputed ebv grids is closest to the actual ebv and load in the associated BC Table\r\n ebv_grid = np.array(['00','12','24','36','48'])\r\n ebv_grid_difference = np.array([abs(int(ebv_grid_entry)/100.-ebv) for ebv_grid_entry in ebv_grid])\r\n closest_ebv = np.where(np.min(ebv_grid_difference)==ebv_grid_difference)[0][0]\r\n BCTable = Table.read(r'C:\\Users\\jama2357\\Documents\\Galafiles\\GALAH\\DATA/Casagrande2014_BC_EBV_'+ebv_grid[closest_ebv]+'.fits',1)\r\n\r\n if show_todo:\r\n print('There are better ways to implement the EBV... In the best case, one could just adjust Lucas fortran interpolation routine to work in python...')\r\n \r\n # Now prepare \r\n\r\n # 1) Select appropriate Teff \r\n tg = np.unique(BCTable['teff'])\r\n tdiff = abs(tg-teff)\r\n tmin = np.where(tdiff == min(tdiff))[0]\r\n tmin = tmin[0]\r\n t1 = tg[tmin]\r\n if min(tg-teff) >= 0.0:\r\n # too low -> choose lowest Teff \r\n tfrac=0.0\r\n t2 = t1\r\n elif max(tg-teff) <= 0.0:\r\n # too high -> choose highest Teff \r\n tfrac=0.0\r\n t2 = t1\r\n else:\r\n # get ratio of two entries with smallest difference \r\n tsort=np.sort(tdiff)\r\n t2min = np.where(tsort[1] == tdiff); t2min = t2min[-1]\r\n t2 = tg[t2min]; t2=t2[0]\r\n tfrac = abs((teff - t1)/(t2-t1))\r\n\r\n # 2) Select appropriate [Fe/H]\r\n has_correct_teff = np.where((BCTable['teff'] == t1) | (BCTable['teff'] == t2))\r\n fg = np.unique(BCTable['feh'][has_correct_teff])\r\n fdiff = abs(fg-fe_h)\r\n fmin = np.where(fdiff == min(fdiff)); fmin = fmin[0]\r\n f1 = fg[fmin]; f1=f1[0]\r\n if min(fg-fe_h) >= 0.0:\r\n # too low -> choose lowest [Fe/H] \r\n ffrac=0.0\r\n f2 = f1\r\n elif max(fg-fe_h) <= 0.0:\r\n # too high -> choose highest [Fe/H] \r\n ffrac=0.0\r\n f2 = f1\r\n else:\r\n # get ratio of two entries with smallest difference \r\n fsort=np.sort(fdiff)\r\n f2min = np.where(fsort[1]== fdiff); f2min = f2min[-1]\r\n f2 = fg[f2min]; f2=f2[0]\r\n ffrac = abs((fe_h - f1)/(f2-f1))\r\n\r\n # 1) Select appropriate logg\r\n has_correct_teff_and_feh = np.where(\r\n ((BCTable['teff'] == t1) & (BCTable['feh']==f1)) | \r\n ((BCTable['teff'] == t1) & (BCTable['feh']==f2)) | \r\n ((BCTable['teff'] == t2) & (BCTable['feh']==f1)) | \r\n ((BCTable['teff'] == t2) & (BCTable['feh']==f2))\r\n )\r\n gg = np.unique(BCTable['logg'][has_correct_teff_and_feh])\r\n # Test wether requested log(g) is in grid \r\n gdiff = abs(gg-current_logg)\r\n gmin = np.where(gdiff == min(gdiff)); gmin = gmin[0]\r\n g1 = gg[gmin]; g1=g1[0]\r\n if min(gg-current_logg) >= 0.0:\r\n # too low -> choose lowest log(g) \r\n gfrac=0.0\r\n g2 = g1\r\n elif max(gg-current_logg) <= 0.0:\r\n # too high -> choose highest log(g) \r\n gfrac=0.0\r\n g2 = g1\r\n else:\r\n # get ratio of two entries with smallest difference \r\n gsort=np.sort(gdiff)\r\n g2min = np.where(gsort[1] == gdiff); g2min = g2min[-1]\r\n g2 = gg[g2min]; g2=g2[0]\r\n gfrac = abs((current_logg - g1)/(g2-g1))\r\n\r\n # Now get the BC values for the 8 points with either teff, logg, and fe_h value\r\n df111 = 
BCTable['BC_'+filter][(((BCTable['teff'] == t1) & (BCTable['feh'] == f1) & (BCTable['logg'] == g1)))]\r\n df112 = BCTable['BC_'+filter][(((BCTable['teff'] == t1) & (BCTable['feh'] == f1) & (BCTable['logg'] == g2)))]\r\n df121 = BCTable['BC_'+filter][(((BCTable['teff'] == t1) & (BCTable['feh'] == f2) & (BCTable['logg'] == g1)))]\r\n df211 = BCTable['BC_'+filter][(((BCTable['teff'] == t2) & (BCTable['feh'] == f1) & (BCTable['logg'] == g1)))]\r\n df122 = BCTable['BC_'+filter][(((BCTable['teff'] == t1) & (BCTable['feh'] == f2) & (BCTable['logg'] == g2)))]\r\n df212 = BCTable['BC_'+filter][(((BCTable['teff'] == t2) & (BCTable['feh'] == f1) & (BCTable['logg'] == g2)))]\r\n df221 = BCTable['BC_'+filter][(((BCTable['teff'] == t2) & (BCTable['feh'] == f2) & (BCTable['logg'] == g1)))]\r\n df222 = BCTable['BC_'+filter][(((BCTable['teff'] == t2) & (BCTable['feh'] == f2) & (BCTable['logg'] == g2)))]\r\n\r\n # Combine them, weighted by the fractions, first the logg difference, then the teff difference, then the feh difference\r\n d11=(1-gfrac)*df111+gfrac*df112\r\n d12=(1-gfrac)*df121+gfrac*df122\r\n d21=(1-gfrac)*df211+gfrac*df212\r\n d22=(1-gfrac)*df221+gfrac*df222\r\n\r\n d1=(1-ffrac)*d11+ffrac*d12\r\n d2=(1-ffrac)*d21+ffrac*d22\r\n\r\n bc = (1-tfrac)*d1+tfrac*d2\r\n \r\n if debug:\r\n print('////////')\r\n print('BC estimation: Teff/logg/feh values, weights estimated from the closest grid points')\r\n print(teff, tfrac,t1,t2)\r\n print(fe_h, ffrac,f1,f2)\r\n print(current_logg, gfrac,g1,g2)\r\n print('BC: ',float(bc))\r\n print('////////')\r\n\r\n return(float(bc))", "def is_colocalised(X, LD_matrix ,trait1, trait2,db=0):\n\n ### Get number of samples\n n = X.shape[0]\n \n ### generate individual linear models\n models1 = trait_simulation.build_linear_models(X,trait1)\n models2 = trait_simulation.build_linear_models(X,trait2)\n\n ### pull out slope and standard error terms.\n beta1 = [x.slope for x in models1]\n se_beta1 = [x.stderr for x in models1]\n\n beta2 = [x.slope for x in models2]\n se_beta2 = [x.stderr for x in models2]\n\n ### calculate z scores\n simulated_effectsize_data1 = ([x*numpy.sqrt(n) for x in beta1], LD_matrix, n)\n simulated_effectsize_data2 = ([x*numpy.sqrt(n) for x in beta2], LD_matrix, n)\n\n ### generate the gene set Bayes Factors\n gene_set_BFs1 = bayes_factors.calc_variant_set_BFs(simulated_effectsize_data1,k=4,v=0.01)\n gene_set_BFs2 = bayes_factors.calc_variant_set_BFs(simulated_effectsize_data2,k=4,v=0.01)\n \n\n ### calculate the posteriors\n gene_set_posteriors1 = bayes_factors.calc_posterior(gene_set_BFs1)\n gene_set_posteriors2 = bayes_factors.calc_posterior(gene_set_BFs2)\n \n if db == 1: \n \n print gene_set_BFs1[0:10]\n print gene_set_BFs2[0:10]\n \n print gene_set_posteriors1[0:10]\n print gene_set_posteriors2[0:10]\n\n\n ### sort by posterior size\n gene_set_posteriors1.sort(key=lambda x: x[0], reverse=False)\n gene_set_posteriors2.sort(key=lambda x: x[0], reverse=False)\n\n ### select just toe posteriors\n posteriors1 = [x[1] for x in gene_set_posteriors1]\n posteriors2 = [x[1] for x in gene_set_posteriors2]\n\n ### generate cartesian product from the posteriors\n cart_product = list(itertools.product(posteriors1,posteriors2))\n\n gene_set_len1 = len(gene_set_posteriors1)\n gene_set_len2 = len(gene_set_posteriors2)\n\n ### calculate colocalisation posteriors with a specificed scoring function.\n colocalisations = numpy.array(map(lambda x: min(x[0],x[1]), cart_product)).reshape(gene_set_len1,gene_set_len2)\n\n\n ### pull out sorted set list\n sorted_setlist1 = 
[x[0] for x in gene_set_posteriors1]\n sorted_setlist2 = [x[0] for x in gene_set_posteriors2]\n\n if db == 1:\n \n ### create bidirectional map from gene_set to positon in colocalisation array\n setlist_1map = bidict([(sorted_setlist1[i],i) for i in range(len(sorted_setlist1))])\n setlist_2map = bidict([(sorted_setlist1[i],i) for i in range(len(sorted_setlist2))])\n\n bf_1map = dict(gene_set_BFs1)\n bf_2map = dict(gene_set_BFs2)\n\n\n posterior1_map = dict(gene_set_posteriors1)\n posterior2_map = dict(gene_set_posteriors2)\n pdb.set_trace()\n\n ### output total evidence for colocalisation\n return sum([colocalisations[i][i] for i in range(colocalisations.shape[0])])", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n for label in self.legalLabels:\n sum = 0\n for feature in self.features:\n #print(self.conditionalProbabilities[(feature, label)])\n if datum[feature] == 1:\n # can't find log of 0 --- behavior is undefined\n if self.conditionalProbabilities[(feature, label)] == 0:\n sum = sum + 0\n else:\n sum = sum + math.log(self.conditionalProbabilities[(feature, label)])\n else:\n sum = sum + math.log(1 - self.conditionalProbabilities[(feature, label)])\n \n logJoint[label] = math.log(self.labelProbabilities[label]) + sum\n \n return logJoint", "def handle_subproblem_infeasible(self, fixed_nlp, config, cb_opt=None):\n # TODO try something else? Reinitialize with different initial\n # value?\n config.logger.info('NLP subproblem was locally infeasible.')\n self.nlp_infeasible_counter += 1\n if config.calculate_dual_at_solution:\n for c in fixed_nlp.MindtPy_utils.constraint_list:\n rhs = value(c.upper) if c.has_ub() else value(c.lower)\n c_geq = -1 if c.has_ub() else 1\n fixed_nlp.dual[c] = c_geq * max(0, c_geq * (rhs - value(c.body)))\n dual_values = list(\n fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list\n )\n else:\n dual_values = None\n\n # if config.strategy == 'PSC' or config.strategy == 'GBD':\n # for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True):\n # fixed_nlp.ipopt_zL_out[var] = 0\n # fixed_nlp.ipopt_zU_out[var] = 0\n # if var.has_ub() and abs(var.ub - value(var)) < config.absolute_bound_tolerance:\n # fixed_nlp.ipopt_zL_out[var] = 1\n # elif var.has_lb() and abs(value(var) - var.lb) < config.absolute_bound_tolerance:\n # fixed_nlp.ipopt_zU_out[var] = -1\n\n config.logger.info('Solving feasibility problem')\n feas_subproblem, feas_subproblem_results = self.solve_feasibility_subproblem(\n config\n )\n # TODO: do we really need this?\n if self.should_terminate:\n return\n copy_var_list_values(\n feas_subproblem.MindtPy_utils.variable_list,\n self.mip.MindtPy_utils.variable_list,\n config,\n )\n self.add_cuts(\n dual_values=dual_values,\n linearize_active=True,\n linearize_violated=True,\n cb_opt=cb_opt,\n )\n # Add a no-good cut to exclude this discrete option\n var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)\n if config.add_no_good_cuts:\n # excludes current discrete option\n add_no_good_cuts(self.mip, var_values, config, self.timing)", "def __check_objective_feasibility(self):\n pass", "def _find_coefs(self):\n\t\tlog_level = logging.getLogger().getEffectiveLevel()\n\t\tiprint = 1 if log_level == logging.DEBUG else -1\n\n\t\tinitial_coefs, boundaries = self._optimization_start_conditions()\n\t\tresult = fmin_l_bfgs_b(self._optimization_forecast,\n\t\t x0=initial_coefs, bounds=boundaries,\n\t\t approx_grad=True, iprint=iprint,\n\t\t 
epsilon=1e-2)\n\t\tself._extract_coefs(result[0])\n\t\tlogger.debug(\"Optimal coefficients found: {}\".format(self._coefs))", "def calculate_BH_matrix_elements(self):\n # first handle diagonal (site number-operator commuting) elements\n self.sum_nnm1=(self.hilbert_space.fock_basis*(self.hilbert_space.fock_basis-1)).sum(axis=1)\n self.sum_n=self.hilbert_space.fock_basis.sum(axis=1)\n self.muU=self.U*self.sum_nnm1 - self.mu*self.sum_n\n # now handle off-diagonal tunneling elements - tunneling preserves (commutes with) number\n for n in numpy.unique(self.sum_n):\n # pick out the subspace at occupancy n\n n_subspace=self.hilbert_space.fock_basis[self.sum_n==n].astype(numpy.int32)\n # difference all fock basis states by local number, sum their absolute values across sites\n # tunnel-coupled states must have one site raised and one site lowered, thus\n # the sum of absolute value of differences must be 2 - use these to mark possible\n # tunnel-coupled sites in a list of basis state i <-> basis state j , (i,j) pairs\n fock_diffs=n_subspace[:,numpy.newaxis]-n_subspace\n single_swap_subspace=(numpy.abs(fock_diffs).sum(axis=2)==2).nonzero()\n # for each candidate tunnel-coupled pair, check the connectivity graph\n # what we want is essentially a graph to graph mapping; the connection \n # of basis state i to basis state j is a lowering of site s and raising of site t\n # so potential matrix element (i,j) depends on lattice connectivity (s,t)\n # start by finding raised and lowered sites for each (i,j) basis state pair\n raised=(fock_diffs[single_swap_subspace]==1).nonzero()[1]\n lowered=(fock_diffs[single_swap_subspace]==-1).nonzero()[1]\n pdb.set_trace()\n \n self.K=numpy.empty((self.hilbert_space.fock_basis.shape[0],)*2,dtype=numpy.float64) \n pass", "def calculate_radial_dcomp_pytorch(nufftob_forw, nufftob_back, ktraj):\n dtype = nufftob_forw.scaling_coef_tensor.dtype\n device = nufftob_forw.scaling_coef_tensor.device\n\n if not nufftob_forw.norm == 'ortho':\n if not nufftob_back.norm == 'ortho':\n norm_factor = torch.prod(torch.tensor(nufftob_back.grid_size)).to(\n dtype=dtype, device=device)\n else:\n print('warning: forward/backward operators mismatched norm setting')\n norm_factor = 1\n elif not nufftob_back.norm == 'ortho':\n print('warning: forward/backward operators mismatched norm setting')\n norm_factor = 1\n else:\n norm_factor = 1\n\n # append 0s for batch, first coil, real part\n image_loc = (0, 0, 0, ) + \\\n tuple((np.array(nufftob_forw.im_size) // 2).astype(np.int))\n\n # get the size of the test signal (add batch, coil, real/imag dim)\n test_size = (1, 1, 2) + nufftob_forw.im_size\n\n test_sig = torch.ones(test_size, dtype=dtype, device=device)\n dcomps = []\n\n # get one dcomp for each batch\n threshold_levels = torch.zeros(len(ktraj), dtype=dtype, device=device)\n for batch_ind, batch_traj in enumerate(ktraj):\n # extract the signal amplitude increase from center of image\n query_point = nufftob_back(\n nufftob_forw(\n test_sig,\n om=batch_traj.unsqueeze(0)\n ),\n om=batch_traj.unsqueeze(0)\n )[image_loc] / norm_factor\n\n # use query point to get ramp intercept\n threshold_levels[batch_ind] = 1 / query_point\n\n # compute the new dcomp for the batch in batch_ind\n dcomps.append(\n torch.max(\n torch.sqrt(\n torch.sum(batch_traj[-2:, ...] 
** 2, dim=0)) * 1 / np.pi,\n threshold_levels[batch_ind]\n )\n )\n\n if isinstance(ktraj, torch.Tensor):\n dcomps = torch.stack(dcomps)\n\n return dcomps", "def cells_per_perturb(adata_here,perturbations_obs='guide',count_unassigned=False,copy=False):\n\n if copy: adata_here = adata_here.copy()\n\n #get perturbations \n perturbations=_get_perturbations(adata_here,\n perturbations_obs=perturbations_obs)\n if count_unassigned:\n perturbations=list(set(perturbations).union(['unassigned']))\n\n #find their obs \n perturbations=perturb_overlap_obs(perturbations,adata_here,list_name='perturbations')\n\n cell2perturbs=1.0*(adata_here.obs.loc[:,perturbations]>0.0)\n cells_with_single_perturb=(cell2perturbs.sum(axis=1)==1)\n cell2perturbs_single=cell2perturbs.loc[cells_with_single_perturb,perturbations]\n cell2perturbs_counts=cell2perturbs.sum(axis=0)\n cell2perturbs_single_counts=cell2perturbs_single.sum(axis=0)\n adata_here.uns['cells_per_perturb.'+perturbations_obs]=pd.DataFrame(cell2perturbs_single_counts,index=cell2perturbs_single_counts.index,columns=['Number of cells'])\n counts=adata_here.obs[perturbations_obs].value_counts()\n adata_here.uns['cells_per_perturb.'+perturbations_obs+'.incl_multi_inf']=pd.DataFrame({\"Number of cells\":counts},\n index=counts.index)\n if not count_unassigned:\n adata_here.uns['cells_per_perturb.'+perturbations_obs+'.incl_multi_inf']=adata_here.uns['cells_per_perturb.'+perturbations_obs+'.incl_multi_inf'].loc[adata_here.uns['cells_per_perturb.'+perturbations_obs+'.incl_multi_inf'].index!='unassigned',:]\n \n if copy:\n return(adata_here)", "def component_bayesfactor(self,likelihood,method='AIC',format='ndarray'):\n mposterior = self.model_posterior(likelihood,method)\n c_bf = np.empty((mposterior.shape[0],self.num_comp))\n\n for i in range(self.num_comp):\n c_bf[:,i] = np.log(mposterior[:,self.combinations[:,i]==1].sum(axis=1))-np.log(mposterior[:,self.combinations[:,i]==0].sum(axis=1))\n\n if format == 'DataFrame':\n return pd.DataFrame(data=c_bf,\n index=np.arange(c_bf.shape[0]),\n columns = self.comp_names)\n return c_bf", "def computeBindingProbabilities( self ):\n\n unboundWt = np.exp( -self.unboundEnergy )\n\n #The probability of a location being bound in the input sample.\n bgWt = np.exp( -self.bgEnergy )\n pBgBound = bgWt/(unboundWt + bgWt)\n\n #The probability of a location being bound in the ChIP sample. \n #This is the expression employed when there is only a single TF\n #capable of binding a location.\n spWt = np.exp( (-self.spEnergies + self.chemicalPotential) )\n pTFbound = spWt/(spWt + unboundWt)\n\n if len( self.secondTFintEnergies ) > 0 and len( self.indirectLocations ) == 0:\n #When there are two TFs present in the simulation, then the pTFbound\n #values computed in the earlier step are over-written. 
\n #See the Methods section in the manuscript for the justification\n #behind computing occupancies of cooperatively bound TFs in this fashion.\n spWt = np.exp( (-self.spEnergies + self.chemicalPotential) )\n secondTFwt = np.exp( -self.secondTFspEnergies + self.secondTFchemicalPotential )\n coopWt = np.exp( -self.secondTFspEnergies - self.secondTFintEnergies - self.spEnergies + self.chemicalPotential + self.secondTFchemicalPotential )\n denom = (unboundWt + spWt + secondTFwt + coopWt)\n\n pTFbound = (spWt + coopWt)/denom\n \n if len( self.indirectLocations ) > 0:\n #In the case of indirect binding, the binding energy of the second\n #TF determines the occupancy of the location and not the binding\n #energy of the target TF.\n indirectWt = np.exp( -self.secondTFspEnergies + self.secondTFchemicalPotential )\n pTFbound[ self.indirectLocations ] = indirectWt[ self.indirectLocations ]/(unboundWt + indirectWt[self.indirectLocations])\n\n self.locations.loc[:,'p_occ_chip'] = pTFbound * self.chromAccessibility\n return [pTFbound,pBgBound]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read the tags associated with video frames in the file videotags.csv.
def read_tags():
    f = open('videotags.csv')
    skip = f.readline()
    tags = defaultdict(lambda: [])
    for line in f:
        fields = line.rstrip().split(',')
        vid = int(fields[0])
        framestart = int(fields[1])
        frameend = None if len(fields[2])==0 else int(fields[2])
        frametags = set(fields[3:])
        tags[vid].append((framestart, frameend, frametags))
    return VideoTags(dict(tags))
[ "def read_features(video_name: str, directory: str) -> Tuple[np.ndarray, np.ndarray]:\n if not os.path.isfile(f'{directory}/{video_name}-feats.npy'):\n raise Exception(f'Missing features file for video {video_name} in {directory}')\n if not os.path.isfile(f'{directory}/{video_name}-tags.npy'):\n raise Exception(f'Missing tags file for video {video_name} in {directory}')\n\n features = np.load(f'{directory}/{video_name}-feats.npy')\n tags = np.load(f'{directory}/{video_name}-tags.npy')\n\n return tags, features", "def _parse_videos(self):\n with open(self._data_set, 'r') as f:\n f.readline() # skip the 1st line\n return [\n Video(i, int(size))\n for i, size in enumerate(f.readline().split())\n ]", "def getTags(filename):\n\n # Set up CSV headers\n header = ['Path', 'Name', 'Size', 'Filesystem CTime', 'Filesystem MTime', 'Title', 'Subtitle', 'Artist', 'Album',\n 'Album/Artist', 'Length (Sec)', 'Year', 'Category', 'Track Number', 'Comments', 'Publisher', 'Bitrate',\n 'Sample Rate', 'Encoding', 'Channels', 'Audio Layer']\n tags = {}\n tags['Path'] = filename\n tags['Name'] = os.path.basename(filename)\n tags['Size'] = utility.convertSize(os.path.getsize(filename))\n tags['Filesystem CTime'] = strftime('%m/%d/%Y %H:%M:%S', gmtime(os.path.getctime(filename)))\n tags['Filesystem MTime'] = strftime('%m/%d/%Y %H:%M:%S', gmtime(os.path.getmtime(filename)))\n\n # MP3 Specific metadata\n audio = mp3.MP3(filename)\n if 'TENC' in audio.keys():\n tags['Encoding'] = audio['TENC'][0]\n tags['Bitrate'] = audio.info.bitrate\n tags['Channels'] = audio.info.channels\n tags['Audio Layer'] = audio.info.layer\n tags['Length (Sec)'] = audio.info.length\n tags['Sample Rate'] = audio.info.sample_rate\n\n # ID3 embedded metadata tags\n id = id3.ID3(filename)\n if 'TPE1' in id.keys():\n tags['Artist'] = id['TPE1'][0]\n if 'TRCK' in id.keys():\n tags['Track Number'] = id['TRCK'][0]\n if 'TIT3' in id.keys():\n tags['Subtitle'] = id['TIT3'][0]\n if 'COMM::eng' in id.keys():\n tags['Comments'] = id['COMM::eng'][0]\n if 'TDRC' in id.keys():\n tags['Year'] = id['TDRC'][0]\n if 'TALB' in id.keys():\n tags['Album'] = id['TALB'][0]\n if 'TIT2' in id.keys():\n tags['Title'] = id['TIT2'][0]\n if 'TCON' in id.keys():\n tags['Category'] = id['TCON'][0]\n if 'TPE2' in id.keys():\n tags['Album/Artist'] = id['TPE2'][0]\n if 'TPUB' in id.keys():\n tags['Publisher'] = id['TPUB'][0]\n\n return tags, header", "def loadVideos(catalog):\n videosfile = cf.data_dir + 'videos-large.csv'\n input_file = csv.DictReader(open(videosfile, encoding= 'utf-8', errors='ignore'))\n for video in input_file:\n model.addVideo(catalog, video)", "def read_csv(csv_file):\n image_names = []\n tags = []\n label_list = []\n\n with open(csv_file) as f:\n lines = f.readlines()[1:]\n \n for line in lines:\n if line:\n strs = line.strip().split(',')\n image_names.append(strs[0])\n tags.append(strs[1])\n return image_names, tags", "def read_tags(i, dataset):\n filepath = 'tags_train/' if dataset == TRAIN else 'tags_test/'\n filepath += str(i) + \".txt\"\n with open(filepath) as f:\n lines = f.read().splitlines()\n lines = list(filter(None, lines))\n imgtags = []\n for tag in lines:\n imgtags.append(tuple(tag.split(':')))\n return imgtags", "def extractVideoViews(record):\n parts = list(csv.reader([record.encode('utf-8')]))\n country = parts[0][17]\n video = parts[0][0]\n views = parts[0][8]\n return (country + ', ' + video, views)", "def tags():\r\n tags_csv_path = (\r\n \"application/seeds/Tags.csv\"\r\n ) # os.path.join(BASE_DIR, 'FroshGroups.csv')\r\n with 
open(tags_csv_path, \"r\") as tags_csv:\r\n for i in tags_csv:\r\n if i[-1] == \"\\n\":\r\n record = Tag(tag_name=i[:-1])\r\n else:\r\n record = Tag(tag_name=i)\r\n db.session.add(record)\r\n db.session.commit()", "def parse_frame_info_file(video_frames_info_path):\n video_frame_info = dict()\n with open(video_frames_info_path) as f:\n reader = csv.reader(f)\n next(reader, None) # Skip headers\n for row in reader:\n video_frame_info[row[0]] = (float(row[1]), int(row[2]))\n return video_frame_info", "def make_vid_list():\n tests = []\n\n for i in range(1, 5):\n with open(f'data/vid_props/test{i}.csv') as f:\n test = [Video.from_text(l) for l in f.readlines()[1:]]\n tests.append(test)\n\n return tests", "def read_csv(csv_path=csv_path):\n movies = []\n file = open(csv_path, \"r\")\n lines = file.read().split('\\n')[1:-1]\n for line in lines:\n line = line.split(\"\\t\")\n movie = {}\n movie[\"name\"] = line[1]\n movie[\"year\"] = line[2]\n movies += [movie]\n return movies", "def read_data():\n users = pd.read_csv('./data/20150701094451-User_attributes.csv')\n behaviors = pd.read_csv('./data/20150701094451-Behavior_training.csv')\n videos_matrix = pd.read_csv('./data/videos_similarity_matrix.csv')\n # video_id and its min date_hour\n videos = behaviors.groupby('video_id').agg({'date_hour': np.min})\n videos['video_id'] = videos.index\n videos = videos.reset_index(drop=True)\n # Remove unused columns\n behaviors = behaviors.drop(['date_hour','mv_ratio'], 1)\n return (behaviors, users, videos, videos_matrix)", "def csv(self, outfile=None):\n assert self.load().isloaded()\n csv = [(self.filename(), # video filename\n k, # frame number (zero indexed)\n d.category(), d.shortlabel(), # track category and shortlabel (displayed in caption)\n ';'.join([self.activities(id=aid).category() for aid in tolist(d.attributes['activityid'])] if 'activityid' in d.attributes else ''), # semicolon separated activity category associated with track\n d.xmin(), d.ymin(), d.width(), d.height(), # bounding box\n d.attributes['trackid'], # globally unique track ID\n ';'.join([aid for aid in tolist(d.attributes['activityid'])] if 'activityid' in d.attributes else '')) # semicolon separated activity ID associated with track\n for (k,im) in enumerate(self) for d in im.objects()]\n csv = [('# video_filename', 'frame_number', 'object_category', 'object_shortlabel', 'activity categories(;)', 'xmin', 'ymin', 'width', 'height', 'track_id', 'activity_ids(;)')] + csv\n return writecsv(csv, outfile) if outfile is not None else csv", "def yuv_import(video_path, startfrm, nfs,\\\n height_frame=0, width_frame=0, bar=False, opt_clear=False):\n fp = open(video_path, 'rb')\n\n ## retrieve resolution info from video path\n if height_frame == 0:\n res = video_path.split(\"-\")[2].split(\"_\")[0]\n width_frame = int(res.split(\"x\")[0])\n height_frame = int(res.split(\"x\")[1])\n\n ## target at startfrm\n blk_size = int(height_frame * width_frame * 3 / 2)\n fp.seek(blk_size * startfrm, 0)\n\n d0 = height_frame // 2\n d1 = width_frame // 2\n\n ## init\n y_frame = []\n y_batch = []\n u_frame = []\n u_batch = []\n v_frame = []\n v_batch = []\n\n ## extract\n y_size = height_frame * width_frame\n u_size = d0 * d1\n v_size = d0 * d1\n for ite_frame in range(nfs):\n\n y_frame = [ord(fp.read(1)) for i in range(y_size)]\n y_frame = np.array(y_frame, dtype=np.uint8).reshape((height_frame, width_frame))\n y_batch.append(y_frame)\n\n u_frame = [ord(fp.read(1)) for i in range(u_size)]\n u_frame = np.array(u_frame, dtype=np.uint8).reshape((d0, d1))\n 
u_batch.append(u_frame)\n \n v_frame = [ord(fp.read(1)) for i in range(v_size)]\n v_frame = np.array(v_frame, dtype=np.uint8).reshape((d0, d1))\n v_batch.append(v_frame)\n\n if bar:\n print(\"\\r<%d, %d>\" % (ite_frame, nfs-1), end=\"\", flush=True)\n\n fp.close()\n\n if opt_clear:\n print(\"\\r\"+20*\" \", end=\"\\r\", flush=True)\n\n y_batch = np.array(y_batch)\n u_batch = np.array(u_batch)\n v_batch = np.array(v_batch)\n return y_batch, u_batch, v_batch", "def parse_csv(self, data_dir):\n metacsvfilepath = os.path.join(data_dir, self.metadataFile)\n #print(metacsvfilepath)\n with open(metacsvfilepath, 'r', newline='') as f:\n reader = csv.reader(f)\n #parsed_recordings = list(reader, delimiter=',')[1:]\n ids = []\n labels = []\n for line in reader:\n # line is a list of ['id', 'dataset', 'label']\n rec_id, label = line[0], line[-1]\n ids.append(rec_id)\n labels.append(label)\n\n return ids, labels", "def read_frames(video):\n v = cv2.VideoCapture(video)\n frames = []\n success, image = v.read()\n while success:\n success, image = v.read()\n if success:\n frames.append(hist(image))\n return frames", "def tags(self):\n\n return self.video_data.get('tags')", "def load_data(f):\n import csv\n with open(f, newline='') as csvfile:\n ecgreader = csv.reader(csvfile, delimiter=' ')\n time, voltage, high_voltages = organize_data(ecgreader, f)\n return time, voltage, high_voltages", "def load_annotations(self):\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n video_dir = line_split[0]\n label = int(line_split[1])\n num_clips = int(line_split[2])\n positive_clip_inds = [int(ind) for ind in line_split[3:]]\n\n if self.data_prefix is not None:\n video_dir = osp.join(self.data_prefix, video_dir)\n video_infos.append(\n dict(\n video_dir=video_dir,\n label=label,\n num_clips=num_clips,\n positive_clip_inds=positive_clip_inds))\n return video_infos", "def get_video_params_from_xml(filepath):\n parser = etree.XMLParser(encoding=CHARSET)\n tree = etree.parse(filepath, parser=parser)\n video_el = tree.getroot()\n frames_els = video_el.getchildren()\n frames = []\n fps = int(video_el.get(FPS_TAG))\n numpx = video_el.get(NUMPX_TAG)\n\n with LogCont(\"Import frames from XML\"):\n for vframe_el in frames_els:\n descriptor_el = vframe_el[0]\n hists = []\n for hist_el in descriptor_el:\n hist = [int(x) for x in hist_el.text.split(hlp.VAL_SEP)]\n hists.append(hist)\n descriptor = fd.FeatureDescriptor(hists)\n\n timestamp_str = vframe_el.get(TIMESTAMP_TAG)\n timestamp_obj = lt.Timestamp.from_str(timestamp_str)\n vframe = fd.VFrame(timestamp=timestamp_obj, descriptor=descriptor)\n frames.append(vframe)\n\n return fps, frames, numpx" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find all tags associated with a given video frame.
def frame_tags(self, vid, frame):
    if not self.tags.has_key(vid):
        raise Exception("Video ID not found.")
    v = self.tags[vid]
    L = []
    for interval in v:
        if frame >= interval[0] and frame <= interval[1]:
            L += interval[2]
    return set(L)
[ "def search_videos_tag(self, video_tag):\n results = []\n for video in self._video_library.get_all_videos():\n for tag in video.tags:\n if video_tag.lower() == tag.lower():\n if not video.flag:\n results.append(video)\n self.display_search(results, video_tag)", "def tags(self):\n\n return self.video_data.get('tags')", "def search_videos_tag(self, video_tag: str) -> None:\n videos = self._get_sorted_videos()\n videos = [v for v in videos if video_tag.lower() in v.tags]\n\n self._print_search_results(video_tag, videos)", "def tags(self) -> List:", "def getFriendsPhotoVideoTags(self):\n return self.base.get(\"friends_photo_video_tags\", [])", "def findall(self, tag: str) -> Iterable[\"Element\"]:", "def get_tag_list_for_movie(self, movie):\n movie_specific_data = self.genre_data[self.genre_data[\"movieid\"] == movie]\n tags_list = movie_specific_data[\"tag_string\"].unique()\n\n return tags_list", "def get_by_name(self, tag_name: str, default: Optional[Any] = None) -> List[VideoTag]:\n # super().get_by_name(tag_name, default)\n res = []\n for tag in self:\n if tag.name == tag_name:\n res.append(tag)\n return res", "def keywords(self) -> list:\n try:\n return self._current_video()[\"snippet\"][\"tags\"]\n except KeyError as e:\n return []", "def _tagIDs(self):\n session = yield self.session\n response = yield threads.deferToThread(\n session.get,\n 'https://{host}/rest/com/vmware/cis/tagging/tag'.format(host=self.host)\n )\n output = []\n try:\n output = response.json().get('value')\n except Exception as e:\n logging.error('Unable to fetch tag IDs from vcenter {} ({})'.format(self.host, e))\n\n return output", "def get_movies_for_tag(self, tag):\n tag_specific_data = self.genre_data[self.genre_data[\"tag_string\"] == tag]\n movies_list = tag_specific_data[\"movieid\"].unique()\n\n return movies_list", "def list_filenames_by_tag(absolute_directory, tags):\n matches = []\n\n for root, _, filenames in os.walk(absolute_directory):\n for filename in filenames:\n absolute_path_to_file = os.path.join(root, filename)\n\n with open(absolute_path_to_file, \"r\") as f:\n for line in f:\n if line.strip().startswith(\"tags:\"):\n if match_all(line, tags):\n matches.append(filename)\n break\n\n return matches", "def tags():\n qs = models.ConferenceTaggedItem.objects\\\n .all()\\\n .select_related('tag')\n\n tags = defaultdict(set)\n for item in qs:\n tags[item.tag].add((item.content_type_id, item.object_id))\n\n # Add tags which are not currently in use\n qs = models.ConferenceTag.objects.all()\n for tag in qs:\n if tag not in tags:\n tags[tag] = set()\n\n return dict(tags)", "def find_all_by(tag_name, attribs, ctx):\n return ctx.find_all(tag_name, attribs)", "def get_volumes_with_tag(self, tag):\n matching_vols = []\n for vol in self.cinder_clnt.volumes.list():\n for k, v in tag.items():\n if vol.metadata.get(k) == v:\n matching_vols.append(vol)\n\n return matching_vols", "def getAllTags():\n # Create a diary database object.\n db = DiaryDatabaseWrapper.DiaryDatabaseWrapper()\n tagRows = db.selectFromTable('tags',('name',),'')\n db.close()\n return [element[0] for element in tagRows]", "def tags(self):\n return [t['tag'] for t in database.execute(\n 'SELECT tag FROM tags WHERE post_id = ?', [self['id']])]", "def track(self):\n empty = None\n frames = []\n frames_bw = []\n for i, frame in enumerate(self.video):\n if empty is None:\n empty = np.zeros(frame.shape, np.uint8)\n frame_bw = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frames.append(frame)\n frames_bw.append(frame_bw)\n frames_bw = 
np.array(frames_bw)\n avg = frames_bw.mean(axis=0)\n avg_blur = cv2.GaussianBlur(avg, (0, 0), 3)\n for idx in range(len(frames_bw)):\n frame = frames_bw[idx]\n sub = (avg_blur - 30) - cv2.GaussianBlur(frame, (0, 0), 3)\n # iterate over frames in video\n if np.array_equal(empty, sub):\n # skip empty frames\n self.counter += 1\n self.skipped_frames += 1\n print(self.skipped_frames)\n continue\n # create mask with the detected spots from the frame\n mask = cv2.inRange(sub, 1, 256)\n mask = cv2.dilate(mask, None, iterations=1)\n mask = cv2.erode(mask, None, iterations=1)\n # find contours in the video\n contours = cv2.findContours(mask.copy(),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n if len(contours) > 0:\n if self.previous_frame is None:\n # for the first frame in the video, just find the largest contour in the mask\n c = max(contours, key=cv2.contourArea)\n # compute center point\n m = cv2.moments(c)\n try:\n center = (int(m[\"m10\"] / m[\"m00\"]), int(m[\"m01\"] / m[\"m00\"]))\n except ZeroDivisionError:\n continue\n # add point to the points dictionary\n self.pts[self.counter] = Point(center, cv2.contourArea(c), self.counter)\n self.previous_frame = self.counter\n else:\n # make a list of possible spots and choose the one with the highest score\n candidate_pts = []\n for c in contours:\n m = cv2.moments(c)\n try:\n center = (int(m[\"m10\"] / m[\"m00\"]), int(m[\"m01\"] / m[\"m00\"]))\n except ZeroDivisionError:\n continue\n candidate_pts.append(Point(center,\n cv2.contourArea(c),\n self.counter,\n self.pts[self.previous_frame]))\n if len(candidate_pts) >= 1:\n # add the best spot to the points dictionary\n c = sorted(candidate_pts)[-1]\n self.pts[self.counter] = c\n self.previous_frame = self.counter\n else:\n self.skipped_frames += 1\n self.counter += 1\n\n # after finding all spots, split tracks with gaps of more than 25 frames\n prev_key = 0\n curr_track = 0\n for key in sorted(self.pts):\n if (key - prev_key) > 25:\n self.tracks.append([])\n curr_track += 1\n self.tracks[curr_track].append(self.pts[key])\n prev_key = key\n\n # delete tracks with less than 10 points\n tracks = [t for t in self.tracks if len(t) >= 10]\n self.tracks = tracks\n if out_path:\n for track in self.tracks:\n pts = deque(maxlen=300)\n for pt in track:\n for idx in range(1, len(pts)):\n if pts[idx - 1] is None or pts[idx] is None:\n continue\n cv2.line(self.video[pt.frame], pts[idx - 1].coords, pts[idx].coords, 255, 1)\n pts.append(pt)\n tifffile.imsave(out_path, self.video)\n\n return self.tracks", "async def get_tag_members(self, tag: str) -> List[str]:\n assert self._session is not None\n\n url = urls.TAGS / \"tag\" / f\"{tag}/\"\n\n try:\n async with self._session.get(\n url,\n timeout=self._timeout,\n headers={\"Authorization\": self._access_token},\n ) as resp:\n response = await resp.json()\n if resp.status != 200:\n raise ClientAPIError(resp.method, resp.url, resp.status, response)\n\n return response[\"instruments\"]\n except (aiohttp.ClientError, asyncio.TimeoutError) as e:\n raise ClientRequestError(\"GET\", url) from e", "def tags(self):\n TAG_RE = r'\\#\\w+\\d*'\n matches = re.findall(TAG_RE, self.title)\n tags = []\n for m in matches:\n tags.append(m[1:])\n return tags" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The set of all tags as a sorted list.
def all_tags(self):
    t = list(set.union(*[L[2] for v in self.tags.values() for L in v]))
    t.sort()
    return t
[ "def _list(self):\n with self._treant._read:\n tags = self._treant._state['tags']\n\n tags.sort()\n return tags", "def tags(self) -> List:", "def get_tags(self):\n\n return sorted(self.tags, key=lambda tag: tag.commit.committed_datetime, reverse=True)", "def get_tags(self):\n return self._tag_list.keys()", "def sorted_tags(self):\n return ','.join(sorted(self.tags.split(',')))", "def get_all_tags(self):\n return self.scenario.get_all_tags()", "def get_tagged_items (self, tags):\n if type(tags) is not list:\n tags = [tags]\n\n all_items = []\n for tag in tags:\n if tag in self.tags:\n if not all_items:\n all_items = self.tags[tag].keys()\n else:\n # set intersection\n all_items = [item for item in all_items if item in self.tags[tag].keys()]\n #\n #\n # \n return all_items", "def get_tags(self):\n return self.tags.order_by(article_tag_mapping.c.created_at.asc()).all()", "def serialize_tags(self):\n return [i.name for i in self.tags.all()]", "def get_all_tags():\n return Tag.query.all()", "def tag_names(self):\n return self.__tag_names", "def getAllTags():\n # Create a diary database object.\n db = DiaryDatabaseWrapper.DiaryDatabaseWrapper()\n tagRows = db.selectFromTable('tags',('name',),'')\n db.close()\n return [element[0] for element in tagRows]", "def list_git_tags():\n tags = (\n check_output(\n [\n \"git\",\n \"tag\",\n ]\n )\n .decode(\"utf-8\")\n .split()\n )\n return set(tags)", "def tags(self):\n return self._named_trees('tag')", "def tags(self):\n return [t['tag'] for t in database.execute(\n 'SELECT tag FROM tags WHERE post_id = ?', [self['id']])]", "def tag_list(self):\n\n print u\"Retrieving unread entries...\"\n self._retrieve_entries(starred=False)\n subs_list = self._subscription_list()\n\n feeds = {tag: [(self._count_unread(feed), feed[u\"title\"]) for feed in subs_list[tag]]\n for tag in subs_list if tag != u\"<Untagged>\"}\n untagged = {tag: [(self._count_unread(feed), feed[u\"title\"]) for feed in subs_list[tag]]\n for tag in subs_list if tag == u\"<Untagged>\"}\n\n sorted_feeds = sorted(feeds.items())\n sorted_feeds.extend(untagged.items())\n\n return OrderedDict(sorted_feeds)", "def get_all_tags(self):\n\n verbose = self.options.verbose\n gh = self.github\n user = self.options.user\n repo = self.options.project\n if verbose:\n print(\"Fetching tags...\")\n\n tags = []\n page = 1\n while page > 0:\n if verbose > 2:\n print(\".\", end=\"\")\n rc, data = gh.repos[user][repo].tags.get(\n page=page, per_page=PER_PAGE_NUMBER)\n if rc == 200:\n tags.extend(data)\n else:\n self.raise_GitHubError(rc, data, gh.getheaders())\n page = NextPage(gh)\n if verbose > 2:\n print(\".\")\n\n if len(tags) == 0:\n if not self.options.quiet:\n print(\"Warning: Can't find any tags in repo. 
Make sure, that \"\n \"you push tags to remote repo via 'git push --tags'\")\n exit()\n if verbose > 1:\n print(\"Found {} tag(s)\".format(len(tags)))\n return tags", "def list_every_unique_recipe_tag():\n data = rs.get_recipes_from_file()\n tag_list = []\n for recipe in data:\n for tag in recipe['tags']:\n if tag.lower() not in tag_list:\n tag_list.append(tag.lower())\n tag_list.sort()\n print len(tag_list), tag_list\n return True", "def get_unique_tags(soup):\r\n tags = [tag.name for tag in soup.find_all()]\r\n return list(set(tags))", "def get_tags(self, language=None):\n\n # get tagged post\n entries = self\n if language:\n entries = entries.filter_by_language(language)\n entries = entries.distinct()\n if not entries:\n return []\n kwargs = TaggedItem.bulk_lookup_kwargs(entries)\n\n # aggregate and sort\n counted_tags = dict(TaggedItem.objects\n .filter(**kwargs)\n .values('tag')\n .annotate(count=models.Count('tag'))\n .values_list('tag', 'count'))\n\n # and finally get the results\n tags = Tag.objects.filter(pk__in=counted_tags.keys())\n for tag in tags:\n tag.count = counted_tags[tag.pk]\n return sorted(tags, key=lambda x: -x.count)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test correct message composing.
def test_compose_message():
    message = b'message'
    topic = b'test topic'
    # Basic composing
    for serializer in AgentAddressSerializer.SERIALIZER_SIMPLE:
        serializer = AgentAddressSerializer(serializer)
        assert compose_message(message, topic, serializer) == topic + message
    for serializer in AgentAddressSerializer.SERIALIZER_SEPARATOR:
        serializer = AgentAddressSerializer(serializer)
        assert (
            compose_message(message, topic, serializer)
            == topic + TOPIC_SEPARATOR + message
        )
    # Raise with wrong serializer
    with pytest.raises(Exception):
        compose_message(message, topic, 'foo')
[ "def test_message_exactly_buffsize(self):\n buf_message = \"It's 16 bytes eh\"\n self.send_message(buf_message)\n actual_sent, actual_reply = self.process_log()\n expected_sent = self.sending_msg.format(buf_message)\n self.assertEqual(expected_sent, actual_sent)\n expected_reply = self.received_msg.format(buf_message)\n self.assertEqual(expected_reply, actual_reply)", "def test_createMsg(self):\n msg = b'12345'\n assert(self.radio.createMsg(msg) == msg)", "def test_valid_message_build(self):\n message = pyspamsteg.create(test_pt00)\n self.assertEqual(pyspamsteg.read(message), test_pt00)", "def test_handle_message_completed2(self):\n\n msg = Message(name='completed', target='fake-id', origin='fake-id_1')\n self.fexpr.state = 'active'\n newmsg = Message(name='completed', target='', origin='fake-id')\n with patch('bureaucrat.flowexpression.Message') as MockMessage:\n MockMessage.return_value = newmsg\n result = self.fexpr.handle_message(self.ch, msg)\n self.assertEqual(result, 'consumed')\n self.assertEqual(self.fexpr.state, 'completed')\n MockMessage.assert_called_once_with(name='completed', target='',\n origin='fake-id')\n self.ch.send.assert_called_once_with(newmsg)", "def test_parseSerialMsg(self):\n # Check rejection of message with invalid CRC\n self.msgParser.parseSerialMsg(truthSLIPMsg, 0)\n assert(self.msgParser.msg.msgFound == True) # slip msg found\n assert(self.msgParser.msg.msgEnd != 1) # message end found\n assert(self.msgParser.parsedMsgs == []) # message rejected \n\n # Check acceptance of message with valid CRC \n crc = self.msgParser.msg.crc(testMsg)\n slipMsg = SLIPMsg(256)\n slipMsg.encodeMsg(testMsg) \n self.msgParser.parseSerialMsg(slipMsg.encoded, 0)\n assert(self.msgParser.msg.msgFound == True) # slip msg found\n assert(self.msgParser.msg.msgEnd != 1) # message end found\n assert(self.msgParser.parsedMsgs[0] == testMsg) # message accepted \n \n # Check that proper message end position is returned\n self.msgParser.parsedMsgs = []\n paddedMsg = slipMsg.encoded + b'989898'\n msgEnd = self.msgParser.parseSerialMsg(paddedMsg, 0)\n assert(self.msgParser.parsedMsgs[0] == testMsg)\n assert(msgEnd == len(slipMsg.encoded)-1)", "def test_readMsgs(self):\n # Send message\n msgBytes = b'12345'\n self.serialComm.sendMsg(msgBytes)\n time.sleep(0.1)\n \n # Read messages\n self.serialComm.readMsgs()\n assert(len(self.serialComm.msgParser.parsedMsgs) == 1)\n assert(self.serialComm.msgParser.parsedMsgs[0] == msgBytes) # confirm parsed message matches original", "def testFormattedEmptyMessage(self):\n\t\tnotNeeded=self.fixture.read(1) # Empty the port.\n\t\tself.assertEqual(self.fixture.read(1),b'',\n\t\t\t\t\t\tmsg='Need an empty buffer before running this test case.')\n\t\t# port.inWaiting will be 0, so grabPortOutput will just proceed to return\n\t\t# the input outputBuffer and the default (empty) output.\n\t\tformattedOutput=sm.commsInterface.grabPortOutput(self.fixture,'DummyBuff','formatted')\n\t\tself.assertEqual(formattedOutput[0],'',msg='Expected empty string as output.')\n\t\tself.assertEqual(formattedOutput[1],'DummyBuff',msg='Expected unchanged DummyBuff.')\n\t\t# Check message length.\n\t\tself.assertEqual(len(formattedOutput[0]),0,msg='Expected zero bytes')\n\t\tself.assertEqual(len(formattedOutput[1]),9,msg='Expected nine bytes')\n\t\t# Should have no warnings.\n\t\tself.assertEqual(formattedOutput[2],{},msg='Expected empty warning dict.')\n\t\t# The port should be empty now.\n\t\tself.assertEqual(self.fixture.read(1),b'',msg='Expected empty buffer after the test.')", 
"def test_msg_repr(self):\n\n self.assertEqual(repr(self.msg), f\"<Message #{self.msg.id} @{self.msg.timestamp}>\")", "def test_msg_equality(self):\n msg1 = Message('note_on', channel=1, note=2, velocity=3)\n msg2 = Message('note_on', channel=1, note=2, velocity=3)\n\n self.assertTrue(msg1 == msg2)", "def test_parseMsgs(self):\n # Test parsing of messages\n msgBytes = b'ABCDEF'\n self.serialComm.radio.bufferRxMsg(msgBytes, False) # put test bytes into radio rx buffer\n self.serialComm.parseMsgs()\n assert(len(self.serialComm.msgParser.parsedMsgs) == 1)\n assert(self.serialComm.msgParser.parsedMsgs[0] == b'ABCDEF')\n\n # Check that radio buffer was cleared\n assert(len(self.serialComm.radio.getRxBytes()) == 0)", "def test_handle_message_response(self):\n\n msg = Message(name='response', target='fake-id_0', origin='fake-id_0',\n payload={\"status\": \"done\"})\n self.fexpr.state = 'active'\n self.fexpr.children[0].state = 'active'\n newmsg = Message(name='completed', target='fake-id',\n origin='fake-id_0')\n with patch('bureaucrat.flowexpression.Message') as MockMessage:\n MockMessage.return_value = newmsg\n result = self.fexpr.handle_message(self.ch, msg)\n self.assertEqual(result, 'consumed')\n self.assertEqual(self.fexpr.state, 'active')\n MockMessage.assert_called_once_with(name='completed',\n target='fake-id',\n origin='fake-id_0')\n self.ch.send.assert_called_once_with(newmsg)", "def test_valid_message():\n id_ = '12345'\n\n msg = Message({'@type': TEST_TYPE, '@id': id_})\n assert msg.type == TEST_TYPE\n assert msg.id == id_\n assert msg.doc_uri == 'test_type/'\n assert msg.protocol == 'protocol'\n assert msg.version == '1.0'\n assert msg.normalized_version == '1.0.0'\n assert msg.name == 'test'\n assert msg.version_info == Semver(1, 0, 0)", "def test_handle_message_completed1(self):\n\n msg = Message(name='completed', target='fake-id', origin='fake-id_0')\n self.fexpr.state = 'active'\n newmsg = Message(name='start', target='fake-id_1', origin='fake-id')\n with patch('bureaucrat.flowexpression.Message') as MockMessage:\n MockMessage.return_value = newmsg\n result = self.fexpr.handle_message(self.ch, msg)\n self.assertEqual(result, 'consumed')\n self.assertEqual(self.fexpr.state, 'active')\n MockMessage.assert_called_once_with(name='start',\n target='fake-id_1',\n origin='fake-id')\n self.ch.send.assert_called_once_with(newmsg)", "def test_type(self):\n assert isinstance(self.message, Message)\n assert type(self.message).__name__ == 'FooEvent'", "def test_handle_message_start(self):\n\n msg = Message(name='start', target='fake-id', origin='')\n newmsg = Message(name='start', target='fake-id_0', origin='fake-id')\n self.fexpr.state = 'ready'\n with patch('bureaucrat.flowexpression.Message') as MockMessage:\n MockMessage.return_value = newmsg\n result = self.fexpr.handle_message(self.ch, msg)\n self.assertEqual(result, 'consumed')\n self.assertEqual(self.fexpr.state, 'active')\n MockMessage.assert_called_once_with(name='start',\n target='fake-id_0',\n origin='fake-id')\n self.ch.send.assert_called_once_with(newmsg)", "def test_message_equal(self):\n message1 = Message(\"1\", \"2\", \"3\", \"4\", \"5\", \"MsgId\")\n message2 = Message(\"1\", \"2\", \"3\", \"4\", \"5\", \"MsgId\")\n self.assertTrue(message1 == message2)", "def test_verify_message_format(self):\n\n def message_assert(message):\n fields = [('publisher_id', 'publisher_id'),\n ('event_type', 'event_type'),\n ('priority', 'WARN'),\n ('payload', dict(a=3))]\n for k, v in fields:\n self.assertEqual(message[k], v)\n 
self.assertTrue(len(message['message_id']) > 0)\n self.assertTrue(len(message['timestamp']) > 0)\n\n self.stubs.Set(nova.notifier.no_op_notifier, 'notify',\n message_assert)\n notify('publisher_id', 'event_type',\n nova.notifier.api.WARN, dict(a=3))", "def test_publish_message(self):\n pass", "def test_encodeMsg(self):\n slipMsg = SLIPMsg(256)\n slipMsg.encodeMsg(testMsg)\n encodedMsg = self.msgParser.encodeMsg(testMsg)\n assert(encodedMsg == slipMsg.encoded)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple request-reply pattern between two agents with different serializations.
def test_reqrep(nsproxy, serializer, message, response):
    def rep_handler(agent, message):
        return response

    a0 = run_agent('a0')
    a1 = run_agent('a1')
    addr = a0.bind('REP', 'reply', rep_handler, serializer=serializer)
    a1.connect(addr, 'request')
    assert a1.send_recv('request', message) == response
[ "async def test_quickresponse(self):\n actions = [\n ('id1', 'Action 1'),\n ('id2', 'Action 2'),\n ]\n self.clients[0]['xep_0439'].ask_for_actions(\n self.clients[1].boundjid.full,\n \"Action 1 or 2 ?\",\n actions\n )\n msg = await self.clients[1].wait_until('action_received')\n actions_recv = [\n (st['id'], st['label']) for st in msg if isinstance(st, stanza.Action)\n ]\n self.assertEqual(\n actions,\n actions_recv,\n )\n\n reply = self.clients[1].make_message(\n mto=self.clients[0].boundjid.full\n )\n reply['action_selected']['id'] = 'id1'\n reply.send()\n\n reply_recv = await self.clients[0].wait_until('action_selected')\n self.assertEqual(\n reply_recv['action_selected']['id'],\n 'id1',\n )\n\n self.clients[0]['xep_0439'].ask_for_response(\n self.clients[1].boundjid.full,\n \"Reply with action 1 or 2 (id1/id2) ?\",\n actions\n )\n msg = await self.clients[1].wait_until('responses_received')", "def getReplyResults():", "def createReply(title, text, REQUEST, RESPONSE):", "def test_acceptor(self):\n # Also a regression test for #120\n # C-ECHO-RQ\n # 80 total length\n echo_rq = (\n b\"\\x04\\x00\\x00\\x00\\x00\\x4a\" # P-DATA-TF 74\n b\"\\x00\\x00\\x00\\x46\\x01\" # PDV Item 70\n b\"\\x03\" # PDV: 2 -> 69\n b\"\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x42\\x00\\x00\\x00\" # 12 Command Group Length\n b\"\\x00\\x00\\x02\\x00\\x12\\x00\\x00\\x00\\x31\\x2e\\x32\\x2e\\x38\"\n b\"\\x34\\x30\\x2e\\x31\\x30\\x30\\x30\\x38\\x2e\\x31\\x2e\\x31\\x00\" # 26\n b\"\\x00\\x00\\x00\\x01\\x02\\x00\\x00\\x00\\x30\\x00\" # 10 Command Field\n b\"\\x00\\x00\\x10\\x01\\x02\\x00\\x00\\x00\\x01\\x00\" # 10 Message ID\n b\"\\x00\\x00\\x00\\x08\\x02\\x00\\x00\\x00\\x01\\x01\" # 10 Command Data Set Type\n )\n\n # Send associate request then c-echo requests then release request\n commands = [\n (\"send\", a_associate_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", a_release_rq),\n (\"exit\", None),\n ]\n self.scp = scp = self.start_server(commands)\n\n assoc, fsm = self.get_acceptor_assoc()\n assoc.start()\n\n for ii in range(len(commands) - 1):\n scp.step()\n\n while assoc.dul.is_alive():\n time.sleep(0.001)\n\n scp.step()\n scp.shutdown()\n\n assert [\n (\"Sta1\", \"Evt5\", \"AE-5\"),\n (\"Sta2\", \"Evt6\", \"AE-6\"),\n (\"Sta3\", \"Evt7\", \"AE-7\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", 
\"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt12\", \"AR-2\"),\n (\"Sta8\", \"Evt14\", \"AR-4\"),\n (\"Sta13\", \"Evt17\", \"AR-5\"),\n ] == fsm._changes[:30]", "def test_reqrep_raw_zmq_outside(nsproxy):\n # Create an osBrain agent that will receive the message\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a1.bind(\n 'REP', transport='tcp', handler=echo_handler, serializer='raw'\n )\n\n # Create a raw ZeroMQ REQ socket\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))\n\n # Send the message\n message = b'Hello world'\n socket.send(message)\n assert socket.recv() == message\n\n socket.close()\n context.destroy()", "def setreply(self, msg):\n return CONTINUE", "def test_parse_several_request_with_the_same_parser(self): \n \n message = \"begin ims1.0\\nmsg_type request \\nmsg_id ex042 \\ne-mail foo_bar.a.vb.bar@venus.com \\ntime 1999/07/12 to 1999/07/13 \\nbull_type idc_sel3\\nbulletin ims1.0\\nstop\"\n\n parser = IMSParser()\n \n result = parser.parse(message)\n \n #print(\"\\nresult = %s\\n\" %(result))\n \n # check mandatory fields\n self.assertEqual(result['MSGFORMAT'],'ims1.0')\n self.assertEqual(result['MSGTYPE'],'request')\n self.assertEqual(result['MSGID'],'ex042')\n self.assertEqual(result['TARGET'],'EMAIL')\n self.assertEqual(result['EMAILADDR'],'foo_bar.a.vb.bar@venus.com')\n \n # optional for this request\n self.assertFalse(result.has_key('SOURCE'))\n \n # product_1\n self.assertTrue(result.has_key('PRODUCT_1'))\n \n self.assertEqual(result['PRODUCT_1'], {'FORMAT': 'ims1.0', 'STARTDATE': '1999/07/12', 'BULLTYPE': 'idc_sel3', 'ENDDATE': '1999/07/13', 'TYPE': 'BULLETIN'})\n \n # first message ok, get the second one\n \n message_1 = \"begin ims1.0\\nmsg_type request \\nmsg_id ex042 myndc \\ne-mail foo.bar@pluto.com \\ntime 2005/06/01 to 2006/07/01 \\nbull_type idc_reb\\nmag 3.5 to 5.0\\ndepth to 30\\nlat -30 to -20\\nlon -180 to -140\\nbulletin ims1.0\\nlat 75 to 79\\nlon 110 to 140\\nbulletin ims2.0:cm6\\nstop\"\n \n result1 = parser.parse(message_1)\n \n # check mandatory fields\n self.assertEqual(result1['MSGFORMAT'],'ims1.0')\n self.assertEqual(result1['MSGTYPE'],'request')\n self.assertEqual(result1['MSGID'],'ex042')\n self.assertEqual(result['TARGET'],'EMAIL')\n self.assertEqual(result1['EMAILADDR'],'foo.bar@pluto.com')\n \n # optional for this request\n self.assertEqual(result1['SOURCE'],'myndc')\n \n # product_1\n self.assertTrue(result1.has_key('PRODUCT_1'))\n \n self.assertEqual(result1['PRODUCT_1'], {'ENDLON': '-140', 'STARTDATE': '2005/06/01', 'ENDDATE': '2006/07/01', 'MAG': {'START': '3.5', 'END': '5.0'}, 'DEPTH': {'START': 'MIN', 'END': '30'}, 'FORMAT': 'ims1.0', 'ENDLAT': '-20', 'STARTLAT': '-30', 'BULLTYPE': 'idc_reb', 'STARTLON': '-180', 'TYPE': 'BULLETIN'})\n \n # product_2\n self.assertTrue(result1.has_key('PRODUCT_2'))\n \n self.assertEqual(result1['PRODUCT_2'], {'STARTDATE': '2005/06/01', 'ENDDATE': '2006/07/01', 'FORMAT': 'ims2.0', 'ENDLAT': '79', 'SUBFORMAT': 'cm6', 'STARTLAT': '75', 'DEPTH': {'START': 'MIN', 'END': '30'}, 'BULLTYPE': 'idc_reb', 'MAG': {'START': '3.5', 'END': '5.0'}, 'ENDLON': '140', 'STARTLON': '110', 'TYPE': 'BULLETIN'})", "def test_standard_requests(self):\n get_msgs = self.client.message_recorder(\n blacklist=self.BLACKLIST, replies=True)\n nomid_req = partial(self.client.blocking_request, use_mid=False)\n nomid_req(katcp.Message.request(\"watchdog\"))\n nomid_req(katcp.Message.request(\"restart\"))\n 
nomid_req(katcp.Message.request(\"log-level\"))\n nomid_req(katcp.Message.request(\"log-level\", \"trace\"))\n nomid_req(katcp.Message.request(\"log-level\", \"unknown\"))\n nomid_req(katcp.Message.request(\"help\"))\n nomid_req(katcp.Message.request(\"help\", \"watchdog\"))\n nomid_req(katcp.Message.request(\"help\", \"unknown-request\"))\n nomid_req(katcp.Message.request(\"client-list\"))\n nomid_req(katcp.Message.request(\"version-list\"))\n nomid_req(katcp.Message.request(\"sensor-list\"))\n nomid_req(katcp.Message.request(\"sensor-list\", \"an.int\"))\n nomid_req(katcp.Message.request(\"sensor-list\", \"an.unknown\"))\n nomid_req(katcp.Message.request(\"sensor-value\"))\n nomid_req(katcp.Message.request(\"sensor-value\", \"an.int\"))\n nomid_req(katcp.Message.request(\"sensor-value\",\n \"an.unknown\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\",\n \"differential\", \"2\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\",\n \"event-rate\", \"2\", \"3\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\",\n \"an.unknown\", \"auto\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\", \"unknown\"))\n\n def tst():\n self.server.log.trace(\"trace-msg\")\n self.server.log.debug(\"debug-msg\")\n self.server.log.info(\"info-msg\")\n self.server.log.warn(\"warn-msg\")\n self.server.log.error(\"error-msg\")\n self.server.log.fatal(\"fatal-msg\")\n self.server.ioloop.add_callback(tst)\n\n self.assertEqual(self.server.restart_queue.get_nowait(), self.server)\n expected_msgs = [\n (r\"!watchdog ok\", \"\"),\n (r\"!restart ok\", \"\"),\n (r\"!log-level ok warn\", \"\"),\n (r\"!log-level ok trace\", \"\"),\n (r\"!log-level fail Unknown\\_logging\\_level\\_name\\_'unknown'\", \"\"),\n (r\"#help cancel-slow-command Cancel\\_slow\\_command\\_request,\\_\"\n \"resulting\\_in\\_it\\_replying\\_immediately\", \"\"),\n (r\"#help client-list\", \"\"),\n (r\"#help halt\", \"\"),\n (r\"#help help\", \"\"),\n (r\"#help log-level\", \"\"),\n (r\"#help new-command\", \"\"),\n (r\"#help raise-exception\", \"\"),\n (r\"#help raise-fail\", \"\"),\n (r\"#help restart\", \"\"),\n (r\"#help sensor-list\", \"\"),\n (r\"#help sensor-sampling\", \"\"),\n (r\"#help sensor-sampling-clear\", \"\"),\n (r\"#help sensor-value\", \"\"),\n (r\"#help slow-command\", \"\"),\n (r\"#help version-list\", \"\"),\n (r\"#help watchdog\", \"\"),\n (r\"!help ok %d\" % NO_HELP_MESSAGES, \"\"),\n (r\"#help watchdog\", \"\"),\n (r\"!help ok 1\", \"\"),\n (r\"!help fail\", \"\"),\n (r\"#client-list\", \"\"),\n (r\"!client-list ok 1\", \"\"),\n (r\"#version-list katcp-protocol\", \"\"),\n (r\"#version-list katcp-library\", \"\"),\n (r\"#version-list katcp-device\", \"\"),\n (r\"!version-list ok 3\", \"\"),\n (r\"#sensor-list an.int An\\_Integer. count integer -5 5\", \"\"),\n (r\"!sensor-list ok 1\", \"\"),\n (r\"#sensor-list an.int An\\_Integer. 
count integer -5 5\", \"\"),\n (r\"!sensor-list ok 1\", \"\"),\n (r\"!sensor-list fail\", \"\"),\n (r\"#sensor-value 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-value ok 1\", \"\"),\n (r\"#sensor-value 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-value ok 1\", \"\"),\n (r\"!sensor-value fail\", \"\"),\n (r\"!sensor-sampling ok an.int none\", \"\"),\n (r\"#sensor-status 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-sampling ok an.int differential 2\", \"\"),\n (r\"#sensor-status 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-sampling ok an.int event-rate 2 3\", \"\"),\n (r\"!sensor-sampling fail No\\_sensor\\_name\\_given.\", \"\"),\n (r\"!sensor-sampling fail Unknown\\_sensor\\_name:\\_an.unknown.\", \"\"),\n (r\"!sensor-sampling fail Unknown\\_strategy\\_name:\\_unknown.\", \"\"),\n (r\"#log trace\", r\"root trace-msg\"),\n (r\"#log debug\", r\"root debug-msg\"),\n (r\"#log info\", r\"root info-msg\"),\n (r\"#log warn\", r\"root warn-msg\"),\n (r\"#log error\", r\"root error-msg\"),\n (r\"#log fatal\", r\"root fatal-msg\"),\n ]\n self._assert_msgs_like(get_msgs(min_number=len(expected_msgs)),\n expected_msgs)", "def receive(msg=('result_proof', result_proof, replica_no, request_id)):\n '''logging.info(\"Received RESULT_PROOF. {RequestId : %s FromReplica: %s\" +\n \" ReplicaNo: %s }, RESULT: { %s }\", str(request_id),\n self.current_replicas_name[int(replica_no)],\n str(replica_no), result_proof[0][0],\n extra=logger.NODE_INFO)'''\n\n print (\"Order received (by first) - \", result_proof[0][1])\n print (\"Order received (by second) - \", result_proof[1][1])\n print (\"Order received (by third) - \", result_proof[2][1])\n print (\"Result received (by first) - \", result_proof[0][0])\n print (\"Result received (by second) - \", result_proof[1][0])\n print (\"Result received (by third) - \", result_proof[2][0])\n print (\"Request id - \", request_id, \"No of replies - \", len(result_proof))\n if verify_result_proof(result_proof, request_id) == False:\n print(\"Retransmitting requestId - \", request_id, \" and order - \", result_map[request_id][1])\n retransmit_failed_opr(request_id, result_map[request_id][1])\n return", "def handle(self, data):\n with open('request.bin', 'wb') as fout:\n fout.write(data)\n msg = dns.message.from_wire(data)\n log.debug('[REQUEST]\\n%s\\n[/REQUEST]', str(msg))\n nameservers = self.config.default\n if len(msg.question) > 1:\n log.warning(\"Warning: multi-question messages \" +\\\n \"are not yet supported. 
Using default nameserver.\")\n return self.forward_request(msg, nameservers).to_wire()\n question = msg.question[0]\n log.info('%-10s%-8s%s', 'Question:', msg.id, str(question))\n if question.rdtype == dns.rdatatype.A:\n name = question.name.to_text()\n ipaddr, nameservers = self.resolve_by_config(name)\n if ipaddr:\n response = self.create_response(ipaddr, msg)\n log.info('%-10s%-8s%s DNS: %s', 'Answer:', response.id, map(str, response.answer), '[* STATIC IP *]')\n with open('response.bin', 'wb') as fout:\n fout.write(response.to_wire())\n return response.to_wire()\n\n # let some nameserver handle the message\n response = self.forward_request(msg, nameservers)\n log.debug('[RESPONSE]\\n%s\\n[/RESPONSE]', str(response))\n log.info('%-10s%-8s%s DNS: %r', 'Answer:', response.id, map(str, response.answer), nameservers)\n return response.to_wire()", "async def _send_answer(self, msg: SignalingOfferMessage) -> None:\n try:\n # Process received offer message updating target's remote address\n # Generates an answer request payload containing\n # local network description data/metadata (IP, MAC, Mask, etc...)\n payload = asyncio.run(self.connection._set_answer(payload=msg.payload))\n\n # Save remote node's metadata in roder to create a SoloRoute.\n self._client_metadata = msg.host_metadata\n\n # Create a new SignalingAnswerMessage\n signaling_answer = SignalingAnswerMessage(\n address=self.signaling_client.address,\n payload=payload, # Signaling answer payload\n host_metadata=self.node.get_metadata_for_client(), # Own Node Metadata\n target_peer=msg.host_peer, # Remote Node ID\n host_peer=self.signaling_client.duet_id,\n )\n\n # Enqueue it in the push msg queue to be sent to the signaling server.\n await self._push_msg_queue.put(signaling_answer)\n except Exception as e:\n traceback_and_raise(e)", "def test_reply_on_inbound(self):\n\n post, reply = self._post_reply(\n [self.sc1, self.sc2],\n [self.sc1.inbound, self.sc2.outbound]\n )\n\n # Reply will only trigger state change for sc2 - which is outbound\n self.assertEqual(post.channel_assignments[str(self.sc1.inbound)], 'highlighted')\n self.assertEqual(post.channel_assignments[str(self.sc2.inbound)], 'replied')", "def proxy(target1:socket.socket, target2:socket.socket):\n def resend(from_s:socket.socket, to_s:socket.socket):\n try:\n from_s.settimeout(5)\n while True:\n try:\n b = from_s.recv(1024)\n if len(b) == 0:\n return\n to_s.send(b)\n except socket.timeout as e:\n pass\n except Exception as e:\n # print(f\"c > t {e}\")\n return\n except:\n pass\n \n\n\n t1 = threading.Thread(target=resend, args=(target1, target2), name=f\"{target1.getpeername()} client > I am > target {target2.getpeername()} \")\n t2 = threading.Thread(target=resend, args=(target2, target1), name=f\"{target1.getpeername()} client < I am < target {target2.getpeername()} \")\n t1.start()\n t2.start()\n while t1.is_alive() and t2.is_alive():\n time.sleep(5)\n return", "def test_sending_and_accepting_request(self):\n\n self.send_request()\n\n request_response_id = RequestResponse.list(\n self._API_CONTEXT,\n self._USER_ID,\n self._MONETARY_ACCOUNT_ID2\n ).value[self._FIRST_INDEX].id_\n\n self.accept_request(request_response_id)", "def answer_call():\n # Start our TwiML response\n from_number = request.values.get('Caller')\n # to_number = request.values.get('Called')\n\n resp = VoiceResponse()\n\n if from_number == \"+19788073607\" or from_number == \"+16173540817\":\n # Read a message aloud to the caller\n resp.play(\"https://instaud.io/_/3GEH.mp3\")\n resp.say(\"Welcome to 
Game of Thrones.\", voice='man', language=\"en-GB\")\n resp.say(\"The door will now open. Enjoy the night.\", voice='man', language=\"en-GB\")\n resp.play('', digits='ww9')\n resp.say(\"If you can hear this message, Arjun messed up. Just text him.\", voice='man', language=\"en-GB\")\n\n \n # gather = Gather(action=\"/door\", method=\"POST\")\n # gather.say(\"Welcome to Game of Thrones night. \\n Please enter the code to enter.\", voice='man', language=\"en-GB\")\n\n # resp.append(gather)\n\n return str(resp)\n\n return resp", "def respond(self, question, answer):\n self.responses.append((question, answer))\n return self", "async def process_post(self, form: dict) -> str:\n\n logger = logging.getLogger(__name__)\n logger.debug('_BaseAgent.process_post: >>> form: {}'.format(form))\n\n validate_form(form, self.cfg.get('proxy-relay', False))\n\n if form['type'] == 'agent-nym-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = await self.get_nym(form['data']['agent-nym']['did'])\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'agent-endpoint-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = await self.get_endpoint(form['data']['agent-endpoint']['did'])\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'agent-endpoint-send':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n resp_json = await self.send_endpoint()\n rv = json.dumps({})\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] == 'schema-lookup':\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n s_key = schema_key_for(form['data']['schema'])\n schema_json = await self.get_schema(s_key)\n schema = json.loads(schema_json)\n if not schema:\n rv = schema_json\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n rv = schema_json\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n elif form['type'] in (\n 'agent-nym-send',\n 'schema-send',\n 'claim-def-send',\n 'claim-offer-create',\n 'claim-offer-store',\n 'claim-create',\n 'claim-store',\n 'claim-request',\n 'proof-request',\n 'proof-request-by-referent',\n 'verification-request'): # do not proxy: master-secret-set, claims-reset\n resp_proxy_json = await self._response_from_proxy(form)\n if resp_proxy_json != None:\n rv = resp_proxy_json # it's proxied\n logger.debug('_BaseAgent.process_post: <<< {}'.format(rv))\n return rv\n\n # base listening agent doesn't do this work\n logger.debug('_BaseAgent.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not respond to token type {}'.format(self.__class__.__name__, form['type']))\n\n logger.debug('_BaseAgent.process_post: <!< not this form type: {}'.format(form['type']))\n raise TokenType('{} does not support token type {}'.format(self.__class__.__name__, form['type']))", "def respond(self, 
msg):\n\n for client in self.clients:\n\n try:\n\n if 'reply' in msg.data:\n\n if msg['reply'] == 1 or client.id != msg['src_id']:\n\n client.send(msg)\n\n else:\n\n client.send(msg)\n\n except DeadClientError as err:\n\n # Remove client if no longer contactable\n\n self.remove_client(client.address)\n\n stdout(err)\n return", "def get_requests_resp_and_aito_resp(aito_client: AitoClient, request_obj: aito_requests.AitoRequest):\n raw_resp_obj = requests.request(\n method=request_obj.method,\n url=aito_client.instance_url + request_obj.endpoint,\n headers=aito_client.headers,\n json=request_obj.query\n )\n raw_resp_json = raw_resp_obj.json()\n\n aito_resp = aito_client.request(request_obj=request_obj)\n return raw_resp_json, aito_resp" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple request-reply pattern between an agent and a direct ZMQ connection.
def test_reqrep_raw_zmq_outside(nsproxy):
    # Create an osBrain agent that will receive the message
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind(
        'REP', transport='tcp', handler=echo_handler, serializer='raw'
    )

    # Create a raw ZeroMQ REQ socket
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))

    # Send the message
    message = b'Hello world'
    socket.send(message)
    assert socket.recv() == message

    socket.close()
    context.destroy()
[ "def test_reqrep(nsproxy, serializer, message, response):\n\n def rep_handler(agent, message):\n return response\n\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n addr = a0.bind('REP', 'reply', rep_handler, serializer=serializer)\n a1.connect(addr, 'request')\n assert a1.send_recv('request', message) == response", "def test_acceptor(self):\n # Also a regression test for #120\n # C-ECHO-RQ\n # 80 total length\n echo_rq = (\n b\"\\x04\\x00\\x00\\x00\\x00\\x4a\" # P-DATA-TF 74\n b\"\\x00\\x00\\x00\\x46\\x01\" # PDV Item 70\n b\"\\x03\" # PDV: 2 -> 69\n b\"\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x42\\x00\\x00\\x00\" # 12 Command Group Length\n b\"\\x00\\x00\\x02\\x00\\x12\\x00\\x00\\x00\\x31\\x2e\\x32\\x2e\\x38\"\n b\"\\x34\\x30\\x2e\\x31\\x30\\x30\\x30\\x38\\x2e\\x31\\x2e\\x31\\x00\" # 26\n b\"\\x00\\x00\\x00\\x01\\x02\\x00\\x00\\x00\\x30\\x00\" # 10 Command Field\n b\"\\x00\\x00\\x10\\x01\\x02\\x00\\x00\\x00\\x01\\x00\" # 10 Message ID\n b\"\\x00\\x00\\x00\\x08\\x02\\x00\\x00\\x00\\x01\\x01\" # 10 Command Data Set Type\n )\n\n # Send associate request then c-echo requests then release request\n commands = [\n (\"send\", a_associate_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", echo_rq),\n (\"recv\", None),\n (\"send\", a_release_rq),\n (\"exit\", None),\n ]\n self.scp = scp = self.start_server(commands)\n\n assoc, fsm = self.get_acceptor_assoc()\n assoc.start()\n\n for ii in range(len(commands) - 1):\n scp.step()\n\n while assoc.dul.is_alive():\n time.sleep(0.001)\n\n scp.step()\n scp.shutdown()\n\n assert [\n (\"Sta1\", \"Evt5\", \"AE-5\"),\n (\"Sta2\", \"Evt6\", \"AE-6\"),\n (\"Sta3\", \"Evt7\", \"AE-7\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt10\", \"DT-2\"),\n (\"Sta6\", \"Evt9\", \"DT-1\"),\n (\"Sta6\", \"Evt12\", \"AR-2\"),\n (\"Sta8\", \"Evt14\", \"AR-4\"),\n (\"Sta13\", \"Evt17\", \"AR-5\"),\n ] == fsm._changes[:30]", "def setreply(self, msg):\n return CONTINUE", "def receive(msg=('result_proof', result_proof, replica_no, request_id)):\n '''logging.info(\"Received RESULT_PROOF. 
{RequestId : %s FromReplica: %s\" +\n \" ReplicaNo: %s }, RESULT: { %s }\", str(request_id),\n self.current_replicas_name[int(replica_no)],\n str(replica_no), result_proof[0][0],\n extra=logger.NODE_INFO)'''\n\n print (\"Order received (by first) - \", result_proof[0][1])\n print (\"Order received (by second) - \", result_proof[1][1])\n print (\"Order received (by third) - \", result_proof[2][1])\n print (\"Result received (by first) - \", result_proof[0][0])\n print (\"Result received (by second) - \", result_proof[1][0])\n print (\"Result received (by third) - \", result_proof[2][0])\n print (\"Request id - \", request_id, \"No of replies - \", len(result_proof))\n if verify_result_proof(result_proof, request_id) == False:\n print(\"Retransmitting requestId - \", request_id, \" and order - \", result_map[request_id][1])\n retransmit_failed_opr(request_id, result_map[request_id][1])\n return", "async def test_quickresponse(self):\n actions = [\n ('id1', 'Action 1'),\n ('id2', 'Action 2'),\n ]\n self.clients[0]['xep_0439'].ask_for_actions(\n self.clients[1].boundjid.full,\n \"Action 1 or 2 ?\",\n actions\n )\n msg = await self.clients[1].wait_until('action_received')\n actions_recv = [\n (st['id'], st['label']) for st in msg if isinstance(st, stanza.Action)\n ]\n self.assertEqual(\n actions,\n actions_recv,\n )\n\n reply = self.clients[1].make_message(\n mto=self.clients[0].boundjid.full\n )\n reply['action_selected']['id'] = 'id1'\n reply.send()\n\n reply_recv = await self.clients[0].wait_until('action_selected')\n self.assertEqual(\n reply_recv['action_selected']['id'],\n 'id1',\n )\n\n self.clients[0]['xep_0439'].ask_for_response(\n self.clients[1].boundjid.full,\n \"Reply with action 1 or 2 (id1/id2) ?\",\n actions\n )\n msg = await self.clients[1].wait_until('responses_received')", "def handle_reply(self, msg):\n print msg", "def query(self, *args):\n\n log(\"calling client.query\")\n self._connect()\n try:\n self.sendConn.send(args)\n\n res = self.receive()\n while self.isUnilateralResponse(res):\n res = self.receive()\n\n return res\n except EnvironmentError as ee:\n # When we can depend on Python 3, we can use PEP 3134\n # exception chaining here.\n raise WatchmanEnvironmentError(\n \"I/O error communicating with watchman daemon\",\n ee.errno,\n ee.strerror,\n args,\n )\n except WatchmanError as ex:\n ex.setCommand(args)\n raise", "def __init__(self, context, connection):\n super(ZMQClient, self).__init__(context, socket_type=zmq.REQ)\n self.socket.connect(connection)\n # The REQ socket sends, to the network, an empty delimiter frame in\n # front of the message data. REQ sockets are synchronous. REQ sockets\n # always send one request and then wait for one reply. REQ sockets talk\n # to one peer at a time. If you connect a REQ socket to multiple peers,\n # requests are distributed to and replies expected from each peer one\n # turn at a time.", "def handle_connect(self, req):\r\n \r\n # Create a socket to connect to the remote server\r\n remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n # From now on, we must not forget to close this socket before leaving.\r\n try:\r\n try:\r\n # Connection to the remote server\r\n print thread.get_ident(), 'Connecting to', req['address']\r\n\r\n\r\n # Possible way to handle the timeout defined in the protocol!\r\n # Make the connect non-blocking, then do a select and keep\r\n # an eye on the writable socket, just as I did with the\r\n # accept() from BIND requests.\r\n # Do this tomorrow... Geez... 00:47... 
Do this this evening.\r\n \r\n remote.connect(req['address'])\r\n \r\n # The only connection that can be reset here is the one of the\r\n # client, so we don't need to answer. Any other socket\r\n # exception forces us to try to answer to the client.\r\n except socket.error:\r\n exception, value, traceback = sys.exc_info()\r\n if value[0] == ERR_CONNECTION_RESET_BY_PEER:\r\n raise Client_Connection_Closed((ERR_CONNECTION_RESET_BY_PEER, socket.errorTab[ERR_CONNECTION_RESET_BY_PEER]))\r\n else:\r\n raise Remote_Connection_Failed\r\n except:\r\n raise Remote_Connection_Failed\r\n \r\n # From now on we will already have answered to the client.\r\n # Any exception occuring now must make us exit silently.\r\n try:\r\n # Telling the client that the connection it asked for is\r\n # granted.\r\n self.answer_granted()\r\n # Starting to relay information between the two peers.\r\n self.forward(self.request, remote)\r\n # We don't have the right to \"speak\" to the client anymore.\r\n # So any socket failure means a \"connection closed\" and silent\r\n # exit.\r\n except socket.error:\r\n raise Connection_Closed\r\n # Mandatory closing of the remote socket.\r\n finally:\r\n remote.close()", "def handle(self, data):\n with open('request.bin', 'wb') as fout:\n fout.write(data)\n msg = dns.message.from_wire(data)\n log.debug('[REQUEST]\\n%s\\n[/REQUEST]', str(msg))\n nameservers = self.config.default\n if len(msg.question) > 1:\n log.warning(\"Warning: multi-question messages \" +\\\n \"are not yet supported. Using default nameserver.\")\n return self.forward_request(msg, nameservers).to_wire()\n question = msg.question[0]\n log.info('%-10s%-8s%s', 'Question:', msg.id, str(question))\n if question.rdtype == dns.rdatatype.A:\n name = question.name.to_text()\n ipaddr, nameservers = self.resolve_by_config(name)\n if ipaddr:\n response = self.create_response(ipaddr, msg)\n log.info('%-10s%-8s%s DNS: %s', 'Answer:', response.id, map(str, response.answer), '[* STATIC IP *]')\n with open('response.bin', 'wb') as fout:\n fout.write(response.to_wire())\n return response.to_wire()\n\n # let some nameserver handle the message\n response = self.forward_request(msg, nameservers)\n log.debug('[RESPONSE]\\n%s\\n[/RESPONSE]', str(response))\n log.info('%-10s%-8s%s DNS: %r', 'Answer:', response.id, map(str, response.answer), nameservers)\n return response.to_wire()", "def send_arp_reply (self, reply_to, mac, src_mac = _default_mac):\n if src_mac is _default_mac:\n src_mac = self.default_reply_src_mac\n return send_arp_reply(reply_to, mac, src_mac)", "def createReply(title, text, REQUEST, RESPONSE):", "def respond(self, msg):\n\n for client in self.clients:\n\n try:\n\n if 'reply' in msg.data:\n\n if msg['reply'] == 1 or client.id != msg['src_id']:\n\n client.send(msg)\n\n else:\n\n client.send(msg)\n\n except DeadClientError as err:\n\n # Remove client if no longer contactable\n\n self.remove_client(client.address)\n\n stdout(err)\n return", "async def _send_answer(self, msg: SignalingOfferMessage) -> None:\n try:\n # Process received offer message updating target's remote address\n # Generates an answer request payload containing\n # local network description data/metadata (IP, MAC, Mask, etc...)\n payload = asyncio.run(self.connection._set_answer(payload=msg.payload))\n\n # Save remote node's metadata in roder to create a SoloRoute.\n self._client_metadata = msg.host_metadata\n\n # Create a new SignalingAnswerMessage\n signaling_answer = SignalingAnswerMessage(\n address=self.signaling_client.address,\n 
payload=payload, # Signaling answer payload\n host_metadata=self.node.get_metadata_for_client(), # Own Node Metadata\n target_peer=msg.host_peer, # Remote Node ID\n host_peer=self.signaling_client.duet_id,\n )\n\n # Enqueue it in the push msg queue to be sent to the signaling server.\n await self._push_msg_queue.put(signaling_answer)\n except Exception as e:\n traceback_and_raise(e)", "def getReplyResults():", "def request(self, data, ip):\n port = 8000\n s = None\n for res in socket.getaddrinfo(ip, port, socket.AF_UNSPEC, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n try:\n s = socket.socket(af, socktype, proto)\n except socket.error, msg:\n s = None\n continue\n try:\n s.connect(sa) # connect to remote host, here we only connect to agent\n s.setblocking(0) # we use non-blocking socket\n except socket.error, msg:\n s.close()\n s = None\n continue\n break\n if s is None:\n print 'could not open socket'\n sys.exit(1)\n\n s.sendall(data) # send data to agent\n\n # put socket into listening list which is waiting for response\n self.inputs.append(s)\n self.message_queue[s] = Queue.Queue()\n gevent.sleep(0.1) # switch to listening handler\n\n # getting response and close socket would be done by listening handler\n return", "def on_mdp_request(self, msg):\n Plugin.on_mdp_request(self, msg)\n # self.log.info(u\"==> Received 0MQ messages: %s\" % format(msg))\n if msg.get_action() == \"client.cmd\":\n reason = None\n status = True\n data = msg.get_data()\n \n device_id = data[\"device_id\"]\n command_id = data[\"command_id\"]\n if device_id not in self.device_list:\n self.log.error(u\"### MQ REQ command, Device ID '%s' unknown, Have you restarted the plugin after device creation ?\" % device_id)\n status = False\n reason = u\"Plugin onewired: Unknown device ID %d\" % device_id\n self.send_rep_ack(status, reason, command_id, \"unknown\") ; # Reply MQ REP (acq) to REQ command\n return\n\n device_name = self.device_list[device_id][\"name\"]\n self.log.info(u\"==> Received for device '%s' MQ REQ command message: %s\" % (device_name, format(data))) # {u'command_id': 70, u'value': u'1', u'device_id': 169}\n\n status, reason = self.onewire.writeSensor(self.device_list[device_id][\"address\"], self.device_list[device_id][\"properties\"], data[\"value\"])\n if status:\n self.send_pub_data(device_id, data[\"value\"]) # Update sensor command.\n \n # Reply MQ REP (acq) to REQ command\n self.send_rep_ack(status, reason, command_id, device_name) ;", "def test_reply_on_inbound(self):\n\n post, reply = self._post_reply(\n [self.sc1, self.sc2],\n [self.sc1.inbound, self.sc2.outbound]\n )\n\n # Reply will only trigger state change for sc2 - which is outbound\n self.assertEqual(post.channel_assignments[str(self.sc1.inbound)], 'highlighted')\n self.assertEqual(post.channel_assignments[str(self.sc2.inbound)], 'replied')", "def respond(self, follow_up=True, **kwargs):\n if self['type'] == 'response':\n assert self['done'] == False, \"Can't respond to a response that is already marked done.\"\n data = {'command_id': self['command_id'], 'type':'response'}\n data.update(kwargs)\n if not data.has_key('done'):\n data['done'] = False\n data_str = json.dumps(data, cls=JSONEncoder)\n log.debug(\"Sending response : {data}\".format(data=data_str))\n self.ws.send(data_str)\n if data['done'] == False and follow_up:\n # We are still expecting a response to our response:\n return self.receive()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple push-pull pattern test, using different serializations.
def test_pushpull(nsproxy, serializer, message):
    a0 = run_agent('a0')
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind('PULL', handler=set_received, serializer=serializer)
    a0.connect(addr, 'push')
    a0.send('push', message)
    assert wait_agent_attr(a1, name='received', value=message)
[ "def test_simple_push_pull():\n with Remote() as remote, Pusher(remote) as pusher:\n pusher.push_file('README.md', '1')\n\n with Puller(remote) as puller:\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert puller.read_file('README.md') == pusher.read_file('README.md') == '1'\n\n pusher.push_file('README.md', '2')\n for l in puller.gp.pull():\n print(puller.path + l)\n\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert puller.read_file('README.md') == pusher.read_file('README.md') == '2'\n\n pusher.push_file('another-file', '3')\n\n for l in puller.gp.pull():\n print(l)\n\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert puller.read_file('another-file') == pusher.read_file('another-file') == '3'\n\n pusher.git('rm', 'another-file')\n pusher.git('commit', '-m', 'Removing File')\n pusher.git('push', 'origin', 'master')\n\n for l in puller.gp.pull():\n print(l)\n\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert not os.path.exists(os.path.join(puller.path, 'another-file'))", "def test_pullProducer(self):\n\n @implementer(IPullProducer)\n class PullProducer:\n def __init__(self, toProduce):\n self.toProduce = toProduce\n\n def start(self, consumer):\n self.consumer = consumer\n self.consumer.registerProducer(self, False)\n\n def resumeProducing(self):\n self.consumer.write(self.toProduce.pop(0))\n if not self.toProduce:\n self.consumer.unregisterProducer()\n\n return self._producertest(PullProducer)", "def test_push(self):\n self.try_topic('push', 'push')", "def test_pushpull_raw_zmq_outside(nsproxy):\n # Create an osBrain agent that will receive the message\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a1.bind(\n 'PULL', transport='tcp', handler=set_received, serializer='raw'\n )\n\n # Create a raw ZeroMQ PUSH socket\n context = zmq.Context()\n socket = context.socket(zmq.PUSH)\n socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))\n\n # Send the message\n message = b'Hello world'\n socket.send(message)\n assert wait_agent_attr(a1, name='received', value=message)\n\n socket.close()\n context.destroy()", "def test_pullRequest(self):\n self.try_topic('pullRequest', 'pull-request')", "def test_push_twice(new_dll):\n new_dll.push(\"brandy\")\n new_dll.push(\"chardonnay\")\n assert new_dll.head.val == \"chardonnay\" and new_dll.tail.val == \"brandy\"", "def test_roundtrip_msg(self):\n for ser in self._test_serializers:\n\n for contains_binary, msg in self._test_messages:\n\n # serialize message\n payload, binary = ser.serialize(msg)\n\n # unserialize message again\n msg2 = ser.unserialize(payload, binary)\n\n # must be equal: message roundtrips via the serializer\n self.assertEqual([msg], msg2)", "def test_push_many(self):\n stk = Stack()\n items = list(range(100))\n for i in items:\n stk.push(i)\n self.assertEqual(stk.show(), list(reversed(items)))", "def test_push_full_next(dll):\n dll.push(6)\n assert dll.head.next.data is 3", "def test_untracked_puller():\n with Remote() as remote, Pusher(remote) as pusher:\n pusher.push_file('README.md', '1')\n\n with Puller(remote) as puller:\n pusher.push_file('another-file', '2')\n\n puller.write_file('another-file', '3')\n\n for l in puller.gp.pull():\n print(l)\n assert puller.read_file('another-file') == '2'\n # Find file that was created!\n renamed_file = glob.glob(os.path.join(puller.path, 'another-file_*'))[0]\n assert puller.read_file(os.path.basename(renamed_file)) == '3'", "def 
test_pubsub(nsproxy, serializer, message):\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a0.bind('PUB', alias='pub', serializer=serializer)\n a1.connect(addr, handler=set_received)\n while not a1.get_attr('received'):\n a0.send('pub', message)\n time.sleep(0.1)\n assert a1.get_attr('received') == message", "def pull(self, microbotArray):", "def test_push_pop_empty(self):\n stk = Stack()\n stk.push(1)\n stk.pop()\n self.assertEqual(stk.peek(), [])", "def test_push_empty(empty):\n empty.push(3)\n assert empty.head.data is 3", "def pull(pull_url, extra_data=None,\n encode=None, decode=None, headers=None, monitor=None, timeout=None,\n include_extensions=True):\n assert isinstance(pull_url, basestring), \"pull url must be a string\"\n assert bool(pull_url), \"pull url can't be empty\"\n if extra_data is not None:\n assert isinstance(extra_data, dict), \"extra data must be a dictionary\"\n request_message = PullRequestMessage()\n for op in compress(): request_message.add_operation(op)\n data = request_message.to_json()\n data.update({'extra_data': extra_data or {}})\n\n code, reason, response = post_request(\n pull_url, data, encode, decode, headers, timeout, monitor)\n if (code // 100 != 2):\n if monitor:\n monitor({'status': \"error\", 'reason': reason.lower()})\n raise BadResponseError(code, reason, response)\n if response is None:\n if monitor:\n monitor({\n 'status': \"error\",\n 'reason': \"invalid response format\"})\n raise BadResponseError(code, reason, response)\n message = None\n try:\n message = PullMessage(response)\n except KeyError:\n if monitor:\n monitor({\n 'status': \"error\",\n 'reason': \"invalid message format\"})\n raise BadResponseError(\n \"response object isn't a valid PullMessage\", response)\n\n if monitor:\n monitor({\n 'status': \"merging\",\n 'operations': len(message.operations)})\n merge(message, include_extensions=include_extensions)\n if monitor:\n monitor({'status': \"done\"})\n # return the response for the programmer to do what she wants\n # afterwards\n return response", "def test_push(self):\n list = LinkedList([2, 3, 4])\n pushedNode = list.push(1)\n self.assertEqual(pushedNode, list.head)", "def test_try_pull_unique_messages__unique_first_try(self, mocker):\n client = self.MockClient()\n sub_name = \"test_sub_2\"\n previous_msg_ids = set()\n last_run_time = \"2020-04-25T08:36:30.242Z\"\n unique_messages = self.RAW_RESPONSES[\"try_pull_unique_messages_1\"]\n mocker.patch.object(client, \"pull_messages\", return_value=unique_messages)\n debug_mock = mocker.patch.object(demisto, \"debug\")\n (\n res_msgs,\n res_msg_ids,\n res_acks,\n res_max_publish_time,\n ) = try_pull_unique_messages(\n client, sub_name, previous_msg_ids, last_run_time, retry_times=1\n )\n assert not any(call.args[0].startswith('GCP_PUBSUB_MSG') for call in debug_mock.call_args_list)\n assert res_msgs == [\n {\n \"ackId\": \"321\",\n \"data\": \"42\",\n \"messageId\": \"123\",\n \"publishTime\": \"2020-04-18T08:36:30.541Z\",\n }\n ]\n assert res_msg_ids == {\"123\"}\n assert res_acks == [\"321\"]\n assert res_max_publish_time == \"2020-04-18T08:36:30.541000Z\"", "def wrappedpushdiscovery(orig, pushop):\n\n pushop.reviewnodes = None\n\n caps = getreviewcaps(pushop.remote)\n if 'pushreview' not in caps:\n return orig(pushop)\n\n ui = pushop.ui\n repo = pushop.repo\n\n if repo.noreviewboardpush:\n return orig(pushop)\n\n # If no arguments are specified to push, Mercurial will try to push all\n # non-remote changesets by default. 
This can result in unexpected behavior,\n # especially for people doing multi-headed development.\n #\n # Since we reject pushes with multiple heads anyway, default to pushing\n # the working copy.\n if not pushop.revs:\n pushop.revs = [repo['.'].node()]\n\n tipnode = None\n basenode = None\n\n # Our prepushoutgoing hook validates that all pushed changesets are\n # part of the same DAG head. If revisions were specified by the user,\n # the last is the tip commit to review and the first (if more than 1)\n # is the base commit to review.\n #\n # Note: the revisions are in the order they were specified by the user.\n # This may not be DAG order. So we have to explicitly order them here.\n revs = sorted(repo[r].rev() for r in pushop.revs)\n tipnode = repo[revs[-1]].node()\n if len(revs) > 1:\n basenode = repo[revs[0]].node()\n\n if repo.pushsingle:\n basenode = tipnode\n\n # Given a base and tip node, find all changesets to review.\n #\n # A solution that works most of the time is to find all non-public\n # ancestors of that node. This is our default.\n #\n # If basenode is specified, we stop the traversal when we encounter it.\n #\n # Note that we will still refuse to review a public changeset even with\n # basenode. This decision is somewhat arbitrary and can be revisited later\n # if there is an actual need to review public changesets.\n nodes = [tipnode]\n # Special case where basenode is the tip node.\n if basenode and tipnode == basenode:\n pass\n else:\n for node in repo[tipnode].ancestors():\n ctx = repo[node]\n\n if ctx.phase() == phases.public:\n break\n if basenode and ctx.node() == basenode:\n nodes.insert(0, ctx.node())\n break\n\n nodes.insert(0, ctx.node())\n\n # Filter out public nodes.\n publicnodes = []\n for node in nodes:\n ctx = repo[node]\n if ctx.phase() == phases.public:\n publicnodes.append(node)\n ui.status(_('(ignoring public changeset %s in review request)\\n') %\n ctx.hex()[0:12])\n\n nodes = [n for n in nodes if n not in publicnodes]\n if not nodes:\n raise util.Abort(\n _('no non-public changesets left to review'),\n hint=_('add or change the -r argument to include draft changesets'))\n\n # We stop completely empty changesets prior to review.\n for node in nodes:\n ctx = repo[node]\n if not ctx.files():\n raise util.Abort(\n _('cannot review empty changeset %s') % ctx.hex()[:12],\n hint=_('add files to or remove changeset'))\n\n run_android_checkstyle(repo, nodes)\n\n # Ensure all reviewed changesets have commit IDs.\n replacenodes = []\n for node in nodes:\n ctx = repo[node]\n if not parse_commit_id(encoding.fromlocal(ctx.description())):\n replacenodes.append(node)\n\n def makememctx(repo, ctx, revmap, copyfilectxfn):\n parents = newparents(repo, ctx, revmap)\n # Need to make a copy otherwise modification is made on original,\n # which is just plain wrong.\n msg = encoding.fromlocal(ctx.description())\n new_msg, changed = addcommitid(msg, repo=repo)\n\n memctx = context.memctx(repo, parents,\n encoding.tolocal(new_msg), ctx.files(),\n copyfilectxfn, user=ctx.user(),\n date=ctx.date(), extra=dict(ctx.extra()))\n\n return memctx\n\n if replacenodes:\n ui.status(_('(adding commit id to %d changesets)\\n') %\n (len(replacenodes)))\n nodemap = replacechangesets(repo, replacenodes, makememctx,\n backuptopic='addcommitid')\n\n # Since we're in the middle of an operation, update references\n # to rewritten nodes.\n nodes = [nodemap.get(node, node) for node in nodes]\n pushop.revs = [nodemap.get(node, node) for node in pushop.revs]\n\n pushop.reviewnodes = nodes\n\n # 
Since we may rewrite changesets to contain review metadata after\n # push, abort immediately if the working directory state is not\n # compatible with rewriting. This prevents us from successfully\n # pushing and failing to update commit metadata after the push. i.e.\n # it prevents potential loss of metadata.\n #\n # There may be some scenarios where we don't rewrite after push.\n # But coding that here would be complicated. And future server changes\n # may change things like review request mapping, which may invalidate\n # client assumptions. So always assume a rewrite is needed.\n impactedrevs = list(repo.revs('%ln::', nodes))\n if repo['.'].rev() in impactedrevs:\n cmdutil.checkunfinished(repo)\n cmdutil.bailifchanged(repo)\n\n return orig(pushop)", "def test_try_pull_unique_messages__partially_unique_first_try(self, mocker):\n client = self.MockClient()\n sub_name = \"test_sub_2\"\n previous_msg_ids = {\"123\"}\n last_run_time = \"2020-04-09T08:36:30.242Z\"\n unique_messages_list = [self.RAW_RESPONSES[\"try_pull_unique_messages_3\"]]\n mocker.patch.object(client, \"pull_messages\", side_effect=unique_messages_list)\n debug_mock = mocker.patch.object(demisto, \"debug\")\n (\n res_msgs,\n res_msg_ids,\n res_acks,\n res_max_publish_time,\n ) = try_pull_unique_messages(\n client, sub_name, previous_msg_ids, last_run_time, retry_times=1\n )\n assert not any(call.args[0].startswith('GCP_PUBSUB_MSG') for call in debug_mock.call_args_list)\n assert res_msgs == [\n {\n \"ackId\": \"654\",\n \"data\": \"43\",\n \"messageId\": \"456\",\n \"publishTime\": \"2020-04-19T08:36:30.541Z\",\n }\n ]\n assert res_msg_ids == {\"456\"}\n assert res_acks == [\"654\"]\n assert res_max_publish_time == \"2020-04-19T08:36:30.541000Z\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple push-pull pattern test. Channel without serialization. The message is sent from outside osBrain, through a ZMQ PUSH socket.
def test_pushpull_raw_zmq_outside(nsproxy):
    # Create an osBrain agent that will receive the message
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind(
        'PULL', transport='tcp', handler=set_received, serializer='raw'
    )

    # Create a raw ZeroMQ PUSH socket
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))

    # Send the message
    message = b'Hello world'
    socket.send(message)
    assert wait_agent_attr(a1, name='received', value=message)

    socket.close()
    context.destroy()
[ "def test_pushpull(nsproxy, serializer, message):\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a1.bind('PULL', handler=set_received, serializer=serializer)\n a0.connect(addr, 'push')\n a0.send('push', message)\n assert wait_agent_attr(a1, name='received', value=message)", "def test_push(self):\n self.try_topic('push', 'push')", "def test_pubsub_raw_zmq_outside(nsproxy):\n # Create an osBrain agent that will receive the message\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a1.bind(\n 'SUB', transport='tcp', handler=set_received, serializer='raw'\n )\n\n # Create a raw ZeroMQ PUB socket\n context = zmq.Context()\n socket = context.socket(zmq.PUB)\n socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))\n\n # Send the message\n message = b'Hello world'\n while not a1.get_attr('received'):\n socket.send(message)\n time.sleep(0.01)\n assert a1.get_attr('received') == message\n\n socket.close()\n context.destroy()", "def test_message_unpin():\n user_data = server_create_user(\n \"email@email.com\", \"password\", \"Billy\", \"Batson\")\n channel_data = server_create_channel(\n user_data['token'], 'test_channel', True)\n message_str = \"This is a test message!\"\n\n message_payload = json.dumps({'token': user_data['token'],\n 'channel_id': channel_data['channel_id'],\n 'message': message_str}).encode('utf-8')\n #send a message by route\n send_msg_req = urllib.request.Request(f\"{get_url()}/message/send\",\n data=message_payload,\n headers={\n \"Content-Type\": \"application/json\"},\n method='POST')\n response = urllib.request.urlopen(send_msg_req)\n decoded_send_response = json.load(response)\n #pin the message above\n pin_payload = json.dumps({'token': user_data['token'],\n 'message_id': decoded_send_response['message_id']}).encode('utf-8')\n pin_msg_req = urllib.request.Request(f\"{get_url()}/message/pin\",\n data=pin_payload,\n headers={\n \"Content-Type\": \"application/json\"},\n method='POST')\n urllib.request.urlopen(pin_msg_req)\n\n response_details = urllib.request.urlopen(\n f\"{get_url()}/channel/messages?token={user_data['token']}\"\n + f\"&channel_id={channel_data['channel_id']}&start={0}\")\n details_decoded = json.load(response_details)\n\n assert details_decoded['messages'][0].get('is_pinned') is True\n\n unpin_msg_req = urllib.request.Request(f\"{get_url()}/message/unpin\",\n data=pin_payload,\n headers={\n \"Content-Type\": \"application/json\"},\n method='POST')\n urllib.request.urlopen(unpin_msg_req)\n\n response_details = urllib.request.urlopen(\n f\"{get_url()}/channel/messages?token={user_data['token']}\"\n + f\"&channel_id={channel_data['channel_id']}&start={0}\")\n details_decoded = json.load(response_details)\n\n assert details_decoded['messages'][0].get('is_pinned') is False", "def setup_zmq(self):\n self.context = zmq.Context()\n self.push = self.context.socket(zmq.PUSH)\n self.push_port = self.push.bind_to_random_port(\"tcp://%s\" % self.host)\n # start a listener for the pull socket\n eventlet.spawn(self.zmq_pull)\n eventlet.sleep(0)", "def test_simple_push_pull():\n with Remote() as remote, Pusher(remote) as pusher:\n pusher.push_file('README.md', '1')\n\n with Puller(remote) as puller:\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert puller.read_file('README.md') == pusher.read_file('README.md') == '1'\n\n pusher.push_file('README.md', '2')\n for l in puller.gp.pull():\n print(puller.path + l)\n\n assert puller.git('rev-parse', 'HEAD') == 
pusher.git('rev-parse', 'HEAD')\n assert puller.read_file('README.md') == pusher.read_file('README.md') == '2'\n\n pusher.push_file('another-file', '3')\n\n for l in puller.gp.pull():\n print(l)\n\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert puller.read_file('another-file') == pusher.read_file('another-file') == '3'\n\n pusher.git('rm', 'another-file')\n pusher.git('commit', '-m', 'Removing File')\n pusher.git('push', 'origin', 'master')\n\n for l in puller.gp.pull():\n print(l)\n\n assert puller.git('rev-parse', 'HEAD') == pusher.git('rev-parse', 'HEAD')\n assert not os.path.exists(os.path.join(puller.path, 'another-file'))", "def test_pubsub(nsproxy, serializer, message):\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a0.bind('PUB', alias='pub', serializer=serializer)\n a1.connect(addr, handler=set_received)\n while not a1.get_attr('received'):\n a0.send('pub', message)\n time.sleep(0.1)\n assert a1.get_attr('received') == message", "def test_pullProducer(self):\n\n @implementer(IPullProducer)\n class PullProducer:\n def __init__(self, toProduce):\n self.toProduce = toProduce\n\n def start(self, consumer):\n self.consumer = consumer\n self.consumer.registerProducer(self, False)\n\n def resumeProducing(self):\n self.consumer.write(self.toProduce.pop(0))\n if not self.toProduce:\n self.consumer.unregisterProducer()\n\n return self._producertest(PullProducer)", "def test_reqrep_raw_zmq_outside(nsproxy):\n # Create an osBrain agent that will receive the message\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a1.bind(\n 'REP', transport='tcp', handler=echo_handler, serializer='raw'\n )\n\n # Create a raw ZeroMQ REQ socket\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))\n\n # Send the message\n message = b'Hello world'\n socket.send(message)\n assert socket.recv() == message\n\n socket.close()\n context.destroy()", "def test_message_pin():\n user_data = server_create_user(\n \"email@email.com\", \"password\", \"Billy\", \"Batson\")\n channel_data = server_create_channel(\n user_data['token'], 'test_channel', True)\n message_str = \"This is a test message!\"\n message_payload = json.dumps(\n {'token': user_data['token'],\n 'channel_id': channel_data['channel_id'],\n 'message': message_str}).encode('utf-8')\n send_msg_req = urllib.request.Request(f\"{get_url()}/message/send\",\n data=message_payload,\n headers={\n \"Content-Type\": \"application/json\"},\n method='POST')\n response = urllib.request.urlopen(send_msg_req)\n decoded_send_response = json.load(response)\n\n pin_payload = json.dumps(\n {'token': user_data['token'],\n 'message_id': decoded_send_response['message_id']}).encode('utf-8')\n pin_msg_req = urllib.request.Request(f\"{get_url()}/message/pin\",\n data=pin_payload,\n headers={\n \"Content-Type\": \"application/json\"},\n method='POST')\n urllib.request.urlopen(pin_msg_req)\n\n response_details = urllib.request.urlopen(\n f\"{get_url()}/channel/messages?token={user_data['token']}\"\n + f\"&channel_id={channel_data['channel_id']}&start={0}\")\n details_decoded = json.load(response_details)\n\n assert details_decoded['messages'][0].get('is_pinned') is True", "def test_publish_message(self):\n pass", "def test_publish(self, mock_channel):\n mock_channel = mock.MagicMock(name='basic_publish')\n mock_channel.basic_publish.return_value = mock.Mock()\n publish('message', mock_channel, 'exchange', 'routing')\n 
mock_channel.basic_publish.assert_called()", "def test_pushProducer(self):\n\n @implementer(IPushProducer)\n class PushProducer:\n resumed = False\n\n def __init__(self, toProduce):\n self.toProduce = toProduce\n\n def resumeProducing(self):\n self.resumed = True\n\n def start(self, consumer):\n self.consumer = consumer\n consumer.registerProducer(self, True)\n self._produceAndSchedule()\n\n def _produceAndSchedule(self):\n if self.toProduce:\n self.consumer.write(self.toProduce.pop(0))\n reactor.callLater(0, self._produceAndSchedule)\n else:\n self.consumer.unregisterProducer()\n\n d = self._producertest(PushProducer)\n\n def finished(results):\n (client, server) = results\n self.assertFalse(\n server.producer.resumed,\n \"Streaming producer should not have been resumed.\",\n )\n\n d.addCallback(finished)\n return d", "def push_message(message, channel):\n # assume the hookbox server is on localhost:2974 \n url = \"http://127.0.0.1:2974/rest/publish\"\n\n values = { \"secret\" : \"bakonv8\",\n \"channel_name\" : channel,\n \"payload\" : message\n }\n\n formdata = urllib.urlencode(values)\n req = urllib2.Request(url, formdata)\n resp = urllib2.urlopen(req)\n\n # the hookbox response can be useful for debugging,\n # but i'm commenting it out.\n #page = resp.read()\n #print page", "def testNotifications(self):\n req = {\n 'jsonrpc': '2.0',\n 'method': 'greet',\n }\n msg = json.dumps(req)\n self.sock.sendall(msg.encode())\n time.sleep(0.1)\n res = self.sock.recv(1024).decode()\n self.assertEqual(res, '')", "async def test_pubsub_patterns(self, transport, protocol):\n\n async def listener():\n # Subscribe to two patterns\n transport2, protocol2 = await connect()\n\n subscription = await protocol2.start_subscribe()\n await subscription.psubscribe([\"h*llo\", \"w?rld\"])\n\n # Receive messages\n results = []\n for i in range(4):\n results.append((await subscription.next_published()))\n\n self.assertEqual(\n results,\n [\n PubSubReply(\"hello\", \"message1\", pattern=\"h*llo\"),\n PubSubReply(\"heello\", \"message2\", pattern=\"h*llo\"),\n PubSubReply(\"world\", \"message3\", pattern=\"w?rld\"),\n PubSubReply(\"wArld\", \"message4\", pattern=\"w?rld\"),\n ],\n )\n\n transport2.close()\n\n f = asyncio.ensure_future(listener())\n\n async def sender():\n # Should not be received\n await protocol.publish(\"other-channel\", \"message5\")\n\n # These for should be received.\n await protocol.publish(\"hello\", \"message1\")\n await protocol.publish(\"heello\", \"message2\")\n await protocol.publish(\"world\", \"message3\")\n await protocol.publish(\"wArld\", \"message4\")\n\n await asyncio.sleep(0.5)\n await sender()\n await f", "def test_collapsingPumpPolicy(self):\n bytes = []\n client = Protocol()\n client.dataReceived = bytes.append\n queue = loopback._LoopbackQueue()\n queue.put(b\"foo\")\n queue.put(b\"bar\")\n queue.put(None)\n\n loopback.collapsingPumpPolicy(queue, client)\n\n self.assertEqual(bytes, [b\"foobar\"])", "def test_sendMsg(self):\n # Send test message\n testMsg = b'123456789'\n msgBytes = testMsg\n self.radio.sendMsg(testMsg)\n time.sleep(0.1)\n self.radio.readBytes(True)\n readBytes = self.radio.getRxBytes()\n assert(readBytes == msgBytes)", "def launch(args, message, headers, formatter, position=0):\n credentials = pika.PlainCredentials(args.username, args.password)\n props = pika.BasicProperties(content_type='application/json',\n headers=headers,\n delivery_mode=2)\n connection = pika.BlockingConnection(pika.ConnectionParameters(\n host=args.host,\n port=args.port,\n 
credentials=credentials))\n channel = connection.channel()\n\n # tqdm the range for pretty metrics\n for i in tqdm(range(args.bunnos), position=position):\n channel.basic_publish(exchange=args.exchange,\n routing_key=args.routing_key,\n properties=props,\n body=formatter.format(message))\n\n connection.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple publisher-subscriber pattern test with different serializations.
def test_pubsub(nsproxy, serializer, message):
    a0 = run_agent('a0')
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a0.bind('PUB', alias='pub', serializer=serializer)
    a1.connect(addr, handler=set_received)
    while not a1.get_attr('received'):
        a0.send('pub', message)
        time.sleep(0.1)
    assert a1.get_attr('received') == message
[ "def test_multiple_publishers_one_subscriber(self):\n\n def client(port, result_queue, registrations):\n def callback(cb_topic, cb_message_data, cb_associated_data):\n self.assertIn(int(cb_topic), registrations)\n expected_message, data = registrations[int(cb_topic)]\n self.assertEqual(expected_message, cb_message_data)\n self.assertEqual(data, cb_associated_data)\n\n return True, (int(cb_topic), cb_message_data)\n\n listener = Listener(port)\n try:\n for topic, (_, data) in iter(registrations.items()):\n listener.register(str(topic), callback, data)\n\n for result in listener.listen():\n result_queue.put(result)\n except Exception as ex:\n result_queue.put(ex)\n\n result_queue.put(None)\n\n messages = [\n (9827, 'first message', ),\n (9829, 'second message', ),\n (9831, 'last message', ),\n ]\n registrations = {\n 9827: ('first message', {'exdata': 654}),\n 9829: ('second message', {'exdata': 873}),\n 9831: ('last message', {'exdata': 298}),\n }\n\n actual = self._distribute_and_collate(client, server, messages, registrations, client_port=5561)\n\n self.assertEqual(set(messages), set(actual))", "def test_publish_qos1_sub_qos1(self):\n self.publisher.publish(\"/test/publish/qos1\", \"test_publish_qos1\", 1)\n msg = self.subscriber_qos1.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos1')\n self.assertEqual(msg.qos, 1)", "def test_publish_qos0_sub_qos1(self):\n self.publisher.publish(\"/test/publish/qos0\", \"test_publish_qos0\", 0)\n msg = self.subscriber_qos1.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos0')\n self.assertEqual(msg.qos, 1)", "def test_publish_qos1_sub_qos0(self):\n self.publisher.publish(\"/test/publish/qos1\", \"test_publish_qos1\", 1)\n msg = self.subscriber_qos0.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos1')\n self.assertEqual(msg.qos, 0)", "def test_publish_qos1_sub_qos2(self):\n self.publisher.publish(\"/test/publish/qos1\", \"test_publish_qos1\", 1)\n msg = self.subscriber_qos2.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos1')\n self.assertEqual(msg.qos, 2)", "def test_one_switch_oversubscribe(self):\n pass", "def test_publish_message(self):\n pass", "def publisher():\n backend = get_backend(\"school_backends\", BACKEND, CHANNEL, \"my.app\")\n for x in range(0, 100):\n data = {\"foo\": \"bar\", \"nested\": [{\"foo\": \"baz\"}]}\n\n print(\"-----------------------\")\n publish(backend, random.choice(events), data)\n sleep_time = random.choice(range(0, 10))\n print(\"Next publication in {}\".format(sleep_time))\n time.sleep(sleep_time)", "async def test_pubsub_patterns(self, transport, protocol):\n\n async def listener():\n # Subscribe to two patterns\n transport2, protocol2 = await connect()\n\n subscription = await protocol2.start_subscribe()\n await subscription.psubscribe([\"h*llo\", \"w?rld\"])\n\n # Receive messages\n results = []\n for i in range(4):\n results.append((await subscription.next_published()))\n\n self.assertEqual(\n results,\n [\n PubSubReply(\"hello\", \"message1\", pattern=\"h*llo\"),\n PubSubReply(\"heello\", \"message2\", pattern=\"h*llo\"),\n PubSubReply(\"world\", \"message3\", pattern=\"w?rld\"),\n PubSubReply(\"wArld\", \"message4\", pattern=\"w?rld\"),\n ],\n )\n\n transport2.close()\n\n f = asyncio.ensure_future(listener())\n\n async def sender():\n # Should not be received\n await protocol.publish(\"other-channel\", 
\"message5\")\n\n # These for should be received.\n await protocol.publish(\"hello\", \"message1\")\n await protocol.publish(\"heello\", \"message2\")\n await protocol.publish(\"world\", \"message3\")\n await protocol.publish(\"wArld\", \"message4\")\n\n await asyncio.sleep(0.5)\n await sender()\n await f", "def test_publish_qos0_sub_qos0(self):\n self.publisher.publish(\"/test/publish/qos0\", \"test_publish_qos0\", 0)\n msg = self.subscriber_qos0.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos0')\n self.assertEqual(msg.qos, 0)", "def test_roundtrip_msg(self):\n for ser in self._test_serializers:\n\n for contains_binary, msg in self._test_messages:\n\n # serialize message\n payload, binary = ser.serialize(msg)\n\n # unserialize message again\n msg2 = ser.unserialize(payload, binary)\n\n # must be equal: message roundtrips via the serializer\n self.assertEqual([msg], msg2)", "def test_custom_serializer(self):\n msg = \"my message\"\n # Test using None:\n self.queue.serializer = None\n self.queue.put(msg)\n self.assertEqual(self.queue.get(), msg)\n self.queue.put({\"a\": 1})\n self.assertEqual(self.queue.get(), \"{'a': 1}\") # Should be a string\n # Test using DummySerializer:\n self.queue.serializer = DummySerializer\n self.queue.put(msg)\n self.assertEqual(self.queue.get(), \"foo\")", "def test_messages(self):\n # Prepare service 1\n context = self.framework.get_bundle_context()\n config, svc_ref = self._setup_mqtt(context)\n mqtt_1 = context.get_service(svc_ref)\n\n # Prepare service 2\n config_2, svc_ref = self._setup_mqtt(context)\n mqtt_2 = context.get_service(svc_ref)\n\n # Assert that we have two different services\n self.assertIsNot(mqtt_1, mqtt_2, \"Same services returned\")\n\n # Register a publisher\n listener = Listener()\n lst_reg = context.register_service(\n services.SERVICE_MQTT_LISTENER, listener,\n {services.PROP_MQTT_TOPICS: \"/pelix/test/#\"})\n\n # Check the initial test condition\n self.assertListEqual(listener.messages, [], \"Invalid precondition\")\n\n # Send a message\n topic = \"/pelix/test/foobar\"\n payload = self.__send_message(mqtt_1, topic, 1)\n\n # Wait for it\n for _ in range(10):\n try:\n msg_topic, msg_payload, qos = listener.messages.pop()\n break\n except IndexError:\n time.sleep(.5)\n else:\n self.fail(\"Got no message\")\n\n # Check message\n self.assertEqual(msg_topic, topic)\n self.assertEqual(msg_payload, payload)\n\n # Test with a filtered out topic\n topic = \"/pelix/foo/bar\"\n self.__send_message(mqtt_1, topic, 1)\n\n # Wait for something\n for _ in range(6):\n try:\n msg_topic, msg_payload, qos = listener.messages.pop()\n except IndexError:\n time.sleep(.5)\n else:\n # It is possible we got a copy of the previous message\n # (QOS 1: at least one time)\n if msg_topic == topic:\n self.fail(\"Got a message that should be filtered: {}\"\n .format(msg_topic))\n\n # Change topic filter\n lst_reg.set_properties({services.PROP_MQTT_TOPICS: \"/pelix/foo/#\"})\n payload = self.__send_message(mqtt_1, topic, 1)\n\n # Wait for it\n for _ in range(10):\n try:\n msg_topic, msg_payload, qos = listener.messages.pop()\n break\n except IndexError:\n time.sleep(.5)\n else:\n self.fail(\"Got no message\")\n\n # Check message\n self.assertEqual(msg_topic, topic)\n self.assertEqual(msg_payload, payload)\n\n # Unregister service\n lst_reg.unregister()\n\n # Clean up\n del listener.messages[:]\n\n # Send a message\n self.__send_message(mqtt_1, topic, 1)\n\n # Wait for something\n for _ in range(6):\n try:\n 
listener.messages.pop()\n except IndexError:\n time.sleep(.5)\n else:\n self.fail(\"Got an unexpected message\")\n\n # Clean up\n config.delete()\n config_2.delete()", "def test_pubsub_with_model_and_api(self, mock_pub_func):\n # Testing a Biobank order with sample records will test the recursive loop code, and\n # we should see multiple Pub/Sub messages are sent.\n mock_pub_func.return_value = {'messageIds': ['123']}\n\n # Simulate a Biobank order API call with multiple order sample records attached.\n self.summary_dao.insert(self.participant_summary(self.participant))\n order_json = load_biobank_order_json(self.participant.participantId, filename=\"biobank_order_2.json\")\n result = self.send_post(self.path, order_json)\n\n # Test response properties\n self.assertIsInstance(result, dict)\n self.assertEqual(result['id'], 'WEB1YLHV123')\n self.assertEqual(result['origin'], 'example')\n\n # Test Pub/Sub messages successfully sent.\n self.assertTrue(mock_pub_func.called)\n # We now get two extra calls due to participant enrollment re-calculations after biobank order is submitted.\n self.assertEqual(mock_pub_func.call_count, 5)", "def check_for_publishers(self, sub_data):\n\n # At least two instances must be present to compare:\n if (sub_data == None) or (len(sub_data) <= 1):\n raise TestError(\"DDS2466TestScenario::check_for_publishers - at least two samples must be present in the subscriber sample list\")\n\n # Get all id for all samples:\n sample_ids = []\n last_pub_ids = []\n for sample in sub_data:\n if sample.id not in sample_ids:\n sample_ids.append(sample.id)\n last_pub_ids.append(-1)\n\n index = 0\n # Check all subscriber samples:\n for id in sample_ids:\n # Find the latest publisher for the sample with id for subscriber:\n for sample in sub_data:\n # Only for the current id:\n if sample.id == id:\n last_pub_ids[index] = sample.pub_id\n index += 1\n\n # Check for the same last publisher:\n first_pub_id = last_pub_ids[0]\n if first_pub_id == -1:\n raise TestError(\"DDS2466TestScenario::check_for_publishers - no publisher found for the msg_id[%s]\"% sample_ids[0])\n\n index = 1\n for pub_id in last_pub_ids[1:]:\n # Check for the publisher:\n if first_pub_id != pub_id:\n raise TestError(\"DDS2466TestScenario::check_for_publishers - pub_id[%s] for the msg_id[%s] breaks the rule - the last publisher id must be [%s]\"%\\\n (pub_id, sample_ids[index], first_pub_id))\n index += 1", "def main():\r\n subscribe = Subscription()\r\n print(subscribe.name)\r\n subscribe.duplicate()\r\n subscribe.overridden()", "def test_onPublish(self):\n\n xml = \"\"\"\n <iq type='set' to='pubsub.example.org'\n from='user@example.org'>\n <pubsub xmlns='http://jabber.org/protocol/pubsub'>\n <publish node='test'/>\n </pubsub>\n </iq>\n \"\"\"\n\n def publish(requestor, service, nodeIdentifier, items):\n self.assertEqual(JID('user@example.org'), requestor)\n self.assertEqual(JID('pubsub.example.org'), service)\n self.assertEqual('test', nodeIdentifier)\n self.assertEqual([], items)\n return defer.succeed(None)\n\n self.service.publish = publish\n return self.handleRequest(xml)", "def testIsSubscribedIdentity(self):\n a = constant_op.constant(1)\n b = constant_op.constant(2)\n c = math_ops.add(a, b)\n idop = array_ops.identity(c)\n c_sub = subscribe.subscribe(c, [])\n\n self.assertFalse(subscribe._is_subscribed_identity(a))\n self.assertFalse(subscribe._is_subscribed_identity(c))\n self.assertFalse(subscribe._is_subscribed_identity(idop))\n self.assertTrue(subscribe._is_subscribed_identity(c_sub))", "async def 
__subscribe_upstream(self, message):\n payload = message.payload\n if isinstance(payload, AbstractActor):\n if payload not in self.subscribers:\n self.subscribers.append(payload)\n else:\n msg = \"Can Only Subscribe Object of Abstract Actor to StreamPubSub\"\n logging.error(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple publisher-subscriber pattern test. Channel without serialization. The message is sent from outside osBrain, through a ZMQ PUB socket.
def test_pubsub_raw_zmq_outside(nsproxy):
    # Create an osBrain agent that will receive the message
    a1 = run_agent('a1')
    a1.set_attr(received=None)
    addr = a1.bind(
        'SUB', transport='tcp', handler=set_received, serializer='raw'
    )
    # Create a raw ZeroMQ PUB socket
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.connect('tcp://%s:%s' % (addr.address.host, addr.address.port))
    # Send the message
    message = b'Hello world'
    while not a1.get_attr('received'):
        socket.send(message)
        time.sleep(0.01)
    assert a1.get_attr('received') == message
    socket.close()
    context.destroy()
[ "def test_pubsub(nsproxy, serializer, message):\n a0 = run_agent('a0')\n a1 = run_agent('a1')\n a1.set_attr(received=None)\n addr = a0.bind('PUB', alias='pub', serializer=serializer)\n a1.connect(addr, handler=set_received)\n while not a1.get_attr('received'):\n a0.send('pub', message)\n time.sleep(0.1)\n assert a1.get_attr('received') == message", "def __create_ZMQ_publisher(self):\n\n success, self.port, self.ip = self.__network_selection()\n if success: \n # Create a new ZeroMQ context and a publisher socket\n try:\n context = zmq.Context()\n # Define the socket using the \"Context\"\n self.sock = context.socket(zmq.PUB)\n #Set the topic of the publisher and the end_point\n self.__connect_ZMQ_socket()\n self.connected = True\n except:\n print (\"NEP ERROR: socket already in use\")\n \n time.sleep(1)\n #This delay in important, whithout them the comunication is not effective\n \n # ZeroMQ note:\n # There is one more important thing to know about PUB-SUB sockets: \n # you do not know precisely when a subscriber starts to get messages.\n # Even if you start a subscriber, wait a while, and then start the publisher, \n # the subscriber will always miss the first messages that the publisher sends. \n\n\n # In Chapter 2 - Sockets and Patterns we'll explain how to synchronize a \n # publisher and subscribers so that you don't start to publish data until \n # the subscribers really are connected and ready. There is a simple and \n # stupid way to delay the publisher, which is to sleep. Don't do this in a\n # real application, though, because it is extremely fragile as well as\n # inelegant and slow. Use sleeps to prove to yourself what's happening, \n # and then wait for \n # Chapter 2 - Sockets and Patterns to see how to do this right", "def test_publish_message(self):\n pass", "def test_publish(self, mock_channel):\n mock_channel = mock.MagicMock(name='basic_publish')\n mock_channel.basic_publish.return_value = mock.Mock()\n publish('message', mock_channel, 'exchange', 'routing')\n mock_channel.basic_publish.assert_called()", "async def test_pubsub_patterns(self, transport, protocol):\n\n async def listener():\n # Subscribe to two patterns\n transport2, protocol2 = await connect()\n\n subscription = await protocol2.start_subscribe()\n await subscription.psubscribe([\"h*llo\", \"w?rld\"])\n\n # Receive messages\n results = []\n for i in range(4):\n results.append((await subscription.next_published()))\n\n self.assertEqual(\n results,\n [\n PubSubReply(\"hello\", \"message1\", pattern=\"h*llo\"),\n PubSubReply(\"heello\", \"message2\", pattern=\"h*llo\"),\n PubSubReply(\"world\", \"message3\", pattern=\"w?rld\"),\n PubSubReply(\"wArld\", \"message4\", pattern=\"w?rld\"),\n ],\n )\n\n transport2.close()\n\n f = asyncio.ensure_future(listener())\n\n async def sender():\n # Should not be received\n await protocol.publish(\"other-channel\", \"message5\")\n\n # These for should be received.\n await protocol.publish(\"hello\", \"message1\")\n await protocol.publish(\"heello\", \"message2\")\n await protocol.publish(\"world\", \"message3\")\n await protocol.publish(\"wArld\", \"message4\")\n\n await asyncio.sleep(0.5)\n await sender()\n await f", "def test_publish_qos0_sub_qos0(self):\n self.publisher.publish(\"/test/publish/qos0\", \"test_publish_qos0\", 0)\n msg = self.subscriber_qos0.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos0')\n self.assertEqual(msg.qos, 0)", "def test_publish_qos1_sub_qos0(self):\n 
self.publisher.publish(\"/test/publish/qos1\", \"test_publish_qos1\", 1)\n msg = self.subscriber_qos0.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos1')\n self.assertEqual(msg.qos, 0)", "def test_publish_qos0_sub_qos1(self):\n self.publisher.publish(\"/test/publish/qos0\", \"test_publish_qos0\", 0)\n msg = self.subscriber_qos1.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos0')\n self.assertEqual(msg.qos, 1)", "def test_topic_send_receive(self):\n\n conn = self.rpc.create_connection()\n message = 'topic test message'\n\n self.received_message = None\n\n def _callback(message):\n self.received_message = message\n\n conn.declare_topic_consumer('a_topic', _callback)\n conn.topic_send('a_topic', message)\n conn.consume(limit=1)\n conn.close()\n\n self.assertEqual(self.received_message, message)", "def test_push(self):\n self.try_topic('push', 'push')", "def test_consume(self, mock_publish, mock_pika):\n work = mock.Mock()\n work.return_value = mock.MagicMock()\n consume(work, mock_pika, 'queue', 'routing')\n mock_pika.channel.assert_called()", "def test_publish_qos1_sub_qos1(self):\n self.publisher.publish(\"/test/publish/qos1\", \"test_publish_qos1\", 1)\n msg = self.subscriber_qos1.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos1')\n self.assertEqual(msg.qos, 1)", "def listen(publisher):\n global client\n client.subscribe(SUBSCRIBER_TOPIC, 1, on_message)\n print('Subscribed to topic.')\n while listening:\n time.sleep(10)", "def subscribeAtQoS_BrokerPublishMessageAtQoS_Runner(xift, subscribe_qos, publish_qos ):\n ###\n # broker flow\n # 1. accept connection, send connack\n # 2. accept the control topic topic subscription\n # 3. accept the standard topic subscription, check for 'subscribe_qos'\n # 4. Cleanup on Disconnect\n broker_flow = [ call.broker.on_client_connect(ANY, ANY),\n # 2. accept the control topic topic subscription\n call.broker.on_client_subscribe(ANY, ANY,\n [(TestEssentials.control_topic_name, 1)], 0),\n # 3. accept the standard topic subscription, check for 'subscribe_qos'\n call.broker.on_client_subscribe(ANY, ANY,\n [(TestEssentials.topic_as_signed_chars, subscribe_qos)], 0),\n call.broker.on_client_disconnect(xift.broker, ANY, MQTT_ERR_SUCCESS)]\n\n # 1. accept connection, send connack\n xift.broker.on_client_connect.side_effect = lambda userdata, connect_options: \\\n xift.broker.send_connack(mqtt_messages.CONNACK_ACCEPTED)\n\n # 2. accept the control topic topic subscription\n # 3. accept the standard topic subscription\n xift.broker.on_client_subscribe.side_effect = lambda userdata, msg_id, topics_and_qos, dup: \\\n xift.broker.send_suback( msg_id, topics_and_qos )\n\n # 4. Cleanup on Disconnect\n xift.broker.on_client_disconnect.side_effect = lambda br, userdata, rc: \\\n xift.broker.trigger_shutdown()\n\n ###\n # client flow\n # 1. connect (automagically through act())\n # 1a. hidden, subscribe to control topic.\n # 2. on connect response, subscribe to topic\n # 3. subscription callback invoked, broker to publish to topic client subscribed to.\n # 4. on message callback, validate message, shutdown connection\n # 5. on shutdown callback, stop.\n\n # Validates that the message parameters are what we expect to recieve on the client.\n client_flow = [ call.client.on_connect_finish(mqtt_messages.CONNACK_ACCEPTED),\n call.client.on_subscribe_finish([ANY]),\n # 4. 
on message callback, validate message,\n call.client.on_message_received(ANY,\n BasicMessageMatcher(TestEssentials.topic,\n publish_qos,\n TestEssentials.payload_as_bytes ) ),\n call.client.on_disconnect(ANY)]\n\n #2. on connect response, subscribe to topic\n xift.client_sut.on_connect_finish.side_effect = lambda connect_res: \\\n xift.client_sut.subscribe([[TestEssentials.topic, subscribe_qos]])\n\n # 3. subscription callback invoked, broker publish to topic client subscribed to.\n xift.client_sut.on_subscribe_finish.side_effect = lambda granted_access_list: \\\n xift.broker.publish( TestEssentials.topic, TestEssentials.payload_as_string, publish_qos )\n\n # 4. on message callback, shutdown connection\n xift.client_sut.on_message_received.side_effect = lambda topic, message: \\\n xift.client_sut.disconnect()\n\n # 5. on shutdown callback, stop.\n xift.client_sut.on_disconnect.side_effect = lambda return_code: xift.client_sut.stop()\n\n ##\n #Act\n act(xift)\n\n ##\n #Assert\n assert client_flow == xift.mock_call_history_client.method_calls\n assert broker_flow == xift.mock_call_history_broker.method_calls", "async def __subscribe_upstream(self, message):\n payload = message.payload\n if isinstance(payload, AbstractActor):\n if payload not in self.subscribers:\n self.subscribers.append(payload)\n else:\n msg = \"Can Only Subscribe Object of Abstract Actor to StreamPubSub\"\n logging.error(msg)", "def test_pubsubhubbub_required_callback(self):\n topic = (\n \"hub.topic\",\n \"https://github.com/octocat/hello-world/events/push\",\n )\n body = [(\"hub.mode\", \"subscribe\"), topic, (\"hub.callback\", \"\")]\n data = {k[4:]: v for k, v in body}\n self.instance.pubsubhubbub(**data)\n assert self.session.post.called is False", "def test_onPublish(self):\n\n xml = \"\"\"\n <iq type='set' to='pubsub.example.org'\n from='user@example.org'>\n <pubsub xmlns='http://jabber.org/protocol/pubsub'>\n <publish node='test'/>\n </pubsub>\n </iq>\n \"\"\"\n\n def publish(requestor, service, nodeIdentifier, items):\n self.assertEqual(JID('user@example.org'), requestor)\n self.assertEqual(JID('pubsub.example.org'), service)\n self.assertEqual('test', nodeIdentifier)\n self.assertEqual([], items)\n return defer.succeed(None)\n\n self.service.publish = publish\n return self.handleRequest(xml)", "def publish_message(self, message, queue):", "def test_publish_qos1_sub_qos2(self):\n self.publisher.publish(\"/test/publish/qos1\", \"test_publish_qos1\", 1)\n msg = self.subscriber_qos2.wait_for_message(2)\n self.assertIsNotNone(msg)\n self.assertEqual(msg.payload, b'test_publish_qos1')\n self.assertEqual(msg.qos, 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the weighted value of this input
def getWeightedValue():
    return weight*value
[ "def get_weight(self) -> float:\n return 0", "def get_weight(self, temp):\n return self.temp_dict[temp]['weight']", "def getWeight(self):\n return np.concatenate([self.weight.ravel()] * 4)", "def easyWeighting(self, weights, values):\n summedVal = 0 \n for k, weight in enumerate(weights): \n summedVal += weight * values[k] \n \n return summedVal", "def total_weight(self):\n return self.weight_fun(self.graph, self.path)", "def weights(self) :\n\t\treturn sign(self.L) #1/(self.L + 0.00001) ", "def get_calc_weight(self):\n # TODO: Make sure piece weight is being imported correctly\n weight = db.run_sql(\"SELECT SUM(bl_inventories.quantity * parts.weight) FROM bl_inventories JOIN parts\"\n \" ON bl_inventories.piece_id = parts.id\"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return weight", "def weight(o):\n return o[1]", "def getWeightGenerality(self):\r\n return self.weightedGenerality", "def weight(self):\n return sum(e*L.weight() for L,e in self._factorization)", "def calc_water(self, weight):\n\n water_you_need = weight / self.pound / self.constant_2\n return water_you_need", "def FinalWeight(self):\n return self.Aircraft['Max Take Off Weight'] - self.Aircraft['Fuel Weight']", "def getWeight(self):\n return self.vertexWeight", "def _weights_field_scalar(self, methods=False):\n data = Data(1.0, \"1\")\n\n f = type(self)()\n f.set_data(data, copy=False)\n f.long_name = \"weight\"\n f.comment = f\"Weights for {self!r}\"\n\n return f", "def weight_pt(self):\n return self.container['weight_pt']", "def sumw ( self ) :\n N = len ( self )\n if 0 == N : return 0 \n g = ( self.weight ( i ) for i in range ( N ) ) \n return sum ( g )", "def weight(self):\n vec = np.array([[reqt.weight for reqt in self.requirements]])\n return vec.T # Return as column vector", "def get_weights(self):\r\n return self.weights # returning the weight matrix\r", "def calculate_weight(self):\n\n\t\tweight = 0\n\t\tfor item in self.items:\n\t\t\tif item == \"Health Potions\" or item == \"Magic Potions\":\n\t\t\t\tweight += self.items[item]\n\n\t\tself.weight = weight" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return sink of this input edge
def getSink():
    return sink
[ "def src_sink(self) -> SrcSink:\n pass", "def signal(self):\n return self.source.output", "def sink_path(self):\n return self._sink_path if self.enabled else None", "def get_propagate_wire(self):\r\n return self.out.get_wire(2)", "def getSinkFlow(self):\r\n return self.sink.sensors[0].getFlowRatePct()", "def getSinkState(self):\r\n return self.sink.actuators[0].getState()", "def __deref__(self):\n return _wmbus_swig.wmbus_packet_sink_sptr___deref__(self)", "def Sinks(self):\n # ------------------------------------------------------------------------\n return [self.scope.Sink(), self.fft.Sink(), self.histo.Sink(),\n self.waterfall.Sink()]", "def __iter__(self):\n raise SyntaxError(\n 'Sinks cannot be inputs: ' + str(self.__class__.__name__))", "def get_sinks(self):\n res = []\n for vertex in self.__graph_dict:\n # dont know if this isolated really helps here , not sure\n if len(self.__graph_dict[vertex]) == 0:\n res.append(vertex)\n return res", "def get_edge(self, target):\n return self.target2edge.get(target, None)", "def __deref__(self):\n return _raw_util.raw_message_sink_sptr___deref__(self)", "def __init__(self, sink):\n\n self.sink = sink", "def get_edge(self, destination):\r\n for edge in self.edges:\r\n if edge.destination == destination:\r\n return edge\r\n return None", "def sink(self, species):\n return self.__graph.children(species)", "def test_neuron_sink(self):\n with nengo.Network():\n a = nengo.Ensemble(100, 2)\n b = nengo.Ensemble(100, 4)\n\n a_b = nengo.Connection(a.neurons, b.neurons, transform=np.eye(100))\n\n # Create a model with the Ensemble for b in it\n model = builder.Model()\n b_ens = operators.EnsembleLIF(b)\n model.object_operators[b] = b_ens\n\n # Get the sink, check that an appropriate target is return\n sink = ensemble.get_neurons_sink(model, a_b)\n assert sink.target.obj is b_ens\n assert sink.target.port is ensemble.EnsembleInputPort.neurons", "def best_match(self) -> Iterable[SrcSink]:\n pass", "def get_out_vertex(self):", "def edge_s(self, edge):\n return self.edge_st(edge)[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the currently configured storefront search function. Returns a callable that accepts the search phrase.
def pick_backend():
    return import_module(settings.SEARCH_BACKEND).search_storefront
[ "def get_function_search_session(self):\n if not self.supports_function_search():\n raise errors.Unimplemented()\n # pylint: disable=no-member\n return sessions.FunctionSearchSession(runtime=self._runtime)", "def get_function_search_session(self):\n return # osid.authorization.FunctionSearchSession", "def do_alfred_search(self):\n dirpath = Dirpath.dirpath(self.query).abbr_noslash\n return run_trigger('search', arg='{} {} '.format(dirpath, DELIMITER))\n # return run_alfred(':fzysrch {} {} '.format(dirpath, DELIMITER))", "def supports_function_search(self):\n return # boolean", "def supports_function_search(self):\n return False # Change to True when implemented.", "def apply_query():\n s = Search(using=ES_OBJECT, index=SEARCHING_INDEX)\n if INDEXATION_MODE == \"autocomplete\":\n logging.info(\"Applying autocomplete search\")\n s.update_from_dict(\n autocomplete_query(QUERY, FIELDS_TO_SEARCH, popularity_field=POPULARITY_FIELD)\n )\n elif INDEXATION_MODE in [\"basic_english\", \"french\"]:\n logging.info(\"Applying multi match search with fuzziness if set in yaml\")\n s.update_from_dict(\n multi_match_query(QUERY, FIELDS_TO_SEARCH, fuzziness=FUZZINESS)\n )\n else:\n raise NotImplementedError(\"Mode d'indexation choisi pas setup\")\n return s", "def api_search(self):\n g.list_callback = 'api_search'\n\n if not getattr(self, 'check_%s' % request.method.lower())():\n return self.response_forbidden()\n\n # terms to search for\n search_term = request.args.get('query') or ''\n\n # the engine to use\n engine = request.args.get('engine') or ''\n\n # construct a raw query\n query = self.get_query()\n query = self.apply_ordering(query)\n\n if engine == 'default':\n # search in default fields\n\n # split keywords by blank chars\n kw_set = set(re.split(r'\\s+', search_term, re.U))\n kw_set.discard('')\n if kw_set and self._search.get('default', []):\n query = self.apply_search_query(\n query, list(kw_set), self._search['default'])\n else:\n # more complicated search methods\n # split query to 'field:(terms)'' or 'term' using the\n # following regular expression\n regex = re.compile(\n '((?:\\w+:\\([^)]*\\))|(?:\\w+:[^()\\s]+)|[^:\\s]+)', re.U)\n kw_split_list = regex.findall(search_term)\n search_kw = MultiDict()\n\n for kw in kw_split_list:\n try:\n sp = kw.index(':')\n key = kw[0:sp]\n val = kw[sp + 1:]\n if val.startswith('(') and val.endswith(')'):\n # expand\n for x in re.split(r'\\s+', val[1:-1], re.U):\n x and search_kw.add(key, x)\n else:\n # single term\n search_kw.add(key, val)\n\n except ValueError:\n # single word\n search_kw.add('default', kw)\n\n # apply search filter engine by engine\n for engine, kws in search_kw.iterlists():\n kw_set = set(kws)\n kw_set.discard('')\n if kw_set and self._search.get(engine, []):\n query = self.apply_search_query(\n query, list(kw_set), self._search[engine])\n\n # apply output limit \n if self.paginate_by or 'limit' in request.args:\n return self.paginated_object_list(query)\n\n return self.response(self.serialize_query(query))", "def _execute_search(self, text):\n self._search_text = text[1:]\n\n #\n # if the user input is only \"/\" (starting to type something), hint\n # that they are entering the Search mode. nothing else to do!\n #\n\n if text == \"/\":\n self._line_label.setText(\"Search\")\n return\n\n #\n # stop an existing command timer if there is one running. we are about\n # to schedule a new one or execute inline. 
so the old/deferred command\n # is no longer needed.\n #\n\n self._command_timer.stop()\n\n #\n # if the functions list is HUGE, we want to defer the filtering until\n # we think the user has stopped typing as each pass may take awhile\n # to compute (while blocking the main thread...)\n #\n\n if self._director.metadata.is_big():\n self._command_timer = singleshot(1000, self._execute_search_internal)\n self._command_timer.start()\n\n #\n # the database is not *massive*, let's execute the search immediately\n #\n\n else:\n self._execute_search_internal()\n\n # done\n return", "def get_filter_function(study_name: str) -> Callable:\n if study_name not in _filter_funcs:\n return _filter_funcs[\"*\"]\n\n return _filter_funcs[study_name]", "def get_search_query(request):\n return (request.GET.get('q') or request.session.get('query') or\n get_setting('DEFAULT_QUERY'))", "def match_ticker_to_searchstring(searchstr):\n\n # for the time being implement it this way\n # return Ticker.objects.get(symbol=searchstr)\n return searchstr", "def get_search_query(s, file_extensions):\n words = get_words(s)\n if words and words[-1] in file_extensions:\n words = words[:-1]\n return ' '.join(words)", "def get_search_query(self, search):\n \"\"\"This can prevent us from searching manually\"\"\"\n return quote(search)", "def get_function_search_session_for_vault(self, vault_id):\n return # osid.authorization.FunctionSearchSession", "def get_search_handler(self, request, search_fields=None):\n return self.search_handler_class(\n search_fields or self.get_search_fields(request)\n )", "def search():\n if not g.search_form.validate_on_submit():\n return redirect(url_for('index'))\n # Redirect to search_results function and pass search query\n return redirect(url_for('search_results', query=g.search_form.search.data))", "def _set_search(self, strategy = \"d\"):\n\n if strategy == \"a\":\n self.search = self.search_aggressive\n elif strategy == \"d\":\n self.search = self.search_default\n elif strategy == \"c\":\n self.search = self.search_cautious\n elif strategy == \" e\":\n self.search = self.search_evade\n else:\n self.search = self.search_default", "def get_filter(self) -> Callable[[str], bool]:\n return self._filter", "def locate_qualified_function(qualified_name: str) -> Callable[[], Iterable[ET]]:\n if \".\" not in qualified_name:\n raise QueryException(\"Could not find a '.' in the function name, e.g. my.reddit.rexport.comments\")\n rdot_index = qualified_name.rindex(\".\")\n return locate_function(qualified_name[:rdot_index], qualified_name[rdot_index + 1:])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the currently configured dashboard search function. Returns a callable that accepts the search phrase.
def pick_dashboard_backend():
    return import_module(settings.SEARCH_BACKEND).search_dashboard
[ "def get_filter_function(study_name: str) -> Callable:\n if study_name not in _filter_funcs:\n return _filter_funcs[\"*\"]\n\n return _filter_funcs[study_name]", "def get_function_search_session(self):\n return # osid.authorization.FunctionSearchSession", "def do_alfred_search(self):\n dirpath = Dirpath.dirpath(self.query).abbr_noslash\n return run_trigger('search', arg='{} {} '.format(dirpath, DELIMITER))\n # return run_alfred(':fzysrch {} {} '.format(dirpath, DELIMITER))", "def get_function_search_session(self):\n if not self.supports_function_search():\n raise errors.Unimplemented()\n # pylint: disable=no-member\n return sessions.FunctionSearchSession(runtime=self._runtime)", "def supports_function_search(self):\n return # boolean", "def _execute_search(self, text):\n self._search_text = text[1:]\n\n #\n # if the user input is only \"/\" (starting to type something), hint\n # that they are entering the Search mode. nothing else to do!\n #\n\n if text == \"/\":\n self._line_label.setText(\"Search\")\n return\n\n #\n # stop an existing command timer if there is one running. we are about\n # to schedule a new one or execute inline. so the old/deferred command\n # is no longer needed.\n #\n\n self._command_timer.stop()\n\n #\n # if the functions list is HUGE, we want to defer the filtering until\n # we think the user has stopped typing as each pass may take awhile\n # to compute (while blocking the main thread...)\n #\n\n if self._director.metadata.is_big():\n self._command_timer = singleshot(1000, self._execute_search_internal)\n self._command_timer.start()\n\n #\n # the database is not *massive*, let's execute the search immediately\n #\n\n else:\n self._execute_search_internal()\n\n # done\n return", "def supports_function_search(self):\n return False # Change to True when implemented.", "def get_search_query(request):\n return (request.GET.get('q') or request.session.get('query') or\n get_setting('DEFAULT_QUERY'))", "def pick_backend():\n return import_module(settings.SEARCH_BACKEND).search_storefront", "def apply_query():\n s = Search(using=ES_OBJECT, index=SEARCHING_INDEX)\n if INDEXATION_MODE == \"autocomplete\":\n logging.info(\"Applying autocomplete search\")\n s.update_from_dict(\n autocomplete_query(QUERY, FIELDS_TO_SEARCH, popularity_field=POPULARITY_FIELD)\n )\n elif INDEXATION_MODE in [\"basic_english\", \"french\"]:\n logging.info(\"Applying multi match search with fuzziness if set in yaml\")\n s.update_from_dict(\n multi_match_query(QUERY, FIELDS_TO_SEARCH, fuzziness=FUZZINESS)\n )\n else:\n raise NotImplementedError(\"Mode d'indexation choisi pas setup\")\n return s", "def api_search(self):\n g.list_callback = 'api_search'\n\n if not getattr(self, 'check_%s' % request.method.lower())():\n return self.response_forbidden()\n\n # terms to search for\n search_term = request.args.get('query') or ''\n\n # the engine to use\n engine = request.args.get('engine') or ''\n\n # construct a raw query\n query = self.get_query()\n query = self.apply_ordering(query)\n\n if engine == 'default':\n # search in default fields\n\n # split keywords by blank chars\n kw_set = set(re.split(r'\\s+', search_term, re.U))\n kw_set.discard('')\n if kw_set and self._search.get('default', []):\n query = self.apply_search_query(\n query, list(kw_set), self._search['default'])\n else:\n # more complicated search methods\n # split query to 'field:(terms)'' or 'term' using the\n # following regular expression\n regex = re.compile(\n '((?:\\w+:\\([^)]*\\))|(?:\\w+:[^()\\s]+)|[^:\\s]+)', re.U)\n kw_split_list 
= regex.findall(search_term)\n search_kw = MultiDict()\n\n for kw in kw_split_list:\n try:\n sp = kw.index(':')\n key = kw[0:sp]\n val = kw[sp + 1:]\n if val.startswith('(') and val.endswith(')'):\n # expand\n for x in re.split(r'\\s+', val[1:-1], re.U):\n x and search_kw.add(key, x)\n else:\n # single term\n search_kw.add(key, val)\n\n except ValueError:\n # single word\n search_kw.add('default', kw)\n\n # apply search filter engine by engine\n for engine, kws in search_kw.iterlists():\n kw_set = set(kws)\n kw_set.discard('')\n if kw_set and self._search.get(engine, []):\n query = self.apply_search_query(\n query, list(kw_set), self._search[engine])\n\n # apply output limit \n if self.paginate_by or 'limit' in request.args:\n return self.paginated_object_list(query)\n\n return self.response(self.serialize_query(query))", "def get_search_handler(self, request, search_fields=None):\n return self.search_handler_class(\n search_fields or self.get_search_fields(request)\n )", "def query_function(self):\n ret = self.com.query('FUNC?')\n return ret", "def _search(self):", "def get_search_query(self, search):\n \"\"\"This can prevent us from searching manually\"\"\"\n return quote(search)", "def search():\n if not g.search_form.validate_on_submit():\n return redirect(url_for('index'))\n # Redirect to search_results function and pass search query\n return redirect(url_for('search_results', query=g.search_form.search.data))", "def search(): \r\n os.system('cls')\r\n topic = input(\"Enter your search query (Press X to Quit):\")\r\n if topic.lower()=='x':\r\n exit(0)\r\n res = wiki.search(topic)\r\n if len(res)==1:\r\n return res[0]\r\n \r\n elif res == []:\r\n if wiki.suggest(topic) != None:\r\n print(\"You want to search for: {}?\".format(wiki.suggest(topic)))\r\n ch = input(\"Enter Y(Yes) to continue or any other key to search other term:\")\r\n if ch.lower()=='y' or ch.lower()=='yes':\r\n return wiki.suggest(topic)\r\n else:\r\n print(\"Sorry, no content matches your query.\\nTry with another term.\")\r\n return(search())\r\n else:\r\n print(\"\\nSuggested topics are:\")\r\n for i,r in enumerate(res,1):\r\n print('{}. {}'.format(i,r))\r\n ch = int(input(\"Press corresponding number to read about any term from the suggestions:\"))\r\n if ch in range(1,len(res)+1):\r\n return res[ch-1]\r\n else:\r\n print(\"Sorry, your term is not in given suggestions\\nCarrying on with the original query...\")\r\n return res[0]", "def get_filter(self) -> Callable[[str], bool]:\n return self._filter", "def navbar_search_decorator(function):\n\n def wrapper(*args, **kwargs):\n if args[0].method == \"POST\":\n if args[0].POST.get(\"produit\"):\n form: SearchForm = SearchForm(args[0].POST)\n\n if form.is_valid():\n base_product: Product = form.cleaned_data[\"produit\"]\n\n return redirect(\n reverse(\n \"catalog:results\", kwargs={\"base_product\": base_product.id}\n )\n )\n\n return function(*args, **kwargs)\n\n return wrapper" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
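For context only: a minimal sketch of the kind of module the two pickers above expect settings.SEARCH_BACKEND to name — it simply has to expose search_storefront and search_dashboard callables that accept the search phrase. The module path, the PRODUCTS data, and the matching logic are assumptions invented for illustration, not part of the dataset.

# Hypothetical backend module, e.g. settings.SEARCH_BACKEND = "myshop.search.naive" (assumed path)
PRODUCTS = [
    {"name": "Blue mug", "sku": "MUG-1"},
    {"name": "Red mug", "sku": "MUG-2"},
]

def search_storefront(phrase):
    # Storefront search: match the customer-facing product name only.
    return [p for p in PRODUCTS if phrase.lower() in p["name"].lower()]

def search_dashboard(phrase):
    # Dashboard search: match any field, including internal SKUs.
    return [p for p in PRODUCTS if phrase.lower() in str(p).lower()]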
tbl -- True if a tblname is being tested, False if a fldname is being tested. Is it a bad name for SQLite? The best way is to find out for real (not too costly, and 100% valid by definition). Strangely, SQLite accepts '' as a table name, but we won't ;).
def valid_name(name, is_tblname=True):
    debug = False
    if name == '':
        return False
    default_db = mg.LOCAL_PATH / mg.INT_FOLDER / 'sofa_tmp'
    con = sqlite.connect(str(default_db))
    ## Note - newer versions accept pathlib Path as well as strings but Bionic doesn't :-(
    add_funcs_to_con(con)
    cur = con.cursor()
    valid = True
    err = ''
    try:
        if is_tblname:
            tblname = quote_obj(name)
            fldname = 'safefldname'
        else:
            tblname = 'safetblname'
            fldname = name
        ## in case it survives somehow esp safetblname
        ## OK if this fails here
        sql_drop = f'DROP TABLE IF EXISTS {tblname}'
        if debug: print(sql_drop)
        cur.execute(sql_drop)
        con.commit()
        ## usable names in practice?
        sql_make = f'CREATE TABLE {tblname} (`{fldname}` TEXT)'
        if debug: print(sql_make)
        cur.execute(sql_make)
        con.commit()
        ## otherwise when committing, no net change to commit and
        ## no actual chance to succeed or fail
        ## clean up
        sql_drop = f'DROP TABLE IF EXISTS {tblname}'
        if debug: print(sql_drop)
        cur.execute(sql_drop)
        con.commit()
    except Exception as e:
        valid = False
        if debug: print(b.ue(e))
        err = b.ue(e)
    finally:
        cur.close()
        con.close()
    return valid, err
[ "def verify_table_name(table_name):\n table_names = get_table_names()\n if table_name in table_names:\n return True\n else:\n return False", "def test_db_table_name_must_be_tag(self):\n db_table = Tag._meta.db_table\n self.assertEquals(db_table, 'tag')", "def test_contest_table_name_is_contest(self):\n table_name = Contest._meta.db_table\n\n self.assertEqual(table_name, 'contest')", "def test_db_table_name_must_be_code_problem(self):\n db_table = CodeProblem._meta.db_table\n self.assertEquals(db_table, 'code_problem')", "def table_identify(origin, format, *args, **kwargs):\n itis = False\n if origin == \"read\":\n itis = isinstance(args[1], Table) and (format in (None, \"astropy.table\"))\n return itis", "def _check_field(table, field, name, type_):\n columns = [getattr(table.columns, key) for key in table.columns.keys()]\n\n # If ‘field’ is not specified, we try to autodetect it from the columns\n # of the table based on ‘type_’.\n if field is None:\n candidates = filter(lambda c: isinstance(c.type, type_), columns)\n if len(candidates) == 1:\n field = candidates[0]\n else:\n field = 'tree_' + name\n\n # We assume that we'll be passed either a string or a SQLAlchemy Column\n # object (duck typing is not allowed). If what we're passed is a Column\n # object, we just need to check that\n if not isinstance(field, basestring):\n assert isinstance(field, sqlalchemy.Column)\n assert field.table is table\n\n # Otherwise we're passed a string, and either we find a field with that\n # name in the existing table Columns (likely but not necessarily if the\n # developer specified their own field name), or we'll have to create a\n # new column of the specified name and type, and insert it into the\n # table's column descriptions.\n elif field in table.columns:\n # Column exists:\n field = table.columns[field]\n else:\n # Column not found; create it:\n field = sqlalchemy.Column(field, type_(), nullable=False)\n table.append_column(field)\n # And return (since we know the following checks are redundant):\n return field\n\n # If we found the column or the developer specified it directly, we'll\n # do a quick sanity check to make sure that the column has the right\n # type and meta-attributes:\n assert isinstance(field.type, type_), \\\n \"The type of %s field should be %r\" % (name, type_)\n assert not field.nullable, \\\n \"The %s field should not be nullable\" % name\n\n # Field passes; return to caller:\n return field", "def test_db_table_name_must_be_contest_registration(self):\n db_table = ContestRegistration._meta.db_table\n self.assertEquals(db_table, 'contest_registration')", "def test_rule_table_name(self):\n assert_equal(self.rule_table.name, _RULES_TABLE)", "def verify(tabnam: str = \"readings\") -> bool:\n columns = get_columns(tabnam)\n if len(columns) < 2:\n logging.info(f\"table {tabnam}: not enough fields\")\n return False\n if columns[0] != (\"station\", \"INTEGER\", 1) or \\\n columns[1] != (\"dwdts\", \"TEXT\", 2):\n logging.info(f\"table {tabnam}: primary key is not (station INTEGER, dwdts TEXT)\")\n return False\n for col in columns[2:]:\n if col[2] != 0:\n logging.info(f\"table {tabnam}: too many fields in primary key\")\n return False\n return True", "def compile_table_exists(self):\n return \"SELECT * FROM sqlite_master WHERE type = 'table' AND name = ?\"", "def testOpsimDbSeeingColName(self):\n seeingcol = self.oo.fetchSeeingColName()\n self.assertTrue(seeingcol, 'finSeeing')", "def table_exists(self):\n query = \"\"\"select name from sqlite_master \\\n where type='table' and 
name='%s' \"\"\" % self.table_name\n result = self.conn.execute(query)\n return result.fetchone() != None", "def table_exists(table, conn):\n result = conn.execute('select name from sqlite_master where name=?', \\\n (table,))\n return not (not result.fetchall()) #False if does not exist, True otherwise", "def test_db_table_name_must_be_mcq_problem(self):\n db_table = MCQProblem._meta.db_table\n self.assertEquals(db_table, 'mcq_problem')", "def test_db_table_name_must_be_contest_announcement(self):\n db_table = ContestAnnouncement._meta.db_table\n self.assertEquals(db_table, 'contest_announcement')", "def test_db_check():\n _test_call(\n mysql.db_check,\n \"CHECK TABLE `test``'\\\" db`.`my``'\\\" table`\",\n \"test`'\\\" db\",\n \"my`'\\\" table\",\n )", "def exists(self, engine):\n return self.name in engine.table_names()", "def _looks_like_database(obj):\n return (isinstance(obj, _Backend) or\n all(hasattr(obj, attr) for attr in\n ('find', 'all_items', 'delete', 'save'))\n )", "def _check_table_name(self, table_name: str, fail_if_table_exists: bool) -> None:\n if (\n fail_if_table_exists\n and db.session.query(SqlaTable)\n .filter_by(table_name=table_name)\n .one_or_none()\n ):\n message = _(\n f\"Table name {table_name} already exists. Please choose another\"\n )\n raise NameNotAllowedException(message, None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a decorator that requires ``predicate(request)`` to evaluate ``True`` before calling the decorated function. If the predicate evaluates ``False``, ``response_builder`` is called with the original function, the request, and the args and kwargs, and its result is returned.
def create_require(predicate, response_builder):
    def require(func):
        @wraps(func)
        def decorated(request, *args, **kwargs):
            if predicate(request):
                return func(request, *args, **kwargs)
            else:
                return response_builder(func, request, *args, **kwargs)
        return decorated
    return require
[ "def require(predicate):\n def outer(f):\n @wraps(f)\n def inner(request, *args, **kwargs):\n try:\n predicate.check_authorization(request.environ)\n except NotAuthorizedError as e:\n reason = unicode(e)\n if request.environ.get('repoze.who.identity'):\n # The user is authenticated.\n code = 403\n else:\n # The user is not authenticated.\n code = 401\n return Response(reason, code)\n else:\n return f(request, *args, **kwargs)\n return inner\n return outer", "def boolean_decorator(boolean_operator: str):\n\n def boolean_function(function: FunctionType):\n def wrapper(self, expressions: list):\n plan = self.create_execution_plan_expression(*expressions, boolean_operator)\n result = function(self, expressions)\n return ValueWithPlan(result, plan)\n\n return wrapper\n\n return boolean_function", "def conditional(self, predicate, true_callable, false_callable):\n return tf.cond(predicate, true_callable, false_callable)", "def accepts_evaluator(function):\n\n def decorated(*args, evaluator=None, **kwargs):\n if evaluator is None:\n return function(*args, **kwargs)\n else:\n return evaluator(function, args, kwargs)\n\n return decorated", "def dev_api_request(fn):\n @json_response\n @passes_test(api_request_is_developer)\n @validate_request\n def decorator(request, *args, **kwargs):\n return fn(request, *args, **kwargs)\n return decorator", "def memoize_request(f):\n @wraps(f)\n def memoizer(*args, **kwargs):\n if not has_request_context() or current_app.config['TESTING'] or current_app.config.get('REPL'):\n # No memoization outside request context\n return f(*args, **kwargs)\n\n try:\n cache = g.memoize_cache\n except AttributeError:\n g.memoize_cache = cache = {}\n\n key = (f.__module__, f.__name__, make_hashable(getcallargs(f, *args, **kwargs)))\n if key not in cache:\n cache[key] = f(*args, **kwargs)\n return cache[key]\n\n return memoizer", "def conditional_decorator(decorator, condition):\n def gen_decorator(f):\n if not condition:\n return f\n return decorator(f)\n return gen_decorator", "def as_request_function(function):\n spec = inspect.getfullargspec(function)\n\n if spec.args and spec.args[0] == \"request\":\n return function\n\n @functools.wraps(function)\n def request_function(request, *args, **kwargs):\n return function(*args, **kwargs)\n\n return request_function", "def precondition(precond: Callable[[Any], bool]) -> Callable[[TestFunc], TestFunc]:\n\n def decorator(f):\n @proxies(f)\n def precondition_wrapper(*args, **kwargs):\n return f(*args, **kwargs)\n\n existing_initialize_rule = getattr(f, INITIALIZE_RULE_MARKER, None)\n if existing_initialize_rule is not None:\n raise InvalidDefinition(\n \"An initialization rule cannot have a precondition. 
\", Settings.default\n )\n\n rule = getattr(f, RULE_MARKER, None)\n invariant = getattr(f, INVARIANT_MARKER, None)\n if rule is not None:\n assert invariant is None\n new_rule = attr.evolve(rule, preconditions=rule.preconditions + (precond,))\n setattr(precondition_wrapper, RULE_MARKER, new_rule)\n elif invariant is not None:\n assert rule is None\n new_invariant = attr.evolve(\n invariant, preconditions=invariant.preconditions + (precond,)\n )\n setattr(precondition_wrapper, INVARIANT_MARKER, new_invariant)\n else:\n setattr(\n precondition_wrapper,\n PRECONDITIONS_MARKER,\n getattr(f, PRECONDITIONS_MARKER, ()) + (precond,),\n )\n\n return precondition_wrapper\n\n return decorator", "def filter_from_function():\n\n def decorate(f):\n params, _ = extract_params(f)\n return FilterOfFunction(params=params, f=f)\n\n return decorate", "def ApplyToResult( func ):\n\n @simple_decorator\n def wrap( f ):\n def new_function(*args, **kw):\n return func( f( *args, **kw ) )\n return new_function\n \n return wrap", "def wrap_filter(hook, filter_fn):\n def wrap_filter_inner(hs):\n if filter_fn(hs):\n return hook(hs)\n else:\n return hs()\n\n return wrap_filter_inner", "def passthrough_decorator(f):\n return f", "def response_by_test(view_func,test_func,view_func_on_pass=None,view_func_on_fail=None):\n def decorator(view_func):\n @wraps(view_func)\n def _wrapped_view(request, *args, **kwargs):\n test_result = test_func(request,*args, **kwargs)\n if test_result is None:\n raise Http404\n elif test_result and view_func_on_pass is None \\\n or not test_result and view_func_on_fail is None:\n return view_func(request, *args, **kwargs)\n elif test_result:\n return view_func_on_pass(request, *args, **kwargs)\n else:\n return view_func_on_fail(request, *args, **kwargs)\n return _wrapped_view\n return decorator", "def test_decorator_function_url(self):\n def redirect_url(request):\n return '/consent?next=%s' % quote(request.get_full_path())\n\n request = self.request_factory.get('/foo/')\n request.user = User.objects.create_user(username='user',\n email='user@example.com')\n\n with self.settings(\n DJBLETS_PRIVACY_PENDING_CONSENT_REDIRECT_URL=redirect_url):\n rsp = decorated_view(request)\n\n self.assertIsInstance(rsp, HttpResponseRedirect)\n self.assertEqual(rsp.url, '/consent?next=/foo/')", "def api_decorator(func):\n\n def make_request(*args, **kwargs):\n \"\"\"\n Function that makes the actual request and redirects to success\n or failure depending on error exceptions.\n \"\"\"\n\n # Try the function and return to success\n try:\n return APIUtils.success(func(*args, **kwargs))\n\n # Except any errors and call failure\n except Exception as err:\n return APIUtils.failure(err)\n\n return make_request", "def _decorate_once(fn):\n if hasattr(fn, '_wsgiwapi_props'):\n props = fn._wsgiwapi_props\n if props.get('decorated', False) == True:\n return fn, props\n props = {'decorated': True}\n\n # Note: the following wrapper function just checks that the properties on\n # the callable passed to application match those set here. 
I think this\n # will always be true unless a later applied decorator has failed to copy\n # the properties.\n\n # It is tempting to remove this check, and just set the properties on the\n # original callable object, but there is a potential security issue in\n # doing so: if a later applied decorator _has_ failed to copy the\n # properties, this would lead to decorators getting lost, which could mean\n # that code which looks like it is validating parameters is actually\n # failing to do the validation.\n\n # Perhaps the best fix would be to make parameters unavailable unless\n # they've been validated.\n\n # FIXME - review this.\n def res(*args, **kwargs):\n # Check that the decorator has not been applied and then the properties\n # have been lost (probably by a second decorator which doesn't copy the\n # properties being applied).\n if isinstance(args[0], Request):\n request = args[0]\n else:\n request = args[1]\n if request._handler_props is not props:\n raise RuntimeError(\"Handler properties do not match decorated properties. Probably missing call to wsgiwapi.copyprops.\")\n return fn(*args, **kwargs)\n res.__doc__ = fn.__doc__\n res.__name__ = fn.__name__\n res.__dict__.update(fn.__dict__)\n res._wsgiwapi_props = props\n return res, props", "def where(predicate: Callable[[ir.Value], bool]) -> Selector:\n return Selector(predicate)", "def gather_precondition(precondition_fn: Callable[[GatherArgs], None]):\n\n def decorator(gather_fn: Callable[[GatherArgs], Any]):\n\n @wraps(gather_fn)\n def wrapper(args: GatherArgs):\n # Call `precondition_fn`; we assume it may throw an exception.\n precondition_fn(args)\n return gather_fn(args)\n\n return wrapper\n\n return decorator" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
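A minimal usage sketch of create_require from the record above, assuming that function (and the functools.wraps it relies on) is importable; is_authenticated, login_redirect and profile_view are hypothetical names made up for the example.

def is_authenticated(request):
    # Predicate: only run the wrapped view for logged-in users.
    return getattr(request, "user", None) is not None

def login_redirect(func, request, *args, **kwargs):
    # Response builder: called instead of the view when the predicate fails.
    return "302 redirect to /login?next=" + request.path

require_login = create_require(is_authenticated, login_redirect)

@require_login
def profile_view(request):
    return "200 profile page for " + request.user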
Initialise a deck of cards and return lists of suits and card values.
def deck():
    suits = ['clubs', 'diamonds', 'hearts', 'spades']
    cards = ['A', 2, 3, 4, 5, 6, 7, 8, 9, 10, 'J', 'Q', 'K']
    return suits, cards
[ "def build_deck(self):\n suits = ['Hearts', 'Diamonds', 'Spades', 'Clubs']\n ranks = {\n '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':10, 'Q':10, 'K':10, 'A':11,\n }\n for suit in suits:\n for rank, value in ranks.items():\n card = Card(rank, value, suit)\n self.cards.append(card)", "def build_deck():\n\tsuits = {\n\t\t'hearts': [],\n\t\t'diamonds': [],\n\t\t'clubs': [],\n\t\t'spades': []\n\t\t}\n\n\tface_cards = ['jack','queen', 'king', 'ace']\n\n\tfor suit in suits.keys():\n\t\tfor number in range(1,11):\n\t\t\tsuits[suit].append(f'{number} of {suit.title()}')\n\t\tfor face_card in face_cards:\n\t\t\tsuits[suit].append(f'{face_card.title()} of {suit.title()}')\n\n\n\treturn suits", "def initiate_deck(self):\n for suit in self.suits:\n for i in range(1, 14):\n new_card = Card(i, suit)\n self.cards.append(new_card)", "def get_cards():\n Card = namedtuple('Card', 'rank suit')\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n suits = ['spades', 'hearts', 'diamonds', 'clubs']\n full_deck = [Card(suit, rank) for suit in suits for rank in ranks]\n return full_deck", "def build(self):\n cards = []\n # for each suit\n for s in self.SUITS:\n # for each rank\n for r in self.RANKS:\n # create a new card\n card = Card(s, r)\n # set's the image src\n card.set_image_src(CARD_IMAGE_SRC)\n # set the back image src\n card.set_back_image_src(CARD_BACK_IMAGE_SRC)\n # set's the card size\n card.set_size(CARD_IMAGE_SIZE)\n # add the new card into the list\n cards.append(card)\n return cards", "def newDeck(self):\n temp_deck = []\n for card in self.cards:\n for suit in self.suits:\n temp_deck.append(\"{} {}\".format(card, suit))\n return temp_deck", "def fetch_cards_of_suit(self, suit):\n\n def sort_by_value(card):\n \"\"\" Returns the value of the card based on it's value name \"\"\"\n return VALUE_BY_NAME[card.value]\n\n cards_of_suit = [card for card in self.cards if suit == card.suit]\n\n # Sort for easy viewing.\n cards_of_suit.sort(key=sort_by_value)\n return cards_of_suit", "def create_deck():\n card_deck = []\n for x in range(6):\n for suit in ('H', 'S', 'C', 'D'):\n for rank in range(2, 11):\n card_deck.append((str(rank) + str(suit)))\n for face_cards in ('A', 'J', 'Q', 'K'):\n card_deck.append((str(face_cards) + str(suit)))\n\n random.shuffle(card_deck)\n return card_deck", "def card_values(hand):\n\n # Extract card values\n card_values = [value for (suit, value) in hand]\n\n # Convert special card names to values\n card_values = [10 if value in ('J', 'Q', 'K') else 1 if value == 'A' \\\n else value for value in card_values]\n\n return card_values", "def all_cards(self):\n for i in range(len(__class__.card_suits) * len(__class__.card_values)):\n suit = __class__.card_suits[i // len(__class__.card_values)]\n value = __class__.card_values[i % len(__class__.card_values)]\n yield __class__(suit=suit, value=value)", "def get_cards(self):\r\n return self.deck", "def init():\n\n deck,board,graveyard=[],[],[]\n temp=[]\n for i in elements.values(): # Saves all the possible combinations of elements,symbols,fillings and colors in deck list\n for j in symbols.values():\n for k in fillings.values():\n for l in colors.values():\n deck.append([i,j,k,l])\n\n shuffle(deck)\n \n for i in range(4): # Creates an array 4*3 for the board\n for j in range(3):\n temp.append(deck.pop(i)) \n board.append(temp)\n temp=[]\n \n \n return (deck,board,graveyard)", "def build_deck():\n\n deck = []\n suits = ['clubs', 'diamonds', 'hearts', 'spades']\n ranks = ['A', '2', '3', 
'4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n for times in range(6):\n for s in suits:\n for r in ranks:\n deck.append(Card(s, r))\n\n # plastic_card = \" \"\n shuffle(deck)\n # deck.insert(-randint(60, 75), plastic_card) # minus to count from the last card\n\n return deck", "def __init__(self, num_decks=1):\n self._ranks = ['A'] + [str(n) for n in range(2, 11)] + ['J', 'Q', 'K']\n self._suits = ['spades', 'diamonds', 'clubs', 'hearts']\n self._cards = list()\n for deck in range(num_decks):\n # New decks of cards increase in rank from Ace (low) to King for two suits\n self._cards += [BjCard(suit, rank) for suit in self._suits for rank in self._ranks if suit in ['spades', 'diamonds']]\n # Then the ranks go in reverse for the other half of the deck\n self._cards += [BjCard(suit, rank) for suit in self._suits for rank in reversed(self._ranks) if suit in ['clubs', 'hearts']]\n self._logger = logging.getLogger('bj')\n self._logger.info(\"New deck of cards opened and spread\")", "def generate_deck(self):\n deck = []\n for suit in SUITS:\n for value in ORDER:\n deck.append(value + suit)\n\n self.cardsLeft = deck # Start keeping track of unplayed cards.\n\n random.shuffle(deck)\n \n self.h[0].add(deck[:10]) # Deal to hands ...\n self.h[1].add(deck[10:20]) #\n self.h[2].add(deck[20:30]) #\n self.kitty = deck[30:] # ... and to kitty.\n self.oldKitty = self.kitty", "def setUp(self):\n self.deck = create_deck()\n self.display_deck = [card['display'] for card in self.deck]\n self.value_deck = [card['value'] for card in self.deck]\n self.deck = shuffle_deck(self.deck)\n self.display_shuffled_deck = [card['display'] for card in self.deck]\n self.value_shuffled_deck = [card['value'] for card in self.deck]", "def build_deck(animals_list_of_list):\n deck = []\n for current_row in range(len(animals_list_of_list)):\n card = {'pow_lvl': int(animals_list_of_list[current_row][0]), 'name': animals_list_of_list[current_row][1],\n 'type': animals_list_of_list[current_row][2]}\n deck.append(card)\n\n return deck", "def test_build_deck(self):\n suits = [\"Clubs\", \"Spades\", \"Hearts\", \"Diamonds\"]\n\n self.deck.build_deck()\n res_list = self.deck.deck\n exp_list = []\n for suit in suits:\n for value in range(2, 15):\n exp_list.append(card.Card(suit, value))\n index = 0\n\n for i in exp_list:\n self.assertEqual(i.show(), res_list[index].show())\n index += 1\n\n exp = 52\n res = len(res_list)\n self.assertEqual(res, exp)", "def generate_deck() -> Deck:\n\n card_suites: List[str] = [\"spade\",\"heart\",\"clubs\",\"diamond\"]\n card_positions: List[str] = [\"ace\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"jack\",\"queen\",\"king\"]\n deck: Deck = deque(maxlen=52)\n\n for suite in card_suites:\n for position in card_positions:\n deck.append((suite, position))\n\n return deck" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw n cards from a deck, with or without replacement. Without replacement, randomly draw unique cards from a standard deck until the desired number (n_cards) is reached; with replacement, the same card may be drawn more than once.
def draw(n_cards, replacement=False):
    import random

    # If replacement is True, the same card can be picked multiple times
    if replacement:
        # Initialise hand to the empty list (no card picked yet)
        hand = []

        # Append a random card to the hand
        while len(hand) < n_cards:
            hand.append((random.choice(suits), random.choice(cards)))
    else:
        # Initialise hand to the empty set (no card picked yet)
        hand = set()

        # Add n unique cards to the hand, if n is less than or equal to total
        # deck size (52)
        if n_cards > len(suits) * len(cards):
            raise ValueError('Not enough cards in the deck.')
        else:
            while len(hand) < n_cards:
                hand.add((random.choice(suits), random.choice(cards)))

    return hand
[ "def draw(self, n: int):\n draw_cards = self.cards[:n]\n self.cards[:n] = [] # remove cards drawn from deck\n return draw_cards", "def deal(self, n: int):\n d = Deck()\n d.shuffle()\n deal = tuple([] for _ in range(n))\n for _ in range(3):\n for i in range(n):\n card = d.pop()\n deal[i].append(card)\n return deal", "def draw_hunt_card(self, number_of_cards=1):\n for i in range(number_of_cards):\n card = random.choice(self.game.hunt_deck)\n move(card, self.game.hunt_deck, self.hhand)", "def deal(players, n=5, deck=mydeck):\n random.shuffle(deck)\n return [deck[n*player:n*(player+1)] for player in range(players)]", "def deal_cards(deck, number_of_cards = 1):\n cards = random.sample(deck, number_of_cards)\n for c in cards:\n deck.remove(c)\n return sorted(cards, reverse = True)", "def shuffle(deck):\n \n n = len(deck) - 1\n while n > 0:\n k = random.randint(0, n)\n deck[k], deck[n] = deck[n], deck[k]\n n -= 1\n\n return deck", "def make_deck():\n deck = 4 * valid_ranks\n random.shuffle(deck)\n return deck", "def deal_cards(self, num_cards=7):\n\n deal_cards = []\n for _ in range(num_cards):\n deal_cards.append(self.draw_card())\n\n return deal_cards", "def drawCards(self, count=1):\n\n cards = []\n while len(cards) < count:\n try:\n card = self.activeDeck.pop()\n except IndexError:\n print \"Not enough cards! Deck is empty\"\n return None\n self.usedDeck.append(card)\n cards.append(card)\n return cards", "def build_deck():\n\n deck = []\n suits = ['clubs', 'diamonds', 'hearts', 'spades']\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n for times in range(6):\n for s in suits:\n for r in ranks:\n deck.append(Card(s, r))\n\n # plastic_card = \" \"\n shuffle(deck)\n # deck.insert(-randint(60, 75), plastic_card) # minus to count from the last card\n\n return deck", "def get_random_cards(cards, amount=10):\n if len(cards) < amount:\n print(\"There are not enough cards to display {a} cards. 
Displaying {l} cards instead.\".format(a=amount, l=len(cards)))\n return cards\n else:\n print(\"Selecting {a} cards from card list of {l}.\".format(a=amount, l=len(cards)))\n return random.sample(cards, amount)", "def draw(self, game, no_of_cards):\r\n for _ in range(no_of_cards):\r\n # DONE: make shoe keep track of drawn card\r\n # So if needed, we can go through the list of drawn card\r\n # and calculate running_count if we want\r\n self.cards.append(game.shoe.deal(game))", "def new_deck(n_sets=4):\n card_set = [Card(rank, suit) for _ in range(n_sets) for rank in Rank for suit in Suit]\n return Deck(card_set)", "def hands(n_cards, k_hands, replacement=False):\n\n # For each of the k hands draw n cards (with or without replacement) and\n # compute their values\n if replacement:\n hands = [card_values(draw(n_cards, True)) for hand in range(k_hands)]\n else:\n hands = [card_values(draw(n_cards)) for hand in range(k_hands)]\n\n return hands", "def draw_card(self) -> int:\n card_drawn_number = random.randint(0, self.num_cards - 1)\n card_tally = 0\n card_index = 0\n while True:\n card_tally += self.card_array[card_index]\n if card_drawn_number < card_tally:\n self.num_cards -= 1\n self.card_array[card_index] -= 1\n if self.num_cards <= self.min_cards:\n self.reshuffle()\n return card_index + 1\n card_index += 1", "def draw_uniform_sample(choices: List[T], n: int) -> List[T]:\n return random.default_rng().choice(a=choices, size=n)", "def draw_sample(sample_size, n):\n sample = set()\n i = 0\n while i < sample_size:\n sample.add(draw_element(n))\n if len(sample) < 1 + 1:\n continue\n i += 1\n return sample", "def get_cards():\n return random.randint(1, 10)", "def draw(self, count=1):\n if count > len(self.cards):\n raise IndexError('not enough cards to draw')\n drawn = list(reversed(self.cards[-count:]))\n self.cards = self.cards[:-count]\n return drawn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
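A quick usage sketch tying together deck and draw from the two records above; it assumes both functions are in scope and that suits and cards are bound at module level, since draw reads them as free variables. The seed is only there to make the example repeatable.

import random

suits, cards = deck()   # draw() looks these names up at module level

random.seed(42)
hand = draw(5)                                   # set of 5 distinct (suit, value) pairs
hand_with_repeats = draw(5, replacement=True)    # list; the same card may appear twice
print(hand)
print(hand_with_repeats)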
Extract card values from drawn cards. Extract values out of all cards in the hand, assigning numerical values to face cards (J, Q and K count as 10) and aces (count as 1).
def card_values(hand):
    # Extract card values
    card_values = [value for (suit, value) in hand]

    # Convert special card names to values
    card_values = [10 if value in ('J', 'Q', 'K') else 1 if value == 'A'
                   else value for value in card_values]

    return card_values
[ "def values(self):\n val = 0\n aces = 0\n for card in self.cards:\n if cardvals[card.val] == 1:\n aces+=1\n else:\n val += cardvals[card.val]\n\n handval = [val]\n\n for ace in range(aces):\n newhandval = []\n for i in handval:\n if newhandval.count(i+1) == 0:\n newhandval.append(i + 1)\n if not newhandval.count(i+1) == 0:\n newhandval.append(i + 11)\n handval = newhandval\n\n return handval", "def get_value(cards):\n result = 0\n aces = 0\n for c in cards:\n result += c.value\n if c.rank == \"Ace\":\n aces += 1\n while result > 21 and aces > 0:\n result -= 10\n aces -= 1\n return result", "def hand_score(cards):\n total = 0\n for card in cards:\n card = value(card)\n total += card\n return total", "def get_hand_values(self):\n values = list()\n has_ace = False\n hand_value = 0\n for c in self.hand:\n if isinstance(c, card.Card):\n if c.value is 1:\n has_ace = True\n else:\n hand_value += c.value\n if has_ace: # append the current hand value with the two values of an Ace\n values.append(hand_value + 1)\n values.append(hand_value + 11)\n else:\n values.append(hand_value)\n return values", "def get_card_value(self, index):\n return VALUES[self._hand[index].get_rank()]", "def get_value_test():\n c1 = Card(\"S\", \"A\")\n c2 = Card(\"C\", \"2\")\n c3 = Card(\"D\", \"T\")\n c4 = Card(\"S\", \"K\")\n c5 = Card(\"C\", \"7\")\n c6 = Card(\"D\", \"A\")\n\n test_hand = Hand()\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c2)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c5)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c3)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c4)\n print test_hand\n print test_hand.get_value()\n\n test_hand = Hand()\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c1)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c6)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c4)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c5)\n print test_hand\n print test_hand.get_value()\n\n test_hand.add_card(c3)\n print test_hand\n print test_hand.get_value()\n\n print\n print \".\"*20\n print \"Expected Output:\"\n output = '''\n Hand contains \n 0\n Hand contains C2 \n 2\n Hand contains C2 C7 \n 9\n Hand contains C2 C7 DT \n 19\n Hand contains C2 C7 DT SK \n 29\n Hand contains \n 0\n Hand contains SA \n 11\n Hand contains SA DA \n 12\n Hand contains SA DA SK \n 12\n Hand contains SA DA SK C7 \n 19\n Hand contains SA DA SK C7 DT \n 29\n '''\n print output\n print \".\"*20\n print", "def get_value(self):\n hand_value=0\n is_ace=False\n for i in self.hand_list:\n hand_value+=VALUES[list(i)[1]]\n #check the hand for aces\n if list(i)[1]=='A':\n is_ace=True\n #if aces are in the hand and the value is below 12 add 10 \n if is_ace and hand_value<=11:\n hand_value+=10\n return hand_value", "def value(card):\n val = card[0]\n if val == \"A\":\n val = 11\n elif val == \"K\":\n val = 10\n elif val == \"Q\":\n val = 10\n elif val == \"J\":\n val = 10\n elif val == \"0\":\n val = 10\n else:\n val = int(val)\n return val", "def _get_card_score(card):\n suit_score = SUIT_SCORING[card[0]]\n try:\n value_score = int(card[1])\n except ValueError:\n value_score = NAN_VALUES_SCORING[card[1]]\n return suit_score * value_score", "def _get_card_value(rank: str, suit: str) -> Tuple[int, str]:\n try:\n return int(rank), suit\n except ValueError:\n return CARD_MAPPED_VALUES.get(rank), suit", "def get_value(self):\n if 
self.card:\n return self.card.get_value()\n return 0", "def get_card_value(card_label):\r\n #numbers are converted to integers and and ace is assigned to 1 the rest (face cards) are assigned 10\r\n if card_label == '2' or card_label == '3' or card_label == '4' or card_label == '5' or card_label == '6' or card_label == '7' or card_label == '8' or card_label == '9' or card_label == '10':\r\n return int(card_label)\r\n elif card_label == 'A':\r\n return int(1)\r\n else:\r\n return int(10)", "def sum_hand(self, hand):\n total = 0\n for card in hand:\n if \"Ace\" in card:\n if total + 11 > 21:\n total += 1\n else:\n total += 11\n else:\n total += self.deck.deck[card]['value']\n return total", "def value_hand(hand):\n assert len(hand) == 5\n score = 0\n hand = sorted(hand)\n suits_in_hand = set(card.suit for card in hand)\n ranks_in_hand = set(card.rank for card in hand)\n\n def is_straight():\n if len(ranks_in_hand) < 5:\n return False\n max_rank_value, min_rank_value = hand[-1].rank.value, hand[0].rank.value\n if {Rank.TWO, Rank.ACE} < ranks_in_hand:\n max_rank_value, min_rank_value = hand[-2].rank.value, Rank.TWO.value - 1\n return len(ranks_in_hand) == 5 and max_rank_value - min_rank_value == 4\n\n # Check pair hands (pair, full house, ...)\n counter = Counter(card.rank for card in hand)\n RankCount = namedtuple('RankCount', 'rank, count')\n rank_counts = [RankCount(e[0], e[1]) for e in counter.most_common()]\n if rank_counts[0].count == 3 and rank_counts[1].count == 2: # Full house\n score = value_hand.FULL\\\n + value_hand.K * rank_counts[0].rank.value\\\n + rank_counts[1].rank.value\n elif rank_counts[0].count == 2 and rank_counts[1].count == 2: # Two pair\n high_pair_value = max(rank_counts[0].rank.value, rank_counts[1].rank.value)\n low_pair_value = min(rank_counts[0].rank.value, rank_counts[1].rank.value)\n score = value_hand.TWO_PAIR\\\n + value_hand.K**2 * high_pair_value\\\n + value_hand.K * low_pair_value\\\n + rank_counts[2].rank.value\n elif rank_counts[0].count == 3 and rank_counts[1].count == 1: # Three of a kind\n high_kicker_value = max(rank_counts[1].rank.value, rank_counts[2].rank.value)\n low_kicker_value = min(rank_counts[1].rank.value, rank_counts[2].rank.value)\n score = value_hand.SET\\\n + value_hand.K**2 * rank_counts[0].rank.value\\\n + value_hand.K * high_kicker_value\\\n + low_kicker_value\n elif rank_counts[0].count == 2 and rank_counts[1].count == 1: # One pair\n kickers = sorted(ranks_in_hand - {rank_counts[0].rank}, reverse=True)\n score = value_hand.PAIR\\\n + value_hand.K**3 * rank_counts[0].rank.value\\\n + value_hand.K**2 * kickers[0].value\\\n + value_hand.K * kickers[1].value\\\n + kickers[2].value\n elif rank_counts[0].count == 4 and rank_counts[1].count == 1: # Four of a kind\n score = value_hand.FOUR\\\n + value_hand.K * rank_counts[0].rank.value\\\n + rank_counts[1].rank.value\n # Check for flush\n if len(suits_in_hand) == 1:\n score = max(value_hand.FLUSH, score)\n # Check for straight_flush\n if is_straight():\n score = value_hand.STRAIGHT_FLUSH\n # Check for royal flush\n if min(ranks_in_hand) is Rank.TEN:\n score = value_hand.ROYAL\n else:\n score += Rank.FIVE.value if {Rank.TWO, Rank.ACE} < ranks_in_hand else max(ranks_in_hand).value\n else:\n score += sum(14**i * hand[i].rank.value for i in range(len(hand)))\n elif is_straight(): # Check for straight\n score = value_hand.STRAIGHT\\\n + (Rank.FIVE.value if {Rank.TWO, Rank.ACE} < ranks_in_hand else max(ranks_in_hand).value)\n elif score < value_hand.PAIR:\n # High card is best hand\n score = 
sum(14**i * hand[i].rank.value for i in range(len(hand)))\n return score", "def _get_hand_values(self, hand, values):\n return [values[i-1] for i in hand]", "def dealer_cards():\n card4 = draw_card()\n card5 = draw_card()\n draw_card()\n print(\"The dealer drew a\", card4)\n draw_card()\n print(\"The dealer also drew a\", card5)\n print(\"The dealers total is\", card4 + card5)\n return card4 + card5", "def hand_ranking(five_cards):\n cards_val = []\n cards_col = []\n for card in five_cards:\n cards_val.append((card % 13) + 2)\n cards_col.append(card // 13)\n if cards_col == [cards_col[0]] * 5:\n flush = True\n else:\n flush = False\n\n # Start checking for hand's value\n\n if flush and sorted(cards_val) == list(range(min(cards_val), max(cards_val) + 1)):\n return [8, max(cards_val)] # straight flush\n\n elif flush and sorted(cards_val) == [2, 3, 4, 5, 14]:\n return [8, 5] # straight flush of A,2,3,4,5\n\n elif len(set(cards_val)) == 2:\n for val in set(cards_val):\n if cards_val.count(val) == 4:\n one = max(set(cards_val) - {val})\n return [7, val, one] # four of a kind\n elif cards_val.count(val) == 3:\n two = max(set(cards_val) - {val})\n return [6, val, two] # full house\n\n elif flush:\n return [5] + sorted(cards_val, reverse=True) # flush\n\n elif sorted(cards_val) == list(range(min(cards_val), max(cards_val) + 1)):\n return [4, max(cards_val)] # straight\n\n elif sorted(cards_val) == [2, 3, 4, 5, 14]:\n return [4, 5] # straight of A,2,3,4,5\n\n elif len(set(cards_val)) == 3:\n two = set()\n for val in set(cards_val):\n if cards_val.count(val) == 3:\n one = sorted(set(cards_val) - {val}, reverse=True)\n return [3, val] + one # three of a kind\n elif cards_val.count(val) == 2:\n two.add(val)\n return [2] + sorted(two, reverse=True) + list(set(cards_val) - two) # two pairs\n\n elif len(set(cards_val)) == 4:\n for val in set(cards_val):\n if cards_val.count(val) == 2:\n return [1, val] + sorted(set(cards_val) - {val}, reverse=True) # one pair\n\n else:\n return [0] + sorted(cards_val, reverse=True) # high card", "def cardToNumber(self, card):\n if card[0] == \"A\": # does the opposite of the function above basically\n number = 1\n elif card[0] == \"J\":\n number = 11\n elif card[0] == \"Q\":\n number = 12\n elif card[0] == \"K\":\n number = 13\n else:\n number = int(card[0]) # makes the value of the main card then adds it by the suit number\n suitIndex = 1\n if card[1] == \"0\":\n number = 10\n suitIndex = 2\n if card[suitIndex] == \"D\":\n suit = 0\n elif card[suitIndex] == \"H\":\n suit = 13\n elif card[suitIndex] == \"C\":\n suit = 26\n elif card[suitIndex] == \"S\":\n suit = 39\n cardNumber = number + suit\n return cardNumber", "def rank_card(card):\n return RANKS[card[0]]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
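An illustrative usage sketch for the card_values document above. The sample hand is hypothetical and assumes the (suit, value) tuple representation used by the companion draw helper in this collection.

# Hypothetical hand of (suit, value) tuples; face cards are letters, pip cards are ints
sample_hand = [('hearts', 'K'), ('spades', 'A'), ('clubs', 7)]
print(card_values(sample_hand))  # -> [10, 1, 7]: K maps to 10, A to 1, 7 stays numeric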
Draw n cards with or without replacement for each of k hands. Randomly draw n cards from the deck until the desired number is reached. Repeat the step k times to obtain k distinct hands. Return the already converted card values. If 'replacement' is omitted or False, the cards are drawn without replacement.
def hands(n_cards, k_hands, replacement=False):
    # For each of the k hands draw n cards (with or without replacement) and
    # compute their values
    if replacement:
        hands = [card_values(draw(n_cards, True)) for hand in range(k_hands)]
    else:
        hands = [card_values(draw(n_cards)) for hand in range(k_hands)]

    return hands
[ "def draw(n_cards, replacement=False):\n import random\n\n # If replacement is True, the same card can be picked multiple times\n if replacement:\n\n # Initialise hand to the empty list (no card picked yet)\n hand = []\n\n # Append a random card to the hand\n while len(hand) < n_cards:\n hand.append((random.choice(suits), random.choice(cards)))\n\n else:\n\n # Initialise hand to the empty set (no card picked yet)\n hand = set()\n\n # Add n unique cards to the hand, if n is less than or equal to total\n # deck size (52)\n if n_cards > len(suits) * len(cards):\n raise ValueError('Not enough cards in the deck.')\n else:\n while len(hand) < n_cards:\n hand.add((random.choice(suits), random.choice(cards)))\n\n return hand", "def make_deck():\n deck = 4 * valid_ranks\n random.shuffle(deck)\n return deck", "def draw(self, n: int):\n draw_cards = self.cards[:n]\n self.cards[:n] = [] # remove cards drawn from deck\n return draw_cards", "def build_deck():\n\n deck = []\n suits = ['clubs', 'diamonds', 'hearts', 'spades']\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n for times in range(6):\n for s in suits:\n for r in ranks:\n deck.append(Card(s, r))\n\n # plastic_card = \" \"\n shuffle(deck)\n # deck.insert(-randint(60, 75), plastic_card) # minus to count from the last card\n\n return deck", "def dealer_action(cards, deck):\n while cards_value(cards) < 16:\n cards = draw(cards, deck)\n return cards", "def choose_k_rand(k, max):\n rand = np.zeros((k,))\n for i in np.arange(k):\n rand[i] = np.random.randint(max)\n j = 0\n while j != i:\n if rand[i] == rand[j]:\n rand[i] = np.random.randint(max)\n j = 0\n j += 1\n return rand.astype(dtype=\"int16\")", "def gen(k):\n n = k * (k - 1) + 1\n\n cards = []\n # First do the first set\n for i in xrange(k):\n start = i*(k-1) + 1\n end = (i+1)*(k-1) + 1\n guys = (0,) + tuple(range(start, end))\n cards.append(guys)\n\n for block in xrange(1, k):\n for row in xrange(1, k):\n guys = (block,)\n for colblock in xrange(1, k):\n pad = (colblock-1)*(block-1)\n loc = (pad + (row-1)) % (k-1)\n offset = (colblock * (k-1)) + 1\n guys += ( loc + offset , )\n cards.append(guys)\n\n return cards", "def generate_deck(self):\n deck = []\n for suit in SUITS:\n for value in ORDER:\n deck.append(value + suit)\n\n self.cardsLeft = deck # Start keeping track of unplayed cards.\n\n random.shuffle(deck)\n \n self.h[0].add(deck[:10]) # Deal to hands ...\n self.h[1].add(deck[10:20]) #\n self.h[2].add(deck[20:30]) #\n self.kitty = deck[30:] # ... 
and to kitty.\n self.oldKitty = self.kitty", "def deal(self, n: int):\n d = Deck()\n d.shuffle()\n deal = tuple([] for _ in range(n))\n for _ in range(3):\n for i in range(n):\n card = d.pop()\n deal[i].append(card)\n return deal", "def create_deck():\n card_deck = []\n for x in range(6):\n for suit in ('H', 'S', 'C', 'D'):\n for rank in range(2, 11):\n card_deck.append((str(rank) + str(suit)))\n for face_cards in ('A', 'J', 'Q', 'K'):\n card_deck.append((str(face_cards) + str(suit)))\n\n random.shuffle(card_deck)\n return card_deck", "def shuffle(deck):\n \n n = len(deck) - 1\n while n > 0:\n k = random.randint(0, n)\n deck[k], deck[n] = deck[n], deck[k]\n n -= 1\n\n return deck", "def deal(players, n=5, deck=mydeck):\n random.shuffle(deck)\n return [deck[n*player:n*(player+1)] for player in range(players)]", "def _generate_random(self, k):\n # Tracks how many times each wizard has been used as a\n # right-hand variable in a constraint, in order to enforce\n # the heuristic that repeated use makes the problem easier.\n if k < self.num_wizards // 3:\n # We can't generate k constraints that mention all magicians\n print(\"We reached an invalid value for k given n.\")\n return []\n\n selected_count_to_wizard_list = {\n i: list() for i in range(1, max((k // self.num_wizards), self.num_wizards) + 2)\n }\n selected_count_to_wizard_list[0] = [\n self.wizards[i] for i in range(self.num_wizards)\n ]\n current_level = 0\n constraints = []\n for i in range(k):\n # Pick a target wizard for our constraint. Selection should be\n # uniformly random from the lowest possible selection level.\n selection_level_target_index = random.randint(\n 0,\n len(selected_count_to_wizard_list[current_level]) - 1,\n )\n target = selected_count_to_wizard_list[current_level][selection_level_target_index]\n target_index = self.wizards.index(target)\n selected_count_to_wizard_list[current_level].pop(\n selection_level_target_index\n )\n selected_count_to_wizard_list[current_level + 1].append(target)\n if not selected_count_to_wizard_list[current_level]:\n current_level += 1\n\n # Pick two other wizards for the constraint. 
Can be a random\n # selection of any two that satisfy the following criteria:\n # 1: The two wizards are not the same and are not TARGET\n # 2: The two wizards are both from the SAME side of TARGET\n # 3: The two wizards are chosen from the larger free side of TARGET\n selection_range = [0, self.num_wizards - 1]\n if target_index < self.num_wizards / 2:\n selection_range[0] = target_index + 1\n else:\n selection_range[1] = target_index - 1\n first, second = None, None\n while first == second:\n first = random.randint(*selection_range)\n second = random.randint(*selection_range)\n first, second = self.wizards[first], self.wizards[second]\n\n constraints.append([first, second, target])\n\n return constraints", "def new_deck(n_sets=4):\n card_set = [Card(rank, suit) for _ in range(n_sets) for rank in Rank for suit in Suit]\n return Deck(card_set)", "def choosecards(deck, number, remove=False):\n\n random.seed()\n selected = []\n while len(selected) < number:\n debugp (\"We have %d cards so far.\" % len(selected))\n newcard = random.choice(deck)\n if not newcard in selected:\n debugp(\"Adding to set: \")\n debugp(newcard.getprops())\n selected.append(newcard)\n if remove:\n deck.remove(newcard)\n else:\n debugp (\"******** Drew duplicate:\")\n debugp (newcard.getprops())\n sys.exit\n pass\n debugp (\"We have %d cards.\" % len(selected))\n return selected", "def deal_cards(deck, number_of_cards = 1):\n cards = random.sample(deck, number_of_cards)\n for c in cards:\n deck.remove(c)\n return sorted(cards, reverse = True)", "def draw_hunt_card(self, number_of_cards=1):\n for i in range(number_of_cards):\n card = random.choice(self.game.hunt_deck)\n move(card, self.game.hunt_deck, self.hhand)", "def deal_cards(self, num_cards=7):\n\n deal_cards = []\n for _ in range(num_cards):\n deal_cards.append(self.draw_card())\n\n return deal_cards", "def simulate():\n\tnp.random.seed(42)\n\tmask = np.asarray([1,2,3]*ceil(N/3))[:N]\n\twon, lost = 0, 0\n\tfor i in range(10**6):\n\t\tdeck = np.asarray(list(range(1,int(N/KINDS)+1))*4)\n\t\tnp.random.shuffle(deck)\n\t\tres = not any(deck == mask)\n\t\tif res:\twon += 1\n\t\telse:\tlost += 1\n\t\t\n\t\tif not i%10**4:\n\t\t\tp_eval = won/(won+lost)\n\t\t\tprint(f\">>> Simulated win probability with {i} games: {100*p_eval:.4f}%\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sum card values for each of the k hands. Return the sum of the card values, for each of the k hands provided.
def sum_hands(hands):
    # Give me the sum, for each of the hands provided
    sum_hands = [sum(hand) for hand in hands]

    return sum_hands
[ "def sum_hand(self, hand):\n total = 0\n for card in hand:\n if \"Ace\" in card:\n if total + 11 > 21:\n total += 1\n else:\n total += 11\n else:\n total += self.deck.deck[card]['value']\n return total", "def hand_score(cards):\n total = 0\n for card in cards:\n card = value(card)\n total += card\n return total", "def hands(n_cards, k_hands, replacement=False):\n\n # For each of the k hands draw n cards (with or without replacement) and\n # compute their values\n if replacement:\n hands = [card_values(draw(n_cards, True)) for hand in range(k_hands)]\n else:\n hands = [card_values(draw(n_cards)) for hand in range(k_hands)]\n\n return hands", "def values(self):\n val = 0\n aces = 0\n for card in self.cards:\n if cardvals[card.val] == 1:\n aces+=1\n else:\n val += cardvals[card.val]\n\n handval = [val]\n\n for ace in range(aces):\n newhandval = []\n for i in handval:\n if newhandval.count(i+1) == 0:\n newhandval.append(i + 1)\n if not newhandval.count(i+1) == 0:\n newhandval.append(i + 11)\n handval = newhandval\n\n return handval", "def value_hand(hand):\n assert len(hand) == 5\n score = 0\n hand = sorted(hand)\n suits_in_hand = set(card.suit for card in hand)\n ranks_in_hand = set(card.rank for card in hand)\n\n def is_straight():\n if len(ranks_in_hand) < 5:\n return False\n max_rank_value, min_rank_value = hand[-1].rank.value, hand[0].rank.value\n if {Rank.TWO, Rank.ACE} < ranks_in_hand:\n max_rank_value, min_rank_value = hand[-2].rank.value, Rank.TWO.value - 1\n return len(ranks_in_hand) == 5 and max_rank_value - min_rank_value == 4\n\n # Check pair hands (pair, full house, ...)\n counter = Counter(card.rank for card in hand)\n RankCount = namedtuple('RankCount', 'rank, count')\n rank_counts = [RankCount(e[0], e[1]) for e in counter.most_common()]\n if rank_counts[0].count == 3 and rank_counts[1].count == 2: # Full house\n score = value_hand.FULL\\\n + value_hand.K * rank_counts[0].rank.value\\\n + rank_counts[1].rank.value\n elif rank_counts[0].count == 2 and rank_counts[1].count == 2: # Two pair\n high_pair_value = max(rank_counts[0].rank.value, rank_counts[1].rank.value)\n low_pair_value = min(rank_counts[0].rank.value, rank_counts[1].rank.value)\n score = value_hand.TWO_PAIR\\\n + value_hand.K**2 * high_pair_value\\\n + value_hand.K * low_pair_value\\\n + rank_counts[2].rank.value\n elif rank_counts[0].count == 3 and rank_counts[1].count == 1: # Three of a kind\n high_kicker_value = max(rank_counts[1].rank.value, rank_counts[2].rank.value)\n low_kicker_value = min(rank_counts[1].rank.value, rank_counts[2].rank.value)\n score = value_hand.SET\\\n + value_hand.K**2 * rank_counts[0].rank.value\\\n + value_hand.K * high_kicker_value\\\n + low_kicker_value\n elif rank_counts[0].count == 2 and rank_counts[1].count == 1: # One pair\n kickers = sorted(ranks_in_hand - {rank_counts[0].rank}, reverse=True)\n score = value_hand.PAIR\\\n + value_hand.K**3 * rank_counts[0].rank.value\\\n + value_hand.K**2 * kickers[0].value\\\n + value_hand.K * kickers[1].value\\\n + kickers[2].value\n elif rank_counts[0].count == 4 and rank_counts[1].count == 1: # Four of a kind\n score = value_hand.FOUR\\\n + value_hand.K * rank_counts[0].rank.value\\\n + rank_counts[1].rank.value\n # Check for flush\n if len(suits_in_hand) == 1:\n score = max(value_hand.FLUSH, score)\n # Check for straight_flush\n if is_straight():\n score = value_hand.STRAIGHT_FLUSH\n # Check for royal flush\n if min(ranks_in_hand) is Rank.TEN:\n score = value_hand.ROYAL\n else:\n score += Rank.FIVE.value if {Rank.TWO, Rank.ACE} < 
ranks_in_hand else max(ranks_in_hand).value\n else:\n score += sum(14**i * hand[i].rank.value for i in range(len(hand)))\n elif is_straight(): # Check for straight\n score = value_hand.STRAIGHT\\\n + (Rank.FIVE.value if {Rank.TWO, Rank.ACE} < ranks_in_hand else max(ranks_in_hand).value)\n elif score < value_hand.PAIR:\n # High card is best hand\n score = sum(14**i * hand[i].rank.value for i in range(len(hand)))\n return score", "def total_cards(first, second, third):\n cards_list = []\n cards_list.append(deck_of_cards[first])\n cards_list.append(deck_of_cards[second])\n cards_list.append(deck_of_cards[third])\n\n return sum(cards_list)", "def iqm_sum(self, key):\n data = self.data[key]\n deck = sorted(data[\"deck\"])\n none_action = self.none_action\n if data[\"deck_count\"] == 0:\n return\n # Handle None values\n if None in deck:\n none_count = deck.count(None)\n while None in deck:\n deck.remove(None)\n if none_action in (\"max\", \"min\"):\n if none_action is \"max\":\n num_max = deck[-1]\n deck = deck + ([num_max] * none_count)\n else: # min\n num_min = deck[0]\n deck = ([num_min] * none_count) + deck\n else:\n data[\"deck_count\"] -= none_count\n deck.sort()\n # Return average if there are too few numbers to quartile\n if data[\"deck_count\"] < 4:\n iqm = float(sum(deck) / data[\"deck_count\"])\n else:\n # determine quartile (point that divides the deck into four\n # groups)\n quartile = int(0.25 * data[\"deck_count\"])\n # discard the lowest 25% and highest 25%\n deck = deck[quartile:-quartile]\n # mean of the interquartile range\n iqm = sum(deck) / len(deck)\n data[\"iqm_sum\"] += iqm\n data[\"iqm_count\"] += 1\n data[\"deck_count\"] = 0", "def count_ways_summing_to_k(nums, k):\n count = 0\n for n in nums:\n diff = k - n\n if diff == 0:\n count += 1\n elif diff > 0:\n if diff in dp:\n count += dp[diff]\n else:\n count += count_ways_summing_to_k(nums, diff)\n dp[k] = count\n return count", "def sum(self, values):\n return sum(values)", "def card_values(hand):\n\n # Extract card values\n card_values = [value for (suit, value) in hand]\n\n # Convert special card names to values\n card_values = [10 if value in ('J', 'Q', 'K') else 1 if value == 'A' \\\n else value for value in card_values]\n\n return card_values", "def get_hand_values(self):\n values = list()\n has_ace = False\n hand_value = 0\n for c in self.hand:\n if isinstance(c, card.Card):\n if c.value is 1:\n has_ace = True\n else:\n hand_value += c.value\n if has_ace: # append the current hand value with the two values of an Ace\n values.append(hand_value + 1)\n values.append(hand_value + 11)\n else:\n values.append(hand_value)\n return values", "def selective_sum(n, k):\n n = str(n)\n sum =0\n count = 0\n for i in (sorted(n)[::-1]):\n count += 1\n if count <= k :\n sum += int(i)\n else:\n break\n return sum", "def maxScore(self, cardPoints: list[int], k: int) -> int:\n maxLen = len(cardPoints) - k\n minSum = float('inf')\n start = currSum = 0\n for end, p in enumerate(cardPoints):\n currSum += p\n\n if end - start + 1 > maxLen:\n currSum -= cardPoints[start]\n start += 1\n\n if end - start + 1 == maxLen:\n minSum = min(minSum, currSum)\n\n return sum(cardPoints) - minSum", "def get_value(cards):\n result = 0\n aces = 0\n for c in cards:\n result += c.value\n if c.rank == \"Ace\":\n aces += 1\n while result > 21 and aces > 0:\n result -= 10\n aces -= 1\n return result", "def _stat_count(self, k):\n # only evaluate that part of the RP that can be covered without allowing for \n # 'edge boxes' with different size\n nmod = 
int(self.array.shape[0]//k*k)\n array = self.array[:nmod, :nmod]\n # box-counting (src: https://github.com/rougier/numpy-100 (#87))\n S = np.add.reduceat(np.add.reduceat(array, np.arange(0, array.shape[0], k), axis=0, dtype=int),\n np.arange(0, array.shape[1], k), axis=1, dtype=int)\n self.S = S", "def sum_values(metric_out):\n total = 0\n\n for rank_to_dict in metric_out.values():\n for target_to_val in rank_to_dict.values():\n for value in target_to_val.values():\n total += value\n\n return total", "def sum(self):\n return sum(sum(r) for r in self.data)", "def two_numbers_sum_to_k_single_pass(numbers, k):\n\n # Some basic sanity checking\n if len(numbers) < 2:\n print(\"{}: Not enough numbers!\".format(sys._getframe().f_code.co_name))\n return False\n\n # Loop through each index in numbers\n for i in range(len(numbers) - 1):\n\n # Form a new list excluding the entry at index i and see if there are\n # any entries equal to k-numbers[i]\n if((numbers[:i] + numbers[i + 1:]).count(k - numbers[i])):\n print('{}: {} and {} sum to {}'.format(\n sys._getframe().f_code.co_name,\n numbers[i],\n k - numbers[i],\n k)\n )\n\n return True\n\n else:\n print(\"{}: No numbers found to sum to {}\".format(\n sys._getframe().f_code.co_name,\n k)\n )\n\n return False", "def _glide_count(self, k, sym):\n # only evaluate that part of the RP that can be covered without allowing for \n # 'edge boxes' with different size\n nmod = int(self.array.shape[0]//k*k)\n array = self.array[:nmod, :nmod]\n T = array.shape[0]\n S = np.zeros((T, T), dtype=int)\n # should symmetry be used for saving computational time?\n # (e.g. not reasonable for RPs based on FAN)\n if sym:\n for i in range(T):\n for j in range(i,T):\n tmp_box = self.array[i:i+k, j:j+k]\n s = tmp_box.sum()\n S[i,j], S[j,i] = s, s\n else:\n for i in range(T):\n for j in range(T):\n tmp_box = self.array[i:i+k, j:j+k]\n S[i,j] = tmp_box.sum()\n self.S = S" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
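A short end-to-end sketch tying together the three blackjack-style helpers above (card_values, hands, sum_hands). It assumes the draw helper and the suits/cards deck definitions shown in the first negative example are available; the printed totals are only illustrative because the draws are random.

# Draw 5 cards for each of 3 hands without replacement, then total each hand
three_hands = hands(n_cards=5, k_hands=3, replacement=False)
totals = sum_hands(three_hands)
print(totals)  # e.g. [23, 31, 18] -- values vary from run to run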
Compute Student's t cumulative distribution function (cdf). Compute Student's t cumulative distribution function, F(x) = P(X <= x). Compute 1 - F(x) if upper = True.
def tcdf(x, m, s, n, upper=False):
    from scipy import stats

    # If upper is set to True, compute 1 - F(x); else, compute F(x)
    if upper:
        tcdf = 1 - stats.t.cdf(x, n - 1, m, s)
        print('P(X >= %s) = %.4f' % (x, tcdf))
    else:
        tcdf = stats.t.cdf(x, n - 1, m, s)
        print('P(X <= %s) = %.4f' % (x, tcdf))
[ "def to_cdf(pdf):\n return np.cumsum(pdf)", "def _uniform_order_statistic_cdf(i, n, t):\r\n return betainc(i+1, n-i, t)", "def cdf(self, t, l):\n def _cdf(t):\n x = np.linspace((self.r-l)/self.c,t,100)\n return np.trapz(self.pdf(x,l),x)\n\n if np.iterable(t):\n return np.array([_cdf(x) for x in t])\n else:\n return _cdf(t)", "def compute_cdf(pdf):\n values = np.array([np.trapz(pdf.iloc[:x], pdf.index[:x]) for x in range(pdf.size)])\n cdf = pd.Series(values, index=pdf.index.values)\n return cdf", "def _NormalDistributionCDF(x, stddev):\n return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))", "def compute_sf(cdf):\n sf = 1 - cdf\n return sf", "def cdf(self, x) -> float:\n cdf_result = 0\n\n for distribution_amplitude, distribution in zip(self.distribution_amplitudes, self.distributions):\n cdf_result += (distribution_amplitude * distribution.cdf(x))\n\n return cdf_result", "def cdf(self, t):\n idx = np.searchsorted(self.T, t) # gives idx of first number bigger than t\n\n if idx==0: # extrapolation \n a = (self.F_T[1] - self.F_T[0])/(self.T[1] - self.T[0])\n res = self.F_T[0] - a * (self.T[0]-t) \n return max(0, res)\n\n if idx==len(self.T): # extrapolation \n a = (self.F_T[-1] - self.F_T[-2])/(self.T[-1] - self.T[-2])\n res = self.F_T[-1] + a * (t-self.T[-1]) \n return min(1, res)\n\n a = (t-self.T[idx-1])/(self.T[idx] - self.T[idx-1])\n return (1-a)*self.F_T[idx-1] + a*self.F_T[idx]", "def make_conditional_cdf(lb, ub, plb, pub):\n pr = pub - plb\n def actual_decorator(fzcdf):\n def wrapper(x):\n result = (fzcdf(np.maximum(lb, np.minimum(x, ub))) - plb) / pr\n return result\n return wrapper\n\n return actual_decorator", "def cdf(x, iterations=300):\r\n product = 1.0\r\n taylor_exp = [x]\r\n for i in range(3, iterations, 2):\r\n product *= i\r\n taylor_exp.append(float(x**i)/product)\r\n taylor_fact = sum(taylor_exp)\r\n\r\n return (0.5 + (taylor_fact * std_normal_pdf.pdf(x, mean=0, std_dev=1)))", "def cdf(self, value):\n self._cdf = self._validate_cdf(value)", "def pmf_to_cdf(pmf):\r\n return np.cumsum(pmf)", "def samples_cdf(self):\n if not hasattr(self, '_samples_cdf'):\n self._samples_cdf = self.evaluate(sorted(self.samples))\n return self._samples_cdf", "def approx_gaussian_cdf(x):\n factor = np.sqrt(2.0 / np.pi)\n y = factor * (x + 0.044715 * torch.pow(x, 3))\n phi = 0.5 * (1 + torch.tanh(y))\n return phi", "def standard_normal_cdf(x: Union[int, float, list, np.ndarray]) -> Union[float, list, np.ndarray]:\n\n # Check\n check_type_x(x)\n x = initialize_input(x)\n\n # Compute\n cdf = (1/2) * ( 1 + erf(x / np.sqrt(2)) )\n\n # Return\n if len(cdf)==1:\n return cdf[0]\n else:\n return cdf", "def cdf(self, x) -> float:\n if x < 0:\n return 0.\n\n if (0 <= x < 1) or (x is False):\n return 1 - self.proba_is_true\n\n if x >= 1 or (x is True):\n return 1.\n\n return 0.", "def cdf(self) -> xr.DataArray:\n if not self._is_memoized('_cdf'):\n # ecfd = sm.distributions.ECDF(self._ds)\n x = np.linspace(min(self._ds), max(self._ds))\n self._cdf = sm.distributions.ECDF(self._ds)(x)\n\n return self._cdf", "def cdf(self, k):\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n # print(self.pmf(k))\n e = 2.7182818285\n const = (e ** (-1 * self.lambtha))\n return self.pmf(k) + self.cdf(k - 1)", "def empirical_cdf(data):\n\n vals = pd.Series(data).value_counts()\n ecdf = pd.DataFrame(data).set_index(keys=0)\n probs = pd.DataFrame(vals.sort_index().cumsum() / np.float(len(data)))\n ecdf = ecdf.join(probs)\n ecdf = ecdf.reset_index()\n ecdf.columns = ['data', 'ecdf']\n\n return 
ecdf" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
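A minimal usage sketch for the tcdf document above, with made-up numbers; it only assumes SciPy is installed. Note that the function prints the probability rather than returning it.

# P(X <= 2.1) for a t distribution with n - 1 = 9 degrees of freedom,
# centred on a sample mean of 0 with a standard error of 1
tcdf(2.1, m=0, s=1, n=10)                # prints P(X <= 2.1) = ...
tcdf(2.1, m=0, s=1, n=10, upper=True)    # prints P(X >= 2.1) = ...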
Replaces org token with html representation
def org(value):
    start_token = '<org>'
    end_token = '</org>'
    return value.replace(start_token, '<i class="organisation">').replace(end_token, '</i>&nbsp;<sup><i class="fa fa-briefcase"></i></sup>')
[ "def export_to_html(org_filename):\n if not org_filename.endswith(ORG_FILE_EXTENSION):\n raise Exception(\"Must provide an org-mode file.\")\n\n output_lines = []\n title, language, date, tags, author, description = \"\", \"\", \"\", \"\", \"\", \"\"\n with open(org_filename, 'r') as input:\n for line in input:\n if line.startswith(\"\\n\"):\n continue\n if line.startswith(TITLE):\n title = line.replace(TITLE, \"\").strip()\n continue\n if line.startswith(LANGUAGE):\n language = line.replace(LANGUAGE, \"\").strip()\n continue\n if line.startswith(DATE):\n date = line.replace(DATE, \"\").strip()\n continue\n if line.startswith(TAGS):\n tags = line.replace(TAGS, \"\").strip()\n continue\n if line.startswith(AUTHOR):\n author = line.replace(AUTHOR, \"\").strip()\n continue\n if line.startswith(DESCRIPTION):\n description = line.replace(DESCRIPTION, \"\").strip()\n continue\n output_lines.append(translate_to_html(line))\n\n output_lines = [HTML_HEADER.replace(\"TITLE\", title)\n .replace(\"LANGUAGE\", language)\n .replace(\"TAGS\", tags)\n .replace(\"DESCRIPTION\", description)\n .replace(\"AUTHOR\", author)] + output_lines\n output_lines.append(HTML_FOOTER)\n\n return bs(\"\".join(output_lines), \"html.parser\").prettify()", "def globalise(tag):\n GLOBE = u\"\\U0001F310\"\n if not tag.text_content().strip(): # delink if no text content\n tag.tag = \"span\"\n text = tag.text or \"\"\n tag.text = GLOBE + \" \" + text", "def to_html(tree):", "def _get_format_from_document(self, token: Any, document: Any) -> Any:\n # Modified by EKR.\n # These lines cause unbounded recursion.\n # code, html = next(self._formatter._format_lines([(token, u'dummy')]))\n # self._document.setHtml(html)\n return QtGui.QTextCursor(self._document).charFormat()", "def mask_token(self) -> str:", "def tech(value):\n start_token = '<tech>'\n end_token = '</tech>'\n return value.replace(start_token,'<i class=\"technology\">').replace(end_token,'</i>&nbsp;<sup><i class=\"fa fa-file-screen\"></i></sup>')", "def __handle_raw_html_token(cls, output_html, next_token, transform_state):\n _ = transform_state\n\n return \"\".join(\n [\n output_html,\n \"<\",\n ParserHelper.resolve_all_from_text(next_token.raw_tag),\n \">\",\n ]\n )", "def orgname_clean(orgname):\n\n\tx = orgname.strip()\n\tx = re.sub('\\&\\#x\\d\\d\\;', '', x)\n\treturn x", "def replace(self, replaceWord): #$NON-NLS-1$\r", "def replace(matched):\n\n matched_str = matched.groups()[0]\n if match == '`':\n matched_str = cgi.escape(matched_str)\n return opener + matched_str + closer", "def make_token(self, org):\n return self._make_token_with_timestamp(org, self._num_days(self._today()))", "def url_to_tag(self, document, icon='<URL>'):\n\t\treturn re.sub(Patterns.URL, icon, document)", "def encode(token):\n return token.replace('\\\\', '\\\\\\\\').replace('/', '\\\\-')", "def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n self.output(f'<keyword> {value} </keyword>')\n elif type == 'symbol': # check for symbol\n #\"\"\" start xml formatting requirements for symbols \"\"\"\n if value == '<':\n self.output(f'<symbol> &lt; </symbol>')\n elif value == '>':\n self.output(f'<symbol> &gt; </symbol>')\n elif value == '&':\n self.output(f'<symbol> &amp; </symbol>')\n #\"\"\" end xml formatting requirements for symbols \"\"\"\n else:\n self.output(f'<symbol> {value} </symbol>')\n elif type == 'integer': # check for integer\n self.output(f'<integerConstant> {value} </integerConstant>')\n elif type == 
'identifier': # check for indentifier\n self.output(f'<identifier> {value} </identifier>')\n elif type == 'string': # it's a string\n self.output(f'<stringConstant> {value} </stringConstant>')", "def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)", "def fix_translation(org_text, trans_text):\r\n pre_matcher = re.compile(r'(<pre>[^<]*</pre>)', re.DOTALL)\r\n trans_text = replace_all(trans_text, pre_matcher.findall(\r\n trans_text), pre_matcher.findall(org_text))\r\n code_matcher = re.compile(r'(<code>[^<]*</code>)', re.DOTALL)\r\n trans_text = replace_all(trans_text, code_matcher.findall(\r\n trans_text), code_matcher.findall(org_text))\r\n return trans_text", "def clean_content(self):\n\n transformations = {\n re.escape('<#{0.id}>'.format(channel)): '#' + channel.name\n for channel in self.channel_mentions\n }\n\n mention_transforms = {\n re.escape('<@{0.id}>'.format(member)): '@' + member.display_name\n for member in self.mentions\n }\n\n # add the <@!user_id> cases as well..\n second_mention_transforms = {\n re.escape('<@!{0.id}>'.format(member)): '@' + member.display_name\n for member in self.mentions\n }\n\n transformations.update(mention_transforms)\n transformations.update(second_mention_transforms)\n\n if self.server is not None:\n role_transforms = {\n re.escape('<@&{0.id}>'.format(role)): '@' + role.name\n for role in self.role_mentions\n }\n transformations.update(role_transforms)\n\n def repl(obj):\n return transformations.get(re.escape(obj.group(0)), '')\n\n pattern = re.compile('|'.join(transformations.keys()))\n result = pattern.sub(repl, self.content)\n\n transformations = {\n '@everyone': '@\\u200beveryone',\n '@here': '@\\u200bhere'\n }\n\n def repl2(obj):\n return transformations.get(obj.group(0), '')\n\n pattern = re.compile('|'.join(transformations.keys()))\n return pattern.sub(repl2, result)", "def edit_token(self):\n return self.wiki.meta.tokens()", "def check_organizations(self,words,entity_tag):\n idWord=0\n last_org=-1\n for word in words:\n if word.title():\n first_letter=word[0]\n if re.search('[ÑA-Z]', first_letter)!=None and re.compile(self.organizations[first_letter]).search(word):\n entity_tag[idWord]='ORG'\n last_org=idWord\n idWord+=1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces tech token with html representation
def tech(value):
    start_token = '<tech>'
    end_token = '</tech>'
    return value.replace(start_token, '<i class="technology">').replace(end_token, '</i>&nbsp;<sup><i class="fa fa-file-screen"></i></sup>')
[ "def clean_kspon(token: str) -> str:\n token = replace_number_token(token)\n token = remove_erroneous_tags(token)\n token = replace_double_space(token)\n return token", "def __handle_raw_html_token(cls, output_html, next_token, transform_state):\n _ = transform_state\n\n return \"\".join(\n [\n output_html,\n \"<\",\n ParserHelper.resolve_all_from_text(next_token.raw_tag),\n \">\",\n ]\n )", "def mask_token(self) -> str:", "def html_encode_django_chars(txt):\n txt = txt.replace(\"{\", \"&#123;\")\n txt = txt.replace(\"}\", \"&#125;\")\n txt = txt.replace(\"%\", \"&#37;\")\n return txt", "def escape_html(html):\n #boileeeeeeerplate\n return unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def globalise(tag):\n GLOBE = u\"\\U0001F310\"\n if not tag.text_content().strip(): # delink if no text content\n tag.tag = \"span\"\n text = tag.text or \"\"\n tag.text = GLOBE + \" \" + text", "def write_token(self, token):\n\n type = token.type\n value = token.value\n\n if type == 'keyword': # check for keyword\n self.output(f'<keyword> {value} </keyword>')\n elif type == 'symbol': # check for symbol\n #\"\"\" start xml formatting requirements for symbols \"\"\"\n if value == '<':\n self.output(f'<symbol> &lt; </symbol>')\n elif value == '>':\n self.output(f'<symbol> &gt; </symbol>')\n elif value == '&':\n self.output(f'<symbol> &amp; </symbol>')\n #\"\"\" end xml formatting requirements for symbols \"\"\"\n else:\n self.output(f'<symbol> {value} </symbol>')\n elif type == 'integer': # check for integer\n self.output(f'<integerConstant> {value} </integerConstant>')\n elif type == 'identifier': # check for indentifier\n self.output(f'<identifier> {value} </identifier>')\n elif type == 'string': # it's a string\n self.output(f'<stringConstant> {value} </stringConstant>')", "def replace_mustache_tag(self, html_source, tag, replacement_text, encode=False):\n if encode:\n return html_source.replace(tag, html.escape(replacement_text, quote=True))\n else:\n return html_source.replace(tag, replacement_text)", "def sanitize_token(self, token):\n token_type = token['type']\n if token_type in ['StartTag', 'EndTag', 'EmptyTag']:\n if token['name'] in self.allowed_elements:\n return self.allow_token(token)\n\n elif self.strip_disallowed_elements:\n pass\n\n else:\n if 'data' in token:\n # Alphabetize the attributes before calling .disallowed_token()\n # so that the resulting string is stable\n token['data'] = OrderedDict(\n [(key, val) for key, val in sorted(token['data'].items(), key=_attr_key)]\n )\n return self.disallowed_token(token)\n\n elif token_type == 'Comment':\n if not self.strip_html_comments:\n return token\n\n else:\n return token", "def escapeForContent(data):\n if isinstance(data, unicode):\n data = data.encode('utf-8')\n data = data.replace(b'&', b'&amp;'\n ).replace(b'<', b'&lt;'\n ).replace(b'>', b'&gt;')\n return data", "def clean_entities(tag):\n return ENTITY_CHARS_RE.sub('', tag)", "def encode(token):\n return token.replace('\\\\', '\\\\\\\\').replace('/', '\\\\-')", "def clean_hashtag(hashtag):\n hashtag_unicode = hashtag.encode('ascii','ignore').decode('UTF-8')\n hashtag_control = hashtag_unicode.translate(controls)\n hashtag_escape = hashtag_control.translate(escapes)\n return hashtag_escape", "def redact_string(self, string: str) -> str:\n if not self._redact:\n return string\n\n return string.replace(self._host.api_token, \"[REDACTED_API_TOKEN]\")", "def _remove_debugger_token(self, content):\n if (not 
project_config.ProjectConfig.get_instance().is_launch_ide or\n not self.project_info.is_main_project):\n content = content.replace(_ENABLE_DEBUGGER_MODULE_TOKEN, '')\n return content", "def transform(self, actual_tokens):\n POGGER.debug(\"\\n\\n---\\n\")\n transform_state, output_html, actual_tokens_size = (\n TransformState(actual_tokens),\n \"\",\n len(actual_tokens),\n )\n for next_token in transform_state.actual_tokens:\n (\n transform_state.add_trailing_text,\n transform_state.add_leading_text,\n transform_state.next_token,\n ) = (None, None, None)\n if (transform_state.actual_token_index + 1) < actual_tokens_size:\n transform_state.next_token = actual_tokens[\n transform_state.actual_token_index + 1\n ]\n if next_token.token_name in self.start_token_handlers:\n start_handler_fn = self.start_token_handlers[next_token.token_name]\n output_html = start_handler_fn(output_html, next_token, transform_state)\n\n elif next_token.is_end_token:\n if next_token.type_name in self.end_token_handlers:\n end_handler_fn = self.end_token_handlers[next_token.type_name]\n output_html = end_handler_fn(\n output_html, next_token, transform_state\n )\n else:\n assert (\n False\n ), f\"Markdown token end type {next_token.type_name} not supported.\"\n else:\n assert False, f\"Markdown token type {type(next_token)} not supported.\"\n\n POGGER.debug(\"======\")\n POGGER.debug(\n \"add_trailing_text-->$<--\",\n transform_state.add_trailing_text,\n )\n POGGER.debug(\"add_leading_text -->$<--\", transform_state.add_leading_text)\n POGGER.debug(\"output_html -->$<--\", output_html)\n\n if transform_state.add_trailing_text:\n output_html = self.__apply_trailing_text(output_html, transform_state)\n\n if transform_state.add_leading_text:\n output_html = self.__apply_leading_text(output_html, transform_state)\n\n POGGER.debug(\"------\")\n POGGER.debug(\"next_token -->$<--\", next_token)\n POGGER.debug(\"output_html -->$<--\", output_html)\n POGGER.debug(\"transform_stack-->$<--\", transform_state.transform_stack)\n\n transform_state.last_token = next_token\n transform_state.actual_token_index += 1\n if output_html and output_html[-1] == ParserHelper.newline_character:\n output_html = output_html[:-1]\n POGGER.debug(\"output_html -->$<--\", output_html)\n return output_html", "def to_html(tree):", "def _replace_tags_in_text(self, notification, contact, appointment):\n messages = {\n \"sms\": notification.sms_message,\n \"mail\": notification.mail_message,\n \"chat\": notification.chat_message,\n \"mail_subject\": notification.mail_subject,\n }\n\n if appointment.slot is not None:\n public_description = appointment.slot.public_description\n else:\n public_description = None\n\n if appointment.advisor is not None:\n advisor = appointment.advisor.full_name\n else:\n advisor = \"\"\n\n tokens = {\n \"{contact}\": contact.full_name if contact else \"\", # NOQA: FS003\n \"{date_rdv}\": format_datetime_interval( # NOQA: FS003\n appointment.start_date, appointment.end_date\n ),\n \"{conseiller}\": advisor, # NOQA: FS003\n \"{structure}\": appointment.group.name or \"\", # NOQA: FS003\n \"{lieu}\": f\"{self._format_place(appointment.place)}\" # NOQA: FS003\n if appointment.place\n else \"\",\n \"{tel_structure}\": f\"{appointment.group.phone}\" # NOQA: FS003\n if appointment.group\n else \"\",\n \"{tel_lieu}\": f\"{appointment.place.phone}\" # NOQA: FS003\n if appointment.place\n else \"\", # NOQA: FS003\n \"{texte_lieu}\": appointment.place.presentation or \"\" # NOQA: FS003\n if appointment.place\n else \"\",\n 
\"{texte_motif}\": appointment.reason.description or \"\" # NOQA: FS003\n if appointment.reason\n else \"\",\n \"{texte_creneau}\": public_description or \"\", # NOQA: FS003\n \"{jitsi}\": f\"<a href=\\\"https://meet.jit.si/{contact.email.replace('@', '-')}\\\">https://meet.jit.si/{contact.email.replace('@', '-')}</a>\" # NOQA: FS003,E501\n if contact\n else \"\", # NOQA: FS003, E501\n }\n\n for media in messages:\n for token in tokens:\n try:\n messages[media] = (\n messages[media].replace(token, tokens[token]).strip()\n )\n except AttributeError:\n pass\n\n # we may want to strip for sms : messages[\"sms\"] = messages[\"sms\"][:160]\n if messages[\"mail\"] is not None:\n messages[\"mail_ascii\"] = self._strip_html_tags(messages[\"mail\"])\n else:\n messages[\"mail_ascii\"] = \"\"\n\n return messages", "def _fix_angle_brackets(htex):\n htex = re.sub(r\"<(\\s+)\", r\"&lt;\\1\", htex)\n htex = re.sub(r\"([^a-z\\\"\\/\\-]+)>\", r\"\\1&gt;\", htex)\n return htex" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
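The org and tech documents above read like Django-style template filters that swap lightweight markup tokens for styled HTML. An illustrative call on a made-up input string:

text = "Worked at <org>Acme Corp</org> using <tech>Python</tech>."
print(tech(org(text)))
# -> Worked at <i class="organisation">Acme Corp</i>&nbsp;<sup>...</sup>
#    using <i class="technology">Python</i>&nbsp;<sup>...</sup>.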
Return the INDRA Statement corresponding to a given rule by name.
def _stmt_from_rule(model, rule_name, stmts):
    stmt_uuid = None
    for ann in model.annotations:
        if ann.predicate == 'from_indra_statement':
            if ann.subject == rule_name:
                stmt_uuid = ann.object
                break
    if stmt_uuid:
        for stmt in stmts:
            if stmt.uuid == stmt_uuid:
                return stmt
[ "def get_rule(node_name: str):\n rule_name = get_rule_name(node_name)\n return Rule(rule_name)", "def get_replication_rule_by_name(self, name):\n LOG.info(\"Getting replication_rule details by name: '%s'\" % name)\n return self.rest_client.request(\n constants.GET,\n constants.REPLICATION_RULE_LIST_URL.format(self.server_ip),\n querystring=helpers.prepare_querystring(\n constants.SELECT_ID_AND_NAME,\n name=constants.EQUALS + name))", "def get_rule(connection, rule_id):\n connection.command_path = \"rule/{0}\".format(rule_id)\n extra_headers = {connection.header_key: connection.token}\n url = connection.build_url()\n verify_ssl = connection.verify_ssl\n res = requests.get(url=url, headers=extra_headers, verify=verify_ssl)\n if res.status_code > 210:\n raise RuleNotFoundException(res.content)\n return rules.parse_rule(res.content)", "def get_ruleset(self, rulesetId=None, name=None):\n if rulesetId:\n return self._get(route='CompliancePolicy/%s' % str(rulesetId))\n elif name:\n return filter(lambda x: x['name'] == name, self.list_rulesets())[0]", "def get(self, name):\n try:\n return [ruleset for ruleset in self.filter() if ruleset.name == name][0]\n except IndexError:\n return []", "def grammar_rule_api_name(rule):\n return names.Name.from_lower(rule + '_rule')", "def get_network_acl_rule_by_name(self, acl, name):\n # Retrieve network ACL to get the ID\n # (mostly useful if a name is provided)\n acl_info = self.get_network_acl(acl)\n if \"errors\" in acl_info:\n return acl_info\n\n try:\n # Retrieve network ACL rules\n data = self.get_network_acl_rules()\n if \"errors\" in data:\n return data\n\n # Loop over network ACL rules until filter match\n for rule in data['rules']:\n if rule[\"name\"] == name:\n # Return data\n return rule\n\n # Return error if no network ACL is found\n return resource_not_found()\n\n except Exception as error:\n print(\"Error fetching rule with name {} for network ACL\"\n \"with ID {}. 
{}\".format(name, acl_info[\"id\"], error))\n raise", "def _create_db_rule_with_name(self, name, stage=False):\n self.rule_table._table.put_item(Item={'RuleName': name, 'Staged': stage})", "def rule_name(self) -> Optional[str]:\n return pulumi.get(self, \"rule_name\")", "def get_snapshot_rule_by_name(self, name):\n LOG.info(\"Getting snapshot_rule details by name: '%s'\" % name)\n return self.rest_client.request(\n constants.GET,\n constants.SNAPSHOT_RULE_LIST_URL.format(self.server_ip),\n querystring=helpers.prepare_querystring(\n constants.SELECT_ID_AND_NAME,\n name=constants.EQUALS + name))", "def get_type_by_name(self, name):\n # The implicit_rules dict is populated by the analyze() method\n # of one.typedecl_statements.Implicit\n implicit_rules = self.a.implicit_rules\n if implicit_rules is None:\n raise AnalyzeError(\n \"Implicit rules mapping is null \" \"while getting %r type\" % (name)\n )\n line = name[0].lower()\n if line in implicit_rules:\n return implicit_rules[line]\n # default rules:\n if line in \"ijklmn\":\n line = \"default_integer\"\n else:\n line = \"default_real\"\n var = implicit_rules.get(line, None)\n if var is None:\n if line[8:] == \"real\":\n implicit_rules[line] = var = Real(self, self.item.copy(\"real\"))\n else:\n implicit_rules[line] = var = Integer(self, self.item.copy(\"integer\"))\n return var", "def load_rule(index, entry):\n\n LOGGER.debug(\"Parsing rule %d: %s\", index, dict(entry))\n \n try:\n if entry[\"label\"].startswith(RULE_COMMENT_CHAR):\n LOGGER.debug(\"Rule %d ignored, because it's commented out\", index)\n return None\n\n require_column(entry, \"label\", [\"label\", \"LABEL\", \"apply\", \"APPLY\"])\n label = require_column(entry, \"Label\", \"*\")\n require_column(entry, \"if\", [\"if\", \"IF\", \"when\", \"WHEN\"])\n\n condition = load_condition(entry, 1)\n\n rule = Rule(index, label, condition)\n LOGGER.debug(\"Parsed rule %d: %s\", index, rule)\n return rule\n\n except Exception as ex:\n LOGGER.error(\"Rule %d ignored, because it cannot be parsed: %s\", index, ex)\n return None", "def getSingleRuleReq(self, rule):\n r = requests.get(self.SONAR_URL + '/api/rules/search?rule_key=' + rule)\n ruleInfo = r.json()['rules'][0]\n return ruleInfo", "def get_rule_by_ordinal(name, ruleslist):\n from src.praxxis.sqlite import sqlite_rulesengine\n from src.praxxis.util import error\n\n if str(name).isdigit():\n try:\n name = ruleslist[int(name)-1][0]\n except IndexError:\n raise error.RuleNotFoundError(name)\n else:\n ruleslist = [rule[0] for rule in ruleslist] # un-tuple the list to check if name valid\n if name not in ruleslist:\n raise error.RuleNotFoundError(name)\n return(name)", "def get_rule_name(node_name: str):\n return node_mappings.get(node_name)", "def _get_statement_by_id(tx: Transaction,\n statement_id: str) -> Optional[Node]:\n query = (\n \"MATCH (s:Statement) \"\n f\"WHERE toLower(s.id) = toLower('{statement_id}') \"\n \"RETURN s\"\n )\n return (tx.run(query).single() or [None])[0]", "def test_get_rule_info(self):\n rule_name = 'test_rule_01'\n self._create_db_rule_with_name(rule_name, True)\n\n expected_result = {'Staged': True}\n assert_equal(self.rule_table.rule_info(rule_name), expected_result)", "def get_rule():\r\n global config\r\n with open(\"config/clash-my-rule.yml\") as f:\r\n my_rule = yaml.safe_load(f)\r\n r = requests.get(config.rule_url)\r\n rule = yaml.safe_load(r.text)\r\n rule = my_rule + rule\r\n\r\n return rule", "async def get_one(self, rule_id):\n return await self._rules.find_one({'id': rule_id}, {'_id': 0})", 
"def get_network_acl_rules_by_name(self, name):\n # Retrieve network ACL information to get the ID\n # (mostly useful if a name is provided)\n acl_info = self.get_network_acl(name)\n if \"errors\" in acl_info:\n return acl_info\n\n try:\n # Connect to api endpoint for network_acls\n path = (\"/v1/network_acls/{}/rules/?version={}\"\n \"&generation={}\".format(acl_info[\"id\"],\n self.cfg[\"version\"],\n self.cfg[\"generation\"]))\n\n # Return data\n return qw(\"iaas\", \"GET\", path, headers())[\"data\"]\n\n except Exception as error:\n print(\"Error fetching network ACL with name {}. {}\".format(\n name, error))\n raise" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
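A minimal sketch showing how the _stmt_from_rule document above can be exercised without a real PySB model or INDRA statements, using hypothetical stand-in objects that only carry the attributes the function touches (annotations, predicate, subject, object, uuid).

from types import SimpleNamespace

# Hypothetical stand-ins for a model annotation and an INDRA statement
ann = SimpleNamespace(predicate='from_indra_statement', subject='rule_1', object='uuid-123')
model = SimpleNamespace(annotations=[ann])
stmt = SimpleNamespace(uuid='uuid-123')
assert _stmt_from_rule(model, 'rule_1', [stmt]) is stmt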
Computes the maximum length of the longest ORF over num_trials shuffles of the specified DNA sequence
def longest_ORF_noncoding(dna, num_trials):
    max_length_ORF = 0
    for x in range(0, num_trials):
        shuffled = shuffle_string(dna)
        # Track the longest ORF length seen over all shuffles
        max_length_ORF = max(max_length_ORF, len(longest_ORF(shuffled)))
    return max_length_ORF
[ "def longest_ORF_noncoding(dna, num_trials):\n longest_length = 0\n for i in range(0, num_trials):\n \tshuffled_dna = shuffle_string(dna)\n \tshuffled_dna_longest_length = len(longest_ORF(shuffled_dna))\n \tif shuffled_dna_longest_length > longest_length:\n \t\tlongest_length = shuffled_dna_longest_length\n return longest_length", "def longest_ORF_noncoding(dna, num_trials):\n \n longest_ORFs = []\n s = list(dna)\n for i in range(0,num_trials): #needs to shuffle and re-collapse the dna \n shuffle(s)\n collapse(s)\n longest_ORFs.append(longest_ORF(s))\n \n long_ORF = max(longest_ORFs, key=len) \n return len(long_ORF)", "def longest_ORF_unit_tests():\n\n print 'input: ATACGACATAGTACATGCAATGCATGAAT, expected output: ATGCATTGCATGTACTATGTCGTA, actual: ' + longest_ORF('ATACGACATAGTACATGCAATGCATGAAT')", "def longest_orf(sequence):\r\n\r\n sequence = \"\".join([x.upper() for x in sequence]) # makes the characters uppercase\r\n\r\n length_1 = 0 #store length\r\n length_1_pos = 0 #store position\r\n length_2 = 0 #-''-\r\n length_2_pos = 0 #-''-\r\n\r\n for i in list(range(0, int(len(sequence)))): # for i in list(range(0, int(len(sequence)/3))):\r\n\r\n #print(i)\r\n length1 = distance_to_stop(sequence, i) # starting at each codon\r\n\r\n if length1 > length_1:\r\n length_1 = length1\r\n length_1_pos = i\r\n else:\r\n pass\r\n\r\n length2 = distance_to_stop(reverse_complement(sequence), i) # starting at each codon in the reverse complement\r\n\r\n if length2 > length_2:\r\n length_2 = length2\r\n length_2_pos = i\r\n else:\r\n pass\r\n\r\n # print(convert2aa(sequence[length_1_pos:length_1*3]), convert2aa(reverse_complement(sequence)[length_2_pos:length_2*3]))\r\n\r\n return max(convert2aa(sequence[length_1_pos:length_1*3]), convert2aa(reverse_complement(sequence)[length_2_pos:length_2*3]), key=len) # has '*3' because of the input from distance_to_stop()\r", "def longest_ORF_unit_tests():\n \n sequence=\"ATGCGAATGTAGCATCAAA\"\n print 'input:'+str(sequence)+', expected output: '+\"ATGCTACATTCGCAT\"\n print 'actual output:', longest_ORF(sequence)\n sequence='ATGCATGAATGTAG'\n print 'input:'+str(sequence)+', expected output: '+\"ATGCATGAATGTAG\"\n print 'actual output:', longest_ORF(sequence)", "def longest_ORF(dna):\n longest = \"\"\n # YOUR IMPLEMENTATION HERE\n if find_all_ORFs_both_strands(dna) == []: \n return \"\"\n else:\n longest = max(find_all_ORFs_both_strands(dna),key=len)\n return longest", "def longest_ORF(dna):\n longest_orf = ''\n\n # find the longest orf in both strands\n longest_orf = max(find_all_ORFs_both_strands(dna))\n\n return longest_orf", "def solve():\r\n longest_d = 0\r\n longest_len = 0\r\n for d in range(7, 1000):\r\n repetend_len = repeating_pattern_len(1, d)\r\n if repetend_len > longest_len:\r\n longest_len = repetend_len\r\n longest_d = d\r\n\r\n return longest_d", "def longest_ORF(dna):\n # TODO: implement this \n try:\n # find the longest ORF in both strands using max function\n return max(find_all_ORFs_both_strands(dna), key=len)\n except ValueError:\n # max throws value error if there are no ORFs\n return \"\"", "def longest_sequence(max_n):\n lengths = seqs_lenghts_cached(max_n)\n\n longest_chain = 0\n starting_number = 0\n for k, v in lengths.items():\n if v > longest_chain:\n starting_number = k\n longest_chain = v\n\n return starting_number", "def find_longest_sequence_length(sequences):\n max_length = np.max([len(sequence) for sequence in sequences])\n return max_length", "def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE\n input1 = 
\"ATGCCCCATGATGTAG\"\n expected1 = 'ATGCCCCATGATGTAG'\n actual1 = longest_ORF(input1)\n print \"LONGEST_ORF input: \" + input1 + \", expected output: \" + expected1 + \", actual output: \" + actual1\n \n\n input2 = \"ATGCGAATGTAGCATCAAA\"\n expected2 = 'ATGCTACATTCGCAT'\n actual2 = longest_ORF(input2)\n print \"LONGEST_ORF input: \" + input2 + \", expected output: \" + expected2 + \", actual output: \" + actual2", "def longest_palin(st):\n return max(construct_palindrome_list(st), key=len)", "def calc_longest_tryppep_length(self):\n tryp_peps = TRYPTIC_CLEAVAGE_REGEX.findall(self.sequence)\n return max([len(tryppep) for tryppep in tryp_peps])", "def longest_collatz_sequence_below_n(n):\n lookup = dict()\n for i in range(n):\n if i % 100000 == 0:\n print(i)\n collatz_sequence_length(i, lookup)\n\n max_key, max_value = max(lookup.items(), key=(lambda kv: kv[1]))\n return max_key, max_value", "def euler14(num: int) -> int:\n longest = 1, 1\n\n for i in range(num // 2, num):\n c = collatz_sequence_length(i)\n if c > longest[1]:\n longest = i, c\n\n return longest", "def orf_exceed_length_threshold(input_fasta, output_directory, required_simulations = None, families_file = None):\n\n names, seqs = gen.read_fasta(input_fasta)\n seqs = {name.split(\".\")[0]: seqs[i] for i, name in enumerate(names)}\n # join all seqeunces together\n seqs_string = \"\".join([seqs[i] for i in seqs])\n # pick only one sequence per family for tests\n seqs = sequo.pick_random_family_member(families_file, seqs)\n # get the lengths of sequences\n seqs = [seqs[i] for i in seqs]\n seqs_lengths = [len(i) for i in seqs]\n\n real_longest_orfs = seqo.get_longest_orfs(seqs)\n\n simulant_longest_orfs = simoc.run_simulation_function(list(range(required_simulations)), [seqs_string, seqs_lengths], calc_sim_orfs, sim_run = False)\n\n # now for each threshold, see how many exceed\n for threshold in list(range(200,610,10)):\n real_greater = len([i for i in real_longest_orfs if i >= threshold])\n output_file = \"{0}/{1}.csv\".format(output_dir, threshold)\n with open(output_file, \"w\") as outfile:\n outfile.write(\"id,greater,max_orf,total\\n\")\n outfile.write(\"real,{0},{1},{2}\\n\".format(real_greater, max(real_longest_orfs), len(real_longest_orfs)))\n for i in simulant_longest_orfs:\n sim_output = simulant_longest_orfs[i]\n outfile.write(\"sim_{0},{1},{2},{3}\\n\".format(i, len([j for j in sim_output if j >= threshold]), max(sim_output), len(sim_output)))", "def max_length():\r\n #Finding the longest string\r\n strings\r\n global maximum\r\n maximum=0\r\n for i in strings:\r\n if len(i)>maximum:\r\n maximum=len(i)\r\n else: continue\r\n return maximum", "def longest_run(L):\n def get_sublists(L, n):\n result = []\n for i in range(len(L)-n+1):\n result.append(L[i:i+n])\n return result\n for i in range(len(L), 0, -1):\n possibles = get_sublists(L, i)\n for p in possibles:\n if p == sorted(p) or p == sorted(p, reverse=True):\n return sum(p)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
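A sketch of how this kind of shuffling helper is typically used, assuming the shuffle_string, longest_ORF and find_all_ORFs_both_strands helpers referenced in this record exist: the length obtained from many shuffles acts as a noise threshold for deciding which ORFs in the real sequence are unlikely to occur by chance. The input sequence below is hypothetical.

dna = 'ATGCATGAATGTAGATAGATGTGCCC'  # hypothetical input sequence
threshold = longest_ORF_noncoding(dna, num_trials=1500)
# Keep only ORFs longer than anything seen in the shuffled (non-coding) trials
candidate_genes = [orf for orf in find_all_ORFs_both_strands(dna) if len(orf) > threshold]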
Draw the frame number at the top-left corner of the frame.
def draw_frame_num(image, frame_num):
    cv2.putText(image, '{}'.format(frame_num), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), thickness=2)
    return image
[ "def _draw_number(number, pos, frame):\n width, height, _ = number.shape\n\n # Calculate box.\n num_x, num_y = pos\n num_x_start = num_x - width / 2\n num_x_end = num_x + width / 2\n num_y_start = num_y - width / 2\n num_y_end = num_y + width / 2\n\n # We make this additive to two numbers can safely overlap.\n frame[num_y_start:num_y_end, num_x_start:num_x_end] += number\n frame.clip(0, 1)\n\n return frame", "def draw_number(self, num, x, y):\r\n\r\n num = str(num)\r\n text = self.font.render(num, True, self.BLACK)\r\n pygame.draw.rect(self.win, self.WHITE, pygame.Rect(x-text.get_width()/2, y-text.get_height()/2, 54, 54))\r\n if not num == '0':\r\n self.win.blit(text, (x-text.get_width()/2, y-text.get_height()/2))", "def draw_frames():\n win.fill(WHITE)\n pygame.draw.rect(win, BLACK, (RECTS_POS[0][0], RECTS_POS[0][1],\n RECT_SIDE, RECT_SIDE), 1)\n pygame.draw.rect(win, BLACK, (RECTS_POS[1][0], RECTS_POS[1][1],\n RECT_SIDE, RECT_SIDE), 1)\n pygame.draw.rect(win, BLACK, (RECTS_POS[2][0], RECTS_POS[2][1],\n RECT_SIDE, RECT_SIDE), 1)", "def _set_frame(self):\n rectangle(self._canvas, self._plot_x0, self._plot_y0, self._plot_width, self._plot_height, outline=OUTLINE_COLOR)", "def _draw_count(self, frame: np.array, y_pos: int, count: int) -> None:\n text = 'COUNT: {0}'.format(count)\n cv2.putText(frame, text, (self.legend_left_x + 10, y_pos), FONT_HERSHEY_SIMPLEX,\n SMALL_FONTSCALE, WHITE, THICK, LINE_AA)", "def frame_number(self, n=None, relative=False):\n if n is not None:\n if relative:\n flag = 'relative'\n else:\n flag = 'absolute'\n self._send_to_ztv(('set-cur-display-frame-num', (n, flag)))\n return self._request_return_value_from_ztv('get-cur-display-frame-num')", "def changeFrameText(self):\n check = re.search(\"[1-9][0-9]*\", self.ui.t_frame.text())\n if check:\n num = check.group()\n frame = int(num)\n maxF = self.image_holder.vidLen - 1\n if frame > maxF:\n logging.warning(\n \"Too big number for frame. 
Falling back to max {0} frame.\".format(\n maxF\n )\n )\n frame = maxF\n self.ui.t_frame.setText(str(frame))\n else:\n logging.info(\"Wrong Input For Frame\")\n self.ui.t_frame.setText(\"0\")", "def getStartFrame(self) -> int:\r\n \r\n return data.windows[self.current_window][0]+1", "def updateTopFrame(self, frame):\n\t\tself.frame_widget.displayFrame(frame)\n\t\tself.source_and_assembly_widget.setLine(frame.line, frame.assembly)", "def __show_bignum(self, num):\n fill = 'lightgreen'\n font = (\"Courier\", 150, 'bold')\n for x in range(3):\n self.canvas.delete(f'bignum-{x}')\n self.canvas.drawText(num[x], 75 + 150 * x, 100, font=font,\n tag=f'bignum-{x}', fill=fill)", "def render_frame(i, frame_start, frame_end):\n\n sim_time_ns = 96*(i - frame_start)/(frame_end-frame_start)\n img = Image.new('RGBA', (2560, 1440), color=(0, 0, 0, 0))\n fnt = ImageFont.truetype('/usr/share/fonts/OTF/FiraCode-Regular.otf', 60)\n d = ImageDraw.Draw(img)\n d.text(\n (10, 10),\n 't = {}'.format(f'{sim_time_ns:7.3f} ns'),\n font=fnt,\n fill=(255, 255, 255, 255)\n )\n img.save(f'frame_{i}.png')", "def drawCoordinates(self):\n \n adjust = 30\n \n self.libtft.DrawRect(adjust, adjust, self._MAX_COLS - adjust, self._MAX_ROWS - adjust, self.libtft.GREEN)\n \n x = adjust - (adjust / 2)\n y = adjust\n st = \"%d, %d\" % (x, y)\n self.libtft.PutSt(st, x, y)\n self.libtft.DrawPixel(x, y, self.libtft.GREEN)\n \n x = self._MAX_COLS - adjust - (adjust / 2)\n y = self._MAX_ROWS - adjust\n st = \"%d, %d\" % (x, y)\n self.libtft.PutSt(st, x, y)\n self.libtft.DrawPixel(x, y, self.libtft.GREEN)\n \n x = adjust - (adjust / 2)\n y = self._MAX_ROWS - adjust\n st = \"%d, %d\" % (x, y) \n self.libtft.PutSt(st, x, y)\n self.libtft.DrawPixel(x, y, self.libtft.GREEN)\n\n x = self._MAX_COLS - adjust - (adjust / 2)\n y = adjust\n st = \"%d, %d\" % (x, y) \n self.libtft.PutSt(st, x, y)\n self.libtft.DrawPixel(x, y, self.libtft.GREEN)", "def goto_frame(self, idx):\n if idx < self.nof_bframes and idx >= 0:\n self.idx_bframe = idx\n self.draw_dancefloor()", "def draw_framelinegreen(frame,height,width):\n # Draw line top left and right\n cv2.line(frame, (0, 0), (0,int(height/10)), (0,255,0),10)\n cv2.line(frame, (0, 0), (int(height/10),0), (0,255,0),10)\n cv2.line(frame, (width, 0), (width-int(height/10),0), (0,255,0),10)\n cv2.line(frame, (width, 0), (width,int(height/10)), (0,255,0),10)\n\n # Draw line bottom left and right\n cv2.line(frame, (0, height), (0,height-int(height/10)), (0,255,0),10)\n cv2.line(frame, (0, height), (int(height/10),height), (0,255,0),10)\n cv2.line(frame, (width, height), (width-int(height/10),height), (0,255,0),10)\n cv2.line(frame, (width, height), (width,height-int(height/10)), (0,255,0),10)\n return frame", "def __draw_fps(self):\n txt = f'{round(self.clock.get_fps())} FPS'\n rtxt = self.font.render(txt, False, pygame.Color('black'))\n rsiz = self.font.size(txt)\n self.__screen.blit(rtxt, (SCREEN_WIDTH-rsiz[0]-5, 5))", "def drawbytes(self):\n start=self.adr&~0xFF;\n for row in range(0,curses.LINES-4):\n self.drawbyteline(row+2,start+0x10*row);", "def _draw_x(self, coord, color):\n edge_distance = 10\n top_left = (coord.pixel_x - edge_distance,\n coord.pixel_y - edge_distance)\n bottom_right = (coord.pixel_x + edge_distance,\n coord.pixel_y + edge_distance)\n pygame.draw.line(self._display_surface, color, top_left, bottom_right)\n top_right = (coord.pixel_x + edge_distance,\n coord.pixel_y - edge_distance)\n bottom_left = (coord.pixel_x - edge_distance,\n coord.pixel_y + edge_distance)\n 
pygame.draw.line(self._display_surface, color, top_right, bottom_left)", "def get_start_frame(self):\n\n shot_node = self.get_node()\n if not shot_node:\n return -1\n\n return tp.Dcc.shot_start_frame(shot_node)", "def left(n, color='black'):\n\n turtle.penup()\n cur_pos = turtle.pos()\n turtle.goto(cur_pos[0] - n * square_size, cur_pos[1])\n draw_square(square_size, color)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw the box with an ID tag for a tracked target.
def draw_target_box(image, box, id, draw_center=False):
    image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
                          _colors[int(id) % _colors.__len__()], thickness=3)
    id_string = '{:d}'.format(int(id))
    id_size, baseline = cv2.getTextSize(id_string, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
    image = cv2.rectangle(image, (int(box[0]), int(box[1])),
                          (int(box[0] + id_size[0] + 4), int(box[1] + id_size[1]) + 10),
                          _colors[int(id) % _colors.__len__()], thickness=-1)
    image = cv2.putText(image, id_string, (int(box[0] + 2), int(box[1]) + id_size[1] + 4),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), thickness=2)
    if draw_center:
        image = cv2.circle(image, (int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)), radius=10,
                           color=(0, 0, 255), thickness=-1)
    return image
[ "def draw_target(self, col=(255,0,0)):\r\n\t\tself.app.fill(*col)\r\n\t\tself.app.ellipse(self.target_center.x, self.height-self.ground_height, 10,10)\r\n\t\tself.app.rect(self.target_pos, self.height-self.ground_height, self.target_size, self.ground_height)", "def _get_new_id(self, *bbox, **options) -> int:\n return self.canvas.create_rectangle(*bbox, **options)", "def _get_new_id(self, *bbox) -> int:\n return self.canvas.create_arc(*bbox)", "def targetChanged(self):\n cmds.mnpr(renderTarget=self.targetCoBox.currentIndex())\n print(\"mnpr -renderTarget {0};\".format(self.targetCoBox.currentIndex()))", "def plot_game(game):\n #game_index = np.random.randint(0, high=len(games))\n #print(game_index)\n if not os.path.exists(os.path.join(IMG_DIR, game['image']['file_name'])):\n img_fname = wget.download(game['image']['flickr_url'])\n os.rename(img_fname, os.path.join(IMG_DIR, game['image']['file_name']))\n img = Image.open(os.path.join(IMG_DIR, game['image']['file_name']))\n osize = (game['image']['width'], game['image']['height'])\n img = img.resize((osize), resample=Image.NEAREST)\n\n objects = game['objects']\n fig, ax = plt.subplots(1)\n\n ax.imshow(img)\n\n im_height = game['image']['height']\n target_id = game['object_id']\n\n print('Objects:')\n for i, obj in enumerate(objects):\n # Every game has an object whose id is target_id\n # so it's safe the assign target this way\n if obj['id'] == target_id:\n target = obj\n print(i, obj['category'], '<--- target')\n else:\n print(i, obj['category'])\n # bbox = [left, up, width, heigh]\n bbox = obj['bbox']\n if obj['id'] == target_id:\n target = obj\n x_width = bbox[2]\n y_height = bbox[3]\n\n x_left = bbox[0]\n x_right = x_left + x_width\n\n y_upper = bbox[1]\n y_lower = y_upper - y_height\n if obj['id'] == target_id:\n rect = patches.Rectangle((x_left, y_upper),\n x_width,\n y_height,\n linewidth=1.8,\n edgecolor='g', facecolor='none')\n else:\n rect = patches.Rectangle((x_left, y_upper),\n x_width,\n y_height,\n linewidth=1.8,\n edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n if obj['id'] == target_id:\n ax.text(x_right, y_upper-1, str(i), color='g', fontsize=34)\n else:\n ax.text(x_right, y_upper-1, str(i), color='r', fontsize=34)\n\n plt.show()", "def draw_target_prediction(image, box):\n image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),\n (255, 255, 255), thickness=1)\n return image", "def select_target(self):\n # If there's a selected unit, outline it\n if self.sel_unit:\n pygame.gfxdraw.rectangle(\n self.screen,\n self.sel_unit.rect,\n SELECT_COLOR)\n \n # Mark potential targets\n for tile_pos in self._attackable_tiles:\n screen_pos = self.map.screen_coords(tile_pos)\n self.draw_reticle(screen_pos)", "def draw(self, t, size):", "def draw_box(detection, detection_image_cv2):\n\n logging.debug('drawing box for {}'.format(detection['label'].upper()))\n scale = 2\n color = (0, 0, 255)\n cv2.rectangle(\n detection_image_cv2,\n (detection['topleft']['x'], detection['topleft']['y']),\n (detection['bottomright']['x'], detection['bottomright']['y']),\n color,\n scale\n ) \n\n return detection_image_cv2", "def _get_new_id(self, *bbox) -> int:\n return self.canvas.create_oval(*bbox)", "def setTextureTarget(self, target: 'int') -> \"void\":\n return _coin.SoGLDisplayList_setTextureTarget(self, target)", "def cover_box(self, x, y, width, height):\n pg.draw.rect(self.screen, (255, 255, 255), (x, y, width, height))", "def addLogBox(self, id, dim=[2,2], label=None):\n\t\tif not label: label = 
id\n\t\tself.widgets.append(JPLLoggingBox(self.centralWidget, id, dim, label))\n\t\tself.labels.append(JPLLabel(self.centralWidget, label, dim))", "def _draw_box(self, dc, box):\n CIRCLE_RAD = 1 if self.scale == 1.0 else 3\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetPen(wx.Pen(box.color, box.line_width))\n \n w_img, h_img = self.img_bitmap.GetWidth(), self.img_bitmap.GetHeight()\n x1, y1 = int(round(box.x1*w_img)), int(round(box.y1*h_img))\n x2, y2 = int(round(box.x2*w_img)), int(round(box.y2*h_img))\n w, h = abs(x1-x2), abs(y1-y2)\n (ul_x, ul_y), (lr_x, lr_y) = util_gui.get_box_corners((x1,y1),(x2,y2))\n dc.DrawRectangle(ul_x,ul_y,w,h)\n # Draw the 'grabber' circles\n dc.SetPen(wx.Pen(\"Black\", 1))\n dc.SetBrush(wx.Brush(\"White\"))\n dc.DrawCircle(ul_x, ul_y, CIRCLE_RAD) # Upper-Left\n dc.DrawCircle(ul_x+(w/2), ul_y, CIRCLE_RAD) # Top\n dc.DrawCircle(ul_x+w, ul_y, CIRCLE_RAD) # Upper-Right\n dc.DrawCircle(ul_x, ul_y+(h/2), CIRCLE_RAD) # Left\n dc.DrawCircle(ul_x+w, ul_y+(h/2), CIRCLE_RAD) # Right\n dc.DrawCircle(ul_x, ul_y+h, CIRCLE_RAD) # Lower-Left\n dc.DrawCircle(ul_x+(w/2), lr_y, CIRCLE_RAD) # Bottom\n dc.DrawCircle(lr_x, lr_y, CIRCLE_RAD) # Lower-Right\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n \n # Draw the ID numbers (for now)\n #if box.is_contest:\n # dc.SetTextForeground(\"Blue\")\n #else:\n # dc.SetTextForeground(\"Red\")\n #dc.DrawText(str(box.contest_id), ul_x, ul_y)", "def add_tag(self, object_id, tag):\n tags=self.canvas.gettags(object_id)\n if tag not in tags:\n self.canvas.addtag_withtag(tag, object_id)", "def draw_camera_pixel_ids(xs_center, ys_center, pixels_id, axes):\n\n axes.text(xs_center, ys_center, pixels_id, fontsize=10, ha='center')", "def at_target(self, target_pos):\n x = (174 * (target_pos - 1)) + 130\n y = 50\n self.click(x, y, delay=.2)\n return self", "def draw_entity(self):\n pygame.draw.rect(self.screen, self.entity_color, self.rect)", "def addTextBox(self, id, dim=[1,1], label=None):\n\t\tpass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw the prediction box with an ID tag for a tracked target.
def draw_target_prediction(image, box):
    image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),
                          (255, 255, 255), thickness=1)
    return image
[ "def draw_instance_predictions(self, predictions, track_ids):\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n \n # set the color according to the track ids \n colors = [cm.tab20(id_) for id_ in track_ids]\n alpha = 0.6\n\n labels = [f'Track {id_} {label}' for label, id_ in zip(labels,track_ids)]\n \n # increase font size\n if self._default_font_size < 20: self._default_font_size *= 1.3\n \n\n if self._instance_mode == ColorMode.IMAGE_BW:\n assert predictions.has(\"pred_masks\"), \"ColorMode.IMAGE_BW requires segmentations\"\n self.output.img = self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n )\n\n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output", "def draw_target_box(image, box, id, draw_center=False):\n image = cv2.rectangle(image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])),\n _colors[int(id) % _colors.__len__()], thickness=3)\n id_string = '{:d}'.format(int(id))\n id_size, baseline = cv2.getTextSize(id_string, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)\n image = cv2.rectangle(image, (int(box[0]), int(box[1])),\n (int(box[0] + id_size[0] + 4), int(box[1] + id_size[1]) + 10),\n _colors[int(id) % _colors.__len__()], thickness=-1)\n image = cv2.putText(image, id_string, (int(box[0] + 2), int(box[1]) + id_size[1] + 4),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), thickness=2)\n if draw_center:\n image = cv2.circle(image, (int((box[0] + box[2]) / 2), int((box[1] + box[3]) / 2)), radius=10,\n color=(0, 0, 255), thickness=-1)\n return image", "def plot_prediction(self, *args):\n pass", "def plot_game(game):\n #game_index = np.random.randint(0, high=len(games))\n #print(game_index)\n if not os.path.exists(os.path.join(IMG_DIR, game['image']['file_name'])):\n img_fname = wget.download(game['image']['flickr_url'])\n os.rename(img_fname, os.path.join(IMG_DIR, game['image']['file_name']))\n img = Image.open(os.path.join(IMG_DIR, game['image']['file_name']))\n osize = (game['image']['width'], game['image']['height'])\n img = img.resize((osize), resample=Image.NEAREST)\n\n objects = game['objects']\n fig, ax = plt.subplots(1)\n\n ax.imshow(img)\n\n im_height = game['image']['height']\n target_id = game['object_id']\n\n print('Objects:')\n for i, obj in enumerate(objects):\n # Every game has an object whose id is target_id\n # so it's safe the assign target this way\n if obj['id'] == target_id:\n target = obj\n print(i, obj['category'], '<--- target')\n else:\n print(i, obj['category'])\n # bbox = [left, up, width, heigh]\n bbox = obj['bbox']\n if obj['id'] == target_id:\n target = obj\n x_width = bbox[2]\n y_height = bbox[3]\n\n x_left = bbox[0]\n x_right = x_left + x_width\n\n y_upper = bbox[1]\n y_lower = y_upper - y_height\n if obj['id'] == target_id:\n rect = patches.Rectangle((x_left, y_upper),\n x_width,\n y_height,\n linewidth=1.8,\n edgecolor='g', facecolor='none')\n else:\n rect = 
patches.Rectangle((x_left, y_upper),\n x_width,\n y_height,\n linewidth=1.8,\n edgecolor='r', facecolor='none')\n ax.add_patch(rect)\n if obj['id'] == target_id:\n ax.text(x_right, y_upper-1, str(i), color='g', fontsize=34)\n else:\n ax.text(x_right, y_upper-1, str(i), color='r', fontsize=34)\n\n plt.show()", "def draw_target_plot(train_df):\n target_plot = alt.Chart(train_df, title='Target Counts').mark_bar().encode(\n x='target',\n y='count()'\n )\n\n return target_plot", "def draw_boxes(record, im_save_dir, judge=True):\n image = cv2.imread(record['index'])\n h, w = image.shape[:2]\n fig = plt.figure(figsize=(w / 96, h / 96))\n ax = fig.add_subplot(1, 1, 1)\n\n prediction = record['prediction'][0]\n coords = prediction['position'] # ((x, y), width, height)\n ax.add_patch(plt.Rectangle(*coords, fill=False, edgecolor='cyan', linewidth=2))\n\n display_coords = np.array(coords[0]) + [3, -10] # align rectangle box, top-left\n display_txt = '{!s}: {:.3f}'.format(prediction['class'], prediction['score'])\n ax.text(*display_coords, display_txt, bbox={'facecolor': 'cyan', 'alpha': 0.4})\n\n if judge:\n judge_coords = display_coords + [coords[1] - 20, 0] # align rectangle box, top-right\n is_right = int(prediction['class'] == record['label'])\n judge_txt = [{'symbol': '×', 'color': 'red'}, {'symbol': '√', 'color': 'green'}][is_right]\n ax.text(*judge_coords, judge_txt['symbol'], bbox={'facecolor': judge_txt['color'], 'alpha': 0.7})\n\n extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n plt.axis('off')\n plt.imshow(image)\n im_save_name = im_save_dir + record['index'].split('/')[-1]\n plt.savefig(im_save_name, bbox_inches=extent)\n plt.clf()", "def visualize_patch_segmentation_predictions(self, X, y=None, threshold=0.5, num_predictions=3):\n\n # Choose random samples\n random_samples = np.random.randint(0, len(X), num_predictions)\n X_rand = X[random_samples]\n y_pred = self.model.predict(X_rand)\n\n # Number of rows and columns for the figure\n ncols = 2\n nrows = num_predictions\n if y is not None:\n ncols = 3\n y_rand = y[random_samples]\n fig, axes = plt.subplots(nrows, ncols)\n\n if num_predictions == 1:\n if X_rand.shape[3] == 1:\n axes[0].imshow(X_rand[0, :, :, 0], cmap='gray')\n else:\n axes[0].imshow(X_rand[0])\n axes[0].set_xticks([])\n axes[0].set_yticks([])\n\n axes[1].imshow(y_pred[0, :, :, 0] > threshold, cmap='gray')\n axes[1].set_xticks([])\n axes[1].set_yticks([])\n\n axes[0].set_title(\"Original Image\")\n axes[1].set_title(\"Predicted Mask\")\n\n if y is not None:\n axes[2].imshow(y_rand[0, :, :, 0], cmap='gray')\n axes[2].set_xticks([])\n axes[2].set_yticks([])\n axes[2].set_title(\"Ground Truth Mask\")\n else:\n for idx in range(num_predictions):\n if X_rand.shape[3] == 1:\n axes[idx, 0].imshow(X_rand[idx, :, :, 0], cmap='gray')\n else:\n axes[idx, 0].imshow(X_rand[idx])\n axes[idx, 0].set_xticks([])\n axes[idx, 0].set_yticks([])\n\n axes[idx, 1].imshow(y_pred[idx, :, :, 0] > threshold, cmap='gray')\n axes[idx, 1].set_xticks([])\n axes[idx, 1].set_yticks([])\n\n if idx == 0:\n axes[idx, 0].set_title(\"Original Image\")\n axes[idx, 1].set_title(\"Predicted Mask\")\n\n if y is not None:\n axes[idx, 2].imshow(y_rand[idx, :, :, 0], cmap='gray')\n axes[idx, 2].set_xticks([])\n axes[idx, 2].set_yticks([])\n if idx == 0:\n axes[idx, 2].set_title(\"Ground Truth Mask\")\n\n plt.show()", "def add_single_predicted_image_info(self, image_id, result_dict):\n if image_id not in self._image_ids:\n raise ValueError('Missing groundtruth for image id: 
{}'.format(image_id))\n\n if self._image_ids[image_id]:\n tf.logging.warning(\n 'Ignoring detection with image id %s since it was previously added', image_id)\n return\n\n assert \"instances\" in result_dict\n instance_result = result_dict[\"instances\"]\n detection_boxes = instance_result[fields.ResultFields.boxes]\n detection_scores = instance_result[fields.ResultFields.scores]\n detection_classes = instance_result[fields.ResultFields.classes]\n\n if len(self._image_ids) <= self._max_examples_to_draw:\n image = visualization.visualize_boxes_and_labels_on_image_array(\n image=result_dict[fields.InputFields.image],\n boxes=detection_boxes,\n classes=detection_classes,\n scores=detection_scores,\n category_index=self._category_index,\n instance_masks=instance_result.get(fields.ResultFields.masks),\n min_score_thresh=self._min_visualization_score_thresh\n )\n self._summaries.append(\n tf.Summary.Value(\n tag=\"{}/Prediction/Detection\".format(image_id),\n image=tf.Summary.Image(\n encoded_image_string=visualization.encode_image_array_as_png_str(image)))\n )\n\n self._detection_boxes_list.extend(\n coco_tools.ExportSingleImageDetectionBoxesToCoco(\n image_id=image_id,\n category_id_set=self._category_id_set,\n detection_boxes=detection_boxes,\n detection_scores=detection_scores,\n detection_classes=detection_classes))\n self._image_ids[image_id] = True", "def draw_identify(self, result):\n # defaults\n box_width = 100\n box_height = 22\n\n camera_width = self.camera.properties['width']\n camera_height = self.camera.properties['height']\n left = int(camera_width * 0.5 - box_width * 0.5)\n right = int(camera_width * 0.5 + box_width * 0.5)\n top = int(camera_height * 0.5 - box_height * 0.5)\n bottom = int(camera_height * 0.5 + box_height * 0.5)\n\n text = \"Searching...\"\n color = (0, 0, 200)\n text_color = (255, 255, 255)\n\n if result:\n left, top, right, bottom, text, color, text_color = result\n\n # Draw frame\n cv2.rectangle(Data.frame, (left, top), (right, bottom), color, 1)\n\n # Draw a label with a name below the face\n cv2.rectangle(Data.frame, (left, bottom - 20), (right, bottom), color, cv2.FILLED)\n font = cv2.FONT_HERSHEY_PLAIN\n center = int((left + right) * 0.5 * 0.95)\n cv2.putText(Data.frame, text, (center, bottom - 6), font, 1.0, text_color, 1)", "def train(self, id_to_context_reply_label):\n pass", "def display_prediction(test_x,test_labels_a,predicted):\n fig = plt.figure(figsize=(10, 10))\n j = 1\n for i in range(0, 1000, 50):\n truth = test_labels_a[i]\n prediction = predicted[i]\n plt.subplot(5, 4, j)\n j = j + 1\n plt.axis('off')\n color = 'green' if truth == prediction else 'red'\n plt.text(40, 10, \"Truth: {0}\\nPrediction: {1}\".format(truth, prediction),\n fontsize=12, color=color)\n plt.imshow(test_x[i], cmap='gray')", "def plot_image_with_bboxes(image_id,\r\n images_folder_path=Path('data/raw/train/'),\r\n target_folder_path=Path('data/interim/train/')):\r\n fig = plt.figure(figsize=(10, 10))\r\n ax = fig.add_subplot(111)\r\n\r\n im = Image.open(images_folder_path / (image_id + '.jpg'))\r\n\r\n ax.imshow(im)\r\n\r\n bbox_list = get_bbox_for_image(image_id)\r\n\r\n for bbox in bbox_list:\r\n add_bbox_to_axis(ax, bbox)\r\n\r\n fig.savefig(target_folder_path / (image_id + '_bbox.jpg'))\r\n\r\n return", "def draw_target(self, col=(255,0,0)):\r\n\t\tself.app.fill(*col)\r\n\t\tself.app.ellipse(self.target_center.x, self.height-self.ground_height, 10,10)\r\n\t\tself.app.rect(self.target_pos, self.height-self.ground_height, self.target_size, self.ground_height)", "def 
draw_boxes(boxes, img, model_size, crop_rect, color=(255, 255, 255), debug=False):\n\n retimg = img.copy()\n [xmin, xmax] = crop_rect[0]\n [ymin, ymax] = crop_rect[1]\n crop_w = xmax - xmin\n crop_h = ymax - ymin\n [model_h, model_w] = model_size\n [img_h, img_w] = img.shape[0:2]\n\n for box in boxes:\n # only show if prediction is in CLASSES_TO_SHOW\n if box.cn not in CLASSES_TO_SHOW:\n if debug: print(\"[INFO] detected class\", box.cn)\n continue\n\n label = '{} {:.2f}'.format(box.cn, box.prob)\n\n # convert bounding box to coordinates\n left = (box.x - box.w / 2)\n right = (box.x + box.w / 2)\n top = (box.y - box.h / 2)\n bottom = (box.y + box.h / 2)\n\n # scale up boxes to cropped image size\n left *= crop_w / model_w\n right *= crop_w / model_w\n top *= crop_h / model_h\n bottom *= crop_h / model_h\n\n # shift boxes from cropped to original image\n left += xmin\n right += xmin\n top += ymin\n bottom += ymin\n\n top = max(0, np.floor(top + 0.5).astype('int32'))\n left = max(0, np.floor(left + 0.5).astype('int32'))\n bottom = min(img_h, np.floor(bottom + 0.5).astype('int32'))\n right = min(img_w, np.floor(right + 0.5).astype('int32'))\n\n # draw rectangle\n cv2.rectangle(retimg, (left, top), (right, bottom), color=color, thickness=2, lineType=cv2.LINE_AA)\n\n # write label\n fontface = cv2.FONT_HERSHEY_SIMPLEX\n fontscale = 0.5\n fontthickness = 1\n textsize, _ = cv2.getTextSize(label, fontface, fontscale, fontthickness)\n cv2.putText(retimg, label, (left + 2, top + textsize[1] + 2),\n fontface, fontScale=fontscale, color=color,\n thickness=fontthickness, lineType=cv2.LINE_AA)\n\n return retimg", "def draw_box(detection, detection_image_cv2):\n\n logging.debug('drawing box for {}'.format(detection['label'].upper()))\n scale = 2\n color = (0, 0, 255)\n cv2.rectangle(\n detection_image_cv2,\n (detection['topleft']['x'], detection['topleft']['y']),\n (detection['bottomright']['x'], detection['bottomright']['y']),\n color,\n scale\n ) \n\n return detection_image_cv2", "def draw_camera_pixel_ids(xs_center, ys_center, pixels_id, axes):\n\n axes.text(xs_center, ys_center, pixels_id, fontsize=10, ha='center')", "def targetChanged(self):\n cmds.mnpr(renderTarget=self.targetCoBox.currentIndex())\n print(\"mnpr -renderTarget {0};\".format(self.targetCoBox.currentIndex()))", "def draw_prediction_error(model, X, y):\n split = train_test_split(X, y, random_state=123)\n X_train, X_test, y_train, y_test = split\n\n scaler = StandardScaler()\n X_train_rs = scaler.fit_transform(X_train)\n X_test_rs = scaler.transform(X_test)\n\n visualizer = PredictionError(model, alpha=0.25)\n visualizer.fit(X_train_rs, y_train['target'])\n visualizer.score(X_test_rs, y_test['target'])\n visualizer.poof()", "def display_fitb_query(query, predictions, ground_truth, images_dir, save_fn=None):\n\n # display query products (outfit)\n num_query = len(query)\n for i, p in enumerate(query):\n # read product image\n im_fn = os.path.join(images_dir, str(p) + \".jpg\")\n im = mpimg.imread(im_fn)\n\n # imshow\n plt.subplot(1, num_query, i + 1)\n plt.imshow(im)\n plt.title(str(p))\n plt.axis(\"off\")\n plt.suptitle(\"Outfit query\")\n if save_fn is None:\n plt.show()\n else:\n plt.savefig(save_fn.format(\"query\"))\n\n # display model candidates predictions\n num_preds = len(predictions)\n for i, prod_id in enumerate(predictions):\n # read product image\n im_fn = os.path.join(images_dir, str(prod_id) + \".jpg\")\n im = mpimg.imread(im_fn)\n start_point = (0, im.shape[0])\n end_point = (im.shape[1], 0)\n if i == 0 or query[0] == 
ground_truth:\n im = draw_rectangle(im, start_point, end_point, (0, 0, 255))\n\n elif prod_id == ground_truth:\n im = draw_rectangle(im, start_point, end_point, (0, 255, 0))\n\n # imshow\n plt.subplot(1, num_preds, i + 1)\n plt.imshow(im)\n plt.title(str(prod_id))\n plt.axis(\"off\")\n plt.suptitle(\"Ranked Candidates\")\n if save_fn is None:\n plt.show()\n else:\n plt.savefig(save_fn.format(\"ranked_candidates\"))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw the skeleton for a tracked target.
def draw_target_skeleton(image, keypoints, id):
    for connection in _keypoint_connections:
        image = cv2.line(image, (int(keypoints[connection[0]][0]), int(keypoints[connection[0]][1])),
                         (int(keypoints[connection[1]][0]), int(keypoints[connection[1]][1])),
                         color=_colors[int(id) % _colors.__len__()], thickness=2)
    for keypoint in keypoints:
        image = cv2.circle(image, (int(keypoint[0]), int(keypoint[1])), 2, (255, 255, 255), thickness=-1)
    return image
[ "def draw_skeleton(self):\n raise NotImplementedError", "def draw_skeleton_image(self, ax, array):\n draw_array(ax, array)", "def draw_snake(self):\n # Draw the head\n self._draw_head()\n\n # Draw the rest of the body\n if len(self.body) > 0:\n self._draw_body()", "def plot(self, skeleton_data: np.ndarray, ax: plt.Axes, **kwargs) -> None:\n\n skeleton_data = skeleton_data.reshape(-1, 3)\n palm = skeleton_data[self.skeleton['palm']]\n ax.scatter(palm[0], palm[1], palm[2], c=\"g\", s=3)\n\n for p in self.get_skeleton_paths(skeleton_data):\n c = kwargs.get('color', 'b')\n ax.plot(xs=p[:, 0], ys=p[:, 1], zs=p[:, 2], c=c)\n\n ax.view_init(azim=-90.0, elev=-90.0)\n ax.set_xlim(0.0, 1.0)\n ax.set_ylim(0.0, 1.0)\n ax.set_zlim(0.0, 1.0)\n ax.axis('off')\n ax.grid(b=None)", "def draw(self, extras=False):\n im = self.image\n for y in range(6):\n for x in range(6):\n #draw the dots\n cv2.rectangle(im, tuple(np.array(maze_to_image_coords(x, y))-1),\n tuple(np.array(maze_to_image_coords(x, y))+1), (100,100,100), -1)\n\n #draw any walls present\n if self.hedge_mask[y][x]:\n hline(im, x, y)\n if self.vedge_mask[y][x]:\n vline(im, x, y)\n if extras:\n #draw the start / target\n x, y = self.target\n cv2.circle(im, tuple(maze_to_image_coords(x, y)), 8, (0, 0, 255), -1)\n x, y = self.start\n cv2.circle(im, tuple(maze_to_image_coords(x, y)), 5, (50, 50, 220), -1)", "def draw(self, target):\n for offset in np.transpose(np.where(self.brush)):\n offset -= np.array(self.brush.shape) // 2\n coord = target + offset\n if coord[0] < self.board.shape[0] and coord[1] < self.board.shape[1]:\n self.board[coord[0], coord[1]] = 1", "def draw(self,):\n\n # \n # Clear the screen.\n #\n sys.stdout.write(\"\\x1b[2J\")\n\n if not self.animate:\n # \n # Move to upper left and defer to superclass.\n #\n sys.stdout.write(\"\\x1b[H\")\n Maze.draw(self) \n \n else:\n # \n # If we are animating, clear the screen and start carving:\n #\n Kruskal.carve_passages(self)", "def draw_target(self, col=(255,0,0)):\r\n\t\tself.app.fill(*col)\r\n\t\tself.app.ellipse(self.target_center.x, self.height-self.ground_height, 10,10)\r\n\t\tself.app.rect(self.target_pos, self.height-self.ground_height, self.target_size, self.ground_height)", "def line_of_sight(self, target=None):\n\n # If no target, use opponent turtle's position\n if target == None:\n target = self.other_position\n \n # Get heading towards target\n rh = math.radians(self.heading_towards(target))\n \n # Get initial signs of x- and y-direction differences\n sx = self._sign(target[0] - self.x) # x-direction sign\n sy = self._sign(target[1] - self.y) # y-direction sign\n \n # Handle the trivial case of the turtle's own coordinate\n if sx == 0 and sy == 0:\n return True\n \n # Test sample points on path to target\n pt = list(self.position) # sample point\n spd = self.missile_speed # move sample point at missile speed\n iter = 0 # number of samples tested (for iteration cutoff)\n while True:\n # Loop repeats until either reaching an iteration cutoff, finding\n # a block collision, or moving past the target coordinate\n \n # Move sample point\n pt[0] += spd*math.cos(rh)\n pt[1] -= spd*math.sin(rh)\n \n # If the point collides with a block, there is no clear path\n if self.free_space(pt) == False:\n return False\n \n # If the point has moved past the target, there must be clear path\n if (self._sign(target[0] - pt[0]) != sx or\n self._sign(target[1] - pt[1]) != sy):\n return True\n \n # If past iteration cutoff, return False\n iter += 1\n if iter >= 100:\n return False", "def draw(self, t, 
size):", "def _create_skeleton(self, width, height, bb_thickness=0.05):\n base_link = urdf.Link('base_link',\n urdf.Inertial(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Mass(value=0),\n urdf.Inertia(ixx=0.001, ixy=0, ixz=0, iyy=0.001, iyz=0, izz=0.001)\n ),\n urdf.Collision(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, 0.3, 0.1))\n )\n ),\n urdf.Visual(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, 0.3, 0.1))\n ),\n urdf.Material('brown',\n urdf.Color(rgba=(0.82, 0.71, 0.55, 1.0))\n )\n ))\n\n back_link = urdf.Link('back_link',\n urdf.Inertial(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Mass(value=0.5),\n urdf.Inertia(ixx=0.001, ixy=0, ixz=0, iyy=0.001, iyz=0, izz=0.001)\n ),\n urdf.Collision(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, bb_thickness, height))\n )\n ),\n urdf.Visual(\n urdf.Origin(xyz=(0, 0, 0), rpy=(0, 0, 0)),\n urdf.Geometry(\n urdf.Box(size=(width, bb_thickness, height))\n ),\n urdf.Material('brown',\n urdf.Color(rgba=(0.82, 0.71, 0.55, 1.0))\n )\n ))\n\n fixed_joint = urdf.Joint('fixed_backboard',\n urdf.Parent('base_link'),\n urdf.Child('back_link'),\n urdf.Origin(xyz=(0, 0, height/2.0 + 0.05), rpy=(0, 0, 0)),\n type='fixed')\n\n self._links.append(base_link)\n self._links.append(back_link)\n self._joints.append(fixed_joint)", "def draw_bonus():\n global bonus_x, bonus_y, bonus, head\n while True: # To prevent bug if bonus will be drawn in the snake head or snake tail\n bonus_x = rnd(3, 50, 1)\n bonus_y = rnd(3, 35, 1)\n breaking = True\n if 20 * bonus_x == canv.coords(head)[2] and 20 * bonus_y == canv.coords(head)[3]:\n breaking = False\n for i in range(len(tail)):\n if tail[i][0] != \"\":\n if 20 * bonus_x == canv.coords(tail[i][0])[2] and 20 * bonus_y == canv.coords(tail[i][0])[3]:\n breaking = False\n if breaking:\n break\n bonus = canv.create_oval(20 * bonus_x - 15, 20 * bonus_y - 15, 20 * bonus_x - 5, 20 * bonus_y - 5, fill=\"Red\")", "def _draw_head(self):\n self.head_rect.x = self.pos_x_idx * self.snake_game.settings.case_width + 2\n self.head_rect.y = self.pos_y_idx * self.snake_game.settings.case_width + 2\n\n if self.direction in ('stop', 'up'):\n self.snake_game.window.blit(self.head_up, self.head_rect)\n elif self.direction == 'right':\n self.snake_game.window.blit(self.head_right, self.head_rect)\n elif self.direction == 'left':\n self.snake_game.window.blit(self.head_left, self.head_rect)\n elif self.direction == 'down':\n self.snake_game.window.blit(self.head_down, self.head_rect)", "def draw_sample(self, t=None):\n raise NotImplementedError(\"This method draw_sample(t) has to be implemented in the class inheriting from Arm.\")", "def bite():\n winsound.PlaySound('bite2.wav', winsound.SND_ASYNC)\n win.tracer(0)\n food.goto(random.randint(-260, 260), random.randint(-260, 260))\n new_tail = turtle.Turtle()\n new_tail.color(SNAKE_COLOR)\n new_tail.shape('circle')\n new_tail.speed(0)\n new_tail.pu()\n if segments:\n segments[-1].shape('square')\n new_tail.goto(segments[-1].xcor(), segments[-1].ycor())\n else:\n new_tail.goto(head.xcor(), head.ycor())\n segments.append(new_tail)\n win.tracer(1)", "def draw_snake():\n\n clear_buffer()\n square = HEAD\n\n while square is not None:\n SCREEN_BUFFER[square.x][square.y] = 2\n square = square.behind\n\n update_screen()", "def OnDraw(self):\n self.SetCurrent()\n \n glClear(GL_COLOR_BUFFER_BIT)\n \n if self.arena != None:\n glBegin(GL_LINE_LOOP)\n [red, green, blue] = 
self.arena.GetColor()\n glColor3f(red, green, blue)\n for lines in self.arena.GetLines():\n [point1x, point1y] = lines.GetPosition(0)\n [point2x, point2y] = lines.GetPosition(1)\n glVertex2f(point1x, point1y)\n glVertex2f(point2x, point2y)\n \n \n glEnd()\n \n \n for pillar in self.pillar:\n glBegin(GL_LINE_LOOP)\n [red, green, blue] = pillar.GetColor()\n glColor3f(red, green, blue)\n for lines in pillar.GetLines():\n [point1x, point1y] = lines.GetPosition(0)\n [point2x, point2y] = lines.GetPosition(1)\n glVertex2f(point1x, point1y)\n glVertex2f(point2x, point2y)\n glEnd()\n\n\n#\t if self.temppoint != []:\n#\t \t glBegin(GL_POINTS)\n#\t \t glVertex2f(self.temppoint[0][0], self.temppoint[0][1])\n# glEnd()\n\t\n #Currentray is the ray where we have to worry about animation and changes.\n if self.currentray is not None: \n glBegin(GL_LINES)\n [red, green, blue] = self.currentray.GetColor()\n glColor3f(red, green, blue)\n\t\n [x, y] = [self.currentray.GetPoint().GetPosition(0), self.currentray.GetPoint().GetPosition(1)]\n glVertex2f(x, y)\n \n \n [x, y] = self.currentray.GetEndPoint(self.t)\n \n glVertex2f(x, y)\n\t\n glEnd()\n \n #These rays are static, since they have come to a stop at their points of collision.\n for i in self.ray:\n glBegin(GL_LINES)\n [red, green, blue] = i.GetColor()\n glColor3f(red, green, blue)\n \n [x, y] = [i.GetPoint().GetPosition(0), i.GetPoint().GetPosition(1)]\n glVertex(x, y)\n \n [x, y] = i.GetEndPoint(i.finaltime)\n glVertex2f(x, y)\n glEnd()\n\t\t\t\n \n self.SwapBuffers()\n \n return", "def skeleton_image(folder, image_file, threshold=50, area_thresh=50, figsize=(10, 10), show=False):\n # Median filtered image.\n fname = '{}/{}'.format(folder, image_file)\n image0 = sio.imread(fname)\n image0 = np.ceil(255* (image0[:, :, 1] / image0[:, :, 1].max())).astype(int)\n image0 = skimage.filters.median(image0)\n filt = 'filt_{}.png'.format(image_file.split('.')[0])\n sio.imsave(folder+'/'+filt, image0)\n\n #threshold the image\n binary0 = binary_image(folder, filt, threshold=threshold, close=True, show=False)\n clean = 'clean_{}'.format(filt)\n\n #label image\n short_image, props = label_image(folder, clean, area_thresh=area_thresh, show=False)\n short = 'short_{}'.format(clean)\n short_image = short_image > 1\n # Skeletonize\n skeleton0 = skeletonize(short_image)\n\n branch_data = csr.summarise(skeleton0)\n branch_data_short = branch_data\n\n #Remove small branches\n mglia = branch_data['skeleton-id'].max()\n nbranches = []\n\n ncount = 0\n for i in range(1, mglia+1):\n bcount = branch_data[branch_data['skeleton-id']==i]['skeleton-id'].count()\n if bcount > 0:\n ids = branch_data.index[branch_data['skeleton-id']==i].tolist()\n nbranches.append(bcount)\n for j in range(0, len(ids)):\n branch_data_short.drop([ids[j]])\n\n ncount = ncount + 1\n if show:\n fig, ax = plt.subplots(figsize=(10, 10))\n draw.overlay_euclidean_skeleton_2d(image0, branch_data_short,\n skeleton_color_source='branch-type', axes=ax)\n plt.savefig('{}/skel_{}'.format(folder, short))\n\n return skeleton0, branch_data_short, nbranches, short_image, props", "def draw(self, window):\n # usar sprite para desenhar drone\n pygame.draw.circle(window, BLUE, RATIO * self.location, radius=SIZE_DRONE, width=20)\n pygame.draw.circle(window, BLACK, RATIO * self.location, radius=RATIO * AVOID_DISTANCE, width=1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a name, this will resolve the full list of actions, in the correct order, and return a list of names.
def resolve(cls, name, seen=None):
    action = cls.get(name)
    resolved = deque()

    if seen is None:
        seen = []
    elif name in seen:
        return []

    seen.append(name)

    def find_in_instances(find_name, attr):
        """Closure to find the current name in our instances based on the named attr."""
        found_names = [
            other_name
            for other_name, other_action in six.iteritems(cls._instances)
            if find_name == getattr(other_action, attr)
        ]
        return found_names

    # find all instances where we are listed in an action's 'before'
    for action_name in find_in_instances(name, 'before'):
        for resolved_name in cls.resolve(action_name, seen=seen):
            resolved.append(resolved_name)

    # add this action
    resolved.append(name)

    # now add all instances where we are listed in an action's 'after'
    for action_name in find_in_instances(name, 'after'):
        for resolved_name in cls.resolve(action_name, seen=seen):
            resolved.append(resolved_name)

    return resolved
[ "def get_actions_names(self, name):\n actions = []\n resp_rule = self.get(name)\n resp_actions = resp_rule[\"Actions\"] \n if isinstance(resp_actions, list):\n for resp_action in resp_actions:\n actions.append(resp_action[\"value\"])\n else:\n actions.append(resp_actions[\"value\"])\n \n return actions", "def resolve(action_name, actions=None):\n actionnames = actions or get_server_actions()\n return actionnames.get(action_name)", "def expand_actions(self, actions):\n results = list()\n\n for action in actions:\n if action in self.aliased_actions:\n results.append(action)\n for item in self.expand_actions(self.aliased_actions[action]):\n results.append(item)\n else:\n results.append(action)\n\n return results", "def get_actions(module, name):\r\n\r\n return getattr(module, name)", "def get_action_handlers(name):\n from .cache import _action_handlers\n logger.debug(\"Available action handlers: %s\", _action_handlers)\n return _action_handlers.get(name)", "def _expand_wildcard_action(actions):\n if isinstance(actions, str):\n # Bail early if we have a string with no wildcard\n if \"*\" not in actions:\n return [actions.lower()]\n actions = [actions]\n\n # Map _expand function to action list, resulting in a list of lists of expanded actions.\n temp = map(_expand, actions)\n\n # This flattens the list of lists. It's hard to read, but it's a hot path and the optimization\n # speeds it up by 90% or more.\n expanded = [item.lower() for sublist in temp for item in sublist]\n\n return expanded", "def parse(self, actions: List[str]) -> List[Action]:\n\n list_action = []\n\n for action in actions:\n if action == \"d-\":\n list_action.append(Action.DAY_EARLIER)\n elif action == \"d+\":\n list_action.append(Action.DAY_LATER)\n elif action == \"t-\":\n list_action.append(Action.TIME_EARLIER)\n elif action == \"t+\":\n list_action.append(Action.TIME_LATER)\n\n return list_action", "def get_name_from_actions(regex, actions: list):\n for action in actions:\n assert 1 == len(action)\n lines = action[list(action.keys())[0]]\n for line in lines:\n if isinstance(line, str):\n m = regex.match(line)\n if m:\n groupdict = m.groupdict()\n if 'NAME' in groupdict:\n return groupdict['NAME']\n else:\n assert isinstance(line, dict), \"Bad entry in action list\"\n return None", "def _get_slotnames_from_actionname(self, action_name):\n return action_name.split('#')[1:]", "def get_verbs_from_name(name):\n return [word for word in split_snake_case_name_to_words(name) if is_verb(word)]", "def check_action_name():", "def list_actions_for_rule(self, ruleresourcename: str, count: int = None, filter: str = None, offset: int = None, orderby: List[str] = None, query_params: Dict[str, object] = None) -> List[Action]:\n if query_params is None:\n query_params = {}\n if count is not None:\n query_params['count'] = count\n if filter is not None:\n query_params['filter'] = filter\n if offset is not None:\n query_params['offset'] = offset\n if orderby is not None:\n query_params['orderby'] = orderby\n\n path_params = {\n \"ruleresourcename\": ruleresourcename,\n }\n\n path = Template(\"/catalog/v2alpha2/rules/${ruleresourcename}/actions\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Action)", "def expand(self, names):\n\n names = OptimizeTask.expand(self, names)\n\n newnames = []\n for name in names:\n if name in ['fcc', 'bcc', 'hcp', 'diamond']:\n for Z in range(1, 95):\n x = reference_states[Z]\n if x is not None and 
x['symmetry'] == name:\n newnames.append(chemical_symbols[Z])\n else:\n newnames.append(name)\n\n return newnames", "def _compl_flow_rule_action(self, tokens):\n candidates = []\n\n if (tokens[self._flow_rule_token_index - 1] == \"end\"):\n candidates = [\"actions\"]\n self._flow_rule_token_index += 1\n\n while self._flow_rule_token_index < len(tokens):\n token = tokens[self._flow_rule_token_index - 1]\n\n if token in self.ACT_COMPL_CLASSES.keys():\n try:\n action_cls = self.ACT_COMPL_CLASSES[token]\n action_instance = action_cls()\n\n candidates, index = action_instance.compl_action(\n tokens, self._flow_rule_token_index)\n\n self._flow_rule_token_index = index\n if self._flow_rule_token_index == len(tokens):\n break\n\n if (tokens[self._flow_rule_token_index - 1] == \"/\"):\n # Type candidate and end token\n action_list = list(self.ACT_COMPL_CLASSES.keys())\n candidates = copy.deepcopy(action_list)\n candidates.append(\"end\")\n\n except Exception as _:\n candidates = []\n\n elif (tokens[self._flow_rule_token_index - 1] == \"end\"):\n candidates = []\n break\n\n else:\n # Type candidate and end token\n action_list = list(self.ACT_COMPL_CLASSES.keys())\n candidates = copy.deepcopy(action_list)\n candidates.append(\"end\")\n\n self._flow_rule_token_index += 1\n\n return candidates", "def action_for_all(self, name, **kwargs):\n\n for k in self._manager.keys():\n self._manager[k].action(name, **kwargs)", "def _available_actions(self, pkgs):\n actions = []\n\n for pkg in pkgs:\n if isinstance(pkg, str):\n if pkg not in sys.modules:\n try:\n __import__(pkg)\n except ImportError:\n raise RuntimeError(\"While collecting robot actions, I encountered an unknown module <%s>!\" % pkg)\n\n path = sys.modules[pkg].__path__\n for loader, module_name, is_pkg in pkgutil.walk_packages(path):\n __import__(pkg + \".\" + module_name)\n m = sys.modules[pkg + \".\" + module_name]\n for member in [getattr(m, fn) for fn in dir(m)]:\n if hasattr(member, \"_action\"):\n actions.append(member)\n else:\n # we assume a list of methods has been passed \n if hasattr(pkg, \"_action\"):\n actions.append(pkg)\n\n return actions", "def applicable_actions(self,state):\n res = []\n for a,action in self.actions.iteritems():\n args = action.applicable_args(state)\n for arg in args:\n res.append((a,arg))\n return res", "def getActors(self, name, accuracy=0.8):\n if type(name) is not str:\n raise InvalidInput(\n \"You have entered an invalid name. Please try again.\")\n if not self.__authorized:\n self._authorize()\n id = self._getShowID(name, accuracy)\n if id == -1:\n raise InvalidShowID(\"Show was not found, please try again\")\n return self._getActors(id)", "def extractActionSequence(model, actions):\n \"*** YOUR CODE HERE ***\"\n plan = []\n for key in model:\n syminfo = logic.PropSymbolExpr.parseExpr(key)\n if model[key] and isinstance(syminfo, tuple) and isinstance(syminfo[1], str) and syminfo[0] in actions:\n plan.append(syminfo)\n return [action for action, time in sorted(plan, key=lambda pair: int(pair[1]))]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load any class object stored with Pickle. Create and store a new instance in case it doesn't exist.
def load_pickle_object(filename, class_name, class_args):
    try:
        with open(filename, 'rb') as f:
            loaded_object = pickle.load(f)
    # except (OSError, IOError) as e:
    except Exception as e:
        loaded_object = class_name(*class_args)
        with open(filename, 'wb') as f:
            pickle.dump(loaded_object, f)
    return loaded_object
[ "def load_obj(path):\n with open(path, \"rb\") as f:\n return pickle.load(f)", "def load_obj(name):\r\n with open(name + '.pkl', 'rb') as f:\r\n return pickle.load(f)", "def pickle_main(f_name, pickle_source, do_pickle, instance = None):\n \n if do_pickle and instance is not None:\n \n \"if given an instance. save it as a class dictionary pickle\"\n print(f\"Pickling file to {f_name}\") \n pickler(instance.__dict__, pickle_source, f_name)\n return\n \n else:\n file = depickler(pickle_source, f_name)\n print(f\"Loading pickle {f_name}\")\n \"try loading the specified file as a class dict. else an instance.\"\n if type(file) == dict:\n \"removes old ukf function in memory\"\n \n \n instance = class_dict_to_instance(file)\n else: \n instance = file\n \n return instance", "def load(path):\n with open(path, 'rb') as pickle_file:\n obj = pickle.load(pickle_file)\n if not isinstance(obj, Modeler):\n raise ValueError('Serialized object is not a Modeler instance')\n return obj", "def load_obj(load_dir):\r\n return pickle.load(open(load_dir, 'rb'))", "def load(cls, infile):\n import pickle\n from .io import open_file\n # instantiate a new Processor and return it\n with open_file(infile, 'rb') as f:\n # Python 2 and 3 behave differently\n try:\n # Python 3\n obj = pickle.load(f, encoding='latin1')\n except TypeError:\n # Python 2 doesn't have/need the encoding\n obj = pickle.load(f)\n # warn if the unpickled Processor is of other type\n if obj.__class__ is not cls:\n import warnings\n warnings.warn(\"Expected Processor of class '%s' but loaded \"\n \"Processor is of class '%s', processing anyways.\" %\n (cls.__name__, obj.__class__.__name__))\n return obj", "def load(self, *args):\r\n object = self.createObject(*args)\r\n self.fillObject(object)\r\n if self._loader is not None :\r\n self._loader( object )\r\n return object", "def load_object(self, obj):\n pass", "def _make_class_unpicklable(obj):\n def _break_on_call_reduce(self, proto):\n raise TypeError('%r cannot be pickled' % self)\n if isinstance(obj, dict):\n obj['__reduce_ex__'] = _break_on_call_reduce\n obj['__module__'] = '<unknown>'\n else:\n setattr(obj, '__reduce_ex__', _break_on_call_reduce)\n setattr(obj, '__module__', '<unknown>')", "def load(cls):\r\n\r\n try:\r\n return cls.objects.get()\r\n except cls.DoesNotExist: # pragma: no cover\r\n return cls()", "def load_object_from(source_path):\n abs_source_path = os.path.abspath(source_path)\n f = open(abs_source_path, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj", "def load_loader(py_obj):\n\n # Obtain the MRO of this object\n mro_list = get_mro_list(py_obj)\n\n # Loop over the entire mro_list\n for mro_item in mro_list:\n # Check if mro_item can be found in types_dict and return if so\n if mro_item in types_dict:\n return\n\n # Obtain the package name of mro_item\n pkg_name = mro_item.__module__.split('.')[0]\n\n # Obtain the name of the associated loader\n loader_name = 'hickle.loaders.load_%s' % (pkg_name)\n\n # Check if this module is already loaded, and return if so\n if loader_name in loaded_loaders:\n return\n\n # Try to load a loader with this name\n try:\n loader = import_module(loader_name)\n # If any module is not found, catch error and check it\n except ImportError as error:\n # Check if the error was due to a package in loader not being found\n if 'hickle' not in error.args[0]: # pragma: no cover\n # If so, reraise the error\n raise\n # If such a loader does exist, register classes and return\n else:\n list(starmap(register_class, loader.class_register))\n 
list(map(register_class_exclude, loader.exclude_register))\n loaded_loaders.append(loader_name)\n return", "def unpickle_this(path):\t\t\t\t\t#Deprecated in final version: too slow\n\tprint \"Unpickling \"+str(path)+\"...\",\n\tthis_obj = pickle.load(open('pickles/'+str(path)+'.pickle'))\n\tprint \"done\"\n\treturn this_obj", "def load(self, filename):\n raise NotImplementedError(\"Loading from pickled files is not yet supported.\")", "def load(self, serialized):\n self.m = pickle.load(serialized)", "def unpickle_obj(filename: str) -> Any:\n with open(filename, 'rb') as f:\n return pickle.load(f)", "def importer(filename):\n start_time = time.time()\n\n # Check the file directory exists\n file_directory = os.path.dirname(os.path.abspath(filename))\n if not os.path.isdir(file_directory):\n raise ValueError(\"The file directory: \\\"\" + file_directory + \"\\\" does not appear to exist.\")\n\n logging.info(\"Beginning pickle IMPORT of file: \\\"\" + filename + \"\\\"\")\n # import from the pickle file.\n f = open(filename, 'r')\n obj = pickle.load(f)\n f.close()\n\n logging.info(\"Completed pickle IMPORT of file: \\\"\" + filename + \"\\\"\")\n print_elapsed_time(start_time, \"pickle IMPORT of file: \\\"\" + filename + \"\\\"\")\n return obj", "def load(cls, id: str) -> BaseConverter:\n obj = cache.get(id)\n if obj:\n return pickle.loads(obj)\n raise KeyError(f\"Converter with id {id} not found in cache\")", "def load_objects(saved_path):\n objects = None\n if os.path.exists(saved_path):\n with open(saved_path, 'rb') as file:\n objects = pickle.load(file)\n return objects" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Note: This function requires the UGCNormal normalizer to be in the parent directory.
def normalize_corpus(input_folder, output_folder):
    subprocess.call(['../UGCNormal/ugc_norm.sh', input_folder, output_folder])
[ "def _normalise_path(self, path: str) -> str:\n return os.path.normpath(os.path.normcase(path))", "def preproc(indir, outdir):\n subdirs = glob.glob(indir + '/*')\n for i,subdir in enumerate(subdirs):\n print \"Preprocessing directory {} of {}\".format(i+1, len(subdirs))\n files = glob.glob(subdir + '/*')\n for text_file in files:\n path = dirname(text_file)\n prefix = basename(path)\n with open(text_file) as f:\n out_name = pjoin(outdir, prefix+basename(text_file))\n with open(out_name, 'w+') as out:\n for line in f:\n for sent in sent_tokenize(line.decode('utf-8').strip()):\n out.write(\n strip_punct(sent).lower().encode('utf-8')+'\\n')", "def test_read_input_does_not_crash_diff_dir(self):\n ugen.read_input(\"../input/file1\")", "def _normalize_filename(self, filename, basedir):\n if filename.startswith('/'):\n return filename\n\n return os.path.join(basedir, filename).replace('\\\\', '/')", "def test_read_input_does_not_crash_same_dir(self):\n ugen.read_input(\"./file1\")", "def normalize_path( some_path ):\r\n return os.path.normcase( os.path.normpath( some_path ) )", "def compute_normalized_volumes(self, db, prep, exp_out, mode):\n \"\"\"\n Arguments:\n db: DatabaseBRATS object\n prep: PreprocessorBRATS object\n exp_out: path to the experiment meta data output\n mode: train, valid or test database\n \"\"\"\n if mode == 'train':\n data_dict = db.train_dict\n elif mode == 'valid':\n data_dict = db.valid_dict\n elif mode == 'test':\n data_dict = db.test_dict\n\n db.norm_volumes_dir = os.path.join(exp_out,\n 'normalized_volumes', mode)\n if not os.path.exists(os.path.join(db.norm_volumes_dir, 'done')):\n n_subjects = len(data_dict)\n if not os.path.exists(db.norm_volumes_dir):\n os.makedirs(db.norm_volumes_dir)\n for s_idx, s in enumerate(data_dict):\n self._normalize_volumes(data_dict[s], db, prep)\n sys.stdout.write(\"\\rComputing and saving normalized volumes: \"\n \"%.3f %% / 100 %%\" %\n (100 * float(s_idx + 1) / n_subjects))\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n\n with open(os.path.join(db.norm_volumes_dir, 'done'), 'w') as f:\n f.close()\n else:\n print \"Volumes already normalized\"", "def _flattenAllLevels(self, source):\n\n for root, directories, files in os.walk(source):\n for file in files:\n filePath = os.path.join(root, file)\n destination = os.path.join(source, file)\n if filePath != destination:\n shutil.move(filePath, destination)\n\n for directory in os.listdir(source):\n if os.path.isdir(os.path.join(source, directory)):\n shutil.rmtree(os.path.join(source,directory))", "def Normalize( script_filename_or_dir,\r\n output_stream=sys.stdout,\r\n ):\r\n\r\n with StreamDecorator(output_stream).DoneManager( line_prefix='',\r\n prefix=\"\\nResults: \",\r\n suffix='\\n',\r\n ) as dm:\r\n if os.path.isfile(script_filename_or_dir):\r\n script_filenames = [ script_filename_or_dir, ]\r\n elif os.path.isdir(script_filename_or_dir):\r\n script_filenames = list(FileSystem.WalkFiles(script_filename_or_dir, recurse=False))\r\n else:\r\n assert False\r\n\r\n for index, script_filename in enumerate(script_filenames):\r\n nonlocals = CommonEnvironment.Nonlocals(result=None)\r\n\r\n dm.stream.write(\"Processing '{}' ({} of {})...\".format( script_filename,\r\n index + 1,\r\n len(script_filenames),\r\n ))\r\n with dm.stream.DoneManager( done_suffix=lambda: PythonActivationActivity.NormalizeScriptResultStrings[nonlocals.result.value],\r\n ):\r\n nonlocals.result = PythonActivationActivity.NormalizeScript(script_filename)\r\n\r\n return dm.result", "def test_normalize():\n 
normalize = preprocess.Normalize()\n tensor = (np.random.rand(100, 10) - 0.5) * 100\n\n normed = normalize(tensor)\n assert np.allclose(normed.mean(), 0.0)\n assert np.allclose(normed.std(), 1.0)", "def test_abnormal_dir_omitted_synthetic(self, make_data_module):\n data_module = make_data_module(dataset=\"folder\", test_split_mode=\"synthetic\", abnormal_dir=None)\n # check if we can retrieve a sample from every subset\n next(iter(data_module.train_dataloader()))\n next(iter(data_module.test_dataloader()))\n next(iter(data_module.val_dataloader()))\n # the test set should contain anomalous samples, which have been converted from normals\n assert data_module.test_data.has_anomalous", "def _localNormalizeData(self,values,names,feat):\n if not self.externalNorm:\n self.muAndSigmaFeatures[feat] = (0.0,1.0)\n else:\n super()._localNormalizeData(values,names,feat)", "def test_c_normalizer(self):\n #3' shuffling\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000088.3:c.589_600inv\"))),\n \"NM_000088.3:c.590_599inv\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.31del\"))),\n \"NM_001166478.1:c.35delT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.35_36insT\"))),\n \"NM_001166478.1:c.35dupT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.36_37insTC\"))),\n \"NM_001166478.1:c.36_37dupCT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.35_36dup\"))),\n \"NM_001166478.1:c.36_37dupCT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.2_7delinsTTTAGA\"))),\n \"NM_001166478.1:c.3_4delGAinsTT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.30_31insT\"))),\n \"NM_001166478.1:c.35dupT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.59delG\"))),\n \"NM_001166478.1:c.61delG\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.36_37insTCTCTC\"))),\n \"NM_001166478.1:c.37_38insCTCTCT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.14_15insT\"))),\n \"NM_000051.3:c.15dupT\")\n\n #5' shuffling\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_000088.3:c.589_600inv\"))),\n \"NM_000088.3:c.590_599inv\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.34del\"))),\n \"NM_001166478.1:c.31delT\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.35_36insT\"))),\n \"NM_001166478.1:c.31dupT\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.36_37insTC\"))),\n \"NM_001166478.1:c.35_36dupTC\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.35_36dup\"))),\n \"NM_001166478.1:c.35_36dupTC\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.2_7delinsTTTAGA\"))),\n \"NM_001166478.1:c.3_4delGAinsTT\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.30_31insT\"))),\n \"NM_001166478.1:c.31dupT\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.61delG\"))),\n \"NM_001166478.1:c.59delG\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.36_37insTCTCTC\"))),\n 
\"NM_001166478.1:c.34_35insTCTCTC\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.14_15insT\"))),\n \"NM_000051.3:c.14dupT\")\n\n #Around exon-intron boundary\n self.assertEqual(str(self.normc.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.59delG\"))),\n \"NM_001166478.1:c.60delG\")\n self.assertEqual(str(self.norm5c.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.61delG\"))),\n \"NM_001166478.1:c.61delG\")\n self.assertEqual(str(self.norm5c.normalize(self.hp.parse_hgvs_variant(\"NM_001110792.1:c.1030_1035del\"))),\n \"NM_001110792.1:c.1029_1034delGAGCGG\")\n with self.assertRaises(HGVSUnsupportedOperationError):\n self.normc.normalize(self.hp.parse_hgvs_variant(\"NM_001166478.1:c.59_61del\"))\n\n #UTR variants\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.-5_-4insA\"))),\n \"NM_000051.3:c.-3dupA\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.-4_-3insAC\"))),\n \"NM_000051.3:c.-3_-2dupAC\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.-2_-1insCA\"))),\n \"NM_000051.3:c.-1_1dupCA\")\n\n self.assertEqual(str(self.normc.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.-2_-1insCA\"))),\n \"NM_000051.3:c.-1_1insAC\")\n\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.-4_-3insA\"))),\n \"NM_000051.3:c.-4dupA\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.1_2insCA\"))),\n \"NM_000051.3:c.-1_1dupCA\")\n\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.*2_*3insT\"))),\n \"NM_000051.3:c.*4dupT\")\n self.assertEqual(str(self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.9170_9171insAT\"))),\n \"NM_000051.3:c.9171_*1dupAT\")\n\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.*4_*5insT\"))),\n \"NM_000051.3:c.*3dupT\")\n self.assertEqual(str(self.norm5.normalize(self.hp.parse_hgvs_variant(\"NM_000051.3:c.9171_*1insA\"))),\n \"NM_000051.3:c.9171dupA\")\n\n with self.assertRaises(HGVSInvalidVariantError):\n self.norm.normalize(self.hp.parse_hgvs_variant(\"NM_000059.3:c.7790delAAG\"))", "def test_read_input_does_not_crash_same_dir_filename(self):\n ugen.read_input(\"file1\")", "def test_normalization(self):\n model = NormalizationTestModel()\n model.qconfig = torch.ao.quantization.get_default_qconfig('fbgemm')\n prepare(model, inplace=True)\n self.checkObservers(model)\n test_only_eval_fn(model, self.calib_data)\n model = convert(model)\n\n def checkQuantized(model):\n self.checkNoPrepModules(model.layer_norm)\n self.checkNoPrepModules(model.group_norm)\n self.checkNoPrepModules(model.instance_norm1d)\n self.checkNoPrepModules(model.instance_norm2d)\n self.checkNoPrepModules(model.instance_norm3d)\n self.assertEqual(type(model.layer_norm), nnq.LayerNorm)\n self.assertEqual(type(model.group_norm), nnq.GroupNorm)\n self.assertEqual(type(model.instance_norm1d), nnq.InstanceNorm1d)\n self.assertEqual(type(model.instance_norm2d), nnq.InstanceNorm2d)\n self.assertEqual(type(model.instance_norm3d), nnq.InstanceNorm3d)\n test_only_eval_fn(model, self.calib_data)\n self.checkScriptable(model, self.calib_data)\n self.checkNoQconfig(model)\n\n checkQuantized(model)\n\n model_oneline = quantize(\n NormalizationTestModel(), test_only_eval_fn, [self.calib_data])\n checkQuantized(model)", "def _flattenOneLevel(self, source):\n\n for directory in os.listdir(source):\n 
currentFolder = os.path.join(source, directory)\n if os.path.isdir(currentFolder) and os.path.basename(currentFolder) != \"Text\":\n for file in os.listdir(currentFolder):\n shutil.move(os.path.join(currentFolder, file), os.path.join(source, file))\n\n try:\n shutil.rmtree(currentFolder)\n except OSError:\n print \"Error: Unable to remove path: \" + os.path.abspath(path)", "def test_e_regularization_structure(self):\n self.family.clean_tree()\n self.family.generate_tree(thermo_database=self.thermoDatabase, rxns=self.treerxns)\n self.family.check_tree()\n self.family.regularize(thermo_database=self.thermoDatabase, rxns=self.treerxns)\n self.family.check_tree()", "def normalize(self):\n self._pdf / self.norm", "def test_normalize_if():\n hops = HOPS(\n sys_param,\n noise_param=noise_param,\n hierarchy_param=hier_param,\n eom_param=eom_param,\n integration_param=integrator_param,\n )\n hops.initialize([2, 3])\n hops.basis.eom.normalized = True\n norm = hops.normalize([2, 3])\n known_norm = [0.5547002, 0.83205029]\n assert np.allclose(norm, known_norm)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the messages into the desired form: 1. change the toAddresses value to a list.
def format_messages(messages: list): for message in messages: to_addresses = message.get('toAddresses') if isinstance(to_addresses, str): message['toAddresses'] = argToList(to_addresses) return messages
[ "def sendall_recipient_addresses() -> List[str]:\n return [to_address(0x1234)]", "def _transform_recipients(self):\n # Extract recipients\n addrs = email.utils.getaddresses(self._message.get_all(\"TO\", [])) + \\\n email.utils.getaddresses(self._message.get_all(\"CC\", [])) + \\\n email.utils.getaddresses(self._message.get_all(\"BCC\", []))\n self._recipients = [x[1] for x in addrs]\n self._message.__delitem__(\"bcc\")\n self._sender = self._message[\"from\"]", "def fill_messages(addressant, messages):\n\n check_func = dict.get(addressant.name, None)\n\n if not check_func:\n print(NOT_FOUND_ADDRESSANT_MESSAGE)\n return\n\n for message in messages:\n if check_func(message.strip('\\n')): # set message without '\\n' in end\n addressant.messages.append(message)", "def test_msg_to_list_of_string(self):\n self.json_message[\"msg_to\"] = [\"01b51fcc-ed43-4cdb-ad1c-450f9986859b\"]\n with self.app.app_context():\n g.user = User(self.json_message[\"msg_from\"], \"respondent\")\n try:\n MessageSchema().load(self.json_message)\n except ValidationError:\n self.fail(\"Schema should've been correct and not thrown an error\")", "def update_mailing_list(new_list):\n sympa_mgmt = get_sympa()\n member_list = map(lambda x: x + \"@ust.hk\", new_list)\n sympa_mgmt.replace_email(app.config['MAILING_LIST'], member_list)", "def get_to_addresses(self):\n addresses = self.get_all_addr_header('Resent-To')\n addresses.extend(self.get_all_addr_header('Resent-Cc'))\n if addresses:\n for address in addresses:\n yield address\n else:\n for key in TO_HEADERS:\n for address in self.get_all_addr_header(key):\n yield address", "def forwarding_address_list_command() -> CommandResults:\n args = demisto.args()\n user_id = args.get('user_id')\n limit = int(args.get('limit', '50'))\n result = forwarding_address_list(user_id)\n context = result.get('forwardingAddresses')\n context = context[:limit] if context else []\n for msg in context:\n msg['userId'] = user_id\n headers = ['forwardingEmail', 'verificationStatus']\n return CommandResults(\n raw_response=result,\n outputs=context,\n readable_output=tableToMarkdown(f'Forwarding addresses list for: \"{user_id}\"', context, headers, removeNull=True),\n outputs_prefix='Gmail.ForwardingAddress',\n outputs_key_field=['forwardingEmail', 'userId']\n )", "def forwarding_address_update_command() -> list[CommandResults]:\n args = demisto.args()\n forwarding_email_list = argToList(args.get('forwarding_email'))\n user_id = args.get('user_id')\n disposition = args.get('disposition')\n headers = {\n 'success': ['forwardingEmail', 'userId', 'disposition', 'enabled'],\n 'failure': ['forwardingEmail', 'errorMessage']\n }\n outputs_list_success = []\n outputs_list_failure = []\n results = []\n for forwarding_email in forwarding_email_list:\n result_forwarding_update, is_exception, error_details = forwarding_address_update(user_id, disposition, forwarding_email)\n if is_exception:\n outputs_list_failure.append(error_details)\n demisto.debug(error_details)\n else:\n result_forwarding_update['forwardingEmail'] = result_forwarding_update.pop('emailAddress')\n result_forwarding_update['userId'] = user_id\n outputs_list_success.append(result_forwarding_update)\n\n if outputs_list_success:\n results.append(CommandResults(raw_response=outputs_list_success,\n outputs=outputs_list_success,\n readable_output=tableToMarkdown(f'Forwarding addresses update results for \"{user_id}\":',\n outputs_list_success, headers['success'], removeNull=True),\n outputs_prefix='Gmail.ForwardingAddress',\n 
outputs_key_field=['forwardingEmail', 'userId']))\n if outputs_list_failure:\n results.append(CommandResults(raw_response=outputs_list_failure,\n readable_output=tableToMarkdown(f'Forwarding addresses update errors for \"{user_id}\":',\n outputs_list_failure, headers['failure'], removeNull=True),\n outputs_prefix='Gmail.ForwardingAddress',\n outputs_key_field=['userId', 'forwardingEmail']))\n\n return results", "def reply_to_list(self) -> List[MailAddress]:\n return self._reply_to_list", "def recipient_addrs(self):\n tos = self.msg.get_all('to', [])\n ccs = self.msg.get_all('cc', [])\n ccs = self.msg.get_all('bcc', [])\n resent_tos = self.msg.get_all('resent-to', [])\n resent_ccs = self.msg.get_all('resent-cc', [])\n recipient_addrs = email.utils.getaddresses(tos + bccs + ccs + resent_tos + resent_ccs)\n return [String(a[1]) for a in recipient_addrs]", "def to_address_messenger_type(self, to_address_messenger_type):\n allowed_values = [\"sms\", \"facebook\", \"twitter\", \"line\", \"whatsapp\", \"webmessaging\", \"open\"]\n if to_address_messenger_type.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for to_address_messenger_type -> \" + to_address_messenger_type)\n self._to_address_messenger_type = \"outdated_sdk_version\"\n else:\n self._to_address_messenger_type = to_address_messenger_type", "def addresses(self) -> typing.List[str]:\n return typing.cast(\n typing.List[str],\n self._properties.get(\"addresses\"),\n )", "def test_get_list_unsubscribe_recipients(self):\n pass", "def clean_emailaddrs(self, stoplist):\n mapping_table = {} # username-email mapping table\n contact_table = {} # contact list\n mbox = [mailbox.mbox(each) for each in self.path]\n\n for each_mbox in mbox:\n for each_record in each_mbox:\n self.mails_count += 1\n tmp_contact_set = set()\n tmp_raw_contact_set = set()\n\n if each_record.has_key('From'):\n tmp_mail_from = each_record['From'].split(',')\n # filter auto-sent group emails\n if self.is_auto_sent_group(tmp_mail_from, stoplist):\n continue\n tmp_raw_contact_set.update(tmp_mail_from)\n if each_record.has_key('To'):\n tmp_mail_to = each_record['To'].split(',')\n # filter auto-sent group emails\n if self.is_auto_sent_group(tmp_mail_to, stoplist):\n continue\n tmp_raw_contact_set.update(tmp_mail_to)\n if each_record.has_key('Cc'):\n tmp_raw_contact_set.update(each_record['Cc'].split(','))\n\n for each_raw_contact in tmp_raw_contact_set:\n emailaddr, username = self.parseaddr(each_raw_contact)\n if emailaddr and not self.is_stopword(emailaddr, stoplist):\n tmp_contact_set.update([emailaddr])\n mapping_table[emailaddr] = username\n\n for each_contact in tmp_contact_set:\n try:\n contact_table[each_contact].update(tmp_contact_set)\n except:\n contact_table.update({each_contact:tmp_contact_set})\n contact_table[each_contact].remove(each_contact)\n return [mapping_table, contact_table]", "def msgContacts9(mensaje,contactsToSendTo,diccionarioMaestro ):\n listSplit = contactsToSendTo.split(',')\n verifiedSplit = []\n invalidSplit = []\n iteration1 = 0\n for items in listSplit:\n if listSplit[iteration1] in diccionarioMaestro:\n verifiedSplit.append(listSplit[iteration1])\n else:\n invalidSplit.append(listSplit[iteration1])\n iteration1 = iteration1 + 1\n\n\n \n if len(invalidSplit) != 0:\n print('Contacto(s) → {} ← parece(n) no existir en la lista de contactos'.format(', '.join(invalidSplit)))\n print(\"Porfavor asegúrese de haber escrito el contacto correctamente y que el contacto si exista en el directorio.\")\n\n elif len(invalidSplit) == 
0:\n print('To: {}'.format(', '.join(verifiedSplit)),' → ', mensaje)", "def modify_v10_results_to_v9_format(response: List[Dict[Any, Any]]) -> List[Dict[Any, Any]]:\n key_list = ['IPv6AddressRange', 'HostIPv6', 'Network_IPV_6', 'Network_IPV_4',\n 'HostIPv4', 'IPv4AddressRange']\n for record in response:\n for key, value in record.items():\n if key in key_list and value: # find the key that its value is the dict contains the addresses\n address_list: list = []\n my_key = key\n\n # The value of the first (and only) key is a list containing dict with addresses\n addresses = value[ADDRESS_LIST_MAP.get(key)]\n for inner_dict in addresses:\n temp_dict = {}\n for key in inner_dict.keys():\n # choose the relevant keys and values and saves them in a temp dict\n if key == 'value':\n address_list.append(inner_dict[key])\n elif key in ['FromAddress', 'ToAddress']:\n temp_dict[key] = inner_dict[key]\n\n address_list.append(temp_dict) if temp_dict else None\n\n if address_list:\n # replace the list of dicts in the original record with a list of strings containing the addresses\n record[my_key] = {ADDRESS_LIST_MAP.get(my_key): address_list}\n\n return response", "def send_messages(self, messages):\n to_send = []\n for message in messages:\n pre_intercept = message.to\n message.to, message.cc, message.bcc = [self.email], [], []\n log.info('email to {0} intercepted by {1}'.format(\n pre_intercept, message.to))\n to_send.append(message)\n return super(EmailInterceptor, self).send_messages(to_send) or 0", "def test_suppress_empty_address_lists(self):\n self.message.send()\n data = self.get_api_call_data()\n self.assertNotIn(\"cc\", data)\n self.assertNotIn(\"bcc\", data)\n self.assertNotIn(\"h:Reply-To\", data)\n\n # Test empty `to`--but send requires at least one recipient somewhere (like cc)\n self.message.to = []\n self.message.cc = [\"cc@example.com\"]\n self.message.send()\n data = self.get_api_call_data()\n self.assertNotIn(\"to\", data)", "def test_get_notification_addresses(self):\n self.assertEqual(\n sorted([\n 'project@example.com',\n 'worker@example.com',\n 'template@example.com',\n 'job@example.com',\n ]),\n sorted(Job.objects.get(pk=1).get_notification_addresses())\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get messages from a threat and return only the messages that are in the time range
def get_messages_by_datetime(client: Client, threat_id: str, after: str, before: str): messages = [] res = client.get_threat(threat_id) for message in res.get('messages'): # messages are ordered from newest to oldest received_time = message.get('receivedTime') if before >= received_time >= after: messages.append(message) elif received_time < after: break return messages
[ "def filter_time_range(messages, time_keys, start_time_inclusive, end_time_inclusive):\n # De-duplicate time_keys\n assert isinstance(time_keys, set)\n\n log.debug(f\"Filtering out messages sent outside the time range \"\n f\"{start_time_inclusive.isoformat()} to {end_time_inclusive.isoformat()} \"\n f\"for time keys {time_keys}...\")\n\n # Validate the input data to ensure that each message object only contains one of the time_keys.\n for td in messages:\n matching_time_keys = 0\n for time_key in time_keys:\n if time_key in td:\n matching_time_keys += 1\n assert matching_time_keys == 1, matching_time_keys\n\n # Perform the actual filtering\n filtered = []\n for td in messages:\n for time_key in time_keys:\n if time_key in td and start_time_inclusive <= isoparse(td[time_key]) < end_time_inclusive:\n filtered.append(td)\n break\n\n log.info(f\"Filtered out messages sent outside the time range \"\n f\"{start_time_inclusive.isoformat()} to {end_time_inclusive.isoformat()}. \"\n f\"Returning {len(filtered)}/{len(messages)} messages.\")\n\n return filtered", "def getMessages(self, time):\n with self.__lock:\n # Get left index, to retrieve strict equality times\n base_idx = bisect.bisect_left(self.__times, time)\n return [message\n for message in self.__messages[time]\n for time in self.__times[base_idx:]]", "def check_if_each_msg_time_are_in_the_period_of_time_between_st1_and_st2(st1, st2, msg_list):\n import datetime\n if len(msg_list) == 0:\n return 'msg list is zero'\n\n for i in range(len(msg_list)):\n msg = msg_list[i]\n for j in range(len(msg_list[i])):\n temp_list = msg[j]\n if temp_list['time']['date_time'] < st2 and temp_list['time']['date_time'] > st1:\n continue\n else:\n print 'temp_t=',temp_list['time']['date_time']\n return 'failure'\n return 'success'", "def get_campaigns_over_time(self, project, start, end):\n mc_man = MailchimpManager(project.mailchimp_api_token)\n json = mc_man.get_campaigns(start, end, project.mailchimp_list_id)\n result = []\n for item in json.get('data'):\n if(item.get('status') == 'sent'):\n dt = parser.parse(item.get('send_time'))\n result.append({'x':util.unix_time_millis(dt),\n 'title': 'M',\n 'text': u'Mailchimp campaign verstuurd: {}'.format(item.get('title'))})\n # sort the array of dicts by the value of x\n newlist = sorted(result, key=itemgetter('x'))\n return newlist", "async def list(self, ctx):\n message = ctx.message\n found_subs = ''\n found_subs_messages = []\n one_sub_found = False\n for sub in self.scheduler.subscriptions:\n if message.author in sub.users and (not sub.is_private or message.channel.is_private):\n if len(found_subs) + len(sub.tags_to_message()) >= 2000:\n found_subs_messages.append(found_subs)\n found_subs = ''\n found_subs += '\\n`{}`'.format(sub.tags_to_message())\n one_sub_found = True\n if sub.is_private:\n found_subs += ' [private]'\n found_subs_messages.append(found_subs)\n\n if one_sub_found:\n for element in found_subs_messages:\n await self.bot.say(element)\n else:\n await self.bot.reply('You aren\\'t subscribed to any tags')", "def get_messages_matching_query(self, query=''):\n try:\n response = self.service.users().messages().list(\n userId='me', q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = self.service.users().messages().list(\n userId='me',\n q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n return messages\n except 
errors.HttpError as error:\n raise GMailError(f'An error occurred: {error}')", "def check_if_multi_msg_time_are_in_the_period_of_time_between_st1_and_st2(st1, st2, msg_list):\n import datetime\n if len(msg_list) == 0:\n return 'msg list is zero'\n\n st1 = datetime.datetime.strptime(st1.strip(),'%Y-%m-%d %H:%M:%S.%f')\n st2 = datetime.datetime.strptime(st2.strip(),'%Y-%m-%d %H:%M:%S.%f')\n print 'st1=',st1\n print 'st2=',st2\n \n for i in range(len(msg_list)):\n msg = msg_list[i]\n result = check_if_each_msg_time_are_in_the_period_of_time_between_st1_and_st2(st1, st2, msg)\n if result == 'success':\n continue\n else:\n return 'failure'\n return 'success'", "def split_phish_emails_by_time(self, phish_inbox, train_time_interval):\n train_start_time = train_time_interval[0]\n train_end_time = train_time_interval[1]\n train_indeces = []\n\n for i in range(len(phish_inbox)):\n email = phish_inbox[i]\n email_time = email.get_time()\n if email_time != None and email_time >= train_start_time and email_time < train_end_time:\n train_indeces.append(i)\n return train_indeces", "def filter_tweets_timewindow(self,timepoint_1,timepoint_2):\n filtered_tweets = []\n point_datetime_begin = time_functions.return_datetime(timepoint_1)\n point_datetime_end = time_functions.return_datetime(timepoint_2)\n for instance in self.instances:\n #Get the time of the event mentioned in the tweet \n tweet_datetime = time_functions.return_datetime(instance.date,\n time=instance.time,setting=\"vs\")\n #Extract the time difference between the tweet and the event \n if tweet_datetime > point_datetime_begin and tweet_datetime < point_datetime_end:\n filtered_tweets.append(instance)\n \n self.instances = filtered_tweets", "def check_message_time(msg_buf):\n lines = msg_buf.strip().splitlines()\n for line in lines:\n match = re.search(r\"MONITORING TIME: 2011-03-27\\s*([01][0-9]):.*\", line, re.I)\n if match is not None:\n if match.group(1) == '02' or match.group(1) == '04':\n continue\n else:\n print line\n return 'failure'\n return 'success'", "def query(self, *,\n sender=None,\n senders=None,\n start=datetime.min,\n end=datetime.max):\n if sender and senders:\n raise ValueError(\"`sender` and `senders` cannot be simultaneously specified\")\n\n if sender:\n senders = set([sender])\n\n def condition(m):\n return (start <= m.time < end) and (not senders or m.sender in senders)\n\n return [m for m in self.messages if condition(m)]", "def _timed_msg_of_bundle(bundle: osc_bundle.OscBundle, now: float) -> List[TimedMessage]:\n msgs = []\n for content in bundle:\n if type(content) is osc_message.OscMessage:\n if (bundle.timestamp == osc_types.IMMEDIATELY or bundle.timestamp < now):\n msgs.append(TimedMessage(now, content))\n else:\n msgs.append(TimedMessage(bundle.timestamp, content))\n else:\n msgs.extend(_timed_msg_of_bundle(content, now))\n return msgs", "def get_list_threats(client: Client, after: str, before: str):\n threats = []\n is_next_page = True\n page_number = 1\n while is_next_page:\n params = assign_params(pageSize=1000, filter=f'receivedTime gte {after} lte {before}', pageNumber=page_number)\n res = client.list_threats(params)\n threats += res.get('threats')\n if res.get('nextPageNumber'):\n page_number = res.get('nextPageNumber')\n else:\n is_next_page = False\n return threats", "def request_by_timestamp(reqs, start, end):\n return reqs[(reqs['timestamp'] >= start) & (reqs['timestamp'] <= end)]", "def get_messages():\n mes_tweets = MyTweet.query.all()\n return mes_tweets", "def messages(self) -> List[TimedMessage]:\n 
return self._messages", "def timed_out(self):\n return [p for p, t in self._unacknowledged.items() \n if t <= self.flow.env.now - self._timeout]", "def _getDaysWithoutMessages(self):\n daysWithMsgs = self.df['date'].drop_duplicates()\n\n datelist = pd.Series(pd.date_range(self.df.iloc[0]['date'], self.df.iloc[-1]['date']))\n datelist = datelist.apply(lambda x:x.date().strftime(Message.DATE_FORMAT))\n # data.index = pd.DatetimeIndex(data.index)\n # data = data.reindex(datelist, fill_value=0)\n\n daysWithoutMsgs = np.setdiff1d(datelist, daysWithMsgs)\n return daysWithoutMsgs", "def search_SENTBEFORE(self, query, id, msg):\n date = msg.getHeaders(False, 'date').get('date', '')\n date = email.utils.parsedate(date)\n return date < parseTime(query.pop(0))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get list of all threat ids in the time range
def get_list_threats(client: Client, after: str, before: str): threats = [] is_next_page = True page_number = 1 while is_next_page: params = assign_params(pageSize=1000, filter=f'receivedTime gte {after} lte {before}', pageNumber=page_number) res = client.list_threats(params) threats += res.get('threats') if res.get('nextPageNumber'): page_number = res.get('nextPageNumber') else: is_next_page = False return threats
[ "def all_timestamps():\n ts = current_timestamp()\n return list(range(ts - history_sec, ts, aggregation_interval_sec))", "def get_ids():", "def get_incidents_since(hours):\n\n def get_since(hours):\n\n now = datetime.utcnow()\n since = now - timedelta(hours=hours)\n\n return since\n\n incidents = do_get_request(\"incidents\", \"incidents\", {\"since\": str(get_since(hours))})\n\n incident_ids = []\n\n for incident in incidents[\"incidents\"]:\n incident_ids.append(str(incident[\"id\"]))\n\n return incident_ids", "def time_ranges(self):\r\n time_range_list = []\r\n for idx in range(self.data_annotation.shape[-1]):\r\n time_range_list.append((self.data_annotation[0, idx] * MCS_TICK, self.data_annotation[1, idx] * MCS_TICK))\r\n return time_range_list", "def split_phish_emails_by_time(self, phish_inbox, train_time_interval):\n train_start_time = train_time_interval[0]\n train_end_time = train_time_interval[1]\n train_indeces = []\n\n for i in range(len(phish_inbox)):\n email = phish_inbox[i]\n email_time = email.get_time()\n if email_time != None and email_time >= train_start_time and email_time < train_end_time:\n train_indeces.append(i)\n return train_indeces", "def getToonIdsAsList(self):\n return self.toonIds", "def gen_time_indices(all_tracks, step_interval):\n start = min([r.index.min() for r in all_tracks])\n end = max([r.index.max() for r in all_tracks])\n # Make some fake dates to get pandas to generate a time range, then strip date component\n dt_range = pd.date_range(\n start='2000/01/01T{}'.format(start),\n end='2000/01/01T{}'.format(end),\n freq=step_interval,\n )\n return [dt.strftime('%H:%M:%S') for dt in dt_range]", "def list_all_ids(self):\n values = []\n list = self.redis_server.hkeys(self.actinia_template_id_db)\n for entry in list:\n entry = entry.decode()\n values.append(entry)\n\n return values", "def list_begin_time(lijst):\n list_1 = []\n for film in lijst['filmsoptv']['film']:\n bewerk = datetime.datetime.fromtimestamp(\n int(film['starttijd'])\n ).strftime('%H:%M:%S')\n list_1.append(bewerk)\n return list_1", "def beats(peak_indices, time):\n logging.info(\"Identifying time of beats in ECG trace\")\n peak_times = list()\n for i in peak_indices:\n peak_time = time[i]\n peak_times.append(peak_time)\n return peak_times", "def ids(self, timestamp:str=None):\n base = \"\"\"\n SELECT id\n FROM raw_datas\n WHERE creation_date LIKE \"{}%\"\n \"\"\"\n base = base.format(timestamp)\n self.execute_query(base)\n id_list = []\n for i in self.last_cursor:\n id_list.append(i[0])\n for i in reversed(self.faulty_index):\n id_list.pop(i)\n return id_list", "def contestant_ids(self):\n return [p.contestant_id for p in [self.player1, self.player2]]", "def playing_trial_ids(self):\n return sorted([t.id for t in self.trials if not t.observing])", "def id_list(self):\n return numpy.array(self.spiketrains.keys(), int)", "def getMetricIds(self):\n metricId = []\n for m in self.session.query(MetricRow).all():\n metricId.append(m.metricId)\n return metricId", "def get_times_from_utterance(utterance: str) -> List[str]:\n pm_times = [int(pm_str.rstrip('pm')) * HOUR_TO_TWENTY_FOUR + TWELVE_TO_TWENTY_FOUR\n for pm_str in re.findall(r'\\d+pm', utterance)]\n am_times = [int(am_str.rstrip('am')) * HOUR_TO_TWENTY_FOUR\n for am_str in re.findall(r\"\\d+\", utterance)]\n oclock_times = [int(oclock_str.rstrip(\"o'clock\")) * HOUR_TO_TWENTY_FOUR\n for oclock_str in re.findall(r\"\\d+\\so'clock\", utterance)]\n oclock_times = oclock_times + [(oclock_time + TWELVE_TO_TWENTY_FOUR) % HOURS_IN_DAY \\\n for 
oclock_time in oclock_times]\n times = am_times + pm_times + oclock_times\n if 'noon' in utterance:\n times.append(1200)\n\n around_times = []\n if \"around\" in utterance or \"about\" in utterance:\n for time in times:\n around_times.append((time + AROUND_RANGE) % HOURS_IN_DAY)\n around_times.append((time - HOUR_TO_TWENTY_FOUR + AROUND_RANGE) % HOURS_IN_DAY)\n\n times += around_times\n\n return [str(time) for time in times]", "def ineligible_collection_ids(self, cutoff_timestamp):\n result = self._connection.fetch_all_rows(\n _ineligible_query, [cutoff_timestamp, ]\n )\n return [collection_id for (collection_id, ) in result]", "def get_messages_by_datetime(client: Client, threat_id: str, after: str, before: str):\n messages = []\n res = client.get_threat(threat_id)\n for message in res.get('messages'):\n # messages are ordered from newest to oldest\n received_time = message.get('receivedTime')\n if before >= received_time >= after:\n messages.append(message)\n elif received_time < after:\n break\n return messages", "def track_id_list(self) -> List[int]:\n _track_id_list: List[int] = np.unique(self.seq_df[\"TRACK_ID\"].values).tolist()\n return _track_id_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read Object File Format (.off) into Numpy 3D array.
def load_off(filename, size): # create 3D array (cube with edge = size) obj = np.zeros([size, size, size]) # open filename.off with open(filename) as f: # read first line header = f.readline() # returns a string # set properties properties = f.readline().split(" ") # returns a list of chars num_vertices = int(properties[0]) num_faces = int(properties[1]) num_edges = int(properties[2]) print("Properties:", "\nNumber of vertices:", num_vertices, "\nNUmber of faces: ", num_faces, "\nNumber of edges: ", num_edges) # read everything else body = f.readlines() # returns a list of strings if num_vertices != 0: vertices = body[0:num_vertices] else: raise ValueError("No vertex found.") if num_faces != 0: faces = body[num_vertices:num_vertices+num_faces] else: raise ValueError("No face found.") if num_edges != 0: edges = body[num_faces:num_faces+num_edges] # set vertices for i in range(num_vertices): coords = vertices[i].split(" ") if (int(float(coords[0])) < size) and (int(float(coords[1])) < size) and (int(float(coords[2])) < size): obj[int(float(coords[0])), int(float(coords[1])), int(float(coords[2]))] = 1 else: print("Error at vertex", i) return obj
[ "def data_from_fileobj(self, fileobj):\n dtype = self.get_data_dtype()\n shape = self.get_data_shape()\n offset = self.get_data_offset()\n return array_from_file(shape, dtype, fileobj, offset,\n order=self.data_layout)", "def cube_to_array(fname):\n cube_details = {}\n with open(fname, 'r') as cube:\n cube.readline()\n cube.readline() # ignore comments\n natm, cube_details['org'] = _getline(cube)\n nx, cube_details['xvec'] = _getline(cube)\n ny, cube_details['yvec'] = _getline(cube)\n nz, cube_details['zvec'] = _getline(cube)\n cube_details['atoms'] = [_getline(cube) for i in range(natm)]\n data = np.zeros((nx * ny * nz))\n idx = 0\n for line in cube:\n for val in line.strip().split():\n data[idx] = float(val)\n idx += 1\n data = np.reshape(data, (nx, ny, nz))\n cube.close()\n return data, cube_details", "def ReadOBJ(file, device='cpu'):\n\n with open(file) as f:\n lines = f.readlines()\n verts = np.array([list(map(float, line.split()[1:4])) for line in lines if line.startswith('v ')])\n faces = np.array([list(map(int, line.split()[1:4])) for line in lines if line.startswith('f ')])\n # Subtract 1 because the faces are 1 indexed and need to be 0 indexed for python\n f.close()\n faces -= 1\n\n verts = torch.tensor(verts, dtype=torch.float, device=device, requires_grad=False)\n faces = torch.tensor(faces, dtype=torch.long, device=device, requires_grad=False)\n\n return verts, faces", "def load_npz(file, obj, path='', strict=True):\n with numpy.load(file) as f:\n d = NpzDeserializer(f, path=path, strict=strict)\n d.load(obj)", "def read(filename='Omega2'):\n f=open(filename,'r').read()\n i = 0\n filetype, = unpack('<64s',f[i:i+64*strSize]) ; i += 64*strSize\n version, = unpack('<i',f[i:i+intSize]) ; i += intSize\n comment, = unpack('<1024s',f[i:i+1024*strSize]) ; i += 1024*strSize\n D,N_b,N_q = unpack('<3i',f[i:i+3*intSize]) ; i += 3*intSize\n Omega2 = unpack('<%id' % (N_q*N_b*D),f[i:])\n Omega2 = numpy.array(Omega2)\n Omega2.shape = (N_q,N_b*D)\n filetype = filetype.strip( '\\x00' )\n assert filetype == 'Omega2', \"File %s is not in correct format\" % filename\n return (filetype,version,comment.strip('\\x00')),Omega2", "def getNdArray(self):\n ndarray, _ = sep.read_file(self.vecfile)\n return ndarray", "def _read(self, datafile):\n inData = open(datafile, 'r')\n x = []\n y = []\n z = []\n\n # First read till end of header\n inData.readline()\n\n for line in inData:\n columns = line.split()\n x.append(columns[0])\n y.append(columns[1])\n z.append(columns[2])\n\n self.x,self.y,self.z = np.asarray(x), np.asarray(y), np.asarray(z)\n inData.close()", "def load_2D_mnc(self,filename):\n import pyminc.volumes.factory as pyminc\n mncfile=pyminc.volumeFromFile(filename)\n array_data = np.squeeze(np.array(mncfile.data))\n return array_data", "def array_from_c3d(self, file_c3d):\r\n\r\n reader = btk.btkAcquisitionFileReader()\r\n reader.SetFilename(file_c3d)\r\n reader.Update()\r\n acq = reader.GetOutput()\r\n\r\n N = acq.GetPointFrameNumber()\r\n nber_points = acq.GetPointNumber()\r\n\r\n result = []\r\n points_coords = np.array([acq.GetPoint(j).GetValues()\r\n for j in range(nber_points)])\r\n for i in range(N):\r\n result.append(points_coords[:, i])\r\n return np.array(result)", "def unpack(blob):\n # decompress if necessary\n if blob[0:5]=='ZL123':\n blobLen = np.fromstring(blob[6:14],dtype=np.uint64)[0]\n blob = zlib.decompress(blob[14:])\n assert(len(blob)==blobLen)\n\n blobType = blob[4]\n if blobType<>'A':\n raise DataJointError('only arrays are currently allowed in blobs')\n p = 5\n ndims = 
np.fromstring(blob[p:p+8], dtype=np.uint64)\n p+=8\n arrDims = np.fromstring(blob[p:p+8*ndims], dtype=np.uint64)\n p+=8*ndims\n mxType, dtype = mxClassID.items()[np.fromstring(blob[p:p+4],dtype=np.uint32)[0]]\n if dtype is None:\n raise DataJointError('Unsupported matlab datatype '+mxType+' in blob')\n p+=4\n complexity = np.fromstring(blob[p:p+4],dtype=np.uint32)[0]\n p+=4\n obj = np.fromstring(blob[p:], dtype=dtype)\n if complexity:\n obj = obj[:len(obj)/2] + 1j*obj[len(obj)/2:]\n obj = obj.reshape(arrDims)\n return obj", "def readUBC_DC3Dobs(fileName):\n\n # Load file\n obsfile = np.genfromtxt(fileName, delimiter=\" \\n\", dtype=np.str, comments=\"!\")\n\n # Pre-allocate\n srcLists = []\n Rx = []\n d = []\n wd = []\n # Flag for z value provided\n zflag = True\n poletx = False\n polerx = False\n\n # Countdown for number of obs/tx\n count = 0\n for ii in range(obsfile.shape[0]):\n\n if not obsfile[ii]:\n continue\n\n # First line is transmitter with number of receivers\n if count == 0:\n rx = []\n temp = np.fromstring(obsfile[ii], dtype=float, sep=\" \").T\n count = int(temp[-1])\n # Check if z value is provided, if False -> nan\n if len(temp) == 5:\n # check if pole-dipole\n if np.allclose(temp[0:2], temp[2:4]):\n tx = np.r_[temp[0:2], np.nan]\n poletx = True\n\n else:\n tx = np.r_[temp[0:2], np.nan, temp[2:4], np.nan]\n zflag = False\n\n else:\n # check if pole-dipole\n if np.allclose(temp[0:3], temp[3:6]):\n tx = np.r_[temp[0:3]]\n poletx = True\n temp[2] = -temp[2]\n else:\n # Flip z values\n temp[2] = -temp[2]\n temp[5] = -temp[5]\n tx = temp[:-1]\n\n continue\n\n temp = np.fromstring(obsfile[ii], dtype=float, sep=\" \")\n\n if zflag:\n\n # Check if Pole Receiver\n if np.allclose(temp[0:3], temp[3:6]):\n polerx = True\n # Flip z values\n temp[2] = -temp[2]\n rx.append(temp[:3])\n else:\n temp[2] = -temp[2]\n temp[5] = -temp[5]\n rx.append(temp[:-2])\n\n # Check if there is data with the location\n if len(temp) == 8:\n d.append(temp[-2])\n wd.append(temp[-1])\n\n else:\n # Check if Pole Receiver\n if np.allclose(temp[0:2], temp[2:4]):\n polerx = True\n # Flip z values\n rx.append(temp[:2])\n else:\n rx.append(np.r_[temp[0:2], np.nan, temp[2:4], np.nan])\n\n # Check if there is data with the location\n if len(temp) == 6:\n d.append(temp[-2])\n wd.append(temp[-1])\n\n count = count - 1\n\n # Reach the end of transmitter block\n if count == 0:\n rx = np.asarray(rx)\n if polerx:\n Rx = dc.Rx.Pole(rx[:, :3])\n else:\n Rx = dc.Rx.Dipole(rx[:, :3], rx[:, 3:])\n if poletx:\n srcLists.append(dc.Src.Pole([Rx], tx[:3]))\n else:\n srcLists.append(dc.Src.Dipole([Rx], tx[:3], tx[3:]))\n\n survey = dc.Survey(srcLists)\n data = Data(survey=survey, dobs=np.asarray(d), relative_error=np.asarray(wd))\n return data", "def read_localarray(fp):\n version = read_magic(fp)\n if version != (1, 0):\n msg = \"only support version (1,0) of file format, not %r\"\n raise ValueError(msg % (version,))\n\n __version__, dim_data = read_localarray_header(fp, version=(1, 0))\n\n buf = np.load(fp)\n\n distbuffer = {\n '__version__': __version__,\n 'dim_data': dim_data,\n 'buffer': buf,\n }\n\n return distbuffer", "def read_xyz(self): \n self.__rd_xyz_nmol()\n n_mol = self.dim['n_mol']\n \n filename = self.files['xyz'] \n fpin = open(filename, \"r\")\n \n model = []\n for i in xrange(n_mol):\n # number of atom, \n line = fpin.readline()\n natom = int(line)\n line = fpin.readline()[0:-1]\n molinfo = {'n_atom': natom, 'title':line}\n\n atom = []\n for j in range(natom):\n line = fpin.readline()\n rec = line.split()\n 
atomname, x, y, z= rec[0:4]\n record = {'name': atomname, 'coord': [float(x),float(y),float(z)]}\n atom.append(record)\n mol = {'info':molinfo, 'atoms':atom}\n model.append(mol)\n fpin.close()\n \n self.model = model\n\n return", "def read_from_phonopy_file(self, filename):\n\n fcfile = open(filename)\n num = int((fcfile.readline().strip().split())[0])\n force_constants = numpy.zeros((num, num, 3, 3), dtype=float)\n for i in range(num):\n for j in range(num):\n fcfile.readline()\n tensor = []\n for k in range(3):\n tensor.append([float(x) for x in fcfile.readline().strip().split()])\n force_constants[i, j] = numpy.array(tensor)\n\n self.set_data(force_constants)", "def readPartRnn(filepath):\n\n with open(filepath, 'rb') as fp:\n #read header\n bytes = fp.read(4*5)\n head = struct.unpack('iiiii', bytes)\n #read in densities\n bytes = fp.read()\n print(head)\n delta = struct.unpack('{0}f'.format(head[1]), bytes[:-4])\n dtype = np.dtype([('delta', float)])\n #delta = np.array(delta[:-1])\n delta = np.array(delta)\n delta.dtype = dtype\n\n return delta", "def load_offset(offset_file):\n offset_file = realpath(offset_file)\n return np.loadtxt(offset_file)", "def extract_array_from_pcd_obj(pcd):\n x = np.array(pcd.pc_data[\"x\"]).reshape(-1, 1)\n y = np.array(pcd.pc_data[\"y\"]).reshape(-1, 1)\n z = np.array(pcd.pc_data[\"z\"]).reshape(-1, 1)\n intensity = np.array(pcd.pc_data[\"intensity\"]).reshape(-1, 1)\n data = np.hstack([x, y, z, intensity])\n return data", "def test_read_data():\n data = glymur.Jp2k(AIA_193_JP2).read()\n assert isinstance(data, np.ndarray)", "def readMOLFile(filename):\n\tf = open(filename, 'r')\n\t#first three lines are irrelevant\n\tf.readline()\n\tf.readline()\n\tf.readline()\n\n\tn = int(f.readline()[0:3])#read the number of atoms\n\t#initialize the atomTypes and atomCoords arrays\n\t#atomCoords = [[0.0 for j in range(3)] for i in range(n)]\n\tatomCoords = [[] for i in range(n)]\n\tatomTypes = [0 for i in range(n)]\n\n\t#read info from the mole file into the arrays\n\tfor i in range(n):\n\t\tsplitLine = f.readline().split()\n\t\tatomCoords[i] = [float(splitLine[0]), float(splitLine[1]), float(splitLine[2])]\n\t\tatomTypes[i] = atomicSymbolToNumber(splitLine[3])\n\t\n\tf.close() #close the file\n\n\treturn MolecularGeometry(atomTypes,atomCoords) #return the MolecularGeometry object" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load all files from a folder into a 4D numpy array.
def load_folder(folder, size): # create a 4D array with first dimension the number of files num_files = len(os.listdir(folder)) print(folder, "contains", num_files, "objects.") dataset = np.zeros([num_files, size, size, size]) for index, filename in enumerate(os.listdir(folder)): print("\nImporting:", filename) dataset[index, :, :, :] = load_off(folder + filename, size) return dataset
[ "def get_data():\n frames = []\n filenames = []\n for imname in sorted(os.listdir(folder), key=numericalSort):\n if not imname.startswith('.'):\n im = imageio.imread(folder+'/'+imname)\n #im = im[:,180:1100,:]\n im = im[:,275:1000,:]\n im = skimage.transform.resize(im, (imageSize, imageSize, 3))\n img_arr = np.asarray(im)\n img_arr = preprocess_image(img_arr)\n frames.append(img_arr)\n filenames.append(imname)\n frames = np.asarray(frames)\n print('Finished converting frames to nparray')\n return frames, filenames", "def _load_array(self, path_list):\n array_list = []\n for path in path_list:\n array = np.load(path)\n for f in array.files:\n array_list.append(array[f])\n array = np.concatenate(array_list, axis=0)\n\n return array", "def files_to_MVP(directory):\n files = os.listdir(directory)\n body_count = num_bodies(files, directory)\n master = np.zeros((body_count,7,len(files))) ##ROW | COLS | TIME\n for index, file in enumerate(files):\n master[:,:,index] = np.genfromtxt(directory + file, delimiter=',')\n return master", "def load_and_concat(fpath: str, file_identifier: str) -> np.ndarray:\n arrays = []\n files = [f for f in os.listdir(fpath) if f[:len(file_identifier)] == file_identifier]\n file_numbers = np.array([int(f.replace(file_identifier, '').replace('-', '').replace('.npy', '')) for f in files])\n files = [files[i] for i in file_numbers.argsort()]\n\n for file in files:\n arrays.append(np.load(fpath + file, allow_pickle=True))\n\n return np.concatenate(arrays)", "def load_npy(filepath, filenames_list):\n if not os.path.exists(filepath):\n raise InvalidPathError(\"{} does not exist!\".format(filepath))\n data = []\n for i in range(len(filenames_list)):\n data.append(np.load(filepath + '/' + filenames_list[i]))\n return data", "def load_segmentation_maps(path):\n seg_map_dir = os.listdir(path)\n n = len(seg_map_dir)\n seg_map_arr = np.ndarray((n, 1, im_size, im_size), dtype='uint8')\n im_count = 0\n print(\"-\" * 30)\n for image_name in seg_map_dir:\n seg_map = cv2.imread(os.path.join(path, image_name), cv2.IMREAD_GRAYSCALE)\n seg_map = image.im_rescale(seg_map).astype('uint8')\n seg_map = image.resize_w_aspect(seg_map, im_size)\n seg_map_arr[im_count] = seg_map\n im_count += 1\n print(\"Loaded segmentation image number %i\" % im_count)\n return seg_map_arr", "def read_images(img_paths):\n imgs = np.empty([len(img_paths), 160, 320, 3])\n\n for i, path in enumerate(img_paths):\n imgs[i] = imread(path)\n #image = load_img(path, target_size=(160, 320))\n #imgs[i] = img_to_array(image)\n\n return imgs", "def load_groundtruths(folder_path, num_images):\n imgs = []\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n # See if it is better to use dtype = int\n hot_img = convert_image_to_hot(img)\n imgs.append(hot_img)\n else:\n print('File ' + image_path + ' does not exist')\n #imgs = np.around(imgs) # Uncomment if we want to round values.\n imgs_array = np.asarray(imgs)\n print('imgs_array shape:', imgs_array.shape)\n return imgs_array", "def load_data(data_dir):\n images_result = []\n labels_result = []\n\n for dir_name in os.listdir(data_dir):\n path = os.path.join(data_dir, dir_name)\n directory_id = int(dir_name)\n images_list = os.listdir(path)\n\n print(f\"Loading {path} folder...\")\n for image_file in os.listdir(path):\n image_path = os.path.join(path, image_file)\n image_cv = cv2.imread(image_path)\n 
image_resized = cv2.resize(image_cv, (IMG_WIDTH, IMG_HEIGHT))\n\n images_result.append(image_resized)\n labels_result.append(directory_id)\n\n return (images_result, labels_result)", "def load_N_molecules(N, pathout = '/misc/vlgscratch4/BrunaGroup/sulem/chem/data/molecules/debug.pickle'):\n \n dir_path = '/misc/vlgscratch4/BrunaGroup/sulem/chem/data/dsgdb9nsd.xyz'\n files = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]\n \n molecules = []\n for f in range(N):\n file = join(dir_path, files[f])\n molecules.append(pre.xyz_to_molecule(file))\n \n with open(pathout,'wb') as fileout:\n pickle.dump(molecules,fileout)\n \n return molecules", "def loadData(path = \"../data/\"):\n\n I = None\n L = None\n s = None \n images = None\n \n for i in range(7):\n j = i+1\n temp = imread(path + 'input_' + str(j) + '.tif')\n temp = rgb2xyz(temp)\n fors = np.copy(temp)\n temp = temp[:,:,1] #Just take luminance (Y)\n ipyn = np.copy(temp)\n print(ipyn.shape)\n temp = np.reshape(temp, (temp.shape[0]*temp.shape[1]))\n \n \n if i == 0:\n I = np.copy(temp)\n images = np.copy(ipyn)\n else:\n I = np.vstack((I, temp))\n images = np.vstack((images, ipyn))\n \n sources = np.load(path + 'sources.npy')\n L = np.copy(sources)\n L = L.T\n \n # s = (431, 369, 3)\n s = (fors.shape[0], fors.shape[1])\n \n print(L.shape, temp.shape, I.shape, s)\n \n return I, L, s, images", "def read_multi_dim_data(filename):\n dataset =[]\n\n ##from tutorial\n\n return dataset", "def load_images(path):\r\n images = []\r\n for file_name in os.listdir(path):\r\n each_image = games.load_image(path + os.sep + file_name).convert()\r\n images.append(each_image)\r\n return images", "def array_final_matrices_in_dir(path:str):\n file_name_array = glob.glob(path + '/*.csv')\n\n matrix_array = []\n for file_name in file_name_array:\n curr_matrix = deque()\n breaks_seen = 0\n for line in _readlines_reverse(file_name):\n if line == '':\n # blank line is the last in file (first read)\n pass\n elif (line == '<break/>' or line == '<break/>\\n'):\n if breaks_seen <1:\n breaks_seen += 1\n else:\n break\n else:\n curr_matrix.appendleft(np.array(line.strip().split(sep=','), dtype=float))\n matrix_array.append(np.array(curr_matrix))\n matrix_array = np.array(matrix_array)\n\n return matrix_array", "def load_data(data_dir):\n # Read data in from files\n images = []\n labels = []\n for folder in range(0, NUM_CATEGORIES):\n full_folder = os.path.join(data_dir, str(folder))\n for filename in os.listdir(full_folder):\n # print(f\"Reading and resizing file {filename} from folder {full_folder}...\")\n img = cv2.imread(os.path.join(full_folder, filename), 1)\n if img is not None:\n res = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT), interpolation=cv2.INTER_AREA)\n images.append(res)\n labels.append(folder)\n else:\n continue\n # print(f\"ERROR: Issue reading file {filename} from folder {full_folder}!\")\n\n return images, labels", "def load_npy():\n cell_data = []\n arr = np.load(INPUT_NPY_PATH + '/' + FILE_TO_READ)\n label_arr = np.load(INPUT_NPY_PATH + '/' + FILE_TO_READ.split('.')[0] + '_labels.npy')\n\n IMAGE_ID = FILE_TO_READ.split('.')[0] + '.jpg'\n\n # read table image; the path is where you store the images for each table\n img_cv = cv2.imread(IMAGE_PATH.format(IMAGE_ID))\n\n # add image name, should be deleted after we have image id as input\n row_num = 0\n for row in arr:\n if label_arr[row_num] == 0 or row[0] == row[2] or row[1] == row[3]:\n row_num += 1\n continue\n row = row.tolist()\n row.insert(0, label_arr[row_num]) # insert cell data type\n 
cell_data.append(row)\n row_num += 1\n\n sort_data(cell_data, img_cv)", "def load_npy(self, filename):\n self.set_data(np.load(filename))", "def load_data(data_dir):\n # Change into data directory\n os.chdir(data_dir)\n images = []\n labels = []\n\n # Repeat as many times as categories exist\n for i in range(NUM_CATEGORIES):\n\n # Enter \"i\"'th folder\n path = os.path.join(os.getcwd(), str(i))\n os.chdir(path)\n\n # Get all images in folder\n files = os.listdir('.')\n\n # For each image, add it to images array\n for image in files:\n \n # Read image and convert it to RGB from BGR\n img = cv2.imread(image)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # If not correct size, resize image\n shape = img.shape\n if shape != (IMG_HEIGHT, IMG_WIDTH, 3):\n img = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH))\n\n # Update arrays\n images.append(img)\n labels.append(i)\n\n # Go back to data folder\n os.chdir(\"..\")\n\n return(images, labels)", "def load_all_blocks(folder_path):\n\n import os\n import numpy as np\n gt_files = np.sort(os.listdir(folder_path))\n\n block_list=[]\n\n for file in gt_files:\n\n block_path=os.path.join(folder_path, file)\n\n block_list.append(load_block(block_path))\n\n return block_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a 3d solid using points with straight connection edges, azimuth_placement_angle and rotation_angle.
def create_solid(self): # Creates a cadquery solid from points and revolves solid = ( cq.Workplane(self.workplane) .polyline(self.points) .close() .extrude(distance=-self.distance / 2.0, both=self.extrude_both) ) # Checks if the azimuth_placement_angle is a list of angles if isinstance(self.azimuth_placement_angle, Iterable): rotated_solids = [] # Perform seperate rotations for each angle for angle in self.azimuth_placement_angle: rotated_solids.append( solid.rotate( (0, 0, -1), (0, 0, 1), angle)) solid = cq.Workplane(self.workplane) # Joins the seperate solids together for i in rotated_solids: solid = solid.union(i) else: # Peform rotations for a single azimuth_placement_angle angle solid = solid.rotate( (0, 0, -1), (0, 0, 1), self.azimuth_placement_angle) self.perform_boolean_operations(solid) return solid
[ "def solid(self):\n return RotatedShape(shape_in=self.endplate.solid,\n rotation_point=self.position.point,\n vector=self.main[0].surface.position.orientation.Vx,\n angle=radians(-self.cant),\n label=\"right_side\",\n hidden=self.hide)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def addCube(self,x,y,z,size):\n p1 = mp.point(x,y,z)\n p2 = mp.point(x+size,y,z)\n p3 = mp.point(x,y+size,z)\n p4 = mp.point(x,y,z+size)\n p5 = mp.point(x+size,y+size,z)\n p6 = mp.point(x+size,y,z+size)\n p7 = mp.point(x,y+size,z+size)\n p8 = mp.point(x+size,y+size,z+size)\n self.surfaces.append(surface([0,0,0],[p1,p2,p5,p3])) #z constant\n self.surfaces.append(surface([0,0,0],[p4,p6,p8,p7])) #z constant\n self.surfaces.append(surface([0,0,0],[p1,p2,p6,p4])) #y constant\n self.surfaces.append(surface([0,0,0],[p3,p5,p8,p7])) #y constant\n self.surfaces.append(surface([0,0,0],[p1,p3,p7,p4])) #x constant\n self.surfaces.append(surface([0,0,0],[p2,p5,p8,p6])) #x constant", "def cylinder_3d(self, start_x, start_y, start_z, end_x, end_y, end_z, start_radius, end_radius, flags):\n self._cylinder_3d(start_x, start_y, start_z, end_x, end_y, end_z, start_radius, end_radius, flags)", "def rotateZ(self, angle):\n (cosa, sina) = cos_and_sin(angle)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def get_3d(self) -> \"ProjectionGeometry\":\n if self.ndim == 2:\n if self.det_shape_vu is not None:\n new_det_shape_vu = np.ones(2, dtype=int)\n new_det_shape_vu[-len(self.det_shape_vu) :] = self.det_shape_vu\n else:\n new_det_shape_vu = None\n return dc_replace(self, geom_type=self.geom_type.replace(\"2d\", \"3d\"), det_shape_vu=new_det_shape_vu)\n else:\n return dc_replace(self)", "def extent_3d(self):\n\n minxy, maxxy = self.extent_2d()\n cs = self.cs\n xyz0 = cs.xyz_from_oriented((minxy[0], minxy[1], 0.0))\n xyz1 = cs.xyz_from_oriented((maxxy[0], minxy[1], 0.0))\n xyz2 = cs.xyz_from_oriented((maxxy[0], maxxy[1], 0.0))\n xyz3 = cs.xyz_from_oriented((minxy[0], maxxy[1], 0.0))\n\n \"\"\"\n xyz0 = cs.xyz_from_oriented((self.x0, self.y0, 0.0))\n xyz1 = cs.xyz_from_oriented((self.x0 + (self.nx - 1) * self.dx,\n self.y0,\n 0.0))\n xyz2 = cs.xyz_from_oriented((self.x0 + (self.nx - 1) * self.dx,\n self.y0 + (self.ny - 1) * self.dy, 0.0))\n xyz3 = cs.xyz_from_oriented((self.x0,\n self.y0 + (self.ny - 1) * self.dy,\n 0.0))\n \"\"\"\n\n minxyz = (min(xyz0[0], xyz1[0], xyz2[0], xyz3[0]),\n min(xyz0[1], xyz1[1], xyz2[1], xyz3[1]),\n min(xyz0[2], xyz1[2], xyz2[2], xyz3[2]))\n maxxyz = (max(xyz0[0], xyz1[0], xyz2[0], xyz3[0]),\n max(xyz0[1], xyz1[1], xyz2[1], xyz3[1]),\n max(xyz0[2], xyz1[2], xyz2[2], xyz3[2]))\n\n return minxyz, maxxyz", "def init_from_3d_ccw_angle_around_z(cls, theta, degrees=True):\n if degrees:\n theta = np.deg2rad(theta)\n return Rotation(\n np.array(\n [\n [np.cos(theta), -np.sin(theta), 0],\n [np.sin(theta), np.cos(theta), 0],\n [0, 0, 1],\n ]\n ),\n skip_checks=True,\n )", "def plot_3d(pts):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n xs, ys, zs = zip(*pts)\n ax.scatter(xs, ys, zs, c='r', marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.show()", "def polyPlatonicSolid(sideLength=\"string\", texture=int, axis=\"string\", radius=\"string\", solidType=int, createUVs=int, constructionHistory=bool, name=\"string\"):\n pass", "def 
plot_graspable(graspable, contact, c1=0, c2=0, draw_plane=False):\n # Plot SDF\n fig = plt.figure()\n graspable.sdf.scatter()\n\n # Plotting tangent plane and projection\n normal, t1, t2 = contact.tangents()\n ax = plt.gca()\n contact_ = graspable.sdf.transform_pt_obj_to_grid(contact.point)\n n_ = graspable.sdf.transform_pt_obj_to_grid(normal)\n t1_ = graspable.sdf.transform_pt_obj_to_grid(t1)\n t2_ = graspable.sdf.transform_pt_obj_to_grid(t2)\n n_ = n_ / np.linalg.norm(n_)\n t1_ = t1_ / np.linalg.norm(t1_)\n t2_ = t2_ / np.linalg.norm(t2_)\n\n t1_x, t1_y, t1_z = zip(contact_, contact_ + t1_)\n t2_x, t2_y, t2_z = zip(contact_, contact_ + t2_)\n n_x, n_y, n_z = zip(contact_ + c1 * t1_ + c2 * t2_,\n contact_ + c1 * t1_ + c2 * t2_ + n_)\n\n from matplotlib.patches import FancyArrowPatch\n from mpl_toolkits.mplot3d import proj3d\n class Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n kwargs.update(dict(mutation_scale=20, lw=1, arrowstyle='-|>'))\n FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n FancyArrowPatch.draw(self, renderer)\n t1_vec = Arrow3D(t1_x, t1_y, t1_z, color='c')\n t2_vec = Arrow3D(t2_x, t2_y, t2_z, color='m')\n n_vec = Arrow3D(n_x, n_y, n_z, color='k')\n ax.add_artist(t1_vec)\n ax.add_artist(t2_vec)\n ax.add_artist(n_vec)\n\n if draw_plane:\n n_ = np.cross(t1_, t2_)\n n_ = n_ / np.linalg.norm(n_)\n d = -contact_.dot(n_)\n xx, yy = np.meshgrid(range(8, 16), range(8, 16))\n z = (-n_[0] * xx - n_[1] * yy - d) * 1. / n_[2]\n ax.plot_surface(xx, yy, z, rstride=1, cstride=1, color='r')", "def test_create_3D(self):\n segmentation = adapter.SFFSegmentation()\n segmentation.name = rw.random_word()\n segmentation.primary_descriptor = u\"three_d_volume\"\n # transforms\n transforms = adapter.SFFTransformList()\n transforms.append(\n adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12)))\n )\n )\n transforms.append(\n adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12)))\n )\n )\n transforms.append(\n adapter.SFFTransformationMatrix(\n rows=3,\n cols=4,\n data=\" \".join(map(_str, range(12)))\n )\n )\n # bounding_box\n xmax = _random_integer(start=500)\n ymax = _random_integer(start=500)\n zmax = _random_integer(start=500)\n segmentation.bounding_box = adapter.SFFBoundingBox(\n xmax=xmax,\n ymax=ymax,\n zmax=zmax\n )\n # lattice container\n lattices = adapter.SFFLatticeList()\n # lattice 1\n # binlist = numpy.array([random.randint(0, 5) for i in _xrange(20 * 20 * 20)]).reshape(20, 20, 20)\n binlist = numpy.random.randint(0, 5, size=(20, 20, 20))\n lattice = adapter.SFFLattice(\n mode=u'uint32',\n endianness=u'little',\n size=adapter.SFFVolumeStructure(cols=20, rows=20, sections=20),\n start=adapter.SFFVolumeIndex(cols=0, rows=0, sections=0),\n data=binlist,\n )\n lattices.append(lattice)\n # lattice 2\n # binlist2 = numpy.array([random.random() * 100 for i in _xrange(30 * 40 * 50)]).reshape(30, 40, 50)\n binlist2 = numpy.random.rand(30, 40, 50) * 100\n lattice2 = adapter.SFFLattice(\n mode=u'float32',\n endianness=u'big',\n size=adapter.SFFVolumeStructure(cols=30, rows=40, sections=50),\n start=adapter.SFFVolumeIndex(cols=-50, rows=-40, sections=100),\n data=binlist2,\n )\n lattices.append(lattice2)\n # segments\n segments = adapter.SFFSegmentList()\n # segment 
one\n segment = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))\n vol1_value = 1\n segment.three_d_volume = adapter.SFFThreeDVolume(\n lattice_id=0,\n value=vol1_value,\n )\n segments.append(segment)\n # segment two\n segment = adapter.SFFSegment(colour=adapter.SFFRGBA(random_colour=True))\n vol2_value = 37.1\n segment.three_d_volume = adapter.SFFThreeDVolume(\n lattice_id=1,\n value=vol2_value\n )\n # add segment to segments\n segments.append(segment)\n segmentation.transforms = transforms\n segmentation.segments = segments\n segmentation.lattices = lattices\n # export\n # self.stderr(segmentation)\n # self.stderrj(segmentation.as_json())\n segmentation.export(self.three_d_volume_file)\n # assertions\n self.assertRegex(\n _str(segmentation),\n r\"\"\"SFFSegmentation\\(name=\"\\w+\", version=\"{}\"\\)\"\"\".format(\n EMDB_SFF_VERSION\n )\n )\n self.assertEqual(segmentation.primary_descriptor, u\"three_d_volume\")\n self.assertEqual(segmentation.bounding_box.xmin, 0)\n self.assertEqual(segmentation.bounding_box.xmax, xmax)\n self.assertEqual(segmentation.bounding_box.ymin, 0)\n self.assertEqual(segmentation.bounding_box.ymax, ymax)\n self.assertEqual(segmentation.bounding_box.zmin, 0)\n self.assertEqual(segmentation.bounding_box.zmax, zmax)\n # test the number of transforms\n self.assertTrue(len(segmentation.transforms) > 0)\n # test the transform IDs\n t_ids = map(lambda t: t.id, segmentation.transforms)\n self.assertCountEqual(t_ids, range(3))\n # segments\n self.assertEqual(len(segmentation.segments), 2)\n # segment one\n segment = segmentation.segments[0]\n # volume\n self.assertEqual(segment.three_d_volume.lattice_id, 0)\n self.assertEqual(segment.three_d_volume.value, vol1_value)\n # segment two\n segment = segmentation.segments.get_by_id(2)\n # volume\n self.assertEqual(segment.three_d_volume.lattice_id, 1)\n self.assertEqual(segment.three_d_volume.value, vol2_value)\n # lattices\n lattices = segmentation.lattices\n self.assertEqual(len(lattices), 2)\n # lattice one\n lattice1 = lattices.get_by_id(0)\n self.assertEqual(lattice1.mode, u'uint32')\n self.assertEqual(lattice1.endianness, u'little')\n self.assertCountEqual(lattice1.size.value, (20, 20, 20))\n self.assertCountEqual(lattice1.start.value, (0, 0, 0))\n # lattice two\n self.assertEqual(lattice2.mode, u'float32')\n self.assertEqual(lattice2.endianness, u'big')\n self.assertCountEqual(lattice2.size.value, (30, 40, 50))\n self.assertCountEqual(lattice2.start.value, (-50, -40, 100))", "def _get_spacecraft_points(self):\n #points are in NED coordinates\n points = np.array([[1, 1, 0], # point 1\n [1, -1, 0], # point 2\n [-1, -1, 0], # point 3\n [-1, 1, 0], # point 4\n [1, 1, -2], # point 5\n [1, -1, -2], # point 6\n [-1, -1, -2], # point 7\n [-1, 1, -2], # point 8\n [1.5, 1.5, 0], # point 9\n [1.5, -1.5, 0], # point 10\n [-1.5, -1.5, 0], # point 11\n [-1.5, 1.5, 0], # point 12\n ]).T\n # scale points for better rendering\n scale = 10\n points = scale * points\n\n # define the colors for each face of triangular mesh\n red = np.array([1., 0., 0., 1])\n green = np.array([0., 1., 0., 1])\n blue = np.array([0., 0., 1., 1])\n yellow = np.array([1., 1., 0., 1])\n meshColors = np.empty((12, 3, 4), dtype=np.float32)\n meshColors[0] = yellow # front\n meshColors[1] = yellow # front\n meshColors[2] = blue # back\n meshColors[3] = blue # back\n meshColors[4] = blue # right\n meshColors[5] = blue # right\n meshColors[6] = blue # left\n meshColors[7] = blue # left\n meshColors[8] = blue # top\n meshColors[9] = blue # top\n 
meshColors[10] = green # bottom\n meshColors[11] = green # bottom\n return points, meshColors", "def createPlane(r=0.5, dr=0.1):\r\n bounds = np.array([[-r, -r, 0], [r, r, 0]]) / dr\r\n bounds = np.stack((np.floor(bounds[0]), np.ceil(bounds[1]))) * dr\r\n nx, ny, nz = np.ceil((bounds[1] - bounds[0]) / dr).astype(int)\r\n # print(nx,ny)\r\n xyz = np.reshape([[[[i, j, 0], [i + 1, j, 0], [i, j + 1, 0],\r\n [i, j + 1, 0], [i + 1, j, 0], [i + 1, j + 1, 0]] for i in range(nx - 1)] for j in\r\n range(ny - 1)], (-1, 3))\r\n xyz = (xyz - ((nx - 1) / 2, (ny - 1) / 2, 0)) * dr\r\n # xyz, bounds, (nx, ny, nz) = create_grid(bounds, dr)\r\n # print(nx, ny, nz)\r\n triangles = np.arange(xyz.shape[0]).reshape((-1, 3))\r\n plane = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(\r\n xyz), o3d.utility.Vector3iVector(triangles))\r\n # assign checkerboard color pattern\r\n c0 = (0.323, 0.78, 0.321) # first color\r\n c1 = (0.863, 0.62, 0.343) # second color\r\n colors = np.reshape([[np.tile(c0 if (i + j) % 2 else c1, (6, 1)) for i in range(nx - 1)] for j in range(ny - 1)],\r\n (-1, 3))\r\n plane.vertex_colors = o3d.utility.Vector3dVector(colors)\r\n plane.compute_triangle_normals()\r\n return plane", "def gen_3s(img, a, cen, sz=16):\n tf_xy = get_tf_xynormal(cen)\n tf_xy = tf_rotateXYZ(tf_xy, a)\n xy = gen_slice(img, tf_xy, sz2ext(sz))\n\n tf_yz = get_tf_yznormal(cen)\n tf_yz = tf_rotateXYZ(tf_yz, a)\n yz = gen_slice(img, tf_yz, sz2ext(sz))\n\n tf_xz = get_tf_xznormal(cen)\n tf_xz = tf_rotateXYZ(tf_xz, a)\n xz = gen_slice(img, tf_xz, sz2ext(sz))\n\n return xy, yz, xz", "def point_3d(self, x, y, z):\n self._point_3d(x, y, z)", "def add_solid(self, solid, rotation=None, displacement=None):\n\n if rotation is None:\n rotation = np.identity(3)\n else:\n rotation = np.asarray(rotation, dtype=np.float32)\n\n if rotation.shape != (3,3):\n raise ValueError('rotation matrix has the wrong shape.')\n\n self.solid_rotations.append(rotation.astype(np.float32))\n\n if displacement is None:\n displacement = np.zeros(3)\n else:\n displacement = np.asarray(displacement, dtype=np.float32)\n\n if displacement.shape != (3,):\n raise ValueError('displacement vector has the wrong shape.')\n\n self.solid_displacements.append(displacement)\n\n self.solids.append(solid)\n\n return len(self.solids)-1", "def extrude(self, displaced):\n points = len(self.vertices)\n\n #construct edge information\n edges = self.edges()\n\n boundary = npi.multiplicity(self.order_edges(edges)) == 1\n eb = edges[boundary]\n if len(eb) == 0: raise Exception('Surface to be extruded is closed, thus does not have a boundary')\n\n #construct closed solid\n boundary_tris = np.concatenate((\n np.concatenate((eb[:,::-1], eb[:,0:1]+points),axis=1),\n np.concatenate((eb[:,::+1]+points, eb[:,1:2]),axis=1)\n ))\n\n copy_tris = self.faces[:, ::-1] + points\n\n solid_points = np.concatenate((self.vertices, displaced))\n solid_tris = np.concatenate((self.faces, copy_tris, boundary_tris))\n\n mesh = Mesh(solid_points, solid_tris)\n if mesh.volume() < 0:\n mesh.faces = mesh.faces[:, ::-1]\n return mesh", "def min_z_plane(self):\n center_of_side = self.center_point\n center_of_side[2] = self.min_z\n return Plane(center_of_side, vg.basis.z)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a random initial configuration with values from 0 to base-1 inclusive and, if positional arguments are given, use the supplied percentages for the different states.
def __init__(self, base=2, *percentages): self.values = range(base) self.percentages = percentages self.make_percentages_cumulative(percentages)
[ "def init_params_random(self) -> None:\n self.probs = Dirichlet(self.prior).sample()", "def random_param_init(dim):\n # TODO\n pass", "def init_params_random(self) -> None:\n prec_m = Gamma(self.prec_alpha_prior,\n self.prec_beta_prior)\n self.precs = prec_m.sample()\n\n means_m = MultivariateNormal(loc=self.means_prior,\n precision_matrix=(self.n0 *\n self.prec_alpha_prior /\n self.prec_beta_prior\n ).diag())\n self.means = means_m.sample()", "def _default_expected_speed_sample_func(self):\n return random.uniform(40, 80)", "def transition_fcn_1d(state: float):\n # Should return a value of 1/5 = 0.2\n return uniform(loc=state, scale=5)", "def __init__(self):\n self.prob_heads = random()", "def uniform_probability(self, args = []):\n\t\tself.probability = 1", "def guess_params(true_params, percent):\n\tP = copy_params(true_params, False)\n\tfor i in ['Me', 'Re', 'n']:\n\t\tbulge = i+'B'\n\t\tdisc = i+'D'\n\t\tP[bulge].value *= (1.- percent)\n\t\tP[disc].value *= (1.+ percent)\n\tP['nD'].value = 1.\n\tP['nD'].vary = False\n\treturn P", "def rand_init_state(self):\n state = np.random.random((self.lattice, self.lattice))\n state[state >= 0.5] = 1\n state[state < 0.5] = -1\n return state", "def stat_gen(stat_precedence: dict, level: int) -> dict:\n stat_final = {}\n for stat in stat_precedence:\n prec = stat_precedence[stat]\n if prec < 1:\n prec = 1\n stat_final[stat] = random.randrange(1, prec)+(level)\n return stat_final", "def initial_sample_state(self, param_groups):\n return nest.map_structure(torch.zeros_like, param_groups)", "def gen_bg_def():\n\n while True:\n\n bg_params = [None, None]\n\n bg_params[0] = np.random.choice(OFF_OPTS, p=OFF_PROBS)\n bg_params[1] = np.random.choice(SL_OPTS, p=SL_PROBS)\n\n yield bg_params", "def define_params_and_initial_state_distributions(self):\n\n label2ind = dict(zip(list(self.fitted_cov.columns), np.arange(len(self.fitted_cov.columns))))\n for i in self.fitted_params.index:\n r = self.fitted_params['id'][i] # region\n self._all_internal_params_distribs[r] = dict(b_fit=LogNormalDist(params=mv2musig(self.fitted_params['b1_mean'][i],\n self.fitted_cov['b1_pop'][label2ind['b1_pop']]),\n stochastic=self.stochastic),\n r_fit=NormalDist(params=[0.034, 0.034 * self.noise], stochastic=self.stochastic),\n N=DiracDist(params=self.fitted_params['popsize'][i], stochastic=self.stochastic),\n N_av=DiracDist(params=float(np.mean(self.fitted_params['popsize'])), stochastic=self.stochastic),\n Dq_fit=LogNormalDist(params=mv2musig(self.fitted_params['Dq_mean'][i],\n self.fitted_cov['Dq_pop'][label2ind['Dq_pop']]),\n stochastic=self.stochastic),\n De=NormalDist(params=[5.1, 5.1 * self.noise], stochastic=self.stochastic),\n Dh=NormalDist(params=[30, 30 * self.noise], stochastic=self.stochastic),\n Di=NormalDist(params=[2.3, 2.3 * self.noise], stochastic=self.stochastic),\n alpha=NormalDist(params=[0.55, 0.55 * self.noise], stochastic=self.stochastic),\n icu=DiracDist(params=self.fitted_params['ICUcapacity'][i], stochastic=self.stochastic),\n beta1=NormalDist(params=[self.fitted_params['betaw1_mean'][i],\n np.sqrt(self.fitted_cov['betaw1_pop'][label2ind['betaw1_pop']])],\n stochastic=self.stochastic),\n beta2=NormalDist(params=[self.fitted_params['betaw2_mean'][i],\n np.sqrt(self.fitted_cov['betaw2_pop'][label2ind['betaw2_pop']])],\n stochastic=self.stochastic),\n beta3=NormalDist(params=[self.fitted_params['betaw3_mean'][i],\n np.sqrt(self.fitted_cov['betaw3_pop'][label2ind['betaw3_pop']])],\n stochastic=self.stochastic),\n 
beta4=NormalDist(params=[self.fitted_params['betaw4_mean'][i],\n np.sqrt(self.fitted_cov['betaw4_pop'][label2ind['betaw4_pop']])],\n stochastic=self.stochastic),\n )\n self._all_initial_state_distribs[r] = dict(E0=LogNormalDist(params=mv2musig(self.fitted_params['initE_mean'][i], self.fitted_cov['initE_pop'][label2ind['initE_pop']]),\n stochastic=self.stochastic),\n I0=DiracDist(params=self.fitted_params['I0_kalman_mean'][i], stochastic=self.stochastic),\n R0=DiracDist(params=0, stochastic=self.stochastic),\n A0=DiracDist(params=1, stochastic=self.stochastic), # is updated below\n H0=DiracDist(params=self.fitted_params['H0_kalman_mean'][i], stochastic=self.stochastic)\n )", "def sample (self, percent):\n self.total = 0\n self.total_unique = None\n for key in self.__ranges.keys():\n num = int(len(self.__ranges[key][0])*percent)\n self.__ranges[key][0]=array(BYTE4,sorted(random_sample(self.__ranges[key][0],num)))\n num = int(len(self.__ranges[key][1])*percent)\n self.__ranges[key][1]=array(BYTE4,sorted(random_sample(self.__ranges[key][1],num)))\n self.total += len(self.__ranges[key][0]) + len(self.__ranges[key][1])\n self.__counts[key] = [[],[]]", "def init_params_random(self):\n raise NotImplementedError(\"init_params_random method not implemented\")", "def initialize_1nucl():\n bases = defaultdict(dict)\n bases['A'] = ['A', 'A']\n bases['T'] = ['T', 'T']\n bases['G'] = ['G', 'G']\n bases['C'] = ['C', 'C']\n bases['N'] = ['N', 'N']\n bases['W'] = ['A', 'T']\n bases['R'] = ['A', 'G']\n bases['M'] = ['A', 'C']\n bases['K'] = ['G', 'T']\n bases['Y'] = ['C', 'T']\n bases['S'] = ['C', 'G']\n return bases", "def random_init(self, deg_sampler, args):\n self.g = gt.random_graph(N=self.N,\n deg_sampler=lambda: deg_sampler(**args),\n directed=False,\n parallel_edges=False,\n random=True)", "def __init__(self) :\n\t\t#later may want to pass in something that effects the probability of certain events\n\t\tself._rand = Random()\n\t\tself._seed = self._rand.seed()\n\t\tself._randNum = self._rand.randint(0,100)\n\t\tself._noEventRange = range(0,50)\n\t\tself._goodEventRange = range(50,75)\n\t\tself._badEventRange = range(75,101)", "def _GenerateFromG1G2(config, base, value_type):\n req = { 'g1' : float, 'g2' : float }\n kwargs, safe = GetAllParams(config, base, req=req)\n #print(base['obj_num'],'Generate from G1G2: kwargs = ',kwargs)\n return Shear(**kwargs), safe" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turns a function that takes the radius and maximum distance as arguments into a function compatible with the `DensityDistributedConfiguration` class.
def function_of_radius(function, max_dist="diagonal"): if max_dist == "shortest": calc_max_dist = lambda size: min(size) elif max_dist == "longest": calc_max_dist = lambda size: max(size) elif max_dist == "diagonal": def calc_max_dist(size): halves = [num / 2 for num in size] squares = [num ** 2 for num in halves] return math.sqrt(sum(squares)) def wrapper(*args): dists = [] half = len(args) // 2 for num in range(half): center = args[num + half] / 2 dists.append(abs(center - args[num])) squares = [num ** 2 for num in dists] dist = math.sqrt(sum(squares)) return function(dist, calc_max_dist(args[half:])) return wrapper
[ "def create_evaluate_func_deap(self) -> typing.Callable:\n def evaluate_function(values: typing.List) -> typing.Tuple:\n \"\"\"\n Simply map the chromosome values into a dict like the original config file and return that to the evaluate function\n :param values: the \"chromosone\" or parameter for this \"individual\"\n :return a tuple containing the score of fitness\n \"\"\"\n config = self.map_tuning_config_back(values)\n try:\n return (self.evaluate_outside_function(config), )\n except Exception as e:\n warnings.warn(\"there was an error while evaluating the function: {}\".format(e))\n return tuple((float('inf'), )) if self.minimize else tuple((float(\"-inf\"), ))\n return evaluate_function", "def getDistLambda(loc, maxDist):\n return lambda args:Geo.dist(args['Location'], loc) <= maxDist", "def dist_calc(x, y):\n def func(x1, y1):\n return math.sqrt(math.pow(x1-x, 2) + math.pow(y1-y, 2))\n return func", "def get_on_fit_config(config: DictConfig):\n\n def fit_config_fn(server_round: int):\n return {\n \"lr\": config.lr,\n \"momentum\": config.momentum,\n \"local_epochs\": config.local_epochs,\n }\n\n return fit_config_fn", "def create(\n distanceFunction=..., similarityFunction=..., similarityParameter=...\n ) -> retval:\n ...", "def dynamicTreeCut(distance_df, func='hybrid', method='average', **cluster_kws):\n stats = importr('stats')\n dynamicTreeCut = importr('dynamicTreeCut')\n dist = stats.as_dist(distance_df)\n link = stats.hclust(dist, method=method)\n if func == 'hybrid':\n dist = stats.as_dist(distance_df)\n clustering = dynamicTreeCut.cutreeHybrid(link, distance_df, **cluster_kws)\n return np.array(clustering[0])\n elif func == 'dynamic':\n clustering = dynamicTreeCut.cutreeDynamic(link, **cluster_kws)\n return np.array(clustering)", "def optimize_sdp(n, dist_func, dist_arg=None, verbose=False):\n \n dist_matrix_sq = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n dist_matrix_sq[i,j] = dist_func(i,j,dist_arg)**2\n \n # create variables\n delta = cp.Variable((n,n)) #symmetric matrix\n G = cp.Variable((n-1,n-1),PSD=True)\n D2 = cp.Variable() #this is the variable for D^2\n \n #define the constraints\n constraints = [G >> 0,\n D2 >= 0,\n dist_matrix_sq <= delta\n ]\n for i in range(1,n):\n for j in range(1,n):\n constraints += [\n D2*dist_matrix_sq[i,j] >= delta[i,j],\n G[i-1,j-1] == 1/2*(delta[0,i]+delta[0,j]-delta[i,j])\n ]\n \n \n objective = cp.Minimize(D2) #later we take the square root\n \n prob = cp.Problem(objective,constraints)\n prob.solve()\n \n status = prob.status\n D = D2.value\n D = np.sqrt(D)\n G = G.value\n delta = delta.value\n \n return [status, D, G, delta]", "def max_radius():\r\n return 20", "def conf(func):\n\n func._is_conf = True\n return func", "def cost(config, dist):\n cost = np.sum([dist[config[i]][config[(i + 1) % len(config)]] for i in range(len(config))])\n return cost", "def get_density_matrix_cost_fn(circuit):\n\n def cost_fn(x):\n res = circuit(x)\n probs = qml.math.abs(res) ** 2\n return probs[0][0]\n\n return cost_fn", "def _node_cut(self,configs,wf):\n ne=configs.shape[1]\n d2=0.0\n for e in range(ne):\n d2+=np.sum(wf.gradient(e,configs[:,e,:])**2,axis=0)\n r=1./(d2*ne*ne)\n return r < self.nodal_cutoff**2", "def create_network_function_device_config(self, context, request_data):\n\n try:\n nfp_context = module_context.init()\n log_info = request_data.get('info')\n logging_context = log_info['context'].get('logging_context', {})\n nfp_context['log_context'] = logging_context\n LOG.info(\"Received RPC CREATE NETWORK FUNCTION 
DEVICE CONFIG \"\n \"for %(service_type)s, NFI: %(nfi)s, \"\n \"NF_ID: %(nf_id)s\",\n {'service_type': request_data['info']['service_type'],\n 'nfi': request_data['info']['context']['nfi_id'],\n 'nf_id': request_data['info']['context']['nf_id']})\n\n self._invoke_service_agent('create', request_data, True)\n except Exception as err:\n msg = (\"Failed to create network device configuration. %s\" %\n str(err).capitalize())\n LOG.error(msg)", "def InputSoluteParameters(parameters_dict, c_int = 0, D = 1e-11, alpha = 1e-11, kappa = 1e-13, K = 0.1, delta = 2.5*1e-4, dt_mult = 1000, dt = 0.001):\n nx, ny = parameters_dict[\"nx\"], parameters_dict[\"ny\"] #retrieve grid size \n dx = parameters_dict['dx']\n\n #-- set initial solute concentration based on c_option choice \n c0 = c_int * np.ones((nx, ny))\n\n #-- nondimensionalise parameters\n L = parameters_dict['L']\n T = parameters_dict['T']\n cM = 1e-9 #concentration ng/ml\n\n D1 = D * T / L**2\n\n alpha1 = (alpha * T / cM) #non-dim production rate \n kappa1 = (kappa * T / cM) #non-dim production rate \n delta1 = (delta * T) #non-dim degradation rate \n\n #-- set appropriate timestep based on explicit FD scheme limits \n #dt_mult scales this timestep \n dx2 = parameters_dict['dx2']\n maxtimestep = (dx2 / (2 * D1) )\n dt = dt_mult * maxtimestep\n dt = dt\n\n #-- update parameters dictionary\n parameters_dict[\"dt\"] = dt\n parameters_dict['c0'] = c0 \n parameters_dict[\"D\"] = D1\n parameters_dict[\"alpha\"] = alpha1\n parameters_dict[\"kappa\"] = kappa1\n parameters_dict[\"K\"] = K\n parameters_dict[\"delta\"] = delta1\n\n return c0, parameters_dict", "def from_configuration(cls, graph, configuration):\n fuzzy_network = cls(\n configuration['parameters']['nodes'], \n configuration['parameters']['base'], \n graph)\n # Replace lambdas.\n conjunction = configuration['fuzzy']['conjunction']\n disjunction = configuration['fuzzy']['disjunction']\n negation = configuration['fuzzy']['negation']\n lambdas_probabilities, lambdas = get_fuzzy_lambdas(conjunction, disjunction, negation)\n fuzzy_network.lambdas_probabilities = lambdas_probabilities\n fuzzy_network.lambdas = lambdas\n return fuzzy_network", "def dimensionless_mass_density(self, scaled_radius, conc):\n return nfw_dimensionless_mass_density(scaled_radius, conc)", "def configure_simulation(independent_variable_value, input_params, empirical_profile, original_team_size,\n configuration_function, simulation_configuration):\n\n return configuration_function(independent_variable_value=independent_variable_value, input_params=input_params,\n empirical_profile=empirical_profile, original_team_size=original_team_size,\n simulation_configuration=simulation_configuration)", "def get_config(self):\n config = super(GaussianKernel, self).get_config()\n config.update({\"kernel_length\": self.kernel_length})\n return config", "def dnearest_inter_padding(l1, l2, dist_function, filt=None, func=min):\n def _internal(l1, l2, n, idx, nprocs, shared_arr, dist_function):\n for i in xrange(idx, n, nprocs):\n # if i % 100 == 0:\n # progressbar(i, n)\n shared_arr[i] = _min(\n ifilter(filt, (dist_function(l1[i], el2) for el2 in l2)),\n func)\n\n n = len(l1)\n nprocs = min(mp.cpu_count(), n)\n shared_array = mp.Array('d', [0.] 
* n)\n procs = []\n try:\n for idx in xrange(nprocs):\n p = mp.Process(target=_internal,\n args=(l1, l2, n, idx, nprocs, shared_array,\n dist_function))\n p.start()\n procs.append(p)\n\n for p in procs:\n p.join()\n except (KeyboardInterrupt, SystemExit):\n term_processes(procs, 'Exit signal received\\n')\n except BaseException as msg:\n term_processes(procs, 'ERROR: %s\\n' % msg)\n\n # progressbar(n, n)\n return shared_array" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes a set of parameters for the neural network
def initialize_parameters(self): self.n_inputs = len(self.df.columns[:-1]) self.n_hidden_per_layer = 3 self.n_hidden = 2 self.n_outputs = len(self.df.Class.unique()) if self.c_t == "classification" else 1 self.learning_rate = .07 self.epochs = 3 self.momentum_factor = .5 self.performance = 0
[ "def __initialize_parameters(self):\n parameters = dict()\n parameters['weights_recognition'] = {\n 'l1': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_visible, self.__nb_hidden)),\n 'mean': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_hidden, self.__nb_z)),\n 'log_std_squared': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_hidden, self.__nb_z))\n }\n parameters['biases_recognition'] = {\n 'l1': numpy.zeros((1, self.__nb_hidden)),\n 'mean': numpy.zeros((1, self.__nb_z)),\n 'log_std_squared': numpy.zeros((1, self.__nb_z))\n }\n parameters['weights_generation'] = {\n 'l1': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_z, self.__nb_hidden)),\n 'mean': numpy.random.normal(loc=0.,\n scale=0.01,\n size=(self.__nb_hidden, self.__nb_visible))\n }\n parameters['biases_generation'] = {\n 'l1': numpy.zeros((1, self.__nb_hidden)),\n 'mean': numpy.zeros((1, self.__nb_visible))\n }\n return parameters", "def defaultParams(self):\n self.blurs = [[-1, self.fileRes], [-1, self.fileRes],[-1, self.fileRes]] \n self.gradient = [[False,True], [False,True], [False,True]]\n self.similarityMetric = [[\"CC\", \"CC\"],[\"CC\", \"CC\"],[\"CC\", \"CC\"]]\n self.weight = [[1,1],[1,1],[1,1]]\n self.radiusHisto = [[3,3],[3,3],[3,3]]\n self.transformationModel = [\"SyN[0.1]\", \"SyN[0.1]\", \"SyN[0.1]\"]\n self.regularization = [\"Gauss[2,1]\", \"Gauss[2,1]\", \"Gauss[2,1]\"]\n self.iterations = [\"100x100x100x0\", \"100x100x100x20\", \"100x100x100x100\"]\n self.useMask = [False, True, True]\n self.memoryRequired = [0.177, 1.385e-7, 2.1e-7]", "def init_parameters(self):\r\n self.guessed_fishes_dict = {}\r\n self.train_index = 0", "def _initialize_parameters(state_machine, n_features):\n return np.zeros((state_machine.n_states \n + state_machine.n_transitions,\n n_features))", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.diagonal_weight_indices = torch.LongTensor([[node for node in range(self.ncount)], [node for node in range(self.ncount)]])\n self.diagonal_weight_indices = self.diagonal_weight_indices\n self.diagonal_weight_filter = torch.nn.Parameter(torch.Tensor(self.ncount, 1))", "def _init_param(self): \n \n param={}\n param['y'] = self.x_oris if self.FixedOris else self.x \n param['u'] = self.t\n for s in ['y','u']: param['N'+s] = len(param[s])\n param['sigma_d'] = self.sigma_d\n param['sigma_0'] = self.sigma_0\n param['m_0'] = np.log10(self.I_0)\n param['ell_0'] = self.ell_0\n param['tau_0'] = self.tau_0\n param['d'] = self.data.ravel() # d vector Nxt\n self._param = param\n # we set non-computed attributes to None\n # that way, we know if these attributess are already computed or need to be computed\n keys = ['MAP','samples','samples_stats', 'I_MAP', 'logZ', \n '_theta_MAP','_H_MAP','_H_MAP_D','_H_MAP_R', '_thetas']\n for key in keys: setattr(self, key, None)", "def __init__(self, num_inputs, num_outputs, num_in_hidden_layer_1=None, num_in_hidden_layer_2=None):\n self.num_inputs = num_inputs\n self.num_outputs = num_outputs\n self.num_in_hidden_layer_1 = num_in_hidden_layer_1\n self.num_in_hidden_layer_2 = num_in_hidden_layer_2\n\n self.init()", "def __init__(self, layers):\n self.num_layers = len(layers)\n self.layers = layers\n self.weights = [np.random.randn(layers[i], layers[i-1] + 1) for i in range(1, len(layers))]", "def _create_state_init_parameters(self):\n self.init_ws, self.init_bs, self.init_norms = [], [], []\n # shallow copy of the state shapes:\n state_shapes 
= list(self.rnn_pre_attention.state_shape)\n if self.rnn_post_attention:\n state_shapes += self.rnn_post_attention.state_shape\n for state_idx, (_, init_num_hidden) in enumerate(state_shapes):\n self.init_ws.append(mx.sym.Variable(\"%senc2decinit_%d_weight\" % (self.prefix, state_idx)))\n self.init_bs.append(mx.sym.Variable(\"%senc2decinit_%d_bias\" % (self.prefix, state_idx)))\n if self.config.layer_normalization:\n self.init_norms.append(layers.LayerNormalization(prefix=\"%senc2decinit_%d_norm\" % (self.prefix,\n state_idx)))", "def __init__(self, state_dim, hidden_dim, init_w=3e-3):\n super(ValueNetwork, self).__init__()\n\n self.linear1 = nn.Linear(state_dim, hidden_dim)\n self.linear2 = nn.Linear(hidden_dim, hidden_dim)\n self.linear3 = nn.Linear(hidden_dim, 1)\n\n self.linear3.weight.data.uniform_(-init_w, init_w)\n self.linear3.bias.data.uniform_(-init_w, init_w)", "def __init__(self, num_zeros=2, num_poles=1, learning_rate=0.1):\n\n self.zs = [tf.Variable(tf.random.uniform(shape=(1,), dtype=tf.float64)) for i in range(num_zeros)] # pure real\n self.ps = [tf.Variable(tf.random.uniform(shape=(2,), dtype=tf.float64)) for i in range(num_poles)] # complex (includ CC)\n self.g = tf.Variable(tf.random.uniform(shape=(1,), dtype=tf.float64)) # gain\n self.train_vars = self.zs + self.ps + [self.g]\n self.learning_rate = learning_rate\n\n for p in self.ps:\n p.assign(tf.clip_by_norm(p, MAX_POLE_NORM))\n\n # list addition with pure Python\n # all Tensorflow Variables\n self.train_vars = self.zs + self.ps + [self.g]", "def init_params(options):\n\tparams = OrderedDict()\n\t# embedding\n\trandn = numpy.random.rand(options['n_words'],\n\t\t\t\t\t\t\t options['dim_proj'])\n\tparams['Wemb'] = (0.01 * randn).astype(config.floatX)\n\t#what does this line do \n\tparams = param_init_lstm(options,\n\t\t\t\t\t\t\t\t\t\t\t params,\n\t\t\t\t\t\t\t\t\t\t\t prefix=options['encoder'])\n\t# classifier\n\tparams['U'] = 0.01 * numpy.random.randn(options['dim_proj'],\n\t\t\t\t\t\t\t\t\t\t\toptions['ydim']).astype(config.floatX)\n\tparams['b'] = numpy.zeros((options['ydim'],)).astype(config.floatX)\n\n\n\treturn params", "def __init__(self, layer_sizes, class_labels, params=None):\n self.num_features = layer_sizes[0]\n self.num_classes = layer_sizes[-1]\n self.class_labels = class_labels\n self.params = params or dict()\n\n # initialize MLP\n self.model = cv2.ANN_MLP()\n self.model.create(layer_sizes)", "def setup_params(self):\n params = {}\n\n \"\"\"\n train_params defines the training parameters consisting of \n - the data provider that reads the data, preprocesses it and enqueues it into\n the data queue\n - the data queue that batches and if specified shuffles the data and provides \n the input to the model\n - other configuration parameters like the number of training steps\n It's arguments are\n data_params: defines how the data is read in.\n queue_params: defines how the data is presented to the model, i.e.\n if it is shuffled or not and how big of a batch size is used.\n targets: the targets to be extracted and evaluated in the tensorflow session\n num_steps: number of training steps\n thres_loss: if the loss exceeds thres_loss the training will be stopped\n validate_first: run validation before starting the training\n \"\"\"\n\n params['inter_op_parallelism_threads'] = 500\n\n params['train_params'] = {\n 'data_params': {\n # ImageNet data provider arguments\n 'func': Combine_world,\n 'cfg_dataset': self.Config.datasets,\n 'group': 'train',\n 'crop_size': self.Config.crop_size,\n # TFRecords (super 
class) data provider arguments\n 'file_pattern': 'train*.tfrecords',\n 'batch_size': 1, #self.Config.batch_size,\n 'shuffle': False,\n 'shuffle_seed': self.Config.seed,\n 'file_grab_func': self.subselect_tfrecords,\n 'n_threads': 1,#sum(self.Config.datasets.values()),\n },\n 'queue_params': {\n 'queue_type': 'random',\n 'batch_size': self.Config.batch_size,\n 'seed': self.Config.seed,\n 'capacity': self.Config.batch_size * 10,\n 'min_after_dequeue': self.Config.batch_size * 5,\n },\n 'targets': {\n 'func': self.return_outputs,\n 'targets': [],\n },\n 'num_steps': self.Config.train_steps,\n 'thres_loss': self.Config.thres_loss,\n 'validate_first': False,\n }\n\n \"\"\"\n validation_params similar to train_params defines the validation parameters.\n It has the same arguments as train_params and additionally\n agg_func: function that aggregates the validation results across batches,\n e.g. to calculate the mean of across batch losses\n online_agg_func: function that aggregates the validation results across\n batches in an online manner, e.g. to calculate the RUNNING mean across\n batch losses\n \"\"\"\n \"\"\"\n params['validation_params'] = {\n 'topn_val': {\n 'data_params': {\n # ImageNet data provider arguments\n 'func': ImageNetDataProvider,\n 'data_path': self.Config.data_path,\n 'group': 'val',\n 'crop_size': self.Config.crop_size,\n # TFRecords (super class) data provider arguments\n 'file_pattern': 'validation*.tfrecords',\n 'batch_size': self.Config.batch_size,\n 'shuffle': False,\n 'shuffle_seed': self.Config.seed,\n 'file_grab_func': self.subselect_tfrecords,\n 'n_threads': 4,\n },\n 'queue_params': {\n 'queue_type': 'fifo',\n 'batch_size': self.Config.batch_size,\n 'seed': self.Config.seed,\n 'capacity': self.Config.batch_size * 10,\n 'min_after_dequeue': self.Config.batch_size * 5,\n },\n 'targets': {\n 'func': self.in_top_k,\n },\n 'num_steps': self.Config.val_steps,\n 'agg_func': self.agg_mean, \n 'online_agg_func': self.online_agg_mean,\n }\n }\n \"\"\"\n params['validation_params'] = {}\n \"\"\"\n model_params defines the model i.e. the architecture that \n takes the output of the data provider as input and outputs \n the prediction of the model.\n\n You will need to EDIT alexnet_model in models.py. alexnet_model \n is supposed to define a standard AlexNet model in tensorflow. \n Please open models.py and fill out the missing parts in the alexnet_model \n function. Once you start working with different models you will need to\n switch out alexnet_model with your model function.\n \"\"\"\n params['model_params'] = {\n 'func': self.Config.ytn.inference,\n }\n\n \"\"\"\n loss_params defines your training loss.\n\n You will need to EDIT 'loss_per_case_func'. \n Implement a softmax cross-entropy loss. You can use tensorflow's \n tf.nn.sparse_softmax_cross_entropy_with_logits function.\n \n Note: \n 1.) loss_per_case_func is called with\n loss_per_case_func(inputs, outputs)\n by tfutils.\n 2.) 
labels = outputs['labels']\n logits = outputs['pred']\n \"\"\"\n def loss_wrapper(inputs, outputs):\n # coco\n predicts = outputs['bboxes']\n gt_boxes = tf.reshape(tf.cast(outputs['boxes'], tf.int32), [self.Config.batch_size, -1, 5])\n num_objects = outputs['num_objects']\n coco_loss, _, _ = self.Config.ytn.loss(predicts, gt_boxes, num_objects)\n # imagenet\n labels = outputs['labels']\n logits = outputs['logits']\n imagenet_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)\n print(imagenet_loss, coco_loss)\n return imagenet_loss + coco_loss\n \n params['loss_params'] = {\n 'targets': ['labels'],\n 'agg_func': tf.reduce_mean,\n 'loss_per_case_func': loss_wrapper,\n 'loss_per_case_func_params' : {'_outputs': 'outputs', \n '_targets_$all': 'inputs'},\n 'loss_func_kwargs' : {},\n }\n\n \"\"\"\n learning_rate_params defines the learning rate, decay and learning function.\n\n You will need to EDIT this part. Replace the exponential decay \n learning rate policy with a piecewise constant learning policy.\n ATTENTION: \n 1.) 'learning_rate', 'decay_steps', 'decay_rate' and 'staircase' are not\n arguments of tf.train.piecewise_constant! You will need to replace\n them with the appropriate keys. \n 2.) 'func' passes global_step as input to your learning rate policy \n function. Set the 'x' argument of tf.train.piecewise_constant to\n global_step.\n 3.) set 'values' to [0.01, 0.005, 0.001, 0.0005] and\n 'boundaries' to [150000, 300000, 450000] for a batch size of 256\n 4.) You will need to delete all keys except for 'func' and replace them\n with the input arguments to \n \"\"\"\n \n params['learning_rate_params'] = {\t\n 'func': tf.train.exponential_decay,\n 'learning_rate': 0.001,\n 'decay_steps': 5000, # FIX LATER,\n 'decay_rate': 0.95,\n 'staircase': True,\n }\n\n \"\"\"\n optimizer_params defines the optimizer.\n\n You will need to EDIT the optimizer class. Replace the Adam optimizer\n with a momentum optimizer after switching the learning rate policy to\n piecewise constant.\n \"\"\"\n params['optimizer_params'] = {\n 'func': optimizer.ClipOptimizer,\n 'optimizer_class': tf.train.AdamOptimizer,\n 'clip': False,\n }\n\n \"\"\"\n save_params defines how, where and when your training results are saved\n in the database.\n\n You will need to EDIT this part. Set your 'host' (set it to 'localhost',\n or to IP if using remote mongodb), 'port' (set it to 24444, unless you \n have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'. \n \"\"\"\n params['save_params'] = {\n 'host': '35.199.154.71 ',\n 'port': 24444,\n 'dbname': 'final',\n 'collname': 'yolo',\n 'exp_id': 'combined_fix',\n 'save_valid_freq': 10000,\n 'save_filters_freq': 5000,\n 'cache_filters_freq': 5000,\n 'save_metrics_freq': 200,\n 'save_initial_filters' : False,\n 'save_to_gfs': [],\n }\n\n \"\"\"\n load_params defines how and if a model should be restored from the database.\n\n You will need to EDIT this part. Set your 'host' (set it to 'localhost',\n or to IP if using remote mongodb), 'port' (set it to 24444, unless you \n have changed mongodb.conf), 'dbname', 'collname', and 'exp_id'. 
\n\n If you want to restore your training these parameters should be the same \n as in 'save_params'.\n \"\"\"\n params['load_params'] = {\n 'host': '35.199.154.71 ',\n 'port': 24444,\n 'dbname': 'final',\n 'collname': 'yolo',\n 'exp_id': 'imagenet',\n 'do_restore': True,\n 'load_query': None,\n }\n\n return params", "def _initialize_train(self):\n self._train_input = acme_utils.prefetch(self._build_train_input())\n\n # Check we haven't already restored params\n if self._byol_state is None:\n logging.info(\n 'Initializing parameters rather than restoring from checkpoint.')\n\n # initialize Byol and setup optimizer state\n inputs = next(self._train_input)\n init_byol = jax.pmap(self._make_initial_state, axis_name='i')\n\n # Init uses the same RNG key on all hosts+devices to ensure everyone\n # computes the same initial state and parameters.\n init_rng = jax.random.PRNGKey(self._random_seed)\n init_rng = helpers.bcast_local_devices(init_rng)\n\n self._byol_state = init_byol(rng=init_rng, dummy_input=inputs)", "def _init_network_variables(self):\n\n # Here we defind placeholders ...\n with tf.variable_scope('input'):\n # ... for the input of the syndrome increments\n self.x = tf.placeholder(tf.float32,\n [None, None, self.dim_syndr],\n name='x_input')\n # ... for the input of the final syndrome increments\n self.fx = tf.placeholder(tf.float32, [None, self.dim_fsyndr],\n name='fx_input')\n # ... for the parity of the bitflips\n self.y = tf.placeholder(tf.float32, [None, 1], name='y_input')\n # ... for the number of stabilizer measurement cycles in a sequence\n self.length = tf.placeholder(tf.int32, [None], name='length_input')\n\n with tf.variable_scope('training_parameters'):\n # ... for the learning rate\n self.lr = tf.placeholder(tf.float32, name='learning_rate')\n # ... for the weighing of the auxillary head\n self.alf = tf.placeholder(tf.float32, name='aux_loss_factor')\n\n # ... for the dropout (keep probabilities)\n self.lstm_kp = tf.placeholder(tf.float32, name='lstm_keep_probability')\n self.ff_kp = tf.placeholder(tf.float32, name='ff_keep_probability')\n\n with tf.variable_scope('summary_placeholders'):\n # ... for the tensorboard summaries\n self.plog = tf.placeholder(tf.float32, name='plog_train')\n self.plog_aux = tf.placeholder(tf.float32, name='plog_aux_train')\n self.tot_cost = tf.placeholder(tf.float32, name='tot_cost')", "def set_parameters(self, params):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n i = 0\r\n for param in self.network.parameters():\r\n param = prams[i]\r\n i = i+1", "def __init__(self, model_params, training_params):\n # Set model and training parameters as instance variables\n\n\n self._model_params = deepcopy(model_params)\n self._training_params = deepcopy(training_params)", "def initialize(self):\n # self.gc1.reset_parameters()\n # self.gc2.reset_parameters()\n nn.init.xavier_uniform_(self.gc2.fc.weight)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Backpropagates errors through neural network, assigning a delta weight value to each node. This delta weight value is the change that the node will make to its weight
def backpropagate(self, expected): #Assigns delta values to each node in the output layer and calculates momentum for i in range(len(self.output_layer)): node = self.output_layer[i] node.delta_weight = expected[i] - node.output #Backpropagates errors through hidden layers for i in reversed(range(len(self.NN[:-1]))): layer = self.NN[i] #Iterates through each node in a layer for j in range(len(layer)): error = 0 cur_node = layer[j] #Iterates through each node in the next layer up for node in self.NN[i+1]: error += node.weights[j] * node.delta_weight cur_node.delta_weight = error * cur_node.derivative()
[ "def backpropagate(self, outputs, learning_rate, momentum):\n errors = {}\n for node in self.nodes:\n calculated_value = self.values[node]\n real_value = outputs[node - 1]\n\n errors[node] = ((real_value - calculated_value) *\n calculated_value *\n (1 - calculated_value))\n\n return errors", "def backpropagate(self, x, y):\r\n signal=x\r\n signals = [signal]\r\n weighted_sums = []\r\n for w,b in zip(self.weights, self.biases):\r\n weighted_sum = np.dot(w, signal) + b\r\n weighted_sums.append(weighted_sum)\r\n signal = sigmoid(weighted_sum)\r\n signals.append(signal)\r\n \r\n error = signals[-1]-y\r\n delta = error*sigmoid_prime(weighted_sums[-1])\r\n deltas = [delta]\r\n \r\n for i in range(1, len(self.weights)):\r\n delta = np.dot(self.weights[-i].transpose(),\r\n delta)*sigmoid_prime(weighted_sums[-i-1])\r\n deltas.insert(0, delta)\r\n \r\n nabla_w = []\r\n for i in range(len(self.weights)):\r\n nabla_w.append(np.dot(deltas[i],signals[i].transpose()))\r\n return (deltas, nabla_w)", "def backward_propagation(self, batch_loss, alpha = 0.001, eta = 0.4):\n\t\tif self.optimizer == 'normal':\n\t\t\t## Calculating E(t)\n\t\t\t# E_t_4 = np.multiply(-(self.y - self.output) * (self.output), self.layer3) ##For weights 4\n\t\t\tE_t_3 = np.multiply(-(self.y - self.output) * (self.output), self.layer2) ##For Weights 3\n\t\t\tE_t_2 = np.multiply(-(self.y - self.output) * (self.output), self.layer1) ##For Weights 2\n\t\t\tE_t_1 = np.multiply(-(self.y - self.output) * (self.output), self.input) ##For Weights 1\n\n\t\t\t##Compute d_weights(t)\n\t\t\t# d_weights_t_4 = np.add((-eta * E_t_4), np.multiply(alpha,self.d_weights_prev_t_4))\n\t\t\td_weights_t_3 = np.add((-eta * E_t_3), np.multiply(alpha,self.d_weights_prev_t_3))\n\t\t\td_weights_t_2 =\tnp.add((-eta * E_t_2), np.multiply(alpha,self.d_weights_prev_t_2))\n\t\t\td_weights_t_1 = np.add((-eta * E_t_1), np.multiply(alpha,self.d_weights_prev_t_1))\n\n\t\t\t##Update the Weights using the derived formula\n\t\t\t# self.weights4 = np.add(self.weights4, d_weights_t_4)\n\t\t\tself.weights3 = self.weights3 + d_weights_t_3\n\t\t\tself.weights2 = self.weights2 + d_weights_t_2\n\t\t\tself.weights1 = self.weights1 + d_weights_t_1\n\n\t\t\t# self.weights1 = MinMaxFuzzy.normalise(self.weights1)\n\t\t\t# self.weights2 = MinMaxFuzzy.normalise(self.weights2)\n\t\t\t# self.weights3 = MinMaxFuzzy.normalise(self.weights3)\n\n\t\t\t## Change the Values of d_weight(t-1)\n\t\t\t# d_weights_prev_t_4 = d_weights_t_4\n\t\t\td_weights_prev_t_3 = d_weights_t_3\n\t\t\td_weights_prev_t_2 = d_weights_t_2\n\t\t\td_weights_prev_t_1 = d_weights_t_1\n\n\t\telif self.optimizer == 'sgd':\n\n\t\t\td_weights_t_3 = 2 * (batch_loss) * self.layer2\n\t\t\td_weights_t_2 = 2 * (batch_loss) * np.dot(self.weights3.T,self.layer1)\n\t\t\td_weights_t_1 = 2 * (batch_loss) * np.dot(np.dot(self.weights3,self.weights2).T,self.input)\n\n\t\t\t## Update the Weights\n\t\t\t# self.weights4 = self.weights4 - (alpha * d_weights_t_4)\n\t\t\tself.weights3 = np.subtract(self.weights3,(alpha * d_weights_t_3))\n\t\t\tself.weights2 = np.subtract(self.weights2,(alpha * d_weights_t_2))\n\t\t\tself.weights1 = np.subtract(self.weights1,(alpha * d_weights_t_1))", "def _backpropagation(self, error):\n # backward passes\n for layer in reversed(self.layers):\n error = layer.backward(error)", "def adjustWeight(self, deltaWeight):\n self.lastWeightDelta = deltaWeight\n #before = self.weight\n self.weight = self.weight + (deltaWeight * self.inputNeuron.getOutput())\n #print 'adjusting %s with delta %s: %s => %s' % (self, 
deltaWeight, before, self.weight)", "def update_node_weights(self, inputs):\n\n #Iterates through each node in each layer\n for i in range(len(self.NN)):\n for node in self.NN[i]:\n #Iterates through each value in the inputs and assigns weights\n for j in range(len(inputs)):\n #Multiplies the weight gradient by the learning rate and input value\n weight_update = self.learning_rate * node.delta_weight * inputs[j]\n #Adjusts the weight with momentum\n node.weights[j] += weight_update + node.momentum[j]\n #Adjusts the momentum value\n node.momentum[j] = weight_update\n #Updates the bias node\n node.weights[-1] += self.learning_rate * node.delta_weight\n #Sets the new inputs to the output vector of current layer\n inputs = [node.output for node in self.NN[i]]", "def weight_loss(self):\n weight_loss_hash = {}\n for key in self.graph.nodes():\n print key, type(key),self.candidates[40961]['weight_change'],self.canid\n weight_loss_hash[key] = float(self.candidates[key]['weight_change'])\n\n weight_loss_nodes = self._annotate_graph(weight_loss_hash, \"weight_change\")", "def backward_propagation(Y, states, parameters, layer_dims, gradients, weights):\n\n L = len(layer_dims) - 1\n m = Y.shape[1]\n w_min = weights['w_min']\n w_maj = weights['w_maj']\n\n gradients['dA' + str(L)] = - (np.divide(Y * w_min, states['A' + str(L)]) - np.divide((1 - Y) * w_maj, 1 - states['A' + str(L)]))\n gradients['dZ' + str(L)] = np.multiply( gradients['dA' + str(L)], sigmoid_deriv(states['Z' + str(L)]) )\n gradients['dW' + str(L)] = np.dot(gradients['dZ' + str(L)], states['A' + str(L-1)].T) / m\n gradients['db' + str(L)] = np.sum(gradients['dZ' + str(L)], axis=1, keepdims=True) / m\n\n for l in reversed(range(1, L)): # L-1 ... 1\n gradients['dA' + str(l)] = np.dot(parameters['W' + str(l+1)].T, gradients['dZ' + str(l+1)])\n gradients['dZ'+ str(l)] = np.multiply(gradients['dA' + str(l)] , relu_deriv(states['Z' + str(l)]))\n gradients['dW' + str(l)] = np.dot(gradients['dZ' + str(l)], states['A' + str(l-1)].T) / m\n gradients['db' + str(l)] = np.sum(gradients['dZ' + str(l)], axis=1, keepdims=True) / m\n\n return gradients", "def back_propagation(y,val_dict):\n # get backprop derivative with respect to y at every node lower than y\n forward_pass(y,val_dict)\n initialize(y,y)\n backward(y,val_dict)", "def backward_pass(self, w, delta):\n batch_size = float(delta.shape[0])\n self.delta = np.multiply(np.dot(w, delta.T).T, self.act.act_der(self.z))\n self.db = np.sum(self.delta, axis = 0, keepdims = True) / batch_size\n self.dw = np.dot(self.x.T, self.delta) / batch_size\n return self.delta", "def update_wights(self, weights: np.ndarray, data_input: np.ndarray, layer_outputs: np.ndarray, weight_error_matrix: np.ndarray, alpha=.1):\n for layer_index in range(len(weights)-1, -1, -1):\n layer_weights = weights[layer_index]\n\n # get the previous layer INPUTS\n if layer_index == 0:\n layer_output = np.array(data_input)\n else:\n # get the previous layer INPUTS\n layer_output = layer_outputs[layer_index - 1]\n\n layer_error = weight_error_matrix[layer_index]\n\n # Add BIAS to input values, in last position, as done in estimating\n layer_output = np.append(layer_output, [1])\n\n # weight_delta = layer_output * layer_error\n layer_output = layer_output.reshape(1, len(layer_output))\n layer_error = layer_error.reshape(1, len(layer_error))\n weight_delta = layer_output * layer_error.transpose()\n\n weight_delta = alpha * weight_delta\n\n weights[layer_index] = layer_weights + weight_delta\n\n return weights", "def backprop(self, 
example):\n eg_input, exp_output = example\n weighted_ins, layer_outs = self.layer_outputs(eg_input)\n\n # Error of the output layer:\n error = np.empty_like(layer_outs)\n error[-1] = self.cost_fn.grad(layer_outs[-1], exp_output, weighted_ins[-1])\n\n # Perform the backpropagation itself to produce all error terms\n for i in range(len(error) - 2, -1, -1):\n error[i] = np.dot(self.layers[i+1].ws.transpose(), error[i+1]) * self.layers[i].xfer_fn.dx(weighted_ins[i])\n\n w_grad = [np.empty_like(layer.ws) for layer in self.layers]\n w_grad[0] = error[0] * eg_input.transpose()\n for i in range(1, len(w_grad)):\n w_grad[i] = error[i] * layer_outs[i-1].transpose()\n\n return w_grad, error", "def backpropagate(self, outputs, labels):\n\n\t\t# Calculate error at the output layer\n\t\terror = np.array(labels - outputs)\n\n\t\t# List containing arrays of the errors of all nodes from inputLayer to the outputLayer\n\t\terrorVector = [error]\n\n\t\t# Start at the ouput player and go backwards to the input layer\n\t\tfor layerWeights in reversed(self.weights):\n\t\t\t# Get layernodes error\n\t\t\terror = np.dot(layerWeights.T, error)\n\n\t\t\t# Prepend (since we start at the outputlayer and move to the input layer) error to the errorVector\n\t\t\terrorVector.insert(0, error)\n\n\t\treturn np.array(errorVector)", "def _elastic_net_derivative(weights):\n return np.sign(weights) + weights", "def backpropagation(activations_list, weights_list, labels):\n assert len(activations_list) == len(weights_list)\n N = len(activations_list)\n deltas_list = [None] * N\n deltas_list[-1] = activations_list[-1] - labels\n for n in range(N)[:-1]:\n m = -(n+1)\n comp1 = np.matmul(deltas_list[m], weights_list[m])\n comp2 = activations_list[m-1] * (1 - activations_list[m-1])\n deltas_list[m-1] = comp1 * comp2\n return(deltas_list)", "def update_networks(self, state, next_state, ref, next_ref, dr_ds, F, G):\n augmented_state = self.augment_state(state, ref)\n next_augmented_state = self.augment_state(next_state, next_ref)\n\n # Forward passes...\n action = self.actor.forward(augmented_state)\n lambda_t1 = self.critic.forward(augmented_state)\n lambda_t2 = self.target_critic.forward(next_augmented_state)\n\n # Backpropagate raw action through actor network\n action.backward()\n da_ds = augmented_state.grad\n\n # From DHP definition:\n target = dr_ds + self.gamma * lambda_t2\n error_critic = lambda_t1 - target.mm(F + G.mm(da_ds.unsqueeze(0)))\n\n # Backpropagate error_critic through critic network and update weights\n lambda_t1.backward(error_critic.squeeze())\n # Make sure these calculations don't affect the actual gradients by wrapping them in no_grad()\n with torch.no_grad():\n for wa, wc in zip(self.actor.parameters(), self.critic.parameters()):\n # .sub_() is in-place subtraction (NOT SUBSTITUTION!!!) 
- fast en memory-efficient\n wa.data.sub_(wa.grad.data * (-target.mm(G).squeeze(dim=0)) * self.learning_rate_actor)\n wc.data.sub_(wc.grad.data * self.learning_rate_critic)\n # In PyTorch, gradients accumulate rather than overwrite, so after updating they must be zeroed:\n self.critic.zero_grad()\n self.actor.zero_grad()\n self.target_critic.zero_grad() # I don't think these have a value inside of them but just to be sure...\n\n # Update target network - copy_() is a fast and memory-unintensive value overwrite\n for target_param, param in zip(self.target_critic.parameters(), self.critic.parameters()):\n target_param.data.copy_(self.tau_target_critic * param.data + (1.0 - self.tau_target_critic) * target_param.data)", "def calculate_deltas(net, input_values, desired_output):\n neuron_update_coefficients = {}\n neuron_outputs = forward_prop(net, input_values, threshold_fn=sigmoid)[1]\n neurons_backwards = net.topological_sort()\n neurons_backwards.reverse()\n\n # For each neuron starting at the last\n for neuron in neurons_backwards: \n\n # This neuron output\n outB = neuron_outputs[neuron]\n # Last neuron output\n out = neuron_outputs[neurons_backwards[0]]\n\n # Calculate delta_b different ways depending on if neuron is in last layer\n if net.is_output_neuron(neuron):\n delta_b = outB*(1-outB)*(desired_output-out)\n neuron_update_coefficients[neuron] = delta_b\n else:\n delta_b_summed_part = 0\n for wire in net.get_outgoing_wires(neuron):\n delta_b_summed_part += wire.weight * neuron_update_coefficients[wire.endNode]\n delta_b = outB*(1-outB)*delta_b_summed_part\n neuron_update_coefficients[neuron] = delta_b\n\n return neuron_update_coefficients", "def verify_gradients(self):\n \n print 'WARNING: calling verify_gradients reinitializes the learner'\n \n rng = np.random.mtrand.RandomState(1234)\n \n self.initialize(10,3)\n example = (rng.rand(4,10),np.array([0,1,1,2]))\n input,target = example\n epsilon=1e-6\n self.lr = 0.1\n self.decrease_constant = 0\n\n self.weights = [0.01*rng.rand(self.input_size,self.n_classes),\n 0.01*rng.rand(self.input_size,self.n_classes),\n 0.01*rng.rand(self.input_size,self.n_classes)]\n self.bias = 0.01*rng.rand(self.n_classes)\n self.lateral_weights = 0.01*rng.rand(self.n_classes,self.n_classes)\n \n self.fprop(input,target)\n self.bprop(input,target) # compute gradients\n\n import copy\n emp_grad_weights = copy.deepcopy(self.weights)\n \n for h in range(len(self.weights)):\n for i in range(self.weights[h].shape[0]):\n for j in range(self.weights[h].shape[1]):\n self.weights[h][i,j] += epsilon\n a = self.fprop(input,target)\n self.weights[h][i,j] -= epsilon\n \n self.weights[h][i,j] -= epsilon\n b = self.fprop(input,target)\n self.weights[h][i,j] += epsilon\n \n emp_grad_weights[h][i,j] = (a-b)/(2.*epsilon)\n\n\n print 'grad_weights[-1] diff.:',np.sum(np.abs(self.grad_weights[-1].ravel()-emp_grad_weights[-1].ravel()))/self.weights[-1].ravel().shape[0]\n print 'grad_weights[0] diff.:',np.sum(np.abs(self.grad_weights[0].ravel()-emp_grad_weights[0].ravel()))/self.weights[0].ravel().shape[0]\n print 'grad_weights[1] diff.:',np.sum(np.abs(self.grad_weights[1].ravel()-emp_grad_weights[1].ravel()))/self.weights[1].ravel().shape[0]\n \n emp_grad_lateral_weights = copy.deepcopy(self.lateral_weights)\n \n for i in range(self.lateral_weights.shape[0]):\n for j in range(self.lateral_weights.shape[1]):\n self.lateral_weights[i,j] += epsilon\n a = self.fprop(input,target)\n self.lateral_weights[i,j] -= epsilon\n\n self.lateral_weights[i,j] -= epsilon\n b = 
self.fprop(input,target)\n self.lateral_weights[i,j] += epsilon\n \n emp_grad_lateral_weights[i,j] = (a-b)/(2.*epsilon)\n\n\n print 'grad_lateral_weights diff.:',np.sum(np.abs(self.grad_lateral_weights.ravel()-emp_grad_lateral_weights.ravel()))/self.lateral_weights.ravel().shape[0]\n\n emp_grad_bias = copy.deepcopy(self.bias)\n for i in range(self.bias.shape[0]):\n self.bias[i] += epsilon\n a = self.fprop(input,target)\n self.bias[i] -= epsilon\n \n self.bias[i] -= epsilon\n b = self.fprop(input,target)\n self.bias[i] += epsilon\n \n emp_grad_bias[i] = (a-b)/(2.*epsilon)\n \n print 'grad_bias diff.:',np.sum(np.abs(self.grad_bias.ravel()-emp_grad_bias.ravel()))/self.bias.ravel().shape[0]", "def propagate(self):\n for sample in self.input_value:\n # perform forward propagation on one sample\n layer_output = sample\n for l in self.layers:\n layer_output = l.activate(layer_output)\n self.forward_propagation_output.append(layer_output) #stores propagation output value of one sample\n return self.forward_propagation_output" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Goes through and updates all the weights utilizing input values, node weights, and the learning rate
def update_node_weights(self, inputs): #Iterates through each node in each layer for i in range(len(self.NN)): for node in self.NN[i]: #Iterates through each value in the inputs and assigns weights for j in range(len(inputs)): #Multiplies the weight gradient by the learning rate and input value weight_update = self.learning_rate * node.delta_weight * inputs[j] #Adjusts the weight with momentum node.weights[j] += weight_update + node.momentum[j] #Adjusts the momentum value node.momentum[j] = weight_update #Updates the bias node node.weights[-1] += self.learning_rate * node.delta_weight #Sets the new inputs to the output vector of current layer inputs = [node.output for node in self.NN[i]]
[ "def update_weight(self, learn_rate):\n pass", "def update_weights(self) -> None:\n for neuron in self.__neurons__:\n neuron.update_weight(self.__inputs__)", "def update_weights(self):\n for layer in xrange(len(self.weights)):\n self.update_weights_layer(layer)", "def update_weights(self):\n # print(\"\\u001b[31;1m|py|\\u001b[0m\\u001b[37m\", \"ModelInterface::\", inspect.currentframe().f_code.co_name)\n\n for k, optimizer in self.optimizers.items():\n self.models[k].before_update()\n optimizer.step()\n self.models[k].inc_step()", "def update_weights(self) :\n for layer in self.layers :\n try:\n layer.update_weights()\n except Exception as e :\n pass", "def update_weights(self, gradients, learning_rate):\n for weights, gradient in zip(self.weights, gradients):\n assert gradient.shape == weights.shape\n weights += gradient * learning_rate;", "def _increase_weights(self, nodes):\n # type: (Iterable[int]) -> None\n for node in nodes:\n data = self.weights_graph.nodes[node]\n data[\"usage\"] += 1\n usage = float(data[\"usage\"]) / data[\"capacity\"]\n exp_factor = bounded_exp(max(0, self.epsilon * usage))\n for _,_,edata in self.weights_graph.in_edges(node, data=True):\n edata[\"weight\"] = exp_factor", "def updateWeights(self, initialInputs):\n self.firstLayer.updateWeight(initialInputs)", "def sgd_update(trainables, learning_rate=1e-2):\n for node in trainables:\n node.value -= learning_rate * node.gradients[node]", "def update_weights(self, network, l_rate, batch_size):\n for layer in range(len(network)):\n layer_data = network[layer]\n for neuron_id in range(len(layer_data['layer'])):\n for weight_id in range(len(layer_data['layer'][neuron_id]['weights'])):\n # update weight\n layer_data['layer'][neuron_id]['weights'][weight_id] += l_rate * (layer_data['layer'][neuron_id]['delta_weights'][weight_id])/batch_size\n # initialise delta_weight\n layer_data['layer'][neuron_id]['delta_weights'][weight_id] = 0", "def _update_weights(self, _batch_weight_gradients):\n for _weight_gradient in _batch_weight_gradients:\n _weight_gradient = list(reversed(_weight_gradient))\n for _layer in reversed(range(len(self._layers))):\n self._layers[_layer].update_weights(-self._learning_rate*_weight_gradient[_layer])", "def _initialize_weights(self):\n for n1, n2 in self.weights_graph.edges():\n data = self.weights_graph.edges[n1, n2]\n data[\"weight\"] = float(1)\n for _, data in self.weights_graph.nodes(data=True):\n data[\"usage\"] = 0\n\n for vs in self.terminals.values():\n self._increase_weights(vs)", "def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)", "def update(self):\n if not hasattr(self,\"verbose\"):\n self.verbose = 0\n if not hasattr(self,\"deltas\") or self.deltas is None:\n self.deltas = [np.zeros(dw.shape) for w,dw,n in self.weights()]\n for ds,(w,dw,n) in zip(self.deltas,self.weights()):\n ds.ravel()[:] = self.momentum * ds.ravel()[:] + self.learning_rate * dw.ravel()[:]\n w.ravel()[:] += ds.ravel()[:]\n if self.verbose:\n LOG.info(\"{} {} {}\".format(n, (np.amin(w), np.amax(w)), (np.amin(dw), np.amax(dw))))", "def set_weights(self, weights):\n\n weight_index = 0\n for layer in self.NN:\n for node in layer:\n for i in range(len(node.weights)):\n #print(weight_index)\n try:\n node.weights[i] = weights[weight_index]\n except Exception as e:\n print(weight_index)\n print(len(weights))\n sys.exit()\n\n weight_index += 1", "def __update(self, weights, datasets):\n # acquire write lock\n self.read_write.acquire()\n\n while 
self.readers > 0:\n self.read_write.wait()\n\n self.weights = utility.averageParam(\n (self.weights, self.datasets),\n (weights, datasets)\n )\n \n self.weight_send = helper.arrays_tolist(self.weights)\n self.datasets += datasets\n\n if self.save == 0:\n self.__save_model()\n self.save = SAVE_MODEL\n else:\n self.save -= 1\n\n # release write lock\n self.read_write.release()\n\n return", "def set_weights(self, weights):\n self.actor_critic.load_state_dict(weights)\n self.alpha_optimizer.step()\n self.alpha = self.log_alpha.detach().exp()\n\n # Update target networks by polyak averaging.\n self.iter += 1\n self.update_target_networks()", "def update_weights_and_state(self, weights=None, state=None):\n for trainer in self._trainer_per_task:\n acc_model_with_loss = trainer.accelerated_model_with_loss\n if weights is not None:\n self._model.weights = weights\n acc_model_with_loss.replicate_weights(trainer.model_with_loss.weights)\n if state is not None:\n self._model.state = state\n acc_model_with_loss.replicate_state(trainer.model_with_loss.state)", "def update(self):\n if self.enabled:\n for avg, weight in zip(self.averages, self.weights):\n self._update_single(avg, weight)\n self.num_updates += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns an array of all weights in the network for training use
def get_weights(self): weights = [] for layer in self.NN: for node in layer: for weight in node.weights: weights.append(weight) return weights
[ "def get_weights(self, ):\n return [w for l in self.weights for w in l.flat]", "def get_all_weights(self):\n\n # add weights for each layer if layer is a Dense layer and return the list\n return [l.weights for l in self.layers if isinstance(l, Dense)]", "def weights ( self ) :\n N = len ( self ) \n return array ( 'd' , ( self.weight ( i ) for i in range ( N ) ) )", "def get_weights(self):\r\n return self.weights # returning the weight matrix\r", "def allweights(self):\n aw = list(self.weights())\n weights,derivs,names = list(zip(*aw))\n weights = [w.ravel() for w in weights]\n derivs = [d.ravel() for d in derivs]\n return np.concatenate(weights),np.concatenate(derivs)", "def init_weights(self):\r\n self.weights = [0 for i in range(len(self.inputs[0][0]))]", "def get_weights_A(self):\r\n # Get the weights from task A (in a numpy array, so that they are static)\r\n self.weightsA = []\r\n # Convert trainable weights to tensors\r\n for w in self.net.trainable_variables:\r\n self.weightsA.append(tf.convert_to_tensor(w.numpy()))", "def weights(self) -> np.ndarray:\n self._check_fitted()\n return np.asarray(self._fit_result.x)", "def initialize_weights(self):\n for p in self.parameters():\n data = p.data\n if data.dim() == 1:\n # bias\n data.zero_()\n elif data.dim() == 2:\n # linear weight\n n = data.size(1)\n stdv = 1.0 / math.sqrt(n)\n data.normal_(0, stdv)\n elif data.dim() in (3, 4):\n # conv weight\n n = data.size(1)\n for k in data.size()[2:]:\n n *= k\n stdv = 1.0 / math.sqrt(n)\n data.normal_(0, stdv)\n else:\n raise NotImplementedError \n self.decoder.embed.weight.data.normal_(0, 1)\n for i in range(len(self.decoder.decoder)):\n bias = self.decoder.decoder[i].bias_ih\n n = bias.size(0)\n start, end = n // 4, n // 2\n bias.data[start:end].fill_(1.0)", "def _initialize_weights(self):\n self.weights = np.random.randn(self.number_of_classes,self.input_dimensions+1)", "def init_weights(self):\r\n # embedding\r\n nn.init.uniform_(self.emb_layer.weight.data, a=-0.1, b=0.1)\r\n # hidden\r\n for hidden_layer in self.hidden_layers:\r\n hidden_layer.init_weights_uniform()\r\n # output\r\n nn.init.uniform_(self.output_layer.weight.data, a=-0.1, b=0.1)\r\n nn.init.zeros_(self.output_layer.bias.data)", "def init_weights(self):\r\n if self.init_seed:\r\n np.random.seed(self.init_seed)\r\n\r\n weights_list = []\r\n biases_list = []\r\n\r\n for layer in range(self.nb_layers):\r\n new_W = np.random.randn(self.K_list[layer], self.K_list[layer + 1])\r\n new_b = np.zeros(self.K_list[layer + 1])\r\n weights_list.append(new_W)\r\n biases_list.append(new_b)\r\n\r\n self.weights_list = weights_list\r\n self.biases_list = biases_list", "def init_weights(self):\r\n default_init_weights(self, 1)", "def get_weight(self):\n return self.graph_weights.reshape(self.size_graph_rows, self.size_graph_cols)", "def getWeight(self):\n return np.concatenate([self.weight.ravel()] * 4)", "def set_weights(self, weights):\n\n weight_index = 0\n for layer in self.NN:\n for node in layer:\n for i in range(len(node.weights)):\n #print(weight_index)\n try:\n node.weights[i] = weights[weight_index]\n except Exception as e:\n print(weight_index)\n print(len(weights))\n sys.exit()\n\n weight_index += 1", "def loadWeights():\r\n final = []\r\n f = open(\"data/weight.txt\", 'r')\r\n for line in f:\r\n final.append(float(line))\r\n f.close()\r\n return final", "def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_init()\n\n return 
input_user, input_item, input_rating, user_embeddings, item_embeddings", "def _initialize_weights(self):\n for n1, n2 in self.weights_graph.edges():\n data = self.weights_graph.edges[n1, n2]\n data[\"weight\"] = float(1)\n for _, data in self.weights_graph.nodes(data=True):\n data[\"usage\"] = 0\n\n for vs in self.terminals.values():\n self._increase_weights(vs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the weights of the nodes in the network after training them
def set_weights(self, weights):

    weight_index = 0
    for layer in self.NN:
        for node in layer:
            for i in range(len(node.weights)):
                #print(weight_index)
                try:
                    node.weights[i] = weights[weight_index]
                except Exception as e:
                    print(weight_index)
                    print(len(weights))
                    sys.exit()

                weight_index += 1
[ "def set_weights(self, weights):\n self.actor_critic.load_state_dict(weights)\n self.alpha_optimizer.step()\n self.alpha = self.log_alpha.detach().exp()\n\n # Update target networks by polyak averaging.\n self.iter += 1\n self.update_target_networks()", "def do_change_nodes_weight(self, args):\n node_strings = args.nodes.split(',')\n lb = self.findlb(args.loadbalancer)\n for n in lb.nodes:\n destination = \"%s:%d\" % (n.address, n.port)\n if destination in node_strings:\n n.weight = args.weight\n n.update()", "def set_weights(self, weights):\n self.weights = copy.deepcopy(weights)", "def _initialize_weights(self):\n for n1, n2 in self.weights_graph.edges():\n data = self.weights_graph.edges[n1, n2]\n data[\"weight\"] = float(1)\n for _, data in self.weights_graph.nodes(data=True):\n data[\"usage\"] = 0\n\n for vs in self.terminals.values():\n self._increase_weights(vs)", "def reinit_weights(self):\n self.w = 0.01 * np.random.randn(self.prev_layer.get_shape()[0], self.nodes)", "def _set_node_weights(self):\n parent = self.Parent\n if parent is None: #root of tree always has weight of 1.0\n self.NodeWeight = 1.0\n else:\n self.NodeWeight = parent.NodeWeight * \\\n (self.BranchLength + self.BranchSum)/parent.BranchSum\n for child in self:\n child._set_node_weights()", "def update_node_weights(self, inputs):\n\n #Iterates through each node in each layer\n for i in range(len(self.NN)):\n for node in self.NN[i]:\n #Iterates through each value in the inputs and assigns weights\n for j in range(len(inputs)):\n #Multiplies the weight gradient by the learning rate and input value\n weight_update = self.learning_rate * node.delta_weight * inputs[j]\n #Adjusts the weight with momentum\n node.weights[j] += weight_update + node.momentum[j]\n #Adjusts the momentum value\n node.momentum[j] = weight_update\n #Updates the bias node\n node.weights[-1] += self.learning_rate * node.delta_weight\n #Sets the new inputs to the output vector of current layer\n inputs = [node.output for node in self.NN[i]]", "def update_weights(self) -> None:\n for neuron in self.__neurons__:\n neuron.update_weight(self.__inputs__)", "def set_weights(self, new_weights):\n feed_dict = {}\n tmp = self.network_weights[\"weights_gener\"][\"out\"]\n new_weights = np.asarray(new_weights, dtype=np.float32)\n tf_dtype = tf.as_dtype(tmp.dtype.name.split(\"_\")[0])\n if hasattr(tmp, \"_assign_placeholder\"):\n assign_placeholder = self.network_weights[\"weights_gener\"][\n \"out\"\n ]._assign_placeholder\n assign_op = self.network_weights[\"weights_gener\"][\"out\"]._assign_op\n else:\n assign_placeholder = tf.placeholder(tf_dtype, shape=new_weights.shape)\n assign_op = self.network_weights[\"weights_gener\"][\"out\"].assign(\n assign_placeholder\n )\n self.network_weights[\"weights_gener\"][\n \"out\"\n ]._assign_placeholder = assign_placeholder\n self.network_weights[\"weights_gener\"][\"out\"]._assign_op = assign_op\n feed_dict[assign_placeholder] = new_weights\n self.sess.run(assign_op, feed_dict=feed_dict)", "def _increase_weights(self, nodes):\n # type: (Iterable[int]) -> None\n for node in nodes:\n data = self.weights_graph.nodes[node]\n data[\"usage\"] += 1\n usage = float(data[\"usage\"]) / data[\"capacity\"]\n exp_factor = bounded_exp(max(0, self.epsilon * usage))\n for _,_,edata in self.weights_graph.in_edges(node, data=True):\n edata[\"weight\"] = exp_factor", "def set_recurrent_weights(self, weights):\n self.w_rec.weight = nn.Parameter(torchify(weights))", "def _assign_node_weights(self):\n _CONFIG_SERVER_SCORE = 11\n 
_QUORUM_MANAGER_SCORE = 8\n _QUORUM_SCORE = 5\n _MANAGER_SCORE = 3\n _CLIENT_SCORE = 1\n\n for node in self.state['nodes'].keys():\n\n fullname = self.state['nodes'][node]['admin_node_name']\n\n if self.state['nodes'][node]['roles'] == 'quorum-manager':\n self.state['nodes'][node]['weight'] = _QUORUM_MANAGER_SCORE\n elif self.state['nodes'][node]['roles'] == 'quorum':\n self.state['nodes'][node]['weight'] = _QUORUM_SCORE\n elif self.state['nodes'][node]['roles'] == 'manager':\n self.state['nodes'][node]['weight'] = _MANAGER_SCORE\n else:\n self.state['nodes'][node]['weight'] = _CLIENT_SCORE\n\n\n # check to see if node is primary/secondary config server\n # - don't want them both in the same group\n if self.state['primary_server'] == fullname or \\\n self.state['secondary_server'] == fullname:\n self.state['nodes'][node]['weight'] = _CONFIG_SERVER_SCORE\n \n\n return", "def update_weights(self):\n for layer in xrange(len(self.weights)):\n self.update_weights_layer(layer)", "def update_weights(self) :\n for layer in self.layers :\n try:\n layer.update_weights()\n except Exception as e :\n pass", "def set_nodes(self, nodes):\n self.nodes = nodes\n self.update_size()", "def set_weights(self, new_model):\n self.model.set_weights(new_model.model.get_weights())", "def set_weights(self, weights):\n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n \"You called `set_weights(weights)` on optimizer \" + self._name +\n \" with a weight list of length \" + str(len(weights)) +\n \", but the optimizer was expecting \" + str(len(params)) +\n \" weights. Provided weights: \" + str(weights)[:50] + \"...\")\n if not params:\n return\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\"Optimizer weight shape \" + str(pv.shape) +\n \" not compatible with \"\n \"provided weight shape \" + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)", "def update_target_model(self):\n self.target_network.set_weights(self.q_network.get_weights())\n # vedere se funziona invece questo\n #for t, e in zip(self.target_network.trainable_variables,\n # self.primary_network.trainable_variables): t.assign(t * (1 - TAU) + e * TAU)", "def reset_weights(self):\n # TODO: Maybe use xavier initialization instead.\n self.delete_torch_layers()\n weights = np.random.randn(len(self.connections)) * self.weight_init_std\n self.weights = weights.tolist()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set flavors in a list
def store_flavors(self, *flavors_list):
    self.flavors = flavors_list
    return self
[ "def flavor_aware_sync_flavors(context, event):\n flavors = interfaces.IFlavors(context).content_flavors # tuple of names\n anno = IAnnotations(context)\n anno[interfaces.FLAVORS_KEY] = flavors", "def flavors(self, **kwargs):\n flavors = AwsFlavor(session=self.session)\n data = flavors.fetch()\n result = flavors.list(data)\n return self.update_dict(result, kind=\"flavor\")", "def create_flavors(attrs={}, count=2):\n flavors = []\n for i in range(0, count):\n flavors.append(FakeFlavor.create_one_flavor(attrs))\n\n return flavors", "def list_flavors(*args, **kwargs):\n return __list_flavors(*args, **kwargs)", "def show_flavors(self):\n print(f\"The following flavors are available {self.flavors}\")", "def get_flavors(flavors=None, count=2):\n if flavors is None:\n flavors = FakeServer.create_flavors(count)\n return mock.MagicMock(side_effect=flavors)", "def display_flavors(self):\n\t\tprint(\"The following flavors are offered: \")\n\t\tfor flavor in self.flavors:\n\t\t\tprint(\"- \" + flavor.title())", "def createEntityListFromResponse(self, response, detail):\r\n theList = []\r\n data = response[\"flavors\"]\r\n for jsonObj in data:\r\n flavor = Flavor(\"\")\r\n flavor.initFromResultDict(jsonObj)\r\n theList.append(flavor)\r\n return EntityList(theList, detail, self)", "def update_flavor(self, flavor, **attrs):\n return self._update(_flavor.Flavor, flavor, **attrs)", "def show_flavor(self):\n print(f\"\\nFollowing flavors are avaiable:\")\n for flavor in self.flavors:\n print(f\"- {flavor.title()}\")", "def flavor(self, name=None):\n flavors = AwsFlavor()\n flavors.update()\n for flavor in flavors.get():\n if flavor['name'] == name:\n return [flavor]\n return []", "def get_icecream_flavors(self):\n print(f\"These are the Ice-Cream Flavors Available:\\n {self.icecream_flavors}\")", "def show_flavors(self):\n print(\"Available ice cream flavors are: \", end=\"\")\n for flavor in self.flavors:\n if flavor == self.flavors[-1]:\n print(f\"{flavor.title()}.\", end=\"\")\n else:\n print(f\"{flavor.title()}, \", end=\"\")\n print(\"\")", "def addFlavor(self, flavor, strength):\n\t\tfor item in self.flavors:\n\t\t\tif item['Name'].lower() == flavor.lower():\n\t\t\t\titem['Strength']= strength\n\t\t\t\treturn\n\t\tself.flavors.append({'Name': flavor, 'Strength': strength})", "def list_flavors(self):\n return self._service.list_flavors()", "def build_flavours(compose, names):\n return [build_flavour(compose, name) for name in names]", "def update_flavor(self, name, spec, properties=None, public=True):\n dry_run_txt = ' DRY_RUN:' if self.dry_run else ''\n flavor = self.get_by_name('flavor', name)\n if not flavor:\n # Create new flavor\n self.logger.debug('=>%s create flavor %s', dry_run_txt, name)\n if not self.dry_run:\n flavor = self.client.flavors.create(name=name,\n ram=spec['ram'],\n vcpus=spec['vcpus'],\n disk=spec['disk'],\n is_public=public)\n # Check to see if an update are needed\n update = False\n if flavor and getattr(flavor, 'os-flavor-access:is_public') != public:\n update = True\n for k, v in spec.items():\n if flavor and v != getattr(flavor, k):\n update = True\n if update:\n self.logger.debug('=>%s update flavor %s', dry_run_txt, name)\n if not self.dry_run:\n # delete old\n self.client.flavors.delete(flavor.id)\n # create new\n flavor = self.client.flavors.create(name=name,\n ram=spec['ram'],\n vcpus=spec['vcpus'],\n disk=spec['disk'],\n is_public=public)\n # if no flavor we cannot do properties\n if not flavor:\n return\n # Unset old properties\n for k, v in 
flavor.get_keys().items():\n if k not in properties:\n if not self.dry_run:\n flavor.unset_keys([k])\n self.logger.debug('=>%s unset flavor properties %s', dry_run_txt, k)\n # Add new properties\n update = False\n if not properties:\n return\n flavor_keys = flavor.get_keys()\n for k, v in properties.items():\n # flavor keys return everything as unicode so we use string match\n if str(v) != flavor_keys.get(k):\n self.logger.debug('=>%s set flavor properties %s', dry_run_txt, k)\n if not self.dry_run:\n try:\n flavor.set_keys({k:v})\n except novaclient.exceptions.BadRequest as e:\n self.logger.debug('=> %s', e)", "def test_check_flavors_success(self, flav_destroy, get_context, flav_list):\n # Setup for check flavor that exists\n sys_col = self.RSD.driver.PODM.get_system_collection.return_value\n sys_col.get_member.return_value = self.system_inst\n self.RSD.rsd_flavors = {\n 'mock_flav_id': {'id': 'flav_id',\n 'rsd_systems': [self.system_inst.identity]}}\n self.RSD.check_flavors(sys_col, ['/redfish/v1/Systems/System1'])\n\n # Confirm the list of available flavors\n # No flavors need to be deleted\n get_context.assert_called()\n flav_list.assert_called_with(get_context.return_value)\n sys_col.get_member.assert_called_with('/redfish/v1/Systems/System1')\n flav_destroy.assert_not_called()", "def search_flavors(*args, **kwargs):\n return __search_flavors(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }