Dataset schema: query (string, 9 to 9.05k characters), document (string, 10 to 222k characters), negatives (list of 19 to 20 strings), metadata (dict).
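Each row below pairs a natural-language query with a positive code document and a list of hard negatives, and its metadata declares a triplet objective over (query, document, negatives). A minimal sketch of the margin loss such triplets are typically used to optimize; the encoder is not part of the dataset, so the vectors, dimensions and margin here are hypothetical stand-ins:

import numpy as np

def cosine(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def triplet_margin_loss(q_vec, pos_vec, neg_vecs, margin=0.2):
    # Push the query embedding closer to its positive document than to every negative.
    pos_sim = cosine(q_vec, pos_vec)
    losses = [max(0.0, margin - pos_sim + cosine(q_vec, n)) for n in neg_vecs]
    return sum(losses) / len(losses)

rng = np.random.default_rng(0)
q, p = rng.normal(size=16), rng.normal(size=16)
negs = rng.normal(size=(19, 16))   # each row carries 19 to 20 negatives, per the schema above
print(triplet_margin_loss(q, p, list(negs)))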
Guess a poem's meter via Levenshtein distance from candidates
def guess_meter(tokenized_poem):
    joined_lines = [''.join(line) for line in scanscion(tokenized_poem) if line]
    line_lengths = [len(line) for line in joined_lines]
    num_lines = len(joined_lines)

    meters = []
    for line in joined_lines:
        meters.append(levenshtein(line, POSSIBLE_METERS))

    guessed_meter = max(zip((meters.count(item) for item in set(meters)), set(meters)))[1]

    return joined_lines, num_lines, line_lengths, guessed_meter
[ "def levenshtein(string, candidates):\n\n distances = defaultdict(int)\n num_lines = len(string)\n\n for k, v in candidates.items():\n expanded = False\n # Expands the length of each candidate to match the length of the compared string\n if len(v) != len(string):\n v = (v * (num_lines // len(v) + 1))[:num_lines]\n expanded = True\n\n edit_distance = distance(string, v)\n\n # If we expanded the candidate, then it is a worse match than what we have already\n if edit_distance in distances and expanded:\n continue\n\n distances[distance(string, v)] = k\n\n return distances[min(distances)]", "def _fuzzysearch_score_fcn(v_text, phrase, max_l_dist=1):\n if v_text is None:\n return -9999\n\n matches = find_near_matches(\n phrase.lower(),\n v_text.lower(),\n max_l_dist=max_l_dist)\n\n return sum([1 - 1. * m.dist / (max_l_dist + 1) for m in matches])", "def levenshtein_distance_using_lexical_tree(lexical_tree, input_string, strategy=0, case_sensitive=0):", "def test_levenshteinDistance_bridgedb_doge(self):\n distance = util.levenshteinDistance('bridgedb', 'doge')\n self.assertEqual(distance, 6)", "def test_levenshteinDistance_bat_cat(self):\n distance = util.levenshteinDistance('bat', 'cat')\n self.assertEqual(distance, 1)", "def _match_term_to_results_with_levenshtein(self, current_search_term, ocr_results):\n possible_matches = []\n for result in ocr_results:\n ocr_result_word = result[0]\n distance = editdistance.eval(current_search_term, ocr_result_word)\n similarity = 1 - distance / max(len(ocr_result_word), len(current_search_term)) \n if similarity > self.minimum_word_similarity:\n possible_matches.append(result)\n\n return possible_matches", "def test_levenshteinDistance_feidanchaoren0043_feidanchaoren0011(self):\n email1 = Address('feidanchaoren0043@gmail.com')\n email2 = Address('feidanchaoren0011@gmail.com')\n # Fuzzy match if the Levenshtein Distance is less than or equal to:\n fuzzyMatch = 4\n distance = util.levenshteinDistance(email1.local, email2.local)\n self.assertLessEqual(distance, fuzzyMatch)", "def rel_levenshtein(s1, s2):\n maxlen = max(len(s1), len(s2))\n if maxlen > 0:\n return levenshtein(s1, s2) / float(maxlen)\n else:\n return 0", "def fuzzy_string_search(st, li):\n best_match = 0\n best_rat = 0\n for (i, s) in enumerate(li):\n rat = fuzz.partial_ratio(s, st)\n if rat > best_rat:\n best_match = i\n best_rat = rat\n\n return best_match, best_rat", "def get_match(target, candidates, w2vmodel):\n # parse target string into a list of tokens\n new_s1 = get_token_list(target)\n scores = {candidates.index(s): pulp.value(word_mover_distance_probspec(new_s1, s, w2vmodel).objective) for\n s in\n candidates}\n return candidates[min(scores, key=scores.get)]", "def match_word2vec(data, entry, max_):\n fuzz_flag = False\n entry = entry.split()\n i = 0.0\n query_meaning = 0\n for words in entry:\n try:\n query_meaning += model[words]\n except KeyError:\n continue\n i += 1\n try:\n query_meaning = query_meaning / i\n except ZeroDivisionError:\n query_meaning = 0\n i = 0.0\n for pair in data:\n for qns in data[pair][\"Question\"]:\n question_meaning = 0.0\n words = qns.split()\n for word in words:\n try:\n question_meaning += model[word]\n except KeyError:\n continue\n i += 1\n try:\n question_meaning = question_meaning / i\n except ZeroDivisionError:\n query_meaning = 0\n try:\n score = 1 - spatial.distance.cosine(query_meaning, question_meaning)\n except ValueError:\n score = 0\n if math.isnan(score):\n print(\"FAILED: query/question not in model dict\")\n fuzz_flag = True\n score = 
0\n if score >= max_:\n max_ = score\n response_ = data[pair][\"Answer\"]\n closest_match = qns\n print('COSINE SIMILARITY: ' + str(max_))\n if max_ > 0.5:\n return random.choice(response_), max_\n elif fuzz_flag:\n # FUZZY WUZZY HERE\n max_ = 0\n entry = ' '.join(entry)\n for pair in data:\n for qns in data[pair][\"Question\"]:\n metrics = fuzz.ratio(qns, entry) # Retrieving\n if metrics > max_:\n max_ = metrics\n max_ = max_ / 100.0\n response_ = data[pair][\"Answer\"]\n closest_match = qns\n print('FUZZY WUZZY SIMILARITY: ' + str(max_))\n if max_ > 0.5:\n return random.choice(response_), 'test'\n return closest_match, max_\n # word2vec ENDS HERE----------------------------------", "def get_prob(potential_token, candidate_address_tokens):\n\n #If this potential match token matches one of the tokens in the candidate address, then compute how\n #unusual this token is amongst potential addresses. The more unusual the better\n\n # If the token in the potential match address is in the candidate address,\n # then great - let's get the probability of the term and return it\n prob = self.data_getter.get_freq(potential_token)\n if potential_token in candidate_address_tokens:\n\n return_value = prob\n\n #logger.debug(\"potential token: {} found {}\".format(potential_token,return_value))\n return return_value\n\n\n #if this token from one of the potetial matches is not in the candidate address, then maybe there's a spelling error?\n #Compare this token to each token in the candidate address looking for similarities\n\n best_score = 1\n\n # If the token in the address from the list of potential matches is not IN the candidate address, it is possibly a misspelling\n # So look through the tokens of the candidate address seeing if any of them fuzzy match the potential_token\n\n for candidate_token in candidate_address_tokens:\n\n if is_number(candidate_token) and is_number(potential_token) and self.fuzzy_matched_one_number == False:\n\n #We will want to check whether the tokens are 'number like' - if so, then 125b matches 125 and vice versa, but\n #225 does not match 125 closely. 125 however, is a reasonable match for 126.\n t_num = get_number(candidate_token)\n p_num = get_number(potential_token)\n\n\n #Calculate a distance metric using arbitrary constants such as 5 and 2. 
Monotonic in closeness to actual number\n\n d_num1 = t_num + 5\n d_num2 = p_num + 5\n\n #how far away is potential from candidate?\n distance = math.fabs(d_num1-d_num2)/(max(d_num1,d_num2))\n if distance != 0:\n distance += 0.2\n\n #logger.debug(\"t_num = {}, p_num = {}, distance = {}, main_prob {}\".format(t_num, p_num, distance, prob))\n\n #logger.debug(\"adjust up by {}\".format(((distance+1)**4)))\n\n\n if prob == None: #If the prob is None that means we couldn't find it - use a fairly standard prob in this case\n prob = 3.0e-7\n\n prob = prob *((distance+1)**4)*10\n\n #logger.debug(\"using prob {}\".format(prob))\n\n if prob < 1:\n self.fuzzy_matched_one_number = True\n\n best_score = min(best_score, prob)\n\n elif not is_number(candidate_token) and not is_number(potential_token):\n\n #proceed to fuzzy match only if both tokens are >3 characters, otherwise best score remains 1\n if len(candidate_token)> 3 and len(potential_token)>3:\n l_ratio = levenshtein_ratio(candidate_token, potential_token)\n\n #If the ratio is better than 0.7 assume it's a spelling error\n if l_ratio>MISSPELLING_THRESHOLD:\n # It makes most sense to use 'potential token' here as we can be sure it's in the list of term frequencies\n\n if prob is None:\n prob = 1\n prob = prob*100*(1/(l_ratio**6))\n\n #logger.info(\"fuzzy matched: {} against {} with prob {}\".format(candidate_token,potential_token, prob))\n\n best_score = min(best_score, prob)\n\n #Calculate the edit distance ratio - how many edits do we need to make as a proportion\n #of all characters in the shortest string?\n\n #If this is 0.7 or above, assume we have a\n\n #If we haven't found any sort of match return 1 (i.e. leave the probability unalterned)\n #logger.debug(\"potential token: {} returning from else {}\".format(potential_token,best_score))\n\n return best_score", "def levenshteinCheck(self):\n if self.tracks is None:\n return\n ng = ' '.join(self.api_query_input)\n self.lvsd = []\n for track in self.tracks:\n current = track.split(' ')\n if '' in current:\n del current[current.index('')]\n self.tracks[self.tracks.index(track)] = ' '.join(current)\n # find levenshtein distance between trackname and ngram\n self.lvsd.append(levenshtein(ng, track))\n #return self.lvsd", "def get_corrections(self, word):\n norm = self.normalize(word)\n candidates = self.word_list.get(norm)\n if not candidates:\n return [(0, \"NO SUGGESTION\")]\n def rank_candidates():\n for cand in candidates:\n yield dameraulevenshtein(cand, word), cand\n ranked = list(rank_candidates())\n best_score = min(ranked, key=lambda x: x[0])[0]\n return [ c for c in ranked if c[0] == best_score ]", "def get_distances(self, word):\n for name in self.app_names:\n self.distances[name] = self.levenshtein(word, name)", "def _seed_match(lp, goal):\n\n goall = _entry_to_tuple(goal)\n scores = {}\n for part_name, seed in lp.iteritems():\n score = 0\n seedl = _entry_to_tuple(str(seed))\n\n if seedl[0] == goall[0]:\n # At least we want a distribution match for it to be\n # considered\n scores[part_name] = Levenshtein.seqratio(goall, seedl)\n else:\n scores[part_name] = 0\n if scores:\n selected, score = max(scores.iteritems(), key = operator.itemgetter(1))\n return selected, score, lp[selected]\n return None, 0, None", "def levenshtein_distance(str_1, str_2):\n return textdistance.levenshtein.normalized_similarity(str_1, str_2)", "def test_levenshteinDistance_bar_cat(self):\n distance = util.levenshteinDistance('bar', 'cat')\n self.assertEqual(distance, 2)", "def correct_misspelling(token, 
distance_threshold=2):\n if in_dictionary(token):\n return token\n suggested_words = suggest_words(token)\n if suggested_words is not None:\n num_modified_characters = []\n for suggested_word in suggested_words:\n distance = calculate_levenshtein_distance(token, suggested_word)\n num_modified_characters.append(distance)\n # this min is showing errors since it takes an empy/none variable as inputen \n min_num_modified_characters = min(num_modified_characters)\n best_arg = num_modified_characters.index(min_num_modified_characters)\n if distance_threshold > min_num_modified_characters:\n best_suggestion = suggested_words[best_arg]\n return best_suggestion\n else:\n return token\n else:\n return token" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Guess a poem's rhyme via Levenshtein distance from candidates
def guess_rhyme_type(tokenized_poem):
    joined_lines = ''.join(rhyme_scheme(tokenized_poem))
    no_blanks = joined_lines.replace(' ', '')
    guessed_rhyme = levenshtein(no_blanks, POSSIBLE_RHYMES)
    return joined_lines, guessed_rhyme
[ "def levenshtein(string, candidates):\n\n distances = defaultdict(int)\n num_lines = len(string)\n\n for k, v in candidates.items():\n expanded = False\n # Expands the length of each candidate to match the length of the compared string\n if len(v) != len(string):\n v = (v * (num_lines // len(v) + 1))[:num_lines]\n expanded = True\n\n edit_distance = distance(string, v)\n\n # If we expanded the candidate, then it is a worse match than what we have already\n if edit_distance in distances and expanded:\n continue\n\n distances[distance(string, v)] = k\n\n return distances[min(distances)]", "def _match_term_to_results_with_levenshtein(self, current_search_term, ocr_results):\n possible_matches = []\n for result in ocr_results:\n ocr_result_word = result[0]\n distance = editdistance.eval(current_search_term, ocr_result_word)\n similarity = 1 - distance / max(len(ocr_result_word), len(current_search_term)) \n if similarity > self.minimum_word_similarity:\n possible_matches.append(result)\n\n return possible_matches", "def levenshtein_distance_using_lexical_tree(lexical_tree, input_string, strategy=0, case_sensitive=0):", "def get_corrections(self, word):\n norm = self.normalize(word)\n candidates = self.word_list.get(norm)\n if not candidates:\n return [(0, \"NO SUGGESTION\")]\n def rank_candidates():\n for cand in candidates:\n yield dameraulevenshtein(cand, word), cand\n ranked = list(rank_candidates())\n best_score = min(ranked, key=lambda x: x[0])[0]\n return [ c for c in ranked if c[0] == best_score ]", "def match_word2vec(data, entry, max_):\n fuzz_flag = False\n entry = entry.split()\n i = 0.0\n query_meaning = 0\n for words in entry:\n try:\n query_meaning += model[words]\n except KeyError:\n continue\n i += 1\n try:\n query_meaning = query_meaning / i\n except ZeroDivisionError:\n query_meaning = 0\n i = 0.0\n for pair in data:\n for qns in data[pair][\"Question\"]:\n question_meaning = 0.0\n words = qns.split()\n for word in words:\n try:\n question_meaning += model[word]\n except KeyError:\n continue\n i += 1\n try:\n question_meaning = question_meaning / i\n except ZeroDivisionError:\n query_meaning = 0\n try:\n score = 1 - spatial.distance.cosine(query_meaning, question_meaning)\n except ValueError:\n score = 0\n if math.isnan(score):\n print(\"FAILED: query/question not in model dict\")\n fuzz_flag = True\n score = 0\n if score >= max_:\n max_ = score\n response_ = data[pair][\"Answer\"]\n closest_match = qns\n print('COSINE SIMILARITY: ' + str(max_))\n if max_ > 0.5:\n return random.choice(response_), max_\n elif fuzz_flag:\n # FUZZY WUZZY HERE\n max_ = 0\n entry = ' '.join(entry)\n for pair in data:\n for qns in data[pair][\"Question\"]:\n metrics = fuzz.ratio(qns, entry) # Retrieving\n if metrics > max_:\n max_ = metrics\n max_ = max_ / 100.0\n response_ = data[pair][\"Answer\"]\n closest_match = qns\n print('FUZZY WUZZY SIMILARITY: ' + str(max_))\n if max_ > 0.5:\n return random.choice(response_), 'test'\n return closest_match, max_\n # word2vec ENDS HERE----------------------------------", "def correct_misspelling(token, distance_threshold=2):\n if in_dictionary(token):\n return token\n suggested_words = suggest_words(token)\n if suggested_words is not None:\n num_modified_characters = []\n for suggested_word in suggested_words:\n distance = calculate_levenshtein_distance(token, suggested_word)\n num_modified_characters.append(distance)\n # this min is showing errors since it takes an empy/none variable as inputen \n min_num_modified_characters = 
min(num_modified_characters)\n best_arg = num_modified_characters.index(min_num_modified_characters)\n if distance_threshold > min_num_modified_characters:\n best_suggestion = suggested_words[best_arg]\n return best_suggestion\n else:\n return token\n else:\n return token", "def test_levenshteinDistance_bridgedb_doge(self):\n distance = util.levenshteinDistance('bridgedb', 'doge')\n self.assertEqual(distance, 6)", "def rel_levenshtein(s1, s2):\n maxlen = max(len(s1), len(s2))\n if maxlen > 0:\n return levenshtein(s1, s2) / float(maxlen)\n else:\n return 0", "def trie_levenshtein_spellcorrect(self,\n word: str,\n number_of_results: int=1,\n max_distance: int=-1,\n verbose=False) -> (t.List[t.Tuple[int, str]]):\n # store all the current states in a sorted set, this allows to work only with the less costly option\n # (one set is a tuple (cost, node, remaining word characters, word up to node))\n candidate_cost_node_word_set: t.Set[t.Tuple[int, TrieNode, str, str]]\n candidate_cost_node_word_set = SortedSet(\n iterable={(0, self.trie.root, word, \"\")},\n # the key is the cost, then the frequency, then the remaining length \n key=lambda x: (x[0], len(x[2]), x[1].frequency))\n\n # store the optimal distance result\n optimal_cost_node_word_list: t.Set[t.Tuple[int, str]]\n optimal_cost_node_word_list = SortedSet()\n\n # loop on all the nodes until either:\n while ( # we find enough valid final nodes\n len(optimal_cost_node_word_list) < number_of_results and \n # we have no more candidates\n len(candidate_cost_node_word_set) > 0 and \n # we have no more candidates under the cost threshold\n (max_distance < 0 or candidate_cost_node_word_set[0][0] < max_distance)): \n\n # get the least costly candidate\n current_cost, current_node, current_word_left, current_correct_word = candidate_cost_node_word_set.pop(0)\n\n # --- stop condition ---\n # we consider a solution as valid if the current node is a leaf and the word to study is empty\n if len(current_word_left) == 0 and current_node.is_leaf:\n optimal_cost_node_word_list.add((current_cost, current_correct_word))\n \n # --- previous base cases ---\n # the word to study is empty or the branch is a cul-de-sac\n elif len(current_word_left) == 0: # the word is empty\n # get remaining distance to the next leaf and the corresponding node and word\n # (number of additions to go from the current word to a true word)\n cost_to_next_leaf, next_leaf, word_to_next_leaf = sortest_distance_to_next_leaf(current_node)\n candidate_cost_node_word_set.add(\n (current_cost + cost_to_next_leaf, next_leaf, \"\", current_correct_word + word_to_next_leaf))\n\n elif len(current_node.children) < 1: # the branch is a cul-de-sac\n # if the current node is a leaf (it should be, but let's be safe) add it back to the candidates as a \n # valid result, otherwise discard the branch (by not adding the node back)\n if current_node.is_leaf:\n # add the remaining size of the current word to the cost\n # (number of deletions to go from the current word to a true word)\n candidate_cost_node_word_set.add(\n (current_cost + len(current_word_left), current_node, \"\", current_correct_word))\n\n else:\n # --- previously reccursive cases ---\n # edit is removing the first character\n for child in current_node.children.values():\n candidate_cost_node_word_set.add(\n (current_cost + 1, child, current_word_left, current_correct_word + child.value))\n \n # edit is inserting the first character\n candidate_cost_node_word_set.add(\n (current_cost + 1, current_node, current_word_left[1:], 
current_correct_word))\n \n # edit is replacing the first character\n # no edit needed on the last character\n for child in current_node.children.values():\n candidate_cost_node_word_set.add(\n # +1 to the cost if a replace is needed, +0 if no replace is needed\n (current_cost + (1 if child.value != current_word_left[0] else 0),\n child, current_word_left[1:], current_correct_word + child.value))\n\n if verbose:\n print(\"Stopped because \" + \n (\"we find enough valid final nodes\" if len(optimal_cost_node_word_list) >= number_of_results else \n (\"we have no more candidates\" if len(candidate_cost_node_word_set) <= 0 else \n \"we have no more candidates under the cost threshold\")))\n\n return list(optimal_cost_node_word_list)", "def correct_misspelling_ngram(token, levenshtein_treshold=3):\n if in_dictionary(token):\n return token\n suggested_words = suggest_words(token)\n jaccard_coefficients = []\n best_suggested_words = []\n if suggested_words is not None:\n token_bigrams = make_ngrams(token, 2)\n for suggested_word in suggested_words:\n distance = calculate_levenshtein_distance(token, suggested_word)\n if distance < levenshtein_treshold:\n suggested_bigrams = make_ngrams(suggested_word, 2)\n jaccard_coefficient = calculate_jaccard_coefficient(\n token_bigrams, suggested_bigrams)\n jaccard_coefficients.append(jaccard_coefficient)\n best_suggested_words.append(suggested_word)\n highest_jaccard = max(jaccard_coefficients)\n best_arg = jaccard_coefficients.index(highest_jaccard)\n word = best_suggested_words[best_arg]\n return word\n else:\n return word", "def _seed_match(lp, goal):\n\n goall = _entry_to_tuple(goal)\n scores = {}\n for part_name, seed in lp.iteritems():\n score = 0\n seedl = _entry_to_tuple(str(seed))\n\n if seedl[0] == goall[0]:\n # At least we want a distribution match for it to be\n # considered\n scores[part_name] = Levenshtein.seqratio(goall, seedl)\n else:\n scores[part_name] = 0\n if scores:\n selected, score = max(scores.iteritems(), key = operator.itemgetter(1))\n return selected, score, lp[selected]\n return None, 0, None", "def relevance(distance: float):\n universe = np.arange(0, 100, 1)\n return fuzz.interp_membership(universe, fuzz.gaussmf(universe, 0.0, 5), distance)", "def closest_word_match(word):\n distances = {}\n for other_word in ALL_WORDS:\n dist = Levenshtein.distance(word, other_word)\n if dist not in distances:\n distances[dist] = []\n distances[dist].append(other_word)\n i = 1\n while i not in distances or distances[i] == []:\n i += 1\n if len(distances[i]) == 1:\n log(f\"Repaired '{word}' -> '{distances[i][0]}'.\", LOG_DEBUG, \"Who's on First?\")\n return distances[i][0]\n return None", "def test_levenshteinDistance_bat_cat(self):\n distance = util.levenshteinDistance('bat', 'cat')\n self.assertEqual(distance, 1)", "def test_suggestion_rankings(self):\n answers = {\"problem\": \"MALADIES_FONGIQUES\", \"rotation\": [], \"department\": \"01\"}\n engine = Engine(answers, [], [])\n practices = engine.calculate_results()\n suggestions = engine.get_suggestions(practices)\n\n # There should be two practices with weight 1.5\n self.assertEqual(len(suggestions), 3)\n weights = list(map(lambda x: x.weight, suggestions))\n self.assertEqual(len(list(filter(lambda x: x == 1.5, weights))), 2)", "def _fuzzysearch_score_fcn(v_text, phrase, max_l_dist=1):\n if v_text is None:\n return -9999\n\n matches = find_near_matches(\n phrase.lower(),\n v_text.lower(),\n max_l_dist=max_l_dist)\n\n return sum([1 - 1. 
* m.dist / (max_l_dist + 1) for m in matches])", "def guess_meter(tokenized_poem):\n\n joined_lines = [''.join(line) for line in scanscion(tokenized_poem) if line]\n line_lengths = [len(line) for line in joined_lines]\n num_lines = len(joined_lines)\n\n meters = []\n for line in joined_lines:\n meters.append(levenshtein(line, POSSIBLE_METERS))\n\n guessed_meter = max(zip((meters.count(item) for item in set(meters)), set(meters)))[1]\n\n return joined_lines, num_lines, line_lengths, guessed_meter", "def edit_levenshtein(c1, c2):\n return 0 if c1 == c2 else -1", "def test_levenshteinDistance_feidanchaoren0043_feidanchaoren0011(self):\n email1 = Address('feidanchaoren0043@gmail.com')\n email2 = Address('feidanchaoren0011@gmail.com')\n # Fuzzy match if the Levenshtein Distance is less than or equal to:\n fuzzyMatch = 4\n distance = util.levenshteinDistance(email1.local, email2.local)\n self.assertLessEqual(distance, fuzzyMatch)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Propagates a state vector
def propagate_state(r, v, t0, tf, bstar=0.21109E-4):
    kep = state_kep(r, v)
    return propagate_kep(kep, t0, tf, bstar)
[ "def apply_state(self, state):", "def forward(self, t, state):\n xs = state[:, :-1]\n dlogp = state[:, -1:]\n state = (xs, dlogp)\n *dxs, div = self._dynamics(t, state)\n state = torch.cat([*dxs, div], dim=-1)\n return state", "def _unwrap_state_vector(self):\n return [\n state_vector(i)\n for i in range(\n self.idx_state_var,\n self.idx_state_var + self.num_state_variables,\n )\n ]", "def propagate(self):\n for sample in self.input_value:\n # perform forward propagation on one sample\n layer_output = sample\n for l in self.layers:\n layer_output = l.activate(layer_output)\n self.forward_propagation_output.append(layer_output) #stores propagation output value of one sample\n return self.forward_propagation_output", "def forward(self, t, state):\n\n *dxs, dlogp = self._dynamics(self._t_max - t, state)\n return [-dx for dx in dxs] + [-dlogp]", "def swap_gate_statevector_nondeterministic():\n targets = []\n # initial state as |10+>\n # Swap(0,1).(X^I^H), Permutation (0,1,2) -> (1,0,2), |1+0>\n targets.append(np.array([0, 0, 0, 0, 1, 0, 1, 0]) / np.sqrt(2))\n # Swap(0,2).(X^I^H), # Permutation (0,1,2) -> (2,1,0),\n targets.append(np.array([0, 1, 0, 0, 0, 1, 0, 0]) / np.sqrt(2))\n # Swap(2,0).Swap(0,1).(X^I^H), Permutation (0,1,2) -> (2,0,1)\n targets.append(np.array([0, 1, 0, 1, 0, 0, 0, 0]) / np.sqrt(2))\n return targets", "def state_to_output( self, vec, fill_value = None, var_info = None,\n extend = True ):\n return self.variables.state_to_output( vec, fill_value, var_info, extend )", "def augment_state(self, state, reference):\n augmented_state = [state[x] for x in self.ac_states] + [state[self.tracked_state] - reference[self.tracked_state]]\n return torch.tensor(augmented_state, requires_grad=True)", "def propagate_states(state_vectors, epoch_time, end_time):\n \n # Convert times to strings \n epoch_time_str = batch_time_string_from_datetime(epoch_time)\n start_time_str = epoch_time_str\n end_time_str = batch_time_string_from_datetime(end_time)\n print(\"Propagating %i states to propagate from %s to %s\" %\n (len(state_vectors), start_time_str, end_time_str))\n \n url = \"https://pro-equinox-162418.appspot.com/_ah/api/adam/v1\"\n rest = RestRequests(url)\n batches_module = Batches(rest)\n \n # Create batches from statevectors\n batches = []\n propagation_params = PropagationParams({\n 'start_time': start_time_str,\n 'end_time': end_time_str,\n 'project_uuid': 'ffffffff-ffff-ffff-ffff-ffffffffffff'\n })\n for state_vector in state_vectors:\n opm_params = OpmParams({\n 'epoch': start_time_str,\n 'state_vector': state_vector\n })\n batches.append(Batch(propagation_params, opm_params))\n\n # submit batches and wait till they finish running \n BatchRunManager(batches_module, batches).run()\n\n # Get final states\n end_state_vectors = []\n for batch in batches:\n end_state_vectors.append(batch.get_results().get_end_state_vector())\n\n return end_state_vectors", "def forward_propagate(self, inputs):\r\n \r\n \r\n # the input layer activation is just the input itself\r\n activations = inputs\r\n self.activations[0] = activations\r\n \r\n # iterate through the network layers\r\n for i, w in enumerate(self.weights):\r\n # calculate matrix multiplication between previous activation and weight matrix\r\n net_inputs = np.dot(activations, w)\r\n \r\n # calculate the activations\r\n activations = self._sigmoid(net_inputs)\r\n \r\n self.activations[i + 1] = activations\r\n \r\n return activations", "def apply_state(self, state):\n if len(state) > len(self.inputs): raise TypeError(\"Too many input 
states specified\")\n inputs = self.inputs.copy()\n self.inputs = []\n for i,s in enumerate(state):\n v = inputs[i]\n if s == '/': \n self.inputs.append(v)\n continue\n if s in ('0', '1'):\n self.scalar.add_power(-1)\n self.set_type(v, 2)\n if s == '1':\n self.set_phase(v, Fraction(1))\n elif s in ('+', '-'):\n self.scalar.add_power(-1)\n self.set_type(v, 1)\n if s == '-':\n self.set_phase(v, Fraction(1))\n else:\n raise TypeError(\"Unknown input state \" + s)", "def forward_propagate(self, input):\n return self.next_layer.forward_propagate(input)", "def forward_states(X, wx, wRec):\n # Initialise the matrix that holds all states for all input sequences.\n # The initial state s0 is set to 0.\n S = np.zeros((X.shape[0], X.shape[1]+1))\n # Use the recurrence relation defined by update_state to update the \n # states trough time.\n for k in range(0, X.shape[1]):\n # S[k] = S[k-1] * wRec + X[k] * wx\n S[:,k+1] = update_state(X[:,k], S[:,k], wx, wRec)\n return S", "def _recurseively_extend_beliefvector(self, beliefstate, vector):\n\n if isinstance(beliefstate, dict):\n for key in beliefstate:\n self._recurseively_extend_beliefvector(beliefstate[key],\n vector)\n if isinstance(beliefstate, float):\n vector.append(beliefstate)", "def get_state_werner_pure_state_vector() -> np.ndarray:\n state_vec_0 = np.array([1, 0], dtype=np.complex128) # |0>\n state_vec_1 = np.array([0, 1], dtype=np.complex128) # |1>\n\n # |0>|0>|1>\n vec_0 = tensor_product_for_vecs([state_vec_0, state_vec_0, state_vec_1])\n # |0>|1>|0>\n vec_1 = tensor_product_for_vecs([state_vec_0, state_vec_1, state_vec_0])\n # |1>|0>|0>\n vec_2 = tensor_product_for_vecs([state_vec_1, state_vec_0, state_vec_0])\n\n pure_state_vec = 1 / np.sqrt(3) * (vec_0 + vec_1 + vec_2)\n return pure_state_vec", "def apply2(gate, pos0, pos1, state):\n assert(0 <= pos0 and pos0 < state.ndim)\n assert(0 <= pos1 and pos1 < state.ndim)\n assert(pos0 != pos1)\n \n def update(s):\n reshaped = np.reshape(s, (4, -1))\n updated = np.dot(gate, reshaped)\n return np.reshape(updated, s.shape)\n\n return with_head_position(update, pos0, pos1)(state)", "def linearize(self, state=None, action=None):\n if state is None:\n state = np.zeros(self.dim_state)\n if action is None:\n action = np.zeros(self.dim_action)\n\n if type(state) is not torch.Tensor:\n state = torch.tensor(state)\n if type(action) is not torch.Tensor:\n action = torch.tensor(action)\n\n state.requires_grad = True\n action.requires_grad = True\n\n f = self.func(None, state, action)\n\n a = np.zeros((self.dim_state[0], self.dim_state[0]))\n b = np.zeros((self.dim_state[0], self.dim_action[0]))\n \n for i in range(self.dim_state[0]):\n aux = torch.autograd.grad(\n f[i], state, allow_unused=True, retain_graph=True\n )[0]\n if aux is not None:\n a[i] = aux.numpy()\n\n aux = torch.autograd.grad(\n f[i], action, allow_unused=True, retain_graph=True\n )[0]\n if aux is not None:\n b[i] = aux.numpy()\n\n ad, bd, _, _, _ = signal.cont2discrete(\n (a, b, 0, 0), self.step_size, method=\"zoh\"\n )\n return LinearSystem(ad, bd)", "def creation(i,state_in):\n coef = np.sqrt(state_in[i]+1)\n state_out=state_in.copy()\n state_out[i] = state_out[i]+1\n return state_out,coef", "def _forward_propagation(self):\n a, self.cache = self.__calc_a(self.X)\n self.cost = np.sum(self._cost_func['func'](a, self.y))", "def switchingFunction(self, state):\n\n x, y, z, dx, dy, dz, m, L1, L2, L3, L4, L5, L6, L7 = state\n\n Lv_, lv = self.unitVector(np.array([L4, L5, L6]))\n\n S = -lv * self.ve / m - L7 + 1\n\n return S" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
kep_to_sat(kep,epoch,bstar=0.21109E-4,whichconst=wgs72,afspc_mode=False) Converts a set of keplerian elements into a Satellite object.
def kep_to_sat(kep, epoch, bstar=0.21109E-4, whichconst=wgs72, afspc_mode=False):
    deg2rad = np.pi / 180.0;          # 0.0174532925199433
    xpdotp = 1440.0 / (2.0 * np.pi);  # 229.1831180523293

    tumin = whichconst.tumin

    satrec = Satellite()
    satrec.error = 0;
    satrec.whichconst = whichconst  # Python extension: remembers its consts
    satrec.satnum = 0

    dt_obj = datetime.utcfromtimestamp(epoch)
    t_obj = dt_obj.timetuple()
    satrec.epochdays = (t_obj.tm_yday + t_obj.tm_hour/24 + t_obj.tm_min/1440 + t_obj.tm_sec/86400)
    satrec.ndot = 0
    satrec.nddot = 0
    satrec.bstar = bstar

    satrec.inclo = kep[2]
    satrec.nodeo = kep[4]
    satrec.ecco = kep[1]
    satrec.argpo = kep[3]
    satrec.mo = __true_to_mean(kep[5], kep[1])
    satrec.no = 86400/(2*np.pi*(kep[0]**3/398600.4405)**0.5)

    satrec.no = satrec.no / xpdotp;  # rad/min
    satrec.a = pow( satrec.no*tumin , (-2.0/3.0) );

    # ---- find standard orbital elements ----
    satrec.inclo = satrec.inclo * deg2rad;
    satrec.nodeo = satrec.nodeo * deg2rad;
    satrec.argpo = satrec.argpo * deg2rad;
    satrec.mo = satrec.mo * deg2rad;

    satrec.alta = satrec.a*(1.0 + satrec.ecco) - 1.0;
    satrec.altp = satrec.a*(1.0 - satrec.ecco) - 1.0;

    satrec.epochyr = dt_obj.year
    satrec.jdsatepoch = epoch/86400.0 + 2440587.5
    satrec.epoch = dt_obj

    # ---------------- initialize the orbit at sgp4epoch -------------------
    sgp4init(whichconst, afspc_mode, satrec.satnum, satrec.jdsatepoch-2433281.5, satrec.bstar,
             satrec.ecco, satrec.argpo, satrec.inclo, satrec.mo, satrec.no,
             satrec.nodeo, satrec)

    return satrec
[ "def save_ephem(sat, tle_dir, cadence, location, alpha, out_dir):\n\n # instantiate an empty dict\n sat_ephem = {}\n sat_ephem[\"sat_id\"] = sat\n sat_ephem[\"time_array\"] = []\n sat_ephem[\"sat_alt\"] = []\n sat_ephem[\"sat_az\"] = []\n\n # Make output directory tree\n Path(f\"{out_dir}/ephem_data\").mkdir(parents=True, exist_ok=True)\n Path(f\"{out_dir}/ephem_plots\").mkdir(parents=True, exist_ok=True)\n\n tle_path = Path(f\"{tle_dir}/{sat}.txt\")\n\n # Skip if the file is empty\n if tle_path.stat().st_size != 0:\n\n # Check if tle file exists\n tle_path.is_file()\n sats, epochs = load_tle(tle_path)\n epoch_range = epoch_ranges(epochs)\n\n for i in range(len(epoch_range) - 1):\n t_arr, index_epoch = epoch_time_array(\n epoch_range, index_epoch=i, cadence=cadence\n )\n\n try:\n passes, alt, az = sat_pass(sats, t_arr, index_epoch, location=location)\n\n for pass_index in passes:\n time_array, sat_alt, sat_az = ephem_data(t_arr, pass_index, alt, az)\n\n sat_ephem[\"time_array\"].append(time_array)\n sat_ephem[\"sat_alt\"].append(sat_alt)\n sat_ephem[\"sat_az\"].append(sat_az)\n\n # Catch exceptions in sat_pass\n # sometimes sat_object is empty and can't be iterated over\n except Exception:\n pass\n\n plt = sat_plot(sat, sat_ephem[\"sat_alt\"], sat_ephem[\"sat_az\"], alpha=alpha)\n plt.savefig(f\"{out_dir}/ephem_plots/{sat}.png\")\n plt.close()\n np.savez_compressed(f\"{out_dir}/ephem_data/{sat}.npz\", **sat_ephem)\n\n return f\"Saved sky coverage plot of satellite [{sat}] to {out_dir}ephem_plots/{sat}.png \\nSaved ephemeris of satellite [{sat}] to {out_dir}ephem_data/{sat}.npz\"\n\n return f\"File {tle_dir}/{sat} is empty, skipping\"", "def snow_main_simple(inp_ta, inp_precip, inp_doy, inp_hourdec, dtstep, init_swe=None, init_d_snow=None, inp_sw=None, which_melt='clark2009', alb_swe_thres=5.0, \\\n num_secs_output=86400, **config):\n\n num_timesteps = inp_ta.shape[0]\n\n storage_interval = num_secs_output / dtstep\n # calculate how many days in input file\n num_out_steps = int(1 + num_timesteps / storage_interval)\n\n # set up storage arrays\n shape_xy = inp_ta.shape[1:]\n if len(shape_xy) == 2:\n st_swe = np.empty((num_out_steps, shape_xy[0], shape_xy[1])) * np.nan\n st_melt = np.empty((num_out_steps, shape_xy[0], shape_xy[1])) * np.nan\n st_acc = np.empty((num_out_steps, shape_xy[0], shape_xy[1])) * np.nan\n st_alb = np.empty((num_out_steps, shape_xy[0], shape_xy[1])) * np.nan\n elif len(shape_xy) == 1:\n st_swe = np.empty((num_out_steps, shape_xy[0])) * np.nan\n st_melt = np.empty((num_out_steps, shape_xy[0])) * np.nan\n st_acc = np.empty((num_out_steps, shape_xy[0])) * np.nan\n st_alb = np.empty((num_out_steps, shape_xy[0])) * np.nan\n # set up initial states of prognostic variables if not passed in\n if init_swe is None:\n init_swe = np.zeros(shape_xy) # default to no snow\n swe = init_swe\n if init_d_snow is None:\n init_d_snow = np.ones(shape_xy) * 30 # default to a month since snowfall\n d_snow = init_d_snow\n # set up daily buckets for melt and accumulation\n bucket_melt = swe * 0\n bucket_acc = swe * 0\n swe_day_before = swe * 0\n\n # store initial swe value\n st_swe[0, :] = init_swe\n st_melt[0, :] = 0\n st_acc[0, :] = 0\n ii = 1\n\n # run through and update SWE for each timestep in input data\n for i in range(num_timesteps):\n # d_snow += dtstep / 86400.0 # now handled with daily threshold in at end of each day\n if 'inp_alb' in config:\n config['alb'] = config['inp_alb'][i, :]\n\n swe, d_snow, melt, acc = calc_dswe(swe, d_snow, inp_ta[i, :], inp_precip[i, :], inp_doy[i], 
dtstep, sw=inp_sw[i, :], which_melt=which_melt, **config)\n\n # print swe[0]\n bucket_melt = bucket_melt + melt\n bucket_acc = bucket_acc + acc\n\n if i != 0 and inp_hourdec[i] == 0 or inp_hourdec[i] == 24: # output daily\n d_snow += 1\n swe_alb = swe - swe_day_before\n d_snow[(swe_alb > alb_swe_thres)] = 0\n swe_day_before = swe\n\n if (i + 1) % storage_interval == 0: # if timestep divisible by the storage interval\n st_swe[ii, :] = swe\n st_melt[ii, :] = bucket_melt\n st_acc[ii, :] = bucket_acc\n st_alb[ii, :] = calc_albedo_snow(d_snow, swe, **config)\n ii = ii + 1 # move storage counter for next output timestep\n bucket_melt = bucket_melt * 0 # reset buckets\n bucket_acc = bucket_acc * 0\n\n return st_swe, st_melt, st_acc, st_alb", "def Tsat(self, p):\n\n p = Pressure(p).MPa\n\n if p < 0.000611213 or p > 22.064:\n raise ValueError('No saturation temperature for this pressure')\n\n n = self.data['n']\n beta = p ** 0.25\n E = beta ** 2 + n[2] * beta + n[5]\n F = n[0] * beta ** 2 + n[3] * beta + n[6]\n G = n[1] * beta ** 2 + n[4] * beta + n[7]\n D = (2 * G) / (-F - np.sqrt(F ** 2 - 4 * E * G))\n\n return Temperature(0.5 * (n[9] + D - np.sqrt((n[9] + D) ** 2 - 4 * (n[8] + n[9] * D))))", "def makestations(self,P,T,Tmin,Tmax):\r\n rainstation = self.project.rainfall_stations.add('Grebenau avg',P,(0,0,0))\r\n self.project.use_nearest_rainfall()\r\n\r\n # Temperature data\r\n meteo = self.project.meteo_stations.add_station('Grebenau avg',(0,0,0))\r\n meteo.T = T\r\n meteo.Tmin = Tmin\r\n meteo.Tmax = Tmax\r\n self.project.use_nearest_meteo()\r\n \r\n return rainstation", "def altimeter_to_slp(altim, elev, T):\n #Bring in the neccessary libraries\n from metpy.units import units\n import metpy.constants as mpconsts\n from numpy import exp\n\n #Make sure the temperature is in Kelvin\n T = T.to('kelvin')\n\n #Make sure the elevation is measured in meters\n z = elev.to('meter')\n\n #Calculate the station pressure using the function altimeter_to_station_pressure()\n p = altimeter_to_station_pressure(altim, elev)\n\n #Calculate the scale height\n H = mpconsts.Rd * T / mpconsts.g\n\n #Calculate the pressure at sea level\n psl = p * exp(z/H)\n\n return psl", "def test_earth_relief_01d_igpp_synbath(data_source):\n data = load_earth_relief(resolution=\"01d\", data_source=data_source)\n assert data.name == \"elevation\"\n assert data.attrs[\"units\"] == \"meters\"\n assert data.attrs[\"long_name\"] == \"Earth elevation relative to the geoid\"\n assert data.attrs[\"vertical_datum\"] == \"EGM96\"\n assert data.attrs[\"horizontal_datum\"] == \"WGS84\"\n assert data.gmt.registration == 0\n assert data.shape == (181, 361)\n npt.assert_allclose(data.lat, np.arange(-90, 91, 1))\n npt.assert_allclose(data.lon, np.arange(-180, 181, 1))\n npt.assert_allclose(data.min(), -8600.5)\n npt.assert_allclose(data.max(), 5559.0)", "def get_earth_tesseral_switch(self):\n return self.get_abstract_item(\"General\", \"Earth Tesseral switch\")", "def choose_kepler_spline(all_time,\n all_flux,\n bkspaces,\n maxiter=5,\n penalty_coeff=1.0,\n verbose=True,\n all_input_mask=None):\n # Initialize outputs.\n best_spline = None\n metadata = SplineMetadata()\n\n # Compute the assumed standard deviation of Gaussian white noise about the\n # spline model. We assume that each flux value f[i] is a Gaussian random\n # variable f[i] ~ N(s[i], sigma^2), where s is the value of the true spline\n # model and sigma is the constant standard deviation for all flux values.\n # Moreover, we assume that s[i] ~= s[i+1]. 
Therefore,\n # (f[i+1] - f[i]) / sqrt(2) ~ N(0, sigma^2).\n scaled_diffs = [np.diff(f) / np.sqrt(2) for f in all_flux]\n scaled_diffs = np.concatenate(scaled_diffs) if scaled_diffs else np.array([])\n if not scaled_diffs.size:\n best_spline = [np.array([np.nan] * len(f)) for f in all_flux]\n metadata.light_curve_mask = [\n np.zeros_like(f, dtype=np.bool) for f in all_flux\n ]\n return best_spline, metadata\n\n # Compute the median absolute deviation as a robust estimate of sigma. The\n # conversion factor of 1.48 takes the median absolute deviation to the\n # standard deviation of a normal distribution. See, e.g.\n # https://www.mathworks.com/help/stats/mad.html.\n sigma = np.median(np.abs(scaled_diffs)) * 1.48\n \n \n #Now if we don't input any input mask we need to create a set of input masks that are all true\n if np.all(all_input_mask == None): \n all_input_mask = []\n for eachtime in all_time:\n all_input_mask.append(np.ones_like(eachtime, dtype=np.bool))\n \n \n for bkspace in bkspaces:\n nparams = 0 # Total number of free parameters in the piecewise spline.\n npoints = 0 # Total number of data points used to fit the piecewise spline.\n ssr = 0 # Sum of squared residuals between the model and the spline.\n\n spline = []\n light_curve_mask = []\n bad_bkspace = False # Indicates that the current bkspace should be skipped.\n for time, flux, this_input_mask in zip(all_time, all_flux, all_input_mask):\n # Fit B-spline to this light-curve segment.\n try:\n spline_piece, mask = kepler_spline(\n time, flux, bkspace=bkspace, maxiter=maxiter, input_mask = this_input_mask)\n except InsufficientPointsError as e:\n # It's expected to occasionally see intervals with insufficient points,\n # especially if periodic signals have been removed from the light curve.\n # Skip this interval, but continue fitting the spline.\n if verbose:\n warnings.warn(str(e))\n spline.append(np.array([np.nan] * len(flux)))\n light_curve_mask.append(np.zeros_like(flux, dtype=np.bool))\n continue\n except SplineError as e:\n # It's expected to get a SplineError occasionally for small values of\n # bkspace. 
Skip this bkspace.\n if verbose:\n warnings.warn(\"Bad bkspace {}: {}\".format(bkspace, e))\n metadata.bad_bkspaces.append(bkspace)\n bad_bkspace = True\n break\n\n spline.append(spline_piece)\n light_curve_mask.append(mask)\n\n # Accumulate the number of free parameters.\n total_time = np.max(time) - np.min(time)\n nknots = int(total_time / bkspace) + 1 # From the bspline implementation.\n nparams += nknots + 3 - 1 # number of knots + degree of spline - 1\n\n # Accumulate the number of points and the squared residuals.\n npoints += np.sum(mask)\n ssr += np.sum((flux[mask] - spline_piece[mask])**2)\n\n if bad_bkspace or not npoints:\n continue\n\n # The following term is -2*ln(L), where L is the likelihood of the data\n # given the model, under the assumption that the model errors are iid\n # Gaussian with mean 0 and standard deviation sigma.\n likelihood_term = npoints * np.log(2 * np.pi * sigma**2) + ssr / sigma**2\n\n # Penalty term for the number of parameters used to fit the model.\n penalty_term = nparams * np.log(npoints)\n\n # Bayesian information criterion.\n bic = likelihood_term + penalty_coeff * penalty_term\n\n if best_spline is None or bic < metadata.bic:\n best_spline = spline\n metadata.light_curve_mask = light_curve_mask\n metadata.input_light_curve_mask = all_input_mask\n metadata.bkspace = bkspace\n metadata.likelihood_term = likelihood_term\n metadata.penalty_term = penalty_term\n metadata.bic = bic\n\n if best_spline is None:\n # All bkspaces resulted in a SplineError, or all light curve intervals had\n # insufficient points.\n best_spline = [np.array([np.nan] * len(f)) for f in all_flux]\n metadata.light_curve_mask = [\n np.zeros_like(f, dtype=np.bool) for f in all_flux\n ]\n metadata.input_light_curve_mask = [\n np.zeros_like(f, dtype=np.bool) for f in all_flux\n ]\n \n\n return best_spline, metadata", "def eme_700day_2018(self, flyby_dist):\n elements_earth_launch = [149600147887.68948, 0.016678913612078988, 4.2914948114489875e-05, 3.0511667130642532, 5.029905567754205, 2.3790693213784753]\n elements_mars = [227935414087.26007, 0.0933324133158771, 0.03225613287661293, 0.8640335870435378, 5.003255854143598, 6.049811097247842]\n elements_earth_entry = [149597795100.11346, 0.016733410527094122, 4.642665726484085e-05, 3.095208080733212, 4.985102945894168, 2.3882414620556034]\n\n launchepoch = '2018-05-22 00:00:00'\n marsflybyepoch = '2018-Aug-22 00:00:00'\n entryepoch = '2020-May-22 00:00:00'\n\n sequence = [ (launchepoch, planet_ss('earth'), elements_earth_launch, 200*1000),\n (marsflybyepoch, planet_ss('mars'), elements_mars, flyby_dist),\n (entryepoch, planet_ss('earth'), elements_earth_entry, 0) ]\n\n planets = []\n for pl in sequence:\n name = pl[1].name\n muself = pl[1].mu_self\n musun = pl[1].mu_central_body\n radius = pl[1].radius\n body = planet(epoch_from_string(pl[0]), pl[2], musun, muself, radius, radius+pl[3], name)\n planets.append(body)\n\n return planets", "def perihelion_aphelion(epoch, perihelion=True):\n\n if not isinstance(epoch, Epoch):\n raise TypeError(\"Invalid input value\")\n # First approximation\n k = 0.03393 * (epoch.year() - 2003.52)\n if perihelion:\n k = round(k)\n else:\n k = round(k + 0.5) - 0.5\n jde = 2452830.12 + k * (10764.21676 - k * 0.000827)\n # Compute the epochs three months before and after\n jde_before = jde - 90.0\n jde_after = jde + 90.0\n # Compute the Sun-Saturn distance for each epoch\n l, b, r_b = Saturn.geometric_heliocentric_position(Epoch(jde_before))\n l, b, r = Saturn.geometric_heliocentric_position(Epoch(jde))\n 
l, b, r_a = Saturn.geometric_heliocentric_position(Epoch(jde_after))\n # Call an interpolation object\n m = Interpolation([jde_before, jde, jde_after], [r_b, r, r_a])\n sol = m.minmax()\n return Epoch(sol)", "def compute_solar_day_sunset(self, previous_day_panchaanga=None):\n # If solar transition happens before the current sunset but after the previous sunset, then that is taken to be solar day 1.\n self.compute_sun_moon_transitions(previous_day_panchaanga=previous_day_panchaanga)\n solar_month_sunset = NakshatraDivision(jd=self.jd_sunset, ayanaamsha_id=self.computation_system.ayanaamsha_id).get_anga(\n anga_type=AngaType.SIDEREAL_MONTH)\n\n solar_sidereal_month_end_jd = None\n if previous_day_panchaanga is None or previous_day_panchaanga.solar_sidereal_date_sunset.day > 28 :\n anga_finder = zodiac.AngaSpanFinder.get_cached(ayanaamsha_id=self.computation_system.ayanaamsha_id, anga_type=AngaType.SIDEREAL_MONTH)\n solar_month_sunset_span = anga_finder.find(jd1=self.jd_sunset - 32, jd2=self.jd_sunset + 5, target_anga_id=solar_month_sunset)\n solar_sidereal_month_day_sunset = len(self.city.get_sunsets_in_period(jd_start=solar_month_sunset_span.jd_start, jd_end=self.jd_sunset + 1/48.0))\n if solar_sidereal_month_day_sunset == 1 and solar_month_sunset_span.jd_start > self.jd_sunrise:\n solar_sidereal_month_end_jd = solar_month_sunset_span.jd_start\n elif solar_sidereal_month_day_sunset == 30 and solar_month_sunset_span.jd_end < self.jd_next_sunrise:\n solar_sidereal_month_end_jd = solar_month_sunset_span.jd_end\n else:\n solar_sidereal_month_day_sunset = previous_day_panchaanga.solar_sidereal_date_sunset.day + 1\n from jyotisha.panchaanga.temporal import time\n self.solar_sidereal_date_sunset = time.BasicDateWithTransitions(month=solar_month_sunset.index, day=solar_sidereal_month_day_sunset, month_transition=solar_sidereal_month_end_jd)", "def calc_a_sat(self):\n\n # get the flux data at 100kW\n flux_data = extract_mcnp('n', self.experiment.P)\n\n # sum to only energy dependent (exclude the first cos group)\n flux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\n\n # get response functions\n responses = response_data()\n\n # this pulls only the rfs for the gold foil tube\n response_functions = []\n for name, response in responses.items():\n if 'au' in name:\n response_functions.append(response.int)\n response_functions = np.array(response_functions)\n\n # fold the rfs and the flux together, convert to uCi / atom\n a_sat_atom = np.sum(response_functions * flux, axis=1) * (1 / 3.7E4)\n\n # only care about the ones that match the experiment\n self.a_sat_atom = a_sat_atom[:self.experiment.n]\n\n return", "def test_earth_relief_30s_synbath():\n data = load_earth_relief(\n region=[-95, -94, -1.5, -1],\n resolution=\"30s\",\n registration=\"pixel\",\n data_source=\"synbath\",\n )\n assert data.shape == (60, 120)\n npt.assert_allclose(data.min(), -3552.5)\n npt.assert_allclose(data.max(), -2154)", "def ephem_data(t_arr, pass_index, alt, az):\n\n i, j = pass_index\n\n # A list of times at which alt/az were calculated\n # Convert to unix time to match the rf explorer timestamps\n time_array = Time(t_arr.tt[i : j + 1], scale=\"tt\", format=\"jd\").unix\n\n sat_az = az.radians[i : j + 1]\n sat_alt = alt.degrees[i : j + 1]\n\n return (time_array, sat_alt, sat_az)", "def gen_planets_image(self, PA_offset=0, quick_PSF=True, use_cmask=False, wfe_drift=None, **kwargs):\n if len(self.planets)==0:\n _log.info(\"No planet info at self.planets\")\n return 0.0\n\n if PA_offset is None: PA_offset=0\n\n image_shape = 
ypix, xpix = (self.det_info['ypix'], self.det_info['xpix'])\n image = np.zeros(image_shape)\n bar_offset = self.bar_offset\n bar_offpix = bar_offset / self.pixelscale\n for pl in self.planets:\n\n # Create slope image (postage stamp) of planet\n if pl.get('sptype') is None:\n sp = self.planet_spec(**pl)\n else:\n sp = stellar_spectrum(pl['sptype'])\n renorm_args = pl['renorm_args']\n if (renorm_args is not None) and (len(renorm_args) > 0):\n sp_norm = sp.renorm(*renorm_args)\n sp_norm.name = sp.name\n sp = sp_norm\n\n # Location relative to star\n xoff, yoff = pl['xyoff_pix']\n\n # Add in PA offset\n if PA_offset!=0:\n xoff, yoff = xy_rot(xoff, yoff, PA_offset)\n # Convert to arcsec\n xoff_asec, yoff_asec = np.array([xoff, yoff]) * self.pix_scale\n\n # Add in bar offset for PSF generation\n xoff_asec += self.bar_offset\n r, th = xy_to_rtheta(xoff_asec, yoff_asec)\n if quick_PSF:\n psf_planet = self.gen_offset_psf(r, th, return_oversample=False)\n obs = S.Observation(sp, self.bandpass, binset=self.bandpass.wave)\n psf_planet *= obs.effstim('counts')\n else:\n psf_planet = self.gen_offset_psf(r, th, sp=sp, return_oversample=False, wfe_drift=wfe_drift)\n\n # Expand to full size\n psf_planet = pad_or_cut_to_size(psf_planet, image_shape)\n # Shift to position relative to center of image\n delx, dely = (xoff + bar_offpix, yoff)\n if ('FULL' in self.det_info['wind_mode']) and (self.mask is not None):\n cdict = coron_ap_locs(self.module, self.channel, self.mask, full=True)\n xcen, ycen = cdict['cen_sci']\n delx += (xcen - xpix/2)\n dely += (ycen - ypix/2)\n psf_planet = fshift(psf_planet, delx=delx, dely=dely, pad=True)\n\n # Determine if any throughput loss due to coronagraphic mask\n # artifacts, such as the mask holder or ND squares.\n # Planet positions are relative to the center of the mask,\n # which is not centered in a full detector.\n # All subarrays should have the mask placed at the center.\n detid = self.Detectors[0].detid\n cmask = self.mask_images[detid]\n if use_cmask and (cmask is not None):\n # First, anything in a rectangular region around the\n # mask has already been correctly accounted for\n if (np.abs(xoff_asec+bar_offset)<10) and (np.abs(yoff_asec)<5):\n trans = 1\n elif 'FULL' in self.det_info['wind_mode']:\n # If a full detector observation, then adjust\n # to be relative to mask location\n xpos = int(xcen + xoff + bar_offpix)\n ypos = int(ycen + yoff)\n cmask_sub = cmask[ypos-3:ypos+3,xpos-3:xpos+3]\n trans = np.mean(cmask_sub)\n else:\n xpos, ypox = (int(delx), int(dely))\n cmask_sub = cmask[ypos-3:ypos+3,xpos-3:xpos+3]\n trans = np.mean(cmask_sub)\n\n #print(trans)\n psf_planet *= trans\n\n # Add to image\n image += psf_planet\n\n return image", "def corestationary(self,guess=None):\n if guess is None: guess = np.array(self.y0[:-1])\n else: guess = np.array(guess)\n y = self.model.inputSX(cs.DAE_X)\n t = self.model.inputSX(cs.DAE_T)\n p = self.model.inputSX(cs.DAE_P)\n ode = self.model.outputSX()\n fn = cs.SXFunction([y,t,p],[ode])\n kfn = cs.KinsolSolver(fn)\n abstol = 1E-10\n kfn.setOption(\"abstol\",abstol)\n kfn.setOption(\"constraints\",(2,)*self.NEQ)\n kfn.setOption(\"linear_solver\",\"dense\")\n kfn.setOption(\"numeric_jacobian\",True)\n kfn.setOption(\"u_scale\",(100/guess).tolist())\n kfn.setOption(\"numeric_hessian\",True)\n kfn.setOption(\"disable_internal_warnings\",True)\n kfn.init()\n kfn.setInput(self.paramset,1)\n kfn.setOutput(guess)\n kfn.evaluate()\n y0out = kfn.output().toArray()\n \n if any(np.isnan(y0out)):\n raise RuntimeError(\"findstationary: 
KINSOL failed to find \\\n acceptable solution\")\n \n self.ss = y0out.flatten()\n \n if np.linalg.norm(self.dydt(self.ss)) >= abstol or any(y0out <= 0):\n raise RuntimeError(\"findstationary: KINSOL failed to reach \\\n acceptable bounds\")\n \n self.eigs = np.linalg.eigvals(self.dfdy(self.ss))", "def makeKst2Kpi(name,\n KaonPT,\n KaonIPCHI2,\n PionPT,\n PionIPCHI2,\n PionPIDK,\n KstarPT,\n KaonPIDK,\n KstarVCHI2,\n KstarMassWin):\n\n _stdKaons = DataOnDemand(Location=\"Phys/StdLooseKaons/Particles\")\n _stdPions = DataOnDemand(Location=\"Phys/StdLoosePions/Particles\")\n\n _Kstar2Kpi = CombineParticles()\n\n _Kstar2Kpi.DecayDescriptor = \"[K*(892)0 -> K+ pi-]cc\"\n _Kstar2Kpi.DaughtersCuts = {\"K+\" : \"(PT > %(KaonPT)s *MeV) & (PIDK > %(KaonPIDK)s) & (MIPCHI2DV(PRIMARY)> %(KaonIPCHI2)s)\" % locals()\n ,\"pi-\" : \"(PT > %(PionPT)s *MeV) & (PIDK < %(PionPIDK)s) & (MIPCHI2DV(PRIMARY)> %(PionIPCHI2)s)\"% locals()}\n\n _Kstar2Kpi.CombinationCut = \"(ADAMASS('K*(892)0') < %(KstarMassWin)s *MeV)\"% locals()\n _Kstar2Kpi.MotherCut = \"(VFASPF(VCHI2/VDOF)< %(KstarVCHI2)s) & (PT > %(KstarPT)s *MeV)\"% locals()\n\n\n return Selection (name,\n Algorithm = _Kstar2Kpi,\n RequiredSelections = [_stdKaons,_stdPions])", "def set_earth_tesseral_switch(self, switch=False):\n self.set_abstract_item(\"General\", \"Earth Tesseral switch\", switch)", "def soli8e(ex, ey, ez, ep, D, eqp=None):\n ir = ep[0]\n ngp = ir*ir*ir\n\n if eqp == None:\n eq = np.zeros((3, 1))\n else:\n eq = eqp\n\n if ir == 1:\n g1 = 0.0\n w1 = 2.0\n gp = np.array([g1, g1, g1]).reshape(1, 3)\n w = np.array([w1, w1, w1]).reshape(1, 3)\n elif ir == 2:\n g1 = 0.577350269189626\n w1 = 1\n gp = np.zeros((8, 3))\n w = np.zeros((8, 3))\n gp[:, 0] = np.array([-1, 1, 1, -1, -1, 1, 1, -1])*g1\n w[:, 0] = np.array([1, 1, 1, 1, 1, 1, 1, 1])*w1\n gp[:, 1] = np.array([-1, -1, 1, 1, -1, -1, 1, 1])*g1\n w[:, 1] = np.array([1, 1, 1, 1, 1, 1, 1, 1])*w1\n gp[:, 2] = np.array([-1, -1, -1, -1, 1, 1, 1, 1])*g1\n w[:, 2] = np.array([1, 1, 1, 1, 1, 1, 1, 1])*w1\n else:\n g1 = 0.774596669241483,\n g2 = 0.0\n w1 = 0.555555555555555\n w2 = 0.888888888888888\n\n gp = np.zeros((27, 3))\n w = np.zeros((27, 3))\n\n I1 = np.array([-1, 0, 1, -1, 0, 1, -1, 0, 1]).reshape(1, 9)\n I2 = np.array([0, -1, 0, 0, 1, 0, 0, 1, 0]).reshape(1, 9)\n\n gp[:, 0] = np.concatenate((I1, I1, I1), axis=1)*g1\n gp[:, 0] = np.concatenate((I2, I2, I2), axis=1)*g2 + gp[:, 0]\n\n I1 = np.abs(I1)\n I2 = np.abs(I2)\n\n w[:, 0] = np.concatenate((I1, I1, I1), axis=1)*w1\n w[:, 0] = np.concatenate((I2, I2, I2), axis=1)*w2 + w[:, 0]\n\n I1 = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1]).reshape(1, 9)\n I2 = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0]).reshape(1, 9)\n\n gp[:, 1] = np.concatenate((I1, I1, I1), axis=1)*g1\n gp[:, 1] = np.concatenate((I2, I2, I2), axis=1)*g2 + gp[:, 1]\n\n I1 = np.abs(I1)\n I2 = np.abs(I2)\n\n w[:, 1] = np.concatenate((I1, I1, I1), axis=1)*w1\n w[:, 1] = np.concatenate((I2, I2, I2), axis=1)*w2 + w[:, 1]\n\n I1 = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1]).reshape(1, 9)\n I2 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0]).reshape(1, 9)\n I3 = np.abs(I1)\n\n gp[:, 2] = np.concatenate((I1, I2, I3), axis=1)*g1\n gp[:, 2] = np.concatenate((I2, I3, I2), axis=1)*g2 + gp[:, 2]\n\n w[:, 2] = np.concatenate((I3, I2, I3), axis=1)*w1\n w[:, 2] = np.concatenate((I2, I3, I2), axis=1)*w2 + w[:, 2]\n\n wp = w[:, 0]*w[:, 1]*w[:, 2]\n\n xsi = gp[:, 0]\n eta = gp[:, 1]\n zet = gp[:, 2]\n r2 = ngp*3\n\n N = np.zeros((ngp, 8))\n dNr = np.zeros((r2, 8))\n\n N[:, 0] = (1-xsi)*(1-eta)*(1-zet)/8\n N[:, 1] = 
(1+xsi)*(1-eta)*(1-zet)/8\n N[:, 2] = (1+xsi)*(1+eta)*(1-zet)/8\n N[:, 3] = (1-xsi)*(1+eta)*(1-zet)/8\n N[:, 4] = (1-xsi)*(1-eta)*(1+zet)/8\n N[:, 5] = (1+xsi)*(1-eta)*(1+zet)/8\n N[:, 6] = (1+xsi)*(1+eta)*(1+zet)/8\n N[:, 7] = (1-xsi)*(1+eta)*(1+zet)/8\n\n dNr[0:r2+1:3, 0] = -(1-eta)*(1-zet)\n dNr[0:r2+1:3, 1] = (1-eta)*(1-zet)\n dNr[0:r2+1:3, 2] = (1+eta)*(1-zet)\n dNr[0:r2+1:3, 3] = -(1+eta)*(1-zet)\n dNr[0:r2+1:3, 4] = -(1-eta)*(1+zet)\n dNr[0:r2+1:3, 5] = (1-eta)*(1+zet)\n dNr[0:r2+1:3, 6] = (1+eta)*(1+zet)\n dNr[0:r2+1:3, 7] = -(1+eta)*(1+zet)\n dNr[1:r2+2:3, 0] = -(1-xsi)*(1-zet)\n dNr[1:r2+2:3, 1] = -(1+xsi)*(1-zet)\n dNr[1:r2+2:3, 2] = (1+xsi)*(1-zet)\n dNr[1:r2+2:3, 3] = (1-xsi)*(1-zet)\n dNr[1:r2+2:3, 4] = -(1-xsi)*(1+zet)\n dNr[1:r2+2:3, 5] = -(1+xsi)*(1+zet)\n dNr[1:r2+2:3, 6] = (1+xsi)*(1+zet)\n dNr[1:r2+2:3, 7] = (1-xsi)*(1+zet)\n dNr[2:r2+3:3, 0] = -(1-xsi)*(1-eta)\n dNr[2:r2+3:3, 1] = -(1+xsi)*(1-eta)\n dNr[2:r2+3:3, 2] = -(1+xsi)*(1+eta)\n dNr[2:r2+3:3, 3] = -(1-xsi)*(1+eta)\n dNr[2:r2+3:3, 4] = (1-xsi)*(1-eta)\n dNr[2:r2+3:3, 5] = (1+xsi)*(1-eta)\n dNr[2:r2+3:3, 6] = (1+xsi)*(1+eta)\n dNr[2:r2+3:3, 7] = (1-xsi)*(1+eta)\n\n dNr = dNr/8.0\n\n Ke = np.zeros((24, 24))\n fe = np.zeros((24, 1))\n\n ex = np.asarray(ex).reshape((8, 1))\n ey = np.asarray(ey).reshape((8, 1))\n ez = np.asarray(ez).reshape((8, 1))\n\n JT = dNr@np.concatenate((ex, ey, ez), axis=1)\n\n eps = np.finfo(float).eps\n\n for i in range(ngp):\n indx = [i*3, i*3+1, i*3+2]\n detJ = np.linalg.det(JT[indx, :])\n if detJ < 10*eps:\n print('Jacobideterminant equal or less than zero!')\n JTinv = np.linalg.inv(JT[indx, :])\n dNx = JTinv@dNr[indx, :]\n\n B = np.zeros((6, 24))\n N2 = np.zeros((3, 24))\n\n B[0, 0:24:3] = dNx[0, :]\n B[1, 1:25:3] = dNx[1, :]\n B[2, 2:26:3] = dNx[2, :]\n B[3, 0:24:3] = dNx[1, :]\n B[3, 1:25:3] = dNx[0, :]\n B[4, 0:24:3] = dNx[2, :]\n B[4, 2:26:3] = dNx[0, :]\n B[5, 1:25:3] = dNx[2, :]\n B[5, 2:26:3] = dNx[1, :]\n\n N2[0, 0:24:3] = N[i, :]\n N2[1, 1:25:3] = N[i, :]\n N2[2, 2:26:3] = N[i, :]\n\n Ke = Ke + (np.transpose(B)@D@B)*detJ*wp[i]\n fe = fe + (np.transpose(N2)@eq)*detJ*wp[i]\n\n if eqp != None:\n return Ke, fe\n else:\n return Ke" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Propagates a set of keplerian elements.
def propagate_kep(kep,t0,tf,bstar=0.21109E-4): sat = kep_to_sat(kep,t0,bstar=bstar) tf = datetime.utcfromtimestamp(tf).timetuple() pos, vel = sat.propagate( tf.tm_year, tf.tm_mon, tf.tm_mday, tf.tm_hour, tf.tm_min, tf.tm_sec) return np.array(list(pos)),np.array(list(vel))
[ "def propagate(self):\n generations = {}\n nodes = self.getAllNodes()\n\n for n in nodes:\n nGen = len(self.ancestors(n))\n generations.setdefault(nGen, []).append(n)\n\n nGen = range(1, max(generations.keys())+1)\n for gen in nGen:\n thisGeneration = generations[gen]\n for node in thisGeneration:\n parentNode = self.nodeDict[node.parentLabel()]\n #add the parent is is not the top parent\n if parentNode: node.add(parentNode)", "def run_propagations( g ):\n assert g\n print('run_propagations')\n #list of propagations\n lcppropagations = ['qquality','path','partof']\n for i in lcppropagations:\n g = enrich_workflow_prop( g, i )\n\n g = run_inferences( g )\n return g", "def partitions_iter(elements,currentset=[]):\n if not elements:\n yield currentset\n else:\n for newset in powerset(elements):\n if newset:\n for p in partitions_iter([a for a in elements if a not in list(squash(currentset))+list(newset)],currentset+[list(newset)]):\n yield p", "def propagate(self):\n for sample in self.input_value:\n # perform forward propagation on one sample\n layer_output = sample\n for l in self.layers:\n layer_output = l.activate(layer_output)\n self.forward_propagation_output.append(layer_output) #stores propagation output value of one sample\n return self.forward_propagation_output", "def propagate(self,update_ids=True):\n \n #This is not necesary for all propagations, but is safer to do it\n #When propagating a sub system this must not be done\n if update_ids: self.update_ids()\n \n while len(self._np_rays)>0:\n ri=self._np_rays.pop(0)\n self.propagate_ray(ri)\n self._p_rays.append(ri)", "def propagate(self, session: Session) -> None:\n\n # Push all inputs\n for port in self.In:\n port.push(session)\n\n # Propagate essential children (in topological order)\n for child in self.blocks.essential_sorted():\n child.propagate(session)\n\n for port in child.Out:\n port.push(session)", "def pierce_elements(self, aCentroid, aEID, pSource, normal):\r\n #direction = -1. 
# TODO: direction of normal...?\r\n (sElements, sDists) = self.centroidTree.getCloseElementIDs(aCentroid)\r\n log.info(\"aCentroid = %s\" % aCentroid)\r\n log.info(\"sElements = %s\" % sElements)\r\n log.info(\"sDists = %s\" % ListPrint(sDists))\r\n #(nearbySElements, nearbyDistances) = sElements\r\n piercedElements = []\r\n\r\n for sEID, sDist in zip(sElements, sDists):\r\n #print \"aEID=%s sEID=%s\" % (aEID, sEID)\r\n (sArea, sNormal, sCentroid) = self.structuralModel.get_element_properties(sEID)\r\n sNodes = self.structuralModel.get_element_nodes(sEID)\r\n nNodes = len(sNodes)\r\n\r\n pEnd = pSource + normal * 10.\r\n #pEnd2 = pSource - normal * 10.\r\n if nNodes == 3: # TODO: is this enough of a breakdown?\r\n (sA, sB, sC) = sNodes\r\n #pEnd = pSource+normal*10.\r\n tuv = pierce_plane_vector(sA, sB, sC, pSource, pEnd, piercedElements)\r\n #tuv2= piercePlaneVector(sA, sB, sC, pSource, pEnd2, piercedElements)\r\n elif nNodes == 4:\r\n (sA, sB, sC, sD) = sNodes\r\n tuv = pierce_plane_vector(sA, sB, sC, pSource, pEnd, piercedElements)\r\n #tuv2= piercePlaneVector(sA, sB, sC, pSource, pEnd2, piercedElements)\r\n #self.pierceTriangle(sA, sB, sC, sCentroid, sNormal, piercedElements)\r\n #self.pierceTriangle(sA, sC, sD, sCentroid, sNormal, piercedElements)\r\n else:\r\n raise RuntimeError('invalid element; nNodes=%s' % nNodes)\r\n\r\n t1, u1, v1 = tuv\r\n #t2, u2, v2 = tuv2\r\n\r\n isInside = False\r\n #if self.isInside(u1, v1) or self.isInside(u2, v2):\r\n if self.is_inside(u1, v1):\r\n isInside = True\r\n #pIntersect = pSource + (pEnd - pSource) * t1\r\n pIntersect = pEnd * t1 +pSource * (1 - t1)\r\n #P = A + (B - A) * t\r\n tuv = pierce_plane_vector(sA, sB, sC, pSource, pIntersect, piercedElements)\r\n #print \"t,u,v=\", tuv\r\n\r\n piercedElements.append([sEID, pIntersect, u1, v1, sDist])\r\n\r\n #t = min(t1, t2)\r\n #print \"t1=%6.3g t2=%6.3g\" % (t1, t2)\r\n #if isInside:\r\n #print \"*t[%s]=%6.3g u1=%6.3g v1=%6.3g u2=%6.3g v2=%6.3g\" %(sEID,t,u1,v1,u2,v2)\r\n #else:\r\n #print \" t[%s]=%6.3g u1=%6.3g v1=%6.3g u2=%6.3g v2=%6.3g\" %(sEID,t,u1,v1,u2,v2)\r\n\r\n #if isInside:\r\n #print \"*t[%s]=%6.3g u1=%6.3g v1=%6.3g d=%g\" %(sEID,t1,u1,v1,sDist)\r\n #else:\r\n #print \" t[%s]=%6.3g u1=%6.3g v1=%6.3g d=%g\" %(sEID,t1,u1,v1,sDist)\r\n\r\n log.info(\"avgDist = %g\" % mean(sDists))\r\n (piercedElements, nPiercings) = self.fix_piercings(sElements, piercedElements)\r\n distribution = self.distribute_unit_load(aEID, piercedElements, nPiercings)\r\n\r\n return (distribution)", "def apply_gravity(self, g):\n for element in self._elements:\n element.calculate_gravity(g)", "def peel_clusters(self, *args, **kwargs):\n for layer in self.graph.S.values():\n for vertex in layer.values():\n if vertex.cluster is not None:\n cluster = self.get_vertex_cluster(vertex)\n self.peel_edge(cluster, vertex)", "def linBP_directed(X, W, P,\n eps=1,\n echo=True,\n numMaxIt=10,\n convergencePercentage=None, convergenceThreshold=0.9961947,\n debug=1,\n paperVariant=True):\n\n\n # -- Create variables for convergence checking and debugging\n if debug >= 1:\n n, n2 = W.shape\n n3, k = X.shape\n k2, k3 = P.shape\n assert(n == n2 & n2 == n3)\n assert(k == k2 & k2 == k3)\n if debug >= 2:\n F1 = X.copy()\n if debug >= 3:\n listF = [X] # store the belief matrices for each iteration\n listConverged = [] # store the percentage of converged nodes for each iteration\n\n # -- Initialize values\n Pc1 = row_recentered_residual(P, paperVariant=paperVariant).dot(eps) # scaled by eps\n Pc2T = row_recentered_residual(P.transpose(), 
paperVariant=paperVariant).dot(eps)\n WsT = W.transpose()\n Cstar = (WsT.dot( np.ones((n, k), dtype=np.int) ).dot(Pc1) + W.dot( np.ones((n, k), dtype=np.int) ).dot(Pc2T)).dot(1./k)\n F = X\n Const = X + Cstar # Cstar includes\n\n if echo:\n D_in = degree_matrix(W, indegree=True, undirected=False, squared=True)\n D_out = degree_matrix(W, indegree=False, undirected=False, squared=True)\n Pstar1 = Pc2T * Pc1\n Pstar2 = Pc1 * Pc2T\n\n # -- Actual loop including convergence conditions\n converged = False\n actualNumIt = 0\n\n\n while actualNumIt < numMaxIt and not converged:\n actualNumIt += 1\n\n # -- Calculate new beliefs\n if echo is False:\n F = Const + WsT.dot(F).dot(Pc1) + W.dot(F).dot(Pc2T)\n else:\n F = Const + WsT.dot(F).dot(Pc1) + W.dot(F).dot(Pc2T) - D_in.dot(F).dot(Pstar1) - D_out.dot(F).dot(Pstar2)\n\n # -- Check convergence and store information if debug\n if convergencePercentage is not None or debug >= 2:\n actualPercentageConverged = matrix_convergence_percentage(F1, F, threshold=convergenceThreshold) # TODO: allow similarity\n diff = np.linalg.norm(F - F1) # interrupt loop if it is diverging\n if (convergencePercentage is not None and actualPercentageConverged >= convergencePercentage)\\\n or (diff > 1e10):\n converged = True\n F1 = F # save for comparing in *next* iteration\n\n if debug == 3:\n listF.append(F) # stores (actualNumIt+1) values\n listConverged.append(actualPercentageConverged)\n\n\n # -- Various return formats\n if debug <= 1:\n return F\n elif debug == 2:\n return F, actualNumIt, actualPercentageConverged\n else:\n return np.array(listF), actualNumIt, listConverged", "def add_many(self, edges):\n edges_ = (e.edge for e in edges)\n super(WeightedEdgeSet, self).add_many(edges_)\n\n for e in edges:\n self.update_weight(e.src, e.dest, e.weight)", "def distribute_entrypoint_weight(fep_nodes, fep_population,\n entrypoints_to_edges):\n edge_population = defaultdict(float)\n no_paths_found = 0\n for entry in fep_nodes:\n num_paths_at_fep = sum([len(v) for v in entrypoints_to_edges[entry].values()])\n if num_paths_at_fep > 0:\n average_weight = fep_population[entry] / float(num_paths_at_fep)\n for exit, feasible_edges in entrypoints_to_edges[entry].items():\n for (s,t) in feasible_edges:\n edge_population[(s,t)] += average_weight\n else:\n no_paths_found += 1\n print \"For {0} entries no trips have been found.\".format(no_paths_found)\n return edge_population\n #return dict(edge_population)", "def update_all_apertures(self, to_iterate=True):\n gb = self.gb\n for g, d in gb:\n\n apertures = np.ones(g.num_cells)\n if g.dim == (self.Nd - 1):\n # Initial aperture\n\n apertures *= self.initial_aperture\n # Reconstruct the displacement solution on the fracture\n g_h = gb.node_neighbors(g)[0]\n data_edge = gb.edge_props((g, g_h))\n if pp.STATE in data_edge:\n u_mortar_local = self.reconstruct_local_displacement_jump(\n data_edge, from_iterate=to_iterate\n )\n apertures -= u_mortar_local[-1].clip(max=0)\n if to_iterate:\n pp.set_iterate(\n d,\n {\"aperture\": apertures.copy(), \"specific_volume\": apertures.copy()},\n )\n else:\n state = {\n \"aperture\": apertures.copy(),\n \"specific_volume\": apertures.copy(),\n }\n pp.set_state(d, state)\n\n for g, d in gb:\n parent_apertures = []\n num_parent = []\n if g.dim < (self.Nd - 1):\n for edges in gb.edges_of_node(g):\n e = edges[0]\n g_h = e[0]\n\n if g_h == g:\n g_h = e[1]\n\n if g_h.dim == (self.Nd - 1):\n d_h = gb.node_props(g_h)\n if to_iterate:\n a_h = d_h[pp.STATE][pp.ITERATE][\"aperture\"]\n else:\n a_h = 
d_h[pp.STATE][\"aperture\"]\n a_h_face = np.abs(g_h.cell_faces) * a_h\n mg = gb.edge_props(e)[\"mortar_grid\"]\n # Assumes g_h is master\n a_l = (\n mg.mortar_to_slave_avg()\n * mg.master_to_mortar_avg()\n * a_h_face\n )\n parent_apertures.append(a_l)\n num_parent.append(np.sum(mg.mortar_to_slave_int().A, axis=1))\n else:\n raise ValueError(\"Intersection points not implemented in 3d\")\n parent_apertures = np.array(parent_apertures)\n num_parents = np.sum(np.array(num_parent), axis=0)\n\n apertures = np.sum(parent_apertures, axis=0) / num_parents\n\n specific_volumes = np.power(apertures, self.Nd - g.dim)\n if to_iterate:\n pp.set_iterate(\n d,\n {\n \"aperture\": apertures.copy(),\n \"specific_volume\": specific_volumes.copy(),\n },\n )\n else:\n state = {\n \"aperture\": apertures.copy(),\n \"specific_volume\": specific_volumes.copy(),\n }\n pp.set_state(d, state)\n\n return apertures", "def fit(self):\n # if self.verbose == 1:\n # print ('The list of all perturbation with its probability: \\n')\n # for perturb in range(len(self.p_list)):\n # print('%s perturbation with probability of: %s \\n' %(self.p_list[perturb], self.p_prob[perturb]))\n #p_current, error_vec_current ,error_vec_normal_current = self.minus_log_prob_neuron(self.neuron) # log probability of the current neuron\n p_current, error_vec_current ,error_vec_normal_current = self.kl_distance(self.neuron) # log probability of the current neuron\n acc = 0\n for i in range(self.ite):\n if(self.verbose ==1):\n #p_current, er , error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, er , error_vec_normal_current = self.kl_distance(self.neuron)\n #print('feature of current is: \\n %s' %(self.neuron.features)+ '\\n')\n print('\\n and its probability is: %s' %p_current)\n per = self.select_proposal() # MCMC index\n p_sym, details = self.do_MCMC(per)\n #p_proposal, error_vec_proposal, error_vec_normal_proposal = self.minus_log_prob_neuron(self.neuron)\n p_proposal, error_vec_proposal, error_vec_normal_proposal = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n #print('feature of proposal is: \\n %s' %(self.neuron.features))\n print('\\n and its probability is: %s' %p_proposal)\n a = min(1, p_sym * np.exp(p_current - p_proposal)) # Metropolis choice, notice that the values are minus log probability\n B = self.accept_proposal(a) # the boolean of acceptance\n if(B):\n p_current = p_proposal\n error_vec_current = error_vec_proposal\n error_vec_normal_current = error_vec_normal_proposal\n self.trend[:,i] = error_vec_proposal\n self.trend_normal[:,i] = error_vec_normal_proposal\n acc = acc + 1\n else:\n self.undo_MCMC(per, details)\n self.trend[:,i] = error_vec_current\n self.trend_normal[:,i] = error_vec_normal_current\n if len(self.neuron.nodes_list) == self.neuron.n_soma:\n self.neuron = self.initial_neuron(int(self.n_node/self.initial_seg),self.initial_seg)\n #p_current, error_vec_current, error_vec_normal_current = self.minus_log_prob_neuron(self.neuron)\n p_current, error_vec_current, error_vec_normal_current = self.kl_distance(self.neuron)\n if(self.verbose ==1):\n print ('\\n')\n print('Selected perturbation = ' + per)\n print('the p of acceptance was %s and it was %s that it`s been accepted.'%(a,B))\n print ('\\n')\n if(np.remainder(i,100)==0):\n self.evo.append(deepcopy(self.neuron))\n self.neuron.set_nodes_values()\n print acc", "def _p_to_e_on_basis(self, A):\n e = self.realization_of().e()\n P_refine = Poset((A.refinements(), A.parent().lt))\n c = prod((-1)**(i-1) * factorial(i-1) for i in 
A.shape())\n R = self.base_ring()\n return e._from_dict({B: R(P_refine.moebius_function(B, A) / ZZ(c))\n for B in P_refine}, remove_zeros=False)", "def dmp_raise(f, l, u, K):\n if not l:\n return f\n\n if not u:\n if not f:\n return dmp_zero(l)\n\n k = l - 1\n\n return [dmp_ground(c, k) for c in f]\n\n v = u - 1\n\n return [dmp_raise(c, l, v, K) for c in f]", "def propagate(self, time_steps=1):\n for _ in range(time_steps):\n # advance the wavefunction by dt\n self.single_step_propagation()\n\n # calculate the Ehrenfest theorems\n self.get_ehrenfest()\n\n return self.wavefunction", "def bridge_3d_elements(\n deck_elements: DeckShells, all_pier_elements: PierShells\n) -> List[Shell]:\n all_elements = list(itertools.chain.from_iterable(deck_elements))\n for pier_element in all_pier_elements:\n all_elements.append(pier_element)\n assert isinstance(all_elements[0], Shell)\n assert isinstance(all_elements[-1], Shell)\n return all_elements", "def processThickElements(sequence,lattice):\n\n\t#walk the XAL tree to get all nodes of a given kind\n\tkinds=(\"DH\",\"QH\",\"QV\",\"PQ\",\"RG\",\"BCM\")\n\tallElements=Vector()\n\tnodesOfKind(sequence,kinds,allElements)\n\t#conv Vector to list\n\tallElements=list(allElements)\n\t#sort all elements by their position\n\tsortByPos(allElements)\n\t#append all elements to the lattice\n\tfor el in allElements:\n\t\tlattice.append(el)", "def accumulate_by_keys(keys, g_ema, g_train, decay=0.999):\n dict_trn = dict(g_train.named_parameters())\n dict_ema = dict(g_ema.named_parameters())\n\n for k in keys:\n assert k in dict_ema, \"key %s is not in the param dict of G_ema.\" % k\n dict_ema[k].data.mul_(decay).add_(dict_trn[k].data, alpha=1 - decay)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP Network Introduction and protocol version
def test_dpp_network_intro_version(dev, apdev): check_dpp_capab(dev[0], min_ver=3) try: id, hapd = run_dpp_auto_connect(dev, apdev, 1, stop_after_prov=True) dev[0].select_network(id, freq=2412) dev[0].wait_connected() finally: dev[0].set("dpp_config_processing", "0", allow_fail=True)
[ "def protocolVersion():", "def eth_protocolVersion(self):\n self.payload[\"method\"] = \"eth_protocolVersion\"\n self.payload[\"id\"] = 67\n response = requests.post(self.url, data=json.dumps(self.payload), headers=self.headers).json()\n result = int(response[\"result\"], 16)\n print(\"Protocol Version: %d\" % result)", "def edns_version(self):\n return _ldns.ldns_pkt_edns_version(self)\n #parameters: const ldns_pkt *,\n #retvals: uint8_t", "def pmonsd_version():\n return (1,0)#Must always be the same as the version in src/SemiDetHelper.cxx\n #Also remember to update the supported versions in __actual_parse(..)", "def recognize_udp_protocol(self, data, buff):\n if len(data) == 48:\n buff = unpack(\"!BBBbiIIIIIIIIII\", data)\n year = datetime.now().year\n if int(buff[11] / 31536000 + 1900) == year:\n self.protocol = \"NTP\"\n\n if len(data) > 3:\n number = data[:2]\n reply_code = data[3] & 15\n if number == buff[:2] and 0 <= reply_code <= 9:\n self.protocol = \"DNS\"", "def request_version_and_flags(self, req, msg):", "def _info():\n\n # This is what prints out in the default command page, it should be\n # as short as possible.\n emitter.publish(\"Proxy and VPN access to DC/OS cluster\")\n return 0", "def version(self):\n return (self.hdr['type'] >> 13) & 0x7", "def extract_data_protocol(request_data):\n data_list = request_data.split()\n method = data_list[1]\n version = data_list[2]\n try:\n assert version == PROTOCOL, 'Exception: Undefined App Layer Protocol...'\n except AssertionError, _e:\n print _e\n response_message = encapsulate_data_protocol(417,\n 'Expectation Failed')\n return response_message\n host = data_list[data_list.index('Host:') + 1]\n port = data_list[data_list.index('Port:') + 1]\n cookie = None if data_list[data_list.index('Cookie:') + 1] == 'None' \\\n else int(data_list[data_list.index('Cookie:') + 1])\n # Call helper function to prepare response message.\n response_message = execute_request(method, host, port, cookie)\n return response_message", "def protocol_version(self):\n ret = self._get_attr(\"protocolVersion\")\n return ret", "def AddrNetVersion(self) -> bytes:\n return self.m_addr_net_ver", "def print_info(self):\n\n print \"\"\"src_port: %d\\t dst_port: %d\\t sequence_num: %d\\t ack_num: %d\n data_offset: %d\\t urg: %d\\t ack: %d\\t psh: %d\\t rst: %d\\t syn: %d\\t fin: %d\\t\n window_size: %d\\t checksum: %s\\t urgent_pointer: %s\\t opt_paddings: %s\"\"\" % (\n self.src_port, self.dst_port, self.sequence_num,\n self.ack_num, self.data_offset, self.flag_urg, \n self.flag_ack, self.flag_psh, self.flag_rst, \n self.flag_syn, self.flag_fin, self.window_size, \n self.checksum, self.urgent_pointer, self.opt_paddings)", "def algorithmInfo():\n\t\treturn r\"\"\"Kennedy, J. and Eberhart, R. \"Particle Swarm Optimization\". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 
1942--1948, 1995.\"\"\"", "def give_ipv4(self):\n ip_8_bit_pos = 0\n ip_16_bit_pos = 128\n ip_24_bit_pos = 1\n for component in self.netkit_components:\n for IF in component.attr['IF']:\n\tprefix = component.attr['map_IF_prefix'][IF]\n\tprefix_length = len(prefix)\n\tif prefix_length <= 3: # 8 first bits\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_8_bit_pos)+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 7: # ex : 123.201\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 11:\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_24_bit_pos)\n\telse :\n\t print \"Error in prefix, this length is not supported\"\n\t sys.exit(-1)\n if ip_24_bit_pos < 255:\n\t ip_24_bit_pos+=1\n\telse:\n\t ip_24_bit_pos = 1\n\t if ip_16_bit_pos < 255:\n\t ip_16_bit_pos+=1\n\t else:\n\t ip_16_bit_pos=128\n\t if ip_8_bit_pos < 255:\n\t ip_8_bit_pos +=1\n\t else:\n\t print \"Error, to much elements. trololol this error will never be printed\"\n\t sys.exit(-1)", "def get_vers_info(self):\n tmpl = \"cisco_ios_show_version.textfsm\"\n cmd = \"show version\"\n self.show_output_json[cmd] = self.send_command(cmd, tmpl)\n output = self.show_output_json[cmd][0]\n trantab = str.maketrans(\"\", \"\", \"\\'\\\"{}[]\")\n self.model = str(output[\"hardware\"]).translate(trantab)\n self.version = output[\"version\"]\n self.serial_num = str(output[\"serial\"]).translate(trantab)", "def _print_verbage(self):\n print \"\\nReceive Path:\"\n print \"Using RX d'board %s\" % (self.subdev.side_and_name(),)\n print \"Rx gain: %g\" % (self.gain,)\n print \"modulation: %s\" % (self._demod_class.__name__)\n print \"bitrate: %sb/s\" % (eng_notation.num_to_str(self._bitrate))\n print \"samples/symbol: %3d\" % (self._samples_per_symbol)\n print \"decim: %3d\" % (self._decim)\n print \"Rx Frequency: %s\" % (eng_notation.num_to_str(self._rx_freq))\n # print \"Rx Frequency: %f\" % (self._rx_freq)", "def get_software_version(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 0F\"))\n temp = self.board_socket.recv(1024)\n return(temp[3:10])", "def print_peering_info(peering_info, network, device):\n \n peering_graph_link = \"\"\"http://81.5.223.17/functions/graphics/mac_acc.php?type=b&dev_id=25&ip=%s&time_range=d\"\"\"\n print \"\"\"<tr><td colspan=2>&nbsp;</td></tr>\"\"\"\n print \"\"\"<tr><td colspan=2 class=TextPurpleBold>Peering Information</td></tr>\"\"\"\n print \"\"\"<tr>\"\"\"\n print \"\"\"<td>AS</td>\"\"\"\n print \"\"\"<td>%s</td>\"\"\" % peering_info[0][1]\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>AS-SET</td>\"\"\"\n print \"\"\"<td>%s</td>\"\"\" % peering_info[0][2]\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>Max prefix</td>\"\"\"\n print \"\"\"<td>%s</td>\"\"\" % peering_info[0][9]\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>MD5 Password</td>\"\"\"\n print \"\"\"<td>%s</td>\"\"\" % peering_info[0][3]\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>Routeserver</td>\"\"\"\n if peering_info[0][4] == 1:\n print \"\"\"<td>yes</td>\"\"\"\n else:\n print \"\"\"<td>no</td>\"\"\"\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>Session</td>\"\"\"\n if peering_info[0][5] == 1:\n print \"\"\"<td>UP</td>\"\"\"\n else:\n print \"\"\"<td>DOWN</td>\"\"\"\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>Contact</td>\"\"\"\n if peering_info[0][6] != None and peering_info[0][6] != \"NULL\":\n print \"\"\"<td><a href=\"mailto:%s\" class=LinkPurpleBold>%s</a></td>\"\"\" % (peering_info[0][6], 
peering_info[0][6])\n else:\n print \"\"\"<td>%s</td>\"\"\" % (peering_info[0][6])\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td>Peering device</td>\"\"\"\n print \"\"\"<td>%s</td>\"\"\" % peering_info[0][8].lower()\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td valign=top>Comment</td>\"\"\"\n print \"\"\"<td><textarea class=b_eingabefeld_white readonly>%s</textarea></td>\"\"\" % peering_info[0][7]\n print \"\"\"</tr><tr>\"\"\"\n print \"\"\"<td colspan=2>&nbsp;</td>\"\"\"\n## print \"\"\"</tr><tr>\"\"\"\n## if len(device.split(\";\")) == 1:\n##\tprint \"\"\"<td colspan=2><a href=\"http://81.5.223.17/functions/graphics/mac_acc.php?type=b&dev_name=%s&ip=%s&time_range=d\" target=_blank>\n##\t <img src=\"http://81.5.223.17/functions/graphics/mac_acc.php?type=b&dev_name=%s&ip=%s&time_range=d\" border=0></a></td>\"\"\" \\\n##\t % (device, network.replace(\".\", \"-\"), device, network.replace(\".\", \"-\"))\n## elif len(device.split(\";\")) > 1:\n##\tfor i in range(0, len(device.split(\";\"))):\n##\t print \"\"\"<td colspan=2>%s<br>\n##\t\t<a href=\"http://81.5.223.17/functions/graphics/mac_acc.php?type=b&dev_name=%s&ip=%s&time_range=d\" target=_blank>\n##\t\t<img src=\"http://81.5.223.17/functions/graphics/mac_acc.php?type=b&dev_name=%s&ip=%s&time_range=d\" border=0></a></td>\"\"\" \\\n##\t\t% ( device.split(\";\")[i], device.split(\";\")[i], network.replace(\".\", \"-\"), device.split(\";\")[i].strip(), network.replace(\".\", \"-\"))\n##\t print \"\"\"</tr><tr>\"\"\"\n## else:\n##\tprint \"\"\"<td colspan=2>&nbsp;</td>\"\"\"\n print \"\"\"</tr>\"\"\"", "def version():\n print('Lizzy Client', VERSION)", "def ftduino_direct_get_version(self):\n return self.comm('ftduino_direct_get_version')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP Network Introduction and protocol version change
def test_dpp_network_intro_version_change(dev, apdev): check_dpp_capab(dev[0], min_ver=3) try: dev[0].set("dpp_version_override", "2") id, hapd = run_dpp_auto_connect(dev, apdev, 1, stop_after_prov=True) dev[0].set("dpp_version_override", "3") dev[0].select_network(id, freq=2412) dev[0].wait_connected() finally: dev[0].set("dpp_config_processing", "0", allow_fail=True)
[ "def protocolVersion():", "def eth_protocolVersion(self):\n self.payload[\"method\"] = \"eth_protocolVersion\"\n self.payload[\"id\"] = 67\n response = requests.post(self.url, data=json.dumps(self.payload), headers=self.headers).json()\n result = int(response[\"result\"], 16)\n print(\"Protocol Version: %d\" % result)", "def updateProtocolSection(self):\n self.protocol = self.fileReadMappedSection('ProtocolSection',KEYS_PROTOCOL)", "def protocol(ctx: Context, protocol_public_id):\n upgrade_item(ctx, \"protocol\", protocol_public_id)", "def recognize_udp_protocol(self, data, buff):\n if len(data) == 48:\n buff = unpack(\"!BBBbiIIIIIIIIII\", data)\n year = datetime.now().year\n if int(buff[11] / 31536000 + 1900) == year:\n self.protocol = \"NTP\"\n\n if len(data) > 3:\n number = data[:2]\n reply_code = data[3] & 15\n if number == buff[:2] and 0 <= reply_code <= 9:\n self.protocol = \"DNS\"", "def _make_v6_json_protocol(\n *,\n pipettes: Dict[str, Pipette] = {\n \"pipette-id-1\": Pipette(name=\"p10_single\"),\n },\n labware_definitions: Dict[str, LabwareDefinition] = {\n \"example/plate/1\": _load_labware_definition_data(),\n \"example/trash/1\": _load_labware_definition_data(),\n },\n labware: Dict[str, Labware] = {\n \"labware-id-1\": Labware(\n displayName=\"Source Plate\", definitionId=\"example/plate/1\"\n ),\n \"labware-id-2\": Labware(displayName=\"Trash\", definitionId=\"example/trash/1\"),\n },\n commands: List[protocol_schema_v6.Command] = [],\n modules: Dict[str, Module] = {\n \"module-id-1\": Module(model=\"magneticModuleV2\"),\n \"module-id-2\": Module(model=\"thermocyclerModuleV2\"),\n },\n liquids: Dict[str, Liquid] = {\n \"liquid-id-555\": Liquid(\n displayName=\"water\", description=\"water description\", displayColor=\"#F00\"\n )\n },\n) -> protocol_schema_v6.ProtocolSchemaV6:\n return protocol_schema_v6.ProtocolSchemaV6(\n otSharedSchema=\"#/protocol/schemas/6\",\n schemaVersion=6,\n metadata=SD_Metadata(),\n robot=Robot(model=\"OT-2 Standard\", deckId=\"ot2_standard\"),\n pipettes=pipettes,\n labwareDefinitions=labware_definitions,\n labware=labware,\n commands=commands,\n liquids=liquids,\n modules=modules,\n )", "def give_ipv4(self):\n ip_8_bit_pos = 0\n ip_16_bit_pos = 128\n ip_24_bit_pos = 1\n for component in self.netkit_components:\n for IF in component.attr['IF']:\n\tprefix = component.attr['map_IF_prefix'][IF]\n\tprefix_length = len(prefix)\n\tif prefix_length <= 3: # 8 first bits\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_8_bit_pos)+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 7: # ex : 123.201\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 11:\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_24_bit_pos)\n\telse :\n\t print \"Error in prefix, this length is not supported\"\n\t sys.exit(-1)\n if ip_24_bit_pos < 255:\n\t ip_24_bit_pos+=1\n\telse:\n\t ip_24_bit_pos = 1\n\t if ip_16_bit_pos < 255:\n\t ip_16_bit_pos+=1\n\t else:\n\t ip_16_bit_pos=128\n\t if ip_8_bit_pos < 255:\n\t ip_8_bit_pos +=1\n\t else:\n\t print \"Error, to much elements. 
trololol this error will never be printed\"\n\t sys.exit(-1)", "def generate_core():\n yaml_new = ruamel.yaml.YAML()\n print(bcolors.WARNING + \" [*] Generating Peer Core\")\n peer = {\n \"id\": \"peer\",\n \"networkId\": \"byfn\",\n \"listenAddress\": \"0.0.0.0:7051\",\n \"address\": \"0.0.0.0:7051\",\n \"addressAutoDetect\": False,\n \"keepalive\": {\n \"interval\": \"7200s\",\n \"timeout\": \"20s\",\n \"minInterval\": \"60s\",\n \"client\": {\n \"interval\": \"60s\",\n \"timeout\": \"20s\",\n },\n \"deliveryClient\": {\n \"interval\": \"60s\",\n \"timeout\": \"20s\"\n }\n },\n \"gateway\": {\n \"enabled\": True\n },\n \"gossip\": {\n \"bootstrap\": \"127.0.0.1:7051\",\n \"useLeaderElection\": True,\n \"orgLeader\": False,\n \"membershipTrackerInterval\": \"5s\",\n \"endpoint\": None,\n \"maxBlockCountToStore\": 100,\n \"maxPropagationBurstLatency\": \"10ms\",\n \"maxPropagationBurstSize\": 10,\n \"propagateIterations\": 1,\n \"propagatePeerNum\": 3,\n \"pullInterval\": \"4s\",\n \"pullPeerNum\": 3,\n \"requestStateInfoInterval\": \"4s\",\n # Determines frequency of pushing state info messages to peers(unit: second)\n \"publishStateInfoInterval\": \"4s\",\n # Maximum time a stateInfo message is kept until expired\n \"stateInfoRetentionInterval\": None,\n # Time from startup certificates are included in Alive messages(unit: second)\n \"publishCertPeriod\": \"10s\",\n # Should we skip verifying block messages or not (currently not in use)\n \"skipBlockVerification\": False,\n # Dial timeout(unit: second)\n \"dialTimeout\": \"3s\",\n # Connection timeout(unit: second)\n \"connTimeout\": \"2s\",\n # Buffer size of received messages\n \"recvBuffSize\": 20,\n # Buffer size of sending messages\n \"sendBuffSize\": 200,\n # Time to wait before pull engine processes incoming digests (unit: second)\n # Should be slightly smaller than requestWaitTime\n \"digestWaitTime\": \"1s\",\n # Time to wait before pull engine removes incoming nonce (unit: milliseconds)\n # Should be slightly bigger than digestWaitTime\n \"requestWaitTime\": \"1500ms\",\n # Time to wait before pull engine ends pull (unit: second)\n \"responseWaitTime\": \"2s\",\n # Alive check interval(unit: second)\n \"aliveTimeInterval\": \"5s\",\n # Alive expiration timeout(unit: second)\n \"aliveExpirationTimeout\": \"25s\",\n # Reconnect interval(unit: second)\n \"reconnectInterval\": \"25s\",\n # This is an endpoint that is published to peers outside of the organization.\n # If this isn't set, the peer will not be known to other organizations.\n \"externalEndpoint\": None,\n # Leader election service configuration\n \"election\": {\n # Longest time peer waits for stable membership during leader election startup (unit: second)\n \"startupGracePeriod\": \"15s\",\n # Interval gossip membership samples to check its stability (unit: second)\n \"membershipSampleInterval\": \"1s\",\n # Time passes since last declaration message before peer decides to perform leader election (unit: second)\n \"leaderAliveThreshold\": \"10s\",\n # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second)\n \"leaderElectionDuration\": \"5s\"\n },\n \"pvtData\": {\n \"pullRetryThreshold\": \"60s\",\n \"transientstoreMaxBlockRetention\": 1000,\n \"pushAckTimeout\": \"3s\",\n \"btlPullMargin\": 10,\n \"reconcileBatchSize\": 10,\n \"reconcileSleepInterval\": \"1m\",\n \"reconciliationEnabled\": True,\n \"skipPullingInvalidTransactionsDuringCommit\": False,\n },\n \"state\": {\n \"enabled\": True,\n 
\"checkInterval\": \"10s\",\n \"responseTimeout\": \"3s\",\n \"batchSize\": 10,\n \"blockBufferSize\": 100,\n \"maxRetries\": 3\n },\n },\n \"tls\": {\n \"enabled\": True,\n \"clientAuthRequired\": False,\n \"cert\": {\n \"file\": \"tls/server.crt\",\n },\n \"key\": {\n \"file\": \"tls/server.key\",\n },\n \"rootcert\": {\n \"file\": \"tls/ca.crt\",\n },\n \"clientRootCAs\": {\n \"files\": [\n \"tls/ca.crt\"\n ]\n },\n \"clientKey\": {\n \"file\": None\n },\n \"clientCert\": {\n \"file\": None\n }\n },\n \"authentication\": {\n \"timewindow\": \"15m\"\n },\n \"fileSystemPath\": \"/var/hyperledger/production\",\n \"BCCSP\": {\n \"Default\": \"SW\",\n \"SW\": {\n \"Hash\": \"SHA2\",\n \"Security\": 256,\n \"FileKeyStore\": {\n \"KeyStore\": None,\n },\n },\n \"PKCS11\": {\n \"Library\": None,\n \"Label\": None,\n \"Pin\": None,\n \"Hash\": None,\n \"Security\": None\n }\n },\n \"mspConfigPath\": \"msp\",\n \"localMspId\": \"SampleOrg\",\n \"client\": {\n \"connTimeout\": \"3s\"\n },\n \"deliveryclient\": {\n \"reconnectTotalTimeThreshold\": \"3600s\",\n \"connTimeout\": \"3s\",\n \"reConnectBackoffThreshold\": \"3600s\",\n \"addressOverrides\": None,\n },\n \"localMspType\": \"bccsp\",\n \"profile\": {\n \"enabled\": False,\n \"listenAddress\": \"0.0.0.0:6060\"\n },\n \"handlers\": {\n \"authFilters\": [\n { \"name\": \"DefaultAuth\" },\n { \"name\": \"ExpirationCheck\" },\n ],\n \"decorators\": [\n { \"name\": \"DefaultDecorator\" }\n ],\n \"endorsers\": {\n \"escc\": {\n \"name\": \"DefaultEndorsement\",\n \"library\": None,\n }\n },\n \"validators\": {\n \"vscc\": {\n \"name\": \"DefaultValidation\",\n \"library\": None,\n }\n }\n },\n \"validatorPoolSize\": None,\n \"discovery\": {\n \"enabled\": True,\n \"authCacheEnabled\": True,\n \"authCacheMaxSize\": 1000,\n \"authCachePurgeRetentionRatio\": 0.75,\n \"orgMembersAllowedAccess\": False,\n },\n \"limits\": {\n \"concurrency\": {\n \"qscc\": 5000,\n }\n }\n }\n print(bcolors.OKGREEN + \" [+] Generating Peer Core COMPLETE\")\n\n print(bcolors.WARNING + \" [*] Generating VM Core \")\n vm = {\n \"endpoint\": \"unix:///var/run/docker.sock\",\n \"docker\": {\n \"tls\": {\n \"enabled\": True,\n \"ca\": {\n \"file\": \"docker/ca.crt\",\n },\n \"cert\": {\n \"file\": \"docker/tls.crt\",\n },\n \"key\": {\n \"file\": \"docker/tls.key\",\n },\n },\n \"attachStdout\": False,\n \"hostConfig\": {\n \"NetworkMode\": \"host\",\n \"Dns\": None,\n \"LogConfig\": {\n \"Type\": \"json-file\",\n \"Config\": {\n \"max-size\": DoubleQuotedScalarString(\"50m\"),\n \"max-file\": DoubleQuotedScalarString(\"5\")\n }\n },\n \"Memory\": 2147483648\n }\n }\n }\n\n print(bcolors.OKGREEN + \" [+] Generating VM Core COMPLETE\")\n\n print(bcolors.WARNING + \" [*] Generating Chaincode Core \")\n chaincode = {\n \"id\": {\n \"path\": None,\n \"name\": None,\n },\n \"builder\": \"$(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION)\",\n \"pull\": False,\n \"golang\": {\n \"runtime\": \"$(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION)\",\n \"dynamicLink\": False,\n },\n \"java\": {\n \"runtime\": \"$(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION)\",\n },\n \"node\": {\n \"runtime\": \"$(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION)\",\n },\n \"externalBuilders\": [],\n \"installTimeout\": \"300s\",\n \"startuptimeout\": \"300s\",\n \"executetimeout\": \"30s\",\n \"mode\": \"net\",\n \"keepalive\": 0,\n \"system\": {\n \"_lifecycle\": \"enable\",\n \"cscc\": \"enable\",\n \"lscc\": \"enable\",\n \"escc\": \"enable\",\n \"vscc\": \"enable\",\n \"qscc\": \"enable\",\n },\n 
\"logging\": {\n \"level\": \"info\",\n \"shim\": \"warning\",\n \"format\": SingleQuotedScalarString('%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} '\n '-> %{level:.4s} %{id:03x}%{color:reset} %{message}')\n }\n }\n\n print(bcolors.OKGREEN + \" [+] Generating Chaincode Core COMPLETE\")\n\n print(bcolors.WARNING + \" [*] Generating Ledger Core \")\n ledger = {\n \"blockchain\": None,\n \"state\": {\n \"stateDatabase\": \"goleveldb\",\n \"totalQueryLimit\": 100000,\n \"couchDBConfig\": {\n \"couchDBAddress\": \"127.0.0.1:5984\",\n \"username\": None,\n \"password\": None,\n \"maxRetries\": 3,\n \"maxRetriesOnStartup\": 12,\n \"requestTimeout\": \"35s\",\n \"internalQueryLimit\": 1000,\n \"maxBatchUpdateSize\": 1000,\n \"warmIndexesAfterNBlocks\": 1,\n \"createGlobalChangesDB\": False,\n \"cacheSize\": 64,\n }\n },\n \"history\": {\n \"enableHistoryDatabase\": True,\n },\n \"pvtdataStore\": {\n \"collElgProcMaxDbBatchSize\": 5000,\n \"collElgProcDbBatchesInterval\": 1000\n }\n\n }\n\n print(bcolors.OKGREEN + \" [+] Generating Ledger Core COMPLETE\")\n print(bcolors.WARNING + \" [*] Generating Operations Core \")\n operations = {\n \"listenAddress\": \"127.0.0.1:9443\",\n \"tls\": {\n \"enabled\": True,\n \"cert\": {\n \"file\": None,\n },\n \"key\": {\n \"file\": None,\n },\n \"clientAuthRequired\": False,\n \"clientRootCAs\": {\n \"files\": []\n }\n }\n }\n print(bcolors.OKGREEN + \" [+] Generating Operations Core COMPLETE\")\n print(bcolors.WARNING + \" [*] Generating Metrics Core \")\n\n metrics = {\n \"provider\": \"disabled\",\n \"statsd\": {\n \"network\": \"udp\",\n \"address\": \"127.0.0.1:8125\",\n \"writeInterval\": \"10s\",\n \"prefix\": None\n }\n }\n print(bcolors.OKGREEN + \" [*] Generating Metrics Core COMPLETE\")\n\n print(bcolors.OKBLUE + \"======= Generating final Structure =======\")\n final = {\n \"peer\": peer,\n \"vm\": vm,\n \"chaincode\": chaincode,\n \"ledger\": ledger,\n \"operations\": operations,\n \"metrics\": metrics\n }\n\n # yaml_new.dump(final, sys.stdout)\n f = open(\"core.yaml\", \"w\")\n yaml_new.dump(final, f)\n print(bcolors.HEADER + \"========================================\")\n print(\">>> core.yaml has been dumped!\")\n print(\"========================================\")", "def protocol_later(self, other_pv):\n return utility.protocol_earlier(other_pv, self.protocol_version)", "def update_target_network(self):\n\t\tpass", "def edns_version(self):\n return _ldns.ldns_pkt_edns_version(self)\n #parameters: const ldns_pkt *,\n #retvals: uint8_t", "def test_Bridge_updateFromExtraInfoDescriptor_pt_changed_port(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 4)\n\n for pt in self.bridge.transports:\n if pt.methodname == 'obfs4':\n self.assertEqual(pt.address, ipaddr.IPv4Address('179.178.155.140'))\n self.assertEqual(pt.port, 36493)\n\n # Change the port of obfs4 transport in the extrainfo descriptor:\n transportline = self.extrainfo.transport['obfs4']\n self.extrainfo.transport['obfs4'] = (transportline[0],\n 31337,\n transportline[2])\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n for pt in self.bridge.transports:\n if pt.methodname == 'obfs4':\n self.assertEqual(pt.address, ipaddr.IPv4Address('179.178.155.140'))\n self.assertEqual(pt.port, 31337)", "def __init__(self, protocol):\r\n self.protocol = protocol\r\n 
self.protocol.protocol_flags['MSDP'] = False\r\n self.protocol.negotiationMap[MSDP] = self.msdp_to_evennia\r\n self.protocol.will(MSDP).addCallbacks(self.do_msdp, self.no_msdp)\r\n self.msdp_reported = {}", "def extract_data_protocol(request_data):\n data_list = request_data.split()\n method = data_list[1]\n version = data_list[2]\n try:\n assert version == PROTOCOL, 'Exception: Undefined App Layer Protocol...'\n except AssertionError, _e:\n print _e\n response_message = encapsulate_data_protocol(417,\n 'Expectation Failed')\n return response_message\n host = data_list[data_list.index('Host:') + 1]\n port = data_list[data_list.index('Port:') + 1]\n cookie = None if data_list[data_list.index('Cookie:') + 1] == 'None' \\\n else int(data_list[data_list.index('Cookie:') + 1])\n # Call helper function to prepare response message.\n response_message = execute_request(method, host, port, cookie)\n return response_message", "def test_invalid_protocol_version(self):\n self.ae = ae = AE()\n ae.add_supported_context(Verification)\n scp = ae.start_server((\"localhost\", 11112), block=False)\n\n assert self.fsm.current_state == \"Sta1\"\n\n def AE_2(dul):\n conn = dul.to_provider_queue.get(False)\n pdu = A_ASSOCIATE_RQ()\n pdu.from_primitive(conn.request)\n pdu.protocol_version = 0x0002\n bytestream = pdu.encode()\n dul.socket.send(bytestream)\n return \"Sta5\"\n\n FINITE_STATE.ACTIONS[\"AE-2\"] = (\"Bluh\", AE_2, \"Sta5\")\n\n self.assoc.start()\n\n timeout = 0\n while (\n not self.assoc.is_established\n and not self.assoc.is_rejected\n and not self.assoc.is_aborted\n and not self.assoc.dul._kill_thread\n and timeout < 10\n ):\n time.sleep(0.05)\n timeout += 0.05\n\n assert self.assoc.is_rejected\n assert self.assoc.acceptor.primitive.result == 0x01\n assert self.assoc.acceptor.primitive.result_source == 0x02\n assert self.assoc.acceptor.primitive.diagnostic == 0x02\n\n timeout = 0\n while self.fsm.current_state != \"Sta1\" and timeout < 10:\n time.sleep(0.05)\n timeout += 0.05\n\n assert self.fsm.current_state == \"Sta1\"\n\n scp.shutdown()", "def __init__(self, header_bytes: bytes) -> None:\n tcp_header_first_word = unpack('!HH', header_bytes[:4])\n self.source_port = tcp_header_first_word[0]\n self.destination_port = tcp_header_first_word[1]\n\n self.sequence_number = header_bytes[4:8]\n self.acknowledgement_number = header_bytes[8:12]\n\n tcp_header_fourth_word = unpack('!HH', header_bytes[12:16])\n self.data_offset = tcp_header_fourth_word[0] >> 12\n self.reserved = (tcp_header_fourth_word[0] >> 9) & 0x7\n\n self.ns = bool(tcp_header_fourth_word[0] & 0x100) # pylint:disable=invalid-name\n self.cwr = bool(tcp_header_fourth_word[0] & 0x80)\n self.ece = bool(tcp_header_fourth_word[0] & 0x40)\n self.urg = bool(tcp_header_fourth_word[0] & 0x20)\n self.ack = bool(tcp_header_fourth_word[0] & 0x10)\n self.psh = bool(tcp_header_fourth_word[0] & 0x8)\n self.rst = bool(tcp_header_fourth_word[0] & 0x4)\n self.syn = bool(tcp_header_fourth_word[0] & 0x2)\n self.fin = bool(tcp_header_fourth_word[0] & 0x1)\n\n self.window = tcp_header_fourth_word[1]\n\n tcp_header_fifth_word = unpack('!HH', header_bytes[16:20])\n self.checksum = tcp_header_fifth_word[0]\n self.urgent_pointer = tcp_header_fifth_word[1]\n\n self.options = None\n option_word_count = self.data_offset - 5\n if option_word_count:\n self.options = header_bytes[20:(20 + option_word_count * 4)]", "def request_version_and_flags(self, req, msg):", "def decode(self):\n\n self.src_port = int(data_to_hex_str(self.message[0:2]), 16)\n self.dst_port = 
int(data_to_hex_str(self.message[2:4]), 16)\n self.sequence_num = int(data_to_hex_str(self.message[4:8]), 16)\n self.ack_num = int(data_to_hex_str(self.message[8:12]), 16)\n self.data_offset = int(data_to_hex_str(self.message[12])[0:3], 16) * 4\n\n #parse the flags: bit operation\n flags = ord(self.message[13])\n if ((flags & (1 << 5)) != 0):\n self.flag_urg = 1\n else:\n self.flag_urg = 0\n\n if ((flags & (1 << 4)) != 0):\n self.flag_ack = 1\n else:\n self.flag_ack = 0\n\n if ((flags & (1 << 3)) != 0):\n self.flag_psh = 1\n else:\n self.flag_psh = 0\n\n if ((flags & (1 << 2)) != 0):\n self.flag_rst = 1\n else:\n self.flag_rst = 0\n\n if ((flags & (1 << 1)) != 0):\n self.flag_syn = 1\n else:\n self.flag_syn = 0\n\n if ((flags & 1) != 0):\n self.flag_fin = 1\n else:\n self.flag_fin = 0\n\n self.window_size = int(data_to_hex_str(self.message[14 : 16]), 16)\n self.checksum = data_to_hex_str(self.message[16 : 18])\n self.urgent_pointer = data_to_hex_str(self.message[18 : 20])\n\n header_len = self.data_offset\n if (header_len > 20):\n self.opt_paddings = data_to_hex_str(self.message[20 : header_len])", "def test_Bridge_updateFromExtraInfoDescriptor_pt_died(self):\n self.bridge.updateFromNetworkStatus(self.networkstatus)\n self.bridge.updateFromServerDescriptor(self.serverdescriptor)\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 4)\n\n # Remove the obfs3 transport from the extrainfo descriptor:\n self.extrainfo.transport.pop('obfs3')\n self.bridge.updateFromExtraInfoDescriptor(self.extrainfo)\n\n self.assertEqual(len(self.bridge.transports), 3)\n\n for pt in self.bridge.transports:\n self.failIfEqual(pt.methodname, 'obfs3')", "def read33(self):\n #print('Start read33, read message from Mast to PC')\n \"\"\"Byte sequence\n 0: Start byte 0x02\n 1: Address: PC = 0x33\n 2,3: Software version, higher and lower byte\n 4-6: No of starts of lifting motor (MOT1) upwards\n 7-9: No of starts of MOT1 downwards\n 10-12: Working time of MOT1\n 13,14: Current on MOT1\n 15-17: No of starts of rotating motor upwards (MOT2)\n 18-20: No of starts of MOT2 downwards\n 21-23: Working time MOT2\n 24,25: Current MOT2\n 26: Status of exits\n 27: Status of entrances\n 28,29: Angle of MOT2 (0-1023)\n 30-32: Height of MOT1 (0-8191)\n 33: Level of radio Signal\n 34: Alarms: \n 35: Status MOT1\n 36: Status MOT2\n 37: Cycle status\n 38: Checksum\n 39: End byte 0x04\n \"\"\"\n nbytes, msg = self.sio.readmsg(startseq=b'\\x04\\x02\\x33', bytestoread=40) # look for start sequence 02 33\n #print('Read %03i chars: %s' % (nbytes, msg.hex()))\n\n self.status['crc_calc'] = sum(msg[3:39]) % 256 # calculate checksum\n self.status['crc_read'] = msg[39] # read checksum, must be same as crc_calc\n if self.status['crc_read'] != self.status['crc_calc']:\n print('ERROR: Bad Message, checksum does not coincide')\n return\n\n self.status['firmware'] = float(msg[3]) + 0.01 * float(msg[4])\n self.status['output'] = [msg[27] & 1 != 0, msg[27] & 2 != 0, msg[27] & 4 != 0]\n self.status['heartbeat'] = msg[27] & 248 != 0 #?? 
What is this??\n self.status['input'] = [msg[28] & 1 != 0, msg[28] & 2 != 0, msg[28] & 4 != 0, msg[28] & 8 != 0]\n self.status['level'] = msg[34] # level of radio signal\n self.status['error_short_circuit'] = msg[35] & 16 != 0\n self.status['error_connection'] = msg[35] & 128 != 0\n try: # TODO diesen Status (Cycle Status) anders aufschlüsseln (nur werte 7 od 9 gültig)\n self.status['cycle'] = self.cycle[msg[37]]\n except:\n self.status['cycle'] = (msg[37], 'unknown ' + str(msg[37]))\n\n self.lift_motor['up_cnt'] = int.from_bytes(msg[5:8], byteorder='big', signed=False)\n self.lift_motor['down_cnt'] = int.from_bytes(msg[8:11], byteorder='big', signed=False)\n self.lift_motor['work_hrs'] = int.from_bytes(msg[11:14], byteorder='big', signed=False)\n self.lift_motor['current'] = 0.1 * int.from_bytes(msg[14:16], byteorder='big', signed=False)\n self.lift_motor['encoder'] = int.from_bytes(msg[31:34], byteorder='big', signed=False) # Height of the mast\n try:\n self.lift_motor['cycle'] = self.cycle[msg[36]]\n except:\n self.lift_motor['cycle'] = (msg[36], 'unkown ' + str(msg[36]))\n self.lift_motor['error_undercurrent'] = msg[35] & 1 != 0\n self.lift_motor['error_overcurrent'] = msg[35] & 2 != 0\n self.lift_motor['error_encoder'] = msg[35] & 32 != 0\n\n self.rot_motor['up_cnt'] = int.from_bytes(msg[16:19], byteorder='big', signed=False)\n self.rot_motor['down_cnt'] = int.from_bytes(msg[19:22], byteorder='big', signed=False)\n self.rot_motor['work_hrs'] = int.from_bytes(msg[22:25], byteorder='big', signed=False)\n self.rot_motor['current'] = 0.1 * int.from_bytes(msg[25:27], byteorder='big', signed=False)\n self.rot_motor['encoder'] = int.from_bytes(msg[29:31], byteorder='big', signed=False)\n try:\n self.rot_motor['cycle'] = self.cycle[msg[38]]\n except:\n self.rot_motor['cycle'] = (msg[38], 'unkown ' + str(msg[38]))\n self.rot_motor['error_undercurrent'] = msg[35] & 4 != 0\n self.rot_motor['error_overcurrent'] = msg[35] & 8 != 0\n self.rot_motor['error_encoder'] = msg[35] & 64 != 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP Network Introduction and protocol version missing from request
def test_dpp_network_intro_version_missing_req(dev, apdev): check_dpp_capab(dev[0], min_ver=3) try: dev[0].set("dpp_version_override", "2") id, hapd = run_dpp_auto_connect(dev, apdev, 1, stop_after_prov=True) dev[0].set("dpp_version_override", "3") dev[0].set("dpp_test", "92") dev[0].select_network(id, freq=2412) ev = dev[0].wait_event(["DPP-INTRO"], timeout=10) if ev is None: raise Exception("DPP network introduction result not seen on STA") if "status=8" not in ev: raise Exception("Unexpected network introduction result on STA: " + ev) finally: dev[0].set("dpp_config_processing", "0", allow_fail=True)
[ "def request_version_and_flags(self, req, msg):", "def protocolVersion():", "def extract_data_protocol(request_data):\n data_list = request_data.split()\n method = data_list[1]\n version = data_list[2]\n try:\n assert version == PROTOCOL, 'Exception: Undefined App Layer Protocol...'\n except AssertionError, _e:\n print _e\n response_message = encapsulate_data_protocol(417,\n 'Expectation Failed')\n return response_message\n host = data_list[data_list.index('Host:') + 1]\n port = data_list[data_list.index('Port:') + 1]\n cookie = None if data_list[data_list.index('Cookie:') + 1] == 'None' \\\n else int(data_list[data_list.index('Cookie:') + 1])\n # Call helper function to prepare response message.\n response_message = execute_request(method, host, port, cookie)\n return response_message", "def eth_protocolVersion(self):\n self.payload[\"method\"] = \"eth_protocolVersion\"\n self.payload[\"id\"] = 67\n response = requests.post(self.url, data=json.dumps(self.payload), headers=self.headers).json()\n result = int(response[\"result\"], 16)\n print(\"Protocol Version: %d\" % result)", "def alpn_protocol(self, request: httputil.HTTPServerRequest) -> Response:\n assert request.connection is not None\n proto = request.connection.stream.socket.selected_alpn_protocol() # type: ignore[attr-defined]\n return Response(proto.encode(\"utf8\") if proto is not None else \"\")", "def get_request_msg(self) -> str:", "def unpack(breq_original):\n\t\tbreq = bytearray(breq_original)\n\n\t\t# Extract request ID and length.\n\t\tr_id, r_len = struct.unpack(\"<BI\", breq[:5])\n\t\tbreq = breq[5:(5+r_len)]\n\t\t# Create a dict of parameters.\n\t\td = {\"id\":r_id}\n\n\t\t# Join\n\t\tif r_id == Protocol.REQ_JOIN:\n\t\t\t# Extract nickname\n\t\t\tbnlen, = struct.unpack(\"<I\", breq[:4])\n\t\t\tbreq = breq[4:]\n\t\t\tbname, = struct.unpack(\"<{}s\".format(bnlen), breq[:bnlen])\n\t\t\td[\"name\"] = bname.decode(\"utf-8\")\n\t\t\t# Extract document name\n\t\t\td[\"doc\"] = breq[bnlen:].decode(\"utf-8\")\n\t\t# Or leave?\n\t\telif r_id == Protocol.REQ_LEAVE:\n\t\t\t# No arguments here.\n\t\t\tpass\n\t\t# A full text request?\n\t\telif r_id == Protocol.REQ_TEXT:\n\t\t\t# No arguments\n\t\t\tpass\n\t\telif r_id == Protocol.RES_TEXT:\n\t\t\t# Extract version, cursor\n\t\t\tversion, cursor, = struct.unpack(\"<II\", breq[:8])\n\t\t\td[\"version\"] = version\n\t\t\td[\"cursor\"] = cursor\n\t\t\t# Extract text\n\t\t\td[\"text\"] = breq[8:].decode(\"utf-8\")\n\t\t# Commit?\n\t\telif r_id == Protocol.RES_COMMIT:\n\t\t\t# Extract version\n\t\t\tversion, = struct.unpack(\"<I\", breq[:4])\n\t\t\td[\"version\"] = version\n\t\t\td[\"sequence\"] = []\n\t\t\t# Extract operations\n\t\t\tbreq = breq[4:]\n\t\t\twhile len(breq) > 0:\n\t\t\t\tbreq, dop = Protocol.unpack_op(breq)\n\t\t\t\td[\"sequence\"].append(dop)\n\t\t# Ok response\n\t\telif r_id == Protocol.RES_OK:\n\t\t\treq, = struct.unpack(\"<B\", breq[:1])\n\t\t\td[\"req_id\"] = req\n\t\t# Error response\n\t\telif r_id == Protocol.RES_ERROR:\n\t\t\terror, = struct.unpack(\"<I\", breq[:4])\n\t\t\td[\"error\"] = error\n\t\treturn d", "def formRequestPacket(request):\r\n magicNumber = 0x497E\r\n packetType = 0x0001\r\n #Assign the appropriate request type\r\n #Checks already conducted in input phase\r\n if request == \"date\":\r\n requestType = 0x0001\r\n elif request == \"time\":\r\n requestType = 0x0002\r\n \r\n #Create and fill out the bytearray\r\n requestPacket = bytearray(6)\r\n requestPacket[0:2] = magicNumber.to_bytes(2, byteorder=\"big\")\r\n requestPacket[2:4] = 
packetType.to_bytes(2, byteorder=\"big\")\r\n requestPacket[4:6] = requestType.to_bytes(2, byteorder=\"big\")\r\n return requestPacket", "def _parse_request(self) -> None:\n action_and_name_from_request = self._request.split(PROTOCOL)\n self._action_from_request = action_and_name_from_request[0].split()[0]\n self._name_from_request = ' '.join(action_and_name_from_request[0].split()[1:])\n self._phone_from_request = self._request.split('\\r\\n')[1]", "def test_NPNAndALPNNoAdvertise(self):\n protocols = [b'h2', b'http/1.1']\n negotiatedProtocol, lostReason = negotiateProtocol(\n clientProtocols=protocols,\n serverProtocols=[],\n )\n self.assertIsNone(negotiatedProtocol)\n self.assertIsNone(lostReason)", "def test_nextProtocolMechanismsNPNIsSupported(self):\n supportedProtocols = sslverify.protocolNegotiationMechanisms()\n self.assertTrue(\n sslverify.ProtocolNegotiationSupport.NPN in supportedProtocols\n )", "def create_discover_payload(self):\n discoverRequest = ET.Element(\"discoverRequest\")\n type = ET.SubElement(discoverRequest, \"type\")\n type.text = self._module.paramgram[\"type\"]\n if self._module.paramgram[\"root_ip\"] and self._module.paramgram[\"type\"] == \"SmartScan\":\n rootIP = ET.SubElement(discoverRequest, \"rootIP\")\n rootIP.text = self._module.paramgram[\"root_ip\"]\n includeRange = ET.SubElement(discoverRequest, \"includeRange\")\n includeRange.text = self._module.paramgram[\"include_range\"]\n excludeRange = ET.SubElement(discoverRequest, \"excludeRange\")\n excludeRange.text = self._module.paramgram[\"exclude_range\"]\n # PROCESS OPTIONS\n noPing = ET.SubElement(discoverRequest, \"noPing\")\n noPing.text = str(self._module.paramgram[\"no_ping\"]).lower()\n onlyPing = ET.SubElement(discoverRequest, \"onlyPing\")\n onlyPing.text = str(self._module.paramgram[\"only_ping\"]).lower()\n\n delta = ET.SubElement(discoverRequest, \"delta\")\n delta.text = str(self._module.paramgram[\"delta\"]).lower()\n\n vmOff = ET.SubElement(discoverRequest, \"vmOff\")\n vmOff.text = str(self._module.paramgram[\"vm_off\"]).lower()\n\n vmTemplate = ET.SubElement(discoverRequest, \"vmTemplate\")\n vmTemplate.text = str(self._module.paramgram[\"vm_templates\"]).lower()\n\n discoverRoute = ET.SubElement(discoverRequest, \"discoverRoute\")\n discoverRoute.text = str(self._module.paramgram[\"discover_routes\"]).lower()\n\n winexeBased = ET.SubElement(discoverRequest, \"winexeBased\")\n winexeBased.text = str(self._module.paramgram[\"winexe_based\"]).lower()\n\n unmanaged = ET.SubElement(discoverRequest, \"unmanaged\")\n unmanaged.text = str(self._module.paramgram[\"unmanaged\"]).lower()\n\n monitorWinEvents = ET.SubElement(discoverRequest, \"monitorWinEvents\")\n monitorWinEvents.text = str(self._module.paramgram[\"monitor_win_events\"]).lower()\n\n monitorWinPatch = ET.SubElement(discoverRequest, \"monitorWinPatch\")\n monitorWinPatch.text = str(self._module.paramgram[\"monitor_win_patches\"]).lower()\n\n monitorInstSw = ET.SubElement(discoverRequest, \"monitorInstSw\")\n monitorInstSw.text = str(self._module.paramgram[\"monitor_installed_sw\"]).lower()\n\n nameResolutionDnsFirst = ET.SubElement(discoverRequest, \"nameResolutionDnsFirst\")\n nameResolutionDnsFirst.text = str(self._module.paramgram[\"name_resolution_dns_first\"]).lower()\n\n xmlstr = ET.tostring(discoverRequest, 'utf-8')\n return xmlstr", "def _dump_request(self, hpack_decoder):\n assert(self.stream_done())\n\n # get request header.\n header_list = hpack_decoder.decode(self._server_recv_header)\n req_header = 
dict(util.header_base64_decode(header_list))\n message_encoding = req_header.get(GRPC_MESSAGE_ENCODING, None)\n\n # get request data info.\n req_data = None\n if self._server_recv_data:\n req_data = proto_util.parse_rpc_data_frame(\n self._server_recv_data,\n req_header[':path'],\n myconfig.PARAMETER_REQUEST,\n message_encoding\n )\n self.__req_path = req_header[':path']\n\n print('request header: ')\n print(textwrap.indent(pprint.pformat(req_header), ' '*4))\n print('request data(LPS parse in protobuf): ')\n for lps in req_data:\n print(textwrap.indent(str(lps.__class__), ' '*4))\n print(textwrap.indent(str(lps), ' '*8))", "def test_simple_request_message(self):\n \n message = \"begin ims1.0\\r\\nmsg_type request\\nmsg_id ex009 any_ndc \\ne-mail foo.bar.ssi@domain.name.de \\ntime 1999/06/13 to 1999/06/14 \\nbull_type idc_reb \\nbulletin ims1.0\\nstop\"\n \n parser = IMSParser()\n \n result = parser.parse(message)\n \n #print(\"\\nresult = %s\\n\" %(result))\n \n # check mandatory fields\n self.assertEqual(result['MSGFORMAT'],'ims1.0')\n self.assertEqual(result['MSGTYPE'],'request')\n self.assertEqual(result['MSGID'],'ex009')\n self.assertEqual(result['TARGET'],'EMAIL')\n self.assertEqual(result['EMAILADDR'],'foo.bar.ssi@domain.name.de')\n \n # optional for this request\n self.assertEqual(result['SOURCE'],'any_ndc')\n \n # product_1\n self.assertTrue(result.has_key('PRODUCT_1'))\n \n self.assertEqual(result['PRODUCT_1'], {'FORMAT': 'ims1.0', 'STARTDATE': '1999/06/13', 'BULLTYPE': 'idc_reb', 'ENDDATE': '1999/06/14', 'TYPE': 'BULLETIN'})", "def CheckCapabilityNegotiation(self,environ,start_response,responseHeaders):\n\t\tua=sa=None\n\t\tif \"HTTP_DATASERVICEVERSION\" in environ:\n\t\t\tmajor,minor,ua=ParseDataServiceVersion(environ[\"HTTP_DATASERVICEVERSION\"])\n\t\telse:\n\t\t\tmajor=2\n\t\t\tminor=0\n\t\tif \"HTTP_MAXDATASERVICEVERSION\" in environ:\n\t\t\tmaxMajor,maxMinor,sa=ParseMaxDataServiceVersion(environ[\"HTTP_MAXDATASERVICEVERSION\"])\n\t\telse:\n\t\t\tmaxMajor=major\n\t\t\tmaxMinor=minor\n\t\tif major>2 or (major==2 and minor>0):\n\t\t\t# we can't cope with this request\n\t\t\treturn None\n\t\telif maxMajor>=2:\n\t\t\tresponseHeaders.append(('DataServiceVersion','2.0; pyslet %s'%info.version))\n\t\t\treturn 2\n\t\telse:\n\t\t\tresponseHeaders.append(('DataServiceVersion','1.0; pyslet %s'%info.version))\n\t\t\treturn 1", "def request_fewer_flags(self, req, msg):", "def no_params(self):\n NO = []\n if self.TCPIP_SNAP:\n no_params = self.__snap_stanza_read(self.TCPIP_SNAP, 'no -a')\n if no_params:\n for record in no_params:\n if 'somaxconn' in record:\n NO.append({'tun_name' : 'somaxconn', 'tun_value' : record.split('=')[1].lstrip()})\n if 'sb_max' in record:\n NO.append({'tun_name' : 'sb_max', 'tun_value' : record.split('=')[1].lstrip()})\n if 'tcp_ephemeral_high' in record:\n NO.append({'tun_name' : 'tcp_ephemeral_high', 'tun_value' : record.split('=')[1].lstrip()})\n if 'tcp_ephemeral_low' in record:\n NO.append({'tun_name' : 'tcp_ephemeral_low', 'tun_value' : record.split('=')[1].lstrip()})\n if 'udp_ephemeral_high' in record:\n NO.append({'tun_name' : 'udp_ephemeral_high', 'tun_value' : record.split('=')[1].lstrip()})\n if 'udp_ephemeral_low' in record:\n NO.append({'tun_name' : 'udp_ephemeral_low', 'tun_value' : record.split('=')[1].lstrip()})\n if 'tcp_recvspace' in record:\n NO.append({'tun_name' : 'tcp_recvspace', 'tun_value' : record.split('=')[1].lstrip()})\n if 'tcp_sendspace' in record:\n NO.append({'tun_name' : 'tcp_sendspace', 'tun_value' : 
record.split('=')[1].lstrip()})\n else:\n return None\n else:\n return None\n return NO", "def parse_request(self):\n self.method, self.location, self.http_version = \\\n self.request_line.decode(\"utf-8\").split()", "def print_http_nfc_lease_info(info):\n print 'Lease timeout: {0.leaseTimeout}\\n' \\\n 'Disk Capacity KB: {0.totalDiskCapacityInKB}'.format(info)\n device_number = 1\n if info.deviceUrl:\n for device_url in info.deviceUrl:\n print 'HttpNfcLeaseDeviceUrl: {1}\\n' \\\n 'Device URL Import Key: {0.importKey}\\n' \\\n 'Device URL Key: {0.key}\\n' \\\n 'Device URL: {0.url}\\n' \\\n 'Device URL Size: {0.fileSize}\\n' \\\n 'SSL Thumbprint: {0.sslThumbprint}\\n'.format(device_url,\n device_number)\n if not device_url.targetId:\n print \"No targetId found for this device\"\n print \"Device is not eligible for export. This could be a mounted iso or img of some sort\"\n print \"It will NOT be downloaded\\n\"\n\n device_number += 1\n else:\n print 'No devices were found.'", "def edns_version(self):\n return _ldns.ldns_pkt_edns_version(self)\n #parameters: const ldns_pkt *,\n #retvals: uint8_t" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP/PKEXv2 over TCP and automatic connection
def test_dpp_tcp_pkex_auto_connect_2(dev, apdev, params): run_dpp_tcp_pkex_auto_connect_2(dev, apdev, params, False)
[ "def __connect_NN_socket(self):\n if self.mode == \"one2many\":\n # This allows only use one publisher connected at the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"*\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.bind(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" bind\")\n \n elif self.mode == \"many2one\":\n # This allows two use more that one publisher ate the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")\n \n elif self.mode == \"many2many\":\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")", "def startProtocol(self):\n self.transport.connect(self.host, self.port)\n logging.info(\"Connect with %s:%d\" % (self.host, self.port))", "def connect(self):\n\n # Open TCP connection to GPIB-ETHERNET\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.sock.settimeout(self.timeout)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.connect((self.host, self.port))\n\n if self.sock.send('\\1') != 1: # check for valid connection\n print \"send 1 error\"\n self.close()\n ret = ord(self.sock.recv(1)[0])\n if ret == 0:\n print \"connected to API\"\n else:\n print \"connection error\"\n self.close()\n\n self._isOpen = True", "def connect_p2p():\n return connect(\"p2p\")", "def __connect(self):\n self.session = xnatpy.connect(\n self.server, user=self.user, password=self.password\n )", "def fakeConnection(self):\n\n## if self.sentSYN and not self.receivedSYN:\n## dhost = self.peerIP\n## dport = self.peerPort\n## shost = self.myIP\n## sport = self.myPort\n## argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN)\n## argc = len(argv)\n## print 'Send SYN', self.SYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n## self.sp.fakeConnection(argv, argc)\n \n #print 'Fake connection:', self.sentSYN, self.receivedSYN, '\\n'\n if self.sentSYN and self.receivedSYN:\n dhost = self.peerIP\n dport = self.peerPort\n shost = self.myIP\n sport = self.myPort\n argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN, '%ld'%self.peerSYN)\n argc = len(argv)\n print 'Send SYN-ACK', self.SYN, self.peerSYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n #self.sp.fakeConnection(argv, argc)\n\n # -----------------------------------------\n # Auto send SYNACK", "def __connect_ZMQ_socket(self):\n endpoint = \"tcp://\" + self.ip + \":\" + str(self.port)\n if self.mode == \"one2many\":\n # This allows only use one publisher connected at the same endpoint\n self.sock.bind(endpoint)\n if self.debug or self.network == \"direct\":\n if not self.topic == \"/nep_node\":\n print(\"PUB: \" + self.topic + \" endpoint: \" + endpoint + \" bind\")\n elif self.mode == \"many2one\":\n # This allows two use more that one publisher ate the same endpoint\n self.sock.connect(endpoint)\n if self.debug or self.network == \"direct\":\n if not self.topic == \"/nep_node\":\n print(\"PUB: \" + self.topic + \" endpoint: \" + endpoint + \" connect\")\n elif self.mode == \"many2many\":\n self.sock.connect(endpoint)\n if self.debug or self.network == \"direct\":\n print(\"PUB: \" + self.topic + \" endpoint: \" + 
endpoint + \" connect\")", "def send_packet():", "def connect(self):\n try:\n self.logger.debug(\"connect(), opening communication at '%s'\" % self._address)\n opencomm(self._address)\n # Open TC2 Resource\n self.logger.debug(\"connect(), open resource\")\n mpos_openresource(ResourceId.TC2,self._cplnum, BlockingMode.NOT_BLOCKING)\n self.logger.debug(\"connect(), log clock selection\")\n mps_logclockselect(self._cplnum, ClockMode.INTERNAL)\n self.logger.debug(\"connect(), set clock divider\")\n mps_setclkdiv(self._cplnum, ClockDivider.DIV_8192);\n self.logger.debug(\"connect(), startdownloadto\")\n startdownloadto(self._cplnum, \"spy.mplog\");\n self.logger.debug(\"connect(), opening log\")\n mps_openlog(self._cplnum, LogEvent.EN_C1 | \n LogEvent.EN_C2 | \n LogEvent.EN_C3 | \n LogEvent.EN_C7 | \n LogEvent.EN_CHAR | \n LogEvent.EN_ETU, \n 0)\n self.logger.debug(\"connect(),vcc selection\")\n mps_vccselectmv(self._cplnum, 5000);\n self._atr= mps_oncmm(self._cplnum, 2000000);\n self.logger.debug(\"connect(), get atr and send pps, atr: %s\",self._atr)\n mps_sendppscmm(self._cplnum, 0, 9, 6);\n self.logger.info(about())\n except ResourceMgmt as res:\n self.logger.error(\"Resource allocation is failed, errno=\",res.args[0])\n #Try to overide resource and closing it down\n if(res.args[1] == 3902):\n mpos_openresource(ResourceId.TC2,self._cplnum,BlockingMode.OVERRIDE)\n mpos_closeresource(ResourceId.TC2,self._cplnum)\n #closecommunication\n closecomm()\n raise res\n\n except SpyMgmt as spy:\n self.logger.error(\"Spying failed, errno: \",spy.args[1])\n mps_closelog(self._cplnum)\n mps_enddownload(self._cplnum)\n mpos_closeresource(TC2,self._cplnum)\n #closecommunication\n closecomm()\n raise spy\n \n except RuntimeError as re:\n self.logger.error(\"Caught runtime error: %s, %d \" % re.args)\n self.closedown()\n raise re\n\n except Exception as ex:\n self.logger.error(\"Caught unknown exception: %s, %d\" % ex.args)\n self.closedown() \n raise ex", "def __connect(self):\n\n try:\n # in connect we use only 'json' content type\n\n rqst_dict = DSAPIRequests.connect(email=self.username, password=self.password)\n\n try:\n print(\"Connecting to DSpace API...\")\n result = self.send_request(rqst_dict, c_type='json')\n print(\"Connection request returned: \", result)\n self.api_token = result['api-token']\n except Exception as e:\n print(\"Failed to connect to DSpace API because of the following reason: \" + str(e))\n raise e\n\n except Exception as e:\n raise e", "def serverConnect(self):\n\t\tself.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.rtspSocket.connect((self.serverAddr, self.serverPort))", "def connect(self):\n\t\tself.stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.stream.connect((self.host, self.port))\n\t\t# timeout after 5 seconds\n\t\tself.stream.settimeout(5)", "def connect(self):\n # Get connected with all server nodes\n for ID, addr in self._server_namebook.items():\n server_ip = addr[1]\n server_port = addr[2]\n _add_receiver_addr(self._sender, server_ip, server_port, ID)\n _sender_connect(self._sender)\n\n # Send client address to server nodes\n self._addr = self._get_local_usable_addr()\n client_ip, client_port = self._addr.split(':')\n\n msg = KVStoreMsg(\n type=KVMsgType.IP_ID,\n rank=0, # a tmp client ID\n name=self._addr,\n id=None,\n data=None,\n shape=None,\n c_ptr=None)\n\n for server_id in range(self._server_count):\n _send_kv_msg(self._sender, msg, server_id)\n\n _receiver_wait(self._receiver, client_ip, int(client_port), 
self._server_count)\n\n # Recv client ID from server\n msg = _recv_kv_msg(self._receiver)\n assert msg.rank == 0\n self._client_id = int(msg.name)\n\n # Recv shared-tensor information from server\n msg = _recv_kv_msg(self._receiver)\n assert msg.rank == 0\n data_str = msg.name.split('|')\n for data in data_str:\n if data != '':\n tensor_name, dtype = self._deserialize_shared_tensor(data)\n while True:\n if (os.path.exists(tensor_name+'shape-'+str(self._machine_id))):\n break\n else:\n time.sleep(1) # wait until the file been created \n shape, data_type = self._read_data_shape_type(tensor_name+'shape-'+str(self._machine_id))\n assert data_type == dtype\n shared_data = empty_shared_mem(tensor_name, False, shape, dtype)\n dlpack = shared_data.to_dlpack()\n self._data_store[tensor_name] = F.zerocopy_from_dlpack(dlpack)\n if '-data-' in tensor_name:\n self._data_name_list.append(tensor_name[0:-6])\n self._has_data.add(tensor_name)\n\n # Get full shape of each data\n for name in self._data_name_list:\n data_shape = list(F.shape(self._data_store[name+'-data-']))\n data_shape[0] = 0\n msg = KVStoreMsg(\n type=KVMsgType.GET_SHAPE,\n rank=self._client_id,\n name=name,\n id=None, \n data=None,\n shape=None,\n c_ptr=None)\n # send msg\n for m_id in range(self._machine_count):\n s_id = m_id * self._group_count\n _send_kv_msg(self._sender, msg, s_id)\n # recv msg\n for m_id in range(self._machine_count):\n back_msg = _recv_kv_msg(self._receiver)\n assert back_msg.type == KVMsgType.GET_SHAPE_BACK\n data_shape[0] += ((F.asnumpy(back_msg.shape)).tolist())[0]\n self._full_data_shape[name] = tuple(data_shape)\n\n print(\"KVClient %d connect to kvstore successfully!\" % self.get_id())", "def tcp_request(self):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.settimeout(TIMEOUT)\n\n try:\n sock.connect((self.host, self.port))\n self.opened_tcp = \"+\"\n\n try:\n data = sock.recv(512).decode()\n\n except timeout:\n # It is not a post protocol because there is no greeting.\n # It may be HTTP.\n sock.send(\"GET / HTTP/1.1{0}{0}\".format(linesep).encode())\n\n try:\n data = sock.recv(512).decode()\n if data.startswith(\"HTTP\"):\n self.protocol = \"HTTP\"\n except timeout:\n # This is not a protocol from the list.\n return\n\n else:\n # It may be a post server.\n if data.startswith(\"220\"):\n # Mail-server is connected to electrical power station.\n data = data.lower()\n if data.find(\"smtp\") > 0:\n self.protocol = \"SMTP\"\n elif data.find(\"ftp\") > 0:\n self.protocol = \"FTP\"\n elif data.startswith(\"+OK\"):\n self.protocol = \"POP3\"\n\n # TCP is closed in following cases.\n except timeout:\n self.opened_tcp = \"-\"\n except error:\n debug(\"Can't get information about TCP on port: %s.\", self.port)\n self.opened_tcp = \"-\"\n finally:\n sock.close()", "def connectMPD():\n client = MPDClient() # create client object\n client.timeout = 10 # network timeout in seconds (floats allowed), default: None\n client.idletimeout = None # timeout for fetching the result of the idle command is handled seperately, default: None\n try:\n client.connect(\"localhost\", 6600) # connect to localhost:6600\n except Exception :\n print \"Can Connect to MPD...\"", "def http_connect(self):\r\n host, port, self.uri, is_ssl = self.connection_args\r\n self.connection = self.conn_class(host, port=port)\r\n self.connection.set_debuglevel(self.debuglevel)", "def init(self):\n self.ctx = self.directEthernetServer.context()\n p = self.directEthernetServer.packet(context=self.ctx)\n p.connect(self.port)\n yield p.send()", "def 
connectionMade(self):\n super().connectionMade()\n # negociate telnet options\n self.transport.negotiationMap[LINEMODE] = self.telnet_LINEMODE\n self.transport.negotiationMap[PLUGIN] = self.telnet_PLUGIN\n self.transport.negotiationMap[TTYPE] = self.telnet_TTYPE\n self.transport.will(LINEMODE)\n self.transport.do(SGA)\n self.transport.will(NAWS)\n self.transport.will(TTYPE)\n self.NAWS()\n self._start_keyboard_listener()\n # here is a good place to start a programmatic interaction with the server.\n \n self.transport.write(b'ascenseur\\n\\n')\n self.transport.write(b'porte\\n')\n self.transport.write(b'\\n')\n self.transport.write(b'technique\\n')\n self.transport.write(b'automate\\n')\n self.transport.write(b'1\\n')\n self.transport.write(b'az\\n')", "def main():\n handshake()\n send_file()\n teardown_connection()", "def open_connection(self):\r\n\r\n\r\n buf = ctypes.create_string_buffer(16) # at least 8 byte\r\n ret = self.check(self._dll.PH_OpenDevice(self._deviceID, ctypes.byref(buf)))\r\n self._serial = buf.value.decode() # .decode() converts byte to string\r\n if ret >= 0:\r\n self._connected_to_device = True\r\n self.logMsg('Connection to the Picoharp 300 established',\r\n msgType='status')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP/PKEXv2 over TCP and automatic connection status
def test_dpp_tcp_pkex_auto_connect_2_status(dev, apdev, params): run_dpp_tcp_pkex_auto_connect_2(dev, apdev, params, True)
[ "def startProtocol(self):\n self.transport.connect(self.host, self.port)\n logging.info(\"Connect with %s:%d\" % (self.host, self.port))", "def connect(self):\n\n # Open TCP connection to GPIB-ETHERNET\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.sock.settimeout(self.timeout)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.connect((self.host, self.port))\n\n if self.sock.send('\\1') != 1: # check for valid connection\n print \"send 1 error\"\n self.close()\n ret = ord(self.sock.recv(1)[0])\n if ret == 0:\n print \"connected to API\"\n else:\n print \"connection error\"\n self.close()\n\n self._isOpen = True", "def test4_output_tcp_enable(self):\n cmd = 'python3 -c \"from dnstap_receiver.receiver import start_receiver; start_receiver()\" -c ./tests/dnstap_tcp.conf'\n o = execute_dnstap(cmd)\n \n self.assertRegex(o, b\"Output handler: tcp\")", "def tcp_request(self):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.settimeout(TIMEOUT)\n\n try:\n sock.connect((self.host, self.port))\n self.opened_tcp = \"+\"\n\n try:\n data = sock.recv(512).decode()\n\n except timeout:\n # It is not a post protocol because there is no greeting.\n # It may be HTTP.\n sock.send(\"GET / HTTP/1.1{0}{0}\".format(linesep).encode())\n\n try:\n data = sock.recv(512).decode()\n if data.startswith(\"HTTP\"):\n self.protocol = \"HTTP\"\n except timeout:\n # This is not a protocol from the list.\n return\n\n else:\n # It may be a post server.\n if data.startswith(\"220\"):\n # Mail-server is connected to electrical power station.\n data = data.lower()\n if data.find(\"smtp\") > 0:\n self.protocol = \"SMTP\"\n elif data.find(\"ftp\") > 0:\n self.protocol = \"FTP\"\n elif data.startswith(\"+OK\"):\n self.protocol = \"POP3\"\n\n # TCP is closed in following cases.\n except timeout:\n self.opened_tcp = \"-\"\n except error:\n debug(\"Can't get information about TCP on port: %s.\", self.port)\n self.opened_tcp = \"-\"\n finally:\n sock.close()", "def __connect_NN_socket(self):\n if self.mode == \"one2many\":\n # This allows only use one publisher connected at the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"*\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.bind(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" bind\")\n \n elif self.mode == \"many2one\":\n # This allows two use more that one publisher ate the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")\n \n elif self.mode == \"many2many\":\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")", "def starttls(self):\n starttls_command = \"STARTTLS\"\n\n if DEBUG:\n logging.debug(repr(\"{}{}\".format(starttls_command, CRLF)))\n logging.info(\"C:{}{}\".format(starttls_command, CRLF))\n\n self.sock.send(starttls_command + CRLF)\n\n try:\n response = self.sock.recv(KILO)\n except socket.timeout as err:\n logging.error(err.message)\n raise\n\n logging.info(\"S:\" + response)\n\n if not compare_status(status.READY, response):\n runtime_err = build_unexpected_status_error(response[0:3], status.READY)\n logging.error(runtime_err.message)\n raise runtime_err\n\n 
self.sock = ssl.SSLSocket(self.sock)\n logging.info(\"CURRENTLY IN TLS MODE\")\n return response", "def Start_connection(self):\n\t\tself.__client()\n\t\twhile self.__rc == 0:\n\t\t\tself.__rc = self.client_.loop()\n\t\tprint(f'rc: {self.__rc}')", "def keep_alive(self):\n self.send_tcp_msg('00')", "def _dpi_monitor(self):\n while True:\n # send a REST request to DPI server\n try:\n if self.dpi_info['ip']:\n s = requests.session()\n s.keep_alive = False\n r = s.get('http://'+self.dpi_info['ip']+\":\"+self.dpi_info['port'])\n res = r.json()\n res['dpid'] = self.dpi_info['dpid']\n res['period'] = SimpleMonitor.DPI_REQ_INTERVAL\n event = DPIMessage(res)\n self.send_event_to_observers(event)\n except:\n # clear dpi and wait next connection\n print(\"DPI disconnected..\")\n self.dpi_info = {'mac': None, 'port_no': None, 'dpid': None, 'name': None, 'ip': None, 'port': None, 'tree': None}\n return\n\n # XXX: only check three protocols currently\n # print(\"DPI Request --------------\\n\")\n # res_info = {'Yahoo': 0, 'Facebook': 0, 'Google': 0}\n # for x in res.get('detected.protos', []):\n # if x['name'] == 'Yahoo':\n # res_info['Yahoo'] = x['bytes']\n # if x['name'] == 'Facebook':\n # res_info['Facebook'] = x['bytes']\n # if x['name'] == 'Google':\n # res_info['Google'] = x['bytes']\n\n # with open(\"dpi_log.txt\", \"a\") as dpioutput:\n # ts = time.time()\n # ts = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n # dpioutput.write(\"Protocol\\tBytes\\t\\t\")\n # dpioutput.write(ts)\n # dpioutput.write(\"\\nYahoo\\t\")\n # dpioutput.write(\"Facebook\\t\")\n # dpioutput.write(\"Google\\n\")\n # dpioutput.write(str(res_info[\"Yahoo\"])+\"\\t\")\n # dpioutput.write(str(res_info[\"Facebook\"])+\"\\t\")\n # dpioutput.write(str(res_info[\"Google\"])+\"\\n\")\n\n hub.sleep(SimpleMonitor.DPI_REQ_INTERVAL)\n pass", "def connect(self):\n try:\n self.logger.debug(\"connect(), opening communication at '%s'\" % self._address)\n opencomm(self._address)\n # Open TC2 Resource\n self.logger.debug(\"connect(), open resource\")\n mpos_openresource(ResourceId.TC2,self._cplnum, BlockingMode.NOT_BLOCKING)\n self.logger.debug(\"connect(), log clock selection\")\n mps_logclockselect(self._cplnum, ClockMode.INTERNAL)\n self.logger.debug(\"connect(), set clock divider\")\n mps_setclkdiv(self._cplnum, ClockDivider.DIV_8192);\n self.logger.debug(\"connect(), startdownloadto\")\n startdownloadto(self._cplnum, \"spy.mplog\");\n self.logger.debug(\"connect(), opening log\")\n mps_openlog(self._cplnum, LogEvent.EN_C1 | \n LogEvent.EN_C2 | \n LogEvent.EN_C3 | \n LogEvent.EN_C7 | \n LogEvent.EN_CHAR | \n LogEvent.EN_ETU, \n 0)\n self.logger.debug(\"connect(),vcc selection\")\n mps_vccselectmv(self._cplnum, 5000);\n self._atr= mps_oncmm(self._cplnum, 2000000);\n self.logger.debug(\"connect(), get atr and send pps, atr: %s\",self._atr)\n mps_sendppscmm(self._cplnum, 0, 9, 6);\n self.logger.info(about())\n except ResourceMgmt as res:\n self.logger.error(\"Resource allocation is failed, errno=\",res.args[0])\n #Try to overide resource and closing it down\n if(res.args[1] == 3902):\n mpos_openresource(ResourceId.TC2,self._cplnum,BlockingMode.OVERRIDE)\n mpos_closeresource(ResourceId.TC2,self._cplnum)\n #closecommunication\n closecomm()\n raise res\n\n except SpyMgmt as spy:\n self.logger.error(\"Spying failed, errno: \",spy.args[1])\n mps_closelog(self._cplnum)\n mps_enddownload(self._cplnum)\n mpos_closeresource(TC2,self._cplnum)\n #closecommunication\n closecomm()\n raise spy\n \n except RuntimeError as re:\n 
self.logger.error(\"Caught runtime error: %s, %d \" % re.args)\n self.closedown()\n raise re\n\n except Exception as ex:\n self.logger.error(\"Caught unknown exception: %s, %d\" % ex.args)\n self.closedown() \n raise ex", "def fakeConnection(self):\n\n## if self.sentSYN and not self.receivedSYN:\n## dhost = self.peerIP\n## dport = self.peerPort\n## shost = self.myIP\n## sport = self.myPort\n## argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN)\n## argc = len(argv)\n## print 'Send SYN', self.SYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n## self.sp.fakeConnection(argv, argc)\n \n #print 'Fake connection:', self.sentSYN, self.receivedSYN, '\\n'\n if self.sentSYN and self.receivedSYN:\n dhost = self.peerIP\n dport = self.peerPort\n shost = self.myIP\n sport = self.myPort\n argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN, '%ld'%self.peerSYN)\n argc = len(argv)\n print 'Send SYN-ACK', self.SYN, self.peerSYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n #self.sp.fakeConnection(argv, argc)\n\n # -----------------------------------------\n # Auto send SYNACK", "def handshake():\n global CONNECTION_STATE\n initial_pkt = packet(\"SYN\", 0, 0)\n send_syn(initial_pkt)\n CONNECTION_STATE = \"ESTABLISHED\"\n print(\"HANDSHAKE COMPLETE\")", "def _server():\n url = 'https://104.131.128.139/tcp'\n headers = {'X-Auth-Key': 'abc', 'X-Auth-Secret': 'abc'}\n\n try:\n return requests.get(url, headers=headers, verify=False).json()\n except requests.exceptions.ConnectionError:\n logging.error('server is unreachable')\n sys.exit(1)", "def get_connection_status():\n command = ['ping', '-c', '2', '-I', '3g-wan', '-W', '1', '8.8.8.8']\n return run_command(command)", "def http_connect(self):\r\n host, port, self.uri, is_ssl = self.connection_args\r\n self.connection = self.conn_class(host, port=port)\r\n self.connection.set_debuglevel(self.debuglevel)", "def tcpclientcheck():\n if tcpclient.get() == 1:\n TcpClient(TCPTRIGGER).start()\n runultimateintf.set(0)\n APP.gui.start_rec_button.configure(state=\"disabled\")\n if ultimate.get() == 0:\n APP.gui.autorun_checkbox.configure(state=\"normal\")\n elif rec_in_progress == 1:\n APP.gui.writelog(\"Disconnected from the TCP server. 
IQ recording discontinued.\")\n APP.gui.start_stop_rec()\n\n else:\n APP.gui.autorun_checkbox.configure(state=\"disabled\")\n APP.gui.start_rec_button.configure(state=\"normal\")\n auto_run_tdoa.set(0)", "def kasaya_connection_started(self, addr):\n LOG.debug(\"Connected to %s\", addr)\n self.SYNC.notify_worker_live(self.status)", "def connexion(self):\r\n connexion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n \r\n if self.debug is True:\r\n print(\"[Client]: Client >> connexion avec le Serveur en cours ...\")\r\n \r\n try:\r\n connexion.connect( (self.ip, self.port) )\r\n \r\n if self.debug is True:\r\n print(\"[Client]: Client >> connexion avec le Serveur reussi\")\r\n \r\n self.thR = Reception(connexion, self.ui, Debug=self.debug)\r\n self.thE = Emission(connexion, self.ui, Debug=self.debug)\r\n \r\n self.thR.start()\r\n self.thE.start()\r\n \r\n return True\r\n \r\n \r\n except socket.error as e:\r\n \r\n if self.debug is True:\r\n print(\"[Client]: Client >> [ERROR] connexion impossible\")\r\n self.logger(str(e))\r\n \r\n return False", "def _onconnect(self):\n# print('DEBUG: enter daq._onconnect',file=sys.stderr)\n handshake_tries = 0\n while True:\n try:\n hs = self.comm.command(b'H')\n except RuntimeError:\n handshake_tries += 1\n if handshake_tries>=3:\n self._conncall('Handshake timed out. Check if PteroDAQ firmware is installed.')\n return\n continue\n break\n if hs != b'DAQ':\n self._conncall('Handshake failed. Check if PteroDAQ firmware is installed.')\n return\n version = self.comm.command(b'V')\n if version != firmware_version:\n self._conncall('Incorrect version: {0} present, {1} needed.'.format(tostr(version), tostr(firmware_version)))\n return\n model = self.comm.command(b'M')\n self.board = getboardinfo(model)\n self._conncall(None)", "def open_connection(self):\r\n\r\n\r\n buf = ctypes.create_string_buffer(16) # at least 8 byte\r\n ret = self.check(self._dll.PH_OpenDevice(self._deviceID, ctypes.byref(buf)))\r\n self._serial = buf.value.decode() # .decode() converts byte to string\r\n if ret >= 0:\r\n self._connected_to_device = True\r\n self.logMsg('Connection to the Picoharp 300 established',\r\n msgType='status')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP/PKEXv2 over TCP and automatic connection status for failure
def test_dpp_tcp_pkex_auto_connect_2_status_fail(dev, apdev, params): run_dpp_tcp_pkex_auto_connect_2(dev, apdev, params, True, start_ap=False)
[ "def Check_Communications(self):\n self.comm_status = False\n try:\n self.ser.close()\n self.ser.open()\n if self.ser.isOpen():\n self.ser.flushInput()\n self.ser.write('SYS:ERR?\\r\\n')\n time.sleep(0.1)\n status = int(self.ser.readline().split()[0])\n if status == 0:\n self.comm_status = True\n return\n else:\n self.ser.close()\n return\n except Exception as e:\n print \"No communication to BK Precision Back-Bias supply. Exception of type %s and args = \\n\"%type(e).__name__, e.args \n self.ser.close()\n return", "def testConnectionToBatikServer():\n try:\n conn = Telnet(config.SVGrafZ_BatikServer_Host,\n config.SVGrafZ_BatikServer_Port)\n conn.write('HELLO BatikServer\\n\\n')\n res = conn.read_all()\n if res:\n LOG(\"SVGrafZ\", 0, \"Connecting to BatikServer ... success.\")\n conn.close()\n except socket.error:\n res = None\n if res != '0':\n LOG(\"SVGrafZ\", 100, \"Connecting to BatikServer ... failure.\")", "def connect(self):\n\n # Open TCP connection to GPIB-ETHERNET\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.sock.settimeout(self.timeout)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.connect((self.host, self.port))\n\n if self.sock.send('\\1') != 1: # check for valid connection\n print \"send 1 error\"\n self.close()\n ret = ord(self.sock.recv(1)[0])\n if ret == 0:\n print \"connected to API\"\n else:\n print \"connection error\"\n self.close()\n\n self._isOpen = True", "def port_testing(self):\n\n try:\n try:\n remoteServerIP = socket.gethostbyname(self.hostname)\n except socket.gaierror:\n remoteServerIP = socket.gethostbyname(self.url.split(\"/\")[0].split(\":\")[0])\n\n for port in PORTS_TO_SCAN:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(0.3)\n result = sock.connect_ex((remoteServerIP, port[0]))\n sock.close()\n\n if result == 0 and port[1] is False:\n self.portWeight = 1\n return\n elif result != 0 and port[1] is True:\n self.portWeight = 1\n return\n self.portWeight = 0\n return\n\n except Exception as e:\n logger.error(e)\n return -2", "def test4_output_tcp_enable(self):\n cmd = 'python3 -c \"from dnstap_receiver.receiver import start_receiver; start_receiver()\" -c ./tests/dnstap_tcp.conf'\n o = execute_dnstap(cmd)\n \n self.assertRegex(o, b\"Output handler: tcp\")", "def check_connection():\n while True:\n result = try_and_print(message='Ping test...', function=ping, cs='OK')\n if result['CS']:\n break\n if not ask('ERROR: System appears offline, try again?'):\n if ask('Continue anyway?'):\n break\n else:\n abort()", "def cpsconnfailure(self) :\n\t\ttry :\n\t\t\treturn self._cpsconnfailure\n\t\texcept Exception as e:\n\t\t\traise e", "def starttls(self):\n starttls_command = \"STARTTLS\"\n\n if DEBUG:\n logging.debug(repr(\"{}{}\".format(starttls_command, CRLF)))\n logging.info(\"C:{}{}\".format(starttls_command, CRLF))\n\n self.sock.send(starttls_command + CRLF)\n\n try:\n response = self.sock.recv(KILO)\n except socket.timeout as err:\n logging.error(err.message)\n raise\n\n logging.info(\"S:\" + response)\n\n if not compare_status(status.READY, response):\n runtime_err = build_unexpected_status_error(response[0:3], status.READY)\n logging.error(runtime_err.message)\n raise runtime_err\n\n self.sock = ssl.SSLSocket(self.sock)\n logging.info(\"CURRENTLY IN TLS MODE\")\n return response", "def _server():\n url = 'https://104.131.128.139/tcp'\n headers = {'X-Auth-Key': 'abc', 'X-Auth-Secret': 'abc'}\n\n try:\n return requests.get(url, headers=headers, 
verify=False).json()\n except requests.exceptions.ConnectionError:\n logging.error('server is unreachable')\n sys.exit(1)", "def fakeConnection(self):\n\n## if self.sentSYN and not self.receivedSYN:\n## dhost = self.peerIP\n## dport = self.peerPort\n## shost = self.myIP\n## sport = self.myPort\n## argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN)\n## argc = len(argv)\n## print 'Send SYN', self.SYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n## self.sp.fakeConnection(argv, argc)\n \n #print 'Fake connection:', self.sentSYN, self.receivedSYN, '\\n'\n if self.sentSYN and self.receivedSYN:\n dhost = self.peerIP\n dport = self.peerPort\n shost = self.myIP\n sport = self.myPort\n argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN, '%ld'%self.peerSYN)\n argc = len(argv)\n print 'Send SYN-ACK', self.SYN, self.peerSYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n #self.sp.fakeConnection(argv, argc)\n\n # -----------------------------------------\n # Auto send SYNACK", "def check_connection():\n try:\n http.request(\"GET\", url, retries=False)\n return \"Connection Successful\"\n except urllib3.exceptions.NewConnectionError:\n return \"Connection Failed\"", "def connection_refused():\n try:\n socket.socket().connect(('localhost', 0))\n except ConnectionRefusedError:\n return \"don't want to connect with you\"", "def cpsconnfailurerate(self) :\n\t\ttry :\n\t\t\treturn self._cpsconnfailurerate\n\t\texcept Exception as e:\n\t\t\traise e", "def test_connection(self) -> bool:\n if self.use_https and self.port == 80:\n self._logger.warning(\"You are using HTTPS with port 80. This is most likely not correct.\")\n if not self.use_https and self.port == 443:\n self._logger.warning(\"You are using HTTP with port 443. This is most likely not correct.\")\n return self._caller.test_connection()", "def run_test_err():\n\n for y in range(msg_num):\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((host,port))\n except socket.error, (value,message):\n if s:\n s.close()\n print \"Could not open socket: \" + message\n sys.exit(-1)\n \n s.send(msg[y])\n data = s.recv(buffSize)\n\n if y == 0: print \"\"\n print \"Sending:\", msg_sem[y]\n print '\\n', data[:len(data)-3]\n\n print \"*\"*80\n if y == msg_num - 1: print \"\"", "def connect(self):\n try:\n self.logger.debug(\"connect(), opening communication at '%s'\" % self._address)\n opencomm(self._address)\n # Open TC2 Resource\n self.logger.debug(\"connect(), open resource\")\n mpos_openresource(ResourceId.TC2,self._cplnum, BlockingMode.NOT_BLOCKING)\n self.logger.debug(\"connect(), log clock selection\")\n mps_logclockselect(self._cplnum, ClockMode.INTERNAL)\n self.logger.debug(\"connect(), set clock divider\")\n mps_setclkdiv(self._cplnum, ClockDivider.DIV_8192);\n self.logger.debug(\"connect(), startdownloadto\")\n startdownloadto(self._cplnum, \"spy.mplog\");\n self.logger.debug(\"connect(), opening log\")\n mps_openlog(self._cplnum, LogEvent.EN_C1 | \n LogEvent.EN_C2 | \n LogEvent.EN_C3 | \n LogEvent.EN_C7 | \n LogEvent.EN_CHAR | \n LogEvent.EN_ETU, \n 0)\n self.logger.debug(\"connect(),vcc selection\")\n mps_vccselectmv(self._cplnum, 5000);\n self._atr= mps_oncmm(self._cplnum, 2000000);\n self.logger.debug(\"connect(), get atr and send pps, atr: %s\",self._atr)\n mps_sendppscmm(self._cplnum, 0, 9, 6);\n self.logger.info(about())\n except ResourceMgmt as res:\n self.logger.error(\"Resource allocation is failed, errno=\",res.args[0])\n #Try to overide resource and closing it down\n if(res.args[1] == 
3902):\n mpos_openresource(ResourceId.TC2,self._cplnum,BlockingMode.OVERRIDE)\n mpos_closeresource(ResourceId.TC2,self._cplnum)\n #closecommunication\n closecomm()\n raise res\n\n except SpyMgmt as spy:\n self.logger.error(\"Spying failed, errno: \",spy.args[1])\n mps_closelog(self._cplnum)\n mps_enddownload(self._cplnum)\n mpos_closeresource(TC2,self._cplnum)\n #closecommunication\n closecomm()\n raise spy\n \n except RuntimeError as re:\n self.logger.error(\"Caught runtime error: %s, %d \" % re.args)\n self.closedown()\n raise re\n\n except Exception as ex:\n self.logger.error(\"Caught unknown exception: %s, %d\" % ex.args)\n self.closedown() \n raise ex", "def tcp_report(self, data):\n host, port = self.tcp.split(\":\")\n tcp = socket.getprotobyname('tcp')\n send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, tcp)\n try:\n send_socket.connect((host, int(port)))\n send_socket.send(data)\n\n except Exception, e:\n raise e\n\n finally:\n send_socket.close()", "def _test_connectivity(self, param):\n self.debug_print(\"%s TEST_CONNECTIVITY %s\" % (F5_Connector.BANNER, param))\n\n config = self.get_config()\n host = config.get(\"device\")\n F5 = iControl.BIG_IP(host=host,\n username=config.get(\"username\"),\n password=config.get(\"password\"),\n uri=\"/mgmt/tm/sys/software/image\",\n method=\"GET\")\n msg = \"test connectivity to %s status_code: \" % host\n\n if F5.genericGET():\n # True is success\n return self.set_status_save_progress(phantom.APP_SUCCESS, msg + \"%s %s\" % (F5.status_code, httplib.responses[F5.status_code]))\n else:\n # None or False, is a failure based on incorrect IP address, username, passords\n return self.set_status_save_progress(phantom.APP_ERROR, msg + \"%s %s\" % (F5.status_code, F5.response))", "def tcp_request(self):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.settimeout(TIMEOUT)\n\n try:\n sock.connect((self.host, self.port))\n self.opened_tcp = \"+\"\n\n try:\n data = sock.recv(512).decode()\n\n except timeout:\n # It is not a post protocol because there is no greeting.\n # It may be HTTP.\n sock.send(\"GET / HTTP/1.1{0}{0}\".format(linesep).encode())\n\n try:\n data = sock.recv(512).decode()\n if data.startswith(\"HTTP\"):\n self.protocol = \"HTTP\"\n except timeout:\n # This is not a protocol from the list.\n return\n\n else:\n # It may be a post server.\n if data.startswith(\"220\"):\n # Mail-server is connected to electrical power station.\n data = data.lower()\n if data.find(\"smtp\") > 0:\n self.protocol = \"SMTP\"\n elif data.find(\"ftp\") > 0:\n self.protocol = \"FTP\"\n elif data.startswith(\"+OK\"):\n self.protocol = \"POP3\"\n\n # TCP is closed in following cases.\n except timeout:\n self.opened_tcp = \"-\"\n except error:\n debug(\"Can't get information about TCP on port: %s.\", self.port)\n self.opened_tcp = \"-\"\n finally:\n sock.close()", "def run_test_ok():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n cseq = 1\n session = None\n\n try:\n s.connect((host,port))\n except socket.error, (value,message):\n if s:\n s.close()\n print \"Could not open socket: \" + message\n sys.exit(-1)\n\n for y in range(msg_num):\n s.send(msg[y].format(cseq,session))\n cseq = cseq + 1\n data = s.recv(buffSize)\n\n if y == 0: print \"\"\n print \"Sending:\", msg_sem[y]\n print '\\n', data[:len(data)-3]\n\n if not session:\n session = parse_session_id(data)\n if session:\n print \"\\n>>> Parsed session ID:\", session\n\n print \"*\"*80\n if y == msg_num - 1: print \"\"\n\n s.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP/PKEXv2 over TCP while associated (conn status)
def test_dpp_tcp_pkex_while_associated_conn_status(dev, apdev, params):
    try:
        run_dpp_tcp_pkex_while_associated(dev, apdev, params, True)
    finally:
        dev[1].request("DPP_CONTROLLER_STOP")
        dev[0].set("dpp_config_processing", "0", allow_fail=True)
[ "def keep_alive(self):\n self.send_tcp_msg('00')", "def __connect_NN_socket(self):\n if self.mode == \"one2many\":\n # This allows only use one publisher connected at the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"*\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.bind(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" bind\")\n \n elif self.mode == \"many2one\":\n # This allows two use more that one publisher ate the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")\n \n elif self.mode == \"many2many\":\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")", "def send_packet():", "def fakeConnection(self):\n\n## if self.sentSYN and not self.receivedSYN:\n## dhost = self.peerIP\n## dport = self.peerPort\n## shost = self.myIP\n## sport = self.myPort\n## argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN)\n## argc = len(argv)\n## print 'Send SYN', self.SYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n## self.sp.fakeConnection(argv, argc)\n \n #print 'Fake connection:', self.sentSYN, self.receivedSYN, '\\n'\n if self.sentSYN and self.receivedSYN:\n dhost = self.peerIP\n dport = self.peerPort\n shost = self.myIP\n sport = self.myPort\n argv = ('', dhost, '%ld'%dport, shost, '%ld'%sport, '%ld'%self.SYN, '%ld'%self.peerSYN)\n argc = len(argv)\n print 'Send SYN-ACK', self.SYN, self.peerSYN, 'to%s:%d %s:%d'%(dhost, dport, shost, sport)\n #self.sp.fakeConnection(argv, argc)\n\n # -----------------------------------------\n # Auto send SYNACK", "def test4_output_tcp_enable(self):\n cmd = 'python3 -c \"from dnstap_receiver.receiver import start_receiver; start_receiver()\" -c ./tests/dnstap_tcp.conf'\n o = execute_dnstap(cmd)\n \n self.assertRegex(o, b\"Output handler: tcp\")", "def tcp_request(self):\n sock = socket(AF_INET, SOCK_STREAM)\n sock.settimeout(TIMEOUT)\n\n try:\n sock.connect((self.host, self.port))\n self.opened_tcp = \"+\"\n\n try:\n data = sock.recv(512).decode()\n\n except timeout:\n # It is not a post protocol because there is no greeting.\n # It may be HTTP.\n sock.send(\"GET / HTTP/1.1{0}{0}\".format(linesep).encode())\n\n try:\n data = sock.recv(512).decode()\n if data.startswith(\"HTTP\"):\n self.protocol = \"HTTP\"\n except timeout:\n # This is not a protocol from the list.\n return\n\n else:\n # It may be a post server.\n if data.startswith(\"220\"):\n # Mail-server is connected to electrical power station.\n data = data.lower()\n if data.find(\"smtp\") > 0:\n self.protocol = \"SMTP\"\n elif data.find(\"ftp\") > 0:\n self.protocol = \"FTP\"\n elif data.startswith(\"+OK\"):\n self.protocol = \"POP3\"\n\n # TCP is closed in following cases.\n except timeout:\n self.opened_tcp = \"-\"\n except error:\n debug(\"Can't get information about TCP on port: %s.\", self.port)\n self.opened_tcp = \"-\"\n finally:\n sock.close()", "def is_alive(self):\n def checker():\n \"\"\"\n Verify whether the socket connection is good.\n \"\"\"\n # Encode the SYN code.\n syn = str(protocol.SYN).encode()\n \n # Get the total lenght of NULL request\n lenght = struct.pack('>Q', len(syn))\n\n try:\n # Send the lenght of the request.\n 
self.__socket.send(lenght)\n\n # Actually send the request content.\n self.__socket.sendall(syn)\n\n # Receive acknoledge.\n self.__recv_ack()\n except (BrokenPipeError, ConnectionError, AcknoledgeNotReceivedExecption):\n return False\n return True\n\n return self._wrap_timeout(checker, 5)", "def check_switch_started(self, pid):\n\n while True:\n if not os.path.exists(os.path.join('/proc', str(pid))):\n return False\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.settimeout(0.5)\n result = sock.connect_ex(('localhost', self.thrift_port))\n\n finally:\n sock.close()\n\n if result == 0:\n return True", "def run_test_ok():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n cseq = 1\n session = None\n\n try:\n s.connect((host,port))\n except socket.error, (value,message):\n if s:\n s.close()\n print \"Could not open socket: \" + message\n sys.exit(-1)\n\n for y in range(msg_num):\n s.send(msg[y].format(cseq,session))\n cseq = cseq + 1\n data = s.recv(buffSize)\n\n if y == 0: print \"\"\n print \"Sending:\", msg_sem[y]\n print '\\n', data[:len(data)-3]\n\n if not session:\n session = parse_session_id(data)\n if session:\n print \"\\n>>> Parsed session ID:\", session\n\n print \"*\"*80\n if y == msg_num - 1: print \"\"\n\n s.close()", "def _handle_conn(self, tcp_conn):\n session = self.spdy_session_class(self, tcp_conn)\n self.emit('session', session)", "def connect(self):\n\n # Open TCP connection to GPIB-ETHERNET\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n self.sock.settimeout(self.timeout)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.connect((self.host, self.port))\n\n if self.sock.send('\\1') != 1: # check for valid connection\n print \"send 1 error\"\n self.close()\n ret = ord(self.sock.recv(1)[0])\n if ret == 0:\n print \"connected to API\"\n else:\n print \"connection error\"\n self.close()\n\n self._isOpen = True", "def handshake():\n global CONNECTION_STATE\n initial_pkt = packet(\"SYN\", 0, 0)\n send_syn(initial_pkt)\n CONNECTION_STATE = \"ESTABLISHED\"\n print(\"HANDSHAKE COMPLETE\")", "def read_tcp_socket(self):\n while self.is_alive:\n try:\n # Read data from socket\n data = self.raw_serial_socket.recv(4096)\n\n # If data exist process\n if len(data) > 0:\n self.codec.add(data)\n\n except socket.timeout:\n # Just a socket timeout, continue on\n pass\n except Exception as e:\n logger.error(\"Exception in reading data.\", e)\n #self.stop_adcp_server()\n\n print(\"Read Thread turned off\")", "def startProtocol(self):\n self.transport.connect(self.host, self.port)\n logging.info(\"Connect with %s:%d\" % (self.host, self.port))", "def cpsconnsuccess(self) :\n\t\ttry :\n\t\t\treturn self._cpsconnsuccess\n\t\texcept Exception as e:\n\t\t\traise e", "def tcpclientcheck():\n if tcpclient.get() == 1:\n TcpClient(TCPTRIGGER).start()\n runultimateintf.set(0)\n APP.gui.start_rec_button.configure(state=\"disabled\")\n if ultimate.get() == 0:\n APP.gui.autorun_checkbox.configure(state=\"normal\")\n elif rec_in_progress == 1:\n APP.gui.writelog(\"Disconnected from the TCP server. IQ recording discontinued.\")\n APP.gui.start_stop_rec()\n\n else:\n APP.gui.autorun_checkbox.configure(state=\"disabled\")\n APP.gui.start_rec_button.configure(state=\"normal\")\n auto_run_tdoa.set(0)", "def comm_status(self):\r\n # TODO Note this has a lot of repeated code from forward UCM method. 
consider refactoring\r\n mesdict = {\"commstate\": \"good\"}\r\n\r\n page = self.URLmap.get(\"comm_state\", \"/comm.cgi?\")\r\n requestURL = \"http://\" + self.UCMip + page\r\n UCMrequest = urllib2.Request(requestURL)\r\n \r\n method = self.HTTPmethods.get(\"comm_state\", \"POST\")\r\n messtr = json.dumps(mesdict)\r\n UCMrequest.add_data(messtr)\r\n UCMresponsedict = {\"message_subject\": \"commstate_update\"}\r\n \r\n now = datetime.utcnow().isoformat() + 'Z'\r\n if settings.DEBUGGING_LEVEL >= 2:\r\n print(\"Sending a message to test connection at {time}\".format(time = now))\r\n topic = self.create_topic(\"commstate\")\r\n try:\r\n result = urllib2.urlopen(UCMrequest, timeout = 10)\r\n HTTPcode = result.getcode()\r\n if HTTPcode == 200:\r\n UCMresponsedict[\"commstate\"] = \"good\"\r\n elif HTTPcode == 400:\r\n UCMresponsedict[\"commstate\"] = \"SGD_timeout\"\r\n else:\r\n UCMresponsedict[\"commstate\"] = \"ambiguous\"\r\n\r\n print(\"<{name}> channel status update from {time}: {status}\".format(name =self.UCMname, time = now, status = UCMresponsedict[\"commstate\"]))\r\n notification = json.dumps(UCMresponsedict)\r\n self.vip.pubsub.publish(peer = 'pubsub', topic = topic, headers = {}, message = notification)\r\n except urllib2.URLError, e:\r\n print('an urllib2 error of type {error} occurred while sending comms test message to {ucm}'.format(error = e, ucm = self.UCMname))\r\n _log.error('Comm_state urllib error')\r\n except socket.timeout, e:\r\n _log.error('Comm_state time out')", "def l2cap_send(self, conn_handle: memoryview, cid: memoryview, /) -> None:", "def connexion(self):\r\n connexion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n \r\n if self.debug is True:\r\n print(\"[Client]: Client >> connexion avec le Serveur en cours ...\")\r\n \r\n try:\r\n connexion.connect( (self.ip, self.port) )\r\n \r\n if self.debug is True:\r\n print(\"[Client]: Client >> connexion avec le Serveur reussi\")\r\n \r\n self.thR = Reception(connexion, self.ui, Debug=self.debug)\r\n self.thE = Emission(connexion, self.ui, Debug=self.debug)\r\n \r\n self.thR.start()\r\n self.thE.start()\r\n \r\n return True\r\n \r\n \r\n except socket.error as e:\r\n \r\n if self.debug is True:\r\n print(\"[Client]: Client >> [ERROR] connexion impossible\")\r\n self.logger(str(e))\r\n \r\n return False", "def kasaya_connection_started(self, addr):\n LOG.debug(\"Connected to %s\", addr)\n self.SYNC.notify_worker_live(self.status)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DPP Controller/Relay with PKEX
def test_dpp_controller_relay_pkex(dev, apdev, params):
    try:
        run_dpp_controller_relay_pkex(dev, apdev, params)
    finally:
        dev[0].set("dpp_config_processing", "0", allow_fail=True)
        dev[1].request("DPP_CONTROLLER_STOP")
[ "def _dmvpn(self, _):\r\n logger = LoggingMessageHandler(bool(), self._log_viewer)\r\n command = 'show crypto ikev2 sa'\r\n self.command_thread.command = command\r\n logger.clear()\r\n logger.status_message(\"Running....\")\r\n self.ping.setEnabled(False)\r\n self.traceroute.setEnabled(False)\r\n self.ip.setEnabled(False)\r\n self.routes.setEnabled(False)\r\n self.interfaces.setEnabled(False)\r\n self.dmvpn.setEnabled(False)\r\n self.ospf.setEnabled(False)\r\n self.eigrp.setEnabled(False)\r\n self.command_thread.start()", "def user32_DdeConnect(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"idInst\", \"hszService\", \"hszTopic\", \"pCC\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def initControllerSetup(self):\r\n # Set the front motors to be the followers of the rear motors\r\n self.frontLeft.set(WPI_TalonSRX.ControlMode.Follower, DRIVETRAIN_REAR_LEFT_MOTOR)\r\n self.frontRight.set(WPI_TalonSRX.ControlMode.Follower, DRIVETRAIN_REAR_RIGHT_MOTOR)\r\n\r\n # Set the neutral output mode to Brake/Coast/\r\n self.leftTalon.setNeutralMode(WPI_TalonSRX.NeutralMode.Brake)\r\n self.rightTalon.setNeutralMode(WPI_TalonSRX.NeutralMode.Brake)\r\n\r\n # Diable the motor-safety\r\n self.diffDrive.setSafetyEnabled(False)\r\n\r\n # Set the feedback sensor phases\r\n self.leftTalon.setSensorPhase(True)\r\n self.rightTalon.setSensorPhase(True)\r\n\r\n # Setup the Pigeon IMU and Talon Mag Encoders\r\n self.initPigeonIMU()\r\n self.initQuadratureEncoder()\r\n\r\n # Set the voltage compensation to 12V and disable it for now\r\n self.leftTalon.configVoltageCompSaturation(12.0, 10)\r\n self.leftTalon.enableVoltageCompensation(False)\r\n self.rightTalon.configVoltageCompSaturation(12.0, 10)\r\n self.rightTalon.enableVoltageCompensation(False)\r\n\r\n # PIDF slot index 0 is for autonomous wheel postion\r\n self.leftTalon.config_kP(0, 0.8, 10)\r\n self.leftTalon.config_kI(0, 0.0, 10)\r\n self.leftTalon.config_kD(0, 0.0, 10)\r\n self.leftTalon.config_kF(0, 1023 / 12, 10) # 10-bit ADC units / 12 V\r\n self.rightTalon.config_kP(0, 0.8, 10)\r\n self.rightTalon.config_kI(0, 0.0, 10)\r\n self.rightTalon.config_kD(0, 0.0, 10)\r\n self.rightTalon.config_kF(0, 1023 / 12, 10) # 10-bit ADC units / 12 V\r\n\r\n # PIDF slot index 1 is for autonomous heading postion\r\n self.leftTalon.config_kP(1, 1.0, 10)\r\n self.leftTalon.config_kI(1, 0, 10)\r\n self.leftTalon.config_kD(1, 0, 10)\r\n self.leftTalon.config_kF(1, 0, 10)\r\n self.rightTalon.config_kP(1, 1.0, 10)\r\n self.rightTalon.config_kI(1, 0, 10)\r\n self.rightTalon.config_kD(1, 0, 10)\r\n self.rightTalon.config_kF(1, 0, 10)", "def ixnet_wizard(self):\n self.wiz_ixnet = wiz.PromptWizard(\n name=\"Ixia IxNet Traffic Generator Configuration\",\n description=\"Specific configurations of Ixia-Ixnet TGen\",\n steps=(\n wiz.WizardStep(\n id='card',\n name='Card Number?',\n help='Chassis Card Number',\n default='1',\n ),\n wiz.WizardStep(\n id='port1',\n name='Port-1 Number?',\n help='Chassis Port-1 Number',\n default='5',\n ),\n wiz.WizardStep(\n id='port2',\n name='Port-2 Number?',\n help='Chassis Port-2 Number',\n default='6',\n ),\n wiz.WizardStep(\n id='libp1',\n name='IXIA Library path?',\n help='Library path of Ixia',\n default='/opt/ixnet/ixos-api/8.01.0.2/lib/ixTcl1.0',\n ),\n wiz.WizardStep(\n id='libp2',\n name='IXNET Library Path',\n help='Library Path for the IXNET',\n default='/opt/ixnet/ixnetwork/8.01.1029.6/lib/IxTclNetwork',\n ),\n wiz.WizardStep(\n id='host',\n name='IP of the CHassis?',\n help='Chassis 
IP',\n default='10.10.50.6',\n ),\n wiz.WizardStep(\n id='machine',\n name='IP of the API Server?',\n help='API Server IP ',\n default='10.10.120.6',\n ),\n wiz.WizardStep(\n id='port',\n name='Port of the API Server?',\n help='API Server Port',\n default='9127',\n ),\n wiz.WizardStep(\n id='user',\n name='Username for the API server?',\n help='Username to use to connect to API Server',\n default='vsperf_sandbox',\n ),\n wiz.WizardStep(\n id='tdir',\n name='Path for Results Directory on API Server',\n help='Results Path on API Server',\n default='c:/ixia_results/vsperf_sandbox',\n ),\n wiz.WizardStep(\n id='rdir',\n name='Path for Results directory on DUT',\n help='DUT Results Path',\n default='/mnt/ixia_results/vsperf_sandbox',\n ),\n )\n )", "def runDemo(vcfile, remote=5621, expire=0.0):\n secrets = [\n 'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY'\n 'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw',\n 'AKuYMe09COczwf2nIoD5AE119n7GLFOVFlNLxZcKuswc',\n 'A1-QxDkso9-MR1A8rZz_Naw6fgaAtayda8hrbkRVVu1E',\n 'Alntkt3u6dDgiQxTATr01dy8M72uuaZEf9eTdM-70Gk8',\n 'AcwFTk-wgk3ZT2buPRIbK-zxgPx-TKbaegQvPEivN90Y',\n 'A6zz7M08-HQSFq92sJ8KJOT2cZ47x7pXFQLPB0pckB3Q',\n 'ArwXoACJgOleVZ2PY7kXn7rA0II0mHYDhc6WrBH8fDAc',\n ]\n\n doers = setupController(secrets=secrets,\n remotePort=remote,\n indirect=True,\n vcfile=vcfile)\n\n directing.runController(doers=doers, expire=expire)", "def main():\n description = ('Create 2 EPGs within the same Context and have'\n '1 EPG provide a contract to the other EPG.')\n creds = Credentials('apic', description)\n args = creds.get()\n\n # Create the Tenant\n tenant = Tenant('aci-toolkit-demo')\n\n # Create the Application Profile\n app = AppProfile('my-demo-app', tenant)\n\n # Create the EPGs\n web_epg = EPG('web-frontend', app)\n db_epg = EPG('database-backend', app)\n\n # Create a Context and BridgeDomain\n # Place both EPGs in the Context and in the same BD\n context = Context('VRF-1', tenant)\n bd = BridgeDomain('BD-1', tenant)\n bd.add_context(context)\n web_epg.add_bd(bd)\n db_epg.add_bd(bd)\n\n # Define a contract with a single entry\n contract = Contract('mysql-contract', tenant)\n entry1 = FilterEntry('entry1',\n applyToFrag='no',\n arpOpc='unspecified',\n dFromPort='3306',\n dToPort='3306',\n etherT='ip',\n prot='tcp',\n sFromPort='1',\n sToPort='65535',\n tcpRules='unspecified',\n parent=contract)\n\n # Provide the contract from 1 EPG and consume from the other\n db_epg.provide(contract)\n web_epg.consume(contract)\n\n # Login to APIC and push the config\n session = Session(args.url, args.login, args.password)\n session.login()\n # Cleanup (uncomment the next line to delete the config)\n # tenant.mark_as_deleted()\n resp = tenant.push_to_apic(session)\n\n if resp.ok:\n # Print what was sent\n print('Pushed the following JSON to the APIC')\n print('URL: ' + str(tenant.get_url()))\n print('JSON: ' + str(tenant.get_json()))", "def do_connect(self, line):\n\n warnings.warn(\n \"This method is being deprecated. 
\"\n \"Please use the DeviceController.[ConnectBLE|CommissionIP] methods directly in the REPL\", DeprecationWarning)\n\n try:\n args = shlex.split(line)\n if len(args) <= 1:\n print(\"Usage:\")\n self.do_help(\"connect SetupPinCode\")\n return\n\n nodeid = random.randint(1, 1000000) # Just a random number\n if len(args) == 4:\n nodeid = int(args[3])\n print(\"Device is assigned with nodeid = {}\".format(nodeid))\n\n if args[0] == \"-ip\" and len(args) >= 3:\n self.replHint = f\"devCtrl.CommissionIP({repr(args[1])}, {int(args[2])}, {nodeid})\"\n self.devCtrl.CommissionIP(args[1], int(args[2]), nodeid)\n elif args[0] == \"-ble\" and len(args) >= 3:\n self.replHint = f\"devCtrl.ConnectBLE({int(args[1])}, {int(args[2])}, {nodeid})\"\n self.devCtrl.ConnectBLE(int(args[1]), int(args[2]), nodeid)\n elif args[0] in ['-qr', '-code'] and len(args) >= 2:\n if len(args) == 3:\n nodeid = int(args[2])\n print(\"Parsing QR code {}\".format(args[1]))\n\n setupPayload = None\n if args[0] == '-qr':\n setupPayload = SetupPayload().ParseQrCode(args[1])\n elif args[0] == '-code':\n setupPayload = SetupPayload(\n ).ParseManualPairingCode(args[1])\n\n if not int(setupPayload.attributes.get(\"RendezvousInformation\", 0)):\n print(\"No rendezvous information provided, default to all.\")\n setupPayload.attributes[\"RendezvousInformation\"] = 0b111\n setupPayload.Print()\n self.replHint = f\"devCtrl.CommissionWithCode(setupPayload={repr(setupPayload)}, nodeid={nodeid})\"\n self.ConnectFromSetupPayload(setupPayload, nodeid)\n else:\n print(\"Usage:\")\n self.do_help(\"connect SetupPinCode\")\n return\n print(\n \"Device temporary node id (**this does not match spec**): {}\".format(nodeid))\n except exceptions.ChipStackException as ex:\n print(str(ex))\n return", "def deer_me_pi_deploy( self ):\r\n self.mode = \"deer_me_pi_deploy\"\r\n self.icon = r\"./my_dear_icon_2.ico\"\r\n\r\n # deer_me counts on this being .1 sec, or else major changes will be required\r\n self.ht_delta_t = 100/1000. 
# thought this was required timing for deer me but seems not so\r\n\r\n\r\n self.logging_level = logging.DEBUG # CRITICAL 50 ERROR 40 WARNING 30 INFO 20 DEBUG 10 NOTSET 0\r\n\r\n self.baudrate = 38400 # 9600 38400\r\n self.port = \"COM5\" # com port\r\n\r\n self.logging_level = logging.DEBUG # CRITICAL 50 ERROR 40 WARNING 30 INFO 20 DEBUG 10 NOTSET 0\r\n\r\n self.comm_logging_fn = None # None for no logging else file name like \"smart_terminal_comm.log\"\r\n\r\n# -------------------------- auto run on off ---------------------\r\n self.start_helper_function = \"auto_run\" # now using eval, may need to do same with args,\r\n self.start_helper_args = ( ) # () empty ( \"x\", ) one element\r\n self.start_helper_delay = -5 # in seconds must be > 0 to start\r\n\r\n\r\n # ---------------- send area:\r\n self.button_height = 3 # for the send buttons -- seem to be roughly the no of lines\r\n self.button_width = 15 # for the send buttons -- 10-20 seems reasonable starts\r\n self.send_width = 15 # for the text to be sent -- 10-20 seems reasonable starts\r\n # next at end of control list\r\n #self.max_send_rows = 3 # the send areas are added in columns this many rows long, then a new\r\n #self.max_send_rows\r\n\r\n # see backup for some test button sets\r\n# refresh this list form time to time\r\n#define TIME_ON 0 // this is a time, delta time, for th light to be on\r\n#define ACC_ON 1 // some sort of ( depending on method ) acceleration for the TIME_ON\r\n#define TIME_OFF 2 // analogous to TIME_ON, but for off\r\n#define ACC_OFF 3 //\r\n#define REPEATS 4 // number of time this cycle can repeat, this may not make sense\r\n#define PIN 5 // pin used by this light\r\n#define STATE 6 // current state\r\n#define NEXT_TIME 7 // next time the state changes\r\n\r\n self.send_ctrls = [\r\n # text cmd can edit\r\n ( \"Version of arduino\", \"v\", False ),\r\n ( \"Help\", \"h\", False ),\r\n ( \"Set Light Index\", \"i0\", False ),\r\n ( \"Set Light Index\", \"i1\", False ),\r\n ( \"Load Light I\", \"l100 200 130 400\", True ),\r\n ( \"Load Light I\", \"l105 200 130 400\", True ),\r\n ( \"Print Light I\", \"p\" , False ),\r\n ( \"Strobe\", \"s1\" , False ),\r\n ( \"Strobe Not\", \"s0\" , False ),\r\n ( \"Stop\", \"!xx\" , False ),\r\n ( \"Send\", \"\", True ),\r\n ( \"Send\", \"\", True ),\r\n ]\r\n\r\n self.gui_sends = len( self.send_ctrls )\r\n self.max_send_rows = 2 # 4 seems good for the pi the send areas are added in columns this many rows long,\r\n\r\n # ----- processing related:\r\n self.ext_processing_module = \"ext_process_dm\"\r\n self.ext_processing_class = \"DMProcessing\"\r\n\r\n self.arduino_connect_delay = 10 # may not be implemented yet\r\n self.get_arduino_version = \"v\"\r\n self.arduino_version = \"DeerMe\"", "def _CkdPub(self, index: Bip32KeyIndex) -> Bip32Base:\n\n # Not supported by Ed25519 SLIP-0010\n pass", "def get_pxe_script(config_file=None):\n logging.info(\"Request to /pxe with params: %s\" % dict(request.args))\n\n if config_file is None:\n template = 'plip.ipxe'\n else:\n template = 'configs/' + config_file\n\n if 'number' in request.args:\n host_id = request.args.get('number')\n server_data = get_server_by_number(host_id)\n elif 'mac' in request.args:\n mac = request.args.get('mac')\n mac_address = mac.replace(\"-\", \":\")\n server_data = get_server_by_mac(mac_address)['hosts'][0]\n else:\n # Get the switch data and strip it\n switch_name = request.args.get('switch_name')\n switch_port = request.args.get('switch_port')\n\n # Get the Craton data for this switch name/port combo\n 
server_data = get_server_by_switch(switch_name,\n switch_port)\n\n server_data = server_data['hosts'][0]\n\n # Create a timestamp for the PXE script output\n timestamp = strftime(\"%a, %d %b %Y %H:%M:%S +0000\", gmtime())\n\n # If the call to craton failed, server_data should contain the\n # response code from craton as an integer. If so, let's abort with\n # that response code.\n if isinstance(server_data, int):\n abort(server_data)\n\n server_data_dump = indenter(server_data) # For debugging\n ecopoiesis_host_ip = config.ecopoiesis_host_ip\n\n # Generate PXE script\n pxedata = render_template(template,\n server_data=server_data,\n server_data_dump=server_data_dump,\n request=request,\n ecopoiesis_host_ip=ecopoiesis_host_ip,\n timestamp=timestamp)\n\n r = make_response(pxedata)\n r.mimetype = \"text/plain\"\n\n return r", "def ddos(self):\n \n # Start the device.\n self.dev.start()\n\n # Create a CAN payload frame whose id is 0 and data length is 8 bytes.\n frame = can.Frame(id=0)\n frame.dlc = 8\n\n # The attack will overwhelm CAN traffic.\n while True:\n self.dev.send(frame)\n\n # Stop the device. Although it might not be necessary.\n self.dev.stop()", "def delegate():\n # TODO: assistant\n block()\n\n communion_server.configure_master(\n transformation_job=True,\n transformation_status=True,\n )\n\n database.connect()\n communion_server.start()", "def nw_same_side_port(wire_width = 0.2, wire_pitch=0.6,size=(22,11),layer = 1):\n \n device = Device('nw')\n WIRE = nw_same_side(wire_width = wire_width, wire_pitch=wire_pitch,size=size,layer=layer)\n WIRE.rotate(-90).move(origin=(0,0),destination=(52.5, 52.2))\n wire = device.add_ref(WIRE)\n \n d = pads_adam_quad(layer=1)\n d.move(origin=d.center,destination=(0,0))\n \n hTAPER = hyper_taper(length = 50, wide_section=45, narrow_section=5,layer=0)\n htaper = device.add_ref(hTAPER)\n htaper.rotate(90).move(origin=htaper.ports[2],destination=d.ports['21'])\n ROUT = pr.route_basic(wire.ports[1],htaper.ports[1],width_type='straight',path_type='sine')\n rout = device.add_ref(ROUT)\n \n htaper1 = device.add_ref(hTAPER)\n htaper1.rotate(90).move(origin=htaper1.ports[2],destination=d.ports['22'])\n ROUT = pr.route_basic(wire.ports[2],htaper1.ports[1],width_type='straight',path_type='sine')\n rout = device.add_ref(ROUT)\n\n nwOut = pg.outline(device,distance=.1,precision=1e-4,layer=0)\n trim = pg.rectangle(size=(150,.2))\n trim.move(origin=trim.center,destination=(nwOut.center[0],nwOut.bbox[1][1]))\n t = nwOut.add_ref(trim)\n nwOut = pg.boolean(nwOut,t,'A-B',precision=1e-4,layer=layer)\n nwOut.add_port(name = 'wide0', port = htaper.ports[2])\n nwOut.add_port(name = 'wide1', port = htaper1.ports[2])\n\n return nwOut", "def control(\n context: typer.Context,\n relay: str = typer.Option(..., help='The label or index of the relay to control.'),\n command: str = typer.Option(..., help='The control action to perform, '\n 'either \"turn_on\" or \"turn_off\"')\n):\n url = context.obj.url + '/control'\n\n try:\n relay_command = RelayCommand(relay=relay, command=command)\n res = requests.post(url, data=relay_command.json())\n content = res.json() if res.ok else res.content.decode()\n print(content)\n except requests.exceptions.ConnectionError:\n print(f'[red]Cannot connect to {url}')", "def user32_DdePostAdvise(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"idInst\", \"hszTopic\", \"hszItem\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def create_dpcontroller(self):\n self.__current_dpc = 
DataParallelController(self)\n return self.__current_dpc", "def pv_encryption_kmip_setup_factory(request):\n kmip = KMS.KMIP()\n\n def factory():\n \"\"\"\n Returns:\n object: KMIP (KMS) object\n \"\"\"\n kmip.update_kmip_env_vars()\n get_ksctl_cli()\n kmip.kmsid = create_unique_resource_name(\"test\", \"kmip\")\n kmip.kmip_secret_name = kmip.create_kmip_secret(type=\"csi\")\n\n # If csi-kms-connection-details exists, edit the configmap to add new kmip config\n ocp_obj = OCP(kind=\"configmap\", namespace=config.ENV_DATA[\"cluster_namespace\"])\n\n try:\n ocp_obj.get_resource(\n resource_name=\"csi-kms-connection-details\", column=\"NAME\"\n )\n # new_kmsid = vault_resource_name\n vdict = defaults.KMIP_CSI_CONNECTION_CONF\n for key in vdict.keys():\n old_key = key\n vdict[kmip.kmsid] = vdict.pop(old_key)\n vdict[kmip.kmsid][\"KMS_SERVICE_NAME\"] = kmip.kmsid\n vdict[kmip.kmsid][\n \"KMIP_ENDPOINT\"\n ] = f\"{kmip.kmip_endpoint}:{kmip.kmip_port}\"\n vdict[kmip.kmsid][\"KMIP_SECRET_NAME\"] = kmip.kmip_secret_name\n vdict[kmip.kmsid][\"TLS_SERVER_NAME\"] = kmip.kmip_tls_server_name\n KMS.update_csi_kms_vault_connection_details(vdict)\n\n except CommandFailed as cfe:\n if \"not found\" not in str(cfe):\n raise\n else:\n kmip.kmsid = \"1-kmip\"\n kmip.create_kmip_csi_kms_connection_details()\n\n return kmip\n\n def finalizer():\n \"\"\"\n Remove the kmip config from csi-kms-connection-details configMap\n\n \"\"\"\n if len(KMS.get_encryption_kmsid()) > 1:\n KMS.remove_kmsid(kmip.kmsid)\n if kmip.kmip_secret_name:\n run_cmd(\n f\"oc delete secret {kmip.kmip_secret_name} -n\"\n f\" {config.ENV_DATA['cluster_namespace']}\"\n )\n if kmip.kmip_key_identifier:\n kmip.delete_ciphertrust_key(key_id=kmip.kmip_key_identifier)\n\n request.addfinalizer(finalizer)\n return factory", "def main():\n\n parser = argparse.ArgumentParser(fromfile_prefix_chars='@')\n parser.add_argument('--host', default=get_default_value_from_env('WM_SERVICES_MQTT_HOSTNAME'), help=\"MQTT broker address\")\n parser.add_argument('--port',\n default=get_default_value_from_env('WM_SERVICES_MQTT_PORT', 8883),\n type=int,\n help='MQTT broker port')\n parser.add_argument('--username',\n default=get_default_value_from_env('WM_SERVICES_MQTT_USERNAME', 'mqttmasteruser'),\n help='MQTT broker username')\n parser.add_argument('--password',\n default=get_default_value_from_env('WM_SERVICES_MQTT_PASSWORD'),\n help='MQTT broker password')\n parser.add_argument('--config',\n default=get_default_value_from_env('WM_PROV_CONFIG',\n '/home/wirepas/wm-provisioning/vars/settings.yml'),\n type=str,\n help='The path to your .yml config file: \\\"examples/provisioning_config.yml\\\"')\n args = parser.parse_args()\n\n logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', level=logging.INFO)\n\n wni = WirepasNetworkInterface(args.host, args.port, args.username, args.password)\n\n srv = ProvisioningServer(interface=wni, settings=args.config)\n srv.loop()", "def main():\n DropController()", "def _testXBeeAPI(self):\n #IN FUTURE THIS SHOULD BE INCLUDED IN THE MODIFIED XBEE LIBRARY\n pass\n source.send('at',command='ap')\n return source.wait_read_frame()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect the rooms that have no doors
def salasSinConectar(self):
    habitaciones = []
    for i in self.puertas:
        habitaciones.append(i.habitacion1.numero)
        habitaciones.append(i.habitacion2.numero)
    # Look at every room
    for i in range(2, len(self.listaHabitaciones)):
        # If the room number is not in the list it means it has no door, so one has to be generated
        if i not in habitaciones:
            habitacion1 = self.listaHabitaciones[i]
            posicion_puerta = random.uniform(0, 1)
            habitacion2 = self.listaHabitaciones[random.randint(2, len(self.listaHabitaciones) - 1)]
            while (not self.colisiona_puerta(habitacion1, posicion_puerta)
                   and not self.colisiona_puerta(habitacion2, posicion_puerta)):
                posicion_puerta = random.uniform(0, 1)
                habitacion2 = self.listaHabitaciones[random.randint(2, len(self.listaHabitaciones) - 1)]
            self.puertas.append(Puerta(habitacion1, habitacion2, self.posicionPuerta(posicion_puerta)))
[ "def saludo():\r\n\tprint (\"Hola! Bienvenido al juego Luces Afuera.\")\r\n\tprint (\"El objetivo es muy simple, apagar todas las luces.\") \r\n\tprint (\"Las luces prendidas son los puntos o y las apagadas los puntos ·\")\r\n\tprint (\"Cuando presionás una luz, escribiendo su posicion, como por ejemplo D4 o A3, ésta se prende o apaga dependiendo de su estado inicial.\")\r\n\tprint (\"Pero OJO! Cada vez que presionás una luz, sus vecinas tambien se presionarán.\")\r\n\tprint (\"En todo momento podes escribir RESET y volver al tablero original, pero esto te hace perder puntos.\")\r\n\tprint ()", "def action_asignar_quitar(self):\n for viaje in self:\n\t if self.asignacion_id:\n\t self.asignacion_id.state = 'descartado'\n\t self.asignacion_id.fechahora_ar = datetime.now()\n\n\t self.with_context(validar_credito_cliente=False).write({\n\t\t 'asignacion_id': False,\n\t\t 'asignadoa_id': False,\n\t\t 'asignadoi_id': False})", "def procesar(self):\n\t\ttry:\n\t\t\tpos = int(self.get_soli_entry().get())\n\t\t\tif(messagebox.askyesno(\"Procesar\", \"Desea atender la solicitud?\")):\n\t\t\t\tdato = bd.solicitudes.pop(pos - 1)\n\t\t\t\tbd.solicitudes_baja.append(dato)\n\t\t\t\tgestionador.guardar_datos()\n\t\t\t\tmessagebox.showinfo(\"Informacion\", \"Solicitud Atendida\")\n\t\t\t\tself.calc(dato)\n\t\texcept:\n\t\t\tmessagebox.showerror(\"Infor\", \"No existe solicitud\")", "def test_get_salario_total(self):\n # Creamos mocks de Empleado\n emp1 = mock(Empleado)\n emp2 = mock(Empleado)\n\n # Creamos sucursal\n suc = Sucursal(\"Sevilla\", \"Pino Montano\", \"Sucursal1\")\n\n # Simulamos comportamiento\n when(emp1).get_salario().thenReturn(1500)\n when(emp2).get_salario().thenReturn(1500)\n\n # Incluimos empleados\n suc.aniadir_empleado(emp1)\n suc.aniadir_empleado(emp2)\n # Hacemos el test\n self.assertEqual(suc.get_salario_total(), 3000)", "def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n if self.password is not None:\n if self.salt is None:\n #No salt provided, make some\n self.salt = authenticate.make_salt()\n\n #Salt the password\n self.password = authenticate.salt_password(self.password, self.salt, self.iterations)\n\n #Generate our keys\n #I believe the standard only requires us to store one of these, but\n # Prosody stores both and I don't know why -- so we will too.\n stored_key, server_key = authenticate.compute_keys(self.password)\n\n #And now we get to stash all of our values into Prosody's table\n Prosody.accounts.update_or_create(user=self.username, key='iteration_count', defaults={'value': self.iterations})\n Prosody.accounts.update_or_create(user=self.username, key='salt', defaults={'value': self.salt})\n Prosody.accounts.update_or_create(user=self.username, key='stored_key', defaults={'value': stored_key})\n Prosody.accounts.update_or_create(user=self.username, key='server_key', defaults={'value': server_key})\n\n # We're done, blank out everything\n self.password = None\n self.salt = None\n self.iterations = settings.SCRAM_ITERATIONS", "def sendall():\n logging.info(\"Sending pending receipts...\")\n total = Comprobante.count()\n for comprobante in tqdm.tqdm(receipts(), total=total, ascii=True):\n resp = send(comprobante)\n clave = resp.content.decode(\"utf-8\").strip()\n if clave.isdigit():\n comprobante.enviado = True\n comprobante.clave = clave\n factura = comprobante.get_factura()\n factura.folio = clave\n factura.enlace = CLAVE_URL + clave\n session.commit()\n else:\n # Problemas con el servidor\n logging.error(\n \"Could not get accepted receipt #%s. 
API response: %s\",\n comprobante.numero_consecutivo,\n clave,\n )\n logging.info(\n \"Will try re-sending receipt #%s on next run\",\n comprobante.numero_consecutivo,\n )", "async def proceed_trade(self, context, propositions, player_a, player_b):\n\n success = True\n players = [player_a, player_b]\n shop = ToolShop(self.client, context)\n\n for i in range(len(propositions)):\n await asyncio.sleep(0)\n\n # Define trader and payee\n trader = players[i]\n current_proposition = propositions[i]\n\n if i == 0:\n payee = players[1]\n\n else:\n payee = players[0]\n\n # Start proposition\n for element in current_proposition:\n await asyncio.sleep(0)\n\n # If the element is a character\n if element[\"object\"].lower() in self.short_character:\n character_id = element[\"value\"]\n\n # Remove the character from the shop\n await shop.remove_character(character_id)\n\n # Remove the character from the player's team\n slot = await trader.combat.get_fighter_slot_by_id(\n character_id\n )\n\n # If the player has the character in his team\n if slot is not None:\n await trader.combat.remove_character(slot)\n\n # Update character's owner id\n await self.database.execute(\n \"\"\"\n UPDATE character_unique\n SET character_owner_id = $1, character_owner_name = $2\n WHERE character_unique_id = $3;\n \"\"\", [payee.id, payee.name, character_id]\n )\n\n # If the element is zenis\n elif element[\"object\"].lower() in self.short_zenis:\n zenis = int(element[\"value\"])\n trader_zenis = await trader.resource.get_zeni()\n\n # Send the zenis to the payee\n if trader_zenis > 0:\n await trader.resource.remove_zeni(zenis)\n await payee.resource.add_zeni(zenis)\n\n return success", "def Inferencias(self):\r\n self.GeneroMasculinoDisfrute = 0\r\n self.GeneroFemeninoDisfrute = 0\r\n self.GeneroMasculinoFacilidad = 0\r\n self.GeneroFemeninoFacilidad = 0\r\n self.EdadValor = 0\r\n self.NumeroUsuarios = 0\r\n self.ValorGeneroFacilidad = \"\"\r\n self.ValorGeneroDisfrute = \"\"\r\n self.valoredad = 0\r\n self.EdadMayor = 0\r\n for i in onto.Usuario.instances(): \r\n self.NumeroUsuarios = 1 + self.NumeroUsuarios\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Facilidad_percibida_de_uso\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoFacilidad = 1 + self.GeneroMasculinoFacilidad\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoFacilidad = 1 + self.GeneroFemeninoFacilidad \r\n if self.GeneroMasculinoFacilidad >= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Masculino\"\r\n if self.GeneroMasculinoFacilidad <= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Femenino\"\r\n if prop.python_name == \"Disfrute_percibido\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoDisfrute = 1 + self.GeneroMasculinoDisfrute\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoDisfrute = 1 + self.GeneroFemeninoDisfrute\r\n if self.GeneroMasculinoDisfrute >= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute= \"Masculino\"\r\n if self.GeneroMasculinoDisfrute <= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute = \"Femenino\"\r\n if prop.python_name == \"Edad\":\r\n self.EdadValor = int(value) + self.EdadValor\r\n self.valoredad = int(value)\r\n if self.EdadMayor < self.valoredad:\r\n 
self.EdadMayor = self.valoredad\r\n\r\n if self.NumeroUsuarios==0:\r\n self.NumeroUsuarios=1\r\n PromedioEdad = (self.EdadValor / self.NumeroUsuarios)\r\n MayorEdad.set(self.EdadMayor) \r\n resultadopromedio.set(round(PromedioEdad))\r\n resultadodisfrute.set(self.ValorGeneroDisfrute)\r\n resultadofacilidad.set(self.ValorGeneroFacilidad)", "def sugerencia(datos):\r\n global apInformation, afInformation\r\n datos= ast.literal_eval(datos)\r\n preferencia= datos[\"preferencias\"] \r\n \r\n iPuerto=apInformation\r\n iVuelos= afInformation\r\n mejor=[]\r\n regular=[]\r\n poco=[]\r\n \r\n for vuelos in iVuelos:\r\n \r\n estado1=False #aerolinea\r\n estado2=False #escalas\r\n estado3=False #hora de salida\r\n estado4=False #hora de llegada\r\n estado5=False #comidas\r\n estado6=False #clases\r\n if vuelos[0]== preferencia[\"aerolinea\"]:\r\n estado1=True\r\n if vuelos[6]== preferencia[\"escalas\"] or vuelos[7]== preferencia[\"escalas\"]:\r\n estado2=True\r\n if vuelos[3]==preferencia[\"horaSalida\"]:\r\n estado3=True\r\n if vuelos[5]==preferencia[\"horaLlegada\"] :\r\n estado4=True\r\n if vuelos[6][0] in preferencia[\"comida\"] or vuelos[6][-1] in preferencia[\"comida\"]:\r\n estado5=True\r\n if vuelos[-1][0] in preferencia[\"clase\"] or vuelos[-2][0] in preferencia[\"clase\"] or vuelos[-3][0] in preferencia[\"clase\"] or vuelos[-4][0] in preferencia[\"clase\"]or vuelos[-5][0] in preferencia[\"clase\"]:\r\n estado6=True\r\n if estado1==True and estado2==True and estado3==True and estado4==True and estado5==True and estado6==True:\r\n mejor.append(vuelos)\r\n if mejor==[]: \r\n for vuelos in iVuelos:\r\n estado1=False #aerolinea\r\n estado2=False #escalas\r\n estado3=False #hora de salida\r\n estado4=False #hora de llegada\r\n estado5=False #comidas\r\n estado6=False #clases\r\n if vuelos[0]== preferencia[\"aerolinea\"]:\r\n estado1=True\r\n if vuelos[6]== preferencia[\"escalas\"] or vuelos[7]== preferencia[\"escalas\"]:\r\n estado2=True\r\n if vuelos[3]==preferencia[\"horaSalida\"]:\r\n estado3=True\r\n if vuelos[5]==preferencia[\"horaLlegada\"] :\r\n estado4=True\r\n if vuelos[6][0] in preferencia[\"comida\"] or vuelos[6][-1] in preferencia[\"comida\"]:\r\n estado5= True\r\n if vuelos[-1][0] in preferencia[\"clase\"] or vuelos[-2][0] in preferencia[\"clase\"] or vuelos[-3][0] in preferencia[\"clase\"] or vuelos[-4][0] in preferencia[\"clase\"]or vuelos[-5][0] in preferencia[\"clase\"]:\r\n estado6=True\r\n if (estado1==True and estado2==True and estado3==True and estado4==True) and (not vuelos in mejor):\r\n regular.append(vuelos)\r\n if mejor==[] and regular==[]: \r\n for vuelos in iVuelos:\r\n estado1=False #aerolinea\r\n estado2=False #escalas\r\n estado3=False #hora de salida\r\n estado4=False #hora de llegada\r\n estado5=False #comidas\r\n estado6=False #clases\r\n \r\n if vuelos[0]== preferencia[\"aerolinea\"]:\r\n estado1=True\r\n if vuelos[6]== preferencia[\"escalas\"] or vuelos[7]== preferencia[\"escalas\"]:\r\n estado2=True\r\n if vuelos[3]==preferencia[\"horaSalida\"]:\r\n estado3=True\r\n if vuelos[5]==preferencia[\"horaLlegada\"] :\r\n estado4=True\r\n if vuelos[6][0] in preferencia[\"comida\"] or vuelos[6][-1] in preferencia[\"comida\"]:\r\n estado5=True\r\n if vuelos[-1][0] in preferencia[\"clase\"] or vuelos[-2][0] in preferencia[\"clase\"] or vuelos[-3][0] in preferencia[\"clase\"] or vuelos[-4][0] in preferencia[\"clase\"]or vuelos[-5][0] in preferencia[\"clase\"]:\r\n estado6=True\r\n if (estado1==True or estado2==True or estado3==True or estado4==True or estado5==True or 
estado6==True) and (not vuelos in mejor) and (not vuelos in regular):\r\n poco.append(vuelos)\r\n if preferencia[\"aerolinea\"]==\"\" and preferencia[\"escalas\"]==\"\" and preferencia[\"horaSalida\"]==\"\" and preferencia[\"horaLlegada\"]==\"\" and preferencia[\"comida\"]==[] and preferencia[\"clase\"]==[]:\r\n flash(\"no hay ningun sugerido con base a su preferencia\")\r\n return render_template('menu.html',usuario=datos)\r\n \r\n sugeridos={\"mejor\":mejor,\"regular\":regular,\"poco\":poco}\r\n return render_template('vuelo.html',usuario=datos,vuelos=sugeridos)", "def profit(nbConso, prix,coutMenu,coutEntretien):", "async def salary(self, ctx, role: discord.Role):\n salary = self.guilds[ctx.guild.id].get(role.id, None)\n if salary is None:\n await ctx.send(await _(ctx, \"That role does not have a salary!\"))\n else:\n await ctx.send((await _(ctx, \"{} has a daily salary of {}\")).format(role, salary))", "def registra(self):\r\n lista_de_datos=[]#esta lista ayuda a almacenar temporalmente los datos para posteriormente convertirlos en una tupla\r\n \"\"\"Funcion que llama a las otras funciones\"\"\"\r\n dic=self.solicitar_datos()\r\n set_get_datos(self.obj_equipo, dic)\r\n #covierte los datos de diccionario en una tupla\r\n for valor in dic.values():\r\n lista_de_datos.append(valor)\r\n #convvertir la lista en una tupla\r\n tupla_de_datos=tuple(lista_de_datos)\r\n #llama a la funcion agregar_registro de la clase conexion_equipos\r\n estatus=self.obj_conexion.agregar_registro(tupla_de_datos)\r\n #si el estatus es true\r\n if estatus:\r\n print(Fore.GREEN+\" Registro agregado correctamente\"+Fore.RESET)\r\n else:\r\n print(Fore.WHITE,Back.RED+\" Registro no agregado\"+Fore.RESET,Back.RESET)", "def guardar(self):\n bi=wslib2.conectar()\n #poshoy+posini ya no va aca, porque el turno se actualiza al bloquearlo ya\n idturno=bi.insertar_turnoSB(self.getFechaTurno(), self.getAgenda().headers[\"idagenda\"], \\\n self.getPaciente().getId(), 0, self.getOperador(), \"s\", \"s\" , self.getHostAddress(), \\\n self.getOS().getId(), self.getOS().sigla, self.getIdHorario() , self.esPrimeraVez())\n \n self.setId(idturno[0]) \n self.__setFechaOtorg__()", "def pasarTurno(self):\n self._turnos_pasados += 1", "def savepacs(pacout):\n if len(pacout) == 0: return\n\n print \"saving \",len(pacout),\"pacs to\",outtable\n put = conn.cursor()\n put.executemany(\n format(PAC.insert_statement(outtable, schema)),\n pacout\n )\n put.close()\n conn.commit()", "def Huachiclave(self):\n\n query = \"\"\"SELECT timestamp,huachiclave,cantidad,entregado FROM huachilate WHERE entregado = '0' ORDER BY timestamp\"\"\"\n\n query2 = \"\"\"INSERT INTO huachilate (timestamp,huachiclave,cantidad,entregado) VALUES (?,?,?,?)\"\"\"\n\n resultado = self.cursor.execute(query).fetchall()\n\n if resultado == []:\n \n timestamp = time.time()\n \n huachiclave = \"\".join(random.choices(string.ascii_letters + string.digits,k = 7))\n\n cantidad = random.randint(5000,50000)\n\n self.cursor.execute(query2,(timestamp,huachiclave,cantidad,0))\n\n self.conn.commit()\n\n return (timestamp,huachiclave,cantidad,0)\n \n else:\n return resultado[-1]", "def action_intento_quitar(self):\n for viaje in self:\n\t if self.asignacion_id:\n\t self.asignacion_id.state = 'descartado'\n\t self.asignacion_id.fechahora_ar = datetime.now()\n\n\t self.asignacion_id = False\n\t self.asignadoa_id = False\n\t self.asignadoi_id = False", "def repite_saludo(n, saludo):\n\treturn saludo * n", "def main():\n \n # Directorio donde guardaremos los bloques\n dir = 
'archivos_formateados/'\n if not os.path.exists(dir):\n os.mkdir(dir)\n\n # Variables inicializadas que usare\n inicio = 0\n final = 578704 #578703\n\n #Creacion de archivos .json semejantes a un diccionario\n for i in range (inicio,final):\n bloque = str(i)\n \n # Hacemos una request para obtener el bloque\n peticion = 'http://api.coinsecrets.org/block/' + bloque\n resp = requests.get(peticion)\n #time.sleep(1)\n datos_bloque = resp.json()\n\n # Abrimos un archivo y volcamos la informacion formateada\n with open(dir + bloque + '.json', 'w') as file_dict:\n json.dump(datos_bloque, file_dict, indent = 4)\n file_dict.close()\n\n print 'Bloque',bloque, 'almacenado'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate Enemies and Treasures at random
def generar_Enemigos_Tesoros(self):
    for i in range(2, len(self.listaHabitaciones)):
        """
        Here you would define how to place the enemies, e.g. if you have an
        enemy graph, by looking at the doors and deciding whether or not to place an enemy
        """
        enemigoOTesoro = random.uniform(0, 1)  # Probability of there being a treasure or an enemy
        if enemigoOTesoro <= 0.80:
            # Enemy room
            llave = random.uniform(0, 1)  # Probability of giving a key to an enemy
            if llave > 0.9:
                self.enemigos.append(Enemigo(self.listaHabitaciones[i].x + self.listaHabitaciones[i].ancho / 2,
                                             self.listaHabitaciones[i].y + self.listaHabitaciones[i].alto / 2,
                                             7, 7, True))
            else:
                self.enemigos.append(Enemigo(self.listaHabitaciones[i].x + self.listaHabitaciones[i].ancho / 2,
                                             self.listaHabitaciones[i].y + self.listaHabitaciones[i].alto / 2,
                                             7, 7, False))
        else:
            # Treasure room
            self.tesoros.append(Tesoro(self.listaHabitaciones[i].x + self.listaHabitaciones[i].ancho / 2,
                                       self.listaHabitaciones[i].y + self.listaHabitaciones[i].alto / 2,
                                       8, 8))
[ "def enem():\r\n global esl, xyz, h, POW, punt2, puntaje2, livesl, m, punt1, puntaje1, ene, en, en1, n, esn, lives,contene, xz, x, contene2, esm \r\n for o in range(len(ene)):\r\n \r\n # golpear tortuga mario\r\n if((c.coords(mario)[0]> c.coords(ene[o][0])[0]-50 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+50 and c.coords(mario)[1]> c.coords(ene[o][0])[1] and c.coords(mario)[1]< c.coords(ene[o][0])[1]+120 and ene[o][1]!=7 and ene[o][1]!=2 and ene[o][1]!=9) or xyz==True):\r\n ene[o][1]= 2\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg1, anchor=NW)\r\n # golpear tortuga luigo\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-50 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+50 and c.coords(luigi)[1]> c.coords(ene[o][0])[1] and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+100 and ene[o][1]!=7 and ene[o][1]!=2 and ene[o][1]!=9):\r\n ene[o][1]= 2\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg1, anchor=NW)\r\n # morir mario\r\n if(c.coords(mario)[0]> c.coords(ene[o][0])[0]-25 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+25 and c.coords(mario)[1]> c.coords(ene[o][0])[1]-30 and c.coords(mario)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]!=2 and esm==True):\r\n esm=False\r\n c.coords(mario, 750, 665)\r\n lives-=1\r\n if(lives==4):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==3):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==2):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==1):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n if(lives==0):\r\n c.delete(livem[len(livem)-1])\r\n livem.pop(len(livem)-1)\r\n c.coords(mario, -1000, -1000)\r\n # morir luigi\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-25 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+25 and c.coords(luigi)[1]> c.coords(ene[o][0])[1]-30 and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]!=2 and esl==True):\r\n esl=False\r\n c.coords(luigi, 750, 665)\r\n livesl-=1\r\n if(livesl==4):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==3):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==2):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==1):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n if(livesl==0):\r\n c.delete(livel[len(livel)-1])\r\n livel.pop(len(livel)-1)\r\n c.coords(luigi, -1000, -1000)\r\n # matar enemigo y puntuacion mario\r\n if(c.coords(mario)[0]> c.coords(ene[o][0])[0]-45 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+45 and c.coords(mario)[1]> c.coords(ene[o][0])[1]-30 and c.coords(mario)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]==2):\r\n puntuacion()\r\n c.delete(ene[o][0])\r\n ene.remove(ene[o])\r\n break\r\n if(c.coords(mario)[0]> c.coords(ene[o][0])[0]-45 and c.coords(mario)[0]< c.coords(ene[o][0])[0]+45 and c.coords(mario)[1]> c.coords(ene[o][0])[1] and c.coords(mario)[1]< c.coords(ene[o][0])[1]+100 and (ene[o][1]==7 or ene[o][1]==9)):\r\n puntuacion()\r\n c.delete(ene[o][0])\r\n ene.remove(ene[o])\r\n break\r\n # matar enemigo y puntuacion luigi\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-45 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+45 and c.coords(luigi)[1]> c.coords(ene[o][0])[1]-30 and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+30 and ene[o][1]==2):\r\n puntuacion2()\r\n c.delete(ene[o][0])\r\n 
ene.remove(ene[o])\r\n break\r\n if(c.coords(luigi)[0]> c.coords(ene[o][0])[0]-45 and c.coords(luigi)[0]< c.coords(ene[o][0])[0]+45 and c.coords(luigi)[1]> c.coords(ene[o][0])[1] and c.coords(luigi)[1]< c.coords(ene[o][0])[1]+100 and (ene[o][1]==7 or ene[o][1]==9)):\r\n puntuacion2()\r\n c.delete(ene[o][0])\r\n ene.remove(ene[o])\r\n break\r\n # movimiento enemigo 1\r\n if(ene[o][1]==0):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg, anchor=NW)\r\n c.move(ene[o][0],-3-x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=1\r\n elif(ene[o][1]==1):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg2, anchor=NW)\r\n c.move(ene[o][0],-3-x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=0\r\n # moviento enemigo 2\r\n if(ene[o][1]==3):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg3, anchor=NW)\r\n c.move(ene[o][0],3+x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=4\r\n elif(ene[o][1]==4):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg4, anchor=NW)\r\n c.move(ene[o][0],3+x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=3\r\n # movimiento enemigo 3\r\n if(contene<=5): \r\n if(ene[o][1]==5):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg6, anchor=NW)\r\n c.move(ene[o][0],-3-x,-10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=5\r\n contene+=1\r\n elif(contene>=5):\r\n if(ene[o][1]==5):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg5, anchor=NW)\r\n c.move(ene[o][0],-3-x,10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=5\r\n contene+=1\r\n if(contene==15):\r\n contene=0\r\n # movimiento enemigo 4\r\n if(contene2<=5): \r\n if(ene[o][1]==6):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg9, anchor=NW)\r\n c.move(ene[o][0],3+x,-10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=6\r\n contene2+=1\r\n elif(contene2>=5):\r\n if(ene[o][1]==6):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg10, anchor=NW)\r\n c.move(ene[o][0],3+x,10)\r\n ventana.update()\r\n ventana.after(1)\r\n ene[o][1]=6\r\n contene2+=1\r\n if(contene2==15):\r\n contene2=0\r\n # movimiento enemigo 5\r\n if(ene[o][1]==7):\r\n en=c.coords(ene[o][0])[0]\r\n en1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(en, en1, image=eneimg7, anchor=NW)\r\n c.move(ene[o][0],-9-x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n if(ene[o][1]==9):\r\n ca=c.coords(ene[o][0])[0]\r\n ca1=c.coords(ene[o][0])[1]\r\n c.delete(ene[o][0])\r\n ene[o][0]= c.create_image(ca, ca1, image=eneimg11, anchor=NW)\r\n c.move(ene[o][0],9+x,0)\r\n ventana.update()\r\n ventana.after(1)\r\n ventana.after(85-y, enem)", "def reemplazo_generacional(self, individuos):\n #\n # ------ IMPLEMENTA AQUI TU CÓDIGO --------------------------------\n #\n \"\"\"\n Reemplaza n individuos de la generacion al azar\n \"\"\"\n reemplazo = [(self.adaptación(individuo), individuo)\n for 
individuo in individuos][:self.n_reemplazos]\n reemplazo.append(max(self.población))\n random.shuffle(self.población)\n reemplazo = reemplazo + self.población\n self.población = reemplazo[:self.n_población]", "def edicoes(palavra1, palavra2):\r\n pass", "def createtxtED(mapa,dirs):\n x=grass.read_command('r.stats',flags='a',input=mapa)\n \n y=x.split('\\n')\n os.chdir(dirs)\n txtsaida=mapa+'PCT_Borda.txt'\n txtreclass=open(mapa+'_EDGE.txt','w')\n txtreclass.write('COD'',''HA\\n')\n if y!=0:\n for i in y:\n if i !='':\n ##print i\n f=i.split(' ')\n if '*' in f :\n break\n else:\n ##print f\n ids=f[0]\n ids=int(ids)\n ##print ids\n ha=f[1]\n ha=float(ha)\n haint=float(ha)\n \n haint=haint/10000+1\n ##print haint\n \n ##print haint\n haint=round(haint,2)\n txtreclass.write(`ids`+','+`haint`+'\\n')\n txtreclass.close()", "def create_target_enrichment_from_te(te, apps, schema_editor):\n db_alias = schema_editor.connection.alias\n # We guess which type of te this was.\n if te.planning_version == 0:\n # This means it was planned in the no-tails era.\n fwd_tail = \"\"\n rev_tail = \"\"\n type = te_types[\"PCR\",apps,schema_editor]\n else:\n # This means it was planned in the PCR1 (with-tails) era.\n # NOTE: this might apply to new types of tes as well.\n fwd_tail = LEFT_TAIL\n rev_tail = RIGHT_TAIL\n type = te_types[\"PCR_with_tails\",app,schema_editor]\n # First we generate the primers from the UGSs.\n # No physical locations!\n OldPrimer = apps.get_model(\"linapp\",\"Primer\")\n d = unpack_slice(te.left.slice, apps, schema_editor)\n head = d[\"referencevalue\"].sequence\n d[\"name\"] = \"Mig_auto_fwd_{}\".format(te.id)\n d[\"type\"] = target_types[\"Plain\", apps, schema_editor]\n d[\"strand\"] = \"+\"\n d[\"sequence\"] = get_or_create_sequence(fwd_tail+head, apps, schema_editor)\n d[\"tail\"] = primer_tails[fwd_tail,apps,schema_editor]\n old_left = OldPrimer.objects.using(db_alias).create(**d)\n d = unpack_slice(te.right.slice, apps, schema_editor)\n head = rc(d[\"referencevalue\"].sequence)\n d[\"name\"] = \"Mig_auto_rev_{}\".format(te.id)\n d[\"type\"] = target_types[\"Plain\", apps, schema_editor]\n d[\"strand\"] = \"-\"\n d[\"sequence\"] = get_or_create_sequence(rev_tail+head, apps, schema_editor)\n d[\"tail\"] = primer_tails[rev_tail,apps,schema_editor]\n old_right = OldPrimer.objects.using(db_alias).create(**d)\n # Now we can generate the te.\n OldTargetEnrichment = apps.get_model(\"linapp\", \"TargetEnrichment\")\n old_te = OldTargetEnrichment.objects.using(db_alias).create(\n type=type,\n amplicon=getdna(\n te.chromosome,\n te.left.slice.start_pos,\n te.right.slice.end_pos,\n ),\n chromosome_id=te.chromosome_id,\n left=old_left,\n right=old_right,\n )\n target_ids = [target.old_target_id for target in \\\n te.targets.using(db_alias).all()]\n old_te.targets.add(*target_ids)\n old_te.partner.add(*get_partner_ids(te, apps, schema_editor))\n # No physical locations!\n te.old_te = old_te\n te.save()\n return old_te", "def att_totex(self,arquivo=None):\n\n if arquivo is None:\n arquivo = str(self.matricula).zfill(6) + '.tex'\n\n with open(arquivo, 'w') as f:\n f.write('\\\\section*{' + str(self.nome_completo) + '\\\\hfill ' + str(self.matricula).zfill(6) + '}\\n')\n\n f.write('\\\\begin{multicols}{2}\\n \\\\scriptsize')\n for s in range(1,3):\n f.write('\\\\begin{center} \\\\begin{tabular}{|c|c|c|c|c|c|c|}\\\\toprule\\n')\n f.write('\\\\multicolumn{7}{|c|}{' + str(s) + '$^\\\\circ$ semestre} \\\\\\\\ \\\\midrule\\n')\n f.write('& S & T & Q & Q & S & S \\\\\\\\ \\\\midrule\\n')\n for i in 
range(1,17):\n f.write(str(i) );\n for j in range(2,8):\n\n f.write('& ')\n\n for t in self.turmas_a_lecionar:\n if t.semestralidade == s and (j,i) in t.horarios:\n f.write(str(t.codigo) + ' ' + str(t.turma))\n\n f.write('\\\\\\\\ \\\\midrule \\n')\n\n f.write('\\\\end{tabular} \\\\end{center}\\n\\n')\n\n f.write('\\\\end{multicols}\\n')\n f.write('\\\\begin{multicols}{2}\\n')\n f.write('\\\\begin{center} \\\\begin{tabular}{|lm{6cm}|}\\n')\n f.write('\\\\multicolumn{2}{c}{Disciplinas a lecionar} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n f.write('\\\\multicolumn{2}{|c|}{1$^\\\\circ$ Semestre} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n for t in [i for i in self.turmas_a_lecionar if i.semestralidade == 1]:\n f.write(str(t.codigo) + ' & ' + t.nome + '\\\\\\\\ \\\\midrule\\n')\n f.write('\\\\midrule\\n')\n f.write('\\\\multicolumn{2}{|c|}{2$^\\\\circ$ Semestre} \\\\\\\\ \\\\midrule \\\\midrule\\n')\n for t in [i for i in self.turmas_a_lecionar if i.semestralidade == 2]:\n f.write(str(t.codigo) + ' & ' + t.nome + '\\\\\\\\ \\\\midrule\\n')\n f.write('\\\\end{tabular} \\\\end{center} \\\\vfill\\\\columnbreak\\n')\n f.write('\\\\end{multicols}\\n')", "def makePoem():\n adjFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\adj.txt\"\n advFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\adv.txt\"\n nounFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\noun.txt\"\n prepFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\prepositions.txt\"\n verbFile = r\"C:\\Users\\shockma\\Documents\\Special\\Python\\extract\\verb.txt\"\n adj = getWord(adjFile, 3)\n adv = getWord(advFile, 1)\n noun = getWord(nounFile, 3)\n prep = getWord(prepFile, 2)\n verb = getWord(verbFile, 3)\n aan = aOrAn(adj)\n return aan[0] + ' ' + adj[0] \\\n + ' ' + noun[0] + '\\n\\n' \\\n + aan[0] + ' ' + adj[0] \\\n + ' ' + noun[0] \\\n + ' ' + verb[0] \\\n + ' ' + prep[0] \\\n + ' the ' + adj[1] \\\n + ' ' + noun[1] + '\\n' \\\n + adv[0] \\\n + ', the ' + noun[0] \\\n + ' ' + verb[1] + '\\n'\\\n + 'the ' + noun[1] \\\n + ' ' + verb[2] \\\n + ' ' + prep [1] \\\n + ' ' + aan[2].lower() + ' ' + adj[2] \\\n + ' ' + noun[2]", "def Inferencias(self):\r\n self.GeneroMasculinoDisfrute = 0\r\n self.GeneroFemeninoDisfrute = 0\r\n self.GeneroMasculinoFacilidad = 0\r\n self.GeneroFemeninoFacilidad = 0\r\n self.EdadValor = 0\r\n self.NumeroUsuarios = 0\r\n self.ValorGeneroFacilidad = \"\"\r\n self.ValorGeneroDisfrute = \"\"\r\n self.valoredad = 0\r\n self.EdadMayor = 0\r\n for i in onto.Usuario.instances(): \r\n self.NumeroUsuarios = 1 + self.NumeroUsuarios\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Facilidad_percibida_de_uso\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoFacilidad = 1 + self.GeneroMasculinoFacilidad\r\n elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoFacilidad = 1 + self.GeneroFemeninoFacilidad \r\n if self.GeneroMasculinoFacilidad >= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Masculino\"\r\n if self.GeneroMasculinoFacilidad <= self.GeneroFemeninoFacilidad:\r\n self.ValorGeneroFacilidad = \"Femenino\"\r\n if prop.python_name == \"Disfrute_percibido\" and value>59:\r\n for prop in i.get_properties():\r\n for value in prop[i]:\r\n if prop.python_name == \"Genero\":\r\n if str(value) == \"Masculino\":\r\n self.GeneroMasculinoDisfrute = 1 + self.GeneroMasculinoDisfrute\r\n 
elif str(value) == \"Femenino\":\r\n self.GeneroFemeninoDisfrute = 1 + self.GeneroFemeninoDisfrute\r\n if self.GeneroMasculinoDisfrute >= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute= \"Masculino\"\r\n if self.GeneroMasculinoDisfrute <= self.GeneroFemeninoDisfrute:\r\n self.ValorGeneroDisfrute = \"Femenino\"\r\n if prop.python_name == \"Edad\":\r\n self.EdadValor = int(value) + self.EdadValor\r\n self.valoredad = int(value)\r\n if self.EdadMayor < self.valoredad:\r\n self.EdadMayor = self.valoredad\r\n\r\n if self.NumeroUsuarios==0:\r\n self.NumeroUsuarios=1\r\n PromedioEdad = (self.EdadValor / self.NumeroUsuarios)\r\n MayorEdad.set(self.EdadMayor) \r\n resultadopromedio.set(round(PromedioEdad))\r\n resultadodisfrute.set(self.ValorGeneroDisfrute)\r\n resultadofacilidad.set(self.ValorGeneroFacilidad)", "def saludo():\r\n\tprint (\"Hola! Bienvenido al juego Luces Afuera.\")\r\n\tprint (\"El objetivo es muy simple, apagar todas las luces.\") \r\n\tprint (\"Las luces prendidas son los puntos o y las apagadas los puntos ·\")\r\n\tprint (\"Cuando presionás una luz, escribiendo su posicion, como por ejemplo D4 o A3, ésta se prende o apaga dependiendo de su estado inicial.\")\r\n\tprint (\"Pero OJO! Cada vez que presionás una luz, sus vecinas tambien se presionarán.\")\r\n\tprint (\"En todo momento podes escribir RESET y volver al tablero original, pero esto te hace perder puntos.\")\r\n\tprint ()", "def tour(self, lieu):\n if random.randint(0, 100) <= self.agressivite: # si l'ennemi attaque\n persos_l = self.game.get_all_persos_lieu(lieu) # on recupere tous les persos dans ce lieu\n #\n if len(persos_l) >= 1: # s'il y a des persos dans ce lieu\n # on va d'abord choisir le type d'attaque de l'ennemi\n # comme ca, si l'ennemi n'a pas d'attaques disponnibles,\n # on ne va pas aller chercher tous les persos d'un lieu inutiliement\n tp_att = \"corps à corps\"\n cac = self.get_attaque(\"corps à corps\")\n dist = self.get_attaque(\"distance\")\n if cac is None and dist is None: # l'ennemi ne peut pas attaquer\n return\n elif cac is None:\n tp_att = \"distance\"\n elif dist is None:\n tp_att = \"corps à corps\"\n else:\n if self.moy_lst(cac) > self.moy_lst(dist):\n tp_att = \"corps à corps\"\n else:\n tp_att = \"distance\"\n # on va chercher un personnage cible\n p_cible = None\n for p in persos_l:\n if p.classe == \"tank\": # on attaque en priorité les tanks\n p_cible = p\n #\n if p_cible is None: # s'il n'y a pas de tanks dans ce lieu, on prend la premiere perso dans ce lieu\n p_cible = persos_l[0]\n # l'ennemi va attaquer le perso cible\n mess = self.attaque_cible(p_cible, tp_att)\n mess = json.dumps({\"type\": \"message\", \"value\": mess})\n self.game.server.send_all(mess)", "def get_naive_AI_orders (board, entities, turn, ship_list, nb_columns, nb_lines):\n # Initializing the order\n order = ''\n\n # Deleting the destroyed vessel from ship_list\n for ship in ship_list :\n if ship not in entities :\n del ship_list[ship_list.index(ship)]\n\n #Creating ship for the first turn\n if turn <=1 :\n ship_name = str(random.randint(1,100000000))\n ship_type = random.choice(['tanker','cruiser'])\n order += ship_name + ':' + ship_type\n ship_list.append(ship_name)\n return order, ship_list\n\n\n #generate ship orders\n if random.random() < 0.03 :\n ship_name = str(random.randint(1,100000000))\n ship_type = random.choice(['tanker','cruiser'])\n order += ' ' + ship_name + ':' + ship_type\n ship_list.append(ship_name)\n return order, ship_list\n\n #generate upgrade orders\n if random.random() < 0.1:\n 
upgrade_choice = random.choice(['regeneration','storage','range','move'])\n order += ' upgrade:' + upgrade_choice\n\n #generate movement orders\n if random.random() < 1.1 and len(ship_list) > 1: # toujours\n for iteration in range (1, 5):\n ship_name=ship_list[random.randint(0,len(ship_list)-1)]\n ship_coord_y = entities[ship_name]['coordinates'][0]\n ship_coord_x = entities[ship_name]['coordinates'][1]\n coordinates_y = str(random.randint(ship_coord_y - 1,ship_coord_y + 1))\n coordinates_x=str(random.randint(ship_coord_x - 1,ship_coord_x + 1))\n order += ' ' + ship_name + ':@' + coordinates_y + '-' + coordinates_x\n #generate attack orders\n if random.random() < 1.1 and len(ship_list) > 1: # toujours\n for iteration in range (1, 3):\n ship_name = ship_list[random.randint(0, len(ship_list) - 1)]\n coordinates_y = str(random.randint(1, nb_lines))\n coordinates_x = str(random.randint(1, nb_columns))\n damages = str(random.randint(1, 40))\n order += ' ' + ship_name + ':*' + coordinates_y + '-' + coordinates_x + '=' + damages\n\n #energy giving\n if random.random() < 1.1 and len(ship_list) > 1:\n giver = ship_list[random.randint(0,len(ship_list) - 1)]\n receiver = ship_list[random.randint(0,len(ship_list) - 1)]\n order += ' ' + giver + ':>' + receiver\n #energy abosorption\n if random.random() < 1.1 and len(ship_list) > 1:\n ship_name = ship_list[random.randint(0,len(ship_list) - 1)]\n coordinates_y = str(random.randint(1, nb_lines))\n coordinates_x = str(random.randint(1, nb_columns))\n order += ' ' + ship_name + ':<' + coordinates_y + \"-\" + coordinates_x\n\n\n return order, ship_list", "def gerarSentencas(self,n):\n sentencasTerminadas = set([])\n sentencasPorTerminar = set([])\n naoMaisDerivaveis = set([])\n \n #para cada um dos betas dessa producao \n for beta in self.producaoInicial.obterListaBetas():\n if len(beta) == 1:\n sentencasTerminadas.add(beta)\n elif len(beta) == 2:\n sentencasPorTerminar.add(beta)\n\n #depois de incializar as duas sentencas, comeca a derivar as possiveis\n while len(sentencasPorTerminar) > 0:\n\n #pega uma das sentencas por terminar\n sentenca = sentencasPorTerminar.pop()\n naoMaisDerivaveis.add(sentenca)\n\n #para cada um dos simbolos dessa sentenca\n for indice in range(len(sentenca)):\n simbolo = sentenca[indice]\n \n #se for um simbolo nao terminal\n if simbolo in self.naoTerminais:\n \n #pega a producao que corresponde a esse simbolo e todas as suas possiveis transicoes\n producao = self.obterProducao(simbolo)\n transPossiveis = producao.obterListaBetas()\n \n #para cada uma de suas transicoes\n for trans in transPossiveis:\n \n #verifica se aplicando essa transicao a sentenca ainda esta dentro do tamanho desejado (-1 pq tira o nao terminal que\n #pode ser substituido por um terminal )\n if len(sentenca) - 2 + len(trans) <= n:\n \n #faz a substituicao\n nova = sentenca.replace(simbolo, trans)\n terminada = True\n \n if nova != \"&\":\n nova = nova.replace('&','')\n\n #verifica se essa sentenca ainda pode ser derivada\n for s in nova:\n if s in self.naoTerminais:\n terminada = False\n\n if terminada and len(nova)<=n:\n sentencasTerminadas.add(nova)\n\n elif not terminada and nova not in naoMaisDerivaveis:\n sentencasPorTerminar.add(nova)\n \n return sentencasTerminadas", "def generateenemy(self, board):\n self.createenemy(board, 'E')\n return board", "def poderGenerarLB(self):\n\t\t\n\t\tcantItems = 0\n\t\tfor i in self.items:\n\t\t\tcantItems = cantItems + 1\n\t\taprobados = 0\n\t\tfor i in self.items:\n\t\t\tif i.estado == 'aprobado':\n\t\t\t\taprobados 
= aprobados + 1\n\t\t\t\n\t\tassert cantItems != aprobados, 'Se puede generar una linea base cuando todos los items de la fase esta estan aprobados'", "def trouveEncadrements(jeu,coup,tous=True):\n ret=[]\n if checkEncadrementDirection(jeu,coup[0],coup[1],1,1):\n ret.append([1,1])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],0,1):\n ret.append([0,1])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],-1,1):\n ret.append([-1,1])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],-1,0):\n ret.append([-1,0])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],-1,-1):\n ret.append([-1,-1])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],0,-1):\n ret.append([0,-1])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],1,-1):\n ret.append([1,-1])\n if not tous:\n return ret\n if checkEncadrementDirection(jeu,coup[0],coup[1],1,0):\n ret.append([1,0])\n return ret", "def generate_all_obstructions(env = None):\n import settings\n \n if env is None:\n env = openravepy.Environment() \n env.Load('boxes.dae')\n elif type(env) is str:\n filename = env\n env = openravepy.Environment() \n env.Load(filename)\n \n #env.SetViewer('qtcoin')\n robot=env.GetRobots()[0]\n utils.pr2_tuck_arm(robot)\n manip = robot.SetActiveManipulator('rightarm')\n objects = [b\n for b in env.GetBodies()\n if b.GetName().startswith(\"random_\")]\n \n obstructions_text = []\n position_index = 0\n for obj in objects: \n #trying to grasp\n print \"Testing object \", obj\n try:\n get_collision_free_grasping_pose(\n robot, \n obj,\n max_trials=settings.collision_free_grasping_samples\n )\n print \"Object \", obj, \"is graspable\"\n except GraspingPoseError:\n print \"Object \", obj, \"is NOT graspable, getting occlusions\"\n collision_list = reachability.get_occluding_objects_names(robot,\n obj,\n lambda b:b.GetName().startswith(\"random\"),\n settings.occluding_objects_grasping_samples,\n just_one_attempt=False)\n for coll in collision_list:\n for obstr in coll:\n s = \"(Obstructs p%d %s %s)\" %(position_index,\n obstr, obj.GetName())\n obstructions_text.append(s)\n position_index += 1\n\n print \"\\n\\n\\n\"\n print \"\\n\".join(obstructions_text)", "def crear_todo_el_mapa(self):\n for i in range(0,self.cantidad_de_paredes_no_rompibles): # Defino paredes no rompibles\n self.pos_de_paredes_no_rompibles_en_x += self.distancia_entre_paredes_no_rompibles * (i != 0)\n for g in range(0,self.cantidad_de_paredes_no_rompibles):\n if self.pos_de_paredes_no_rompibles_en_y == 570:\n self.pos_de_paredes_no_rompibles_en_y = 90\n self.pos_de_paredes_no_rompibles_en_y += self.distancia_entre_paredes_no_rompibles * (g != 0) \n self.paredes_no_rompibles.append(parednorompible.Parednorompible([self.pos_de_paredes_no_rompibles_en_x,self.pos_de_paredes_no_rompibles_en_y]))\n \n for i in range(0,len(self.paredes_no_rompibles)):\n self.lista_de_objetos.append(self.paredes_no_rompibles[i].set_estado_de_algunas_casillas()) # Se setean el estado de las casillas con una pared no rompible encima\n \n for i in range(0,self.cantidad_de_casillas): # Defino casillas y a su vez esta crea las casillas rompibles\n self.id_casilla[0] += 1 * (i != 0)\n self.pos_de_casillas_en_x += self.longitud_de_lado_de_casilla * (i != 0)\n for g in range(0,self.cantidad_de_casillas):\n if self.pos_de_casillas_en_y == 650:\n self.pos_de_casillas_en_y = 10 \n self.pos_de_casillas_en_y += 
self.longitud_de_lado_de_casilla * (g != 0)\n if self.id_casilla[1] == 8:\n self.id_casilla[1] = 0\n self.id_casilla[1] += 1 * (g != 0)\n self.set_id_casilla = (self.id_casilla[0],self.id_casilla[1])\n self.casillas.append(casillas.Casilla(self.set_id_casilla,[self.pos_de_casillas_en_x,self.pos_de_casillas_en_y],self.lista_de_objetos,self.provabilidad_de_spawn_de_casillas_rompibles))\n \n self.crear_portal()\n self.crear_white_walkers() # Se crean tres objetos de la clase WhiteWalker", "def get_exemplu_apartamente():\r\n apartamente = []\r\n p = 100\r\n for i in range(0,10):\r\n adauga_apartament(apartamente,i*p,i*p+1,i*p+2,i*p+3,i*p+4)\r\n return apartamente", "def newGen(self, poblacion):\r\n nuevaGeneracion = self.seleccionarElite(poblacion)\r\n # Los primeros sujetos de la nueva generación son la élite de la anterior generación\r\n # Ahora vamos a seleccionar individuos al azar entre la poblacion para generar hijos\r\n while len(nuevaGeneracion) != self.TAM_POBLACION:\r\n padre = poblacion[r.randint(0, self.TAM_POBLACION - 1)]\r\n madre = poblacion[r.randint(0, self.TAM_POBLACION - 1)]\r\n hijo = self.cruzarIndividuos(padre, madre)\r\n if hijo not in nuevaGeneracion:\r\n nuevaGeneracion.append(hijo)\r\n return nuevaGeneracion" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set variables to represent the common column names used in this class directly. This should make future schema changes a little easier to handle. It is NOT meant to function as a general column map, just to abstract values which are used within this class.
def _colNames(self):
    self.mjdCol = 'expMJD'
    self.fieldIdCol = 'fieldID'
    self.raCol = 'fieldRA'
    self.decCol = 'fieldDec'
    self.propIdCol = 'propID'
    self.propConfCol = 'propConf'
    self.propNameCol = 'propName'  #(propname == proptype)
    # For config parsing.
    self.versionCol = 'version'
    self.sessionDateCol = 'sessionDate'
    self.runCommentCol = 'runComment'
[ "def _get_column_mapping(cls) -> Dict[str, str]:\n pass", "def _populate_table_keywords(self):\n for idx, column in enumerate(self.columns):\n for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():\n val = getattr(column, attr)\n if val is not None:\n keyword = keyword + str(idx + 1)\n self._header[keyword] = val", "def rename_columns(self):\r\n self.columns = [self._date, self._net_purchase, self._gross_sale, self._tax, self._margin]\r\n self.all_data.columns = self.columns", "def setColumns( self, names ):\n self.columns = names", "def _create_db_columns_def_(self):\r\n\r\n columns = {}\r\n first_dict = self.new_data[0]\r\n\r\n for key, value in first_dict.items():\r\n columns.update({key: None})\r\n\r\n for key, value in first_dict.items():\r\n if key == 'IpAddress':\r\n columns[key] = 'TEXT PRIMARY KEY'\r\n elif isinstance(value, str):\r\n columns[key] = \"TEXT\"\r\n elif isinstance(value, float):\r\n columns[key] = \"REAL\"\r\n else:\r\n columns[key] = \"TEXT\"\r\n\r\n return columns", "def extract_column_names(self) -> Dict[str, Tuple[str, str]]:\n fields = []\n for field in self.properties.keys():\n if not is_airbyte_column(field):\n fields.append(field)\n result = {}\n field_names = set()\n for field in fields:\n field_name = self.name_transformer.normalize_column_name(field, in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(field, in_jinja=True)\n if field_name_lookup in field_names:\n # TODO handle column name duplicates or collisions deterministically in this stream\n for i in range(1, 1000):\n field_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=False)\n field_name_lookup = self.name_transformer.normalize_column_identifier_case_for_lookup(field_name)\n jinja_name = self.name_transformer.normalize_column_name(f\"{field}_{i}\", in_jinja=True)\n if field_name_lookup not in field_names:\n break\n field_names.add(field_name_lookup)\n result[field] = (field_name, jinja_name)\n return result", "def _create_humanized_column_names_mapping(self) -> Dict[str, str]:\n hcnm = {\n forecast_parameter.value: forecast_parameter.name\n for forecast_parameter in DWDForecastParameter\n }\n\n return hcnm", "def _extra_field_columns(self):\n return sql.SQL(', ').join(self._extra_field_identifiers)", "def setColumns(self, *args):\n if not args:\n self._column_to_role = {col: role for col, role in enumerate(itertools.chain(self._role_to_prop.keys(),\n self._ref_role_to_prop.keys()))}\n self._column_names = [prop for prop in itertools.chain(self._role_to_prop.values(),\n self._ref_role_to_prop.values())]\n return\n\n names = args[0].toVariant() if isinstance(args[0], qtc.QVariant) else list(map(lambda a: str(a), args))\n self._column_names = names\n\n self._column_to_role = {}\n for col, name in enumerate(names):\n try:\n role = next(filter(lambda rn: rn[1] == name, itertools.chain(self._role_to_prop.items(),\n self._ref_role_to_prop.items())))[0]\n except:\n continue\n\n self._column_to_role[col] = role", "def __init__(self, shard_mapping_id, table_name, column_name, type_name,\n global_group):\n super(ShardMapping, self).__init__()\n self.__shard_mapping_id = shard_mapping_id\n self.__table_name = table_name\n self.__column_name = column_name\n self.__type_name = type_name\n self.__global_group = global_group", "def columns(self) -> typing.Mapping['series.Column', parser.Column]:\n return {}", "def _mapColumnNames(self, oldColumns, newColumns):\n 
newToOldNameMapping = {}\n oldColumnsRemoved = []\n newColumnsAdded = []\n for ocol in oldColumns:\n if ocol in newColumns:\n newToOldNameMapping[ocol] = ocol\n elif self._renamedColumns.has_key(ocol):\n newToOldNameMapping[self._renamedColumns[ocol]] = ocol\n else:\n oldColumnsRemoved.append(ocol)\n for ncol in newColumns:\n if not ncol in oldColumns:\n if not ncol in self._renamedColumns.values():\n newColumnsAdded.append(ncol)\n return (oldColumnsRemoved, newColumnsAdded, newToOldNameMapping)", "def _static_columns(e, t, key, ins=None):\n column_data_keys = [\"Name\", \"Key\", \"UpdatedUser\", \"UpdatedTime\", \"UpdatedUserGroup\", \"CreateTime\", \"TradeTime\",\n \"TraderID\", \"Portfolio\", \"RelTrade\", \"Status\", \"RelInstrument\", \"ConfoSent\", \"ConfoText\",\n \"Acquirer\", \"Counterparty\", \"BenchmarkInstrument\", \"Insid\", \"AmendmentReason\", \"Instype\",\n \"AmendmentReasonType\"]\n column_data = {}\n for col_name in column_data_keys:\n column_data[col_name] = \"\"\n\n column_data['Name'] = e.record_type\n column_data['Key'] = str(key)\n column_data['UpdatedUser'] = _cleanup(e.updat_usrnbr.userid)\n column_data['UpdatedTime'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(e.updat_time))\n column_data['UpdatedUserGroup'] = _cleanup(e.updat_usrnbr.grpnbr.grpid)\n if t:\n column_data['CreateTime'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(e.creat_time))\n column_data['TradeTime'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t.time))\n column_data['TraderID'] = _cleanup(t.creat_usrnbr.userid)\n column_data['Portfolio'] = _cleanup(t.prfnbr.prfid)\n column_data['RelTrade'] = str(t.trdnbr)\n column_data['Status'] = str(t.status)\n column_data['RelInstrument'] = _cleanup(t.insaddr.insid)\n column_data['ConfoSent'] = _cleanup(str(t.add_info('Confo Date Sent')))\n column_data['ConfoText'] = _cleanup(t.add_info('Confo Text'))\n # simulated trades may be missing the acquirer\n if t.acquirer_ptynbr:\n column_data['Acquirer'] = _cleanup(t.acquirer_ptynbr.ptyid)\n column_data['Counterparty'] = _cleanup(t.counterparty_ptynbr.ptyid)\n column_data['BenchmarkInstrument'] = 'No'\n column_data['AmendmentReason'] = _cleanup(t.add_info(at_addInfoSpecEnum.AMEND_REASON_TRD))\n column_data['AmendmentReasonType'] = _cleanup(t.add_info(at_addInfoSpecEnum.AMEND_REASON_TYPE_TRD))\n column_data['Insid'] = _cleanup(t.insaddr.insid)\n column_data['Instype'] = _cleanup(t.insaddr.instype)\n\n if e.record_type == 'Instrument':\n # !!!the instrument amendment reasons have priority\n if _cleanup(e.add_info(at_addInfoSpecEnum.AMEND_REASON_INS)):\n column_data['AmendmentReason'] = _cleanup(e.add_info(at_addInfoSpecEnum.AMEND_REASON_INS))\n if _cleanup(e.add_info(at_addInfoSpecEnum.AMEND_REASON_TYPE_INS)):\n column_data['AmendmentReasonType'] = _cleanup(e.add_info(at_addInfoSpecEnum.AMEND_REASON_TYPE_INS))\n if ins:\n column_data['BenchmarkInstrument'] = 'Yes'\n column_data['Insid'] = _cleanup(ins.insid)\n column_data['Instype'] = _cleanup(ins.instype)\n\n output = ''\n for c_key, item in column_data.items():\n output = output + \"<{0}>{1}</{0}>\\n\".format(c_key, item)\n\n return output", "def updateColumns(self):\r\n if self._table_name:\r\n Base.cursor.execute(f\"describe {self._table_name}\")\r\n results = Base.cursor.fetchall()\r\n if Base.cursor.rowcount>0:\r\n self._column_list = []\r\n for column in results:\r\n self._column_list.append(column[0])\r\n if column[3] == \"PRI\":\r\n self.pk = column[0]\r\n setattr(self,column[0],None)\r\n else:\r\n raise Exception(f\"Table 
{self._table_name} has no columns\")", "def _create_columns(self,\n column_names,\n logical_types,\n semantic_tags,\n use_standard_tags,\n column_descriptions,\n column_metadata):\n datacolumns = {}\n for name in column_names:\n if logical_types and name in logical_types:\n logical_type = logical_types[name]\n else:\n logical_type = None\n if semantic_tags and name in semantic_tags:\n semantic_tag = semantic_tags[name]\n else:\n semantic_tag = None\n if column_descriptions:\n description = column_descriptions.get(name)\n else:\n description = None\n if column_metadata:\n metadata = column_metadata.get(name)\n else:\n metadata = None\n dc = DataColumn(self._dataframe[name], logical_type, semantic_tag, use_standard_tags, name, description, metadata)\n datacolumns[dc.name] = dc\n return datacolumns", "def sql_vars(self):\n return {\n 'constraint_name': self.name,\n 'attr': self._attr.slug,\n 'db_table': self.db_table.name,\n 'minlen': self._attr.minlen,\n }", "def sql_vars(self):\n return {\n 'index_name': self.name,\n 'spec': self.spec,\n 'db_table': self.db_table.name,\n }", "def define_four_columns(self, rdf, colnames):\n for name in colnames:\n rdf = rdf.Define(name, \"rdfentry_\")\n\n return rdf", "def get_column_names(self):\r\n return [column.key for column in self.table.columns]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch 'colnames' from 'tableName'. colnames = the columns to fetch from the table. sqlconstraint = sql constraint to apply to data (minus "WHERE"). distinctExpMJD = group by expMJD to get unique observations only (default True). groupBy = group by col 'groupBy' (will override group by expMJD). tableName = the opsim table to query.
def fetchMetricData(self, colnames, sqlconstraint, distinctExpMJD=True, groupBy='expMJD', tableName='Summary'): # To fetch data for a particular proposal only, add 'propID=[proposalID number]' as constraint, # and to fetch data for a particular filter only, add 'filter ="[filtername]"' as a constraint. if (groupBy is None) and (distinctExpMJD is False): warnings.warn('Doing no groupBy, data could contain repeat visits that satisfy multiple proposals') table = self.tables[tableName] if (groupBy is not None) and (groupBy != 'expMJD'): if distinctExpMJD: warnings.warn('Cannot group by more than one column. Using explicit groupBy col %s' %(groupBy)) metricdata = table.query_columns_Array(chunk_size = self.chunksize, constraint = sqlconstraint, colnames = colnames, groupByCol = groupBy) elif distinctExpMJD: metricdata = table.query_columns_Array(chunk_size = self.chunksize, constraint = sqlconstraint, colnames = colnames, groupByCol = self.mjdCol) else: metricdata = table.query_columns_Array(chunk_size = self.chunksize, constraint = sqlconstraint, colnames = colnames) return metricdata
[ "def getColumns(self, tableName):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n c = self.conn.execute('pragma table_info(%s)' % tableName)\n return c.fetchall()", "def fetchFieldsFromSummaryTable(self, sqlconstraint, raColName=None, decColName=None):\n # Fetch field info from the Output table, by selecting unique fieldID + ra/dec values.\n # This implicitly only selects fields which were actually observed by opsim.\n if raColName is None:\n raColName = self.raCol\n if decColName is None:\n decColName = self.decCol\n table = self.tables['Summary']\n fielddata = table.query_columns_Array(constraint=sqlconstraint,\n colnames=[self.fieldIdCol, raColName, decColName],\n groupByCol=self.fieldIdCol)\n return fielddata", "def getColumnDescriptors(self, tableName):\n pass", "def getColNames(self, dbType='rawDb', tableName=''):\n\n if len(tableName) == 0:\n colsAndTypes = self.getColNamesAndTypes(dbType=dbType)\n else:\n colsAndTypes = self.getColNamesAndTypes(\n dbType=dbType, tableName=tableName)\n if not colsAndTypes:\n return None\n cols = []\n for tup in colsAndTypes:\n cols.append(tup[0])\n return cols", "def get_table_columns(self, table_name):\n\n sql = \"\"\"select * from {} limit 1\"\"\".format(table_name)\n\n rows, meta = self.query(sql)\n # will fail if we get no rows\n return [str(r[0]) for r in meta]", "def retrieve(self, table, cols, col_rules):\n # todo: add string comp support\n cursor = self.conn.cursor()\n num_cols = len(col_rules)\n\n # from the table select all the columns to filter for\n sql_cmd = \"select \" + \", \".join([key for key in col_rules]) + \" from \\\"\" + table + \"\\\"\"\n cursor.execute(sql_cmd)\n filter_sets = cursor.fetchall()\n\n # repeat every argument number of times it appears in the selection\n mult = [len(re.findall(\"{}\", col_rules[key])) for key in col_rules]\n\n def _repeat_vals(vals, repeats):\n rep_vals = []\n [[rep_vals.append(vals[i]) for _ in range(repeats[i])] for i in range(num_cols)]\n return rep_vals\n\n filter_sets = [_repeat_vals(set, mult) for set in filter_sets]\n\n # evaluate every row to get a boolean mask of examples\n rule_tmp = \"(\" + \") and (\".join([col_rules[key] for key in col_rules]) + \")\"\n sel_mask = [eval(rule_tmp.format(*val_set)) for val_set in filter_sets]\n\n # from the table get all the columns to retrieve\n sql_cmd = \"select \" + \" ,\".join(cols) + \" from \\\"\" + table + \"\\\"\"\n cursor.execute(sql_cmd)\n sel_sets = cursor.fetchall()\n\n # apply a boolean mask to take only entries that fit the selection rule\n sel_sets = list(compress(sel_sets, sel_mask))\n sel_vals = [list(x) for x in zip(*sel_sets)]\n return sel_vals", "def _colNames(self):\n self.mjdCol = 'expMJD'\n self.fieldIdCol = 'fieldID'\n self.raCol = 'fieldRA'\n self.decCol = 'fieldDec'\n self.propIdCol = 'propID'\n self.propConfCol = 'propConf'\n self.propNameCol = 'propName' #(propname == proptype)\n # For config parsing.\n self.versionCol = 'version'\n self.sessionDateCol = 'sessionDate'\n self.runCommentCol = 'runComment'", "def readColumnData(self, tableName, columnName, cursor):\n try:\n cursor.execute(\"Select \" + columnName + \" from \" + tableName)\n result = cursor.fetchall()\n cursor.execute(\"SHOW COLUMNS FROM \" + tableName + \" where Field in('%s')\" % columnName)\n statistics = cursor.fetchall()\n details = []\n info = []\n\n for i in result:\n info.append(i[0])\n\n logging.info(info)\n\n for j in statistics[0]:\n details.append(j)\n\n # conclusiveList = [details, content]\n df = 
DataFrame(details, columns=['Specifications'])\n logging.info(\"Successfully returned Data Frame.\")\n logging.info(\"readColumnData: {}\".format(df))\n\n return df\n except Error as e:\n logging.error(\"readColumnData: {}\".format(e))\n return None", "def getColNamesAndTypes(self, dbType='rawDb', tableName=''):\n if len(tableName) == 0:\n # caller didn't supply a table name, so get it from the\n # class init\n tableName = self._p['table']\n\n # Establish connection to database\n db = getDatabaseInfo(self._p[dbType])\n if db['type'] != 'postgres' and db['type'] != 'aircloak':\n print(f\"DB type '{db['type']}' must be 'postgres' or 'aircloak'\")\n return None\n connStr = str(\n f\"host={db['host']} port={db['port']} dbname={db['dbname']} user={db['user']} password={db['password']}\")\n conn = psycopg2.connect(connStr)\n cur = conn.cursor()\n # Query it for column names\n if db['type'] == 'postgres':\n sql = str(f\"\"\"select column_name, data_type \n from information_schema.columns where\n table_name='{tableName}'\"\"\")\n elif db['type'] == 'aircloak':\n sql = str(f\"show columns from {tableName}\")\n try:\n cur.execute(sql)\n except psycopg2.Error as e:\n print(f\"Error: getColNamesAndTypes() query: '{e}'\")\n self.cleanUp(cleanUpCache=False, doExit=True)\n ans = cur.fetchall()\n ret = []\n for row in ans:\n ret.append((row[0], row[1]))\n conn.close()\n return ret", "def query_table(self, table_name, columns_queried = [],predicate= '' ):\r\n columns = '\"' + '\",\"'.join(columns_queried) +'\"'\r\n if predicate == '':\r\n query = \"SELECT %s from %s ;\" % (columns, table_name,)\r\n result = self.engine.execute(query)\r\n else:\r\n query = \"SELECT %s from %s where %s;\" % (columns, table_name, predicate)\r\n result = self.engine.execute(query)\r\n\r\n result = [[row[ci] for ci in columns_queried] for row in result]\r\n\r\n\r\n return result", "def columnsFromSchema(self, tableName, soClass):\n\n fieldqry = \"\"\"\\\n SELECT rf.RDB$FIELD_NAME as field,\n t.RDB$TYPE_NAME as t,\n f.RDB$FIELD_LENGTH as flength,\n f.RDB$FIELD_SCALE as fscale,\n rf.RDB$NULL_FLAG as nullAllowed,\n coalesce(rf.RDB$DEFAULT_SOURCE, f.rdb$default_source) as thedefault,\n f.RDB$FIELD_SUB_TYPE as blobtype\n FROM RDB$RELATION_FIELDS rf\n INNER JOIN RDB$FIELDS f ON rf.RDB$FIELD_SOURCE = f.RDB$FIELD_NAME\n INNER JOIN RDB$TYPES t ON f.RDB$FIELD_TYPE = t.RDB$TYPE\n WHERE rf.RDB$RELATION_NAME = '%s'\n AND t.RDB$FIELD_NAME = 'RDB$FIELD_TYPE'\"\"\"\n\n colData = self.queryAll(fieldqry % tableName.upper())\n results = []\n for field, t, flength, fscale, nullAllowed, thedefault, blobType in colData:\n field = field.strip().lower()\n if thedefault:\n thedefault = thedefault.split(' ')[1]\n if thedefault.startswith(\"'\") and thedefault.endswith(\"'\"):\n thedefault = thedefault[1:-1]\n idName = str(soClass.sqlmeta.idName or 'id').upper()\n if field.upper() == idName:\n continue\n colClass, kw = self.guessClass(t, flength, fscale)\n kw['name'] = soClass.sqlmeta.style.dbColumnToPythonAttr(field).strip()\n kw['dbName'] = field\n kw['notNone'] = not nullAllowed\n kw['default'] = thedefault\n results.append(colClass(**kw))\n return results", "def getColumns(self, colnames, filterby=None, allowempty=True):\n def evaluate(l):\n for i in l:\n if i == '' or i == None:\n return False\n return True \n \n coldata=[] \n for c in colnames:\n vals = self.getColumnData(columnName=c, filterby=filterby) \n coldata.append(vals) \n if allowempty == False: \n result = [i for i in zip(*coldata) if evaluate(i) == True] \n coldata = zip(*result) \n 
return coldata", "def selectAll(table):\n\tcursor.execute(\"Select * from %s\" %(table))\n\trows = cursor.fetchall()\n\tcolnames = [desc[0] for desc in cursor.description]\n\treturn [colnames,rows]", "def populate_scoped_cols(self, scoped_tbls):\n\n columns = []\n for tbl in scoped_tbls:\n if tbl.schema:\n # A fully qualified schema.relname reference\n schema = self.escape_name(tbl.schema)\n relname = self.escape_name(tbl.name)\n\n if tbl.is_function:\n query = render_template(\"/\".join([self.sql_path, 'functions.sql']),\n schema_name=schema,\n func_name=relname)\n\n if self.conn.connected():\n status, res = self.conn.execute_dict(query)\n func = None\n if status:\n for row in res['rows']:\n func = FunctionMetadata(row['schema_name'], row['func_name'],\n row['arg_list'], row['return_type'],\n row['is_aggregate'], row['is_window'],\n row['is_set_returning'])\n if func:\n columns.extend(func.fieldnames())\n else:\n # We don't know if schema.relname is a table or view. Since\n # tables and views cannot share the same name, we can check\n # one at a time\n\n query = render_template(\"/\".join([self.sql_path, 'columns.sql']),\n object_name='table',\n schema_name=schema,\n rel_name=relname)\n\n if self.conn.connected():\n status, res = self.conn.execute_dict(query)\n if status:\n if len(res['rows']) > 0:\n # Table exists, so don't bother checking for a view\n for record in res['rows']:\n columns.append(record['column_name'])\n else:\n query = render_template(\"/\".join([self.sql_path, 'columns.sql']),\n object_name='view',\n schema_name=schema,\n rel_name=relname)\n\n if self.conn.connected():\n status, res = self.conn.execute_dict(query)\n if status:\n for record in res['rows']:\n columns.append(record['column_name'])\n else:\n # Schema not specified, so traverse the search path looking for\n # a table or view that matches. 
Note that in order to get proper\n # shadowing behavior, we need to check both views and tables for\n # each schema before checking the next schema\n for schema in self.search_path:\n relname = self.escape_name(tbl.name)\n\n if tbl.is_function:\n query = render_template(\"/\".join([self.sql_path, 'functions.sql']),\n schema_name=schema,\n func_name=relname)\n\n if self.conn.connected():\n status, res = self.conn.execute_dict(query)\n func = None\n if status:\n for row in res['rows']:\n func = FunctionMetadata(row['schema_name'], row['func_name'],\n row['arg_list'], row['return_type'],\n row['is_aggregate'], row['is_window'],\n row['is_set_returning'])\n if func:\n columns.extend(func.fieldnames())\n else:\n query = render_template(\"/\".join([self.sql_path, 'columns.sql']),\n object_name='table',\n schema_name=schema,\n rel_name=relname)\n\n if self.conn.connected():\n status, res = self.conn.execute_dict(query)\n if status:\n if len(res['rows']) > 0:\n # Table exists, so don't bother checking for a view\n for record in res['rows']:\n columns.append(record['column_name'])\n else:\n query = render_template(\"/\".join([self.sql_path, 'columns.sql']),\n object_name='view',\n schema_name=schema,\n rel_name=relname)\n\n if self.conn.connected():\n status, res = self.conn.execute_dict(query)\n if status:\n for record in res['rows']:\n columns.append(record['column_name'])\n\n return columns", "def what_columns(table):\n print [c.name for c in table.c]", "def get_cols_query(owner, table, condition=\"\"):\n query = '''SELECT column_name FROM all_tab_cols WHERE owner = '{}' AND table_name = '{}' '''.format(owner, table)\n if condition != \"\":\n query = query + \"AND {}\".format(condition)\n return query", "def _get_fields(cursor, table_name):\n try:\n cursor.execute(\"select * from {}\".format(table_name))\n except sqlite3.OperationalError:\n print(\"error: table {} does not exist in database\".format(table_name))\n return []\n names = [f[0] for f in cursor.description]\n return names", "def _colnames_from_description(self, context, cursor_description):\n\n dialect = context.dialect\n case_sensitive = dialect.case_sensitive\n translate_colname = context._translate_colname\n description_decoder = (\n dialect._description_decoder\n if dialect.description_encoding\n else None\n )\n normalize_name = (\n dialect.normalize_name if dialect.requires_name_normalize else None\n )\n untranslated = None\n\n self.keys = []\n\n for idx, rec in enumerate(cursor_description):\n colname = rec[0]\n coltype = rec[1]\n\n if description_decoder:\n colname = description_decoder(colname)\n\n if translate_colname:\n colname, untranslated = translate_colname(colname)\n\n if normalize_name:\n colname = normalize_name(colname)\n\n self.keys.append(colname)\n if not case_sensitive:\n colname = colname.lower()\n\n yield idx, colname, untranslated, coltype", "def get_column_names(self):\r\n return [column.key for column in self.table.columns]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch field information (fieldID/RA/Dec) from Output table.
def fetchFieldsFromSummaryTable(self, sqlconstraint, raColName=None, decColName=None): # Fetch field info from the Output table, by selecting unique fieldID + ra/dec values. # This implicitly only selects fields which were actually observed by opsim. if raColName is None: raColName = self.raCol if decColName is None: decColName = self.decCol table = self.tables['Summary'] fielddata = table.query_columns_Array(constraint=sqlconstraint, colnames=[self.fieldIdCol, raColName, decColName], groupByCol=self.fieldIdCol) return fielddata
[ "def _output_field_columns(self):\n return sql.SQL(', ').join(map(sql.Identifier, self._output_field_names))", "def readZTFfields(fielddef_file=\"/home/matteo/work/ZTF/Calibration/ZTF_Fields.txt\"):\n ftab_cols=[\n \"ID\", \"RA\", \"Dec\", \"Ebv\", \"Gal Long\", \n \"Gal Lat\", \"Ecl Long\", \"Ecl Lat\", \"Entry\"]\n fields=Table.read(fielddef_file, format='ascii', data_start=1, \n names=ftab_cols)\n primary=fields[fields['ID']<=879]\n secondary=fields[fields['ID']>=1001]\n return primary, secondary", "def read_output_fields(self, ut_file=None, field_list=None,\n sequence_list=None):\n if ut_file is None:\n ut_file = str(self.inp_template.with_suffix('.ut'))\n # verify FEA output presence\n if not self._check_fea_output_presence(Path(ut_file)):\n raise RuntimeError('No finite element analysis output found for'\n f' the class inp script {self.inp_template}.'\n ' Cannot load results.')\n # get output file\n SDZset._clean_comments(ut_file)\n # load FEA calc metadata\n self.load_FEA_metadata(ut_file)\n metadata_field_list = [*self.metadata['node']]\n metadata_field_list.extend(self.metadata['integ'])\n if field_list is None:\n field_list = metadata_field_list\n else:\n # check if required fields are in fea metadata\n if not set(field_list) <= set(metadata_field_list):\n raise ValueError(f'Inputed field list ({field_list}) is'\n f' not contained into FEA output field'\n f' list ({metadata_field_list})')\n # load nodal fields for each sequence and for each variable\n Nodal_field_sequence = []\n Integ_field_sequence = []\n for t in self.metadata['Sequence']:\n if sequence_list is not None:\n if t not in sequence_list:\n continue\n nodal_fields_dic = {}\n integ_fields_dic = {}\n for fieldname in field_list:\n if fieldname in self.metadata['node']:\n nodal_fields_dic[fieldname] = UR.ReadFieldFromUt(\n fileName=ut_file, fieldname=fieldname, time=t,\n atIntegrationPoints=False)\n elif fieldname in self.metadata['integ']:\n integ_fields_dic[fieldname] = UR.ReadFieldFromUt(\n fileName=ut_file, fieldname=fieldname, time=t,\n atIntegrationPoints=True)\n Nodal_field_sequence.append(nodal_fields_dic)\n Integ_field_sequence.append(integ_fields_dic)\n return Nodal_field_sequence, Integ_field_sequence", "def get_field_info(self):\n return self.world.field_info", "def getFields(self):\n raise RuntimeError('This function needs vetting')\n return self.hdr.getLookupLengthFields()", "def _makeColumnFromFieldInfo(ctx, colName, fi):\n\tif len(fi.userData)==1:\n\t\tres = svcs.OutputField.fromColumn(fi.userData[0])\n\telse: \n\t\tres = base.makeStruct(svcs.OutputField, name=colName)\n\tres.name = ctx.getName(colName)\n\tres.ucd = fi.ucd\n\tres.unit = fi.unit\n\tres.type = fi.type\n\n\t# XXX TODO: do something with stc's \"broken\" attribute\n\tres.stc = fi.stc\n\n\tif len(fi.userData)>1:\n\t\tres.description = (\"This field has traces of: %s\"%(\"; \".join([\n\t\t\tf.description for f in fi.userData if f.description])))\n\n\tif fi.tainted:\n\t\tres.description = (res.description+\" -- *TAINTED*: the value\"\n\t\t\t\" was operated on in a way that unit and ucd may be severely wrong\")\n\n\t# The xtype may be set by the node classes; this is used downstream\n\t# to transform to STC-S strings.\n\tif \"xtype\" in fi.properties:\n\t\tres.xtype = fi.properties[\"xtype\"]\n\t\tres.needMunging = True\n\t\n\t# dates and timestamps should be ISO format for TAP or consistency with it\n\tif res.type==\"date\" or res.type==\"timestamp\":\n\t\tres.xtype = \"adql:TIMESTAMP\"\n\t\n\t# integral types must have a null value set since 
we can't be\n\t# sure that a query yields defined results for all of them.\n\t# Tough luck if our artificial value is already taken by the table\n\t# (remedy: select a suitable null value in the column metadata)\n\tif (res.type in _artificialNULLs \n\t\t\tand (\n\t\t\t\tnot (res.values and res.values.nullLiteral)\n\t\t\t\tor fi.tainted)):\n\t\tnullLiteral = _artificialNULLs[res.type]\n\t\tif res.values:\n\t\t\tres.feedObject(\"values\", res.values.change(nullLiteral=nullLiteral))\n\t\telse:\n\t\t\tres.feedObject(\"values\", base.makeStruct(rscdef.Values, \n\t\t\t\tnullLiteral=nullLiteral))\n\n\tres.verbLevel = 1\n\tres.finishElement()\n\treturn res", "def get_fieldinfo(verbose=True):\n fieldinfo = collections.OrderedDict()\n\n fieldinfo['CDFS_1'] = {'name':'candels-cdfs-01', 'ra':53.062397 , 'dec':-27.80815506}\n fieldinfo['CDFS_2'] = {'name':'candels-cdfs-02', 'ra':53.06840134, 'dec':-27.82277679}\n fieldinfo['CDFS_3'] = {'name':'candels-cdfs-03', 'ra':53.07440948, 'dec':-27.83739662}\n fieldinfo['CDFS_4'] = {'name':'candels-cdfs-04', 'ra':53.08042145, 'dec':-27.85201454}\n fieldinfo['CDFS_5'] = {'name':'candels-cdfs-05', 'ra':53.08643341, 'dec':-27.86663437}\n fieldinfo['CDFS_6'] = {'name':'candels-cdfs-06', 'ra':53.07892227, 'dec':-27.80284119}\n fieldinfo['CDFS_7'] = {'name':'candels-cdfs-07', 'ra':53.08493423, 'dec':-27.81746101}\n fieldinfo['CDFS_8'] = {'name':'candels-cdfs-08', 'ra':53.09094238, 'dec':-27.83208084}\n fieldinfo['CDFS_9'] = {'name':'candels-cdfs-09', 'ra':53.09695435, 'dec':-27.84669876}\n fieldinfo['CDFS_10'] = {'name':'candels-cdfs-10', 'ra':53.10297012, 'dec':-27.86131859}\n fieldinfo['CDFS_11'] = {'name':'candels-cdfs-11', 'ra':53.09545135, 'dec':-27.79752731}\n fieldinfo['CDFS_12'] = {'name':'candels-cdfs-12', 'ra':53.1014595 , 'dec':-27.81214523}\n fieldinfo['CDFS_13'] = {'name':'candels-cdfs-13', 'ra':53.10747528, 'dec':-27.82676315}\n fieldinfo['CDFS_14'] = {'name':'candels-cdfs-14', 'ra':53.11348724, 'dec':-27.84138107}\n fieldinfo['CDFS_15'] = {'name':'candels-cdfs-15', 'ra':53.11950302, 'dec':-27.85599899}\n fieldinfo['CDFS_16'] = {'name':'candels-cdfs-16', 'ra':53.13603592, 'dec':-27.8506794 }\n fieldinfo['CDFS_17'] = {'name':'candels-cdfs-17', 'ra':53.15256882, 'dec':-27.84535599}\n fieldinfo['CDFS_18'] = {'name':'candels-cdfs-18', 'ra':53.1690979 , 'dec':-27.84003258}\n fieldinfo['CDFS_19'] = {'name':'candels-cdfs-19', 'ra':53.18562698, 'dec':-27.83470535}\n fieldinfo['CDFS_20'] = {'name':'candels-cdfs-20', 'ra':53.20215225, 'dec':-27.82937813}\n fieldinfo['CDFS_21'] = {'name':'candels-cdfs-21', 'ra':53.21867752, 'dec':-27.82404709}\n fieldinfo['CDFS_22'] = {'name':'candels-cdfs-22', 'ra':53.13002014, 'dec':-27.83606148}\n fieldinfo['CDFS_23'] = {'name':'candels-cdfs-23', 'ra':53.14654922, 'dec':-27.83073997}\n fieldinfo['CDFS_24'] = {'name':'candels-cdfs-24', 'ra':53.16307449, 'dec':-27.82541656}\n\n return fieldinfo", "def get_field_info(ar, column_names=None):\n def getit():\n\n if ar.request is None:\n columns = None\n else:\n columns = [\n str(x) for x in ar.request.REQUEST.getlist(\n constants.URL_PARAM_COLUMNS)]\n if columns:\n all_widths = ar.request.REQUEST.getlist(\n constants.URL_PARAM_WIDTHS)\n hiddens = [\n (x == 'true') for x in ar.request.REQUEST.getlist(\n constants.URL_PARAM_HIDDENS)]\n fields = []\n widths = []\n ah = ar.actor.get_handle()\n for i, cn in enumerate(columns):\n col = None\n for e in ah.list_layout.main.columns:\n if e.name == cn:\n col = e\n break\n if col is None:\n raise Exception(\"No column named %r in %s\" %\n (cn, 
ar.ah.list_layout.main.columns))\n if not hiddens[i]:\n fields.append(col)\n widths.append(int(all_widths[i]))\n else:\n if column_names:\n from lino.core import layouts\n ll = layouts.ColumnsLayout(column_names, datasource=ar.actor)\n lh = ll.get_layout_handle(settings.SITE.kernel.default_ui)\n columns = lh.main.columns\n columns = [e for e in columns if not e.hidden]\n else:\n ah = ar.actor.get_request_handle(ar)\n columns = ah.list_layout.main.columns\n\n # render them so that babelfields in hidden_languages get hidden:\n for e in columns:\n e.value = e.ext_options()\n #\n columns = [e for e in columns if not e.value.get('hidden', False)]\n\n columns = [e for e in columns if not e.hidden]\n\n widths = [\"%d\" % (col.width or col.preferred_width)\n for col in columns]\n #~ 20130415 widths = [\"%d%%\" % (col.width or col.preferred_width) for col in columns]\n #~ fields = [col.field._lino_atomizer for col in columns]\n fields = columns\n\n headers = [column_header(col) for col in fields]\n\n oh = ar.actor.override_column_headers(ar)\n if oh:\n for i, e in enumerate(columns):\n header = oh.get(e.name, None)\n if header is not None:\n headers[i] = header\n #~ print 20120507, oh, headers\n\n return fields, headers, widths\n \n u = ar.get_user()\n if u is None:\n return getit()\n else:\n return jsgen.with_user_profile(u.profile, getit)", "def get_table_fields(self, table):\n sql = 'PRAGMA TABLE_INFO(%s)' % table\n self.c.execute(sql)\n fields = [tup[1] for tup in self.c.fetchall()]\n return fields", "def getFields(sorted=True):", "def __getField(self, record, field):\n\t\t(offset, length) = (self.allFields[field].ffOffset, self.allFields[field].maxlength)\n\t\treturn record[offset:offset+length].strip()", "def fetchFieldsFromFieldTable(self, propID=None, degreesToRadians=True):\n # Note that you can't select any other sql constraints (such as filter).\n # This will select fields which were requested by a particular proposal or proposals,\n # even if they didn't get any observations.\n tableName = 'Field'\n if propID is not None:\n query = 'select f.%s, f.%s, f.%s from %s as f' %(self.fieldIdCol, self.raCol, self.decCol,\n self.dbTables['Field'][0])\n query += ', %s as p where (p.Field_%s = f.%s) ' %(self.dbTables['Proposal_Field'][0],\n self.fieldIdCol, self.fieldIdCol)\n if hasattr(propID, '__iter__'): # list of propIDs\n query += ' and ('\n for pID in propID:\n query += '(p.Proposal_%s = %d) or ' %(self.propIdCol, int(pID))\n # Remove the trailing 'or' and add a closing parenthesis.\n query = query[:-3]\n query += ')'\n else: # single proposal ID.\n query += ' and (p.Proposal_%s = %d) ' %(self.propIdCol, int(propID))\n query += ' group by f.%s' %(self.fieldIdCol)\n fielddata = self.queryDatabase(tableName, query)\n if len(fielddata) == 0:\n fielddata = np.zeros(0, dtype=list(zip([self.fieldIdCol, self.raCol, self.decCol],\n ['int', 'float', 'float'])))\n else:\n table = self.tables[tableName]\n fielddata = table.query_columns_Array(colnames=[self.fieldIdCol, self.raCol, self.decCol],\n groupByCol = self.fieldIdCol)\n if degreesToRadians:\n fielddata[self.raCol] = fielddata[self.raCol] * np.pi / 180.\n fielddata[self.decCol] = fielddata[self.decCol] * np.pi / 180.\n return fielddata", "def get_fields(self):\n for field_index in xrange(self.num_fields):\n yield dex_field(self, field_index)", "def getfield(self,pkt,s):\n s,id,wtype = self.get_field_header(s)\n if wtype == 2: # descriptor\n s,l = self.get_varint(s)\n self.set_format(l)\n return s[l:],self.m2i(pkt,s[:l])\n if wtype == 0: # 
varint\n olen = len(s)\n s,v = self.get_varint(s)\n self.set_format(olen - len(s))\n return s,self.m2i(pkt,v)\n if wtype == 1:\n self.set_format(8)\n return s[8:],self.m2i(pkt,s[:8])\n if wtype == 5:\n self.set_format(4)\n return s[4:],self.m2i(pkt,s[:4])\n return s,''", "def _resolve_output_field(self):\n # This guess is mostly a bad idea, but there is quite a lot of code\n # (especially 3rd party Func subclasses) that depend on it, we'd need a\n # deprecation path to fix it.\n sources_iter = (\n source for source in self.get_source_fields() if source is not None\n )\n for output_field in sources_iter:\n for source in sources_iter:\n if not isinstance(output_field, source.__class__):\n raise FieldError(\n \"Expression contains mixed types: %s, %s. You must \"\n \"set output_field.\"\n % (\n output_field.__class__.__name__,\n source.__class__.__name__,\n )\n )\n return output_field", "def getRawFields(sorted=True):", "def getOutputById(self, id_):\n\t\tquery = 'SELECT * from outputs where id = %s'\n\t\tself.executeQuery(query, (id_,))\n\t\trawOutput = self.fetchOne()\n\t\toutput = Output.Output()\n\t\toutput.setInputFromDb(rawOutput)\n\t\treturn output", "def get_record_fields(self, variable, field):\n # Empty result\n result = []\n\n for env_file in self._env_entryid_files:\n # Wait and resolve in self rather than in env_file\n logger.debug(\n \"(get_record_field) Searching in {}\".format(env_file.__class__.__name__)\n )\n if field == \"varid\":\n roots = env_file.scan_children(\"entry\")\n else:\n roots = env_file.get_nodes_by_id(variable)\n\n for root in roots:\n if root is not None:\n if field == \"raw\":\n result.append(env_file.get_raw_record(root))\n elif field == \"desc\":\n result.append(env_file.get_description(root))\n elif field == \"varid\":\n result.append(env_file.get(root, \"id\"))\n elif field == \"group\":\n result.extend(env_file.get_groups(root))\n elif field == \"valid_values\":\n # pylint: disable=protected-access\n vv = env_file._get_valid_values(root)\n if vv:\n result.extend(vv)\n elif field == \"file\":\n result.append(env_file.filename)\n\n if not result:\n for env_file in self._env_generic_files:\n roots = env_file.scan_children(variable)\n for root in roots:\n if root is not None:\n if field == \"raw\":\n result.append(env_file.get_raw_record(root))\n elif field == \"group\":\n result.extend(env_file.get_groups(root))\n elif field == \"file\":\n result.append(env_file.filename)\n\n return list(set(result))", "def get_fields(self):\n if not self.fields:\n with DatabaseConnection(\n self.conn_dic, self.table, geometry_column=self.geom\n ) as db:\n self.fields = db.fields\n if self.source_srid is None:\n self.source_srid = db.source_srid\n\n return self.fields" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch field information (fieldID/RA/Dec) from Field (+Proposal_Field) tables. propID = the proposal ID (default None), if selecting particular proposal can be a list degreesToRadians = RA/Dec values are in degrees in the Field table (so convert to radians).
def fetchFieldsFromFieldTable(self, propID=None, degreesToRadians=True): # Note that you can't select any other sql constraints (such as filter). # This will select fields which were requested by a particular proposal or proposals, # even if they didn't get any observations. tableName = 'Field' if propID is not None: query = 'select f.%s, f.%s, f.%s from %s as f' %(self.fieldIdCol, self.raCol, self.decCol, self.dbTables['Field'][0]) query += ', %s as p where (p.Field_%s = f.%s) ' %(self.dbTables['Proposal_Field'][0], self.fieldIdCol, self.fieldIdCol) if hasattr(propID, '__iter__'): # list of propIDs query += ' and (' for pID in propID: query += '(p.Proposal_%s = %d) or ' %(self.propIdCol, int(pID)) # Remove the trailing 'or' and add a closing parenthesis. query = query[:-3] query += ')' else: # single proposal ID. query += ' and (p.Proposal_%s = %d) ' %(self.propIdCol, int(propID)) query += ' group by f.%s' %(self.fieldIdCol) fielddata = self.queryDatabase(tableName, query) if len(fielddata) == 0: fielddata = np.zeros(0, dtype=list(zip([self.fieldIdCol, self.raCol, self.decCol], ['int', 'float', 'float']))) else: table = self.tables[tableName] fielddata = table.query_columns_Array(colnames=[self.fieldIdCol, self.raCol, self.decCol], groupByCol = self.fieldIdCol) if degreesToRadians: fielddata[self.raCol] = fielddata[self.raCol] * np.pi / 180. fielddata[self.decCol] = fielddata[self.decCol] * np.pi / 180. return fielddata
[ "def get_fields(self):\n config = self.config['locations']['arcGIS']\n url = f\"{config['url']}{config['fields']['endpoint']}\"\n params = config['fields']['params']\n field_coordinates = self.get_converted_coordinates(\n url, params, self.proj_3857\n )\n\n field_locations = []\n ignored_fields = []\n\n for feature in field_coordinates['features']:\n attrs = feature['attributes']\n # Only fetch the location has a valid Prop_ID and Expose is 'Y'\n if (\n utils.is_valid_field(attrs['Prop_ID'])\n and attrs['Expose'] == 'Y'\n ):\n field_location = FieldLocation(feature)\n field_locations.append(field_location)\n else:\n ignored_fields.append(attrs['OBJECTID'])\n\n if ignored_fields:\n logger.warning((\n \"These fields OBJECTID's were ignored because they don't have\"\n f\"a valid Prop_ID or shouldn't be exposed: {ignored_fields}\\n\"\n ))\n\n return field_locations", "def get_fields(self):\n if not self.fields:\n with DatabaseConnection(\n self.conn_dic, self.table, geometry_column=self.geom\n ) as db:\n self.fields = db.fields\n if self.source_srid is None:\n self.source_srid = db.source_srid\n\n return self.fields", "def get_field(self, _id):\n for field in self.field:\n if field.id == _id:\n return field\n else:\n raise RuntimeError('Field not found for id: ' + _id)", "def get_field_by_id_for_dataset_by_id(self, datasetid: str, fieldid: str, query_params: Dict[str, object] = None) -> Field:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetid\": datasetid,\n \"fieldid\": fieldid,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetid}/fields/${fieldid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Field)", "def get_field_info(ar, column_names=None):\n def getit():\n\n if ar.request is None:\n columns = None\n else:\n columns = [\n str(x) for x in ar.request.REQUEST.getlist(\n constants.URL_PARAM_COLUMNS)]\n if columns:\n all_widths = ar.request.REQUEST.getlist(\n constants.URL_PARAM_WIDTHS)\n hiddens = [\n (x == 'true') for x in ar.request.REQUEST.getlist(\n constants.URL_PARAM_HIDDENS)]\n fields = []\n widths = []\n ah = ar.actor.get_handle()\n for i, cn in enumerate(columns):\n col = None\n for e in ah.list_layout.main.columns:\n if e.name == cn:\n col = e\n break\n if col is None:\n raise Exception(\"No column named %r in %s\" %\n (cn, ar.ah.list_layout.main.columns))\n if not hiddens[i]:\n fields.append(col)\n widths.append(int(all_widths[i]))\n else:\n if column_names:\n from lino.core import layouts\n ll = layouts.ColumnsLayout(column_names, datasource=ar.actor)\n lh = ll.get_layout_handle(settings.SITE.kernel.default_ui)\n columns = lh.main.columns\n columns = [e for e in columns if not e.hidden]\n else:\n ah = ar.actor.get_request_handle(ar)\n columns = ah.list_layout.main.columns\n\n # render them so that babelfields in hidden_languages get hidden:\n for e in columns:\n e.value = e.ext_options()\n #\n columns = [e for e in columns if not e.value.get('hidden', False)]\n\n columns = [e for e in columns if not e.hidden]\n\n widths = [\"%d\" % (col.width or col.preferred_width)\n for col in columns]\n #~ 20130415 widths = [\"%d%%\" % (col.width or col.preferred_width) for col in columns]\n #~ fields = [col.field._lino_atomizer for col in columns]\n fields = columns\n\n headers = [column_header(col) for col in fields]\n\n oh = ar.actor.override_column_headers(ar)\n if oh:\n for i, e in enumerate(columns):\n header = oh.get(e.name, 
None)\n if header is not None:\n headers[i] = header\n #~ print 20120507, oh, headers\n\n return fields, headers, widths\n \n u = ar.get_user()\n if u is None:\n return getit()\n else:\n return jsgen.with_user_profile(u.profile, getit)", "def get_field_by_id_for_dataset(self, datasetresourcename: str, fieldid: str, query_params: Dict[str, object] = None) -> Field:\n if query_params is None:\n query_params = {}\n\n path_params = {\n \"datasetresourcename\": datasetresourcename,\n \"fieldid\": fieldid,\n }\n\n path = Template(\"/catalog/v2alpha2/datasets/${datasetresourcename}/fields/${fieldid}\").substitute(path_params)\n url = self.base_client.build_url(path)\n response = self.base_client.get(url, params=query_params)\n return handle_response(response, Field)", "def readZTFfields(fielddef_file=\"/home/matteo/work/ZTF/Calibration/ZTF_Fields.txt\"):\n ftab_cols=[\n \"ID\", \"RA\", \"Dec\", \"Ebv\", \"Gal Long\", \n \"Gal Lat\", \"Ecl Long\", \"Ecl Lat\", \"Entry\"]\n fields=Table.read(fielddef_file, format='ascii', data_start=1, \n names=ftab_cols)\n primary=fields[fields['ID']<=879]\n secondary=fields[fields['ID']>=1001]\n return primary, secondary", "def fetchPropInfo(self):\n propIDs = {}\n # Add WFD and DD tags by default to propTags as we expect these every time. (avoids key errors).\n propTags = {'WFD':[], 'DD':[]}\n # If do not have full database available:\n if 'Proposal' not in self.tables:\n propData = self.tables['Summary'].query_columns_Array(colnames=[self.propIdCol])\n for propid in propData[self.propIdCol]:\n propIDs[int(propid)] = propid\n else:\n table = self.tables['Proposal']\n # Query for all propIDs.\n propData = table.query_columns_Array(colnames=[self.propIdCol, self.propConfCol,\n self.propNameCol], constraint='')\n for propid, propname in zip(propData[self.propIdCol], propData[self.propConfCol]):\n # Strip '.conf', 'Prop', and path info.\n propIDs[int(propid)] = re.sub('Prop','', re.sub('.conf','', re.sub('.*/', '', propname)))\n # Find the 'ScienceType' from the config table, to indicate DD/WFD/Rolling, etc.\n table = self.tables['Config']\n sciencetypes = table.query_columns_Array(colnames=['paramValue', 'nonPropID'],\n constraint=\"paramName like 'ScienceType'\")\n if len(sciencetypes) == 0:\n # Then this was an older opsim run without 'ScienceType' tags,\n # so fall back to trying to guess what proposals are WFD or DD.\n for propid, propname in propIDs.items():\n if 'universal' in propname.lower():\n propTags['WFD'].append(propid)\n if 'deep' in propname.lower():\n propTags['DD'].append(propid)\n else:\n # Newer opsim output with 'ScienceType' fields in conf files.\n for sc in sciencetypes:\n # ScienceType tag can be multiple values, separated by a ','\n tags = [x.strip(' ') for x in sc['paramValue'].split(',')]\n for sciencetype in tags:\n if sciencetype in propTags:\n propTags[sciencetype].append(int(sc['nonPropID']))\n else:\n propTags[sciencetype] = [int(sc['nonPropID']),]\n return propIDs, propTags", "def readField(self, *args):\r\n return _osgDB.FieldReader_readField(self, *args)", "def getZTFfield(fid, fields):\n found=[o for o in fields if o.id==fid]\n if len(found)!=1:\n print \"big problems.....\"\n return\n return found[0]", "def _load_fields(self, field_filename):\n\n df = pd.read_csv(field_filename,\n names=['field_id','ra','dec','ebv','l','b',\n 'ecliptic_lon', 'ecliptic_lat', 'number'],\n sep='\\s+',usecols=['field_id','ra','dec', 'l','b', \n 'ecliptic_lon', 'ecliptic_lat'],index_col='field_id',\n skiprows=1)\n\n\n # drop fields below dec of -32 
degrees for speed\n # (grid_id = 0 has a row at -31.5)\n df = df[df['dec'] >= -32]\n\n # label the grid ids\n grid_id_boundaries = \\\n {0: {'min':1,'max':999},\n 1: {'min':1001,'max':1999},\n 2: {'min':2001,'max':2999},\n 3: {'min':3001,'max':3999}}\n\n # intialize with a bad int value\n df['grid_id'] = 99\n\n for grid_id, bounds in list(grid_id_boundaries.items()):\n w = (df.index >= bounds['min']) & \\\n (df.index <= bounds['max'])\n df.loc[w,'grid_id'] = grid_id\n\n self.fields = df\n self.field_coords = self._field_coords()", "def extractFields(self, dxlFileContent):\n \n extractedFields = []\n fields = dxlFileContent.getElementsByTagName(\"field\")\n \n for field in fields:\n dico = {}\n settings = {}\n dico['type'] = 'PlominoField'\n dico['id'], dico['title'] = self.getIdTitleAttributes(field)\n\n # Field types ----\n # set the fieldType from the dict in dxlConfig.py \n if field.getAttribute('type') in FIELD_TYPES:\n dico['FieldType'] = FIELD_TYPES[field.getAttribute('type')]\n else:\n dico['FieldType'] = 'TEXT'\n \n # import the field settings ----\n # - Text field\n if dico['FieldType'] == 'TEXT':\n # widget\n if field.getAttribute(\"multiline\"):\n settings['widget'] = 'TEXTAREA'\n else:\n settings['widget'] = 'TEXT'\n \n # - Number field\n if dico['FieldType'] == 'NUMBER':\n settings['type'] = 'FLOAT' # to avoid loosing information from dxl file \n \n \n # - Selection field\n if dico['FieldType'] == 'SELECTION':\n # widget\n if field.getElementsByTagName(\"keywords\")[0].getAttribute(\"ui\") in FIELD_TYPES_ATTR:\n settings['widget'] = FIELD_TYPES_ATTR[field.getElementsByTagName(\"keywords\")[0].getAttribute('ui')]\n else:\n settings['widget'] = 'SELECT'\n \n # list of items\n if field.getElementsByTagName(\"textlist\")[0].getElementsByTagName(\"text\") is not None:\n selectionList = []\n for entry in field.getElementsByTagName(\"textlist\")[0].getElementsByTagName(\"text\"):\n selectionList.append(entry.firstChild.nodeValue)\n \n settings['selectionlist'] = selectionList\n \n else:\n settings['selectionlist'] = ['Selection list not set']\n # TODO: tester lorsque les paramètres n'existent pas\n \n # - Name field\n if dico['FieldType'] == 'NAME':\n # type\n if field.getAttribute(\"allowmultivalues\"):\n settings['type'] = 'MULTI'\n # separator\n # if field.getAttribute(\"listinputseparators\") in FIELD_TYPES_ATTR:\n # settings['separator'] = FIELD_TYPES_ATTR[field.getAttribute(\"listinputseparators\")]\n # else:\n # settings['separator'] = ''\n \n else:\n settings['type'] = 'SINGLE'\n\n dico['settings'] = settings\n\n # Field mode ----\n if field.getAttribute('kind') in FIELD_MODES:\n dico['FieldMode'] = FIELD_MODES[field.getAttribute('kind')]\n else: \n dico['FieldMode'] = 'EDITABLE'\n \n # formula and ValidationFormula ----\n dico['ValidationFormula'] = ''\n dico['formula'] = ''\n if dico['FieldMode'] != 'EDITABLE':\n for code in self.extractCode(field):\n if code['event'] == 'inputvalidation':\n dico['ValidationFormula'] = '# ' + code['content']\n else:\n dico['formula'] = '# ' + code['content']\n \n# '\\n#------------ \\n# code from lotus domino' + \\\n# '\\n# Event: ' + code['event'] + \\\n# '\\n# code type: ' + code['type'] + \\\n# '\\n#------------ \\n# ' + str(code['content']).replace('\\n', '\\n# ') \n\n extractedFields.append(dico)\n\n return extractedFields", "def get_field(self, field_name):\n for field in self.fields:\n if field.name == field_name:\n return field\n\n return None", "def load_fieldinfo(self, preprocess = True, columns = None, isindict = None, 
show_progress=False):\n if show_progress:\n show_progress='Loading Field Info'\n if preprocess and os.path.exists(os.path.join(self.path2database, 'fieldinfo')):\n return pd.read_hdf(os.path.join(self.path2database, 'fieldinfo', 'fieldnames.hdf'))\n else:\n return self.parse_fields()", "def get_field_info(self):\n return self.world.field_info", "def project_field(shape, shape_data, projection, efield):\n if projection == \"random\":\n return np.array(efield)\n elif projection == \"parallel\":\n if shape == \"sphere\":\n return np.array(efield)\n else:\n return np.array(shape_data)\n elif projection == \"perpendicular\":\n if shape == \"sphere\":\n return np.array(efield)\n else:\n proj_field = []\n data = np.array(shape_data)\n for field in efield:\n proj = field - (np.dot(data, field) * data)\n size = np.linalg.norm(proj_field)\n if size > 0.0001:\n proj_field.append(proj)\n # end if\n # end for\n return np.array(proj_field)\n else:\n print(\"Error in project_field, projection unkown: \", projection, file=sys.stderr)\n exit(1)\n return", "def types_using_field(coll, field_id, property_uri):\n type_ids = set()\n type_uris = set()\n group_ids = set()\n # Look at field definition\n f = coll_field(coll, field_id)\n add_to_set(f.get(ANNAL.CURIE.field_entity_type, \"\"), type_uris)\n # Look at groups that reference field\n for g in coll_groups(coll):\n if field_in_field_list(g[ANNAL.CURIE.group_fields], field_id, property_uri):\n add_to_set(g.get_id(), group_ids)\n add_to_set(extract_entity_id(g.get(ANNAL.CURIE.record_type, \"\")), type_uris)\n # Look at views that reference field or groups\n for v in coll_views(coll):\n if ( field_in_field_list(v[ANNAL.CURIE.view_fields], field_id, property_uri) or\n group_in_field_list(v[ANNAL.CURIE.view_fields], coll, group_ids) ):\n add_to_set(extract_entity_id(v.get(ANNAL.CURIE.record_type, \"\")), type_uris)\n # Look at lists that reference field or groups\n for l in coll_lists(coll):\n if ( field_in_field_list(l[ANNAL.CURIE.list_fields], field_id, property_uri) or\n group_in_field_list(l[ANNAL.CURIE.list_fields], coll, group_ids) ):\n add_to_set(extract_entity_id(l.get(ANNAL.CURIE.record_type, \"\")), type_uris)\n add_to_set(extract_entity_id(l.get(ANNAL.CURIE.default_type, \"\")), type_uris)\n # Collect type ids\n for t in coll_types(coll):\n type_uri = t.get(ANNAL.CURIE.uri, \"\")\n supertype_uris = set( s[ANNAL.CURIE.supertype_uri] for s in t.get(ANNAL.CURIE.supertype_uris,[]) )\n if (type_uri in type_uris) or (supertype_uris & type_uris):\n add_to_set(t.get_id(), type_ids)\n return type_ids", "def getFieldDecimal(self, n_field):\n assert 0, u'Empty method'", "def getFieldData(self) -> \"SoFieldData const *\":\n return _coin.SoSelectOne_getFieldData(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the proposal IDs as well as their (short) proposal names and science type tags from the full opsim database. Returns dictionary of propID / propname, and dictionary of propTag / propID. If not using a full database, will return dict of propIDs with empty propnames + empty propTag dict.
def fetchPropInfo(self): propIDs = {} # Add WFD and DD tags by default to propTags as we expect these every time. (avoids key errors). propTags = {'WFD':[], 'DD':[]} # If do not have full database available: if 'Proposal' not in self.tables: propData = self.tables['Summary'].query_columns_Array(colnames=[self.propIdCol]) for propid in propData[self.propIdCol]: propIDs[int(propid)] = propid else: table = self.tables['Proposal'] # Query for all propIDs. propData = table.query_columns_Array(colnames=[self.propIdCol, self.propConfCol, self.propNameCol], constraint='') for propid, propname in zip(propData[self.propIdCol], propData[self.propConfCol]): # Strip '.conf', 'Prop', and path info. propIDs[int(propid)] = re.sub('Prop','', re.sub('.conf','', re.sub('.*/', '', propname))) # Find the 'ScienceType' from the config table, to indicate DD/WFD/Rolling, etc. table = self.tables['Config'] sciencetypes = table.query_columns_Array(colnames=['paramValue', 'nonPropID'], constraint="paramName like 'ScienceType'") if len(sciencetypes) == 0: # Then this was an older opsim run without 'ScienceType' tags, # so fall back to trying to guess what proposals are WFD or DD. for propid, propname in propIDs.items(): if 'universal' in propname.lower(): propTags['WFD'].append(propid) if 'deep' in propname.lower(): propTags['DD'].append(propid) else: # Newer opsim output with 'ScienceType' fields in conf files. for sc in sciencetypes: # ScienceType tag can be multiple values, separated by a ',' tags = [x.strip(' ') for x in sc['paramValue'].split(',')] for sciencetype in tags: if sciencetype in propTags: propTags[sciencetype].append(int(sc['nonPropID'])) else: propTags[sciencetype] = [int(sc['nonPropID']),] return propIDs, propTags
[ "def testOpsimDbPropID(self):\n propids, propTags = self.oo.fetchPropInfo()\n self.assertTrue(len(list(propids.keys())) > 0)\n self.assertTrue(len(propTags['WFD']) > 0)\n self.assertTrue(len(propTags['DD']) > 0)\n for w in propTags['WFD']:\n self.assertTrue(w in propids)\n for d in propTags['DD']:\n self.assertTrue(d in propids)", "def __fetchProvenance(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath)\n pD = provU.fetch()\n return pD[self.__provKeyName] if self.__provKeyName in pD else {}\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def db_get_galleryprops(galleryname):\n \n # Use function in lidpix_db which returns row?\n \n return Gallery_object", "def get_proposals():\n\n url = f\"{BLUZELLE_PRIVATE_TESTNET_URL}:{BLUZELLE_API_PORT}/cosmos/gov/v1beta1/proposals\"\n result = requests.get(url)\n if result.status_code != 200:\n returnReqError(url, result)\n return None\n\n proposals = result.json()[\"proposals\"]\n\n proposal_list = []\n for proposal in proposals:\n # Total deposited amount\n amount = int(float(proposal[\"total_deposit\"][0][\"amount\"]) / BLZ_UBNT_RATIO)\n\n # Format status\n status = \" \".join([t.capitalize() for t in proposal[\"status\"].split(\"_\")[2:]])\n\n # Format submit time\n submit_time = datetime.datetime.strptime(\n proposal[\"submit_time\"][:26], \"%Y-%m-%dT%H:%M:%S.%f\"\n )\n formatted_submit_time = submit_time.strftime(\"%d %b %Y, %#I:%M:%S%p UTC\")\n\n # Format voting start time\n voting_start_time = datetime.datetime.strptime(\n proposal[\"voting_start_time\"][:26], \"%Y-%m-%dT%H:%M:%S.%f\"\n )\n formatted_voting_start_time = voting_start_time.strftime(\n \"%d %b %Y, %#I:%M:%S%p UTC\"\n )\n\n proposal_list.append(\n {\n \"id\": proposal[\"proposal_id\"],\n \"title\": proposal[\"content\"][\"title\"],\n \"status\": status,\n \"submit_time\": formatted_submit_time,\n \"voting_start_time\": formatted_voting_start_time,\n \"total_deposit\": f\"{amount} {BLZ_SYMBOL}\",\n }\n )\n\n # Reverse in order to get latest proposal first\n proposal_list.reverse()\n\n return proposal_list", "def get_perspectives_dictionaries(res_lexical_entries):\n\n perspective_ids = (\n\n set(\n lexical_entry.dbObject.parent_id\n for lexical_entry in res_lexical_entries))\n\n if len(perspective_ids) > 2:\n perspective_ids = ids_to_id_query(perspective_ids)\n\n perspective_query = (\n\n DBSession\n\n .query(\n dbPerspective)\n\n .filter(\n\n tuple_(\n dbPerspective.client_id,\n dbPerspective.object_id)\n\n .in_(\n perspective_ids)))\n\n log.debug(\n '\\n perspective_query:\\n ' +\n str(perspective_query.statement.compile(compile_kwargs = {\"literal_binds\": True})))\n\n perspective_list = (\n perspective_query.all())\n\n res_perspectives = [\n graphene_obj(perspective, Perspective)\n for perspective in perspective_list]\n\n dictionary_ids = (\n\n set(\n perspective.dbObject.parent_id\n for perspective in res_perspectives))\n\n if len(dictionary_ids) > 2:\n dictionary_ids = ids_to_id_query(dictionary_ids)\n\n dictionary_query = (\n\n DBSession\n\n .query(\n dbDictionary)\n\n .filter(\n\n tuple_(\n dbDictionary.client_id,\n dbDictionary.object_id)\n\n .in_(\n dictionary_ids)))\n\n log.debug(\n '\\n dictionary_query:\\n ' +\n str(dictionary_query.statement.compile(compile_kwargs = {\"literal_binds\": True})))\n\n dictionary_list = (\n dictionary_query.all())\n\n res_dictionaries = [\n graphene_obj(dictionary, Dictionary)\n for dictionary in dictionary_list]\n\n return res_perspectives, res_dictionaries", "def 
proteinDl(combinedId):\n print(\"Downloading secondary metabolite proteins\")\n\n proteins = bio.dbFetch(\"\"\"\n SELECT torg.name, torg.org_id, proteins.prot_seqkey, sp.sm_short, proteins.prot_seq FROM (SELECT * FROM organism WHERE name IN ('%s')) torg\n JOIN smurf_papa AS sp ON torg.org_id = sp.org_id AND sp.sm_short != 'none'\n JOIN proteins ON sp.org_id = proteins.org_id AND sp.sm_protein_id = proteins.prot_seqkey;\n \"\"\" % \"','\".join(orgs) )\n\n proteins = [(org, org_id, protein_id, sm_short, bio.cleanProtSeq(seq.decode(\"UTF-8\"))) for org, org_id, protein_id, sm_short, seq in proteins]\n\n return(proteins)", "async def GetEvolutions(self):\n from_list = []\n into_list = []\n evolution_list = self.flat_evolution_list(\n await PokemonFetch().get_pokemon_evolution_chain(self.EvolutionChainUrl),\n new_l = [])\n\n for n, i in enumerate(evolution_list):\n if str(self.ID) in i:\n if n-1!=-1:\n from_list = evolution_list[n-1]\n if n+1!=len(evolution_list):\n into_list = evolution_list[n+1]\n\n full_list = await PokemonFetch().get_pokemon_id_list(from_list + into_list)\n\n return {\"from\":[i for i in full_list if str(i.ID) in from_list],\n \"into\":[i for i in full_list if str(i.ID) in into_list]}", "def load_prochi_maps(self,projpref):\n query = {}\n cursor = self.prochi_map.find(query)\n\n for doc in cursor:\n if 'MProchi' in doc:\n if doc['Prochi'].startswith(projpref):\n self.mprochiToProchi[doc['MProchi']] = doc['Prochi']\n if 'Anochi' in doc:\n #print doc['MProchi'], doc['Anochi']\n self.mprochiToAnochi[doc['MProchi']] = doc['Anochi']\n if 'plateId' in doc:\n self.plateIdToProchi[doc['plateId']] = doc['Prochi']", "def uniprotAPICall(protein_name):\n # API call to UniRef DB\n base_url = \"http://www.uniprot.org/uniprot/\"\n extension = \".xml\"\n my_response = requests.get(base_url + protein_name + extension)\n \n # For successful API call, response code will be 200 (OK)\n if not my_response.ok:\n print \"UniProt node not found: \" + str(protein_name) \n return\n\n # get root of the XML response\n root = ET.fromstring(my_response.content)\n rep_member = root.find('{http://uniprot.org/uniprot}entry')\n\n # set up dict to put in info\n member_dict = {}\n\n # Add any properties that have type - id pairings\n for prop in rep_member.iter():\n if 'type' in prop.attrib and 'id' in prop.attrib:\n member_dict[prop.attrib['type'].replace(\" \", \"_\")] = prop.attrib['id']\n # else:\n # member_dict[prop.attrib['type'].replace(\n # \" \", \"_\")] = prop.attrib['id']\n \n # Get protein accession. Ex: Q8KM74\n member_dict['UniProtKB_accession'] = rep_member.find('{http://uniprot.org/uniprot}accession').text\n member_dict['id'] = member_dict['UniProtKB_accession']\n\n # Get specific protein accession. 
Ex: Q8KM74_METTR\n member_dict['UniProtKB_ID'] = rep_member.find('{http://uniprot.org/uniprot}name').text\n\n # Get source organism\n member_dict['source_organism'] = rep_member.find('{http://uniprot.org/uniprot}organism').find('{http://uniprot.org/uniprot}name').text\n\n # Get protein existance: http://www.uniprot.org/help/protein_existence\n member_dict['protein_existence'] = rep_member.find('{http://uniprot.org/uniprot}proteinExistence').attrib['type'] if 'type' in rep_member.find('{http://uniprot.org/uniprot}proteinExistence').attrib else None\n \n # Get protein length\n member_dict['length'] = int(rep_member.find('{http://uniprot.org/uniprot}sequence').attrib['length']) if 'length' in rep_member.find('{http://uniprot.org/uniprot}sequence').attrib else None\n\n #print member_dict\n #name = UniProtKB_accession, UniProtKB_ID (has the _1343), UniProtKB_accession, id = UniProtKB_ID, length, protein_name, source_organism, NCBI_taxonomy, UniParc_ID, Pfam,Supfam\n\n return ClustNode(member_dict)", "def get_properties(\n self,\n qids: list,\n pids: list,\n pids_to_label: Union[list, str] = None,\n replace_values_with_labels: bool = False,\n page_size: int = 50,\n ) -> pd.DataFrame:\n res_generator = self.ge.result_generator(\n qids, page_limit=page_size, timeout=self.timeout\n )\n\n if pids_to_label is not None:\n if isinstance(pids_to_label, list):\n pids_all = list(set(pids + pids_to_label))\n elif pids_to_label == \"all\":\n pids_all = list(set(pids))\n pids_to_label = pids_all\n else:\n pids_all = list(set(pids))\n\n docs = flatten_list_of_lists(\n [\n simplify_wbgetentities_result(\n doc, lang=\"en\", properties=pids_all, use_redirected_qid=False\n )\n for doc in res_generator\n ]\n )\n doc_df = pd.json_normalize(docs)\n\n # add columns with empty string values for any that are missing\n proposed_cols = self._pids_to_df_cols(pids_all)\n actual_cols = [col for col in doc_df.columns if col.startswith(\"claims\")]\n extra_cols = list(set(proposed_cols) - set(actual_cols))\n\n for c in extra_cols:\n doc_df[c] = \"\"\n\n self.doc_df = doc_df\n\n if pids_to_label is not None:\n self.get_labels_for_properties(\n pids_to_label, replace_qids=replace_values_with_labels\n )", "def pageprops(self):\n params = {\n 'action': 'query',\n 'titles': self.title,\n 'prop': 'pageprops',\n }\n return tuple(self.wiki.request(**params)['query']['pages']\n .values())[0]['pageprops']", "def primer_dict( db, plates ):\n dd = defaultdict(list)\n for well, primer in MASTER_MIX_TEMPLATE.items():\n dd[primer].append(well)\n for plate in plates:\n primers, _ = plate_to_custom_primers( db, plate )\n primer_counts = [ (primer.name, primers[pp])\n for pp in sorted(primers)\n for primer in (pp.fwd_primer, pp.rev_primer) ]\n for primer_name, lines in rows_for_custom_primers( primer_counts ):\n for wells in lines:\n dd[primer_name].extend( wells )\n return dict( (primer_name, sorted(l)) for primer_name, l in dd.items() )", "def pageprops(self):\n params = {\n 'action': 'query',\n 'titles': self.title,\n 'prop': 'pageprops',\n }\n return list(self.wiki.request(**params)['query']['pages']\n .values())[0]['pageprops']", "def getProtAssoc(databaseName, path, idProt=\"Hepcidin\"):\n\t\n\t\n\t\n\tconnect, cursor = connection(path+\"/\"+databaseName)\n\t#cursor = connect.cursor()\n\t\n\t#PRINT SOME INFORMATIONS\n\tprint(\"SQL: SELECT DISTINCT LOWER(TargetLabel) FROM \"+bcolors.HEADER+\"tname\"+bcolors.ENDC+\" WHERE LOWER(SourceLabel) LIKE LOWER(\\\"%\"+bcolors.HEADER+idProt+bcolors.ENDC+\"%\\\") AND 
LOWER(TargetEntityType)=LOWER(\\\"p\\\") ORDER BY Period\")\n\tprint(\"ProtID querry: \"+bcolors.HEADER+idProt+bcolors.ENDC)\n\t\n\t#DO THE MATHS\n\tcursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\") #get all tables names\n\tfor ttuples in cursor.fetchall():\n\t\ttname = ttuples[0]\n\t\tprint(\"Searching assoc in \" +bcolors.HEADER+tname+bcolors.ENDC+ \" ...\")\n\n\t\tsqlstr = \"SELECT DISTINCT LOWER(TargetLabel) FROM \" +tname+ \" WHERE LOWER(SourceLabel) LIKE LOWER(\\\"%\"+idProt+\"%\\\") AND LOWER(TargetEntityType)=LOWER(\\\"p\\\") ORDER BY Period\"\n\t\tcursor.execute(sqlstr)\n\n\t\t#FILE WRITING\n\t\twith open(path+\"/requestResult/\"+idProt+\"_protAssoc_\"+tname+\".txt\", \"w\") as f:\n\t\t\tfor elements in cursor.fetchall():\n\t\t\t\tf.write(elements[0]+\"\\n\")\n\n\tconnect.commit()\n\tcloseConnection(cursor, connect)", "def get_job_info(self):\n try:\n rows = self.db_manager.get_conn().execute((\n \"select profile_job.id as pid, profile_job.workload_id, \" +\n \"profile_job.work_instance_id, profile_job.execution_time, \" +\n \"profile_job.exit_status, profile_job.status, \" +\n \"workload.id as wid, \" +\n \"workload.working_dir, workload.client_id \" +\n \"from profile_job, workload where profile_job.id = %s and \" +\n \"workload.id = profile_job.workload_id\") % self.job_id)\n for row in rows:\n info = {'id' : row['pid'], \n 'workload_id' : row['workload_id'],\n 'work_instance_id' : row['work_instance_id'],\n 'execution_time' : row['execution_time'],\n 'exit_status' : row['exit_status'],\n 'status' : row['status'],\n 'working_dir' : row['working_dir'],\n 'client_id' : row['client_id'],\n 'executable' : self.job_desc['executable'],\n 'params' : self.params,\n 'inst_type' : self.inst_type,\n 'workload_name' : self.workload_name}\n return info\n except psycopg2.Error:\n self.logger.exception(\"Error getting inst types from database.\")\n self.logger.debug(\"The set of instances from the database:\")", "def fetch_uniprot_gene_map(self, taxon_id):\n protein_dict = dict()\n params = urllib.parse.urlencode(\n {'query': self._build_biomart_gene_query(str(taxon_id))})\n conn = http.client.HTTPConnection('www.ensembl.org')\n conn.request(\"GET\", '/biomart/martservice?' 
+ params)\n response = conn.getresponse()\n for line in response:\n line = line.decode('utf-8').rstrip()\n row = line.split('\\t')\n if len(row) < 7:\n continue\n (ensembl_gene_id, external_gene_name,\n description, gene_biotype, entrezgene,\n peptide_id, uniprot_swissprot) = row[0:7]\n protein_dict[str(uniprot_swissprot)] = ensembl_gene_id\n conn.close()\n return protein_dict", "def pi_group_browser():\n\n c = get_cursor()\n\n c.execute(\"\"\"select * from pi_design_group\"\"\")\n\n rows = c.fetchall()\n # now grab the associated product designs\n for row in rows:\n c.execute(\"\"\"select pd.*, p.name as product_name\n from (product_design as pd, product as p)\n where pd.pi_design_group_id = %s\n and p.product_id = pd.product_id\n order by product_design_id\"\"\",\n (row['pi_design_group_id'],))\n\n row['product_designs'] = c.fetchall()\n for product_design in row['product_designs']:\n c.execute(\"\"\"select b480x430_afile, b96x96_afile\n from product_design_detail_image\n where product_design_id = %s\n order by seq\"\"\",\n (product_design['product_design_id'],))\n product_design['detail_images'] = c.fetchall()\n\n pi_groups = {\n 'pi_design_groups': rows\n }\n\n c.execute(\"\"\"select * from pi_product_group\"\"\")\n\n rows = c.fetchall()\n # now grab the associated product designs\n for row in rows:\n c.execute(\"\"\"select pd.*, p.name as product_name\n from (product_design as pd, product as p)\n where pd.pi_product_group_id = %s\n and p.product_id = pd.product_id\n order by product_design_id\"\"\",\n (row['pi_product_group_id'],))\n\n row['product_designs'] = c.fetchall()\n for product_design in row['product_designs']:\n c.execute(\"\"\"select b480x430_afile, b96x96_afile\n from product_design_detail_image\n where product_design_id = %s\n order by seq\"\"\",\n (product_design['product_design_id'],))\n product_design['detail_images'] = c.fetchall()\n\n pi_groups['pi_product_groups'] = rows\n\n return pi_groups", "def get_pmids_for_dois(doi_records,verbose=False):\n print('Grabbing information from pubmed')\n print('This will take a while because we have to throttle our request rate')\n doi_pmid_cvt={}\n doi_pmids=[]\n bad_cvt=[]\n for id in doi_records.keys():\n\n time.sleep(0.5) # slow down requests so that we don't get locked out\n # first try searching using the DOI\n handle = Entrez.esearch(db=\"pubmed\", retmax=10, term=id)\n record = Entrez.read(handle)\n handle.close()\n # if DOI search fails, then try searching using title\n if len(record['IdList'])!=1:\n if verbose:\n print('%d matches for doi, trying title'%len(record['IdList']))\n handle = Entrez.esearch(db=\"pubmed\", retmax=10, term=doi_records[id]['message']['title'][0])\n record = Entrez.read(handle)\n\n if len(record['IdList'])==1:\n doi_pmid_cvt[id]=record['IdList'][0]\n doi_pmids.append(record['IdList'][0])\n if verbose:\n print(record['IdList'])\n else:\n print('no/bad PMID for %s (%d records)'%(id,len(record['IdList'])))\n if verbose:\n print(record['IdList'])\n bad_cvt.append(id)\n\n if verbose:\n print('problem searching for %s'%id)\n\n pickle.dump(doi_pmid_cvt,open( 'doi_pmid_cvt.pkl','wb'))\n pickle.dump(doi_pmids,open( 'doi_pmids.pkl','wb'))\n return doi_pmids,doi_pmid_cvt,bad_cvt", "def db_pop_personnel(db):\n\n # get data\n phones = readNumberFile()\n names = readNameFile()\n middle_is = string.ascii_uppercase # string\n street_names = ['Main Street', 'Dublin Avenue', 'First Street', 'Fourteenth Avenue',\n 'Washington Circle', 'Jefferson Way', 'Johnson Boulevard', 'Adams Trail']\n zip_codes = ['23421', 
'23456', '23590', '23481', '23406']\n cities = ['Huntsville', 'Madison', 'Athens', 'Hampton Cove', 'Owens Cross Roads']\n positions = ['Driver', 'Office Worker', 'Shipment Manager', 'Maintenance Worker']\n categories = [0, 1, 2, 3]\n\n # get n entries (n<100)\n n = 20\n phones = phones[:n]\n names = names[:n]\n\n for name, phone in zip(names, phones):\n # get data for personnel\n first_name = name[0].rstrip('\\n')\n last_name = name[1].rstrip('\\n')\n middle_i = middle_is[randint(0, 25)]\n ph = phone.rstrip('\\n')\n address = str(randint(10, 5000)) + ' ' + sample(street_names, 1)[0] + \\\n ' ' + sample(cities, 1)[0] + ', AL ' + sample(street_names, 1)[0]\n start_date = fake.date_between(start_date='-30y', end_date='today')\n rate = float(randint(15000, 150000))\n category = categories[randint(0, 3)]\n\n if category == 0:\n position = \"Office Worker\"\n elif category == 1:\n position = \"Shipment Manager\"\n elif category == 2:\n position = \"Maintenance\"\n else:\n position = \"Driver\"\n # populate Personnel\n p = Personnel()\n p.first_name = first_name\n p.last_name = last_name\n p.middle_i = middle_i\n p.address = address\n p.phone = ph\n p.rate = rate\n p.start_time = start_date\n p.position = position\n p.password_plain = 'password'\n p.category = category\n\n db.session.add(p)\n db.session.commit()\n\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the run length for a particular opsim run (years). runLengthParam = the 'paramName' in the config table identifying the run length (default nRun).
def fetchRunLength(self, runLengthParam='nRun'): if 'Config' not in self.tables: print('Cannot access Config table to retrieve runLength; using default 10 years') runLength = 10.0 else: table = self.tables['Config'] runLength = table.query_columns_Array(colnames=['paramValue'], constraint=" paramName = '%s'"%runLengthParam) runLength = float(runLength['paramValue'][0]) # Years return runLength
[ "def encode_as_run_length_string(self):\n self._create_lookup()\n column_map = self.__get_column_map()\n row_number, column_number = self.__get_scanning_dimension(column_map)\n suffix = '_%i' % (row_number)\n run_length_list = self.__convert_1D_to_run_length_list(row_number,\n column_number)\n run_length_string = self.__convert_rl_list_to_string(run_length_list)\n run_length_string += suffix\n return run_length_string", "def testOpsimDbRunLength(self):\n nrun = self.oo.fetchRunLength()\n self.assertEqual(nrun, 0.0794)", "def findLengthFromParam(*args, **kwargs):\n \n pass", "def __len__(self):\n length = int(np.ceil(len(self.samples) / float(self.batch_size)))\n return length", "def get_segment_length(self):\n # extract segment length for calculating minimun drop later\n reaches = self.reaches[[\"geometry\", \"iseg\", \"rchlen\"]].copy()\n seglen = reaches.groupby(\"iseg\")[\"rchlen\"].sum()\n self.segment_data.loc[seglen.index, \"seglen\"] = seglen\n return seglen", "def valid_length(self, length):\r\n length = math.ceil(length * self.resample)\r\n for _ in range(self.depth):\r\n length = math.ceil((length - self.kernel_size) / self.stride) + 1\r\n length = max(length, 1)\r\n for _ in range(self.depth):\r\n length = (length - 1) * self.stride + self.kernel_size\r\n length = int(math.ceil(length / self.resample))\r\n return int(length)", "def getNIterations(self):\n return self.n_iterations", "def valid_length(self, length):\n if self.resample:\n length *= 2\n\n for _ in range(self.depth):\n length = math.ceil((length - self.kernel_size) / self.stride) + 1\n length = max(1, length)\n\n for idx in range(self.depth):\n length = (length - 1) * self.stride + self.kernel_size\n\n if self.resample:\n length = math.ceil(length / 2)\n return int(length)", "def sequence_run_length(self, sequence_run_length):\n self._sequence_run_length = sequence_run_length", "def max_run_length(x: np.ndarray, val: int):\n if x.size == 0:\n return 0\n else:\n y = np.array(x[1:] != x[:-1])\n i = np.append(np.where(y), len(x) - 1)\n run_lengths = np.diff(np.append(-1, i))\n run_length_values = x[i]\n return max([rl for rl, v in zip(run_lengths, run_length_values) if v == val], default=0)", "def get_length_test_period(self):\n test_period = self.data.index[-1] - self.data.index[0]\n for filter in self.kept:\n if 'filter_time' == filter['name']:\n test_period = filter['index'][-1] - filter['index'][0]\n self.length_test_period = test_period.ceil('D').days", "def getRunCount(self):\r\n runCount = stackless.getruncount() + self.timeKeeper.getSleeperCount()\r\n return (runCount - 1) # subtract the timeKeeper tasklet\r", "def get_spectro_length(self, waveform_length: int):\n return waveform_length//self.nstep+1", "def ST_Length(geos):\n return arctern.ST_Length(geos)", "def getLength(self):\n return HopperLowLevel.getSectionLength(self.__internal_section_addr__)", "def length(self):\r\n arg_str = p2e._base._util._convert_args_to_string(\"get.object.length\", \r\n self._object._eco_id)\r\n val = p2e._app.Request(arg_str)\r\n return p2e._base._util._convert_str_to_type(val, float)", "def getLength(self):\n return HopperLowLevel.getSegmentLength(self.__internal_segment_addr__)", "def range_length(self):\n if self._range_length is None:\n self._range_length = int(np.prod([len(x) for x in self.space_map.values()]))\n return self._range_length", "def testcases_length(self):\n total = self.S(len(self.nodes), self.number_of_partitions)\n total *= len(self.target_nodes)\n total **= self.number_of_rounds\n return total" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the latitude, longitude, and height of the telescope used by the config file.
def fetchLatLonHeight(self): if 'Config' not in self.tables: print('Cannot access Config table to retrieve site parameters; using sims.utils.Site instead.') site = Site(name='LSST') lat = site.latitude_rad lon = site.longitude_rad height = site.elev else: table = self.tables['Config'] lat = table.query_columns_Array(colnames=['paramValue'], constraint="paramName = 'latitude'") lat = float(lat['paramValue'][0]) lon = table.query_columns_Array(colnames=['paramValue'], constraint="paramName = 'longitude'") lon = float(lon['paramValue'][0]) height = table.query_columns_Array(colnames=['paramValue'], constraint="paramName = 'height'") height = float(height['paramValue'][0]) return lat, lon, height
[ "def DefaultConfig(self) -> str:\n cfg = \\\n\"\"\"[Location]\nlat = 15.3\nlon = -120.2\nalt = 50\nname = Arayat\ntz = Asia/Manila\n\n[Track]\nsats = AO-92,SO-50,ISS,FO-29,FOX-1B,IO-86,AO-7,AO-27,AO-73,XW-2B,XW-2F,LILACSAT-2\n\n[Pass]\nminalt = 20.0\n\n[Tle]\nfiles = 'https://www.celestrak.com/NORAD/elements/amateur.txt,'\n\"\"\"\n return cfg", "def get_longitude(filepath):\n image_file = open(filepath, 'rb')\n tags = exifread.process_file(image_file)\n return tags['GPS GPSLongitude']", "def get_sysdig_config():\n if os.path.exists(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\"))):\n config_parser = ConfigParser.SafeConfigParser()\n config_parser.read(os.path.abspath(os.path.join(__file__, os.pardir, \"config.ini\")))\n try:\n sysdig_api_key = config_parser.get('sysdig', 'api_key')\n hostname = config_parser.get('sysdig', 'hostname')\n all_metrics = config_parser.get('sysdig', 'all_metrics').split(',')\n sysdig_http_proxy = config_parser.get('sysdig', 'sysdig_http_proxy')\n sysdig_https_proxy = config_parser.get('sysdig', 'sysdig_https_proxy')\n sysdig_metric_chunk_size= config_parser.get('sysdig', 'metric_chunk_size')\n sysdig_host_chunk_size=config_parser.get('sysdig', 'host_chunk_size')\n except ConfigParser.NoOptionError:\n logger.error(\n \"Agent not correctly configured. Check config file.\")\n sys.exit(1)\n\n if len(sysdig_api_key) == 0:\n logger.warning(\n \"Agent not correctly configured(API KEY). Check config file.\")\n exit()\n if len(hostname) == 0:\n logger.warning(\n \"Agent not correctly configured. Check config file.\")\n exit()\n\n sysdig_config = {\n \"sysdig_api_key\": sysdig_api_key,\n \"hostname\": hostname,\n \"all_metrics\": all_metrics,\n \"httpProxy\": sysdig_http_proxy,\n \"httpsProxy\": sysdig_https_proxy,\n \"host_chunk_size\":sysdig_host_chunk_size,\n \"metric_chunk_size\":sysdig_metric_chunk_size\n }\n else:\n logger.warning(\"No config file found. Exiting...\")\n exit()\n\n return sysdig_config", "def get_config():\n current_dir = os.getcwd()\n config_path = find_config_path(current_dir)\n if not config_path:\n print('No .pjconfig file found')\n raise\n try:\n cf = open(config_path, 'r')\n config_text = cf.read()\n except:\n print('Unable to read the .pjconfig file')\n raise\n finally:\n cf.close()\n\n try:\n config_data = parse_json(config_text)\n except:\n print('Your .pjconfig file is not valid JSON. 
Please fix it and try again.')\n raise\n base_dir = os.path.dirname(config_path)\n\n return [config_data, base_dir]", "def getConfig():\n return Cuebot.Config", "def get_leland_location(self):\n req = requests.get(self.gps_source_url)\n return {\"lat\": req.json()['latitude'],\n \"lon\": req.json()['longitude']}", "def getLocationID():\n import ConfigParser\n config = ConfigParser.SafeConfigParser()\n config.read(CONFIG_FILE)\n try:\n locationID = config.getint('Headlines', 'locationID')\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n # Default is Rascal Micro home (Boston, USA)\n locationID = 4930956\n except Exception, e:\n print '## getLocationID ## Unexpected error: %s' % str(e)\n locationID = 4930956\n return locationID", "def get_config(self):\n return self.ag_config", "def getConfig():\n if sys.platform == \"Windows\":\n cred_file = \"C:\\\\Windows\\\\carbonblack\\\\credentials.cbc\"\n else:\n home = expanduser(\"~\")\n cred_file = f\"{home}/.carbonblack/credentials.cbc\"\n\n with open(cred_file) as file:\n datafile = file.readlines()\n for line in datafile:\n if \"url\" in line:\n address = line.split(\"=\")[1]\n elif \"token\" in line:\n auth_token = line.split(\"=\")[1]\n elif \"org\" in line:\n org = line.split(\"=\")[1]\n auth_token = str(auth_token).strip(\"\\n\")\n headers = {\n \"X-Auth-Token\": auth_token,\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n return (address, headers, org)", "def getlatlon():\n pass", "def GetBridgeInfoFromConf():\n bridges = {}\n with open('/usr/local/bluedon/www/cache/waf_bridge.conf', 'r') as f:\n for line in f.readlines():\n bridgeInfo = line.strip().split() # br0 vEth0,vEth1 num\n if len(bridgeInfo) == 3:\n bridges[bridgeInfo[0]] = [bridgeInfo[1]]\n return bridges", "def get_drone_location(self):\r\n return self._drone.x, self._drone.y, self._drone.z", "def readConfig(self):\n ##Open ConfigFile\n self.config=ConfigObj(infile='sims/tcpwater/config', unrepr=True)", "def get_config(self) -> NodeManagerConfig:", "def get_config(self):\n parser = etree.XMLParser(remove_blank_text=True)\n source = open(self.module.cfg_path)\n config = etree.parse(source, parser).getroot()\n source.close()\n return config", "def vpp_show_lisp_rloc_config(node):\n\n vat = VatExecutor()\n vat.execute_script_json_out('lisp/show_lisp_rloc_config.vat', node)\n return JsonParser().parse_data(vat.get_script_stdout())", "def get_current_secrets(file_location):\n config = configparser.ConfigParser()\n config.read(file_location)\n if 'THERMAL_CAMERA' not in config:\n config['THERMAL_CAMERA'] = {}\n if 'KEY' not in config:\n config['KEY'] = {}\n config['KEY']['key'] = Fernet.generate_key().decode('utf-8')\n return config", "def load_settings():\n # Load settings.ini\n config = configparser.ConfigParser()\n config.read(SETTINGS_PATH)\n config.sections()\n server_address = config['SERVER']['Address']\n server_port = config['SERVER']['Port']\n window_height = int(config['CLIENT']['Window height'])\n window_width = int(config['CLIENT']['Window width'])\n\n return server_address, server_port, window_height, window_width", "def weather_config() -> str:\r\n\r\n #Accessing Weather Api Key\r\n config_file = config_handle()\r\n api_key = config_file[\"api_keys\"][\"weather_key\"]\r\n return api_key" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether the seeing column is 'seeing' or 'finSeeing' (v2.x simulator vs v3.0 simulator). Returns the name of the seeing column.
def fetchSeeingColName(self): # Really this is just a bit of a hack to see whether we should be using seeing or finseeing. # With time, this should probably just go away. table = self.tables['Summary'] try: table.query_columns_Array(colnames=['seeing',], numLimit=1) seeingcol = 'seeing' except ValueError: try: table.query_columns_Array(colnames=['finSeeing',], numLimit=1) seeingcol = 'finSeeing' except ValueError: raise ValueError('Cannot find appropriate column name for seeing.') print('Using %s for seeing column name.' %(seeingcol)) return seeingcol
[ "def testOpsimDbSeeingColName(self):\n seeingcol = self.oo.fetchSeeingColName()\n self.assertTrue(seeingcol, 'finSeeing')", "def bhbe_col(heroes):\n heroes = clean_heroes(heroes)\n cond = heroes[(heroes['Eye color'].str.contains('blue',\n case=False,\n regex=True)) &\n (heroes['Hair color'].str.contains('blond',\n case=False,\n regex=True))]\n return heroes.isin(cond)['name']", "def get_col_by_name(self, col_name):\n return self.get_column_info(match_func=lambda c, col_name=col_name: c.name == col_name or c.metadata[\"real_name\"]==col_name, first=True, ignore=False)", "def hasColumn (self, columnName):\n \n return self.columnMap.has_key (columnName)", "def has_column(self, table_name, name):\n return self.execute(\n \"\"\"\n SELECT EXISTS(\n SELECT 1\n FROM information_schema.columns\n WHERE table_name=%s AND column_name=%s\n )\"\"\",\n [table_name, name],\n )[0][0]", "def _is_internal(self, colname):\n return colname in {_F.LINENO, _F.ROWIDX}", "def is_valid_col(self, col_name):\n return col_name in self.col_names", "def isHumanLocation(self):\n\t\tfact=situation_assessment_msgs.msg.Fact()\n\t\tfact.model=str(self.robot_name_)\n\t\tfact.subject=str(self.human_name_)+\"_torso\"\n\t\tfact.predicate=['isAt']\n\n\t\tres=self.queryDatabase(fact)\n\t\tif (len(res)>0):\n\t\t\t# rospy.loginfo(\"ObservationsCollector handover location is %s\",res[0])\n\t\t\treturn str(res[0]==self.handover_location_).lower()\n\t\telse:\n\t\t\t# rospy.loginfo(\"ObservationsCollector no response for isAt\")\n\t\t\treturn 'false'", "def find_or_create_col(rat_obj, usage, name, dtype):\n ncols = rat_obj.GetColumnCount()\n for col in range(ncols):\n if rat_obj.GetUsageOfCol(col) == usage:\n return col, False\n\n # got here so can't exist\n rat_obj.CreateColumn(name, dtype, usage)\n # new one will be last col\n return ncols, True", "def get_angular_difference_column_name(self):\n if hasattr(self, \"angular_difference_col_name\"):\n return self.angular_difference_col_name\n else:\n return ANGULAR_DIFFERENCE_COL_NAME", "def check_designer_action_column_names(draft):\n ret = []\n for grid in DesignerGrid.objects.filter(draft=draft):\n if len(DesignerColumnGrid.objects.filter(draft=draft, level=grid.level, \\\n column=grid.column)) == 0:\n message = \"in %s column %s row %s needs a column name.\" % (grid.level, \\\n grid.column, \\\n grid.row)\n ret.append(Error(message=message, action=grid.action))\n return ret", "def is_hidden(self, row, column):\n\n return self.board[row][column].cellStatus\n pass", "def supports_gradebook_column_lookup(self):\n return # boolean", "def isWinningCol(board):\n for col in range(3):\n if board[0][col] == board[1][col] == board[2][col] and board[0][col] != blank:\n return board[0][col]\n return -1", "def frame_check(df, col, col_type=str):\n\n if not type(df) == pd.core.frame.DataFrame:\n raise TypeError('Data must be passed as a DataFrame.')\n\n if not type(col) == str:\n raise TypeError('Column name must be passed as a string.')\n \n if not col in df.columns:\n raise KeyError('Column not found in DataFrame. 
Please check column name.')", "def check_column(board, col):\n symbol = board[0][col]\n for row in range(1, SIZE):\n if board[row][col] != symbol:\n return None\n return symbol # Will only get here is all symbols in column match", "def _not_in_col(self, col, number):\n for i in range(self.size):\n if self.grid[i][col] == number:\n return False\n return True", "def severity_column_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity_column_name\")", "def get_column_number(self, col_name):\n for col_in in range(1, self.input_file['worksheet'].max_column + 1):\n if col_name in self.input_file['worksheet'].cell(column=col_in, row=1).value:\n return col_in\n return -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns opsim run name (machine name + session ID) from Session table.
def fetchOpsimRunName(self): if 'Session' not in self.tables: print('Could not access Session table to find this information.') runName = 'opsim' else: table = self.tables['Session'] res = table.query_columns_Array(colnames=['sessionID', 'sessionHost']) runName = str(res['sessionHost'][0]) + '_' + str(res['sessionID'][0]) return runName
[ "def get_next_session_name_stura():\n config = settings.VOTING_SESSIONS_CONFIG\n return get_next_session_name(config['weekday'])", "def get_session_key(self):\n return self.model['session_key']", "def redis_session_key(self):\n return RedisKeys.session_info.format(session_id=self.session_id)", "def generate_session_name():\n invoker = sts_client.get_caller_identity()\n user_match = re.fullmatch(r\"arn:aws:iam::[0-9]+:user/(.*)\", invoker['Arn'])\n if user_match:\n return str(invoker['Account']) + \"-\" + user_match.group(1)\n role_match = re.fullmatch(r\"arn:aws:sts::[0-9]+:assumed-role/.*/(.*)\", invoker['Arn'])\n if role_match:\n return role_match.group(1)\n return str(invoker['Account'])", "def get_loris_session_id(self):\n\n # check if there are any visit label in BIDS structure, if not,\n # will use the default visit label set in the config module\n visit_label = self.bids_ses_id if self.bids_ses_id else self.default_vl\n\n session = Session(\n self.db, self.verbose, self.cand_id, visit_label,\n self.center_id, self.project_id, self.cohort_id\n )\n loris_vl_info = session.get_session_info_from_loris()\n\n if not loris_vl_info:\n message = \"ERROR: visit label \" + visit_label + \"does not exist in \" + \\\n \"the session table for candidate \" + self.cand_id + \\\n \"\\nPlease make sure the visit label is created in the \" + \\\n \"database or run bids_import.py with the -s option -s if \" + \\\n \"you wish that the insertion pipeline creates the visit \" + \\\n \"label in the session table.\"\n print(message)\n exit(lib.exitcode.SELECT_FAILURE)\n\n return loris_vl_info['ID']", "def get_loris_session_id(self):\n\n # check if there are any visit label in BIDS structure, if not,\n # will use the default visit label set in the config module\n visit_label = self.bids_ses_id if self.bids_ses_id else self.default_vl\n\n session = Session(\n self.db, self.verbose, self.cand_id, visit_label,\n self.center_id, self.project_id, self.cohort_id\n )\n loris_vl_info = session.get_session_info_from_loris()\n\n if not loris_vl_info:\n message = \"ERROR: visit label \" + visit_label + \" does not exist in \" + \\\n \"the session table for candidate \" + str(self.cand_id) + \\\n \"\\nPlease make sure the visit label is created in the \" + \\\n \"database or run bids_import.py with the -s option -s if \" + \\\n \"you wish that the insertion pipeline creates the visit \" + \\\n \"label in the session table.\"\n print(message)\n exit(lib.exitcode.SELECT_FAILURE)\n\n return loris_vl_info['ID']", "def get_SessionId(self):\n query = QtSql.QSqlQuery()\n query.exec(\"SELECT sessionID FROM logs\");\n if(query.last()):\n return(query.value(0)+1)\n else:\n return(1)", "def get_session_id(cls, message: Message) -> str:\n avp = next((x for x in message.avps if x[0] == \"Session-Id\"), None)\n if avp is not None:\n return avp[2]", "def project_name(self, session):\n # FIXME: This is not adequate, see comments elsewhere about it.\n session = int(session) # To allow a string, e.g. 
'01' for 1\n return self.course.projects[session]", "def session_from_sessid(self, sessid):\r\n return self.sessions.get(sessid)", "def _get_session_record_id(session_token: str) -> str:\n return MEMCACHED_PREFIX + \"sessions.\" + session_token", "def session_cookie_name(self):\n r=Loader.capi.cppcms_capi_session_get_session_cookie_name(self.d)\n self.check()\n return r.decode()", "def getSessionId(self) -> \"SbName const &\":\n return _coin.ScXMLStateMachine_getSessionId(self)", "def get_runame_at_same_node_with_active_clusterstate(wo_omu_name,sp_omu_name):\n\n command=\"fshascli -v |grep ClusterState|grep RecoveryUnit| awk '{print $2}' \"\n out = connections.execute_mml_without_check(command)\n\n ru1 = out.split('\\n')[1].strip()\n ru2 = out.split('\\n')[2].strip()\n\n command='fshascli -s %s'%(ru1)\n ret1 = connections.execute_mml_without_check(command)\n\n command='fshascli -s %s'%(ru2)\n ret2 = connections.execute_mml_without_check(command)\n\n if ret1.count('role(ACTIVE)') == 1:\n match = re.search(r\"/(.*)/(.*)\", ru1, re.I)\n if match is None:\n exceptions.raise_ILError(\"ILKeywordSyntaxError\", \"%s is not unit\"%ru1)\n else:\n if match.group(1) in wo_omu_name:\n return wo_omu_name\n elif match.group(1) in sp_omu_name:\n return sp_omu_name\n\n if ret2.count('role(ACTIVE)') == 1:\n match = re.search(r\"/(.*)/(.*)\", ru2, re.I)\n if match is None:\n exceptions.raise_ILError(\"ILKeywordSyntaxError\", \"%s is not unit\"%ru2)\n else:\n if match.group(1) in wo_omu_name:\n return wo_omu_name\n elif match.group(1) in sp_omu_name:\n return sp_omu_name", "def testOpsimDbSimName(self):\n simname = self.oo.fetchOpsimRunName()\n self.assertTrue(isinstance(simname, str))\n self.assertEqual(simname, 'opsimblitz1_1133')", "def get_session_key(self):\n pass", "def run_id(self):\n raise NotImplementedError('Each experiment must generate a run_id that is unique across runs')", "def get_windows_from_session(sess_name):\n cmd = (CMD_LIST_WINDOWS % sess_name).split(config.CMD_SEP)\n s = util.exec_cmd(cmd)\n return s.split('\\n')", "def get_session_token(workspace, viewer_host, viewer_port):\n\n try:\n session_file = os.path.join(workspace, '.codechecker.session.json')\n with open(session_file, 'r',\n encoding=\"utf-8\", errors=\"ignore\") as sess_file:\n sess_dict = json.load(sess_file)\n\n host_port_key = viewer_host + ':' + str(viewer_port)\n return sess_dict['tokens'][host_port_key]\n except IOError as ioerr:\n print(\"Could not load session for session getter because \" +\n ioerr.strerror)\n return None\n except KeyError as err:\n print(\"Could not load session for session getter because \" + str(err))\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the requested number of visits for proposals in propId. Returns a dictionary Nvisits{u/g/r/i/z/y}
def fetchRequestedNvisits(self, propId=None): visitDict = {} if propId is None: # Get all the available propIds. propData = self.tables['Proposal'].query_columns_Array(colnames=[self.propIdCol, self.propNameCol], constraint='') else: # Get the propType info to go with the propId(s). if hasattr(propId, '__iter__'): constraint = '(' for pi in propId: constraint += '(propId = %d) or ' %(pi) constraint = constraint[:-4] + ')' else: constraint = 'propId = %d' %(propId) propData = self.tables['Proposal'].query_columns_Array(colnames=[self.propIdCol, self.propNameCol], constraint=constraint) for pId, propType in zip(propData[self.propIdCol], propData[self.propNameCol]): perPropConfig = self.tables['Config'].query_columns_Array(colnames=['paramName', 'paramValue'], constraint = 'nonPropID = %d and paramName!="userRegion"' %(pId)) filterlist = self._matchParamNameValue(perPropConfig, 'Filter') if propType == 'WL': # For WL proposals, the simple 'Filter_Visits' == the requested number of observations. nvisits = np.array(self._matchParamNameValue(perPropConfig, 'Filter_Visits'), int) elif propType == 'WLTSS': seqDict, nvisits = self._parseSequences(perPropConfig, filterlist) visitDict[pId] = {} for f, N in zip(filterlist, nvisits): visitDict[pId][f] = N nvisits = {} for f in ['u', 'g', 'r', 'i', 'z', 'y']: nvisits[f] = 0 for pId in visitDict: for f in visitDict[pId]: nvisits[f] += visitDict[pId][f] return nvisits
[ "def count_votes(votes):\r\n diction = {}\r\n for vote in votes:\r\n if not vote.celebrity:\r\n pass\r\n elif vote.celebrity in diction:\r\n diction[vote.celebrity] = diction[vote.celebrity] + 1\r\n else:\r\n diction[vote.celebrity] = 1\r\n return diction", "def compute_indegrees(digraph):\n keys = list(digraph.keys())\n degree_dic = {}\n for key in keys:\n count = 0\n for val_set in list(digraph.values()):\n for val in val_set:\n if val == key:\n count += 1\n degree_dic[key] = count\n return degree_dic", "def tally_points(polygons, points):\n #traverse the 2 data structures and for each shape, count points that are found there. Enhancement would be to reduce time complexity. \n d = dict()\n for name, poly in polygons.items():\n #print(name, \":\", poly)\n count = 0\n for pt in points:\n if pt.within(poly):\n count += 1\n d.update({name:count})\n \n #return file name and dictionary for QA testing\n return(d)", "def count_occurrences(n, documents):\n occurences = dict()\n\n for doc in documents:\n for gram in doc[\"grams\"][str(n)]:\n s_gram = ', '.join(gram)\n if not s_gram in occurences:\n occurences[s_gram] = dict()\n occurences[s_gram][\"docs\"] = []\n occurences[s_gram][\"occurrences\"] = 0\n\n occurences[s_gram][\"occurrences\"] += 1\n\n if not doc[\"pmid\"] in occurences[s_gram][\"docs\"]:\n occurences[s_gram][\"docs\"].append(doc[\"pmid\"])\n\n occurences = sorted(occurences.items(), key=lambda kv: (len(kv[1]['docs']),kv[1]['occurrences']))\n occurences.reverse()\n\n return occurences", "def computeCountDict():\n countDict = {}\n # Run through each review's tf dictionary and increment countDict's (word, doc) pair\n for review in tfDict:\n # print(review)\n for word in review:\n if word in countDict:\n countDict[word] += 1\n else:\n countDict[word] = 1\n return countDict", "def neighbor_counts(living):\n n = collections.Counter()\n for x in map(neighbors, living):\n n.update(x)\n return dict(n)", "def num_visited(instructions, num_santas=1):\n locs = [ [0,0] for i in range(num_santas) ]\n visited = {}\n csanta = 0\n visited[h(locs[csanta])] = 1\n for move in instructions:\n if move == '^':\n locs[csanta][1] += 1\n elif move =='v':\n locs[csanta][1] -= 1\n elif move == '>':\n locs[csanta][0] += 1\n elif move == '<':\n locs[csanta][0] -= 1\n\n if h(locs[csanta]) not in visited:\n visited[h(locs[csanta])] = 1\n else:\n visited[h(locs[csanta])] += 1\n \n csanta = (csanta + 1) % num_santas\n\n return len(visited.values())", "def test_portals_id_designs_nk_members_count_get(self):\n pass", "def test_portals_id_designs_count_get(self):\n pass", "def test_portals_id_designs_nk_design_members_count_get(self):\n pass", "def get_nps_segment_data(pro_detract_count):\n nps_segment_data = {\n 'promoters': {\n 'count': 0,\n 'percent': 0.0\n },\n 'passives': {\n 'count': 0,\n 'percent': 0.0\n },\n 'detractors': {\n 'count': 0,\n 'percent': 0.0\n }\n }\n\n nps_segment = pro_detract_count\n\n if nps_segment['total'] == 0:\n return nps_segment_data\n\n promoters_percentage = (nps_segment['promoters'] / nps_segment['total']) * 100\n passives_percentage = (nps_segment['passives'] / nps_segment['total']) * 100\n detractors_percentage = (nps_segment['detractors'] / nps_segment['total']) * 100\n\n nps_segment_data['promoters']['count'] = int(round(nps_segment['promoters']))\n nps_segment_data['promoters']['percent'] = round(promoters_percentage, 2)\n\n nps_segment_data['passives']['count'] = int(round(nps_segment['passives']))\n nps_segment_data['passives']['percent'] = round(passives_percentage, 2)\n\n 
nps_segment_data['detractors']['count'] = int(round(nps_segment['detractors']))\n nps_segment_data['detractors']['percent'] = round(detractors_percentage, 2)\n\n return nps_segment_data", "def test_portals_id_designs_nk_comments_count_get(self):\n pass", "def get_presentations_number_of_user(self, userid):\n return len(self.get_presentations_of_user(userid))", "def test_portals_id_designs_nk_commenters_count_get(self):\n pass", "def ip_frequencies(self):\n frequencies = {}\n for ip in self.data.keys():\n frequency = 0\n ip_info = self.data[ip] # Instance of IpInfo\n for line_type in ip_info.data.keys():\n if isinstance(ip_info.data[line_type], int):\n frequency += ip_info.data[line_type]\n else: # the None key\n frequency += len(ip_info.data[line_type])\n frequencies[ip] = frequency\n return frequencies", "def get_continent_populations():\r\n\r\n poptotaldict = {}\r\n\r\n lines = country_pop.splitlines()[1:]\r\n for line in lines:\r\n data = line.split('\\t')\r\n if data[2] not in poptotaldict:\r\n poptotaldict.update({data[2] : int(data[5].replace(',', ''))})\r\n else:\r\n poptotaldict[data[2]] = int(data[5].replace(',', '')) + poptotaldict[data[2]]\r\n return poptotaldict", "def test_ppo_paper_count():\n ppo_entries = rldb.find_all({\n 'source-title': 'Proximal Policy Optimization Algorithm',\n })\n\n assert len(ppo_entries) == (\n 0\n + 49 # A2C\n + 49 # ACER\n + 49 # PPO\n )", "def get_count_teachers(self, *arg):\n grade_count = {}\n\n if (arg):\n if arg[0] == 'grade':\n for key in K12:\n i = 0\n for t in self.teachers: #! need to find a way to make this generic\n if key == t.grade_level:\n i += 1\n grade_count[key]= i #! not very pythonic\n for key in grade_count:\n print('Grade: ', key, ' Count: ', grade_count[key])\n else:\n if (self.students):\n print('Number of teachers in school', len(self.teachers))\n else:\n print('There are no teachers!')", "def visitors(start_date, end_date):\n visitors = {}\n request = _build_request()\n date = start_date\n while date <= end_date:\n date_str = str(date)\n visitors[str(date)] = int(\n request.get(\n ids=\"ga:\" + profile_id,\n start_date=date_str,\n end_date=date_str,\n metrics=\"ga:visitors\",\n ).execute()[\"rows\"][0][0]\n )\n date += timedelta(days=1)\n return visitors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
compute the intersection-over-union score; both inputs should be categorical (as opposed to one-hot)
def iou_score(pred_cls, true_cls, nclass=7, drop=drop): intersect_ = [] union_ = [] for i in range(nclass): if i not in drop: intersect = ((pred_cls == i) + (true_cls == i)).eq(2).sum().item() union = ((pred_cls == i) + (true_cls == i)).ge(1).sum().item() intersect_.append(intersect) union_.append(union) return np.array(intersect_), np.array(union_)
[ "def intersection_over_union(heatmap1: np.ndarray, heatmap2: np.ndarray) -> float:\n intersection = np.bitwise_and(heatmap1, heatmap2)\n union = np.bitwise_or(heatmap1, heatmap2)\n\n count_inter = float(np.count_nonzero(intersection))\n count_union = float(np.count_nonzero(union))\n\n iou = count_inter / count_union\n\n return iou", "def overlap_images(gtimage, predimage):\n\n\n gtimage=(numpy.array(gtimage)>127)*1\n predimage=(numpy.array(predimage)>127)*1\n\n intersec = numpy.bitwise_and(gtimage, predimage)\n intersec_val = float(numpy.sum(intersec))\n\n union = numpy.bitwise_or(gtimage, predimage)\n\n union_val = float(numpy.sum(union))\n\n if union_val == 0:\n return 0\n else:\n if float(intersec_val / union_val)>0.5:\n return 1\n else:\n return 0", "def imageIou(img1,img2):\n\n intersection = np.sum( img1 * img2 != 0)\n union = np.sum( (img1 + img2) != 0 )\n return intersection / union", "def match_scores_agg(PhisX, PhisY, flagsX, flagsY, alpha, thresh):\n # Can speedup aggregate with one vector per word assumption.\n # Take dot product between correponding VLAD vectors\n u = (PhisX * PhisY).sum(axis=1)\n # Propogate error flags\n flags = np.logical_or(flagsX.T[0], flagsY.T[0])\n assert len(flags) == len(u), 'mismatch'\n u[flags] = 1\n score_list = selectivity(u, alpha, thresh, out=u)\n return score_list", "def eval_intersection(I1, I2):\n I1 = np.ascontiguousarray(I1, dtype='int64')\n I2 = np.ascontiguousarray(I2, dtype='int64')\n n = I1.shape[0]\n assert I2.shape[0] == n\n k1, k2 = I1.shape[1], I2.shape[1]\n ninter = 0\n for i in range(n):\n ninter += ranklist_intersection_size(\n k1, swig_ptr(I1[i]), k2, swig_ptr(I2[i]))\n return ninter", "def iou(boxes1, boxes2, add1=False):\n intersect = intersection(boxes1, boxes2, add1)\n area1 = area(boxes1, add1)\n area2 = area(boxes2, add1)\n union = np.expand_dims(area1, axis=1) + np.expand_dims(area2, axis=0) - intersect\n return intersect / union", "def contingency_map(array1, array2, threshold1=0., threshold2=0.):\n array1_thres = array1 > threshold1\n array2_thres = array2 > threshold2\n contingency = array1*0\n contingency += np.int16(array2_thres)\n contingency += np.int16(array1_thres)*2\n return array1_thres, array2_thres, contingency", "def calculate_area(pred, label, num_classes, ignore_index=255):\n if len(pred.shape) == 2:\n pred = pred[np.newaxis, :, :]\n if len(label.shape) == 2:\n label = label[np.newaxis, :, :]\n if not pred.shape == label.shape:\n raise ValueError('Shape of `pred` and `label should be equal, '\n 'but there are {} and {}.'.format(\n pred.shape, label.shape))\n\n # Delete ignore_index\n mask = label != ignore_index\n pred = pred + 1\n label = label + 1\n pred = pred * mask\n label = label * mask\n\n pred = np.eye(num_classes + 1)[pred] # F.one_hot(pred, num_classes + 1)\n label = np.eye(num_classes + 1)[label] # F.one_hot(pred, num_classes + 1)\n pred = pred[:, :, :, 1:]\n label = label[:, :, :, 1:]\n\n pred_area = []\n label_area = []\n intersect_area = []\n\n for i in range(num_classes):\n pred_i = pred[:, :, :, i]\n label_i = label[:, :, :, i]\n pred_area_i = np.sum(pred_i)\n label_area_i = np.sum(label_i)\n intersect_area_i = np.sum(pred_i * label_i)\n pred_area.append(pred_area_i)\n label_area.append(label_area_i)\n intersect_area.append(intersect_area_i)\n pred_area = np.array(pred_area)\n label_area = np.array(label_area)\n intersect_area = np.array(intersect_area)\n return intersect_area, pred_area, label_area", "def _compute_binary_classification_accuracy(self, h:torch.tensor, 
t_binary:torch.tensor):\n assert h.size(0) == t_binary.size(0) >= 0\n assert len(h.size()) == len(t_binary.size()) == 1\n # assert 0 <= h.max().item() <= 1\n # assert 0 <= t_binary.max().item() <= 1\n\n if h.size(0) == t_binary.size(0) == 0:\n # if all samples are rejected, return zeros.\n acc = 0.0\n pre = 0.0\n rec = 0.0\n else:\n # conditions (true,false,positive,negative)\n condition_true = (h==t_binary)\n condition_false = (h!=t_binary)\n condition_pos = (h==torch.ones_like(h))\n condition_neg = (h==torch.zeros_like(h))\n\n # TP, TN, FP, FN\n true_pos = torch.where(condition_true & condition_pos, torch.ones_like(h), torch.zeros_like(h))\n true_neg = torch.where(condition_true & condition_neg, torch.ones_like(h), torch.zeros_like(h))\n false_pos = torch.where(condition_false & condition_pos, torch.ones_like(h), torch.zeros_like(h))\n false_neg = torch.where(condition_false & condition_neg, torch.ones_like(h), torch.zeros_like(h))\n\n tp = float(true_pos.sum())\n tn = float(true_neg.sum())\n fp = float(false_pos.sum())\n fn = float(false_neg.sum())\n\n # accuracy, precision, recall\n acc = float((tp+tn)/(tp+tn+fp+fn+1e-12))\n pre = float(tp/(tp+fp+1e-12))\n rec = float(tp/(tp+fn+1e-12))\n\n return acc, pre, rec", "def IoU(detection1, detection2):\n\n # determine the (x, y)-coordinates of the intersection rectangle\n xA = max(detection1[0], detection2[0])\n yA = max(detection1[1], detection2[1])\n xB = min(detection1[2], detection2[2])\n yB = min(detection1[3], detection2[3])\n\n # area of intersection\n interArea = max(0, xB - xA) * max(0, yB - yA)\n\n # compute the area of both the prediction and ground-truth rectangles\n boxAArea = (detection1[3] - detection1[1]) * (detection1[2] - detection1[0])\n boxBArea = (detection2[3] - detection2[1]) * (detection2[2] - detection2[0])\n\n # compute the intersection over union by taking the intersection\n # area and dividing it by the sum of prediction + ground-truth\n # areas - the interesection area\n iou = interArea / float(boxAArea + boxBArea - interArea)\n\n # return the intersection over union value\n return iou", "def compute_overlap(a,b):\n return list((Counter(a) & Counter(b)).elements())", "def get_intersection_matrix(pair_names, unions_names, cutoff, dset_dict):\n\n dset_nr = len(pair_names)+1 #pairs and union\n\n # Counter is 3-dimensional for keeping both abs number of intersection AND\n # percentages. 
\n\n counter = np.zeros([dset_nr, cutoff, 2]) # 0-based\n\n # Get the pairs \n for (indx1, (main_name, sub_name)) in enumerate(pair_names):\n # Get the pair-dsets\n main_dset = dset_dict[main_name]\n sub_dset = dset_dict[sub_name]\n\n # Iterate through all (polyA-cluster, read_count) points in the\n # datasets, and add the polyA-clusters to two temporary lists, indexed\n # by the read count from 0 to cutoff-1.\n main_cls = [[] for val in range(cutoff)]\n sub_cls = [[] for val in range(cutoff)]\n\n for (dset, dset_l) in [(main_dset, main_cls), (sub_dset, sub_cls)]:\n\n for (read_nr, clusters) in dset.iteritems():\n if read_nr <= 0:\n debug()\n if read_nr > cutoff-1:\n dset_l[cutoff-1].append(clusters) # add if > cutoff\n else:\n dset_l[read_nr-1] = clusters\n\n #if dset_l[-1] != []:\n #debug()\n\n # Flatten the last arrays\n main_cls[-1] = sum(main_cls[-1], [])\n sub_cls[-1] = sum(sub_cls[-1], [])\n\n # Get number of intersections \n isect_nrs = [len(set.intersection(set(main_cls[count]),\n set(sub_cls[count]))) for count in\n range(0, cutoff)]\n\n # Get percent of intersection relative to 'main' dataset (will be all or\n # annot)\n isect_pcnt = []\n for (indx, isect_nr) in enumerate(isect_nrs):\n\n # Only calculate percentage if more than 1 cluster with this read count\n if main_cls[indx] != 0:\n isect_pcnt.append(isect_nrs[indx]/len(main_cls[indx]))\n else:\n isect_pcnt.append(0)\n\n # Add the number and intersection to the array\n counter[indx1,:,0] = isect_nrs\n counter[indx1,:,1] = isect_pcnt\n\n # Now all the pairs have been added. Add the unions\n # Take the union of all dsetsxcept\n all_cls = [[] for val in range(cutoff)]\n\n # add all the clusters from the union datasets to all_cls\n for u_name in unions_names:\n for (read_nr, clusters) in dset_dict[u_name].iteritems():\n\n if read_nr > cutoff-1:\n all_cls[cutoff-1].append(clusters) # add if > cutoff\n else:\n all_cls[read_nr-1].append(clusters)\n\n # flatten all_cls (which has all the clusters in the union dsets)\n # and take union at the same tim\n all_cls = [sum(el, []) for el in all_cls]\n\n # Get number of intersections \n # (using main_cls from the previous for-loop -- dirty :S)\n all_I_nrs = [len(set.intersection(set(main_cls[count]),\n set(all_cls[count]))) for count in\n range(0, cutoff)]\n\n # Get percent of intersection relative to 'main' dataset (will be all or annot)\n all_I_pcnt = []\n for (indx, isect_nr) in enumerate(isect_nrs):\n\n # Only calculate percentage if more than 1 cluster with this read count\n if main_cls[indx] != 0:\n all_I_pcnt.append(all_I_nrs[indx]/len(main_cls[indx]))\n else:\n all_I_pcnt.append(0)\n\n # Add the number and intersection to the array\n counter[-1,:,0] = all_I_nrs\n counter[-1,:,1] = all_I_pcnt\n\n ### flip things around; put union row first. 
This is for better compliance\n # with downstream code\n\n newcount = np.zeros([dset_nr, cutoff, 2])\n newcount[0] = counter[-1]\n newcount[1:] = counter[0:-1]\n\n return newcount", "def correspondences(labels1,labels2):\n q = 100000\n assert amin(labels1)>=0 and amin(labels2)>=0\n assert amax(labels2)<q\n combo = labels1*q+labels2\n result = unique(combo)\n result = array([result//q,result%q])\n return result", "def add_intersectional(self,var_list=None,tuple_lens=2):\n # comput var list if not passed\n if not var_list:\n var_list = list(self.get_vars_per_type('categorical'))\n\n # make tuple len a list for looping\n if not(type(tuple_lens)==list):\n tuple_lens = list(range(2,tuple_lens+1))\n\n # loop over lengths\n for k in tuple_lens:\n # generate tuples of cat variables\n vl_tuples = itertools.combinations(var_list,k)\n for cur_var_list in vl_tuples:\n # create column name\n cur_var_list = list(cur_var_list)\n new_name = '_'.join(cur_var_list)\n\n # lambda to merge the valuse fo the current columns\n mergerow = lambda row: '_'.join([str(row[i_col]) for i_col in\n cur_var_list])\n # apply and save to df\n self.df[new_name] = self.df.apply(mergerow,axis=1)\n\n\n self.update_meta_df_cluster()\n\n return self.df", "def multi_intersect(input):\n arr = input[0].ravel()\n for this in input[1:]:\n arr = np.intersect1d(arr, this.ravel())\n\n return arr", "def computeOQScore(data):\n data[\"oqScore\"] = data[\"traitBased\"] + data[\"exceptionalBased\"]", "def _intersect_and_union(\n segmap_list: T.List[T.Array], mask_list: T.List[T.Array], num_classes: int, ignore_index: int\n) -> T.Dict[str, T.Array]:\n\n zero_array = np.zeros(num_classes, dtype=np.float)\n total_area_dict = {\n \"segmap\": zero_array.copy(),\n \"mask\": zero_array.copy(),\n \"intersect\": zero_array.copy(),\n \"union\": zero_array.copy(),\n }\n\n for segmap, mask in zip(segmap_list, mask_list):\n\n bool_array = mask != ignore_index\n segmap = segmap[bool_array]\n mask = mask[bool_array]\n intersect = segmap[segmap == mask]\n\n bins = np.arange(num_classes + 1)\n segmap_area, _ = np.histogram(segmap, bins=bins)\n mask_area, _ = np.histogram(mask, bins=bins)\n intersect_area, _ = np.histogram(intersect, bins=bins)\n union_area = segmap_area + mask_area - intersect_area\n\n total_area_dict[\"segmap\"] += segmap_area\n total_area_dict[\"mask\"] += mask_area\n total_area_dict[\"intersect\"] += intersect_area\n total_area_dict[\"union\"] += union_area\n\n return total_area_dict", "def intersects(*args, **kwargs):\n \n pass", "def evaluate(ground_truth_labels: type_alias.TensorLike,\n predicted_labels: type_alias.TensorLike,\n grid_size: int = 1,\n name: str = \"intersection_over_union_evaluate\") -> tf.Tensor:\n with tf.name_scope(name):\n ground_truth_labels = tf.convert_to_tensor(value=ground_truth_labels)\n predicted_labels = tf.convert_to_tensor(value=predicted_labels)\n\n shape.compare_batch_dimensions(\n tensors=(ground_truth_labels, predicted_labels),\n tensor_names=(\"ground_truth_labels\", \"predicted_labels\"),\n last_axes=-grid_size,\n broadcast_compatible=True)\n\n ground_truth_labels = asserts.assert_binary(ground_truth_labels)\n predicted_labels = asserts.assert_binary(predicted_labels)\n\n sum_ground_truth = tf.math.reduce_sum(\n input_tensor=ground_truth_labels, axis=list(range(-grid_size, 0)))\n sum_predictions = tf.math.reduce_sum(\n input_tensor=predicted_labels, axis=list(range(-grid_size, 0)))\n intersection = tf.math.reduce_sum(\n input_tensor=ground_truth_labels * predicted_labels,\n 
axis=list(range(-grid_size, 0)))\n union = sum_ground_truth + sum_predictions - intersection\n\n return tf.where(\n tf.math.equal(union, 0), tf.ones_like(union), intersection / union)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that establishes the connection with the Appium server and loads the application onto the emulator via the Appium server. The Appium server uses Android UIAutomator/adb_server to enable automated testing.
def setUp(self): "Setup for the test" desired_caps = {} desired_caps['platformName'] = 'Android' desired_caps['platformVersion'] = '9' desired_caps['deviceName'] = 'Pixel (Edited)' desired_caps['newCommandTimeout'] = '3000' desired_caps['automationName'] ='uiautomator2' # Returns abs path relative to this file and not cwd desired_caps['app'] = os.path.abspath(os.path.join(os.path.dirname(__file__),'ApiDemos-debug.apk')) desired_caps['appPackage'] = "io.appium.android.apis" desired_caps['appActivity'] = "ApiDemos" self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps) #self.driver.implicitly_wait(10)
[ "def launch_appium(args, emulator):\n # prepare cmd\n cmd = ['appium', '--no-reset', '-p', str(args.appium_port), '-U', emulator.serial,\n '--session-override', '--log-level', 'info', '-bp', str(args.appium_back_port)]\n Appium = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # wait for listener binding result\n line = Appium.stdout.readline()\n while 'listener' not in line:\n line = Appium.stdout.readline()\n if 'Could not start' in line:\n running_logger.error('Could not start appium')\n running_logger.error(' '.join(cmd))\n return None\n return Appium", "def setUp(self):\n global desired_caps\n\n self.logger.info(\"Configuring desired capabilities\")\n\n if self.app == 'android': desired_caps = self.android()\n\n self.logger.info(\"Initiating Appium driver\")\n self.driver = webdriver.Remote(\"http://0.0.0.0:4723/wd/hub\", desired_caps)\n\n # set global wait\n self.driver.implicitly_wait(5) # waits 5 seconds", "def ConnectDevice(self):\n assert self._CanConnect()\n if FLAGS.skip_connect_device:\n return True\n connect_args = [self.android_platform.real_adb,\n 'connect',\n 'localhost:%s' % self.emulator_adb_port]\n logging.info('Connecting adb server to device: %s', connect_args)\n connect_task = None\n if not self.adb_server_port:\n self.adb_server_port = portpicker.PickUnusedPort()\n elif self.adb_server_port < 0 or self.adb_server_port > 65535:\n logging.warn('Invalid adb server port %d, skip connecting',\n self.adb_server_port)\n return\n\n try:\n logging.info('Starting: %s', connect_args)\n connect_task = common.SpawnAndWaitWithRetry(\n connect_args,\n proc_output=True,\n exec_env=self._AdbEnv(),\n timeout_seconds=ADB_SHORT_TIMEOUT_SECONDS,\n retries=5)\n logging.info('Done: %s', connect_args)\n except common.SpawnError:\n return False\n\n connect_stdout = connect_task.borg_out\n logging.info('Status: %s ', connect_stdout)\n odd_successful_connection = 'localhost:%s:%s' % (self.emulator_adb_port,\n self.emulator_adb_port)\n return ('connected' in connect_stdout or\n odd_successful_connection in connect_stdout)", "def prepare_emulator(self):", "async def test_server_init() -> None:\n requester = UpnpTestRequester(RESPONSE_MAP)\n server = AiohttpNotifyServer(requester, (\"192.168.1.2\", 8090))\n assert server._loop is not None\n assert server.listen_host == \"192.168.1.2\"\n assert server.listen_port == 8090\n assert server.callback_url == \"http://192.168.1.2:8090/notify\"\n assert server.event_handler is not None\n\n server = AiohttpNotifyServer(\n requester, (\"192.168.1.2\", 8090), \"http://1.2.3.4:8091/\"\n )\n assert server.callback_url == \"http://1.2.3.4:8091/\"", "async def connect(self):\n raise RuntimeWarning(\"Connection to a server disallowed in instances of the mock bot.\")", "def connect():\n global adb_socket\n if adb_socket is not None:\n raise RuntimeError('connection already existed')\n\n host, port = config.HOST, config.PORT\n\n connection = socket.socket()\n try:\n connection.connect((host, port))\n except ConnectionError as _:\n warn_msg = 'failed when connecting to adb server: {}:{}, retrying ...'.format(host, port)\n warnings.warn(warn_msg)\n reboot_adb_server()\n connect()\n return\n\n adb_socket = connection", "def _install_android_app(app_path):\n args = [\"adb\", \"install\", app_path]\n logging.info(\"Install testapp: %s\", \" \".join(args))\n _run_with_retry(args, device=_DEVICE_ANDROID, type=_RESET_TYPE_WIPE_REBOOT)", "def setUpModule():\n global WEBDRIVER_SERVER_URL\n global WEBDRIVER_PROCESS\n if not WEBDRIVER_SERVER_URL:\n 
WEBDRIVER_SERVER_URL = 'http://localhost:%d' % WEBDRIVER_PORT\n WEBDRIVER_PROCESS = subprocess.Popen([WEBDRIVER_EXE,\n '--port=%d' % WEBDRIVER_PORT])\n time.sleep(3)", "def launch_app(self):\n self._selenium_web_driver().launch_app()", "def connect(self, test_client = 0):\n return self.app.connect(test_client = test_client)", "def _ConnectToEmulatorConsole(self):\n assert self._CanConnect(), 'missing details to connect to emulator.'\n attempts = 0\n while attempts < 5:\n try:\n sock = telnetlib.Telnet('localhost', self.emulator_telnet_port, 60)\n self._TryAuth(sock)\n return sock\n\n except socket.error as e:\n if e.errno == 111:\n # not bound yet!\n logging.info('Emulator console port not bound yet.')\n time.sleep(2)\n attempts += 1\n else:\n raise e\n self._reporter.ReportFailure('tools.android.emulator.console.CannotConnect',\n {'attempts': attempts})\n # Since we failed connecting to the console, let's kill all processes and\n # retry.\n self._TransientDeath(\n 'Tried %s times to connect to emu console.' % attempts)", "def __init__(\n self,\n app_path,\n host_app_path,\n out_dir,\n release=False,\n retries=1,\n test_cases=None,\n test_args=None,\n env_vars=None,\n ):\n test_runner.DeviceTestRunner.__init__(\n self,\n app_path,\n out_dir,\n env_vars=env_vars,\n retries=retries,\n test_args=test_args,\n test_cases=test_cases,\n )\n self.shards = 1 # For tests on real devices shards=1\n self.version = None\n self.platform = None\n self.host_app_path = None\n if host_app_path != 'NO_PATH':\n self.host_app_path = os.path.abspath(host_app_path)\n self.homedir = ''\n self.release = release\n self.set_up()\n self._init_sharding_data()\n self.start_time = time.strftime('%Y-%m-%d-%H%M%S', time.localtime())\n self.test_results['path_delimiter'] = '/'", "def test_attach(self):\n dut = self.android_devices[0]\n dut.droid.wifiAwareAttach(False)\n autils.wait_for_event(dut, aconsts.EVENT_CB_ON_ATTACHED)\n autils.fail_on_event(dut, aconsts.EVENT_CB_ON_IDENTITY_CHANGED)", "def deploy_android():\n\n _check_command('ant')\n _check_command('adb')\n _check_command('cordova')\n _check_command('android')\n\n # generate html for android\n generate_html(cordova=True)\n\n with lcd(_get_runtime()[1]):\n device = None\n local('cordova build')\n\n with settings(warn_only=True):\n cmd = 'cordova run android 2>&1'\n out = local(cmd, capture=True)\n\n if out and out.return_code != 0:\n if out.find('INSTALL_PARSE_FAILED_INCONSISTENT_CERTIFICATES') != -1:\n\n # app is installed with wrong certificate try and uninstall app\n local('adb uninstall {0}'.format(_config('package', section='app')))\n\n # retry install\n local(cmd)\n else:\n print out\n raise SystemExit(out.return_code)", "def test_add_device_app():\n di = no_datastore_interface.NoDatastoreInterface()\n assert di.add_device_app(api_key, device_name, app_name) is None", "def start_anontunnel_android(self):\n from android import AndroidService\n service = AndroidService('Anonymous downloading Service', 'Anonymous tunnels are running...')\n service.start('Anonymous tunnels service started')\n self.service = service", "def setupHttpdServerOnLAN():\n from shutil import copyfile\n\n src_file_dir = \"/root/automation/tools/2.0/START_SERVERS/httpd/\"\n src_file = [\"index.html\", \"selenium_feature_test.html\", \"eg_landscape.jpg\"]\n dst_dir = \"/var/www/html/\"\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n\n for i in src_file:\n full_src_file = os.path.join(src_file_dir, i)\n full_dst_file = os.path.join(dst_dir, i)\n if os.path.exists(full_src_file):\n 
copyfile(full_src_file, full_dst_file)\n else:\n print \"AT_ERROR : File %s isn\\'t exists!\" % full_src_file\n return False\n\n s_log, k_result = run(\"service httpd restart\", withexitstatus=True, timeout=60)\n if not k_result:\n print \"Service httpd is ready for test...\"\n return True\n return False", "def test_one_emulator(mock_tools, android_sdk):\n mock_tools.subprocess.check_output.return_value = devices_result(\"one_emulator\")\n\n assert android_sdk.devices() == {\n \"emulator-5554\": {\n \"name\": \"Android SDK built for x86\",\n \"authorized\": True,\n },\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
checks if the checkbox is enabled; returns True if enabled else False
def checkbox_enabled(self): return self.driver.find_element_by_id("android:id/checkbox").get_attribute("checked")
[ "def _get_isEnabledCheckBoxChecked(self) -> \"bool\" :\n return _core.GroupCommandInput__get_isEnabledCheckBoxChecked(self)", "def is_enabled(self):\n return self.element_info.enabled #and self.top_level_parent().element_info.enabled", "def _is_enabled(self, state):\n enabled = True\n\n if isinstance(self._enabled, State):\n enabled = bool(state.get(\n self._enabled.name, self._enabled.default))\n\n else:\n enabled = bool(self._enabled)\n\n return enabled", "def GetEnabled(self):\n return self._is_enabled", "def _get_isChecked(self) -> \"bool\" :\n return _core.CheckBoxControlDefinition__get_isChecked(self)", "def is_checked(self):\n return self.get_attribute('checked')", "def checkboxcheck(self):\n if ultimate.get() == 1:\n self.ultimate_fav_checkbox.configure(state=\"normal\")\n self.run_ultimate_intf_checkbox.configure(state=\"normal\")\n if ultimate.get() == 0:\n self.ultimate_fav_checkbox.configure(state=\"disabled\")\n self.run_ultimate_intf_checkbox.configure(state=\"disabled\")", "def is_enabled(node):\n return not node[\"disable\"].value()", "def is_enabled(self):\n return getattr(self._thread_locals, 'enabled', True)", "async def enabled(self) -> bool:\n response = await self._adguard._request(\"safesearch/status\")\n return response[\"enabled\"]", "def getBoolFromCB(objCheckBox):\n if objCheckBox.GetValue():\n return 1\n else:\n return 0", "def input_enabled(self):\n return self._input_enabled", "def disabled(kls):\n from wouso.core.config.models import BoolSetting\n\n return BoolSetting.get('setting-%s' % kls.name()).get_value() is False", "def _set_isChecked(self, *args) -> \"bool\" :\n return _core.CheckBoxControlDefinition__set_isChecked(self, *args)", "def is_enabled(suppression_dict):\n \n # Get the disabled flag\n if 'disabled' in suppression_dict:\n return not NotableEventSuppression.get_boolean(suppression_dict['disabled'], False)\n else:\n return False", "def isEnabled(self) -> \"SbBool\":\n return _coin.SoEngineOutput_isEnabled(self)", "def is_enabled(self) -> bool:\n if not self._system.dax_sim_enabled:\n # Check if the system was just booted\n last_asf = self.core_cache.get(self._CACHE_LAST_ASF_KEY)\n if len(last_asf) == 0:\n # Device was just booted, trap RF is off\n return False\n\n # Return the enabled flag stored as a system dataset\n # Can raise a KeyError if the key was not set before, which means the state is ambiguous\n enabled: bool = self.get_dataset_sys(self._ENABLED_KEY) # Helps the type checker\n return enabled", "def isElementEnabled(self, *args):\n return _coin.SoState_isElementEnabled(self, *args)", "def _helper_disabled(self):\n disabled = ADDON.getSetting('disabled')\n if not disabled:\n ADDON.setSetting('disabled', 'false') # create default entry\n disabled = 'false'\n\n if disabled == 'true':\n self._log('inputstreamhelper is disabled in settings.xml.')\n return True\n else:\n self._log('inputstreamhelper is enabled. You can disable inputstreamhelper by setting \\\"disabled\\\" to \\\"true\\\" in settings.xml (Note: only recommended for developers knowing what they\\'re doing!)')\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the difference between the tof for extrapolation and recon: (recon_tof_2 - recon_tof_1) - (extrap_tof_2 - extrap_tof_1)
def get_delta_tof(self, event, det_1, det_2):
    times = [None, None, None, None]
    dets = (det_1, det_2, self.global_key(det_1), self.global_key(det_2))
    # find times for each detector type in dets and fill into times
    for hit in event["data"]:
        for i, det in enumerate(dets):
            if hit["detector"] == det:
                times[i] = hit["hit"]["t"]
    # return None or the difference between tofs
    if None in times:
        return None
    else:
        delta_tof = (times[1] - times[0]) - (times[3] - times[2])
        return delta_tof
[ "def error(self, F):\n return abs((F(self.b) - F(self.a)) - self.approx)", "def backward_differences(T):\n\tnumOfTimes = len(T)\n\t#the number of steps in the method\n\tm = numOfTimes - 1\n\t#generate the initial differences, which\n\t#is just the standard basis.\n\tD = np.array([ [np.float64((i+1)==(numOfTimes-j)) for i in range(numOfTimes)] for j in range(numOfTimes)])\n\tdifferences = np.zeros_like(D)\n\tdifferences[0] = D[0]\n\t\n\t\n\tfor q in range(1,numOfTimes):\n\t\tfor j in range(numOfTimes - q):\n\t\t\tD[j] = first_difference([T[m-j],T[m-j-q]],[D[j],D[j+1]])\n\t\t\tdifferences[q] = D[0]\n\treturn differences", "def diff(self, pT2):\n return _almathswig.Transform_diff(self, pT2)", "def _thermal_diffusion(self):\r\n if self.mineral == 'apatite': # Farley et al. (2000)\r\n Do = 50\r\n Ea = 137.522\r\n if self.mineral == 'zircon': # Reiners et al. (2004)\r\n Do = 0.46\r\n Ea = 169.0336\r\n \r\n R = 0.00831447\r\n T = self.T + 273.15\r\n \r\n d = (Do * np.exp(-Ea / (R * T))) * 1e8\r\n d = (d[:-1] + d[1:]) / 2 #average for dt\r\n \r\n self.diffusivities = d", "def get_delta_upper(P1, P2, target_eps=1.0,ncomp=500,nx=1E6):\n\n\n L,error_term = get_L(P1, P2, target_eps=1.0,ncomp=500, error_tol=1e-6)\n\n #nx = int(nx)\n dx = 2.0*L/nx # discretisation interval \\Delta x\n x = np.linspace(-L,L-dx,nx,dtype=np.complex128) # grid for the numerical integration\n\n #Determine the privacy loss function\n Lx=np.log(P1/P2)\n\n\n\n omega_y=np.zeros(nx)\n\n for i in range(0,len(Lx)):\n ii = int(np.ceil((L+Lx[i])/dx))\n omega_y[ii]+=P1[i]\n\n\n fx = omega_y\n half = int(nx/2)\n\n # Flip fx, i.e. fx <- D(fx), the matrix D = [0 I;I 0]\n temp = np.copy(fx[half:])\n fx[half:] = np.copy(fx[:half])\n fx[:half] = temp\n\n # Compute the DFT\n FF1 = np.fft.fft(fx)\n\n # Take elementwise powers and compute the inverse DFT\n cfx = np.fft.ifft((FF1**ncomp))\n\n # Flip again, i.e. cfx <- D(cfx), D = [0 I;I 0]\n temp = np.copy(cfx[half:])\n cfx[half:] = cfx[:half]\n cfx[:half] = temp\n\n sum=np.sum(cfx)\n\n # first jj for which 1-exp(target_eps-x)>0,\n # i.e. 
start of the integral domain\n jj = int(np.floor(float(nx*(L+target_eps)/(2*L))))\n\n # Evaluate \\delta(target_eps) and \\delta'(target_eps)\n exp_e = 1-np.exp(target_eps-x)\n integrand = exp_e*cfx\n sum_int=np.sum(integrand[jj+1:])\n delta = sum_int\n delta += error_term\n #print('Unbounded DP-delta after ' + str(int(ncomp)) + ' compositions:' + str(np.real(delta)) + ' (epsilon=' + str(target_eps) + ')')\n return np.real(delta)", "def get_delta_t(data1: list, data2: list, time: list):\r\n # time1 = get_max(data1, time)[1]\r\n # time2 = get_max(data2, time)[1]\r\n\r\n time1 = time[argrelextrema(data1, np.greater)[0]]\r\n time2 = time[argrelextrema(data2, np.greater)[0]]\r\n\r\n # for shit in time1:\r\n # print(shit)\r\n delta_t = abs(time1[1] - time2[1])\r\n\r\n return delta_t", "def cdf(self, t):\n idx = np.searchsorted(self.T, t) # gives idx of first number bigger than t\n\n if idx==0: # extrapolation \n a = (self.F_T[1] - self.F_T[0])/(self.T[1] - self.T[0])\n res = self.F_T[0] - a * (self.T[0]-t) \n return max(0, res)\n\n if idx==len(self.T): # extrapolation \n a = (self.F_T[-1] - self.F_T[-2])/(self.T[-1] - self.T[-2])\n res = self.F_T[-1] + a * (t-self.T[-1]) \n return min(1, res)\n\n a = (t-self.T[idx-1])/(self.T[idx] - self.T[idx-1])\n return (1-a)*self.F_T[idx-1] + a*self.F_T[idx]", "def diffusion():\n return 5.1412512431", "def diff_flux(e=1, ref=DEFAULT_REF):\n if ref == 'hegra':\n f = hegra['diff_flux']\n g = hegra['index']\n return f * e ** (-g)\n elif ref == 'hess_pl':\n f = hess_pl['diff_flux']\n g = hess_pl['index']\n return f * e ** (-g)\n elif ref == 'hess_ecpl':\n f = hess_ecpl['diff_flux']\n g = hess_ecpl['index']\n e_c = hess_ecpl['cutoff']\n return f * e ** (-g) * np.exp(-e / e_c)\n elif ref == 'meyer':\n return meyer(e)\n else:\n raise ValueError('Unknown ref: %s' % ref)", "def calc_reb_delta_t(self):\n max_rebound_val = self.calc_or_read_from_cache('max_rebound_val')\n steady_state_avg_amp = self.calc_or_read_from_cache('sag_steady_state_avg_amp')\n\n rebound_amp_change = max_rebound_val - steady_state_avg_amp\n twenty_percent_rebound_voltage = steady_state_avg_amp + rebound_amp_change * 0.20\n eighty_percent_rebound_voltage = steady_state_avg_amp + rebound_amp_change * 0.80\n\n reb_data = self.data()[self.offset_pnt:]\n closest_pnt20 = self.find_nearest_pnt_series(reb_data, \n twenty_percent_rebound_voltage)\n closest_pnt80 = self.find_nearest_pnt_series(reb_data, \n eighty_percent_rebound_voltage)\n\n self._cache['closest_pnt20'] = closest_pnt20\n self._cache['closest_pnt80'] = closest_pnt80\n\n reb_delta_t = (closest_pnt80-closest_pnt20)/self.calc_or_read_from_cache('points_per_ms')\n return reb_delta_t", "def difference(E1, b, d, n, delta_n, ir_reps1=np.empty(0)):\n # Load the energy levels in the second basis\n os.chdir(\"../B\" + str(b) + \" D\" + str(d) + \" N\" + str(n + delta_n))\n E2, ket2 = eigensystem.get(return_ket=True)\n if ir_reps1.size:\n ir_reps2 = eigensystem.levels(E2, ket2)\n\n if E2.size > E1.size:\n E_diff = (E2[:E1.size] - E1) / E1\n if ir_reps1.size:\n ir_diff = ir_reps2[:ir_reps1.size] - ir_reps1\n else:\n E_diff = (E1[:E2.size] - E2) / E2\n if ir_reps1.size:\n ir_diff = ir_reps1[:ir_reps2.size] - ir_reps2\n\n os.chdir(\"../B\" + str(b) + \" D\" + str(d) + \" N\" + str(n))\n if ir_reps1.size == 0:\n return np.abs(E_diff)\n return np.abs(E_diff), ir_diff", "def get_delta_lower(P1, P2, target_eps=1.0,ncomp=500,nx=1E6):\n\n\n L,error_term = get_L(P1, P2, target_eps=1.0,ncomp=500, error_tol=1e-6)\n\n\n nx = int(nx)\n dx = 2.0*L/nx # 
discretisation interval \\Delta x\n x = np.linspace(-L,L-dx,nx,dtype=np.complex128) # grid for the numerical integration\n\n #Determine the privacy loss function\n Lx=np.log(P1/P2)\n\n\n omega_y=np.zeros(nx)\n\n\n for i in range(0,len(Lx)):\n ii = int(np.floor((L+Lx[i])/dx))\n omega_y[ii]+=P1[i]\n\n\n fx = omega_y\n half = int(nx/2)\n\n # Flip fx, i.e. fx <- D(fx), the matrix D = [0 I;I 0]\n temp = np.copy(fx[half:])\n fx[half:] = np.copy(fx[:half])\n fx[:half] = temp\n\n # Compute the DFT\n FF1 = np.fft.fft(fx)\n\n # Take elementwise powers and compute the inverse DFT\n cfx = np.fft.ifft((FF1**ncomp))\n\n # Flip again, i.e. cfx <- D(cfx), D = [0 I;I 0]\n temp = np.copy(cfx[half:])\n cfx[half:] = cfx[:half]\n cfx[:half] = temp\n\n sum=np.sum(cfx)\n\n assert(np.allclose(sum, 1.))\n\n # first jj for which 1-exp(target_eps-x)>0,\n # i.e. start of the integral domain\n jj = int(np.floor(float(nx*(L+target_eps)/(2*L))))\n\n # Evaluate \\delta(target_eps) and \\delta'(target_eps)\n exp_e = 1-np.exp(target_eps-x)\n integrand = exp_e*cfx\n sum_int=np.sum(integrand[jj+1:])\n delta = sum_int\n delta -= error_term\n\n return np.real(delta)", "def get_equilibrium_T(self):\n return self.equations.get_equilibrium_T()", "def delta(self,element1,element2):\n \n delta = (self.model[element1]/self.model[element2]*self.solar[element2].loc[0]/self.solar[element1].loc[0]-1)*1000\n return delta", "def eRates(self, Gp, Gm, eDiffp, eDiffm, corrDiffpm, tp, tm):\r\n #For now we take the derivative of the function fp and fm, because the \r\n #measured difference doesn't change anything in the derivative. So we put \r\n #zero for the measured difference in the functions.\r\n \r\n eDiffp_2 = eDiffp*eDiffp\r\n eDiffm_2 = eDiffm*eDiffm\r\n \r\n #Error in gamma+, including the correlation\r\n ap = 1/self.ddiffpdGp(tp, Gp, Gm, self.dGp)\r\n bp = 1/self.ddiffmdGp(tm, Gp, Gm, self.dGp)\r\n eGp = np.sqrt( eDiffp_2*ap**2 + eDiffm_2*bp**2 + 2*ap*bp*corrDiffpm )\r\n \r\n #Error in gamma-, including the correlation\r\n am = 1/self.ddiffpdGm(tp, Gp, Gm, self.dGm)\r\n bm = 1/self.ddiffmdGm(tm, Gp, Gm, self.dGm)\r\n eGm = np.sqrt( eDiffp_2*am**2 + eDiffm_2*bm**2 + 2*am*bm*corrDiffpm ) \r\n \r\n #Correlation between gamma+ and gamma-\r\n corrGpm = (ap*am*eDiffp_2 + bp*bm*eDiffm_2 +\r\n (ap*bm + am*bp)*corrDiffpm )\r\n \r\n return (eGp, eGm, corrGpm)", "def calc_error(T, order, depth_tria_rec, t, w, case):\n\n I = calc_Int_Tria(T, order, depth_tria_rec, t, w, case) \n I2 = calc_Int_Tria(T, order, depth_tria_rec + 1, t, w, case) \n print(\"I =\", I)\n print(\"I2 =\", I2)\n err = abs(I - I2)/abs(I)\n\n return err", "def equilibrium_Boltzman_ratio(g_1,E_1,g_2,E_2,T):\n delta_E = E_1-E_2\n if DEBUG:\n print \"energy difference =\",delta_E,\"1/cm\"\n print \" =\",c*delta_E,\"hz\"\n print \" =\",h*c*delta_E,\"ergs\"\n print \" =\",h*c*delta_E/k,\"K\"\n return (g_1/g_2)*M.exp(-delta_E/T)", "def erfc_taylor(p: RealMultivarTaylor) -> RealMultivarTaylor:\n return 1. - erf_taylor(p)", "def test_differentiate():\n # Expect exact results\n t = np.linspace(0, 4, 9)\n u = 2*t + 7\n dudt = differentiate(u, dt=t[1]-t[0])\n diff = abs(dudt - 2).max()\n tol = 1E-15\n assert diff < tol" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subtract the time from the specified detector, or set to None if it is not found
def time_offset(self, detector_hits):
    reco_delta_t = None
    global_delta_t = None
    for hit in detector_hits:
        if hit["detector"] == self.config.time_from:
            reco_delta_t = hit["hit"]["t"]
        elif hit["detector"] == self.global_key(self.config.time_from):
            global_delta_t = hit["hit"]["t"]
    if reco_delta_t == None or global_delta_t == None:
        # time_from detector was not found; default to None
        for hit in detector_hits:
            hit["hit"]["t"] = 1e9  # time must be a float - so this is an error code (urk)!
        return
    delta_t = -global_delta_t + reco_delta_t
    for hit in detector_hits:
        if self.is_global(hit["detector"]):
            hit["hit"]["t"] += delta_t
[ "def _elapsed_time_to_timestamp(probe: MeaterProbe) -> datetime | None:\n if not probe.cook or not hasattr(probe.cook, \"time_elapsed\"):\n return None\n return dt_util.utcnow() - timedelta(seconds=probe.cook.time_elapsed)", "def dispatchTime(inc):\n\tif len(FDNY[inc][dti])==0 or len(FDNY[inc][ati])==0:\n\t\tdispt = -99 # arbitrary flag value\n\telse:\n\t\tdeclared = epoch(dstr=FDNY[inc][dti])\n\t\tarrived = epoch(dstr=FDNY[inc][ati])\n\t\tdispt = arrived - declared\n\treturn dispt", "def detector_dead_time(data, dead_time, nonparalyzing=0.0, paralyzing=0.0):\n from .deadtime import apply_detector_dead_time\n\n data = copy(data)\n if nonparalyzing != 0.0 or paralyzing != 0.0:\n data.detector = copy(data.detector)\n apply_detector_dead_time(data, tau_NP=nonparalyzing,\n tau_P=paralyzing)\n elif dead_time is not None:\n data.detector = copy(data.detector)\n apply_detector_dead_time(data, tau_NP=dead_time.tau_NP,\n tau_P=dead_time.tau_P)\n elif data.detector.deadtime is not None and not np.all(np.isnan(data.detector.deadtime)):\n try:\n tau_NP, tau_P = data.detector.deadtime\n except Exception:\n tau_NP, tau_P = data.detector.deadtime, 0.0\n data.detector = copy(data.detector)\n apply_detector_dead_time(data, tau_NP=tau_NP, tau_P=tau_P)\n else:\n raise ValueError(\"no valid deadtime provided in file or parameter\")\n\n return data", "def _seconds_since_check(self) -> int:\n if self.update_time is None:\n return 0\n return int(time.time()) - self.update_time", "def time_available(self):\n if self.last_played_match:\n return self.last_played_match.played + timedelta(hours=4)\n return None", "def _remaining_time_to_timestamp(probe: MeaterProbe) -> datetime | None:\n if (\n not probe.cook\n or not hasattr(probe.cook, \"time_remaining\")\n or probe.cook.time_remaining < 0\n ):\n return None\n return dt_util.utcnow() + timedelta(seconds=probe.cook.time_remaining)", "def __init__(self):\n self.time_passed = -1", "def decrease_time(self):\n pass", "def lookup(self, time, time_cutoff=None):\n\t\t#do a binary search over the point set, comparing times\n\t\tpos = bisect(self.times, time)\n\t\tposition = None\n\t\tif pos==self.point_count:\n\t\t\tposition = self.points[pos-1]\n\t\telif pos>0 and (time - self.times[pos-1]) < (self.times[pos]-time):\n\t\t\t#check which of the two adjacent times is closer to time\n\t\t\tposition = self.points[pos-1]\n\t\telse:\n\t\t\tposition = self.points[pos]\n\n\t\tif time_cutoff is None or abs(position.time - time) <= time_cutoff:\n\t\t\treturn position\n\t\treturn None", "def get_delta_tof(self, event, det_1, det_2):\n times = [None, None, None, None]\n dets = (det_1, det_2, self.global_key(det_1), self.global_key(det_2))\n # find times for each detector type in dets and fill into times\n for hit in event[\"data\"]:\n for i, det in enumerate(dets):\n if hit[\"detector\"] == det:\n times[i] = hit[\"hit\"][\"t\"]\n # return None or the difference between tofs\n if None in times:\n return None\n else:\n delta_tof = (times[1] - times[0]) - (times[3] - times[2])\n return delta_tof", "def halt_diff(self):\n if self._halt_end_time == 0:\n return None\n else:\n diff = self._halt_end_time - time.time()\n if diff < 0:\n return 0\n return diff", "def time_track_stop(name):\n\tglobal _time_track_start_time_dict\n\tglobal _time_track_dict\n\texecution_time = time.time() - _time_track_start_time_dict[name]\n\t_time_track_dict[name] = _time_track_dict.get(name, 0) + execution_time", "def _seconds_until_time(self, target_time):\n self.logger.info('Requested target time is %s', 
target_time)\n now = datetime.today()\n target_datetime = now.replace(hour=target_time.hour,\n minute=target_time.minute,\n second=target_time.second,\n microsecond=target_time.microsecond)\n if now > target_datetime: # time already passed- move to the next day.\n self.logger.debug('Time already passed, targeting time for tomorrow')\n target_datetime += timedelta(days=1)\n else:\n self.logger.debug('Time did not pass, targeting time for today')\n\n self.logger.debug('Requested target datetime is %s', target_datetime)\n return (target_datetime - now).total_seconds()", "def zero_time(self):\n self.time = self.time - self.time.min()", "def time_in_match_engineer(row) -> float:\n to_return = None\n # First, validate the input data.\n try:\n match_time_since_half = row.eventSec\n except AttributeError:\n # If the row/DataFrame passed to the function does not have the\n # proper column(s).\n err_msg = \"The row/DataFrame passed to this function does not have\\\n\t\tthe column `eventSec`. This column is required to run this function.\"\n\n print(err_msg)\n raise ValueError\n\n # Next, perform necessary calculation to arrive at feature value.\n total_match_time = 90 * 60 # Note how this is measured in seconds.\n if row.matchPeriod == \"2H\":\n # If we have to add the time elapsed in the first half to the\n # time in the match specified in the data.\n match_time_since_half += 45 * 60\n elif row.matchPeriod == \"E1\":\n # If we have to add the times elapsed in the first and second halves\n # to the time in the match specified in the data.\n match_time_since_half += 90 * 60\n elif row.matchPeriod == \"E2\":\n # If we have to add the time elapsed in the first and second halves\n # as well as the time elapsed in the first period of the extra time\n # to the time in the match specified in the data.\n match_time_since_half += 90 * 60 + 15 * 60\n\n normed_match_time = match_time_since_half / total_match_time\n\n to_return = normed_match_time\n\n return to_return", "def get_death_tracker(cls, sim_info: SimInfo) -> Union[DeathTracker, None]:\n if sim_info is None:\n return None\n return sim_info.death_tracker", "def get_currently_selected_time(self):\n tstmp = self.time_select.currentText()\n orig_tstmp = None\n if tstmp: # on loading new xyzrph, vesselcenter is updated before timestamps are loaded. But it gets updated later so skip here\n orig_tstmp = self.timestamps[self.timestamps_converted.index(tstmp)]\n return orig_tstmp", "def _find_note_off(num, track):\n time_offset = 0\n for msg in track:\n time_offset += msg.time\n if msg.type == 'note_off' and msg.note == num:\n return time_offset\n return None", "def uptime(self,launch_time):\n lt_datetime = datetime.strptime(launch_time[:-5], '%Y-%m-%dT%H:%M:%S')\n lt_delta = datetime.utcnow() - lt_datetime\n return lt_delta" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate a data source. Based on the inferred data source type, it will validate a resource or a package. The default output format is YAML with front matter.
def console_validate( # Source source: List[str] = common.source, name: str = common.resource_name, type: str = common.type, path: str = common.path, scheme: str = common.scheme, format: str = common.format, encoding: str = common.encoding, innerpath: str = common.innerpath, compression: str = common.compression, schema: str = common.schema, hash: str = common.hash, bytes: int = common.bytes, fields: int = common.fields, rows: int = common.rows, basepath: str = common.basepath, # Dialect dialect: str = common.dialect, header_rows: str = common.header_rows, header_join: str = common.header_join, comment_char: str = common.comment_char, comment_rows: str = common.comment_rows, sheet: str = common.sheet, table: str = common.table, keys: str = common.keys, keyed: bool = common.keyed, # Detector buffer_size: int = common.buffer_size, sample_size: int = common.sample_size, field_type: str = common.field_type, field_names: str = common.field_names, field_confidence: float = common.field_confidence, field_float_numbers: bool = common.field_float_numbers, field_missing_values: str = common.field_missing_values, schema_sync: bool = common.schema_sync, # Checklist checklist: str = common.checklist, checks: str = common.checks, pick_errors: str = common.pick_errors, skip_errors: str = common.skip_errors, # Command parallel: bool = common.parallel, limit_rows: int = common.limit_rows, limit_errors: int = common.limit_errors, yaml: bool = common.yaml, json: bool = common.json, debug: bool = common.debug, trusted: bool = common.trusted, standards: str = common.standards, # Deprecated resource_name: str = common.resource_name, ): console = Console() name = name or resource_name # Setup system if trusted: system.trusted = trusted if standards: system.standards = standards # type: ignore # Create source source = helpers.create_source(source, path=path) if not source and not path: note = 'Providing "source" or "path" is required' helpers.print_error(console, note=note) raise typer.Exit(code=1) try: # Create dialect dialect_obj = helpers.create_dialect( descriptor=dialect, header_rows=header_rows, header_join=header_join, comment_char=comment_char, comment_rows=comment_rows, sheet=sheet, table=table, keys=keys, keyed=keyed, ) # Create detector detector_obj = helpers.create_detector( buffer_size=buffer_size, sample_size=sample_size, field_type=field_type, field_names=field_names, field_confidence=field_confidence, field_float_numbers=field_float_numbers, field_missing_values=field_missing_values, schema_sync=schema_sync, ) # Create checklist checklist_obj = helpers.create_checklist( descriptor=checklist, checks=checks, pick_errors=pick_errors, skip_errors=skip_errors, ) # Create resource resource = Resource( source=helpers.create_source(source), name=name, path=path, scheme=scheme, format=format, datatype=type, compression=compression, innerpath=innerpath, encoding=encoding, hash=hash, bytes=bytes, fields=fields, rows=rows, schema=schema, basepath=basepath, detector=detector_obj, ) # Add dialect if dialect_obj: resource.dialect = dialect_obj # Validate resource report = resource.validate( checklist_obj, name=name, parallel=parallel, limit_rows=limit_rows, limit_errors=limit_errors, ) code = int(not report.valid) except Exception as exception: helpers.print_exception(console, debug=debug, exception=exception) raise typer.Exit(code=1) # Yaml mode if yaml: content = report.to_yaml().strip() print(content) raise typer.Exit(code=code) # Json mode if json: content = report.to_json() print(content) raise 
typer.Exit(code=code) # Default mode labels = ["Row", "Field", "Type", "Message"] props = ["row_number", "field_number", "type", "message"] names = ["dataset"] + [task.name for task in report.tasks] matrix = [report.errors] + [task.errors for task in report.tasks] # Status if report.tasks: console.rule("[bold]Dataset") view = Table(title="dataset") view.add_column("name") view.add_column("type") view.add_column("path") view.add_column("status") for task in report.tasks: status = "VALID" if task.valid else "INVALID" style = "green" if task.valid else "bold red" status_row = [task.name, task.type, task.place, status] view.add_row(*status_row, style=style) console.print(view) # Errors if not report.valid: console.rule("[bold]Tables") for name, errors in zip(names, matrix): if errors: view = Table(title=name) for label in labels: view.add_column(label) for error in errors: error_row: List[str] = [] for prop in props: error_row.append(str(getattr(error, prop, None))) view.add_row(*error_row) console.print(view) # Proper retcode raise typer.Exit(code=code)
[ "def main(source):\n if source is None:\n click.echo(\n \"You need to supply a file or url to a schema to a swagger schema, for\"\n \"the validator to work.\"\n )\n return 1\n try:\n load(source)\n click.echo(\"Validation passed\")\n return 0\n except ValidationError as e:\n raise click.ClickException(str(e))", "def validate(self, data):\n source = data.get(\"source\")\n if not source:\n raise exceptions.ValidationError(dict(source=\"Source is required.\"))\n\n project = data.get(\"project\")\n if source.project != project:\n raise exceptions.ValidationError(\n dict(source=\"Source must be in the same project.\")\n )\n\n source_types = (\"Github\", \"GoogleDocs\", \"GoogleDrive\", \"GoogleSheets\")\n if source.type_name not in source_types:\n raise exceptions.ValidationError(\n dict(\n source=f\"Source must be one of these types: {', '.join(source_types)}.\"\n )\n )\n\n # If the `reviewer` is a username or id then check that it is a\n # valid email address\n reviewer = data.get(\"reviewer\")\n if reviewer:\n try:\n try:\n data[\"reviewer\"] = User.objects.get(id=reviewer)\n except ValueError:\n data[\"reviewer\"] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n try:\n validate_email(reviewer)\n except ValidationError:\n raise exceptions.ValidationError(\n dict(\n reviewer=\"Reviewer is not a valid username, user id, or email address.\"\n )\n )\n else:\n data[\"reviewer\"] = None\n data[\"reviewer_email\"] = reviewer\n else:\n data[\"reviewer\"] = None\n\n return data", "def validate(self):\n if self._checkDatatype():\n subtools.validateFiles(self.inputFile, self.chromSizesFile, self.fileType, self.options)\n else:\n raise TypeError(\"validateFiles cannot validate format {0}. Only the following formats can be validated by this tool: \\n{1}\\n\".format(self.fileType, self.FILE_TYPE))", "def validate(self, in_filename, file_format, entrypoint_name=None):\n\n if file_format == FileFormat.JSON:\n self.from_JSON(in_filename, entrypoint_name)\n else:\n self.from_XML(in_filename, entrypoint_name)", "def validate(datatype, source):\n if datatype == \"antibodies\":\n sheet = \"Antibodies\"\n else:\n sheet = \"Dataset\"\n response = read(source, sheet)\n if failed(response):\n return response\n\n table = response[\"table\"]\n if datatype == \"antibodies\":\n return antibodies.validate(table)\n else:\n return datasets.validate(datatype, table)", "def check_all_types(\n src_dict: Dict[str, Any],\n sinks: MutableSequence[Union[cwl.WorkflowStepInput, cwl.WorkflowOutputParameter]],\n type_dict: Dict[str, Any],\n) -> Dict[str, List[SrcSink]]:\n validation: Dict[str, List[SrcSink]] = {\"warning\": [], \"exception\": []}\n for sink in sinks:\n if isinstance(sink, cwl.WorkflowOutputParameter):\n sourceName = \"outputSource\"\n sourceField = sink.outputSource\n elif isinstance(sink, cwl.WorkflowStepInput):\n sourceName = \"source\"\n sourceField = sink.source\n else:\n continue\n if sourceField is not None:\n if isinstance(sourceField, MutableSequence):\n linkMerge = sink.linkMerge or (\n \"merge_nested\" if len(sourceField) > 1 else None\n )\n srcs_of_sink = []\n for parm_id in sourceField:\n srcs_of_sink += [src_dict[parm_id]]\n else:\n parm_id = cast(str, sourceField)\n if parm_id not in src_dict:\n raise SourceLine(sink, sourceName, ValidationException).makeError(\n f\"{sourceName} not found: {parm_id}\"\n )\n srcs_of_sink = [src_dict[parm_id]]\n linkMerge = None\n for src in srcs_of_sink:\n check_result = check_types(\n type_dict[cast(str, src.id)],\n type_dict[cast(str, sink.id)],\n 
linkMerge,\n getattr(sink, \"valueFrom\", None),\n )\n if check_result == \"warning\":\n validation[\"warning\"].append(SrcSink(src, sink, linkMerge, None))\n elif check_result == \"exception\":\n validation[\"exception\"].append(SrcSink(src, sink, linkMerge, None))\n return validation", "def validate_descriptions(ctx):\n\n level = {'broken': ERROR, 'loaded': WARN, 'valid': SUCCESS}\n valid_keys = str(get_fiscal_field_names()).replace('[', '[None, ')\n\n for source in ctx.obj['sources']:\n color = level[source.validation_status]\n echo('\\n{}\\n'.format(join('data', source.id, SOURCE_FILE)))\n messages = []\n\n if source.validation_status == 'broken':\n messages.append('{}'.format(source.traceback))\n\n elif source.validation_status == 'loaded':\n for e in source.validation_errors:\n error = e.replace(valid_keys, 'fiscal fields')\n messages.append(error)\n else:\n messages.append('Valid :-)')\n\n message = '{}'.format('\\n'.join(messages))\n secho(message, **color)", "def _check_sources(self):\n for source_name, source in self.sources.items():\n if \"data\" not in source or \"ref_column\" not in source:\n raise ValueError(\n \"Each source needs to have a `data` and a `ref_column` property\"\n )\n if not isinstance(source[\"data\"], pd.DataFrame):\n raise ValueError(\n \"The `data` property of each source must contain a DatFrame\"\n )\n if not isinstance(source[\"data\"].index, pd.DatetimeIndex):\n raise ValueError(\n \"The `data` DataFrame must have a pd.DatetimeIndex for each source\"\n )\n if source[\"data\"].index.duplicated().any():\n raise ValueError(\n \"The input dataframe must not have duplicate index values, \"\n \"convert the data into a normalized wide format\"\n )\n if (\n not isinstance(source[\"ref_column\"], str)\n or source[\"ref_column\"] not in source[\"data\"].columns\n ):\n raise ValueError(\n \"Each source must have a string specifying the reference column, and the reference\"\n \"column must be available in the source's DataFrame\"\n )\n if self.ref_source_name not in self.sources.keys():\n raise ValueError(\n \"The reference source name must be available in the source dict\"\n )", "def schema_val(self, messages=None):\n self._ymlproc = YAMLProcessor(self._ymlfile)\n self._schemaproc = SchemaProcessor(self._schemafile)\n valid = True\n\n log.debug(\n \"BEGIN: Schema-based validation for YAML '%s' with schema '%s'\",\n self._ymlfile,\n self._schemafile,\n )\n\n # Make sure the yml and schema have been loaded\n if self._ymlproc.loaded and self._schemaproc.loaded:\n # Load all of the yaml documents. Could be more than one in the same YAML file.\n for docnum, data in enumerate(\n yaml.load_all(self._ymlproc.data, Loader=yaml.Loader)\n ):\n # Since YAML allows integer keys but JSON does not, we need to first\n # dump the data as a JSON string to encode all of the potential integers\n # as strings, and then read it back out into the YAML format. 
Kind of\n # a clunky workaround but it works as expected.\n data = yaml.load(json.dumps(data), Loader=yaml.Loader)\n\n # Now we want to get a validator ready\n v = jsonschema.Draft4Validator(self._schemaproc.data)\n\n # Loop through the errors (if any) and set valid = False if any are found\n # Display the error message\n for error in v.iter_errors(data):\n msg = (\n f\"Schema-based validation failed for YAML file ' {self._ymlfile} '\"\n )\n self.ehandler.process(\n docnum, self._ymlproc.doclines, error, messages\n )\n valid = False\n\n if not valid:\n log.error(msg)\n\n elif not self._ymlproc.loaded:\n raise util.YAMLError(\"YAML must be loaded in order to validate.\")\n elif not self._schemaproc.loaded:\n raise jsonschema.SchemaError(\"Schema must be loaded in order to validate.\")\n\n log.debug(\"END: Schema-based validation complete for '%s'\", self._ymlfile)\n return valid", "def ValidateSchema(self):\n schema_errors = []\n list_of_invalid_schema = yaml_validator.Validator(SCHEMA_PATH).Iterate(\n self.parsed_yaml)\n for error in list_of_invalid_schema:\n schema_errors.append('{}'.format(error))\n if schema_errors:\n return InvalidSchemaError(invalid_schema_reasons=schema_errors)\n return None", "def __validate_source(self) -> None:\n source = self.search_source\n self.review_manager.logger.debug(f\"Validate SearchSource {source.filename}\")\n\n # if \"query\" not in source.search_parameters:\n # Note : for md-sources, there is no query parameter.\n # raise colrev_exceptions.InvalidQueryException(\n # f\"Source missing query search_parameter ({source.filename})\"\n # )\n\n if \"query\" in source.search_parameters:\n pass\n # if \"simple_query_string\" in source.search_parameters[\"query\"]:\n # if \"query\" in source.search_parameters[\"query\"][\"simple_query_string\"]:\n # pass\n # else:\n # raise colrev_exceptions.InvalidQueryException(\n # \"Source missing query/simple_query_string/query \"\n # f\"search_parameter ({source.filename})\"\n # )\n\n # elif \"url\" in source.search_parameters[\"query\"]:\n # pass\n # # else:\n # raise colrev_exceptions.InvalidQueryException(\n # f\"Source missing query/query search_parameter ({source.filename})\"\n # )\n\n self.review_manager.logger.debug(f\"SearchSource {source.filename} validated\")", "def validate_schema(datamapping, schema):\n try:\n raw_args = {k: v for k, v in datamapping.items()}\n args, errors = schema.deserialize(raw_args), {}\n except colander.Invalid as exc:\n args, errors = {}, exc.asdict()\n return _validation(args=args, errors=errors, raw_args=raw_args)", "def _validate_input(self) -> None:\n\n if self.config[\"input\"][\"data_type\"] == \"sftp\":\n sftp_config_keys = [\n \"sftp_host\",\n \"sftp_username\",\n \"sftp_source_path\",\n \"sftp_private_key\",\n ]\n for key in sftp_config_keys:\n if key not in self.config[\"input\"][\"config\"]:\n raise ValueError(f\"Key not present in the config: {key}\")\n\n elif self.config[\"input\"][\"data_type\"] == \"local\":\n if \"source_path\" not in self.config[\"input\"][\"config\"]:\n raise ValueError(\"Key not present in the config: source_path\")\n else:\n # Check if local_directory is absolute path. 
If not, then set it.\n local_directory = self.config[\"input\"][\"config\"][\"source_path\"]\n if isinstance(local_directory, list):\n local_directory = [\n self._get_absolute_path(local_path)\n for local_path in local_directory]\n for local_path in local_directory:\n if not os.path.exists(local_path):\n raise ValueError(f\"Path does not exist: {local_path}\")\n else:\n local_directory = utils.get_absolute_path(local_directory)\n if not os.path.exists(local_directory):\n raise ValueError(f\"Path does not exist: {local_directory}\")\n self.config[\"input\"][\"local_directory\"] = local_directory\n\n # Raise error if data_format it not valid input formats\n if self.config[\"input\"][\"data_format\"] not in self.valid_input_formats:\n raise ValueError(\"Invalid value for key in input: data_format\")\n\n elif self.config[\"input\"][\"data_type\"] == \"mock\":\n self._update_mock_data()\n\n else:\n raise ValueError(\"Invalid value for the key: data_location\")", "def validating(jsondata, schema):\n print(\"Validating the input data using jsonschema:\")\n try:\n validate(jsondata, schema)\n sys.stdout.write(\"Validation OK\\n\")\n except jsonschema.exceptions.ValidationError as ve:\n sys.stderr.write(\"Record #{}: ERROR\\n\".format(jsondata))\n sys.stderr.write(str(ve) + \"\\n\")", "def validate(self):\n file_content = filesystem_utils.get_content(self._file_path)\n self.parse_yaml_file(file_content)", "def load_local(self, source: Union[str, Path, TextIO] = None):\n if isinstance(source, str):\n source = Path(source)\n\n def _load_schema(path, file_data):\n try:\n return json.loads(file_data)\n except JSONDecodeError as e:\n raise InvalidSchema(\"Could not parse schema file {}: {}\".format(path, e.msg))\n\n if source is None:\n # No source, read from stdin\n schema = _load_schema(\"[stdin]\", sys.stdin.read())\n elif hasattr(source, \"is_dir\") and source.is_dir():\n # Read each json file in directory\n schemas = []\n for file_path in source.glob(\"*.json\"):\n schemas.append(_load_schema(file_path, file_path.read_text(encoding=\"utf8\")))\n schema = ChainMap(*schemas)\n elif hasattr(source, \"read\"):\n # Read file handle\n schema = _load_schema(source.name, source.read())\n elif hasattr(source, \"read_text\"):\n # Read pathlib Path\n schema = _load_schema(source.name, source.read_text())\n else:\n raise InvalidSchema(\n \"Did not recognise provided source as either a \"\n \"directory path, file path, or file handle: {}\".format(source)\n )\n\n for api_name, api_schema in schema.items():\n self.local_schemas[api_name] = api_schema\n\n return schema", "def validate(self) -> Tuple[bool, Optional[ValidationError]]:\n path = os.path.dirname(__file__)\n with open(f\"{path}/schemas/{self.__resource_name__}.json\") as f:\n try:\n jsonschema.validate(self.__dict__, json.loads(f.read()))\n return True, None\n except SchemaError:\n # TODO: Logging\n raise InternalError\n except ValidationError as ve:\n # TODO: Logging\n return False, ve", "def test_validator_datafeed():\n\n v = validator.SchemaValidator(\n './tests/files/validator_constraints.ttl',\n './tests/files/test_report.html')\n\n with open('./tests/files/validator_data_feed.json') as f:\n dump = json.load(f)\n\n v.add_entity(dump)\n v.close()\n os.remove('./tests/files/test_report.html')\n\n expected = []\n expected.append(utils.ResultRow(\n 'Id: movieid1',\n 'Name of movie must be string.',\n '.name',\n '123',\n 'Violation'\n ))\n expected.append(utils.ResultRow(\n 'Id: id2',\n 'Name of person must be string.',\n '.actor.name',\n '123',\n 
'Warning'\n ))\n expected.append(utils.ResultRow(\n 'Id: id3',\n 'Name of organization must be string.',\n '.creator.url',\n '345',\n 'Info'\n ))\n expected.append(utils.ResultRow(\n 'Id: id3',\n 'Name of person must be string.',\n '.creator.name',\n '123',\n 'Warning'\n ))\n\n assert(len(v.reports['Movie']) == len(expected)\n ), 'Expected report count not equal.'\n\n for m in expected:\n assert m in v.reports['Movie'], 'Expected report not generated.'", "def validate(args):\n if args.dataset_dir is None:\n error('Must select dataset with --dataset-dir')\n else:\n # Validation set must be given.\n validate_config.dataset = {\n 'dataset_dir': args.dataset_dir\n }\n # Checks and sets the type of noise.\n if args.noise not in corruption_types:\n error('Unknown noise type', args.noise)\n validate_config.noise = corruption_types[args.noise]\n # Specify the pickled file of the trained network.\n if args.network_snapshot is None:\n error('Must specify trained network filename with --network-snapshot')\n validate_config.network_snapshot = args.network_snapshot\n # Choose the validate function as the function to run.\n submit_config.run_func = validation.validate\n # Runs this validation setup.\n submit.submit_run(submit_config, **validate_config)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper method that takes in a sentence and the current spaCy entity type, and returns True if that type is in the given sentence (used for filtering)
def sentence_has_type(sentence, type):
    for word in sentence.ents:
        if word.label_ == type:
            return True
    return False
[ "def is_sentence(self):\n return self.parent == 'S'", "def is_content_sentence(symbol_stream):\n return any(symbol[0] is not None and in_ranges(symbol[0], WORD_RANGES)\n for symbol in symbol_stream)", "def __contains__(self, s):\n search_by_expression = self._valid_search(s)\n return any(search_by_expression(l) for l in self.lines)", "def is_present(self, timepoint, word): \n model = self.get_model(timepoint)\n return word in model.word.values", "def is_real_sentence(only_token, sentence):\n \n first_word = \"\"\n if only_token:\n first_word = sentence[0]\n else:\n first_word = sentence[0][0]\n\n if '---------------------' in first_word or first_word == '-DOCSTART-':\n return False\n else:\n return True", "def contains_entity(tag):\n return bool(ENTITY_CHARS_RE.search(tag))", "def __contains__(self, item):\n if isinstance(item, six.string_types):\n return item in self.terms\n elif isinstance(item, Term):\n return item.id in self.terms\n else:\n raise TypeError(\"'in <Ontology>' requires string or Term as left \"\n \"operand, not {}\".format(type(item)))", "def is_included(content, words):", "def is_org(nlp, text, company_name):\n \n doc = nlp(text) #select text of the news\n for t in doc.ents:\n \t# print(t)\n \tif t.lower_ == company_name: #if company name is called\n \t\tif t.label_ == \"ORG\": #check they actually mean the company\n \t\t\treturn True\n return False", "def generates(self, sentence):\n try:\n parses = self._parser.parse(sentence.get_words())\n return list(parses) != []\n except:\n return False", "def is_word(word):\n return (True in (wordtype(word)))", "def contains(collection: Iterable, entity: Any) -> bool:\n return entity in collection", "def clause_is_present(clause, search):\n\n for elem in surface_selectables(search):\n if clause == elem: # use == here so that Annotated's compare\n return True\n else:\n return False", "def includes_phrase(self, text):\n\n phrase = self.phrase.lower()\n phrase_words = phrase.split(' ')\n\n # remove punctuation\n text = [' ' if c in string.punctuation else c for c in text.lower()]\n text_words = [word for word in ''.join(text).split(' ') if len(word)]\n\n if len(phrase_words) == 1:\n return phrase in text_words\n\n # work through multiple words\n try:\n start_w_index = text_words.index(phrase_words[0])\n phrase_word_count = 1\n index = start_w_index + phrase_word_count\n status = False\n\n # as long as other words follow\n while index < len(text_words):\n if phrase_words[phrase_word_count] == text_words[index]:\n phrase_word_count += 1\n else: # word is not in phrase\n break\n if phrase_word_count == len(phrase_words): # all words\n status = True\n break\n index += 1\n return status\n except ValueError: # first phrase word not in text\n return False", "def has_word(self, word):\n if word in self.model:\n return True\n return False", "def hasWordBeginningWith(sentence: str, query: str) -> bool:\n words = [sentence]\n\n # split the sentence into words by each valid separator\n for char in \" ,.'-\":\n if char not in query:\n newWords = []\n # split each word by the next separator\n for word in words:\n newWords += word.split(char)\n\n words = newWords # replace the previous list of words by the new one\n\n # Check if one of the words begins with the query, ignore casing\n for word in words:\n if word.lower().startswith(query.lower()):\n return True\n\n return False", "def is_word_in_text(what_word, where_word) -> bool:\n return where_word.find(what_word) != -1", "def is_in(self, obj):\n for w in self.words:\n if w in obj:\n return 
True\n\n return False", "def is_superlative(tok):\n text = tok.text.lower()\n if text in irr_superlatives:\n return True\n elif re.search(\"est$\", text):\n return text[:-3] == tok.lemma_\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Go through all sentences in parsed, extract regex matches, and return the most frequent of these
def extract_frequent_regex_match(parsed, regex):
    regex_matches = []
    for sentence in parsed:
        matches = re.findall(regex, sentence.text)
        if matches:
            regex_matches.extend(matches)
    if regex_matches:
        return Counter(regex_matches)
    else:
        return '___no_match___'
[ "def most_frequent(s):\n words=[]\n words=s.split(\" \")\n words=sorted(words)\n word_count={}\n counts=[]\n for word in words:\n counts.append(words.count(word))\n m=counts.index(max(counts))\n return (words[m])\n \n # USING OrderedDict\n '''\n for word in words:\n word_count[word]=words.count(word)\n max_count=max(word_count.values())\n for word in OrderedDict(sorted(word_count.items(), key=lambda t:t[0])):\n if word_count[word]==ma\n x_count:\n return (\"Using OrderedDict:\", word)\n '''\n \n \n \n # HINT: Use the built-in split() function to transform the string s into an\n # array\n \n # HINT: Sort the new array by using the built-in sorted() function or\n # .sort() list method\n \n # HINT: Iterate through the array and count each occurance of every word\n # using the .count() list method\n \n # HINT: Find the number of times the most common word appears using max()\n \n # HINT: Locate the index of the most frequently seen word\n \n # HINT: Return the most frequent word. Remember that if there is a tie,\n # return the first (tied) word in alphabetical order.", "def most_frequent_bigrams(text, n, lowercase=False):\r\n # YOUR CODE HERE\r\n\r\n from collections import Counter\r\n\r\n if lowercase:\r\n words = [word.strip().lower() for word in text.split()]\r\n else:\r\n words = [word.strip() for word in text.split()]\r\n\r\n bigrams = list(zip(words,words[1:]))\r\n bi_count = Counter(bigrams)\r\n\r\n most_freq_biagram = []\r\n\r\n for i,j in bi_count.most_common(n):\r\n most_freq_biagram.append(i)\r\n\r\n return most_freq_biagram\r\n\r\n pass", "def most_frequent(s):\n\n # HINT: Use the built-in split() function to transform the string s into an\n # array\n words = s.split(\" \")\n\n # HINT: Sort the new array by using the built-in sorted() function or\n # .sort() list method\n\n # HINT: Iterate through the array and count each occurance of every word\n # using the .count() list method\n\n dict = {}\n for word in words:\n dict[word] = words.count(word)\n\n # HINT: Find the number of times the most common word appears using max()\n result = max(dict, key=dict.get)\n\n # HINT: Locate the index of the most frequently seen word\n\n # HINT: Return the most frequent word. 
Remember that if there is a tie,\n # return the first (tied) word in alphabetical order.\n\n return result", "def most_frequent_words(subreddit):\n freq_dists = []\n names = []\n titles_all = []\n for name, data in subreddit.items()[-1:]:\n titles_subs = []\n all_words = ['']\n for sub_id, sub in data.items():\n all_words = \" \".join([fixer(comment, True, False) \n for comment in sub.comments]).split()\n \n titles_subs.append(sub.title) \n \n freq_dist = nltk.probability.FreqDist(all_words)\n names.append(name)\n titles_all.append(titles_subs)\n freq_dists.append(freq_dist)\n return names, freq_dists, titles_all", "def top_sentences(query, sentences, idfs, n):\n\n ranks = {sentence:0 for sentence in sentences.keys()}\n\n #--Scan each sentence and get its rank (matching word measure):\n for sentence, words in sentences.items():\n #--Get words in BOTH sentence and query-string:\n MATCHED_words = query.intersection(words)\n #--IDF score for each sentence:\n for word in MATCHED_words:\n ranks[sentence] += idfs[word]\n\n #--Sort the resulting dictionary, high-to-low:\n topsentences = [ (val, key) for (key, val) in ranks.items() ]\n topsentences.sort(key=lambda x:x[0], reverse=True)\n\n #--Check for ties, if so get most dense, highest-[idf]ranked sentence:\n tied = []\n for idf, sentence in topsentences:\n if idf == topsentences[0][0]:\n tied.append(sentence)\n else: #--else on to 2nd highest, so stop looking...\n break\n\n if len(tied) == n or len(tied) < n:\n return tied\n\n #--Get density of highest-ranked [tied] sentences:\n densities = {sentence:0 for sentence in tied}\n for tie in tied:\n count = 0\n words = tokenize(tie)\n for word in words:\n if word in query:\n count += 1\n\n density = count / len(tie)\n densities[tie] = density\n\n\n\n #--Return the 'n' matchiest sentences; if a tie, return densest sentence:\n D = [ (val, key) for (key, val) in densities.items() ]\n D.sort(key=lambda x:x[0], reverse=True)\n ans = [ sentence for density, sentence in D[:n] ]\n #\n #\n #\n return ans", "def test_run():\n print(most_frequent(\"cat bat mat mat cat\")) # output: 'cat'\n print(most_frequent(\"betty bought a bit of butter but the butter was bitter\")) # output: 'butter'", "def three_most_common_words(path):\n\n '''\n regex pattern details:\n \n (?:(?<=\\s)|(?<=^)) : Positive Lookbehind for space character or beginning of string\n ([a-zA-Z]+ : Match 1 or more alphabetic characters\n [-]? 
: Match 0 or 1 hyphens\n [a-zA-Z]*) - Match 0 or more alphabetic characters\n (?=\\s) - Positive Lookahead for space character\n '''\n word_pattern = re.compile(\"(?:(?<=\\s)|(?<=^))([a-zA-Z]+[-]?[a-zA-Z]*)(?=\\s)\")\n word_occurrences = {}\n\n try:\n with open(path) as file:\n for line in file:\n # find matching words and convert to lowercase\n words = [word.lower() for word in word_pattern.findall(line)]\n\n # increment word count for each word\n for word in words:\n if word in word_occurrences:\n word_occurrences[word] += 1\n else:\n word_occurrences[word] = 1\n\n # sort dictionary values and take top three\n three_tuples = sorted(word_occurrences.items(), key=operator.itemgetter(1), reverse=True)[:3]\n three_words = [i[0] for i in three_tuples]\n\n except FileNotFoundError:\n print(path + \": No such file or directory\")\n sys.exit(1)\n\n return three_words", "def most_frequent_Nouns(self, tagger=0, magnitude=20):\n dict_nouns = Counter()\n for tokenized_review in self.tokenized_corpora:\n part_of_speech = np.array(pos_tag(tokenized_review))\n part_of_speech_nouns_only = np.where(np.logical_or(part_of_speech == 'NN', part_of_speech == 'NNS'))\n nouns_indexes = part_of_speech_nouns_only[0]\n for i in nouns_indexes:\n noun = tokenized_review[i]\n dict_nouns[noun] += 1\n return dict_nouns.most_common(magnitude)", "def most_frequent_words(self, corpora, magnitude=100):\n all_words = []\n for review in corpora:\n review_vector = self.tokenizer.tokenize(review) #tokenize \n if self.lemmatization_enabled:\n review_vector = [self.stemmer.lemmatize(word) for word in review_vector] #lem\n else: \n review_vector = [self.stemmer.stem(word) for word in review_vector] #stem\n for word in review_vector:\n word.lower()\n all_words.append(word)\n return np.array(FreqDist(all_words).most_common(magnitude))[:,0]", "def get_word_largest_score(sentence):\n scores = []\n split_sentence = sentence.split()\n nopunct = []\n for i in split_sentence:\n nopunct.append(remove_punctuation(i))\n scores.append(score_word(remove_punctuation(i)))\n return max(zip(scores,nopunct), key=get_word)", "def most_frequent_words(text, n, lowercase=False):\r\n # YOUR CODE HERE\r\n\r\n from collections import Counter\r\n\r\n if lowercase:\r\n words = [word.strip().lower() for word in text.split()]\r\n else:\r\n words = [word.strip() for word in text.split()]\r\n\r\n word_count = Counter(words)\r\n # most_freq = list(word_count.most_common(n))\r\n\r\n most_freq_list = []\r\n for i,j in word_count.most_common(n):\r\n most_freq_list.append(i)\r\n\r\n return most_freq_list\r\n\r\n pass", "def extract_sentences_of_frequent_words(self):\n if not self.frequent_words:\n logging.info('%s:FileInterpreter: Cannot find sentences of '\\\n 'frequent words, file %s does not have frequent '\\\n 'words.', script_name, self.filename)\n return\n\n sentences_of_frequent_words = []\n # Parse content and separate sentences (tokenise sentences)\n tokenized_sentences = nltk.tokenize.sent_tokenize( self.content )\n\n # For every word we collected as frequent,\n for word in self.frequent_words:\n # Check if word is included in sentence\n word_matched_sentences = [ sentence\n for sentence in tokenized_sentences\n if word[0] in nltk.tokenize\n .word_tokenize( sentence.lower() ) ]\n\n sentences_of_frequent_words.append(word_matched_sentences)\n\n self.sentences_of_frequent_words = sentences_of_frequent_words", "def sentence_frequency(self, sentence, text):\n\t\tresult = re.findall(sentence + \"+\", text)\n\t\treturn len(result)", "def main():\n # TODO: ask 
the user to input some `text`\n\n\n # TODO: count the number of occurences of each word in the text\n\n\n # TODO: sort by descending order of occurences and display the result", "def most_common_words(df, sentence, cl, label, **kwargs):\n\n df_ = df[df[cl]==label]\n df_ = df_[sentence].tolist()\n docx = ' '.join(str(x) for x in df_)\n docx = docx.split()\n word_counter = Counter(docx)\n\n top = 10\n\n for key, value in kwargs.items():\n if key == 'top':\n top = value\n\n for word, count in word_counter.most_common(top):\n print(word, ': ', count)", "def most_frequent(data: list) -> str:\n #return max(Counter(data), key=lambda i: Counter(data)[i])\n return max(data, key=data.count)", "def _most_similar(self, *args, **kwargs):\n topn = kwargs.get(\"topn\", 10)\n # Query for extra, since we filter some bad ones out\n kwargs[\"topn\"] = topn + 20\n words = self._model.most_similar(*args, **kwargs)\n words = [(w.lower(), n) for w, n in words]\n\n exclude_substrings = True\n if exclude_substrings:\n input_words = kwargs[\"positive\"]\n words = [\n (w.lower(), round(n, 3))\n for w, n in words\n if not (\n any(c not in ascii_letters for c in w) or\n any(w in i_w for i_w in input_words) or\n any(i_w in w for i_w in input_words) or\n any(editdistance.eval(w, i_w) <= 3 for i_w in input_words)\n )\n ]\n return words", "def cal_sent_scores(self, sentence):\n word_count = 0\n max_word_pos_score = 0\n max_word_neg_score = 0\n for word, tag in sentence:\n pos_score = 0\n neg_score = 0\n synsets = self.iswn.senti_synsets(word, tag) \n num_synsets = len(synsets) \n word_pos_score = 0\n word_neg_score = 0\n if num_synsets >=1 : \n for synset in synsets:\n word_pos_score += synset.pos_score\n word_neg_score += synset.neg_score\n word_pos_score = word_pos_score/num_synsets #average synsets scores\n word_neg_score = word_neg_score/num_synsets\n if max_word_pos_score < word_pos_score :\n max_word_pos_score = word_pos_score\n if max_word_neg_score < word_neg_score :\n max_word_neg_score = word_neg_score\n \n return max_word_pos_score, max_word_neg_score", "def genreClassifier(test_tokens, genre_models):\n tokens = test_tokens\n most_common = Counter(test_tokens).most_common()\n top100 = [x[0] for x in most_common]\n top100 = top100[:100]\n\n models = {\n 'children': genre_models['children']['good_turing_uni'], \n 'history': genre_models['history']['good_turing_uni'], \n 'crime': genre_models['crime']['good_turing_uni']\n }\n\n probs = {'children':1, 'history': 1, 'crime': 1}\n for word in top100:\n for genre in probs:\n if word in models[genre]:\n probs[genre] *= models[genre][word]\n print probs\n return max(probs, key=probs.get)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter parsed so that it only contains sentences with a matching regex pattern
def filter_regex_match_sentences(parsed, pattern):
    matches = list(filter(lambda sent: re.findall(pattern, sent.text), parsed))
    return matches
[ "def __filter_text(self, text):\r\n analyzer_num_tag = self.analyzer_type.num\r\n analyzer_noun_tag = self.analyzer_type.noun\r\n analyzer_loc_tag = self.analyzer_type.loc\r\n surname = clean_text.get_surname(self.url)\r\n sentence = []\r\n out_text = []\r\n surname_re = re.compile(r'' + surname)\r\n for sent in text:\r\n for token in sent:\r\n if (analyzer_num_tag in token and (self.pattern.match(token[0]) is not None)) or (\r\n analyzer_loc_tag in token and analyzer_noun_tag in token and surname_re.match(\r\n str(token[0])) is None):\r\n sentence.append(token)\r\n if [tup for tup in sentence if analyzer_num_tag in tup]:\r\n if [tup for tup in sentence if analyzer_loc_tag in tup]:\r\n out_text.append(sentence)\r\n sentence = []\r\n return out_text", "def syntactic_filter(sentences, bot_dict):\n output_sentences = []\n print \"Before syntax filter there were \" + str(len(sentences)) + \" sentences.\"\n for sentence in sentences:\n print \"==================\"\n print str(sentence) + \"\\n\"\n tokens = nltk.tokenize.wordpunct_tokenize(sentence)\n posTagged = nltk.pos_tag(tokens)\n justTags = []\n for word, tag in posTagged:\n justTags.append(tag)\n print str(justTags) + \"\\n\"\n rd_parser = nltk.RecursiveDescentParser(grammar1)\n try:\n if len(rd_parser.nbest_parse(justTags)) > 0:\n output_sentences.append(sentence)\n except ValueError:\n pass\n print \"After the syntax filter there were \" + str(len(output_sentences)) + \" sentences.\"\n print output_sentences\n return output_sentences", "def filter_paragraph(p):\n # Expect a minimum number of words.\n tokens = p.split()\n if len(tokens) < 6:\n return True\n\n # Require some letters.\n if not re.search(_SOME_ALPHA_RE, p):\n return True\n\n # Keep this one at the end, probably the most complicated logic.\n # We try to detect sentences, which should have a minimum of 3 tokens\n # with only alphabetic characters.\n last = 0\n found_sentence = False\n num_alpha = 0\n for i, x in enumerate(tokens):\n if x == '.':\n if i - last > 3 and num_alpha >= 3:\n found_sentence = True\n break\n last = i\n num_alpha = 0\n if re.match(_ONLY_ALPHA_RE, x):\n num_alpha += 1\n if not found_sentence:\n return True\n\n return False", "def filter_study(title, condition, ec):\n lines = [title + '.']\n for l in condition.split('\\n'):\n lines.append(l + '.')\n segments = re.split(\n r'\\n+|(?:[A-Za-z0-9\\(\\)]{2,}\\. +)|(?:[0-9]+\\. 
+)|(?:[A-Z][A-Za-z]+ )+?[A-Z][A-Za-z]+: +|; +| (?=[A-Z][a-z])',\n ec, flags=re.MULTILINE)\n for i, l in enumerate(segments):\n l = l.strip()\n if l:\n if l:\n if ' ' in l and l[-1] not in string.punctuation:\n l += '.'\n lines.append(l)\n text = '\\n'.join(lines)\n cp = subprocess.run(['iconv', '-t', 'ascii//TRANSLIT'], input=text, stdout=subprocess.PIPE, universal_newlines=True)\n return cp.stdout", "def filter_sentence(citing_sentence):\r\n\r\n if citing_sentence == None:\r\n return \" \" #filtered_sentences_noNone.append(\" \")\r\n\r\n if citing_sentence != None:\r\n citing_sentence = re.sub(\"[\\<\\[].*?[\\>\\]]\", \"\", citing_sentence) #to remove citations\r\n #citing_sentence = re.sub(\"[*?]\", \"\", citing_sentence) #to remove citations\r\n citing_sentence = re.sub('[0-9]+', '', citing_sentence)\r\n to_delete = [\"Introduction\", \"Background\", \"Conclusions\",\"the\", \"and\", \"therefore\", \"thus\", \"et\", \"al.\"]#, \"\\n\", \"<\\sub>\", \"bibr\", \"ref\", \"rid\", \"type\", \"xref\"] #, \"/p\\np\\n\", \"\\p\"]\r\n for word in to_delete:\r\n citing_sentence = re.sub(word, \"\", citing_sentence)\r\n #print(word)\r\n #print(citing_sentence)\r\n #citing_sentence = re.sub(\"\\?\", \"\", citing_sentence) #to remove citations\r\n citing_sentence = ' '.join([word for word in citing_sentence.split() if word not in (stopwords.words('english'))])\r\n return citing_sentence", "def sentence_filter(self, sentence) -> str:\n self.sentence = sentence\n\n filtered_sentence = self.special_filter(self.bracket_filter(self.sentence))\n\n return filtered_sentence", "def extract_statements(\n text=None, \n nlp=None, \n make_sentence=False, \n n_min_word_paragraph=50, \n n_max_word_paragraph=200\n ):\n \n # remove non ASCII characters\n text = remove_non_ascii(text)\n \n \n lines = []\n prev = \"\"\n n_words = 0\n for line in text.split('\\n'):\n # aggregate consecutive lines where text may be broken down\n # only if next line starts with a space or previous does not end with punctation mark and between\n if((line.startswith(' ') or not prev.endswith(('.','?', '!'))) and n_words <= n_max_word_paragraph):\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n # min words in paragraph\n elif n_words <=n_min_word_paragraph:\n prev = prev + ' ' + line\n n_words = len(prev.split())\n \n else:\n # new paragraph\n lines.append(prev)\n prev = line\n n_words = 0\n \n # don't forget left-over paragraph\n lines.append(prev)\n # clean paragraphs from extra space, unwanted characters, urls, etc.\n # best effort clean up, consider a more versatile cleaner\n sentences = []\n for line in lines:\n \n # removing header number\n line = re.sub(r'^\\s?\\d+(.*)$', r'\\1', line)\n # removing trailing spaces\n line = line.strip()\n # words may be split between lines, ensure we link them back together\n line = re.sub('\\\\s?-\\\\s?', '-', line)\n # remove space prior to punctuation\n line = re.sub(r'\\s?([,:;\\.])', r'\\1', line)\n # ESG contains a lot of figures that are not relevant to grammatical structure\n line = re.sub(r'\\d{5,}', r' ', line)\n # remove mentions of URLs\n line = re.sub(r'((http|https)\\:\\/\\/)?[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*', r' ', line)\n # remove multiple spaces\n line = re.sub('\\\\s+', ' ', line)\n \n # split paragraphs into well defined sentences using spacy\n if make_sentence:\n try:\n for part in list(nlp(line).sents):\n part_strip = str(part).strip()\n # remove senteces with only 30 characters\n if len(part_strip) > 30:\n 
sentences.append(part_strip)\n except ValueError:\n print(\"Check if nlp model was loaded\")\n else:\n sentences.append(line)\n \n return sentences", "def find_sentences_with_entity(requested_entity, text):\n\n accepted_splits = []\n \n for m in re.finditer(requested_entity, text): \n #goal here is to get the sentence itself instead of cutting it off in the middle, doesn't work perfectly yet\n search_area = text[m.start()-300:m.end()+300]\n splits = search_area.split('.')\n # splits = splits[1:-1]\n for split in splits:\n if requested_entity in split:\n if split not in accepted_splits:\n # st.write(split)\n accepted_splits.append(split)\n \n accepted_splits = list(set(accepted_splits))\n\n return accepted_splits", "def get_sentences(text):\n\n\n lines = re.findall(r'\\s*([A-Z].+?[\\?\\!\\.])\\s+',text,flags=re.MULTILINE | re.DOTALL)\n \n\n return [line.replace('\\n',' ') for line in lines]", "def _filter(self, tokens):\n\t\tz = filter(lambda w: len(w) > 1 and w not in self.stopwords, tokens)\n\t\treturn [strip_special(w) for w in z]", "def grept(regex, p_raw,s=False,lc=False):\n results = []\n if type(regex) == type(list()):\n regex_list = regex\n else:\n regex_list = [regex]\n\n if type(p_raw) == type(list()):\n str_list = p_raw\n else:\n str_list = [p_raw]\n \n for entry in str_list:\n for line in entry.split('\\n'):\n for re_tmp in regex_list:\n if re.search(re_tmp, line) != None:\n results.append(line)\n continue\n return results", "def pre_process(self, comment):\n comment = comment.strip()\n tokenizer = nltk.tokenize.RegexpTokenizer(r'\\w+')\n tokenized_c = tokenizer.tokenize(comment)\n filtered_c = [w for w in tokenized_c if not w.lower() in self.stop_words]\n return filtered_c", "def special_filter(self, sentence):\n self.sentence = sentence\n\n SENTENCE_MARK = ['?', '!']\n NOISE = ['o', 'n', 'u', 'b', 'l']\n EXCEPT = ['/', '+', '*', '-', '@', '$', '^', '&', '[', ']', '=', ':', ';', '.', ',']\n\n new_sentence = str()\n\n for idx, ch in enumerate(self.sentence):\n if ch not in SENTENCE_MARK:\n # o/, n/ 등 처리\n if idx + 1 < len(self.sentence) and ch in NOISE and self.sentence[idx + 1] == '/':\n continue\n\n if ch == '#':\n new_sentence += '샾'\n\n elif ch not in EXCEPT:\n new_sentence += ch\n\n pattern = re.compile(r'\\s\\s+')\n\n new_sentence = re.sub(pattern, ' ', new_sentence.strip())\n\n return new_sentence", "def extract_uncertain(): #Add function strings\n sudan_processed = remove_stopwords()\n return [row for row in sudan_processed if bool(re.search(\"reportedly\", row[-5]))]", "def syntactic_filter_fast(sentences, bot_dict):\n output_sentences = []\n print \"Before syntax filter there were \" + str(len(sentences)) + \" sentences.\"\n for sentence in sentences:\n print \"==================\"\n print str(sentence) + \"\\n\"\n tokens = nltk.tokenize.wordpunct_tokenize(sentence)\n justTags = []\n # print self.pos_lexicon_word_pos\n for word in tokens[:-1]:\n tag = random.choice(bot_dict[word])\n justTags.append(tag)\n justTags.append(tokens[-1])\n print str(justTags) + \"\\n\"\n rd_parser = nltk.RecursiveDescentParser(grammar1)\n try:\n if len(rd_parser.nbest_parse(justTags)) > 0:\n output_sentences.append(sentence)\n except ValueError:\n pass\n print \"After the syntax filter there were \" + str(len(output_sentences)) + \" sentences.\"\n print output_sentences\n return output_sentences", "def handle_sentence(story, story_filename, sentence_num, output_file, stats,\n filter_list, filter_stats):\n # parse the sentence\n # now it is just a matter of iterating over our items and 
removing them\n # one by one\n # first, get just the set we care about\n count = 0\n\n # get the first dependency tree\n dsent = story.dparsed_sentences[sentence_num]\n sent = story.original_sentences[sentence_num]\n\n # get the triples of the dep graph\n p = [list(parse.triples()) for parse in dsent]\n\n dsent = dsent[0]\n interesting_records = dsent.nodes\n for filter_func in filter_list:\n pre_filter_count = len(interesting_records)\n\n interesting_records = {k: v for k, v in interesting_records.items() if\n filter_func(sent, dsent, k, v)}\n\n post_filter_count = len(interesting_records)\n\n # Add this to our statistics\n if pre_filter_count != post_filter_count:\n filter_stats[filter_func.__qualname__] +=\\\n pre_filter_count - post_filter_count\n\n count = len(interesting_records)\n\n # Get statistics based on the sentence\n results = get_statistics_for_sentence(stats, dsent.nodes,\n interesting_records)\n\n # if the count is greater than 0, we have something to remove, so let's go!\n if count == 0:\n return results\n\n # INTERESTING SENTENCE FOUND - let's write it out to the file\n words_with_apos = 0\n words_seen = -1\n\n for i, n in dsent.nodes.items():\n words_seen += 1\n if i == 0:\n continue\n # Weird quirk for counting - we need to count apostrophes differently\n if \"'\" in n['word']:\n words_with_apos += 1\n\n words_to_remove = []\n # check to see if the node has any of the phenomena we care about\n # TODO DSF see if it is in the interesting_records list??\n if i in interesting_records.keys():\n # This is the occurrence we should consider,\n # so do the work\n words_to_remove.append(words_seen - words_with_apos)\n\n # also remove any dependent nodes\n remove_dependent_nodes(dsent.nodes, i, words_to_remove)\n words_to_remove.sort()\n # Now, let's build up the sentence into parts:\n # 1) Before the words to be removed\n # 2) the words to be removed\n # 3) the words after the words to be removed .\n to_delete = ''\n split_sent = sent.split()\n last_ind_pre_remove = words_to_remove[0] - 1\n if last_ind_pre_remove < 0:\n last_ind_pre_remove = 0\n\n first_ind_post_remove = words_to_remove[-1]\n if first_ind_post_remove > len(split_sent):\n first_ind_post_remove = len(split_sent) - 1\n\n preremove = \" \".join(split_sent[0: last_ind_pre_remove]).strip()\n postremove = \" \".join(split_sent[first_ind_post_remove:]).strip()\n for i in split_sent[\n words_to_remove[0] - 1: words_to_remove[-1]\n ]:\n to_delete += i + ' '\n to_delete = to_delete[:-1].strip()\n\n # print(split_sent)\n # print(\"Start: \" + str(words_to_remove[0]) + \"End: \" +\n # str(words_to_remove[-1]))\n # print(preremove + ' [' + to_delete + '] ' + postremove)\n\n # Fix up our punctuation a bit\n preremove, to_delete, postremove = clean_sentence(preremove,\n to_delete,\n postremove)\n\n # Finally, write out the line to the fs\n write_line(story, dsent.nodes, n, words_to_remove, output_file,\n story_filename, sentence_num, preremove, to_delete,\n postremove)", "def filter(self, predicate):\n return filter(predicate, self.sentences)", "def sentences(a, b):\n\n # Turn the inputs into sentences\n asplit = splitter(a, \"se\", 0)\n bsplit = splitter(b, \"se\", 0)\n\n # Find matches\n matches = matcher(asplit, bsplit)\n\n return matches", "def analyze(text):\n\n for token in tokenize(text):\n normalized = normalize(token)\n if filter_text(normalized):\n yield normalized" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of dates, extract the average date given
def get_average_date(date_list): month_count = [0] * 12 month_dates = [[], [], [], [], [], [], [], [], [], [], [], []] # Count frequency of each month, and sort dates by their month for date in date_list: for i in range(12): if constants.MONTH_NAMES[i] in date: month_count[i] += 1 month_dates[i].append(date) # Find max count and get the sentences from that month max_count = -1 most_freq_month = -1 for j in range(12): if month_count[j] > max_count: max_count = month_count[j] most_freq_month = j freq_month_dates = month_dates[most_freq_month] freq_month = constants.MONTH_FULL_NAMES[most_freq_month] years = [] days = [] for date in freq_month_dates: nums = re.findall('([0-9]+)', date) for num in nums: if int(num) > 1900: years.append(num) elif int(num) < 31: days.append(num) counted_days = Counter(days) counted_years = Counter(years) return freq_month + ' ' + counted_days.most_common(1)[0][0] + ', ' + counted_years.most_common(1)[0][0]
[ "def get_avg(input_list):\n return sum(input_list)/len(input_list)", "def find_average(input_list):\r\n return sum(input_list)/len(input_list)", "def __calculate_average(self, list):\n return reduce(lambda x, y: x + y, list) / len(list)", "def average_calculation(stocklist):\n total_volume = 0.0\n total_sale = 0.0\n for stockdata in stocklist:\n volume_date = stockdata.get(\"Volume\")\n close_date = stockdata.get(\"Close\")\n total_sale_date = float(volume_date) * float(close_date)\n total_volume += volume_date\n total_sale += total_sale_date\n #Using for loop adds all the daily values to total values.\n\n average_calculated = total_sale / total_volume\n average_rounded = round(average_calculated, 2)\n #Rounded to 2 decimal places matching the test_mining case.\n date_month = stocklist[0].get(\"Date\")\n average_month = (date_month[0:7], average_rounded)\n\n return average_month", "def get_average_month_year(date_list):\n month_count = [0] * 12\n month_dates = [[], [], [], [], [], [], [], [], [], [], [], []]\n\n # Count frequency of each month, and sort dates by their month\n for date in date_list:\n for i in range(12):\n if constants.MONTH_NAMES[i] in date:\n month_count[i] += 1\n month_dates[i].append(date)\n\n # Find max count and get the sentences from that month\n max_count = -1\n most_freq_month = -1\n for j in range(12):\n if month_count[j] > max_count:\n max_count = month_count[j]\n most_freq_month = j\n freq_month_dates = month_dates[most_freq_month]\n freq_month = constants.MONTH_FULL_NAMES[most_freq_month]\n\n years = []\n for date in freq_month_dates:\n nums = re.findall('([0-9]+)', date)\n for num in nums:\n if int(num) > 1900:\n years.append(num)\n\n counted_years = Counter(years)\n\n return freq_month + ' ' + counted_years.most_common(1)[0][0]", "def get_date_avg(filename):\n games = read_data_from_file(filename)\n\n games_dates = [int(game.rstrip().split(\"\\t\")[2]) for game in games]\n average_year = round(sum(games_dates) / len(games_dates))\n return average_year", "def averages_on_datetimes(key):\n averages = {\n HN_KEY: [],\n DT_KEY: []\n }\n\n for dt in _get_datetimes():\n averages[HN_KEY].append(average(_get_data()[dt][HN_KEY][key]))\n averages[DT_KEY].append(average(_get_data()[dt][DT_KEY][key]))\n\n return averages", "def get_average_rates_from_date(date_from):\n sub = Rate.query\\\n .group_by('bank_id')\\\n .filter(Rate.update_time >= date_from)\\\n .order_by('update_time desc')\\\n .subquery()\n\n return db.session.query(sub.c.update_time,\n func.avg(sub.c.usd_buying),\n func.avg(sub.c.usd_selling),\n func.avg(sub.c.eur_buying),\n func.avg(sub.c.eur_selling)).one()", "def compute_ghzdays_average(data_set):\n\n daily_average = 0\n count = 0\n for row in data_set:\n # Manual submissions show up with 0 days compute. 
Can't divide by zero!\n if float(row[4]) > 0:\n daily_average += (float(row[6]) / float(row[4]))\n count += 1\n\n # Average GHz-days per day for all entries.\n daily_average = daily_average / count\n\n return daily_average", "def avg_std_per_day_between_dates(dt1, dt2):\n getter = GetFlight()\n mean_ArrDelay = np.zeros(7)\n mean_DepDelay = np.zeros(7)\n prop_cancelled = np.zeros(7)\n std_ArrDelay = np.zeros(7)\n std_DepDelay = np.zeros(7)\n try:\n for d in range(7):\n mean_ArrDelay[d], mean_DepDelay[d], std_ArrDelay[d], std_DepDelay[d] = _comp_mean_std_delay(getter.get_day_flights_between_dates(dt1, dt2, d))\n prop_cancelled[d] = _comp_cancelled_proportion(getter.get_day_flights_between_dates(dt1, dt2, d, True))\n except NotEnoughTime:\n print(\"Exception : 7 days or more are needed between dt1 and dt2\")\n del getter\n return mean_ArrDelay, mean_DepDelay, std_ArrDelay, std_DepDelay, prop_cancelled", "def calculate_averages(data):\n # data = [[\"date\", \"adj_close\", \"volume\"],...]\n if not data:\n print(\"Unable to download data \\nExiting program\")\n quit(1)\n monthly_average = []\n yearly_average = []\n\n curr_year = \"\"\n curr_month = \"\"\n month_temp = [0, 0]\n yearly_temp = [0, 0]\n for entry in data:\n\n if curr_month == \"\": # Instantiates current month and year for the first line in the values\n curr_month = entry[0].split(\"-\")[1]\n curr_year = entry[0].split(\"-\")[0]\n\n if not curr_month == entry[0].split(\"-\")[1]: # New month has begun, calculating average and appending to list\n avg = month_temp[0] / float(month_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-{curr_month}-15\", \"%Y-%m-%d\").date()\n monthly_average.append([date_obj, avg])\n month_temp = [0, 0] # Resetting values for new current month\n curr_month = entry[0].split(\"-\")[1]\n\n if not curr_year == entry[0].split(\"-\")[0]: # New year has begun, calculating average and appending it to list\n avg = yearly_temp[0] / float(yearly_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-07-01\", \"%Y-%m-%d\").date()\n yearly_average.append([date_obj, avg])\n yearly_temp = [0, 0] # Resetting values for new current year\n curr_year = entry[0].split(\"-\")[0]\n\n # ((v1*c1)+(v2*c2)+(v3*c3)+(v4*c4)...+(vn*cn)) / (v1+v2+v3+v4...+vn)\n month_temp[0] += entry[2] * entry[1] # volume * close\n month_temp[1] += entry[2] # volume\n yearly_temp[0] += entry[2] * entry[1]\n yearly_temp[1] += entry[2]\n\n # Calculating and adding averages for last month and last year of the data set\n avg = yearly_temp[0] / float(yearly_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-07-01\", \"%Y-%m-%d\").date()\n yearly_average.append([date_obj, avg])\n avg = month_temp[0] / float(month_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-{curr_month}-15\", \"%Y-%m-%d\").date()\n monthly_average.append([date_obj, avg])\n\n return monthly_average, yearly_average", "def findMovingAverage(date,window,data):\n day = date\n count = 0\n try:\n while count < window: # Going back finding the start date excluding weekends\n try:\n data[day]\n count+=1\n except KeyError:\n pass\n day -= timedelta(days=1)\n maList = []\n count1 = 0\n day += timedelta(days=1)\n while count1 < count:\n try:\n maList.append(data[day])\n count1 += 1\n except KeyError:\n pass\n day += timedelta(days=1)\n\n movingAve = round((sum(maList)/len(maList)),2)\n\n except OverflowError:\n raise OverflowError\n print(\"\\nNot enough previous data to calculate the desired moving average.\")\n print(\"Either change the simulation period or increase the 
period of the data\")\n print(\"Program terminated\\n\")\n sys.exit(1)\n raise\n\n return movingAve", "def average_day(data, state):\n\n average_day = []\n # loop through the raw data from each day\n for key, value in data.items():\n if not len(value.index) == 1440:\n continue\n # calculate minute spent in given state per hour for each animal\n per_hour_df = create_mins_per_hour(value, state)\n # average mins per hour for a given day across all animals\n average_mins_day = average_cows(per_hour_df)\n # create a list of the distributions for each day\n for i in range(0,len(average_mins_day)):\n if not len(average_day)==24:\n average_day.append([average_mins_day[i]])\n else:\n average_day[i].append(average_mins_day[i])\n # find average of distribution\n for i in range(0, len(average_day)):\n average_day[i] = sum(average_day[i])/len(average_day[i])\n return average_day", "def calc_averages(list):\n col1_sum = 0\n col2_sum = 0\n for i3 in range(len(list)):\n col1_sum += list[i3][0]\n col2_sum += list[i3][1]\n col1_avg = col1_sum / len(list)\n col2_avg = col2_sum / len(list)\n return col1_avg, col2_avg", "def get_average_expense_amount(date, next_date):\n if date is None:\n return Expense.objects().average('amount')\n \n total = 0\n num = 0\n for expense in Expense.objects():\n if expense.date >= date and expense.date <= next_date:\n total += expense.amount\n num += 1\n\n try:\n return total / num\n except ZeroDivisionError:\n return 0", "def mean_from_list(num_list):\n num_sum = sum_numbers(num_list)\n mean = num_sum / len(num_list)\n return mean", "def calculate_mean_on_range(start, end, list) -> float64:\n return float(sum(list[start:end]) / (end - start))", "def _get_duration_average(test_list):\n return arrow.get(sum([test._duration for test in test_list])/len(test_list)).format('HH:mm:ss') if test_list else NA", "def averages(*args):\r\n \r\n numbers = []\r\n\r\n for i in args:\r\n numbers.append(i)\r\n\r\n media = mean(numbers)\r\n\r\n return media" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of dates, extract the average month and year
def get_average_month_year(date_list): month_count = [0] * 12 month_dates = [[], [], [], [], [], [], [], [], [], [], [], []] # Count frequency of each month, and sort dates by their month for date in date_list: for i in range(12): if constants.MONTH_NAMES[i] in date: month_count[i] += 1 month_dates[i].append(date) # Find max count and get the sentences from that month max_count = -1 most_freq_month = -1 for j in range(12): if month_count[j] > max_count: max_count = month_count[j] most_freq_month = j freq_month_dates = month_dates[most_freq_month] freq_month = constants.MONTH_FULL_NAMES[most_freq_month] years = [] for date in freq_month_dates: nums = re.findall('([0-9]+)', date) for num in nums: if int(num) > 1900: years.append(num) counted_years = Counter(years) return freq_month + ' ' + counted_years.most_common(1)[0][0]
[ "def get_average_date(date_list):\n month_count = [0] * 12\n month_dates = [[], [], [], [], [], [], [], [], [], [], [], []]\n\n # Count frequency of each month, and sort dates by their month\n for date in date_list:\n for i in range(12):\n if constants.MONTH_NAMES[i] in date:\n month_count[i] += 1\n month_dates[i].append(date)\n\n # Find max count and get the sentences from that month\n max_count = -1\n most_freq_month = -1\n for j in range(12):\n if month_count[j] > max_count:\n max_count = month_count[j]\n most_freq_month = j\n freq_month_dates = month_dates[most_freq_month]\n freq_month = constants.MONTH_FULL_NAMES[most_freq_month]\n\n years = []\n days = []\n for date in freq_month_dates:\n nums = re.findall('([0-9]+)', date)\n for num in nums:\n if int(num) > 1900:\n years.append(num)\n elif int(num) < 31:\n days.append(num)\n\n counted_days = Counter(days)\n counted_years = Counter(years)\n\n return freq_month + ' ' + counted_days.most_common(1)[0][0] + ', ' + counted_years.most_common(1)[0][0]", "def get_date_avg(filename):\n games = read_data_from_file(filename)\n\n games_dates = [int(game.rstrip().split(\"\\t\")[2]) for game in games]\n average_year = round(sum(games_dates) / len(games_dates))\n return average_year", "def calculate_averages(data):\n # data = [[\"date\", \"adj_close\", \"volume\"],...]\n if not data:\n print(\"Unable to download data \\nExiting program\")\n quit(1)\n monthly_average = []\n yearly_average = []\n\n curr_year = \"\"\n curr_month = \"\"\n month_temp = [0, 0]\n yearly_temp = [0, 0]\n for entry in data:\n\n if curr_month == \"\": # Instantiates current month and year for the first line in the values\n curr_month = entry[0].split(\"-\")[1]\n curr_year = entry[0].split(\"-\")[0]\n\n if not curr_month == entry[0].split(\"-\")[1]: # New month has begun, calculating average and appending to list\n avg = month_temp[0] / float(month_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-{curr_month}-15\", \"%Y-%m-%d\").date()\n monthly_average.append([date_obj, avg])\n month_temp = [0, 0] # Resetting values for new current month\n curr_month = entry[0].split(\"-\")[1]\n\n if not curr_year == entry[0].split(\"-\")[0]: # New year has begun, calculating average and appending it to list\n avg = yearly_temp[0] / float(yearly_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-07-01\", \"%Y-%m-%d\").date()\n yearly_average.append([date_obj, avg])\n yearly_temp = [0, 0] # Resetting values for new current year\n curr_year = entry[0].split(\"-\")[0]\n\n # ((v1*c1)+(v2*c2)+(v3*c3)+(v4*c4)...+(vn*cn)) / (v1+v2+v3+v4...+vn)\n month_temp[0] += entry[2] * entry[1] # volume * close\n month_temp[1] += entry[2] # volume\n yearly_temp[0] += entry[2] * entry[1]\n yearly_temp[1] += entry[2]\n\n # Calculating and adding averages for last month and last year of the data set\n avg = yearly_temp[0] / float(yearly_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-07-01\", \"%Y-%m-%d\").date()\n yearly_average.append([date_obj, avg])\n avg = month_temp[0] / float(month_temp[1])\n date_obj = dt.datetime.strptime(f\"{curr_year}-{curr_month}-15\", \"%Y-%m-%d\").date()\n monthly_average.append([date_obj, avg])\n\n return monthly_average, yearly_average", "def _compute_month_avg(self, year, month):\n return self._compute_avg(self.fc.get_month_flights(year, month))", "def Average_Year(ser):\r\n Average_Year = ser.groupby([ser.index.month, ser.index.day]).mean() #Get average value at each day of year over whole series length\r\n try:\r\n Average_Year.index = 
daterangelist(datetime.datetime(2000,1,1), datetime.datetime(2001,1,1),1) #Fit these days onto a 366 day series (to account for leap years)\r\n except:\r\n minus = 366 - Average_Year.index.shape[0]\r\n Average_Year.index = daterangelist(datetime.datetime(2000,1,1), datetime.datetime(2001,1,1) - pd.Timedelta(str(minus) + ' days'),1) #If your series doesn't contain each day of the year, shrink the series\r\n av = Average_Year.index.copy()\r\n idx = np.where(av > pd.Timestamp('2000-09-30'))\r\n idx2 = np.where(av <= pd.Timestamp('2000-09-30'))\r\n new = av[idx].map(lambda t: t.replace(year=1999, month=t.month, day=t.day))\r\n new2 = av[idx2].map(lambda t: t.replace(year=2000, month=t.month, day=t.day))\r\n Av = pd.Series(np.concatenate((Average_Year.values[idx2],Average_Year.values[idx])), index=np.concatenate((new2,new)))\r\n Av = Av.sort_index() #Reshape the matrix so that it is as a 'water year', running from Oct 1 to Sept 30\r\n \r\n return Av", "def average_calculation(stocklist):\n total_volume = 0.0\n total_sale = 0.0\n for stockdata in stocklist:\n volume_date = stockdata.get(\"Volume\")\n close_date = stockdata.get(\"Close\")\n total_sale_date = float(volume_date) * float(close_date)\n total_volume += volume_date\n total_sale += total_sale_date\n #Using for loop adds all the daily values to total values.\n\n average_calculated = total_sale / total_volume\n average_rounded = round(average_calculated, 2)\n #Rounded to 2 decimal places matching the test_mining case.\n date_month = stocklist[0].get(\"Date\")\n average_month = (date_month[0:7], average_rounded)\n\n return average_month", "def _aggregate_by_year_month(historical_data: dict):\n if historical_data is None:\n return {}\n\n converted_response = {}\n\n # first pass assemble the basic return value\n for datapoint in historical_data:\n year = datapoint['date'].year\n month = datapoint['date'].month\n\n if year not in converted_response:\n converted_response[year] = {}\n if month not in converted_response[year]:\n converted_response[year][month] = []\n\n converted_response[year][month].append(datapoint['value'])\n\n # second pass calculate averages\n for year in converted_response.keys():\n for month in converted_response[year]:\n converted_response[year][month] = sum(\n converted_response[year][month]) / len(converted_response[year][month])\n\n return converted_response", "def calc_mean_year(fitsFiles, verbose=True):\n numObs = 0\n meanYear = 0.0\n mean_mjd = 0.0\n \n nfiles = len(fitsFiles)\n for ii in range(len(fitsFiles)):\n hdu = fits.open(fitsFiles[ii])\n hdr = hdu[0].header\n\n date = hdr['DATE-OBS']\n time = hdr['TIME-OBS']\n \n dateObj = dt.strptime(date + ' ' + time, '%Y-%m-%d %H:%M:%S')\n\n year = dtUtil.toYearFraction(dateObj)\n meanYear += year\n\n # Now to calculate MJD\n t = Time('{0} {1}'.format(date, time), format='iso', scale='utc')\n mjd = t.mjd\n mean_mjd += mjd\n\n if verbose:\n print('{0:12s} {1:12s} {2:8.3f} {3}'.format(date, time, year,\n fitsFiles[ii]))\n \n meanYear /= nfiles\n mean_mjd /= nfiles\n \n if verbose:\n print('*** AVERAGE YEAR = {0:8.4f} ***'.format(meanYear))\n print('**** AVERAGE MJD: {0} *****'.format(mean_mjd))\n\n return meanYear", "def month_avg_price(rows):\n\tmonthly_avg = {} \n\tfor i in range(len(rows)):\n\t\tif rows[i][4] != None:\n\t\t\tif i+1 < len(rows) and rows[i-1][0] == rows[i+1][0]:\n\t\t\t\trows[i][0] = rows[i-1][0]\n\t\t\tif rows[i][0] != None:\n\t\t\t\tmonth = month_to_string(rows[i][0].month)\n\t\t\t\tmonthly_avg[month] = monthly_avg.setdefault(month, [0, 
0])\n\t\t\t\tmonthly_avg[month][0] += rows[i][4]\n\t\t\t\tmonthly_avg[month][1] += 1\n\treturn dict(map(lambda entry: (entry[0], entry[1][0] / entry[1][1]), monthly_avg.items()))", "def monthly_mean(df):\n monthly_mean = df.groupby(pd.Grouper(freq=\"MS\")).mean()\n month_pivot = (\n monthly_mean.groupby([monthly_mean.index.month, monthly_mean.index.year])\n .sum()\n .unstack()\n )\n return month_pivot", "def get_monthly_av(month, daily_weather):\n # create a list that only contains the daily weather for the specified month\n month_daily_weather = [day for day in daily_weather if day['month'] == month]\n\n rain = []\n maxt = []\n mint = []\n for day in month_daily_weather:\n rain.append(float(day['rain']))\n maxt.append(float(day['maxt']))\n mint.append(float(day['mint']))\n \n #rain = round(sum(rain)/len(rain), 2)\n avg_rain = round(statistics.mean(rain), 2)\n avg_maxt = round(statistics.mean(maxt), 2)\n avg_mint = round(statistics.mean(mint), 2)\n\n averages = {'month': month, 'avg_rain': avg_rain, 'avg_maxt': avg_maxt, 'avg_mint': avg_mint}\n return averages", "def calc_averages(list):\n col1_sum = 0\n col2_sum = 0\n for i3 in range(len(list)):\n col1_sum += list[i3][0]\n col2_sum += list[i3][1]\n col1_avg = col1_sum / len(list)\n col2_avg = col2_sum / len(list)\n return col1_avg, col2_avg", "def generate_cities_averages(temp, multi_cities, years):\n average_annual_temperatures = []\n for year in years:\n annual_temperatures = []\n for city in multi_cities:\n annual_temperatures.append(temp.get_yearly_temp(city, year))\n average_annual_temperatures.append(np.mean(annual_temperatures))\n return np.array(average_annual_temperatures)", "def _get_monthly_values(df):\n\t\tmonths = ['April', 'May', 'June', 'July', 'August', 'September',\n\t\t\t\t 'October', 'November', 'December', 'January', 'February', 'March']\n\t\tmonthly_values = []\n\t\tfor month in months:\n\t\t\tdf_month = df.loc[df['month'] == month, :]\n\t\t\ttry:\n\t\t\t\taverage = df_month.iloc[0]['average']\n\t\t\t\tcount = df_month.iloc[0]['count']\n\t\t\texcept IndexError:\n\t\t\t\taverage = 0\n\t\t\t\tcount = 0\n\t\t\tmonthly_values.append({'y': average, 'count': count})\n\t\treturn monthly_values", "def add_dates_part(all_dates_df: pd.DataFrame, aggregate_df: pd.DataFrame):\n\n # index over all_dates_df\n j = 0\n # index over aggregate_df\n index = 0\n while index < len(aggregate_df):\n\n counter = 1 # count every delta days\n month_arguments = []\n year_arguments = []\n\n while counter <= delta and j < len(all_dates_df):\n month_arguments.append(all_dates_df.loc[j, \"Is Beginning of a Month\"])\n year_arguments.append(all_dates_df.loc[j, \"Is Beginning of a Year\"])\n counter += 1\n j += 1\n\n month_avg = np.mean(month_arguments)\n year_avg = np.mean(year_arguments)\n\n k = index + 20\n\n while index < k:\n if month_avg < 0.5: # majority of the days are in the second half of the month\n aggregate_df.loc[index, 'Is Beginning of a Month'] = 0\n else:\n aggregate_df.loc[index, 'Is Beginning of a Month'] = 1\n\n if year_avg < 0.5: # the month is at the first half of the year\n aggregate_df.loc[index, 'Is Beginning of a Year'] = 0\n else:\n aggregate_df.loc[index, 'Is Beginning of a Year'] = 1\n index += 1\n\n return aggregate_df", "def create_summer_average_monthly(ds, sum_mon, sum_mon_str, varname, conv_factor, est_mon_total):\n\n monthly_data = False\n daily_data = False\n\n max_time_step = max(ds.time.dt.day.values)\n if(max_time_step < 31):\n monthly_data = True\n if(max_time_step == 31):\n daily_data = True\n\n # Scale by 
conversion factor\n ds = ds/conv_factor\n \n if(est_mon_total): \n if(monthly_data):\n # Estimate days in each month\n month_length = ds.time.dt.days_in_month\n \n # Estimate monthly total\n ds = ds * month_length\n\n if(daily_data):\n # Estimate monthly total\n ds = ds.resample(time='1M', skipna=False).sum(skipna=False)\n\n # Only keep data for summer months\n ds_summer = ds.sel(time = ds.time.dt.month.isin(sum_mon))\n\n # Average monthly (averaging across years)\n ds_summer_avg = ds_summer.groupby('time.month').mean()\n\n # Modify month coordinates to string\n ds_summer_avg = ds_summer_avg.assign_coords({'month': sum_mon_str})\n\n # Rename dataarray\n ds_summer_avg = ds_summer_avg.rename(varname) \n\n return ds_summer_avg", "def most_average_rainfall(data):\r\n min_difference = max(data.rainfall)\r\n round_average_rainfall = round(np.mean(data.rainfall), 2)\r\n for i in range(len(data.index)):\r\n current_difference = abs(data.rainfall[i] - round_average_rainfall)\r\n # computes min_difference from average to current rainfall\r\n if current_difference < min_difference:\r\n min_difference = current_difference\r\n req_month_index = i\r\n # computes month, year of month, year index provided.\r\n return index_to_name_month(int(data.date[req_month_index][4:]) - 1) + \", \" + data.date[req_month_index][:4]", "def generate_cities_averages(temp, multi_cities, years):\n\n average_annual_temps = []\n \n # For each year, get average annual temperature for all citites\n for year in years:\n multi_cities_sum = 0\n for city in multi_cities:\n total_year_temp = temp.get_yearly_temp(city, year)\n average_year_temp = total_year_temp.sum()/ len(total_year_temp)\n multi_cities_sum += average_year_temp\n \n average_annual_temps.append(multi_cities_sum / len(multi_cities))\n \n return np.array(average_annual_temps)", "def averages():\n year = request.args.get('year')\n query = db.session.query(climate_history.STATE, climate_history.DATE,\n func.avg(climate_history.TMAX).label('TMAX'),\n func.avg(climate_history.TMIN).label('TMIN'),\n func.avg(climate_history.TAVG).label('TAVG'),\n func.avg(climate_history.PRCP).label('PRCP')\n )\n if year is not None:\n query = query.filter(climate_history.DATE == year)\n\n results = query.group_by(climate_history.STATE, climate_history.DATE).all()\n\n return jsonify([{\"STATE\":fixstate(row[0]), \"STATE_NAME\":statename(row[0]), \"DATE\":row[1], \"TMAX\":row[2], \"TMIN\":row[3], \"TAVG\":row[4], \"PRCP\":row[5]} for row in results])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of dates, extract the lowest
def get_lowest_date(date_list): min_date = [9999, '', 9999, 9999] for date in date_list: nums = re.findall('([0-9]+)', date) year = -1 month = '' month_num = -1 day = -1 for i in range(12): if constants.MONTH_NAMES[i] in date: month = constants.MONTH_NAMES[i] month_num = i break for num in nums: if int(num) > 1900: year = int(num) elif int(num) <= 31: day = int(num) if year != -1 and year < min_date[0] and month_num != -1 and month_num < min_date[2] and day != 0 and \ day < min_date[3]: min_date = [year, month, month_num, day] return min_date
[ "def min_value(my_list):\n aux = ordered_values(my_list)\n return aux[0]", "def findClosestDate(date, array):\n diff = [abs(date - x).days for x in array]\n return np.argmin(diff)", "def earliestDateStamp():", "def start_date_path(path):\n all_files = get_dirs(path)\n all_dates = [dt.strptime(f.split(\"=\")[1], \"%Y-%m-%d\").date()\n for f in all_files if len(f.split(\"=\"))==2]\n return np.min(all_dates)", "def mymin(items):\n smallest = items[0]\n for item in items[1:]:\n if item < smallest:\n smallest = item\n return smallest", "def minimum(values):\n minVal = math.inf\n for x in values:\n if x < minVal:\n minVal = x\n return minVal", "def find_valid_period_start_date(dates, date, period):\n\t\n\tperiod_start_date = date - period\n\tperiod_dates = dates[dates >= period_start_date]\n\tfirst_date = period_dates.iloc[0]\n\treturn first_date", "def find_all_lowest(l, f):\n if len(l) == 1: return l\n minvalue = min([f(x) for x in l])\n return [x for x in l if f(x) == minvalue]", "def min(self, comparer=None):\n\n return self.min_by(identity, comparer).map(first_only)", "def find_closest(filelist, ref_date = datetime.now()):\n closest = None\n for filename in filelist:\n date = parse_date(filename)\n time_diff = date_delta(date, ref_date)\n if not closest:\n closest = (filename, time_diff)\n if time_diff < closest[1]:\n closest = (filename, time_diff)\n return closest", "def findMin (l):\n min_l = min(l)\n min_index = l.index(min_l)\n return (min_l, min_index)\n pass", "def get_oldest(fromlist):\n oldest_timestamp = fromlist[0].data[0][1] #take the first timestamp from the first DataObject in the fromlist list\n for obj in fromlist:\n if obj.oldest_sample < oldest_timestamp:\n oldest_timestamp = obj.oldest_sample\n return oldest_timestamp", "def find_minimum(data):\n minimum_of_set = min(data)\n return minimum_of_set", "def get_acceptable_dates(date, margin):\n dates = [(date + timedelta(days=x)) for x in range(-margin, +margin + 1)]\n dates.sort()\n return dates", "def find_max_min(list_value):\n if len(set(list_value)) > 1:\n return [min(list_value), max(list_value)]\n else:\n return [list_value[0]]", "def earliest(self):\n earliest_pub = Image.objects.aggregate(Min('pub_date'))['pub_date__min']\n earliest_pub = datetime.datetime.fromordinal(earliest_pub.toordinal())\n return earliest_pub", "def get_missing_dates(dates):\r\n\r\n # find range and min from dates\r\n min_date = min(dates)\r\n num_days = (max(dates) - min_date).days\r\n\r\n # build full range of dates with range and min\r\n full_dates = [min_date + timedelta(days=i) for i in range(num_days)]\r\n\r\n # return difference between full range set and dates set\r\n return set(full_dates) - set(dates)", "def smallest_value(reader):\n line = time_series.skip_header(reader).strip()\n smallest = int(line)\n\n for line in reader:\n value = int(line.strip())\n\n if value < smallest:\n smallest = value \n return smallest", "def smallest_int(number_list):\n is_smallest = []\n for item in number_list:\n is_smallest = sorted(number_list)\n\n try:\n return is_smallest[0]\n except IndexError:\n return None\n #returns index 0 of a numerically-sorted list\n\n # Solution: \n # smallest = None\n # for item in number_list:\n # if smallest is None or item < smallest:\n # smallest = item\n # return smallest" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allowed device management levels, an empty list allows all management levels.
def allowed_device_management_levels(self) -> Sequence[str]: return pulumi.get(self, "allowed_device_management_levels")
[ "def allowed_mosaics(self):\n return []", "def management_groups(self) -> Optional[Sequence['outputs.ResourceIdResponse']]:\n return pulumi.get(self, \"management_groups\")", "def capabilities(self):\n return []", "def _checkManageCapabilities(self, irc, msg, channel):\n if channel != 'global':\n capability = ircdb.makeChannelCapability(channel, 'op')\n else:\n capability = 'admin'\n if not ircdb.checkCapability(msg.prefix, capability):\n irc.errorNoCapability(capability, Raise=True)", "def get_user_levels():\n return USER().levels", "def test_admin_policy() -> None:\n # Make sure it's valid\n POLICY_SCHEMA(system_policies.ADMIN_POLICY)\n\n perms = PolicyPermissions(system_policies.ADMIN_POLICY, None)\n assert perms.check_entity(\"light.kitchen\", \"read\")\n assert perms.check_entity(\"light.kitchen\", \"control\")\n assert perms.check_entity(\"light.kitchen\", \"edit\")", "async def _allowlist(self, ctx: commands.Context):\n settings = await self.config.allowed()\n await ctx.send(embed=discord.Embed(\n title=\"BotAccess Allowed Servers\",\n description=f\"{humanize_list([f'`{gu.name}` (`{g}`)' if (gu := self.bot.get_guild(g)) else f'`{g}`' for g in settings])}\",\n color=await ctx.embed_color()\n ))\n await ctx.send_help()", "def getPowerManagement(wifi):\n power = wifi.wireless_info.getPower()\n status = \"\"\n if (power.disabled):\n status = \":off\"\n else:\n if (power.flags & pythonwifi.flags.IW_POWER_TYPE):\n if (power.flags & pythonwifi.flags.IW_POWER_MIN):\n status = status + \" min\"\n if (power.flags & pythonwifi.flags.IW_POWER_MAX):\n status = status + \" max\"\n if (power.flags & pythonwifi.flags.IW_POWER_TIMEOUT):\n status = status + \" timeout:\"\n else:\n if (power.flags & pythonwifi.flags.IW_POWER_SAVING):\n status = status + \" saving:\"\n else:\n status = status + \" period:\"\n pm_mode_mask = power.flags & pythonwifi.flags.IW_POWER_MODE\n if (pm_mode_mask == pythonwifi.flags.IW_POWER_UNICAST_R):\n status = status + \"mode:Receive Unicast only received\"\n elif (pm_mode_mask == pythonwifi.flags.IW_POWER_MULTICAST_R):\n status = status + \"mode:Receive Multicast only received\"\n elif (pm_mode_mask == pythonwifi.flags.IW_POWER_ALL_R):\n status = status + \"mode:All packets received\"\n elif (pm_mode_mask == pythonwifi.flags.IW_POWER_FORCE_S):\n status = status + \"mode:Force sending\"\n elif (pm_mode_mask == pythonwifi.flags.IW_POWER_REPEATER):\n status = status + \"mode:Repeat multicasts\"\n if (power.flags & pythonwifi.flags.IW_POWER_ON):\n status = status + \":on\"\n return \"Power Management%s\" % (status, )", "def admin_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_groups\")", "def rights_status():\n return app.manager.admin_work_controller.rights_status()", "def supports_grade_system_admin(self):\n return # boolean", "async def _permissions(self, ctx: customContext):\r\n permlist = []\r\n for perm in ctx.guild.me.guild_permissions:\r\n if perm[1] == True:\r\n permlist.append(perm[0])\r\n\r\n required = [\r\n \"send_messages\",\r\n \"embed_links\",\r\n \"manage_messages\",\r\n \"ban_members\",\r\n \"kick_members\",\r\n \"add_reactions\",\r\n \"manage_nicknames\",\r\n \"external_emojis\",\r\n ]\r\n for perm in permlist:\r\n if perm in required:\r\n required.remove(perm)\r\n\r\n em = Embed(description=\", \".join(f\"`{perms}`\" for perms in permlist))\r\n em.add_field(\r\n name=\"Recommended permissions missing\",\r\n value=\", \".join(f\"`{perms}`\" for perms in required),\r\n )\r\n em.set_author(name=\"List 
of permissions the bot has in this server\")\r\n await ctx.send(embed=em)", "def __check_security_policy(self):\n\n cmd = \"setenforce 0; \"\n\n cmd = cmd + \"supolicy --live \\\"allow init logd dir getattr\\\";\"\n\n # # Depreciated supolicies. Still keep them for backup purpose\n cmd = cmd + \"supolicy --live \\\"allow init init process execmem\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow atfwd diag_device chr_file {read write open ioctl}\\\";\"\n cmd = cmd + \"supolicy --live \\\"allow init properties_device file execute\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow system_server diag_device chr_file {read write}\\\";\"\n\n # # Suspicious supolicies: MI works without them, but it seems that they SHOULD be enabled...\n\n # # mi2log permission denied (logcat | grep denied), but no impact on log collection/analysis\n cmd = cmd + \\\n \"supolicy --live \\\"allow untrusted_app app_data_file file {rename}\\\";\"\n\n # # Suspicious: why still works after disabling this command? Won't FIFO fail?\n cmd = cmd + \\\n \"supolicy --live \\\"allow init app_data_file fifo_file {write open getattr}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow init diag_device chr_file {getattr write ioctl}\\\"; \"\n\n # Nexus 6 only\n cmd = cmd + \\\n \"supolicy --live \\\"allow untrusted_app diag_device chr_file {write open getattr}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow system_server diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow netmgrd diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow rild diag_device chr_file {read write}\\\";\"\n cmd = cmd + \\\n \"supolicy --live \\\"allow rild debuggerd app_data_file {read open getattr}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service mnt_user_file dir {search}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service fuse dir {read open search}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service mnt_user_file lnk_file {read}\\\";\"\n\n cmd = cmd + \\\n \"supolicy --live \\\"allow wcnss_service fuse file {read append getattr}\\\";\"\n\n main_utils.run_shell_cmd(cmd)", "def get_manage_messages_perms():\n return discord.Permissions(manage_messages=True, read_messages=True)", "def GetLogicalSystemNames(self):\n if len(self._logicalSystems) == 0:\n cmd = \"show configuration logical-systems | display set\"\n cmdResult = Session.ExecCommand(cmd).lower().splitlines()\n lsLines = [line for line in cmdResult if line.startswith(\"set logical system\")]\n if len(lsLines) == 0 : \n return [\"Default\"]\n else :\n repLSs = re.findall(r\"(?<=logical-systems ).[a-zA-Z0-9]+\", lsLines)\n self._logicalSystems = repLSs\n return self._logicalSystems", "def object_storage_management_patterns(self):\n return list(self._unit.received[\"object-storage-management-patterns\"] or [])", "def fan_modes(self):\n return self._current_capabilities.get(\"fanLevels\")", "def test_blockdev_list_permissions(self):\n self._test_permissions([_STRATIS_CLI, \"blockdev\", \"list\"], False, False)", "def get_management_access_info(self) -> dict:\n response = self._caller.get(\"mgmtaccess\")\n if response.status_code != requests.codes.ok:\n raise RuntimeError(f\"Fetching management access settings failed with HTTP {response.status_code}\")\n return response.json()", "def getAllow(self):\n return self.base.get(\"allow\", [])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allowed encryptions statuses, an empty list allows all statuses.
def allowed_encryption_statuses(self) -> Sequence[str]: return pulumi.get(self, "allowed_encryption_statuses")
[ "def getValidValuesForVpgStatuses(self):\n\n return requests.get(self.zvmip + self.endPoint + '/statuses', headers=self.headerwithkey, verify=False)", "def __init__status_choices__(self):\n self.fields['status'].choices = flag_settings.get_for_model(\n self.target_object, 'STATUSES')", "def get_viable_status():\n stats = []\n for key in Job.possible_status.keys():\n stats.append(Job.possible_status[key])\n return stats", "def status_choice_keys():\n return [Project.PREPARATION, Project.REVIEW, Project.FUNDED, Project.REJECTED]", "def allowed_values(cls: Type[_ActBlockStyle]) -> List[str]:\n return [item.value for item in cls]", "def statuses(self):\n status_set = self.values('status').distinct()\n \n status_list = []\n for status in status_set:\n status_list.append(status['status'])\n \n return status_list", "async def _allowlist(self, ctx: commands.Context):\n settings = await self.config.allowed()\n await ctx.send(embed=discord.Embed(\n title=\"BotAccess Allowed Servers\",\n description=f\"{humanize_list([f'`{gu.name}` (`{g}`)' if (gu := self.bot.get_guild(g)) else f'`{g}`' for g in settings])}\",\n color=await ctx.embed_color()\n ))\n await ctx.send_help()", "def get_all_deployment_statuses(self) -> List[bytes]:\n statuses = self.deployment_state_manager.get_deployment_statuses()\n return [status.to_proto().SerializeToString() for status in statuses]", "def mock_status() -> List[Status]:\n return [Status.FAILED, Status.SUCCEEDED, Status.SUCCEEDED, Status.SUCCEEDED]", "def status_verbose(self):\n return dict(Applicant.STATUS_TYPES)[self.status]", "def _GetIssueStatusesNeedingUpdating():\n statuses = [None]\n statuses.extend(issue_constants.OPEN_STATUSES)\n return statuses", "def get_all_status(self):\n return [status for status, in self.env.db_query(\"\"\"\n SELECT DISTINCT status FROM ticket\n \"\"\")]", "def closed_statuses(self):\n log = logging.getLogger('RunnerLog')\n\n default = ['Closed', 'Resolved']\n\n statuses = self.get('closed_statuses')\n if not statuses:\n return default\n\n try:\n # expecting a list here\n return json.loads(statuses)\n except ValueError as error:\n log.info('Invalid value for closed_statuses. Using default value. 
'\n 'ValueError {0}'.format(error))\n return default", "def batch_status(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def getValidValuesForVpgSubstatuses(self):\n\n return requests.get(self.zvmip + self.endPoint + '/substatuses', headers=self.headerwithkey, verify=False)", "async def get_unsubmitted_order_statuses(self) -> List[OrderStatus]:\n await self._wait_till_ready()\n session: Session = SQLConnectionManager.get_trade_fills_instance().get_shared_session()\n\n try:\n and_conditions: BooleanClauseList = self.get_order_filter()\n\n query: Query = (session\n .query(OrderStatus)\n .filter(Order.id == OrderStatus.order_id)\n .filter(OrderStatus.timestamp > self._last_submitted_order_status_timestamp)\n .filter(OrderStatus.status.in_(self.ACCEPTED_ORDER_STATUS_UPDATES))\n .filter(or_(*and_conditions))\n .order_by(OrderStatus.timestamp))\n\n new_order_statuses: List[OrderStatus] = query.all()\n return new_order_statuses\n except Exception as e:\n self.logger().error(f\"Failed to query for unsubmitted order statuses: {str(e)}\", exc_info=True)", "def operations_permitted_lower(self) -> List[str]:\n return [x.lower() for x in self.operations_permitted]", "def multi_filter_status(self, queryset, field_name, value):\n statuses = value.split(',')\n statuses = set(statuses) & self.STATUS_SET\n return queryset.filter(status__in=statuses)", "def get_limit_statuses(self):\n val = self._get_variable(VAR_ID.LIMIT_STATUS)\n an1_active = bool(val & 2**7) # bit mask to check if 7th bit is set\n an2_active = bool(val & 2**8) # bit mask to check if 8th bit is set\n return an1_active, an2_active", "def validate_status(self, value):\n if self.context['view'].action == 'create':\n if not value == THREAD_INVITE_PENDING:\n raise serializers.ValidationError(\"status must be PENDING for new invite\")\n return value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the device needs to be approved by the customer admin.
def require_admin_approval(self) -> bool: return pulumi.get(self, "require_admin_approval")
[ "def is_customer_initiated_maintenance_allowed(self) -> Optional[bool]:\n return pulumi.get(self, \"is_customer_initiated_maintenance_allowed\")", "def should_auto_approve():\n if settings.MODERATION_POLICY == moderation_policies.automatic.value:\n return True\n return False", "def can_charge(customer):\n if customer.date_purged is not None:\n return False\n if customer.default_source:\n return True\n return False", "def is_approved(self, object_):\n return object_.is_approved", "def supports_authorization_admin(self):\n return # boolean", "def chargeable(self):\n return not self.internal", "def is_admin(self):\n return self.authenticated and self.client['id'] == 'admin'", "def can_activate(self):\n return IPossibleLocalAgencyInfo.providedBy(self.context) and \\\n not ILocalAgencyInfo.providedBy(self.context)", "def IsRestricted(self):\n logging.info('Update restricted status')\n output = self._cr50_console.Command('ccd')\n if 'Capabilities' not in output:\n logging.error('`Capabilities` not in output of `ccd`, output:\\n%s',\n output)\n raise ValueError('Could not get ccd output.')\n\n is_restricted = 'IfOpened' in output or 'IfUnlocked' in output\n logging.info('Restricted status: %s', is_restricted)\n return is_restricted", "def is_capacitated(self):\n return self.capacitated", "def check_permission(self):\n if self.committee and self.committee.premium:\n if self.premium_but_free():\n return True\n\n # must be authenticated\n if not current_user.is_authenticated:\n return False\n\n if not current_user.is_confirmed():\n return False\n\n # check subscription\n return current_user.subscribed_to_committee(self.committee)\n\n return True", "def chargeable(self):\n return not self.internal and self.charged", "def is_admin_user(self):\n return getuser() == self.ADMIN_USER", "def access_granted(self):\n\t\treturn self.status in self.GRANTED_STATUSES", "def _is_admin(self):\n # only support one object\n self.ensure_one()\n\n return self.id == odoo.SUPERUSER_ID or\\\n self.sudo(self).has_group('base.group_erp_manager') or \\\n self.sudo(self).has_group('trobz_base.group_configure_user')", "def is_admin():\n return is_authenticated() and get_user_role() == 'admin'", "def oaallowsauthorpaid(self):\n return self._entry.get('oaAllowsAuthorPaid')", "def __can_upload(bill, approver):\n return (approver == bill.assign) and (bill.get_state_id() == STATE_DRAFT)", "def supports_vault_admin(self):\n return # boolean", "def is_developer(ctx):\n member = to_member(ctx.author.id)\n for role in member.roles:\n if role.id == int(os.getenv(\"DEVELOPERS_ROLE_ID\")):\n return True\n\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether the device needs to be corp owned.
def require_corp_owned(self) -> bool: return pulumi.get(self, "require_corp_owned")
[ "def chargeable(self):\n return not self.internal", "def is_owner_or_server_owner():\r\n return commands.check(is_owner_or_server_owner_check)", "def chargeable(self):\n return not self.internal and self.charged", "def is_owner_check(ctx):\r\n return ctx.bot.owner_id == ctx.message.author.id", "def is_owner_or_administrator():\r\n return commands.check(is_owner_or_administrator_check)", "async def cog_check(self, ctx: commands.Context):\n if ctx.guild.id in self.bot.premium_guilds:\n return True\n if self.bot.isadmin(ctx.author):\n return True\n return False", "def is_managed(self, ns_name):\n return self.get_prefix_and_id(ns_name) is not None", "def is_item_owned(self, item):\n if item in self.items:\n return True\n return False", "def can_be_collected(self):\n # TODO\n return (\n self.paid and\n not self.collected and\n not self.cancelled and\n self.name is not None\n )", "def test_owner_no_ownership(self):\n self.assert_ownership(True)", "def isPhysical(self,uid):\n return( self.id2node[uid].group==\"Physical\" )", "def _validate_odoo_owner(self):\n try:\n self._get_odoo_project_owner()\n return True\n except OdooModelsIncorrect:\n self.add_note(\"Could not find odoo owner\")\n return False", "def is_needed(self):\n return False", "def HasODC(self):\n return self.__has('ODC')", "def is_owner_or_has_permission(permission):\r\n def check(ctx):\r\n \"\"\"Simple checking function\"\"\"\r\n if permission not in PERMISSIONS:\r\n return False\r\n return PERMISSIONS[permission](ctx.message.author.server_permissions)\r\n return commands.check(check)", "def is_owner_or_administrator_check(ctx):\r\n return ctx.message.author.id == ctx.bot.owner_id \\\r\n or ctx.message.author.server_permissions.administrator", "def has_current_medical_disclaimer(self):\n # TODO: Implement this\n return True", "def __is_requester_the_owner(self):\n obj = getattr(self, 'object', None)\n if obj:\n return self.owner_permission_class().has_object_permission(self.request, None, obj)\n else:\n return False", "def kube_managed(self):\n return bool(self.image)", "def can_preoccupy(self) -> bool:\n return self in {\n NomenclatureStatus.available,\n NomenclatureStatus.unpublished_pending,\n NomenclatureStatus.unjustified_emendation,\n NomenclatureStatus.hybrid_name,\n NomenclatureStatus.variant,\n NomenclatureStatus.justified_emendation,\n NomenclatureStatus.preoccupied,\n NomenclatureStatus.partially_suppressed,\n NomenclatureStatus.nomen_novum,\n NomenclatureStatus.as_emended,\n }" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether or not screenlock is required for the DevicePolicy to be true. Defaults to `false`.
def require_screenlock(self) -> bool: return pulumi.get(self, "require_screenlock")
[ "def allow_screen_capture(self):\n if \"allowScreenCapture\" in self._prop_dict:\n return self._prop_dict[\"allowScreenCapture\"]\n else:\n return None", "def can_device_lock_front_panel(self):\n return self.sdk.SCC_CanDeviceLockFrontPanel(self._serial)", "def is_locked() -> bool:\n session = Quartz.CGSessionCopyCurrentDictionary()\n return session.get('CGSSessionScreenIsLocked', False)", "def smartFunctionGui(self):\n isModelingReady = all(\n self.deviceStat[x] for x in ['mount', 'camera', 'astrometry']\n )\n\n if isModelingReady and self.app.data.buildP:\n self.ui.runModel.setEnabled(True)\n self.ui.plateSolveSync.setEnabled(True)\n self.ui.runFlexure.setEnabled(True)\n self.ui.runHysteresis.setEnabled(True)\n\n else:\n self.ui.runModel.setEnabled(False)\n self.ui.plateSolveSync.setEnabled(False)\n self.ui.runFlexure.setEnabled(False)\n self.ui.runHysteresis.setEnabled(False)\n\n if self.deviceStat.get('mount', False):\n self.ui.batchModel.setEnabled(True)\n\n else:\n self.ui.batchModel.setEnabled(False)\n\n stat = self.deviceStat.get('environOverall', None)\n\n if stat is None:\n self.ui.refractionGroup.setEnabled(False)\n self.ui.setRefractionManual.setEnabled(False)\n\n elif stat and self.deviceStat.get('mount', None):\n self.ui.refractionGroup.setEnabled(True)\n self.ui.setRefractionManual.setEnabled(True)\n\n else:\n self.ui.refractionGroup.setEnabled(False)\n self.ui.setRefractionManual.setEnabled(False)\n return True", "def caps_lock(self):\n ret = self._get_attr(\"capsLock\")\n return ret", "def _is_device_overriden():\n return heater_override | freezer_override", "def set_keep_screen_on(self, keep_on):\n window = self.window\n if not window:\n return\n if keep_on:\n window.addFlags(Window.FLAG_KEEP_SCREEN_ON)\n else:\n window.clearFlags(Window.FLAG_KEEP_SCREEN_ON)", "def sleep_mode(self) -> bool:\n return self.mode_toggle('sleep')", "def enable_ultra_ssd(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"enable_ultra_ssd\")", "def is_safety_mode_enabled(base_mode,custom_mode):\n if base_mode & mavlink.MAV_MODE_FLAG_SAFETY_ARMED:\n return True\n else:\n return False", "def enableFrontPanel(self):\n self.write(\"SYS:KEY:LOCK 0\")", "def Lock(self):\n logging.info('Lock the device')\n # `ccd reset` needs ccd to be in open state.\n self._cr50_console.Command('ccd open')\n self._cr50_console.Command('ccd reset')\n # Wait for Cr50 resetting.\n time.sleep(1)\n self._cr50_console.Command('ccd lock')\n # Wait for Cr50 resetting.\n time.sleep(1)\n return self.IsRestricted()", "def is_allow_attention_screen(self, is_allow_attention_screen):\n\n self._is_allow_attention_screen = is_allow_attention_screen", "def _observe_keep_screen_on(self, change):\n self.set_keep_screen_on(self.keep_screen_on)", "def allow_promiscuous(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_promiscuous\")", "def manual_mode(self) -> bool:\n return self.mode_toggle('manual')", "def is_locked(self) -> bool | None:\n return (\n self._device.lockState == LockState.LOCKED\n and self._device.motorState == MotorState.STOPPED\n )", "def enableDevice(record=bool, monitor=bool, device=\"string\", enable=bool, apply=bool):\n pass", "def getSupportedLock(self):\n \n return self.properties.get(Constants.PROP_SUPPORTED_LOCK)", "def _set_isLayoutGridLockEnabled(self, *args) -> \"bool\" :\n return _core.GridPreferences__set_isLayoutGridLockEnabled(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The allowed OS type.
def os_type(self) -> str: return pulumi.get(self, "os_type")
[ "def determine_os(self):\n system_type=\"\"\n try: #Linux check\n release_file = file(\"/etc/os-release\")\n for line in release_file:\n if re.search(\"^NAME\", line):\n system = line.split(\"=\")[-1]\n system = system[:-1]\n elif re.search(\"^VERSION_ID\",line):\n version = line.split(\"=\")[-1]\n version = system[:-1]\n system_type = \"\\\"\" + system + \" \" + version + \"\\\"\"\n except:\n #print(\"No(t known) Linux\")\n print(\"\")\n\n for system in self.data_access.supported_systems(): #BSD check\n if os.uname()[0] + \" \" + os.uname()[2].split('.')[0] == system:\n system_type = \"\\\"\" + system + \"\\\"\"\n\n if not system_type:\n system_type = \"\\\"Unknown\\\"\"\n\n return system_type", "def getOSType(self):\n with VirtualBoxException.ExceptionHandler():\n imachine = self.getIMachine()\n osType = self._vbox.getGuestOSType(imachine.OSTypeId)\n return osType", "def SysTypeString( ):\n\n if sys.platform == 'darwin':\n machType, osType = 'i386', 'darwin'\n else: \n machType = platform.uname()[4]\n if machType.startswith('x86_64'): machType = 'x86_64'\n osType = platform.system().lower()\n if osType.startswith( 'darwin' ): osType = 'darwin'\n\n # linuxType = ''\n # if platform.system() == 'Linux':\n # linuxType += '-' + MakeAlphaNum( platform.linux_distribution()[0].strip() )\n \n return machType + '-' + osType", "def operating_system(self):\n ret = self._get_attr(\"operatingSystem\")\n return ret", "def os_type_id(self):\n ret = self._get_attr(\"OSTypeId\")\n return ret", "def get_os():\r\n os_platform = sys.platform\r\n\r\n if os_platform.startswith('darwin'):\r\n return 'mac'\r\n elif os_platform.startswith('linux'):\r\n return 'linux'\r\n elif os_platform.startswith('win'):\r\n return 'windows'\r\n raise RuntimeError('Unsupported operating system.')", "def test_get_operating_system(self):\n pass", "def guest_os_types(self):\n ret = self._get_attr(\"guestOSTypes\")\n return [IGuestOSType(a) for a in ret]", "def UnderlyingSystemType(self) -> _n_2_t_4:", "def is_operating_system(self):\n\n elements = self.get(CPE.KEY_OS)\n return len(elements) > 0", "def platform() -> list:\n if GetOS.OS == \"Linux\":\n x = InformationManager(SysFiles.ver.value)\n return x.openF().read().split()[0]\n elif GetOS.OS == \"darwin\":\n x = get_output(\"sw_vers\")\n return x.split()[1:3]", "def software_type(self):\n return self._software_type", "def get_machine_type(self):\n\t\treturn(self.header[0x6d])", "def OSArchitecture(self) -> Architecture:", "def get_conf_host_os_type(self, host_ip):\n os_conf = {}\n root = self.parse_xml_file(self.xml_file_path)\n hosts_list = self.get_xml_item(root, 'Host')\n for host in hosts_list:\n os = host['attrib']['OSType'].strip()\n ips = [ip.strip() for ip in host['attrib']['HostIP'].split(',')]\n os_conf[os] = ips\n host_os = None\n for k, v in os_conf.items():\n if host_ip in v:\n host_os = constants.OS_TYPE.get(k, None)\n if not host_os:\n host_os = constants.OS_TYPE['Linux'] # Default OS type.\n\n LOG.debug('_get_host_os_type: Host %(ip)s OS type is %(os)s.',\n {'ip': host_ip, 'os': host_os})\n\n return host_os", "def system_type(self) -> pulumi.Input['StorageSystemSystemType']:\n return pulumi.get(self, \"system_type\")", "def GetDeviceType(self):\n if self._deviceType == DeviceType.Unknown:\n v = self.GetVersion()\n modelLine = next((line for line in v.splitlines() if \"Model:\" in line), None)\n if modelLine :\n model = modelLine.split(\":\")[1].strip()\n if model.startswith(\"ex\") or model.startswith(\"qfx\"): \n self._deviceType = DeviceType.Switch\n elif 
model.startswith(\"srx\") : \n self._deviceType = DeviceType.Firewall\n elif model.startswith(\"mx\") : \n self._deviceType = DeviceType.Router\n else:\n self._deviceType = DeviceType.Unknown\n \n if self._deviceType == DeviceType.Firewall :\n return \"Firewall\" \n elif self._deviceType == DeviceType.Router :\n return \"Router\" \n elif self._deviceType == DeviceType.Switch :\n return \"Switch\" \n else : \n return \"Unknown\"", "def device_type(self) -> str:\n return self.device_info.device_type", "def system_type(self) -> pulumi.Output['StorageSystemSystemType']:\n return pulumi.get(self, \"system_type\")", "def get_pkg_type():\n plt = get_os_name()\n if plt in PACK_TYPES:\n return PACK_TYPES[plt]\n raise UnsupportedOsError(f'No supported Package type for platform \"{plt}\"')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Only allows requests from devices with a verified Chrome OS. Verification includes requirements that the device is enterprise-managed, conformant to domain policies, and that the caller has permission to call the API targeted by the request.
def require_verified_chrome_os(self) -> bool:
    return pulumi.get(self, "require_verified_chrome_os")
[ "def _check_permission_ha(self, request):\n if request[REQUEST_FROM] != self.sys_homeassistant:\n raise APIForbidden(\"Only HomeAssistant can use this API!\")", "def test_authorize_door_granted():\n result = requests.get(API_ENTRY_authorize_door.format(\"CB06.01.01\", \"729\")).content\n assert result == \"1\"", "def test_get_device_presence(self):\n pass", "def test_no_browser():\n\n transport = validating_transport(requests=[Request()] * 2, responses=[get_discovery_response()] * 2)\n credential = InteractiveBrowserCredential(client_id=\"client-id\", _server_class=Mock(), transport=transport)\n with patch(InteractiveBrowserCredential.__module__ + \"._open_browser\", lambda _: False):\n with pytest.raises(CredentialUnavailableError, match=r\".*browser.*\"):\n credential.get_token(\"scope\")", "def test_user_cant_edit_her_domains(self):\n request = self.u_client.patch(\n get_domain_url(self.u_domain),\n {'type': 'NATIVE'},\n )\n self.assertEqual(request.status_code, 403)", "def trust(self, mac):\n\n self.log.info( f'Device being trusted: {mac}' ) # Log info\n self.sendCMD( f'trust {mac}' ) # Trust the device", "def check_os() -> WebDriver:\n operation_system = platform.system()\n\n chromedriver_folder = Path(__file__).absolute().parents[1] / \"chromedriver\"\n\n if 'linux' in operation_system.lower():\n options = webdriver.ChromeOptions()\n driver = webdriver.Chrome(executable_path=\"/usr/bin/chromedriver\", chrome_options=options)\n elif 'darwin' in operation_system.lower():\n driver = webdriver.Chrome(executable_path=chromedriver_folder / \"chromedriver_mac\")\n elif 'win' in operation_system.lower():\n driver = webdriver.Chrome(executable_path=(chromedriver_folder / \"chromedriver.exe\"))\n else:\n raise OSNotRecognized('Couldn\\'t find out your operation system. 
Program will stop.')\n return driver", "def partner_unsupported_os_version_blocked(self):\n if \"partnerUnsupportedOsVersionBlocked\" in self._prop_dict:\n return self._prop_dict[\"partnerUnsupportedOsVersionBlocked\"]\n else:\n return None", "async def test_websocket_get_no_condition_capabilities(\n opp, opp_ws_client, device_reg, entity_reg\n):\n await async_setup_component(opp, \"device_automation\", {})\n expected_capabilities = {}\n\n client = await opp_ws_client(opp)\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"device_automation/condition/capabilities\",\n \"condition\": {\"domain\": \"deconz\"},\n }\n )\n msg = await client.receive_json()\n assert msg[\"id\"] == 1\n assert msg[\"type\"] == TYPE_RESULT\n assert msg[\"success\"]\n capabilities = msg[\"result\"]\n assert capabilities == expected_capabilities", "def OSSupportsExtendedProtection(self) -> bool:", "def test_get_asset_managed_device_by_moid(self):\n pass", "def test_user_cant_edit_other_domains(self):\n request = self.u_client.patch(\n get_domain_url(self.su_domain),\n {'type': 'NATIVE'},\n )\n self.assertEqual(request.status_code, 403)", "def test_get_platforms_usage(self):\n pass", "async def test_websocket_get_no_action_capabilities(\n opp, opp_ws_client, device_reg, entity_reg\n):\n await async_setup_component(opp, \"device_automation\", {})\n expected_capabilities = {}\n\n client = await opp_ws_client(opp)\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"device_automation/action/capabilities\",\n \"action\": {\"domain\": \"deconz\"},\n }\n )\n msg = await client.receive_json()\n assert msg[\"id\"] == 1\n assert msg[\"type\"] == TYPE_RESULT\n assert msg[\"success\"]\n capabilities = msg[\"result\"]\n assert capabilities == expected_capabilities", "def authorize_browser(self):\n\n try:\n if (not self._api):\n self._api = AWeberAPI(\n self.config.consumer_key, self.config.consumer_secret)\n self._request_wait()\n (self._request_token, self._token_secret) = \\\n self._api.get_request_token('oob')\n except APIException as e:\n (excType, excMsg) = str(e).split(': ', 1)\n raise ClientException(\n EXCEPTION_API + ': [' + excType + '] ' + excMsg)\n\n if (not self._api.authorize_url):\n raise ClientError(ERROR_NO_AUTH_URL)\n\n # Suppress browser terminal output\n stderr = os.dup(1)\n stdout = os.dup(2)\n os.close(1)\n os.close(2)\n os.open(os.devnull, os.O_RDWR)\n\n try:\n webbrowser.get().open(self._api.authorize_url)\n except:\n pass\n finally:\n os.dup2(stderr, 1)\n os.dup2(stdout, 2)\n\n self._api.user.request_token = self._request_token\n self._api.user.token_secret = self._token_secret\n\n return True", "def get_devices():\n url = 'https://www.chromium.org/chromium-os/developer-information-for-chrome-os-devices'\n response = requests.get(url)\n response.raise_for_status()\n html = response.text.split('<table id=\"goog-ws-list-table\"')[1].split('</table>')[0]\n html = '<table id=\"goog-ws-list-table\"' + html + '</table>'\n table = ET.XML(html.encode('utf-8'))\n keys = [k.text for k in table[0][0]]\n devices = []\n for row in table[1]:\n device = dict()\n for num, value in enumerate(row):\n device[keys[num]] = None\n if value.text:\n device[keys[num]] = value.text.strip()\n elif list(value)[0].text:\n device[keys[num]] = list(value)[0].text.strip()\n devices.append(device)\n return devices", "def ensure_entitled(request, app_name, logger):\n\n entitlement_key = \"insights\"\n if enable_smart_mgmt_check:\n entitlement_key = \"smart_management\"\n\n # TODO: Blueprint.before_request was not working as 
expected, using\n # before_app_request and checking URL here instead.\n if _is_mgmt_url(request.path) or _is_openapi_url(request.path, app_name):\n return # allow request\n\n auth_key = get_key_from_headers(request.headers)\n if auth_key:\n entitlements = json.loads(base64.b64decode(auth_key)).get(\"entitlements\", {})\n if entitlement_key in entitlements:\n if entitlements[entitlement_key].get(\"is_entitled\"):\n logger.debug(\"enabled entitlement found on header\")\n return # allow request\n else:\n logger.debug(\"identity header not sent for request\")\n\n # if we got here, reject the request\n logger.debug(\"entitlement not found for account.\")\n raise HTTPError(\n HTTPStatus.BAD_REQUEST, message=\"Entitlement not found for account.\"\n )", "def _checkManageCapabilities(self, irc, msg, channel):\n if channel != 'global':\n capability = ircdb.makeChannelCapability(channel, 'op')\n else:\n capability = 'admin'\n if not ircdb.checkCapability(msg.prefix, capability):\n irc.errorNoCapability(capability, Raise=True)", "def test_get_supported_platforms(self):\n response = self.client.open(\n '/api/v1/getSupportedPlatforms',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_managed_devices(self):\n\n # Device with (boxid = 1) OR (troubleshootingstatus = 0)\n filter_value = {\n \"value\": [\n {\n \"property\": \"boxid\",\n \"value\": [1],\n \"operator\": \"=\"\n },\n {\n \"property\": \"troubleshootingstatus\",\n \"value\": [0],\n \"operator\": \"=\"\n }\n ],\n \"operator\": \"OR\"\n }\n query = QUERY_FILTER % json.dumps(filter_value)\n\n response = self.request(PATH_MANAGED_DEVICES, query)\n\n if response.status_code == requests.codes.ok:\n return response.json()\n else:\n print(\"Unrecognised status for managed device fetch\" + response.status_code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Google Cloud services that are not subject to the Service Perimeter restrictions. Deprecated. Must be set to a single wildcard "*". The wildcard means that unless explicitly specified by "restricted_services" list, any service is treated as unrestricted.
def unrestricted_services(self) -> Sequence[str]:
    warnings.warn("""Google Cloud services that are not subject to the Service Perimeter restrictions. Deprecated. Must be set to a single wildcard \"*\". The wildcard means that unless explicitly specified by \"restricted_services\" list, any service is treated as unrestricted.""", DeprecationWarning)
    pulumi.log.warn("""unrestricted_services is deprecated: Google Cloud services that are not subject to the Service Perimeter restrictions. Deprecated. Must be set to a single wildcard \"*\". The wildcard means that unless explicitly specified by \"restricted_services\" list, any service is treated as unrestricted.""")

    return pulumi.get(self, "unrestricted_services")
[ "def AddImplicitUnrestrictedServiceWildcard(ref, args, req):\n del ref, args # Unused in AddImplicitServiceWildcard\n\n m = util.GetMessages(version='v1beta')\n if req.servicePerimeter.perimeterType == (\n m.ServicePerimeter.PerimeterTypeValueValuesEnum.PERIMETER_TYPE_REGULAR):\n service_perimeter_config = req.servicePerimeter.status\n if not service_perimeter_config:\n service_perimeter_config = m.ServicePerimeterConfig\n service_perimeter_config.unrestrictedServices = ['*']\n req.servicePerimeter.status = service_perimeter_config\n return req", "def allow_service_without_endpoints(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_service_without_endpoints\")", "def check_services():\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def _AddServiceRestrictionArgs(parser, restriction_type, list_help,\n enable_help):\n group = parser.add_argument_group()\n repeated.AddPrimitiveArgs(\n group,\n 'perimeter',\n restriction_type + '-allowed-services',\n restriction_type + ' allowed services',\n metavar=restriction_type.upper() + '_SERVICE',\n include_set=False,\n additional_help=(list_help))\n group.add_argument(\n '--enable-' + restriction_type + '-service-restriction',\n default=None,\n action='store_true',\n help=enable_help)", "def _AddVpcRestrictionArgs(parser):\n _AddServiceRestrictionArgs(\n parser=parser,\n restriction_type='vpc',\n list_help='Services allowed to be called within the Perimeter when '\n 'VPC Service Restriction is enabled',\n enable_help=('When specified restrict API calls within the Service '\n 'Perimeter to the set of vpc allowed services. To disable '\n 'use \\'--no-enable-vpc-service-restriction\\'.'))", "def service_not_starts_with(self, service_not_starts_with):\n\n self._service_not_starts_with = service_not_starts_with", "def AddVpcServiceRestriction(args, req, version=None):\n return _AddServiceFilterRestriction(args, req, version, 'vpc')", "def deferrable_services():\n _svcs = services()\n _svcs.extend(['ovs-vswitchd', 'ovsdb-server',\n 'openvswitch-switch', 'ovs-record-hostname'])\n return list(set(_svcs))", "def get_services(self):\n services = self.docker.services.list(filters=self.settings['filter_services'])\n for blacklist_service in self.settings['blacklist_services']:\n for service in services:\n if service.name == blacklist_service:\n log.debug(f'Blacklisted {blacklist_service}')\n services.remove(service)\n return services", "def service_resource(self):\n\n return self.gce_project.service.firewalls()", "def service_not_contains(self, service_not_contains):\n\n self._service_not_contains = service_not_contains", "def find_service_providers(self, service: ServiceDescriptor) -> list:\n return ['ALICE', ]", "def get_enabled_services(credentials, project_name):\n enabled_services = []\n\n service_usage = discovery.build(\"serviceusage\", \"v1\", credentials=credentials)\n\n services_filter = \"state:\" + STATE_ENABLED\n services_request = service_usage.services().list(\n parent=project_name, pageSize=200, filter=services_filter\n )\n\n while services_request is not None:\n services = services_request.execute()\n\n if \"services\" not in services:\n break\n\n enabled_services = enabled_services + services[\"services\"]\n\n services_request = service_usage.services().list_next(\n previous_request=services_request, previous_response=services\n )\n\n return enabled_services", "def service_not_ends_with(self, service_not_ends_with):\n\n self._service_not_ends_with = service_not_ends_with", "def get_all_services(limit=None, 
columns=None, extra_filter=None):\n return query(\"GET services\\n\", limit=limit, columns=columns, \n item_type=\"services\" , extra_filter=extra_filter)", "def get_availables_services(self):\r\n self._service_locator.get_availables_services()", "def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)", "def service_resource(self):\n\n return self.gce_project.service.zones()", "def get_trusted_services():\n from Acquire.Service import is_running_service as _is_running_service\n\n if _is_running_service():\n from Acquire.Service import get_this_service as _get_this_service\n from Acquire.Service import Service as _Service\n\n from Acquire.Service import (\n get_service_account_bucket as _get_service_account_bucket,\n )\n from Acquire.ObjectStore import ObjectStore as _ObjectStore\n from Acquire.ObjectStore import url_to_encoded as _url_to_encoded\n\n # we already trust ourselves\n service = _get_this_service()\n\n trusted_services = {}\n trusted_services[service.service_type()] = [service]\n\n bucket = _get_service_account_bucket()\n\n uidkey = \"_trusted/uid/\"\n datas = _ObjectStore.get_all_objects(bucket, uidkey)\n\n for data in datas:\n remote_service = _Service.from_data(data)\n\n if remote_service.should_refresh_keys():\n # need to update the keys in our copy of the service\n remote_service.refresh_keys()\n key = \"%s/%s\" % (uidkey, remote_service.uid())\n _ObjectStore.set_object_from_json(bucket, key, remote_service.to_data())\n\n if remote_service.service_type() in datas:\n datas[remote_service.service_type()].append(remote_service)\n else:\n datas[remote_service.service_type()] = [remote_service]\n\n return datas\n else:\n # this is running on the client\n from Acquire.Client import Wallet as _Wallet\n\n wallet = _Wallet()\n return wallet.get_services()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Specifies how APIs are allowed to communicate within the Service Perimeter.
def __init__(__self__, *, allowed_services: Sequence[str], enable_restriction: bool):
    pulumi.set(__self__, "allowed_services", allowed_services)
    pulumi.set(__self__, "enable_restriction", enable_restriction)
[ "def test_allow(self):\n self.validate_test(self.v1_controller.allow() == 'GET')", "def write_allow():\n return 'write-allow', PermissionConfig", "def getAllow(self):\n return self.base.get(\"allow\", [])", "def get_allowed_operations(cls, course_key: CourseKey, user: Optional[User] = None) -> Dict[str, bool]:\n return {\n # Either the app is available and configurable or not. You cannot disable it from the API yet.\n \"enable\": False,\n \"configure\": True,\n }", "def access_control(self):\n return '%s.0/16 allow' % '.'.join(self.ip_addr.split('.')[:3])", "def AddImplicitUnrestrictedServiceWildcard(ref, args, req):\n del ref, args # Unused in AddImplicitServiceWildcard\n\n m = util.GetMessages(version='v1beta')\n if req.servicePerimeter.perimeterType == (\n m.ServicePerimeter.PerimeterTypeValueValuesEnum.PERIMETER_TYPE_REGULAR):\n service_perimeter_config = req.servicePerimeter.status\n if not service_perimeter_config:\n service_perimeter_config = m.ServicePerimeterConfig\n service_perimeter_config.unrestrictedServices = ['*']\n req.servicePerimeter.status = service_perimeter_config\n return req", "def read_allow():\n return 'read-allow', PermissionConfig", "async def _allowlist(self, ctx: commands.Context):\n settings = await self.config.allowed()\n await ctx.send(embed=discord.Embed(\n title=\"BotAccess Allowed Servers\",\n description=f\"{humanize_list([f'`{gu.name}` (`{g}`)' if (gu := self.bot.get_guild(g)) else f'`{g}`' for g in settings])}\",\n color=await ctx.embed_color()\n ))\n await ctx.send_help()", "def allow(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"allow\")", "def enable_api(event, data):\n DriveDevice.storage.api_enabled = data", "def security_definitions(self):\n return None", "def options_for_resource(self, *args, **kwargs):\n response_body = ResourceOptionsJSONRepresentation(self.resource_description)\n return CommonResponse.options(self.get_allowed_methods(), response_body)", "def test_servicedef_disable(self):\n self.check_result('W0002',\n 'http://support.riverbed.com/apis/test/1.0',\n Result.DISABLED,\n 'tags:\\n'\n ' relint-disable: [ W0002 ] ')", "def extra_authorize_data(self) -> dict[str, Any]:\n return {\"scope\": \"basic devices_read\"}", "def allow_service_without_endpoints(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_service_without_endpoints\")", "def get_allowed_operations(cls, course_key: CourseKey, user: Optional[User] = None) -> Dict[str, bool]:\n return {\n \"enable\": True,\n \"configure\": True,\n }", "def CanPassThrough(self, fleet):\n pass", "def _AddServiceRestrictionArgs(parser, restriction_type, list_help,\n enable_help):\n group = parser.add_argument_group()\n repeated.AddPrimitiveArgs(\n group,\n 'perimeter',\n restriction_type + '-allowed-services',\n restriction_type + ' allowed services',\n metavar=restriction_type.upper() + '_SERVICE',\n include_set=False,\n additional_help=(list_help))\n group.add_argument(\n '--enable-' + restriction_type + '-service-restriction',\n default=None,\n action='store_true',\n help=enable_help)", "def test_disabled_feature_forbidden_update(self):\n self._test_method('put', False, dummy=123)", "def test_allow_functions(self):\r\n self.manager.create_api(self.Person, allow_functions=True)\r\n response = self.app.get('/api/eval/person?q={}')\r\n assert response.status_code != 400\r\n assert response.status_code == 204" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The list of APIs usable within the Service Perimeter. Must be empty unless 'enable_restriction' is True. You can specify a list of individual services, as well as include the 'RESTRICTED-SERVICES' value, which automatically includes all of the services protected by the perimeter.
def allowed_services(self) -> Sequence[str]:
    return pulumi.get(self, "allowed_services")
[ "def unrestricted_services(self) -> Sequence[str]:\n warnings.warn(\"\"\"Google Cloud services that are not subject to the Service Perimeter restrictions. Deprecated. Must be set to a single wildcard \\\"*\\\". The wildcard means that unless explicitly specified by \\\"restricted_services\\\" list, any service is treated as unrestricted.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"unrestricted_services is deprecated: Google Cloud services that are not subject to the Service Perimeter restrictions. Deprecated. Must be set to a single wildcard \\\"*\\\". The wildcard means that unless explicitly specified by \\\"restricted_services\\\" list, any service is treated as unrestricted.\"\"\")\n\n return pulumi.get(self, \"unrestricted_services\")", "def _AddServiceRestrictionArgs(parser, restriction_type, list_help,\n enable_help):\n group = parser.add_argument_group()\n repeated.AddPrimitiveArgs(\n group,\n 'perimeter',\n restriction_type + '-allowed-services',\n restriction_type + ' allowed services',\n metavar=restriction_type.upper() + '_SERVICE',\n include_set=False,\n additional_help=(list_help))\n group.add_argument(\n '--enable-' + restriction_type + '-service-restriction',\n default=None,\n action='store_true',\n help=enable_help)", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return api_client.listServiceOfferings(**cmd)", "def get_api_list(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getApiList\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def test_watch_apiregistration_v1beta1_api_service_list(self):\n pass", "def service_resource(self):\n\n return self.gce_project.service.firewalls()", "def applyFilters(response: list, helperSession=None) -> list:\n \n if helperSession is not None:\n session = helperSession\n else:\n from flask import session\n \n result = response\n filters = session[\"oauth\"]\n \n if \"filters\" in filters:\n filters = session[\"oauth\"][\"filters\"]\n onlyFiltered = []\n \n \n if \"only\" in filters and len(filters[\"only\"]) > 0:\n onlyFiltered = [service for service in response if service[\"informations\"][\"servicename\"] in filters[\"only\"]]\n else:\n onlyFiltered = response\n \n if \"except\" in filters and len(filters[\"except\"]) > 0:\n exceptFiltered = [service for service in onlyFiltered if service[\"informations\"][\"servicename\"] not in filters[\"except\"]]\n else:\n exceptFiltered = onlyFiltered\n \n result = exceptFiltered\n \n session[\"servicelist\"] = result\n return result", "def list():\n print(json.dumps(Config.get_apis()['apis'], indent=4, sort_keys=True))", "def get_services_to_enable(self):\n ldap_enabled_services = _get_from_dictionary(self.ldap_properties, \"ambari.ldap.enabled_services\")\n\n return [x.strip().lower() for x in ldap_enabled_services.strip().split(\",\")] \\\n if ldap_enabled_services \\\n else []", "def get_all_services(limit=None, columns=None, extra_filter=None):\n return query(\"GET services\\n\", limit=limit, columns=columns, \n item_type=\"services\" , extra_filter=extra_filter)", "def list_api(self) -> str:\n return pulumi.get(self, \"list_api\")", "def AddImplicitUnrestrictedServiceWildcard(ref, args, req):\n del ref, args # Unused in AddImplicitServiceWildcard\n\n m = util.GetMessages(version='v1beta')\n if req.servicePerimeter.perimeterType == (\n 
m.ServicePerimeter.PerimeterTypeValueValuesEnum.PERIMETER_TYPE_REGULAR):\n service_perimeter_config = req.servicePerimeter.status\n if not service_perimeter_config:\n service_perimeter_config = m.ServicePerimeterConfig\n service_perimeter_config.unrestrictedServices = ['*']\n req.servicePerimeter.status = service_perimeter_config\n return req", "def services(self):\n _log.debug('get service list')\n result = self._requestJSON('services', '')\n return self._getKey(result, 'name')", "def _ParseRestriction(args, perimeter_result, version, restriction_type,\n dry_run):\n if _IsServiceFilterUpdateSpecified(args, restriction_type):\n # If there is no service restriction message in the request, make an empty\n # one to populate.\n config = _GetConfig(perimeter_result, dry_run)\n if getattr(config, restriction_type + 'ServiceRestriction', None) is None:\n restriction_message = getattr(\n apis.GetMessagesModule('accesscontextmanager', version),\n restriction_type.capitalize() + 'ServiceRestriction')()\n setattr(config, restriction_type + 'ServiceRestriction',\n restriction_message)\n\n def FetchAllowed():\n return getattr(\n _GetConfig(perimeter_result, dry_run),\n restriction_type + 'ServiceRestriction').allowedServices\n\n return repeated.ParsePrimitiveArgs(args,\n restriction_type + '_allowed_services',\n FetchAllowed)", "def list(cls, api_client, **kwargs):\n\n cmd = {}\n cmd.update(kwargs)\n if 'account' in kwargs.keys() and 'domainid' in kwargs.keys():\n cmd['listall'] = True\n return api_client.listNetworkServiceProviders(**cmd)", "def test_list_apiregistration_v1beta1_api_service(self):\n pass", "def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]", "def get_services(self):\n xpath = [\"Services\", \"Service\"]\n return self.find_anywhere(xpath)", "def deferrable_services():\n _svcs = services()\n _svcs.extend(['ovs-vswitchd', 'ovsdb-server',\n 'openvswitch-switch', 'ovs-record-hostname'])\n return list(set(_svcs))", "def api_services():\n return [\n GitHubMemberService(),\n RepoService()\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Detect a face and return a cropped image singling out that face.
def crop_face(img):
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        face_cascade = cv2.CascadeClassifier('xml/haarcascade_frontalface_alt2.xml')
        faces = face_cascade.detectMultiScale(gray, 1.05, 5)
        face = np.array(0)
        # if face found
        if len(faces) > 0:
            (x, y, w, h) = faces[0]
            # extend the size of the face detected
            ext = int(abs(h-y) * 0.5)
            # test if extension fits on image, if not ext maximum amount
            if (y+h+ext) > img.shape[0]:
                ext = img.shape[0] - h
            face = img[y:y + h + ext, x:x + w]
    # if problem with extracting face, print error and raise FaceNotFound
    except Exception as e:
        print("Error1: ", e)
        raise FaceNotFound
    return face
[ "def __extract_face_crop(self, image, face_data):\n face_x, face_y, face_w, face_h = face_data[:4]\n\n start_x = int(face_x)\n end_x = start_x + int(face_w)\n start_y = int(face_y)\n end_y = start_y + int(face_h)\n\n start_x = max(0, start_x)\n end_x = min(image.shape[1], end_x)\n start_y = max(0, start_y)\n end_y = min(image.shape[0], end_y)\n\n # Crop the image.\n crop = image[start_y:end_y, start_x:end_x]\n\n # Resize the crop.\n crop = cv2.resize(crop, (400, 400))\n\n return crop", "def get_face(imagePath):\n image = cv2.imread(imagePath)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n cascPath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(cascPath)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.1,\n minNeighbors=5,\n minSize=(30, 30)\n )\n\n if len(faces) < 1:\n return None\n (x, y, w, h) = faces[0]\n crop_img = image[y:y + h, x:x + w]\n return crop_img", "def crop_face(self, new_foldername, image_name):\n file_types = ('.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG')\n\n files = [file_i for file_i in os.listdir(self.folderpath) if file_i.endswith(file_types)]\n\n filenames = [os.path.join(self.folderpath, fname)\n for fname in files]\n\n count = 0\n image_number = 0\n for file in filenames:\n image_number += 1\n print(' image number ', image_number)\n image = cv2.imread(file)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n faces = faceCascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=3, minSize=(30, 30))\n\n print(\"[INFO] Found {0} Faces.\".format(len(faces)))\n\n for (x, y, w, h) in faces:\n count += 1\n w = w + 50\n h = h + 150\n p = 50\n crop_img = image[y - p + 1:y + h + p, x + 1:x + w]\n\n print(\"[INFO] Object found. 
Saving locally.\")\n try:\n sharpen = cv2.resize(crop_img, (150, 150), interpolation=cv2.INTER_AREA) # try something else\n\n if not os.path.exists(new_foldername):\n os.makedirs(new_foldername)\n cv2.imwrite(new_foldername + \"/\" + image_name + '_' + str(count) + \".jpg\", sharpen)\n except:\n pass\n print('Images saved in', new_foldername)", "def _load_crop(self, frame, face_bbox):\n frame_path = os.path.join(self.frame_dir, frame)\n\n image = cv2.imread(frame_path)\n if image is None:\n raise RuntimeError(\"Failed to read image: %s\" % (frame_path))\n\n # Extract the crop.\n return self.__extract_face_crop(image, face_bbox)", "def get_img_crop(img_path, bbox, scale=2.5):\n # print(img_path)\n img = cv2.imread(img_path)\n\n if img is None:\n print('reading empty image: ', img_path)\n return None\n\n shape = img.shape\n h, w = shape[:2]\n x_min, y_min, face_x, face_y = bbox\n x_mid = x_min + face_x / 2.0\n y_mid = y_min + face_y / 2.0\n\n if x_mid<=0 or y_mid<=0 or face_x<=0 or face_y<=0:\n return None\n\n x_min = int( max(x_mid - face_x * scale / 2.0, 0) )\n x_max = int( min(x_mid + face_x * scale / 2.0, w) )\n y_min = int( max(y_mid - face_y * scale / 2.0, 0) )\n y_max = int( min(y_mid + face_y * scale / 2.0, h) )\n\n if x_min >= x_max or y_min >= y_max:\n return None\n \n return img[y_min:y_max, x_min:x_max, :]", "def classify_face(self, face):\n face_transform = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(face_transform.astype('uint8'), 'RGB')\n\n transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]\n )\n\n img = transform(img)\n batch_t = torch.unsqueeze(img, 0).to(self.device)\n with torch.no_grad():\n out = self.clf(batch_t)\n _, pred = torch.max(out, 1)\n\n pred = np.array(pred[0].cpu())\n out = np.array(out[0].cpu())\n return pred, out", "def face_detect(face_detector, img):\n test_img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)\n grayed_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n face_coordinates = face_detector.detectMultiScale(grayed_img, 1.1, 5)\n return grayed_img, face_coordinates", "def face_highlight(img, bounding_boxes, scalar):\n img = img.astype('int32')\n # filter real faces based on detection confidence\n confidence_thresh = 0.85\n filtered_idx = bounding_boxes[:, 4]>=confidence_thresh\n filtered_bboxes = bounding_boxes[filtered_idx]\n\n # if no faces found, return a darker image\n if not len(filtered_bboxes):\n return np.clip(img-50, 0, 255).astype('uint8')\n\n nrof_faces = len(filtered_bboxes)\n\n # detect multiple faces or not\n det = filtered_bboxes[:, 0:4]\n det_arr = []\n img_size = np.asarray(img.shape)\n if nrof_faces>1:\n # if multiple faces found, we choose one face\n # which is located center and has larger size\n bounding_box_size = (det[:,2] - det[:,0]) * (det[:,3] - det[:,1])\n img_center = img_size / 2\n offsets = np.vstack([(det[:,0]+det[:,2])/2 - img_center[0],\n (det[:,1]+det[:,3])/2 - img_center[1]])\n offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)\n # some extra weight on the centering\n index = np.argmax(bounding_box_size - offset_dist_squared * 2.0)\n det_arr.append(det[index, :])\n else:\n det_arr.append(np.squeeze(det))\n\n det = np.squeeze(det_arr)\n # compute expanding bounding box\n bb, box_size = get_square_crop_box(det, scalar)\n # get the valid pixel index of cropped face\n face_left = np.maximum(bb[0], 0)\n face_top = np.maximum(bb[1], 0)\n face_right = np.minimum(bb[2], img_size[0])\n 
face_bottom = np.minimum(bb[3], img_size[1])\n\n # highlight the face with circle\n xx, yy = np.mgrid[:img_size[0], :img_size[1]]\n center_x = int((bb[3] + bb[1])/2)\n center_y = int((bb[2] + bb[0])/2)\n circle_r2 = int(0.25 * box_size**2)\n circle = (xx - center_x) ** 2 + (yy - center_y) ** 2\n highlight_mat = circle > circle_r2\n highlight_mat = np.repeat(np.expand_dims(highlight_mat, 2), 3, axis=2)\n # highlight the face with square\n #highlight_mat = np.ones_like(img)\n #highlight_mat[face_top:face_bottom, face_left:face_right, :] = 0\n \n return np.clip(img-50*highlight_mat, 0, 255).astype('uint8')", "def cropFaces(image):\n\n face_cascade = loadClassifier()\n faces = face_cascade.detectMultiScale(image, 1.25, 6)\n\n # Print number of faces found\n print('Number of faces detected:', len(faces))\n\n extracted_faces = []\n\n for face in tqdm(faces):\n x, y, w, h = [ v for v in face ]\n cv2.rectangle(image, (x,y), (x+w, y+h), GRAY_CODE, CHANNELS)\n extracted_faces.append(image[y:y+h, x:x+w] )\n \n return extracted_faces", "def crop_face(self, img, padding=0):\n img_arr = np.array(img)\n crop = self.get_bounds(img_arr)\n if not crop:\n return None\n crop[0] += padding\n crop[1] += padding\n crop[2] -= padding\n crop[3] -= padding\n return img.crop(crop)", "def ffp_detect(self, img):\r\n # convert to gray\r\n if img.ndim > 2:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n \r\n # detect face first\r\n bbox = self.face_detect(img).flatten()\r\n num_pts = self.face_lmks_model['num_pts']\r\n norm_width = self.face_lmks_model['norm_width']\r\n num_iter = self.face_lmks_model['num_iter']\r\n if bbox.shape[0] == 0:\r\n pts = np.zeros((num_pts, 2))\r\n return pts, 2, 0\r\n \r\n\r\n # obtain normalized face image and bounding box\r\n face_scale = norm_width/bbox[2]\r\n img = cv2.resize(img, None, fx=face_scale, fy=face_scale, interpolation=cv2.INTER_CUBIC) \r\n bbox_norm = (bbox*face_scale).round().astype(np.uint16)\r\n cut_x1 = max([0, bbox_norm[0] - self.face_lmks_model['margin']])\r\n cut_x2 = min([bbox_norm[0] + bbox_norm[2] + self.face_lmks_model['margin'], img.shape[1]-1])\r\n cut_y1 = max([0, bbox_norm[1] - self.face_lmks_model['margin']])\r\n cut_y2 = min([bbox_norm[1] + bbox_norm[3] + self.face_lmks_model['margin'], img.shape[0]-1])\r\n im_cut = img[cut_y1:cut_y2, cut_x1:cut_x2]\r\n bbox_cut = bbox_norm.copy()\r\n bbox_cut[0] = bbox_cut[0] - cut_x1 + 1\r\n bbox_cut[1] = bbox_cut[1] - cut_y1 + 1\r\n\r\n # detect facial landmarks with cascade framework\r\n for it in np.arange(num_iter):\r\n if it == 0:\r\n x0_norm = np.zeros((num_pts*2))\r\n x0_norm[0::2] = self.face_lmks_model['mm'][0::2] + bbox_cut[0] + bbox_cut[2]/2.0\r\n x0_norm[1::2] = self.face_lmks_model['mm'][1::2] + bbox_cut[1] + bbox_cut[3]/2.0\r\n # compute features\r\n temp = x0_norm.reshape(-1, 2)\r\n tkp = []\r\n for idx in range(temp.shape[0]):\r\n tkp.append(cv2.KeyPoint(temp[idx, 0], temp[idx, 1], 5.2, -1, 1, 0, 1))\r\n tkp, tdp = self.sift_extractor.compute(im_cut, tkp)\r\n tdp = tdp.reshape(1, -1)\r\n tdp = np.append(1, tdp/255.0)\r\n V_diff = np.dot(self.face_lmks_model['para_detect'][it]['R'], tdp)\r\n x0_norm = x0_norm + V_diff\r\n \r\n # confidence, evaluate the quality of facial landmark detection\r\n flag_succ, confidence = self.compute_confidence(im_cut, x0_norm.reshape((-1, 2)), \r\n self.face_detector['confidence_SIFT']['descriptor'],\r\n self.face_detector['confidence_SIFT']['thre_detect'])\r\n if flag_succ == 0:\r\n x0_norm = x0_norm.reshape((-1, 2))\r\n x_est = (x0_norm + np.array([cut_x1-1, 
cut_y1-1]).reshape((-1, 2)))/face_scale \r\n else:\r\n x_est = np.zeros((num_pts, 2))\r\n return x_est.reshape((-1, 2)), flag_succ, confidence", "def face_detection_to_roi(\r\n face_detection: Detection,\r\n image_size: Tuple[int, int]\r\n) -> Rect:\r\n absolute_detection = face_detection.scaled(image_size)\r\n left_eye = absolute_detection[FaceIndex.LEFT_EYE]\r\n right_eye = absolute_detection[FaceIndex.RIGHT_EYE]\r\n return bbox_to_roi(\r\n face_detection.bbox,\r\n image_size,\r\n rotation_keypoints=[left_eye, right_eye],\r\n scale=ROI_SCALE,\r\n size_mode=SizeMode.SQUARE_LONG\r\n )", "def cropImage():", "def crop_image(img, box):\n return img.crop(box)", "def _get_crop(cytomine, image_inst, geometry):\n bounds = dict()\n bounds[\"x\"], bounds[\"y\"], bounds[\"w\"], bounds[\"h\"] = geometry\n url = \"{}{}{}{}\".format(cytomine._Cytomine__protocol, cytomine._Cytomine__host, cytomine._Cytomine__base_path,\n image_inst.get_crop_url(bounds))\n resp, content = cytomine.fetch_url(url)\n if resp.status != 200:\n raise IOError(\"Couldn't fetch the crop for image {} and bounds {} from server at url {} (status : {}).\".format(image_inst.id, geometry, url, resp.status))\n tmp = cStringIO.StringIO(content)\n pil_image = PILImage.open(tmp) # fetch the image but this process inverts R and B channels (?)\n return np.asarray(pil_image)[:, :, (2, 1, 0)] # so reorder to channel to have a valid RGB image", "def crop_images(old_dir, new_dir):\n print(\"crop_images\")\n os.mkdir(new_dir)\n\n H, S, V = None, None, None\n for filename in os.listdir(old_dir):\n if not filename.endswith(\".jpg\"):\n continue\n\n index = filename.find(\".jpg\")\n name = filename[:index]\n\n # Approximate coordinates of face\n coords = scipy.io.loadmat(old_dir + name + \".mat\")\n start_x = int(coords[\"x\"][0][0] - 0.5*(coords[\"x\"][1][0] - coords[\"x\"][0][0]))\n end_x = int(coords[\"x\"][1][0] + 0.5*(coords[\"x\"][1][0] - coords[\"x\"][0][0]))\n start_y = int(coords[\"y\"][0][0] - (coords[\"y\"][3][0] - coords[\"y\"][0][0]))\n end_y = int(coords[\"y\"][3][0] + (coords[\"y\"][3][0] - coords[\"y\"][2][0]))\n img = io.imread(old_dir + filename)\n face = img[start_y:end_y, start_x:end_x]\n # Save cropped image\n scipy.misc.imsave(new_dir + name + \".png\", face)", "def locate_faces(input_image):\n face_cascade = cv2.CascadeClassifier(CASCADE_FILE_PATH)\n gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)\n # detect the faces\n faces = face_cascade.detectMultiScale(gray, 1.2, 5)\n print(faces)\n return faces", "def _detect_face(self, frame):\n face_coords = list()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = self.detector(gray, 0)\n print(rects)\n # get bounding box for every face in the frame\n for i, d in enumerate(rects):\n x1 = d.left()-consts.PADDING\n y1 = d.top()-consts.PADDING\n x2 = d.right()+consts.PADDING\n y2 = d.bottom()+consts.PADDING\n face_coords.append((x1, y1, x2, y2))\n return face_coords", "def detect(self, src):\n pre = self.preprocess(src)\n seg = self.segment(pre)\n morph = self.morphological(seg)\n hulls = self.create_convex_hulls(morph)\n gate_im = self.bound_gate_using_poles(hulls, src)\n return gate_im" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open video, analyze face using the `model`
def start_video(model, model_vars):
    vid = cv2.VideoCapture(0)
    counter = 0
    text = ""
    frame_title = "Press q to quit"
    while True:
        # Capture video
        _, frame = vid.read()

        # send image to CNN model every 50 iterations
        if counter == 50:
            try:
                img = process_image(frame, model_vars)
            # Error processing image, attempt next frame
            except:
                counter = 49
                continue

            age, race, gender = model.predict(img)
            age, race, gender = process_results(age, race, gender, model_vars)
            text = f"Age: {age}, Race: {race}, Gender: {gender}"
            print('Prediction: ', text)
            counter = 0

        try:
            # display the resulting frame
            cv2.putText(**optimize_text(text, frame))
            cv2.imshow(frame_title, frame)
        except:
            counter = 49
            continue

        # check if q pressed to quit program
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        counter += 1

    vid.release()
    cv2.destroyAllWindows()
[ "def run_model(model_path, **args):\r\n if args['model_type'] == 'normal':\r\n model_path = 'saved_models/normal_model'\r\n\r\n print(f\"Retrieving {args['model_type']} model...\")\r\n model = get_model(model_path)\r\n print(\"Model retrieved.\")\r\n model_vars = get_model_vars()\r\n # start video analysis using model\r\n if args.get('video', False):\r\n print(\"starting video\")\r\n start_video(model, model_vars)\r\n # if not video, then individual image will be analyzed\r\n else:\r\n img_path = args['img_path'][0]\r\n analyze_picture(model, model_vars, img_path)", "def process_video(video_path: Union[int, str]) -> None:\n cap = cv2.VideoCapture(video_path)\n while True:\n ret, image = cap.read()\n if not ret:\n print('Can\\'t get frame. Stop working.')\n cap.release()\n return\n faces = detector.inference(image)\n classes = []\n for face_coordinates in faces:\n x, y, w, h = get_coordinates(image, face_coordinates, COORDINATES_EXTEND_VALUE)\n class_result = classifier.inference(image[y:y + h, x:x + w, :])\n classes.append(class_result)\n image = draw_results(image, faces, classes)\n cv2.imshow('Video', image)\n if cv2.waitKey(1) == ord('q'):\n cap.release()\n return", "def run(self):\n cap = cv2.VideoCapture(0)\n while True:\n ret, frame = cap.read()\n if ret:\n boxes, face_probs = self.mtcnn.detect(frame)\n if boxes is not None and len(boxes) > 0:\n name_probs = []\n for box in boxes:\n y1, y2, x1, x2 = int(box[1]), int(box[3]), int(box[0]), int(box[2])\n face = frame[y1:y2, x1:x2]\n if face.size > 0:\n pred, probs = self.classify_face(face)\n name_probs.append(probs)\n\n self.draw(frame, boxes, face_probs, name_probs)\n else:\n cv2.putText(frame, \"Couldn't Find Any Faces\", (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n (0, 0, 255), 1, cv2.LINE_AA)\n cv2.imshow(\"Face Detection\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def Detect_From_Video(self, input_path:str, output_path:str, extract_objects = False, fps:int = 25):\n \n if (output_path is None) or (input_path is None):\n raise RuntimeError ('Output_Path should not be None, & One of Camera_input OR Input_Path must be specified.')\n \n elif self.modelLoaded != True:\n raise RuntimeError ('First You have to specify which model you want to use.')\n \n self.image_path = input_path\n self.output_path = output_path\n out = None\n \n cap = cv2.VideoCapture(self.image_path)\n \n length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n print(f'\\nThere are {length} Frames in this video')\n print('-' * 20)\n print('Detecting Objects in the Video... 
Please Wait...')\n print('-' * 20)\n\n while(cap.isOpened()):\n retreive, frame = cap.read()\n if not retreive:\n break\n \n frame = np.array(frame)[..., ::-1]\n \n if self.modelType == 'retinanet':\n im = self.model.predict(frame, './', debug = False, extract_objects = extract_objects)\n \n elif self.modelType == 'yolo':\n im = self.model.predict(frame, './', debug = False, iou = self.iou, score = self.score)\n \n elif self.modelType == 'tinyyolo':\n im = self.model.predict(frame, './', debug = False, iou = self.iou, score = self.score)\n\n elif self.modelType == 'centernet':\n im = self.model.predict(frame, './', debug = False)\n im = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)\n\n else:\n raise RuntimeError ('Invalid Model Type, For Video_ObjectDetection you can use \\n \"RetinaNet\" \\t \"CenterNet\" \\t \"YOLOv4\" \\t \"TinyYOLOv4\"')\n \n if out is None:\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n out = cv2.VideoWriter(self.output_path, fourcc, fps, (frame.shape[1], frame.shape[0]))\n \n out.write(im)\n print('Done. Processing has been Finished... Please Check Output Video.')\n out.release()\n cap.release()", "def open_video(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file', '/home')\n if fname[0]:\n f = open(fname[0], 'r')\n with f:\n self.changeFileSrc(fname[0],self.engine)", "def faceRecog():\n recognise.main()", "def run(self):\n\n cv2.namedWindow(consts.UPLOADER_WINDOW)\n # TODO : video capture source should be handled by camera.py and /\n # not default 0(webcam)\n self.camera = cv2.VideoCapture(0)\n while self.camera.isOpened() and self.ready_to_detect_face:\n _, frame = self.camera.read()\n face_coords = self._detect_face(frame)\n # draw rectangle bounding box for every face\n for i in face_coords:\n print(\"found face coords\")\n self._upload(frame)\n cv2.rectangle(frame,(i[0], i[1]),(i[2], i[3]),(255,0,0),2)\n print(f\"Detected face: uploading as {self.name} .. 
exiting\")\n self.ready_to_detect_face = False\n\n key = cv2.waitKey(100)\n cv2.imshow(consts.UPLOADER_WINDOW, frame)\n\n if key == 27: # exit on ESC\n break\n self.stop()", "def infer_on_stream(args, client):\n # Initialize the Inference Engine\n infer_network = Network()\n\n # Set Probability threshold for detections\n prob_threshold = args.prob_threshold\n\n # Load the model through `infer_network`\n infer_network.load_model(args.model, args.device, CPU_EXTENSION, num_requests=0)\n\n # Get a Input blob shape\n _, _, in_h, in_w = infer_network.get_input_shape()\n\n # Get a output blob name\n _ = infer_network.get_output_name()\n \n # Handle the input stream\n try:\n cap = cv2.VideoCapture(args.input)\n except FileNotFoundError:\n print(\"Cannot locate video file: \"+ args.input)\n except Exception as e:\n print(\"Something else went wrong with the video file: \", e)\n \n cap.open(args.input)\n _, frame = cap.read()\n\n people_total_count = 0\n people_in_a_frame = 0\n\n g_elapsed = 0\n entre_ROI_xmin = 400\n entre_ROI_ymin = 450\n exit_ROI_xmin = 550\n exit_ROI_ymin = 410\n\n fps = FPS().start()\n\n # Process frames until the video ends, or process is exited\n while cap.isOpened():\n # Read the next frame\n flag, frame = cap.read()\n if not flag:\n break\n \n fh = frame.shape[0]\n fw = frame.shape[1]\n key_pressed = cv2.waitKey(50)\n \n image_resize = cv2.resize(frame, (in_w, in_h), interpolation = cv2.INTER_AREA)\n image = np.moveaxis(image_resize, -1, 0)\n\n # Perform inference on the frame\n infer_network.exec_net(image, request_id=0)\n \n # Get the output of inference\n if infer_network.wait(request_id=0) == 0:\n result = infer_network.get_output(request_id=0)\n for box in result[0][0]: # Output shape is 1x1x100x7\n conf = box[2]\n if conf >= prob_threshold:\n xmin = int(box[3] * fw)\n ymin = int(box[4] * fh)\n xmax = int(box[5] * fw)\n ymax = int(box[6] * fh)\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255), 3)\n\n if xmin < entre_ROI_xmin and ymax < entre_ROI_ymin: \n if fsm.current == \"empty\":\n # Count a people\n people_in_a_frame += 1\n people_total_count += 1\n # Start the timer\n start_time = time.perf_counter()\n # Person entered a room - fsm state change\n fsm.enter()\n print(xmax, ymax)\n if args.output == \"WEB\":\n # Publish people_count messages to the MQTT server\n client.publish(\"person\", json.dumps({\"count\": people_in_a_frame}))\n log.info(\"#########################\")\n log.info(\"Person entered into frame\")\n log.info(\"#########################\")\n\n if xmin > exit_ROI_xmin and ymax < exit_ROI_ymin:\n if fsm.current == \"standing\":\n # Change the state to exit - fsm state change\n fsm.exit()\n stop_time = time.perf_counter()\n elapsed = stop_time - start_time\n \n # Update average time\n log.info(\"elapsed time = {:.12f} seconds\".format(elapsed))\n g_elapsed = (g_elapsed + elapsed) / people_total_count\n log.info(\"g_elapsed time = {:.12f} seconds\".format(g_elapsed))\n \n people_in_a_frame = 0\n\n if args.output == \"WEB\":\n # Publish duration messages to the MQTT server\n client.publish(\"person/duration\", json.dumps({\"duration\": g_elapsed}))\n client.publish(\"person\", json.dumps({\"count\": people_in_a_frame}))\n log.info(\"#########################\")\n log.info(\"Person exited from frame\")\n log.info(\"#########################\")\n\n log.info(\"xmin:{} xmax:{} ymin:{} ymax:{}\".format(xmin, xmax, ymin, ymax))\n \n if args.output != \"WEB\": \n # Update info on frame\n info = [\n (\"people_ccount\", people_total_count),\n ]\n \n 
# loop over the info tuples and draw them on our frame\n for (i, (k, v)) in enumerate(info):\n text = \"{}: {}\".format(k, v)\n cv2.putText(frame, text, (10, fh - ((i * 20) + 20)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)\n \n if args.output == \"WEB\":\n # Push to FFmpeg server\n sys.stdout.buffer.write(frame)\n\n sys.stdout.flush()\n else:\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n #Break if escape key pressed\n if key_pressed == 27:\n break\n \n fps.update()\n \n # Release the out writer, capture, and destroy any OpenCV windows\n cap.release()\n\n if args.output == \"WEB\":\n client.disconnect()\n else:\n cv2.destroyAllWindows()\n \n fps.stop()\n\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))", "def analyze_video(self, filename):\r\n #Read in the video and analyze the swing for data points\r\n video_processor = Video_Processor(\"generatedVideos/\" + filename + \".avi\", True)\r\n video_processor.read_video(\"full_swing\")\r\n\r\n # Load the points into the golfer class\r\n golfer = Golfer(video_processor.points_frames)\r\n # Load in a new video processor to split the video into frames\r\n new_video_processor = Video_Processor(\"generatedVideos/toProcess.avi\")\r\n frames = new_video_processor.slice_video()\r\n\r\n # Load in a video splitter and pass in the frames and the golfer points.\r\n video_splitter = Swing_Divider(golfer, frames)\r\n video_splitter.slice_video(\"user_videos\")\r\n\r\n #csv_writer = CSV_Creator(golfer.get_golfer())\r\n #csv_writer.generate_csv(\"swing.csv\")\r\n\r\n # Make machine learning detections\r\n swing_scorer = EvaluateSwing()\r\n score = swing_scorer.process_probabilities(\"user_videos\")\r\n \r\n feedback_giver = GiveFeedback(score)\r\n\r\n feedback = (feedback_giver.get_setup(), feedback_giver.get_bswing(), feedback_giver.get_fswing())\r\n\r\n self.screen = Analysis_Screen(self.root, \"generatedVideos/full_swing.avi\", feedback)\r\n logging.info(\"Thread is closed\")", "def main():\n\n # Parse CLI arguments and initialize VideoCapture object.\n scene_detectors = get_available_detectors()\n timecode_formats = get_timecode_formats()\n args = get_cli_parser(\n scene_detectors.keys(), timecode_formats.keys()).parse_args()\n cap = cv2.VideoCapture()\n\n # Attempt to open the passed input (video) file.\n cap.open(args.input.name)\n if not cap.isOpened():\n if not args.quiet_mode:\n print('[PySceneDetect] FATAL ERROR - could not open video %s.' % \n args.input.name)\n return\n elif not args.quiet_mode:\n print('[PySceneDetect] Parsing video %s...' 
% args.input.name)\n\n # Print video parameters (resolution, FPS, etc...)\n video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n video_fps = cap.get(cv2.CAP_PROP_FPS)\n if not args.quiet_mode:\n print('[PySceneDetect] Video Resolution / Framerate: %d x %d / %2.3f FPS' % (\n video_width, video_height, video_fps ))\n\n # Load SceneDetector with proper arguments based on passed detector (-d).\n # TODO: Add minimum scene length as a variable argument.\n detection_method = args.detection_method.lower()\n detector = None\n if (detection_method == 'content'):\n detector = scene_detectors['content'](args.threshold, args.min_scene_len)\n elif (detection_method == 'threshold'):\n detector = scene_detectors['threshold'](\n args.threshold, args.min_percent/100.0, args.min_scene_len,\n block_size = args.block_size)\n \n # Perform scene detection using specified mode.\n if not args.quiet_mode:\n print('[PySceneDetect] Detecting scenes (%s mode)...' % detection_method)\n scene_list = list()\n frames_read = detect_scenes(cap, scene_list, [detector],\n args.stats_file, args.quiet_mode)\n # Print scene list if requested.\n if not args.quiet_mode:\n print('[PySceneDetect] Processing complete, found %d scenes in video.' %\n len(scene_list))\n print('[PySceneDetect] List of detected scenes:')\n if args.list_scenes:\n print ('----------------------------------------------')\n print (' Scene # | Frame # ')\n print ('----------------------------------------------')\n for scene_idx, frame_num in enumerate(scene_list):\n print (' %3d | %8d' % (scene_idx, frame_num))\n print ('----------------------------------------------')\n print('[PySceneDetect] Comma-separated timecode output:')\n\n # Print CSV separated timecode output.\n scene_list_msec = [(1000.0 * x) / float(video_fps) for x in scene_list]\n print([get_timecode_string(x) for x in scene_list_msec].__str__()[1:-1]\n .replace(\"'\",\"\").replace(' ', ''))\n\n # Cleanup, release all objects and close file handles.\n cap.release()\n if args.stats_file: args.stats_file.close()\n return", "def __call__(self, filename):\n # Create video reader and find length\n v_cap = cv2.VideoCapture(filename)\n v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Pick 'n_frames' evenly spaced frames to sample\n if self.n_frames is None:\n sample = np.arange(0, v_len)\n else:\n sample = np.linspace(0, v_len - 1, self.n_frames).astype(int)\n\n # Loop through frames\n faces = []\n frames = []\n for j in range(v_len):\n success = v_cap.grab()\n if j in sample:\n # Load frame\n success, frame = v_cap.retrieve()\n if not success:\n continue\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n\n # Resize frame to desired size\n if self.resize is not None:\n frame = frame.resize([int(d * self.resize) for d in frame.size])\n frames.append(frame)\n\n # When batch is full, detect faces and reset frame list\n if len(frames) % self.batch_size == 0 or j == sample[-1]:\n faces.extend(self.detector(frames))\n frames = []\n\n v_cap.release()\n\n return faces", "def open_stream(device):\n\n global cap\n global frame_width\n global frame_height\n global frame_area\n\n if device is None:\n device = '12ft.mp4'\n\n try:\n\n # an integer X indicates the webcam address, ie. 
/dev/videoX\n cap = cv2.VideoCapture(int(device))\n # set resolution manually\n # the Logitech C920 is 1080p\n cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 1920)\n cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 1080)\n print \"Opened webcam at: /dev/video%s\" % device\n except:\n \n # if it's not an integer, it's a filepath for a video\n cap = cv2.VideoCapture(\"video_in/\" + device)\n print \"Opened video file at: %s\" % device\n # Figure out the video dimensions\n frame_width = 400#int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))\n frame_height = 640 #int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))\n if frame_width == frame_height == 0:\n print \"ERROR: resolution is 0x0; falling back to 12ft.mp4\"\n cap = cv2.VideoCapture('video_in/12ft.mp4')\n frame_width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))\n frame_height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))\n\n frame_area = frame_width * frame_height\n print \"Video resolution: %sx%s\" % (frame_width, frame_height)", "def detect_scenes_file(path, scene_manager):\n\n cap = cv2.VideoCapture()\n frames_read = -1\n frames_processed = -1\n video_fps = -1\n if not scene_manager.timecode_list:\n scene_manager.timecode_list = [0, 0, 0]\n\n # Attempt to open the passed input (video) file.\n cap.open(path)\n file_name = os.path.split(path)[1]\n if not cap.isOpened():\n if not scene_manager.quiet_mode:\n print('[PySceneDetect] FATAL ERROR - could not open video %s.' % path)\n return (video_fps, frames_read)\n elif not scene_manager.quiet_mode:\n print('[PySceneDetect] Parsing video %s...' % file_name)\n\n # Print video parameters (resolution, FPS, etc...)\n video_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n video_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n video_fps = cap.get(cv2.CAP_PROP_FPS)\n if not scene_manager.quiet_mode:\n print('[PySceneDetect] Video Resolution / Framerate: %d x %d / %2.3f FPS' % (\n video_width, video_height, video_fps))\n if scene_manager.downscale_factor >= 2:\n print('[PySceneDetect] Subsampling Enabled (%dx, Resolution = %d x %d)' % (\n scene_manager.downscale_factor,\n video_width / scene_manager.downscale_factor,\n video_height / scene_manager.downscale_factor))\n print('Verify that the above parameters are correct'\n ' (especially framerate, use --force-fps to correct if required).')\n\n # Convert timecode_list to absolute frames for detect_scenes() function.\n frames_list = []\n for timecode in scene_manager.timecode_list:\n if isinstance(timecode, int):\n frames_list.append(timecode)\n elif isinstance(timecode, float):\n frames_list.append(int(timecode * video_fps))\n elif isinstance(timecode, list) and len(timecode) == 3:\n secs = float(timecode[0] * 60 * 60) + float(timecode[1] * 60) + float(timecode[2])\n frames_list.append(int(secs * video_fps))\n else:\n frames_list.append(0)\n\n start_frame, end_frame, duration_frames = 0, 0, 0\n if len(frames_list) == 3:\n start_frame, end_frame, duration_frames = (\n frames_list[0], frames_list[1], frames_list[2])\n\n # Perform scene detection on cap object (modifies scene_list).\n frames_read, frames_processed = detect_scenes(\n cap, scene_manager, file_name, start_frame, end_frame, duration_frames)\n\n # Cleanup and return number of frames we read/processed.\n cap.release()\n return (video_fps, frames_read, frames_processed)", "def Video(self):\n self = self._cam._AcqMode.Video\n self.__call__(start=False)", "def run_on_video(self, video):\r\n video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)\r\n\r\n def process_predictions(frame, predictions):\r\n frame = 
cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\r\n if \"panoptic_seg\" in predictions:\r\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\r\n vis_frame = video_visualizer.draw_panoptic_seg_predictions(\r\n frame, panoptic_seg.to(self.cpu_device), segments_info\r\n )\r\n elif \"instances\" in predictions:\r\n predictions = predictions[\"instances\"].to(self.cpu_device)\r\n vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)\r\n elif \"sem_seg\" in predictions:\r\n vis_frame = video_visualizer.draw_sem_seg(\r\n frame, predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\r\n )\r\n\r\n # Converts Matplotlib RGB format to OpenCV BGR format\r\n vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)\r\n return vis_frame\r\n\r\n frame_gen = self._frame_from_video(video)\r\n if self.parallel:\r\n buffer_size = self.predictor.default_buffer_size\r\n\r\n frame_data = deque()\r\n\r\n for cnt, frame in enumerate(frame_gen):\r\n frame_data.append(frame)\r\n self.predictor.put(frame)\r\n\r\n if cnt >= buffer_size:\r\n frame = frame_data.popleft()\r\n predictions = self.predictor.get()\r\n yield process_predictions(frame, predictions)\r\n\r\n while len(frame_data):\r\n frame = frame_data.popleft()\r\n predictions = self.predictor.get()\r\n yield process_predictions(frame, predictions)\r\n else:\r\n for frame in frame_gen:\r\n yield process_predictions(frame, self.predictor(frame))", "def test_api(test_video_file):\n # (str) -> None\n\n print(\"Running PySceneDetect API test...\")\n\n print(\"PySceneDetect version being used: %s\" % str(scenedetect.__version__))\n\n # Create a video_manager point to video file testvideo.mp4. Note that multiple\n # videos can be appended by simply specifying more file paths in the list\n # passed to the VideoManager constructor. 
Note that appending multiple videos\n # requires that they all have the same frame size, and optionally, framerate.\n video_manager = VideoManager([test_video_file])\n stats_manager = StatsManager()\n scene_manager = SceneManager(stats_manager)\n # Add ContentDetector algorithm (constructor takes detector options like threshold).\n scene_manager.add_detector(ContentDetector())\n base_timecode = video_manager.get_base_timecode()\n\n try:\n # If stats file exists, load it.\n if os.path.exists(STATS_FILE_PATH):\n # Read stats from CSV file opened in read mode:\n with open(STATS_FILE_PATH, 'r') as stats_file:\n stats_manager.load_from_csv(stats_file, base_timecode)\n\n start_time = base_timecode + 20 # 00:00:00.667\n end_time = base_timecode + 20.0 # 00:00:20.000\n # Set video_manager duration to read frames from 00:00:00 to 00:00:20.\n video_manager.set_duration(start_time=start_time, end_time=end_time)\n\n # Set downscale factor to improve processing speed.\n video_manager.set_downscale_factor()\n\n # Start video_manager.\n video_manager.start()\n\n # Perform scene detection on video_manager.\n scene_manager.detect_scenes(frame_source=video_manager)\n\n # Obtain list of detected scenes.\n scene_list = scene_manager.get_scene_list(base_timecode)\n # Like FrameTimecodes, each scene in the scene_list can be sorted if the\n # list of scenes becomes unsorted.\n\n print('List of scenes obtained:')\n for i, scene in enumerate(scene_list):\n print(' Scene %2d: Start %s / Frame %d, End %s / Frame %d' % (\n i+1,\n scene[0].get_timecode(), scene[0].get_frames(),\n scene[1].get_timecode(), scene[1].get_frames(),))\n\n # We only write to the stats file if a save is required:\n if stats_manager.is_save_required():\n with open(STATS_FILE_PATH, 'w') as stats_file:\n stats_manager.save_to_csv(stats_file, base_timecode)\n\n finally:\n video_manager.release()", "def run():\n # test_utils.visualize_hog()\n # test_utils.visualize_windows()\n\n # test_utils.test_data_prep('./../vehicles', './../non-vehicles', 'HSV')\n # test_utils.test_features_prep('./../vehicles', './../non-vehicles')\n # test_utils.test_find_cars('./../test_images', 'HSV')\n\n ret, mtx, dist = helpers.calibrateCamera('./../camera_cal/')\n # test_utils.test_camera_calibration('./../camera_cal/', mtx, dist)\n\n pipeline = helpers.make_pipeline(mtx, dist, 'HSV')\n\n output_file = './../output_project_video.mp4'\n clip1 = VideoFileClip('./../project_video.mp4')\n # clip1.save_frame('./7.0.png', 7.0)\n # clip1 = VideoFileClip('./../project_video.mp4').subclip(20,35)\n output_clip = clip1.fl_image(pipeline)\n output_clip.write_videofile(output_file, audio=False)", "def load_video(path):\n\n\treturn cv.CaptureFromFile(path)", "def open_video_file(filename):\n return Video(filename)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load model, start live video or individual picture analysis via model
def run_model(model_path, **args):
    if args['model_type'] == 'normal':
        model_path = 'saved_models/normal_model'
    print(f"Retrieving {args['model_type']} model...")
    model = get_model(model_path)
    print("Model retrieved.")
    model_vars = get_model_vars()
    # start video analysis using model
    if args.get('video', False):
        print("starting video")
        start_video(model, model_vars)
    # if not video, then individual image will be analyzed
    else:
        img_path = args['img_path'][0]
        analyze_picture(model, model_vars, img_path)
[ "def start_video(model, model_vars):\r\n vid = cv2.VideoCapture(0)\r\n counter = 0\r\n text = \"\"\r\n frame_title = \"Press q to quit\"\r\n while True:\r\n # Capture video\r\n _, frame = vid.read()\r\n \r\n # send image to CNN model every 50 iterations\r\n if counter == 50:\r\n try:\r\n img = process_image(frame, model_vars)\r\n # Error processing image, attempt next frame\r\n except:\r\n counter = 49\r\n continue\r\n \r\n age, race, gender = model.predict(img)\r\n age, race, gender = process_results(age, race, gender, model_vars)\r\n text = f\"Age: {age}, Race: {race}, Gender: {gender}\"\r\n print('Prediction: ', text)\r\n counter = 0\r\n \r\n try:\r\n # display the resulting frame\r\n cv2.putText(**optimize_text(text, frame))\r\n cv2.imshow(frame_title, frame)\r\n except:\r\n counter = 49\r\n continue\r\n \r\n # check if q pressed to quit program\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n counter += 1\r\n \r\n vid.release()\r\n cv2.destroyAllWindows()", "def run(model):\n parameters_settings = get_params_settings(model)\n parameters_skymodel = get_params_skymodel(model)\n parameters_simobserve = get_params_simobserve(model)\n parameters_simanalyze = get_params_simanalyze(model)\n parameters_sources = get_params_sources(model)\n\n if model.mode == \"Multiple Runs\":\n multi_run(model, parameters_settings, parameters_skymodel, parameters_sources, parameters_simobserve,\n parameters_simanalyze)\n\n elif model.mode == \"Single Run\":\n run_iteration(model, parameters_settings, parameters_skymodel, parameters_sources,\n parameters_simobserve, parameters_simanalyze)", "def train_model(self):\n if 'vpg' in self.model_name:\n while self.step_number < self.n_steps:\n self.train_one_batch_with_vpg()\n if 'dqn' in self.model_name:\n while self.step_number < self.n_steps:\n self.train_one_episode_with_dqn()\n if 'rs' in self.model_name:\n while self.step_number < self.n_steps:\n self.train_one_episode_with_rs()\n if 'ppo' in self.model_name:\n while self.step_number < self.n_steps:\n self.train_one_batch_with_ppo()\n self.save_model()\n self.close()", "def loadModel(self, model_path='',detection_speed=\"normal\",min_face_size = 24):\r\n\r\n if(detection_speed==\"normal\"):\r\n self.__input_image_min = 800\r\n self.__input_image_max = 1333\r\n elif(detection_speed==\"fast\"):\r\n self.__input_image_min = 400\r\n self.__input_image_max = 700\r\n elif(detection_speed==\"faster\"):\r\n self.__input_image_min = 300\r\n self.__input_image_max = 500\r\n elif (detection_speed == \"fastest\"):\r\n self.__input_image_min = 200\r\n self.__input_image_max = 350\r\n elif (detection_speed == \"flash\"):\r\n self.__input_image_min = 100\r\n self.__input_image_max = 250\r\n\r\n cache_dir = os.path.join(os.path.expanduser('~'), '.faceai')\r\n if (self.__modelLoaded == False):\r\n if(self.__modelType == \"\"):\r\n raise ValueError(\"You must set a valid model type before loading the model.\")\r\n elif(self.__modelType == \"mtcnn\"):\r\n des_file = '/'.join((cache_dir, self.__modelType))\r\n self.modelPath = download_file_from_google_drive(self.__model_id[self.__modelType], des_file)\r\n model = mtccn(self.modelPath,minfacesize=min_face_size)\r\n self.__model_collection.append(model)\r\n self.__modelLoaded = True", "def load(model):\n\n processList.append(_models_sequence[model])", "def load_model(self, model):\n self.brain.load_model(model)", "def run_model(spittal_instance, config):\n\n spit = spittal_instance\n prepare_files_in_section(spit.model, config['model'])\n 
spit.model.create_model_structures()\n spit.model.model_do_jobs()", "def run_train_model(self,model,X_train,y_train,X_test,y_test,model_path,logs_path,plots_path,activate_tensorboard=0,run_id=0,tl_type='full_fine_tune'):\t\t\t\n\t\timport tensorflow as tf\n\t\tfrom sklearn.model_selection import train_test_split\n\t\tfrom tensorflow.keras.models import load_model\n\t\tfrom tensorflow.keras.callbacks import ModelCheckpoint\n\t\tfrom tensorflow.keras.callbacks import TensorBoard\n\n\t\tmodel_file_path=model_path+'/trained_model_'+str(run_id)+'.h5'\n\t\t\n\t\t#X_train, X_test, y_train, y_test = train_test_split(X_in, Y_out, test_size = self.split_ratio)\n\t\tprint(\"Data Split Completed\")\n\t\t\n\t\t#Checkpointer to save the best model\n\t\tcheckpointer = tf.keras.callbacks.ModelCheckpoint(model_file_path, verbose=1, save_best_only=True,monitor='val_loss',save_weights_only=True)\n\t\t\n\t\tcallbacks=[checkpointer]\n\t\t\n\t\tif(activate_tensorboard==1):\n\t\t\t#Activating Tensorboard for Visualization\n\t\t\ttensorboard = TensorBoard(log_dir=logs_path,histogram_freq=1, write_graph=True, write_images=True)\n\t\t\tcallbacks=[checkpointer,tensorboard]\n\t\t\n\t\t#tensorboard = TensorBoard(log_dir=logs_path,histogram_freq=1, write_graph=True, write_images=True)\n\n\t\thistory=model.fit(x=X_train, y=y_train, validation_data=(X_test,y_test), epochs=self.epochs, batch_size=self.batch_size,callbacks=callbacks)\n\t\t\n\t\ttrainviz=TrainViz()\n\t\t#trainviz.training_plot(history,plots_path,run_id)\n\t\t\n\t\tif(tl_type=='variable_lr'):\n\t\t\tinference_model=load_model(model_file_path, custom_objects={'LRMultiplier': LRMultiplier})\n\t\telse:\n\t\t\tmodel.load_weights(model_file_path)\n\t\t\n\t\tprint('Compiling test metrics...')\n\t\ty_pred=model.predict(X_test)\n\n\t\tmetrics_eval=MetricsEval()\n\t\teval_metrics_reg,accuracy_metrics_df_reg=metrics_eval.metrics_eval_base(y_pred[0],y_test[0],logs_path)\n\t\teval_metrics_cla,accuracy_metrics_df_cla=metrics_eval.metrics_eval_classification(y_pred[1],y_test[1],logs_path)\n\n\t\treturn model,eval_metrics_reg,accuracy_metrics_df_reg,eval_metrics_cla,accuracy_metrics_df_cla", "def infinite_infer_run(): \n try:\n model_directory = \"/opt/awscam/artifacts/\"\n # model_name = \"mnist-8\" # onnx-model\n model_name = \"fingerModel.onnx\" # onnx-model\n\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n\n # When the ONNX model is imported via DeepLens console, the model is copied\n # to the AWS DeepLens device, which is located in the \"/opt/awscam/artifacts/\".\n model_file_path = os.path.join(model_directory, model_name)\n sess = rt.InferenceSession(model_file_path)\n \n while True:\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n \n # Preprocess the frame to crop it into a square and\n # resize it to make it the same size as the model's input size.\n input_img = preprocess(frame)\n\n # Inference.\n inferences = makeInferences(sess, input_img)\n inference = np.argmax(inferences)\n\n # TODO: Add the label of predicted digit to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness \n # cv2.putText()\n cv2.putText(frame, str(inference), 
(20,120), cv2.FONT_HERSHEY_COMPLEX, 5, (243, 252, 61), 4)\n \n # 255, 0, 0\n # 61, 252, 243\n\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n \n # Outputting the result logs as \"MQTT messages\" to AWS IoT.\n cloud_output = {}\n cloud_output[\"scores\"] = inferences.tolist()\n print(inference, cloud_output)\n print(input_img.shape, inferences.shape)\n\n except Exception as ex:\n # Outputting error logs as \"MQTT messages\" to AWS IoT.\n print('Error in lambda {}'.format(ex))\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(\"error details:\" + str(exc_type) + str(fname) + str(exc_tb.tb_lineno))", "def trainModel(self, Model) -> None:\n ...", "def run():\n # test_utils.visualize_hog()\n # test_utils.visualize_windows()\n\n # test_utils.test_data_prep('./../vehicles', './../non-vehicles', 'HSV')\n # test_utils.test_features_prep('./../vehicles', './../non-vehicles')\n # test_utils.test_find_cars('./../test_images', 'HSV')\n\n ret, mtx, dist = helpers.calibrateCamera('./../camera_cal/')\n # test_utils.test_camera_calibration('./../camera_cal/', mtx, dist)\n\n pipeline = helpers.make_pipeline(mtx, dist, 'HSV')\n\n output_file = './../output_project_video.mp4'\n clip1 = VideoFileClip('./../project_video.mp4')\n # clip1.save_frame('./7.0.png', 7.0)\n # clip1 = VideoFileClip('./../project_video.mp4').subclip(20,35)\n output_clip = clip1.fl_image(pipeline)\n output_clip.write_videofile(output_file, audio=False)", "def run_on_path(model, image_path):\n\n with Image.open(image_path) as img:\n predictions = preprocess_and_run(model, img)\n\n return predictions", "def demo(path_model):\n state = torch.load(path_model, map_location='cpu')\n model = models.resnet34(pretrained=True)\n num_ftrs = model.fc.in_features\n model.fc = torch.nn.Linear(num_ftrs, 10)\n model.load_state_dict(state['model'])\n model.eval()\n\n src_dir = 'demo/audio/'\n dest_dir = 'demo/images/demo'\n audio_files = os.listdir(f'{src_dir}')\n print(f'Generating Spectograms')\n file_names = os.listdir(src_dir)\n\n for curr_file in audio_files:\n data, sr = librosa.load(f'{src_dir}/{curr_file}', sr=16000)\n freq, times, spec = log_spectogram(data, sr)\n fig, ax = plt.subplots(1)\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1)\n ax.axis('off')\n ax.imshow(spec.T, aspect='auto', origin='lower')\n ax.axis('off')\n fig.savefig(f'{dest_dir}/{curr_file[:-4]}.jpg')\n plt.close(fig)\n\n tfms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.2582, 0.1298, 0.3936], [0.0526, 0.1985, 0.0859])\n ])\n\n demo_ds = datasets.ImageFolder('demo/images', transform=tfms)\n demo_dl = torch.utils.data.DataLoader(demo_ds, batch_size=1, shuffle=False)\n\n print('Performing prediction')\n\n classes = ['down', 'go', 'left', 'no', 'off', 'on',\n 'right', 'stop', 'up', 'yes']\n\n for i, (demo_input, _) in enumerate(demo_dl):\n output = model(demo_input)\n output_labels = torch.max(output, dim=1)[1]\n print(f'Prediction for {file_names[i]}:{classes[output_labels]}')", "def loadModel(self, detection_speed=\"normal\",min_face_size = 24):\r\n\r\n if(detection_speed==\"normal\"):\r\n self.__input_image_min = 800\r\n self.__input_image_max = 1333\r\n elif(detection_speed==\"fast\"):\r\n self.__input_image_min = 400\r\n self.__input_image_max = 700\r\n elif(detection_speed==\"faster\"):\r\n self.__input_image_min = 300\r\n self.__input_image_max = 500\r\n elif (detection_speed == \"fastest\"):\r\n 
self.__input_image_min = 200\r\n self.__input_image_max = 350\r\n elif (detection_speed == \"flash\"):\r\n self.__input_image_min = 100\r\n self.__input_image_max = 250\r\n\r\n\r\n if (self.__modelLoaded == False):\r\n if(self.__modelType == \"\"):\r\n raise ValueError(\"You must set a valid model type before loading the model.\")\r\n elif(self.__modelType == \"MTCCN\"):\r\n model = mtccn(self.modelPath,minfacesize=min_face_size)\r\n self.__model_collection.append(model)\r\n self.__modelLoaded = True", "def loadInit(self):\n # Read video\n self.video = cv2.VideoCapture(self.path)\n # Exit if video not opened.\n if not self.video.isOpened():\n print(\"Error - Could not open video\")\n sys.exit(-1)\n\n # store video width/height to variables\n self.video_width = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))\n self.video_height = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Read and parse existing groundtruth file\n if not(os.path.exists(self.groundtruth_path)):\n print(\"Error - Could not read a groundtruth file\")\n sys.exit(-1)\n\n # Read and parse existing tracking result file\n if not(os.path.exists(self.result_path)):\n print(\"Error - Could not read a tracking result file\")\n sys.exit(-1)\n\n # list of annotated bounding box objects\n self.gt_bounding_boxes = []\n # list of tracking result bounding box objects\n self.result_bounding_boxes = []\n\n # parsing groundtruth and result files\n self.gt_bounding_boxes = self.parser.parseGivenDataFile(self.groundtruth_path, self.video_width)\n self.result_bounding_boxes = self.parser.parseGivenDataFile(self.result_path, self.video_width)", "def post_init(self):\n import onnxruntime\n self.model_name = self.raw_model_path.split('/')[-1]\n self.tmp_model_path = self.get_file_from_workspace(f'{self.model_name}.tmp')\n if is_url(self.raw_model_path):\n import urllib.request\n download_path, *_ = urllib.request.urlretrieve(self.raw_model_path)\n self.raw_model_path = download_path\n self.logger.info(f'download the model at {self.raw_model_path}')\n if not os.path.exists(self.tmp_model_path):\n self._append_outputs(self.raw_model_path, self.outputs_name, self.tmp_model_path)\n self.logger.info(f'save the model with outputs [{self.outputs_name}] at {self.tmp_model_path}')\n self.model = onnxruntime.InferenceSession(self.tmp_model_path, None)\n self.inputs_name = self.model.get_inputs()[0].name\n self.to_device(self.model)", "def run_model(model, image_data):\n\n with Image.open(BytesIO(image_data)) as img:\n predictions = preprocess_and_run(model, img)\n\n return predictions", "def do_training():\n train_cls = Train()\n train_cls.run()", "def load_vgg_model():\n checkpoint_filepath = f\"{CURR_DIR}/model/vgg16-classifier-model.hdf5\"\n global vgg_model\n vgg_model = load_model(checkpoint_filepath)\n vgg_model._make_predict_function()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetches the feature.feature_id of the specified scaffold feature from Chado. This function assumes that only one unique scaffold per organism exists.
def get_scaffold_id(conn, scaffold_name: str = None, genus: str = 'Drosophila',
                    species: str = 'melanogaster', scaffold_type: str = 'golden_path'):
    if scaffold_name is None:
        raise ValueError("No scaffold name specified.")
    scaffold_id_query = """
        select feature_id
        from feature f
        join organism o on f.organism_id = o.organism_id
        join cvterm cvt on f.type_id = cvt.cvterm_id
        where o.genus = %s
          and o.species = %s
          and cvt.name = %s
          and f.is_obsolete = false
          and f.is_analysis = false
          and f.name = %s
    """
    cur = conn.cursor()
    cur.execute(scaffold_id_query, (genus, species, scaffold_type, scaffold_name))
    return cur.fetchone()[0]
[ "def get_alice_cds_193_seqfeature():\n seq_ftr = create_1_part_seqfeature(110297, 110537, 1, \"CDS\")\n return seq_ftr", "def get_alice_cds_124_seqfeature():\n seq_ftr = create_2_part_seqfeature(70374, 70902, 1, 70901, 71285, 1, \"CDS\")\n return seq_ftr", "def get_alice_cds_252_seqfeature():\n seq_ftr = create_2_part_seqfeature(152829, 153401, 1, 0, 4, 1, \"CDS\")\n return seq_ftr", "def get_alice_cds_139_seqfeature():\n seq_ftr = create_1_part_seqfeature(88120, 88447, -1, \"CDS\")\n return seq_ftr", "def _id_feature(self, feat, path, id_key):\n\n # Get input keyword arguments\n feats = self.INPUT.FEATURES\n # Get metadata for database\n k_tables = self.RUNTIME.DB.TABLE\n\n # Shorthand database name, table, key\n db, db_table, db_key = self._db_feature(feat)\n\n # Do not have\n if not db_table:\n return ['Voxel List not Supported']\n\n # Just check record of an ID\n if feat in feats.BOOL_LIST:\n if feat == k_tables.LIST[0]:\n return db.is_neuron(db_table, path, id_key)\n else:\n return db.is_synapse(db_table, path, id_key)\n\n # If the request gets a keypoint\n if feat in feats.POINT_LIST:\n # Get the resolution parameter\n res_xy = self.INPUT.RESOLUTION.XY\n resolution = self._get_int_query(res_xy)\n scales = 2**resolution\n # Load from either table\n if feat == k_tables.LIST[0]:\n return db.neuron_keypoint(db_table, path, id_key, scales)\n else:\n return db.synapse_keypoint(db_table, path, id_key, scales)\n\n # If the request asks for all links\n if feat == feats.SYNAPSE_LINKS.NAME:\n return db.synapse_parent(db_table, path, id_key)\n\n # Not yet supported\n return [db_table]", "def get_feature(self, f: Feature):\n try:\n return self.features[f.feature_id]\n except ValueError:\n print('Feature does not exist in the dataset.')", "def _FindSkaffold():\n skaffold = (\n properties.VALUES.code.skaffold_path_override.Get() or\n _FindOrInstallSkaffoldComponent() or\n file_utils.FindExecutableOnPath('skaffold'))\n if not skaffold:\n raise EnvironmentError('Unable to locate skaffold.')\n return skaffold", "def get_feature(self, feature_name):\n return self._get_feature_by_name(feature_name)[0]", "def get_label(scaffold):\n return BUILTIN_SCAFFOLDS[scaffold]", "def get_scaffold(self, mol):\n from rdkit.Chem.Scaffolds import MurckoScaffold\n return MurckoScaffold.MurckoScaffoldSmiles(\n mol=mol, includeChirality=self.include_chirality)", "def get_feature_id(cls, feature_class_name, params, runner, feature_conn=None, force_new=False):\n par = copy.deepcopy(params)\n par['bookmakers'] = sorted(par['bookmakers'])\n return super(MarketFeature, cls).get_feature_id(feature_class_name, par, runner,\n feature_conn=feature_conn, force_new=force_new)", "def _find_feature(self, feature, feature_cascade):\n # For eyebrows and forehead\n if feature_cascade == \"\":\n return []\n\n feature_cascade = cv2.CascadeClassifier(os.path.join(OPENCV_PATH, feature_cascade))\n color = cv2.cvtColor(self.im, cv2.CV_8U)\n # Params are changeable, 1.05 (bigger is stricter), 3 (bigger is stricter)\n features = feature_cascade.detectMultiScale(color, 1.1, 2)\n if len(features) == 0 or (feature == 'eyes' and len(features) < 2):\n debug(\"Couldn't find feature - {}\".format(feature))\n return []\n\n # Find both eyes\n if feature == 'eyes':\n eye_center_x = lambda coords: (coords[0] + coords[2]) / 2\n # Sort by eye center (x axis)\n eyes = sorted(features, key=eye_center_x)\n ret = [eyes[0], eyes[-1]] # Left eye, right eye\n else:\n ret = [features[0]] # [(x, y, w, h)]\n\n return ret", "def get_scaffold(self, mol):\n from 
rdkit.Chem.Scaffolds import MurckoScaffold\n return MurckoScaffold.MurckoScaffoldSmiles(\n mol=mol, includeChirality=self.include_chirality)", "def do_getfabricid(self, line):\n try:\n args = shlex.split(line)\n\n if (len(args) > 0):\n print(\"Unexpected argument: \" + args[1])\n return\n\n compressed_fabricid = self.devCtrl.GetCompressedFabricId()\n raw_fabricid = self.devCtrl.fabricId\n\n self.replHint = \"devCtrl.GetCompressedFabricId(), devCtrl.fabricId\"\n except exceptions.ChipStackException as ex:\n print(\"An exception occurred during reading FabricID:\")\n print(str(ex))\n return\n\n print(\"Get fabric ID complete\")\n\n print(\"Raw Fabric ID: 0x{:016x}\".format(raw_fabricid)\n + \" (\" + str(raw_fabricid) + \")\")\n\n print(\"Compressed Fabric ID: 0x{:016x}\".format(compressed_fabricid)\n + \" (\" + str(compressed_fabricid) + \")\")", "def get_category_index(self, feature, category, feature_dict):\n if str(category) in feature_dict[str(feature)]:\n return feature_dict[str(feature)][str(category)]\n else:\n return None", "def set_feature_genome_ids(self, feature_type, value=None):\n if value is None:\n value = self.id\n\n if feature_type.lower() == \"cds\":\n feature_list = self.cds_features\n elif feature_type.lower() == \"source\":\n feature_list = self.source_features\n # TODO implement.\n # elif feature_type.lower() == \"trna\":\n # feature_list = self.trna_features\n # elif feature_type.lower() == \"tmrna\":\n # feature_list = self.source_features\n else:\n feature_list = []\n\n for feature in feature_list:\n feature.genome_id = value", "def get_bg_id(self, business_group, force_refresh=False):\n\n if not self.business_group_id or force_refresh:\n try:\n req = self.config.session.get(\n f\"https://{self.config.vcac_server}/catalog-service/api/consumer/entitledCatalogItems?limit=998\",\n verify=self.config.verify, timeout=self.config.timeout)\n req.raise_for_status()\n response = json.loads(req.text)\n except requests.exceptions.RequestException as e:\n raise VraSdkRequestException(\n f\"Error getting business group id for bg {self.business_group}: {e}\")\n except Exception as e:\n raise VraSdkMainException(e)\n\n if 'content' in response:\n for catalog_item in response['content']:\n for elt in catalog_item['entitledOrganizations']:\n if elt[\"subtenantLabel\"] == business_group:\n return elt['subtenantRef']\n raise VraSdkMainException(\n f'No entitlement for the account {self.authentication_object.login} in business group {business_group}')\n else:\n raise VraSdkMainException(\n f'Unable get bg id list. 
No entitled catalog item for account {self.authentication_object.login}')", "def is_scaffold(mol: SmallMolecule) -> int:\n mol = to_mol(mol)\n smi = Chem.MolToSmiles(mol, canonical=True)\n return int(smi == MurckoScaffoldSmiles(mol=mol))", "def get(request,hash,db_name):\n db = models.Feature_Database.objects.get(name=db_name)\n sequence = models.Sequence.objects.get(db=db,hash=hash)\n\n if db.db_version != sequence.db_version:\n print 'feature list and database out of sync!'\n # feature out of date with database, re gather features\n hash = models.Giraffe_Mappable_Model.detect_features(sequence.sequence,db_name)\n\n res = []\n\n # get automated features\n\n if 'sc' in request.GET:\n features = []\n cutters = {}\n for f in sequence.sequence_feature_set.order_by(\"start\").select_related(\n 'feature_db_index',\n 'feature_db_index__feature',\n 'feature_db_index__feature__type',\n ):\n features.append(f)\n if f.feature.type_id == models.Feature_Type.ENZYME:\n if f.feature.name in cutters:\n cutters[f.feature.name] = cutters[f.feature.name]+1\n else:\n cutters[f.feature.name] = 1\n\n for f in features:\n if f.feature.type_id == models.Feature_Type.ENZYME:\n if cutters[f.feature.name] == 1:\n res.append(f.to_dict())\n else:\n res.append(f.to_dict())\n\n else:\n for f in sequence.sequence_feature_set.order_by(\"start\").select_related(\n 'feature_db_index',\n 'feature_db_index__feature',\n 'feature_db_index__feature__type',\n ):\n res.append(f.to_dict())\n\n # get annotated features\n\n for f in sequence.sequence_feature_annotated_set.order_by(\n \"start\"\n ).select_related('feature_type'):\n res.append(f.to_dict())\n\n # now sort everything by start\n\n res.sort(cmp=lambda x,y:cmp(int(x['start']),int(y['start'])))\n\n res = [len(sequence.sequence),res]\n\n if 'sequence' in request.GET:\n # also asked for sequence\n res.append(sequence.sequence)\n\n j = json.JSONEncoder().encode(res)\n\n if 'jsonp' in request.GET:\n j = request.GET['jsonp']+'('+j+')'\n http_res = HttpResponse(j,mimetype=\"text/javascript\",status=httplib.OK)\n\n else:\n # technically we should be returning \"application/json\", but\n # in that case browsers force user to download into a file,\n # and for debugging we want to be able to see the JSON list in\n # browser. looks like most browsers will handle JSON sent back\n # as text/html anyways.\n if request.is_ajax():\n http_res = HttpResponse(j,mimetype=\"application/json\",status=httplib.OK)\n else:\n http_res = HttpResponse(j,status=httplib.OK)\n\n # we tell browser to cache this; if the sequence change, the hash would\n # change. the only danger is if we re-blat the sequence, in that case the\n # features list cached by browser will be out of date. so client\n # should attach some kind of CGI string to invalidate cache.\n http_res['Cache-Control'] = 'max-age=2592000'\n return http_res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a Chado database connection, a location, and returns a dictionary of all miRNA / mRNA features that overlap the given location.
def get_overlapping_miRNA_mRNA(conn, location: dict = {}):
    # SQL query to look for overlapping transcript features.
    miRNA_mRNA_query = """
        select f.uniquename, flybase.current_symbol(f.uniquename), cvt.name
        from featureloc_slice(%s, %s, %s) as fl
        join feature f on fl.feature_id=f.feature_id
        join cvterm cvt on f.type_id=cvt.cvterm_id
        where f.uniquename ~ '^FBtr\d+$'
          and f.is_obsolete = false
          and f.is_analysis = false
          and cvt.name in ('miRNA','mRNA')
        ;
    """
    cur = conn.cursor()
    cur.execute(miRNA_mRNA_query, (location['srcfeature_id'], location['fmin'], location['fmax']))
    # Return a dictionary containing all miRNA and mRNA features that overlap the given location.
    # The dictionary key is the FBtr ID and the value is a tuple with FBtr ID, symbol, and feature type.
    return {r[0]: r for r in cur}
[ "def generate_connectivity(conn, location_map):\n\n import networkx as nx\n\n df_cluster = pd.read_sql(\"\"\"\n SELECT\n m.user_id, m.cluster_id\n FROM\n media_events AS m, cluster AS c\n WHERE\n cluster_id IS NOT NULL AND m.cluster_id = c.id;\n \"\"\", conn)\n\n df_edge = pd.merge(df_cluster, df_cluster, left_on='user_id', right_on='user_id')\n\n all_edge = df_edge[['cluster_id_x', 'cluster_id_y']].values\n all_edge_tuple = set([(edge[0], edge[1]) for edge in all_edge])\n\n inverse_map = {val:key for key,val in enumerate(location_map)}\n \n graph = nx.Graph()\n\n for edge in all_edge_tuple:\n start, end = edge\n graph.add_edge(inverse_map[start], inverse_map[end])\n\n return nx.to_scipy_sparse_matrix(graph)", "def list_locations():", "def search_annotDB(annotDB, pwm, reportLocations=False):\n for id, annot in annotDB.iteritems():\n motifHits = pwm.find_in_region(annot.sequence)\n if reportLocations:\n yield id, motifHits\n else:\n yield id", "def glabrata_hitmap():\n\n min_mapq = arguments().q\n bam = pysam.AlignmentFile(data_directory + sorted_bam, \"rb\")\n\n if arguments().sp == 'cg':\n features_file = cg_features_file\n elif arguments().sp == 'sc':\n features_file = sc_features_file\n elif arguments().sp == 'ca':\n features_file = ca_features_file\n elif arguments().sp == 'sp':\n features_file = sp_features_file\n else:\n raise ValueError('Unknown species flag specified')\n\n if not os.path.exists(features_file):\n get_features()\n\n features = pd.read_csv(features_file)\n\n if arguments().sp == 'cg':\n gene_chroms = features['chrom'].unique()\n hit_map = {chrom: {'W': {}, 'C': {}} for chrom in gene_chroms}\n elif arguments().sp == 'sc':\n gene_chroms = ['ref|NC_001133|', 'ref|NC_001134|', 'ref|NC_001135|', 'ref|NC_001136|', 'ref|NC_001137|',\n 'ref|NC_001138|', 'ref|NC_001139|', 'ref|NC_001140|', 'ref|NC_001141|', 'ref|NC_001142|',\n 'ref|NC_001143|', 'ref|NC_001144|', 'ref|NC_001145|', 'ref|NC_001146|', 'ref|NC_001147|',\n 'ref|NC_001148|', 'ref|NC_001224|']\n hit_map = {chrom: {'W': {}, 'C': {}} for chrom in gene_chroms}\n elif arguments().sp == 'ca':\n gene_chroms = features['chrom'].unique()\n hit_map = {chrom: {'W': {}, 'C': {}} for chrom in gene_chroms}\n elif arguments().sp == 'sp':\n gene_chroms = features['chrom'].unique()\n hit_map = {chrom: {'W': {}, 'C': {}} for chrom in gene_chroms}\n else:\n raise ValueError('Unknown species flag specified')\n\n print('mapping hits')\n\n for line in bam:\n if line.mapq < min_mapq:\n continue\n\n raw_chrom = bam.getrname(line.reference_id)\n\n if raw_chrom not in hit_map:\n continue\n\n if raw_chrom not in gene_chroms:\n continue\n\n # Since start < end always, in alignments which are reversed (along the\n # Crick strand) the start of the fragment is actually at the 'end' point.\n if line.is_reverse:\n pos = line.reference_end\n strand = 'C'\n else:\n # BAM files use 0-based indexing, and we work in 1-based indexing,\n # so we have to add one.\n pos = line.reference_start + 1\n strand = 'W'\n\n hit_map[raw_chrom][strand][pos] = hit_map[raw_chrom][strand].get(pos, 0) + 1\n\n with open(os.path.splitext(data_directory + sorted_bam)[0] + \"_Hits.csv\", \"wb\") as out_file:\n writer = csv.writer(out_file)\n writer.writerow([\"Chromosome\", \"Strand\", \"Position\", \"Reads\"])\n for chrom in sorted(hit_map.keys()):\n for strand in hit_map[chrom].keys():\n for pos in sorted(hit_map[chrom][strand].keys()):\n writer.writerow([chrom, strand, pos, hit_map[chrom][strand][pos]])\n\n sc_trans()", "def get_cuff_dict(self):\n cuff_file = 
'/home/jorgsk/phdproject/3UTR/CUFF_LINKS/'\\\n 'cufflinks_3UTR_ends_merged_zeroed.bed'\n\n region_path = self.utrfile_path\n\n cmd = ['intersectBed', '-wa', '-wb', '-a', cuff_file, '-b', region_path]\n\n cuff_dict = {}\n # Run the above command and loop through output\n f = Popen(cmd, stdout=PIPE)\n for line in f.stdout:\n\n (chrm, beg, end, d, d, strnd, d, d, d, utr_id, d, d) = line.split()\n\n if utr_id in cuff_dict:\n cuff_dict[utr_id].append((chrm, beg, end, strnd))\n else:\n cuff_dict[utr_id] = [(chrm, beg, end, strnd)]\n\n # For the multi-exon utrs, add [] (they will probably not intersect with\n # annotated cuff sites.\n for utr_id in self.feature_coords.iterkeys():\n if utr_id not in cuff_dict:\n cuff_dict[utr_id] = []\n\n return cuff_dict", "def get_varIDs_overlapping_target_regions(df_vcf, target_regions, outdir):\n\n df_vcf = cp.deepcopy(df_vcf)\n\n if len(df_vcf)==0: raise ValueError(\"vcf is empty\")\n\n # get the END to be POS+1 if it is NaN\n if \"INFO_END\" in df_vcf.keys(): df_vcf[\"INFO_END\"] = df_vcf.apply(get_END_vcf_df_r_NaN_to_1, axis=1)\n else: df_vcf[\"INFO_END\"] = df_vcf.POS + 1\n\n # get the vcf to bed\n vcf_bed = \"%s/variants_locations.bed\"%outdir\n df_vcf[[\"#CHROM\", \"POS\", \"INFO_END\", \"ID\"]].to_csv(vcf_bed, sep=\"\\t\", header=False, index=False)\n\n # get the target regions to bed\n target_bed = \"%s/target_regions.bed\"%outdir\n target_regions[[\"chromosome\", \"start\", \"end\"]].to_csv(target_bed, sep=\"\\t\", header=False, index=False)\n\n # if the target regions are empty, define None as overlapping IDs\n if len(target_regions)==0: overlapping_IDs = set()\n\n else:\n\n # run bedtools to get the intersection\n intersection_vcf_bed = \"%s/variant_locations_intersecting_targetRegions.bed\"%outdir\n intersection_vcf_bed_stderr = \"%s.generating.stderr\"%intersection_vcf_bed\n print_if_verbose(\"running bedtools to get the variants that intersect the provided regions. 
The stderr is in %s\"%intersection_vcf_bed_stderr)\n\n intersection_vcf_bed_tmp = \"%s.tmp\"%intersection_vcf_bed\n run_cmd(\"%s intersect -a %s -b %s -wa > %s 2>%s\"%(bedtools, vcf_bed, target_bed, intersection_vcf_bed_tmp, intersection_vcf_bed_stderr))\n\n remove_file(intersection_vcf_bed_stderr)\n os.rename(intersection_vcf_bed_tmp, intersection_vcf_bed)\n\n # get into df\n df_vcf_intersection = pd.read_csv(intersection_vcf_bed, sep=\"\\t\", header=None, names=[\"chromosome\", \"start\", \"end\", \"ID\"])\n\n # check that all IDs are in the beginning\n if len(set(df_vcf_intersection.ID).difference(set(df_vcf.ID)))>0: raise ValueError(\"There are missing IDs\")\n\n # get the IDs that are overlapping\n if len(df_vcf_intersection)>0: overlapping_IDs = set.union(*df_vcf_intersection.ID.apply(lambda x: set(x.split(\";\"))))\n\n else: overlapping_IDs = set() \n\n return overlapping_IDs", "def overlaps(self, chromosome: str, start: int, stop: int) -> ty.Iterable[ty.List]:\n query = \"{chromosome}:{start}-{stop}\"\n process = sp.Popen([\"tabix\", str(self.info.compressed), query])\n for line in process.stdout:\n yield line.strip().split()", "def get_all_locations(self, input_df):\n return set(pd.unique(input_df[self._LOCATION_COLUMN_NAME]))", "def read_genetic_map(args):\n data = pd.read_csv(\n \"/data/projects/summary_statistics/utils/genetic_map.txt.gz\",\n sep=\"\\t\", compression=\"gzip\",\n dtype={\"Chromosome\": str}\n )\n\n # Sub-setting the data to get a region of X base pair on each side of the\n # hit\n chrom, start, end = parse_region(args.region)\n\n region = data[\"Chromosome\"] == chrom\n region = region & (data[\"Position(bp)\"] >= start)\n region = region & (data[\"Position(bp)\"] <= end)\n\n data = data[region]\n\n return data", "def get_nearby_crime_sorted_ind(latitude, longitude, crimeData): \n lat = crimeData['latitude'].copy()\n long = crimeData['longitude'].copy()\n lat.pop('crime_id')\n long.pop('crime_id')\n \n km = haversine_np(lat.astype('float64'),long.astype('float64'),latitude,longitude)\n \n selectedData = dict((k, v) for k, v in km.items() if v <= allowbleDistance)\n \n sortedData = dict(sorted(selectedData.items(), key=operator.itemgetter(1)))\n \n crimeList = [ set_dataframe_toObject(crimeID,crimeData.loc[crimeID]) for crimeID in sortedData.keys()]\n \n return crimeList", "def get_observations(config):\n\n results = {}\n features = os.path.join(config[\"gdb\"], \"Observations\")\n fields = [\"GpsPoint_ID\", \"Angle\", \"Distance\"]\n with arcpy.da.SearchCursor(features, fields) as cursor:\n for row in cursor:\n results[row[0]] = {\"ANGLE\": row[1], \"DISTANCE\": row[2]}\n return results", "def _generate_state_dictionary():\n path_to_location_file = os.getcwd() + \"/Data/uscities.csv\"\n df = pd.read_csv(path_to_location_file)\n state_locations = {}\n\n for index, row in df.iterrows():\n state_name = re.sub(r\"[^a-zA-Z]+\", ' ', row[3]).lower()\n if state_name not in state_locations:\n state_locations[state_name] = set()\n\n # Adding state ID to the dictionary\n state_locations[state_name].add(re.sub(r\"[^a-zA-Z]+\", ' ', row[2]).lower())\n\n # Adding city name to the dictionary\n if row[0]:\n state_locations[state_name].add(re.sub(r\"[^a-zA-Z]+\", ' ', row[0]).lower())\n\n # Adding county name to the dictionary\n if row[5]:\n state_locations[state_name].add(re.sub(r\"[^a-zA-Z]+\", ' ', row[5]).lower())\n\n return state_locations", "def _load_adm_areas(self):\n countries = {}\n\n pg.cur.execute(\"SELECT geonameid, ST_AsText(geom) FROM countries\")\n for 
geonameid, wkt in pg.cur.fetchall():\n if wkt:\n geom = geo.wkt_to_geom(wkt)\n path = geo.PolygonPath(geom)\n countries[geonameid] = path\n\n continents = {}\n pg.cur.execute(\"SELECT geonameid, ST_AsText(geom) FROM continents\")\n for geonameid, wkt in pg.cur.fetchall():\n if wkt:\n geom = geo.wkt_to_geom(wkt)\n path = geo.PolygonPath(geom)\n continents[geonameid] = path\n\n return countries, continents", "def co_loc(sample,bedfile):\n s = bedfile[bedfile['sample']==sample]\n locs=[]\n parents = s['donor'].unique()\n for index,row in s.iterrows():\n locs.append([row['chr'],int(row['start']),int(row['end']),row['donor']])\n return locs,parents", "def print_query_regions(bam):\n\n for template in locations:\n for primer in locations[template]:\n start, end = locations[template][primer]\n for read in bam.fetch(reference=template, start=start, end=end):\n # this is an AlignedSegment: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment\n # sys.stderr.write(\"Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\\n\".format(\n # primer, start, end, read.query_name, read.query_alignment_start, read.query_alignment_end,\n # read.reference_start, read.reference_end\n # ))\n\n # this checks for sequences that overlap the start and end (none do in the Ondrej data set\n # if read.reference_start <= start and read.reference_end >= stop:\n # sys.stderr.write(\"Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\\n\".format(\n # primer, start, stop, read.query_name, read.query_alignment_start, read.query_alignment_end,\n # read.reference_start, read.reference_end\n # ))\n\n # get just the sequence that maps to the region\n seq = read.query_sequence\n beg_offset = None\n end_offset = None\n if read.reference_start < start:\n beg_offset = start - read.reference_start - 1\n if read.reference_end > end:\n end_offset = len(seq) - (read.reference_end - end)\n\n if beg_offset and end_offset:\n seq = seq[beg_offset:end_offset]\n elif beg_offset:\n seq = seq[beg_offset:]\n elif end_offset:\n seq = seq[:end_offset]\n\n print(\">{} {} {} {}\\n{}\".format(read.query_name, primer, read.reference_start, read.reference_end, seq))", "def constructAssociationLocusRBDictFromHDF5File(inputFname=None, locusPadding=0, tableName='association_locus'):\n\tfrom pymodule.algorithm.RBTree import RBDict\n\tfrom pymodule.yhio.CNV import CNVCompare, CNVSegmentBinarySearchTreeKey, get_overlap_ratio\n\t\n\tsys.stderr.write(\"Constructing association-locus RBDict from HDF5 file %s, (locusPadding=%s) ...\"%(inputFname, locusPadding))\n\treader = HDF5MatrixFile(inputFname, openMode='r')\n\tassociationLocusRBDict = RBDict()\n\tassociationLocusRBDict.locusPadding = locusPadding\n\tassociationLocusRBDict.HDF5AttributeNameLs = []\n\ttableObject = reader.getTableObject(tableName=tableName)\n\tfor attributeName, value in tableObject.getAttributes().iteritems():\n\t\tassociationLocusRBDict.HDF5AttributeNameLs.append(attributeName)\n\t\tsetattr(associationLocusRBDict, attributeName, value)\n\t\n\tcounter = 0\n\treal_counter = 0\n\tfor row in tableObject:\n\t\tif not row.chromosome:\t#empty chromosome, which happens when inputFname contains no valid locus, but the default null locus (only one).\n\t\t\tcontinue\n\t\tcounter += 1\n\t\tsegmentKey = CNVSegmentBinarySearchTreeKey(chromosome=row.chromosome, \\\n\t\t\t\t\t\tspan_ls=[max(1, row.start - locusPadding), row.stop + locusPadding], \\\n\t\t\t\t\t\tmin_reciprocal_overlap=1, no_of_peaks=row.no_of_peaks, 
\\\n\t\t\t\t\t\tno_of_results=row.no_of_results, connectivity=row.connectivity)\n\t\t\t\t\t\t#2010-8-17 overlapping keys are regarded as separate instances as long as they are not identical.\n\t\tif segmentKey not in associationLocusRBDict:\n\t\t\tassociationLocusRBDict[segmentKey] = []\n\t\tassociationLocusRBDict[segmentKey].append(row)\n\tsys.stderr.write(\"%s peaks in %s spans.\\n\"%(counter, len(associationLocusRBDict)))\n\treturn associationLocusRBDict", "def clustering_with_location(network):\n print('Clustering nodes using their location info...')\n communities = {}\n\n for vertex in network.nodes(data=True):\n loc = vertex[1]['location']\n try:\n communities[loc].append(vertex[0])\n except KeyError:\n communities[loc] = [vertex[0]]\n\n by_location_coms = NodeClustering(communities.values(), network,\n method_name='by_location',\n method_parameters=None, overlap=False)\n print('Done!\\n')\n return by_location_coms", "def get_feats_in_space(locs, ichr, bpmin, bpmax, bed):\n assert bpmin < bpmax, (locs, ichr, bpmin, bpmax)\n feats = bed.get_features_in_region(str(ichr), bpmin, bpmax)\n feats = [f for f in feats if not (f['start'] == locs[0] and f['end'] == locs[1])]\n if len(feats) != 0:\n assert feats[0]['seqid'] == str(ichr)\n return [(f['start'], f['end'], f['accn']) for f in feats]", "def populate_neighborhoods(e3sm_filename, ref_map_filename, debug=True,\n **kwargs):\n if debug:\n print('Debug is on')\n # need to pass in filename instead of ds for memoization to work\n e3sm_ds = xr.open_dataset(e3sm_filename).load()\n ref_map_ds = xr.open_dataset(ref_map_filename).load()\n \n if debug:\n print('Loaded e3sm_ds')\n print('Loaded ref_map_ds')\n print('indices up next')\n\n # key: e3sm_ds ncol index; val: ref_map_ds ncol index\n indices = e3sm_master_ncol_index(e3sm_ds, ref_map_ds, **kwargs)\n if debug:\n print('indices determined')\n print(len(indices))\n\n # key: ref_map_ds ncol index; val: e3sm_ds ncol index\n ref_ds_to_e3sm_indices = {val: key for key, val in indices.items()}\n if debug:\n print('ref_ds_to_e3sm_indices determined')\n\n neighbors = dict()\n if debug:\n print('populating neighborhoods')\n for ind_e3sm, ind_ref in indices.items():\n if debug:\n print(ind_e3sm, ind_ref)\n neighbors[ind_e3sm] = [ref_ds_to_e3sm_indices[idx]\n for idx in\n neighborhood_to_search(ind_e3sm, e3sm_ds,\n ref_map_ds,\n delta_lat_max=2,\n delta_lon_max=2)\n if (idx in ref_ds_to_e3sm_indices) and\n neighboring_cells(ind_ref, idx, ref_map_ds)]\n return neighbors" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
switch on 5.1 mode
def set_mode51(cls): cls.mode_51 = True
[ "def mode51(cls):\r\n return cls.mode_51", "def test_mode_toggle(self, caplog, api_mock):\n self.mock_api.return_value = ({'code': 0}, 200)\n fan = VeSyncAir131(DEV_LIST_DETAIL, self.vesync_obj)\n f = fan.auto_mode()\n assert f\n assert fan.mode == 'auto'\n f = fan.manual_mode()\n assert fan.mode == 'manual'\n assert f\n f = fan.sleep_mode()\n assert fan.mode == 'sleep'\n assert f", "def choose_mode( self, ):\r\n # =========== add your modes as desired starting here ========\r\n # ---------->> call modes here; I comment out ones I am not using. Makes it really easy to switch modes\r\n # these are modes I use, pretty much one for each micro-controller\r\n # project that I do. You can look at them as examples or delete the subroutines\r\n # pick one by un-commenting it. These are typically synced up with an Arduino app\r\n\r\n\r\n pass # if everything else is commented out\r\n #self.quick_start_mode()\r\n #self.tutorial_example_mode() # simple setup for documentation and basic terminal\r\n #self.accel_demo_mode() #\r\n #self.controlino_mode() #\r\n\r\n #self.ddclock_mode()\r\n #self.ddclock_david()\r\n #self.ddclock_test_mode()\r\n #self.ddclock_demo_1()\r\n #self.ddclock_demo_2()\r\n\r\n\r\n\r\n self.deer_me_dev()\r\n #self.deer_me_pi_deploy()\r\n\r\n #self.infra_red_mode() # not working, requires special modules from irtools\r\n #self.green_house_mode()\r\n #self.motor_driver_mode()\r\n #self.root_cellar_mode()\r\n #self.stepper_tester_mode()\r\n #self.serial_cmd_test() # for messing with master SerialCmd and SerialCmdMaster\r\n #self.terminal_mode()\r\n\r\n #self.two_axis_mode()\r\n #self.well_monitor_mode()\r\n\r\n\r\n # ---- additional stuff only for testing in addition to another mode\r\n #self.mode_plus_tests() # used only for testing change freely\r", "def detected_mode_set(self, event):\n self.mode.set(2)\n self.change_mode()", "def switch_mode(self):\n button_id = \"equitySwitchButton\" if self.mode == Mode.Invest else \"cfdSwitchButton\"\n elem = WebDriverWait(self.driver, self.timeout).until(expected_conditions.element_to_be_clickable((By.ID, button_id)))\n # If the button is active, you don't have to click on the button\n if \"active\" not in elem.get_attribute('class').split():\n elem.click()", "def mode_toggle(self, mode: str) -> bool:\n if mode.lower() not in self.modes:\n logger.debug('Invalid purifier mode used - %s',\n mode)\n return False\n head, body = self.build_api_dict('setPurifierMode')\n if not head and not body:\n return False\n\n body['payload']['data'] = {\n 'mode': mode.lower()\n }\n if mode == 'manual':\n body['payload'] = {\n 'data': {\n 'id': 0,\n 'level': 1,\n 'type': 'wind'\n },\n 'method': 'setLevel',\n 'type': 'APP'\n }\n\n r, _ = Helpers.call_api(\n '/cloud/v2/deviceManaged/bypassV2',\n method='post',\n headers=head,\n json_object=body,\n )\n\n if Helpers.code_check(r):\n if mode.lower() == 'manual':\n self.speed = 1\n self.mode = 'manual'\n else:\n self.mode = mode\n self.speed = 0\n return True\n logger.debug('Error setting purifier mode')\n return False", "def set_default_reweight(self):\n \n if 'reweight' in self.available_module:\n if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')):\n self.switch['reweight'] = 'ON'\n else:\n self.switch['reweight'] = 'OFF'\n else:\n self.switch['reweight'] = 'Not Avail.'", "def config_mode(self):\n\n pass", "def _5(self, _5):\n\n self.__5 = _5", "def set_nv_power_mode2(mode):\n mode_value = int(mode)\n if mode_value < 0 or mode_value > 4:\n print(\">>> Invalid value : \", mode_value , \"Valid range 
is between 0 to 4 - Mode not changed ! \")\n return\n power_mode = get_nv_power_mode()\n if int(power_mode.split(\"-\")[1]) == mode_value:\n print(\">>> Current mode is already: \", mode_value , \"Mode not changed ! \")\n return\n\n command = \"sudo nvpmodel -m \" + mode\n print(command)\n output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, universal_newlines=True)\n print(output.stdout)\n print(\" Change Mode Completed !\")", "def captured_mode_set(self, event):\n self.mode.set(1)\n self.change_mode()", "def set_auto_mode(self):\n if 'auto' in self.mist_modes:\n call_str = 'auto'\n elif 'humidity' in self.mist_modes:\n call_str = 'humidity'\n else:\n logger.debug('Trying auto mode, mode not set for this model, '\n 'please ensure %s model '\n 'is in configuration dictionary', self.device_type)\n call_str = 'auto'\n set_auto = self.set_humidity_mode(call_str)\n return set_auto", "def _switch_to_new_mode(self):\n prev_mode = get_current_debug_mode()\n set_debug_mode(self._mode)\n self._mode = prev_mode", "def orb5min_open(self):\n return self._orb5min_open", "def preset_5(self, packet):\n return self.preset(packet)", "def get_tune_mode(self, json_info):\n tune_mode = json_info[\"SocInfo\"][\"autoTilingMode\"]\n if self.offline_tune:\n tune_mode = \"RL\"\n return tune_mode", "def toSafeMode(self):\r\n self.start()\r\n time.sleep(0.03)\r\n # now we're in PASSIVE_MODE, so we repeat the above code...\r\n self.send( SAFE )\r\n # they recommend 20 ms between mode-changing commands\r\n time.sleep(0.03)\r\n # change the mode we think we're in...\r\n self.sciMode = SAFE_MODE\r\n # no response here, so we don't get any...\r\n return", "def test_initial_swmr_mode_on(self):\n self.assertTrue(self.f.swmr_mode)", "def set_mode(self, mode):\n self._write_byte(BNO055_OPR_MODE_ADDR, mode & 0xFF)\n # Delay for 30 milliseconds (datsheet recommends 19ms, but a little more\n # can't hurt and the kernel is going to spend some unknown amount of time\n # too).\n time.sleep(0.03)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
either mode 5.1 or 5.2/5.3/6.1
def mode51(cls): return cls.mode_51
[ "def getRSelMode(self,targetDevice):\n if (targetDevice in self.adc_based_acquisition):\n return \"e5x\"\n elif (targetDevice in [\"SAML22\"]):\n return \"l22\"\n elif (targetDevice in [\"PIC32CZCA80\", \"PIC32CZCA90\"]):\n return \"pic32cz\"\n else:\n return \"std\"", "def choose_mode( self, ):\r\n # =========== add your modes as desired starting here ========\r\n # ---------->> call modes here; I comment out ones I am not using. Makes it really easy to switch modes\r\n # these are modes I use, pretty much one for each micro-controller\r\n # project that I do. You can look at them as examples or delete the subroutines\r\n # pick one by un-commenting it. These are typically synced up with an Arduino app\r\n\r\n\r\n pass # if everything else is commented out\r\n #self.quick_start_mode()\r\n #self.tutorial_example_mode() # simple setup for documentation and basic terminal\r\n #self.accel_demo_mode() #\r\n #self.controlino_mode() #\r\n\r\n #self.ddclock_mode()\r\n #self.ddclock_david()\r\n #self.ddclock_test_mode()\r\n #self.ddclock_demo_1()\r\n #self.ddclock_demo_2()\r\n\r\n\r\n\r\n self.deer_me_dev()\r\n #self.deer_me_pi_deploy()\r\n\r\n #self.infra_red_mode() # not working, requires special modules from irtools\r\n #self.green_house_mode()\r\n #self.motor_driver_mode()\r\n #self.root_cellar_mode()\r\n #self.stepper_tester_mode()\r\n #self.serial_cmd_test() # for messing with master SerialCmd and SerialCmdMaster\r\n #self.terminal_mode()\r\n\r\n #self.two_axis_mode()\r\n #self.well_monitor_mode()\r\n\r\n\r\n # ---- additional stuff only for testing in addition to another mode\r\n #self.mode_plus_tests() # used only for testing change freely\r", "def get_mode(self):\n return self.mode", "def __check_mode(m):\n if m not in range(0, 3):\n return False\n return True", "def version_uses_new_config () -> bool:\n if app_version_major < 2:\n return False\n else:\n if app_version_minor < 1:\n return False\n else:\n return True", "def get_nv_power_mode2():\n output = subprocess.run(\"sudo nvpmodel -q\", shell=True, stdout=subprocess.PIPE, universal_newlines=True)\n #print(output.stdout)\n line = output.stdout.strip().split('\\n')\n print(line)\n #print(line[-1])\n mode = \"Mode-\"+line[-1]\n #print(mode)\n return mode", "def _get_manufacturing_mode(self):\n try:\n if 'manufacturing_mode' in self.facts:\n return self.facts['manufacturing_mode']\n response = self.config(command_list=[\"show chassis\"]).response()\n fpc_search = re.search('fpc', response)\n manuf_search = re.search('boot -h -m manufacturing', response)\n self.facts['manufacturing_mode'] = bool(response and(fpc_search and manuf_search))\n return self.facts['manufacturing_mode']\n except Exception as exp:\n self.log(level='WARN', message=exp)\n self.log(level='WARN', message=\"Unable to set manufacturing mode attribute\")\n return None", "def set_mode51(cls):\r\n cls.mode_51 = True", "def protocolVersion():", "def myst_version():\n return 0.13", "def getEdition():\n # type: () -> String\n return \"standard\"", "def get_mode_2f(self):\n return int(self.query(\"HARM?\"))", "def _get_robust_status(self, mode):\n if mode in {'george', 'random_gdro', 'superclass_gdro', 'true_subclass_gdro'}:\n return True\n elif mode == 'erm':\n return False\n raise ValueError(\n 'mode {mode} not valid. 
Use one of the following:\\n'\n + '[\"george\", \"random_gdro\", \"superclass_gdro\", \"true_subclass_gdro\", '\n + '\"erm\"]'\n )", "def orb5min_open(self):\n return self._orb5min_open", "def get_nv_power_mode():\n output = os.system(\"sudo nvpmodel -q > mfile.txt\")\n last = \"0\"\n if not output:\n f = open(\"mfile.txt\", \"r\")\n lines = f.read().splitlines()\n last = lines[-1]\n f.close()\n mode = \"Mode-\"+last\n print(\"mode is : \", mode)\n os.remove(\"mfile.txt\")\n return mode", "def pmonsd_version():\n return (1,0)#Must always be the same as the version in src/SemiDetHelper.cxx\n #Also remember to update the supported versions in __actual_parse(..)", "def test_handler_protocol_filters_five(self):\n\n class DeviceVersionFive(self.DeviceWithEverything):\n PROTOCOL_INFO = katcp.ProtocolFlags(\n 5, 0, [\n katcp.ProtocolFlags.MULTI_CLIENT,\n katcp.ProtocolFlags.MESSAGE_IDS])\n\n self._test_handler_protocol_filters(\n DeviceVersionFive,\n ['simple', 'version-5', 'flags', 'fewer-flags'])", "def mode(mode: str):\n return _run_speedify_cmd([\"mode\", mode])", "def _get_interface_aemode(self, aemode):\n if aemode == constants.LAG_MODE_ACTIVE_BACKUP:\n return 'active_standby'\n elif aemode == constants.LAG_MODE_BALANCE_XOR:\n return 'balanced'\n elif aemode == constants.LAG_MODE_8023AD:\n return '802.3ad'\n else:\n raise ConfigFail(\"Unknown interface AE mode: %s\" % aemode)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get command ids of given class
def cmd_ids(cls):
    ids = []
    for command in cls.commands():
        ids.append(CommandMapper.text2num()[command])
    return ids
[ "def cmd_commands(self):\r\n return self.commands()", "def get_ids():", "def get_command_list (self):\r\n # Currently this is only used on Mac OS, for the Mac-only GUI\r\n # Distutils interface (by Jack Jansen)\r\n\r\n import distutils.command\r\n std_commands = distutils.command.__all__\r\n is_std = {}\r\n for cmd in std_commands:\r\n is_std[cmd] = 1\r\n\r\n extra_commands = []\r\n for cmd in self.cmdclass.keys():\r\n if not is_std.get(cmd):\r\n extra_commands.append(cmd)\r\n\r\n rv = []\r\n for cmd in (std_commands + extra_commands):\r\n klass = self.cmdclass.get(cmd)\r\n if not klass:\r\n klass = self.get_command_class(cmd)\r\n try:\r\n description = klass.description\r\n except AttributeError:\r\n description = \"(no description available)\"\r\n rv.append((cmd, description))\r\n return rv", "def get_command_list(self):\n return self._command_list", "def find_client_commands(obj):\n commands = []\n for name in dir(obj):\n if not name.startswith('_'):\n if is_regular_method(obj, name):\n attr = getattr(obj, name)\n commands.append(attr)\n return commands", "def class_ids(self):\n return [mask.class_id for mask in self.masklist]", "def list_commands(self, ctx):\n return self.commands.keys()", "def getCommands(self):\n return self.commands", "def get_sub_commands (self):\r\n commands = []\r\n for (cmd_name, method) in self.sub_commands:\r\n if method is None or method(self):\r\n commands.append(cmd_name)\r\n return commands", "def commands(self):\n return self.dataset.commands", "def get_command(self, message: \"pycord.models.message.Message\"):\n cmd_index = self.prefix(message)\n if not cmd_index:\n return []\n cmd_name, extra_info = message.content[cmd_index:].split(' ')[0], \\\n ' '.join(message.content[cmd_index:].split(' ')[1:])\n return [(self.commands[cmd], extra_info) for cmd in self.commands if cmd_name == cmd]", "def get_pids(self, class_id):\n\n count = ctypes.c_uint(0)\n count_ref = ctypes.byref(count)\n\n restype = ctypes.POINTER(ctypes.c_uint)\n self.pqos.lib.pqos_pid_get_pid_assoc.restype = restype\n p_pids = self.pqos.lib.pqos_pid_get_pid_assoc(class_id, count_ref)\n\n if p_pids:\n pids = [p_pids[i] for i in range(count.value)]\n free_memory(p_pids)\n else:\n pids = []\n\n return pids", "def _get_cmds_of_type(self, state, types=None):\n if(state == 0):\n self.__current_command_list = []\n self.__logger.debug(\"_get_cmds_of_type\")\n if(not types): # Return commands of all types\n types = ['app', 'dev_all', 'dev_rx', 'dev_tx']\n for cmd in dir(self):\n fn = getattr(self, cmd)\n if(hasattr(fn, 'dev_type')):\n if(fn.dev_type in types):\n self.__current_command_list.append(cmd)\n return self.__current_command_list", "def get_commands_to_register(self):\n return dict((key, get_class(value)) for key, value in\n self.simple_commands.items())", "def _get_ids(self, query):\n return [getattr(elm, 'id') for elm in query]", "def commands(self):\n return self._getNodes(pr.BaseCommand)", "def _class_name_to_command(self):\r\n\r\n command = []\r\n for i in range(len(self.__class__.__name__)):\r\n c = self.__class__.__name__[i]\r\n if i == 0:\r\n command.append(c.lower())\r\n elif i > 0 and c.isupper():\r\n command.append('_')\r\n command.append(c.lower())\r\n else:\r\n command.append(c)\r\n\r\n return ''.join(command)", "def add_commands(self, classes):\n #We instanciate all modules and then add them to the self.modules list\n for classe in classes:\n logger.debug(\"Registering %s\" % classe)\n objet = classe(self)\n self.modules.append(objet)", "def get_device_ids(self) -> Set[str]:\n 
stdout = self.run_cli_command(\"-d\")[0]\n \n return set([id.strip() for id in stdout.split(\"\\n\") if id.strip() != ''])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop the process of splitting off a clone from its parent volume and snapshot. All of the blocks that were formerly shared between the given clone and its parent volume that have already been split off will remain that way. This command fails if applied to a traditional volume. Cloning is a new capability that applies exclusively to flexible volumes.
def volume_clone_split_stop(self, volume): return self.request( "volume-clone-split-stop", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { } )
[ "def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)", "def test_mirror_delete_parent_snap(rbd_mirror, pool_type, **kw):\n try:\n mirror1 = rbd_mirror.get(\"mirror1\")\n mirror2 = rbd_mirror.get(\"mirror2\")\n config = kw.get(\"config\")\n pool = config[pool_type][\"pool\"]\n image = config[pool_type][\"image\"]\n imagespec = pool + \"/\" + image\n snap_1 = kw[\"config\"][pool_type].get(\"snap\", f\"{image}_snap\")\n clone_1 = kw[\"config\"][pool_type].get(\"clone\", f\"{image}_clone\")\n\n rbd1, rbd2 = [\n Rbd(**kw, req_cname=cluster_name)\n for cluster_name in kw.get(\"ceph_cluster_dict\").keys()\n ]\n mirror2.wait_for_replay_complete(imagespec)\n if rbd1.snap_create(pool, image, snap_1):\n log.error(f\"Snapshot with name {snap_1} creation failed for {image}\")\n return 1\n snap_name_1 = f\"{pool}/{image}@{snap_1}\"\n if rbd1.protect_snapshot(snap_name_1):\n log.error(f\"Snapshot protect failed for {pool}/{image}\")\n return 1\n if rbd1.create_clone(snap_name_1, pool, clone_1):\n log.error(f\"Clone creation failed for {pool}/{clone_1}\")\n return 1\n\n # verify in secondary for clone mirror\n imagespec2 = pool + \"/\" + clone_1\n mirror1.wait_for_status(imagespec=imagespec2, state_pattern=\"up+stopped\")\n mirror2.wait_for_replay_complete(imagespec2)\n if mirror2.image_exists(imagespec2):\n log.error(f\"No such image info found for {imagespec2}\")\n return 1\n mirror2.wait_for_status(imagespec=imagespec2, state_pattern=\"up+replaying\")\n\n # delete parent snap while running io's in image\n with parallel() as p:\n p.spawn(\n mirror1.benchwrite, imagespec=imagespec, io=config.get(\"io_total\", \"1G\")\n )\n p.spawn(\n flatten_unprotect_delete_snap,\n rbd1,\n pool,\n image,\n clone_1,\n snap_1,\n snap_name_1,\n )\n\n # Verify for parent snapshot deletion\n if snap_1 in rbd1.snap_ls(pool_name=pool, image_name=image, snap_name=snap_1):\n log.info(f\"parent snapshot {snap_1} is not removed as expected\")\n return 1\n log.info(f\"parent snapshot {snap_1} is removed successfully\")\n\n # verify creating snapshot in secondary cluster\n snap_2 = kw[\"config\"][pool_type].get(\"snap\", f\"{image}_snap2\")\n if rbd2.snap_create(pool, image, snap_2):\n log.info(\n \"As expected Snapshot creation failed due to image lock in primary cluster\"\n )\n return 0\n\n except Exception as e:\n log.exception(e)\n return 1\n\n # Cleans up the configuration\n finally:\n mirror1.clean_up(peercluster=mirror2, pools=[pool])", "def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n if kwargs.get(\"group_name\"):\n clone_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"target_group_name\"):\n 
clone_cmd += f\" --target_group_name {kwargs.get('target_group_name')}\"\n if kwargs.get(\"pool_layout\"):\n clone_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=clone_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"target_group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('target_group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if target_subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of clone : {target_subvol_name} failed\")\n return cmd_out, cmd_rc", "def volume_clone_split_estimate(self, volume):\n return self.request( \"volume-clone-split-estimate\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-estimate': [ CloneSplitEstimateInfo, True ],\n } )", "def delete_clone(self, *params):\n if not params or len(params)==0:\n raise TypeError(\"delete_clone takes at lease 1 argument 0 given.\")\n elif params and len(params)>2:\n raise TypeError(\"delete_clone takes at lease 1 argument %u given.\" %(len(params)))\n disk=params[0]\n return self._delete(\"vdisk\", disk.getAttribute(\"name\"), disk.getProperties(), self.isClone)", "def volume_clone_create(self, parent_volume, volume, use_snaprestore_license=None, force_worm_clone=None, junction_active=None, qos_policy_group_name=None, space_reserve=None, junction_path=None, parent_snapshot=None, volume_type=None):\n return self.request( \"volume-clone-create\", {\n 'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],\n 'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],\n 'force_worm_clone': [ force_worm_clone, 'force-worm-clone', [ bool, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'junction_active': [ junction_active, 'junction-active', [ bool, 'None' ], False ],\n 'qos_policy_group_name': [ qos_policy_group_name, 'qos-policy-group-name', [ basestring, 'None' ], False ],\n 'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n 'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],\n 'volume_type': [ volume_type, 'volume-type', [ basestring, 'None' ], False ],\n }, {\n } )", "def test_copy_vm_disks_after_cloned_as_thin(self, storage):\n self.copy_with_template(storage=storage, clone=False)", "def test_split_not_mirror(utils_patch):\n ret = {}\n ret[\"stdout\"] = \"\"\n ret[\n \"stderr\"\n ] = \"Unable to split datapool: Source pool must be composed only of mirrors\"\n ret[\"retcode\"] = 1\n mock_cmd = MagicMock(return_value=ret)\n\n with patch.dict(zpool.__salt__, {\"cmd.run_all\": mock_cmd}), patch.dict(\n zpool.__utils__, utils_patch\n ):\n ret = zpool.split(\"datapool\", \"backuppool\")\n res = OrderedDict(\n [\n (\"split\", False),\n (\n \"error\",\n \"Unable to split datapool: Source pool must be composed only of\"\n \" mirrors\",\n ),\n ]\n )\n assert ret == res", "def clone(self, source_name, snapshot_id, dest_name):\n wrap_popen('collie', 'vdi', 'clone', '-s', snapshot_id, source_name,\n dest_name)", "def test_copy_vm_disks_after_cloned_as_clone(self, storage):\n 
self.copy_with_template(storage=storage)", "def _create_clone_pair(self, pvol, svol):\n snapshot_name = '%(prefix)s%(svol)s' % {\n 'prefix': CLONE_NAME,\n 'svol': svol % _SNAP_HASH_SIZE,\n }\n try:\n body = {\"snapshotGroupName\": snapshot_name,\n \"snapshotPoolId\": self.storage_info['snap_pool_id'],\n \"pvolLdevId\": pvol,\n \"svolLdevId\": svol,\n \"isClone\": True,\n \"clonesAutomation\": True,\n \"copySpeed\": 'medium',\n \"isDataReductionForceCopy\": True}\n self.client.add_snapshot(body)\n except utils.HBSDError as ex:\n if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==\n rest_api.INVALID_SNAPSHOT_POOL and\n not self.conf.hitachi_snap_pool):\n msg = utils.output_log(\n MSG.INVALID_PARAMETER, param='hitachi_snap_pool')\n raise utils.HBSDError(msg)\n else:\n raise\n try:\n self._wait_copy_pair_status(svol, set([PSUS, SMPP, SMPL]))\n except Exception:\n with excutils.save_and_reraise_exception():\n try:\n self._delete_pair_from_storage(pvol, svol)\n except utils.HBSDError:\n utils.output_log(\n MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)", "def wait_for_clone(client, clone, timeout=600):\n clone_status_cmd = f\"ceph fs clone status {clone.get('vol_name')} {clone.get('target_subvol_name')} \"\n clone_status_cmd = (\n clone_status_cmd + f\"--group_name {clone.get('group_name')} --format json\"\n )\n cmd_out, cmd_rc = client.exec_command(\n sudo=True,\n cmd=clone_status_cmd,\n check_ec=clone.get(\"check_ec\", True),\n timeout=timeout,\n )\n status = json.loads(cmd_out)\n clone_state = status[\"status\"][\"state\"]\n if \"complete\" not in clone_state:\n raise AssertionError(f\"Clone state : {clone_state}\")", "def break_remote_clone_job(session, remote_clone_job_id, return_type=None,\n **kwargs):\n verify_remote_clone_id(remote_clone_job_id)\n\n path = '/api/remote_clones/{0}/break.json' \\\n .format(remote_clone_job_id)\n\n return session.post_api(path=path, return_type=return_type, **kwargs)", "def isClone(self, vdisk):\n if vdisk.objecttype==\"clone\":\n return True\n else:\n return False", "def volume_clone_split_status(self, volume=None):\n return self.request( \"volume-clone-split-status\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-details': [ CloneSplitDetailInfo, True ],\n } )", "def test_clone_image_status_error(self):\n self._clone_volume_from_image('error', False)", "def _clone(context, obj, clone_id):\n return context.manage_clone(obj, clone_id)", "def create_cloned_volume(self, volume, src_vref):\n self._login()\n self._create_lun(volume)\n self.copy_volume_data(self.context, src_vref, volume)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the name of an Infinite Volume, either return its current size or set the Infinite Volume's size to the stated amount. This API is not supported for Flexible Volumes. This API is not supported on Infinite Volume constituents.
def volume_size_async(self, volume_name, new_size=None): return self.request( "volume-size-async", { 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ], 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ], }, { 'result-error-message': [ basestring, False ], 'result-jobid': [ int, False ], 'result-status': [ basestring, False ], 'result-error-code': [ int, False ], 'volume-size': [ basestring, False ], } )
[ "def volume_size(self, volume, new_size=None):\n return self.request( \"volume-size\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n }, {\n 'is-fixed-size-flex-volume': [ bool, False ],\n 'is-readonly-flex-volume': [ bool, False ],\n 'is-replica-flex-volume': [ bool, False ],\n 'volume-size': [ basestring, False ],\n } )", "def _extend_volume(self, name, new_size):\n LOG.debug('_extend__volume name: %s', name)\n params = {}\n params['volsize'] = ix_utils.get_bytes_from_gb(new_size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s/id/%s') % (\n FreeNASServer.REST_API_VOLUME,\n urllib.parse.quote_plus(\n self.configuration.ixsystems_dataset_path + '/' + name))\n ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,\n request_urn, jparams)\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while extending volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def resize_volume(self, size):\r\n curr_size = self.volume.size\r\n if size <= curr_size:\r\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\r\n \"than the current volume size of '%s'.\" % curr_size)\r\n body = {\"volume\": {\"size\": size}}\r\n self.manager.action(self, \"resize\", body=body)", "def VolumeExtend(new_size,\n gib,\n#pylint: disable=unused-argument\n volume_names,\n volume_ids,\n volume_prefix,\n volume_regex,\n volume_count,\n source_account,\n source_account_id,\n test,\n mvip,\n username,\n password):\n#pylint: enable=unused-argument\n options = copy.deepcopy(locals())\n options.pop(\"new_size\", None)\n options.pop(\"gib\", None)\n\n if gib:\n multiplier = 1024 * 1024 * 1024\n else:\n multiplier = 1000 * 1000 * 1000\n\n new_size = new_size * multiplier\n post_value = new_size\n if new_size % 4096 != 0:\n post_value = int((new_size // 4096 + 1) * 4096)\n\n return VolumeModify(property_name=\"totalSize\",\n property_value=new_size,\n post_value=post_value,\n **options)", "def resize(self, size):\r\n self.instance.resize_volume(size)\r\n self.size = size", "def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )", "def extend_volume(self, connection_properties):\n # The StorPool client (storpool_block service) running on this host\n # should have picked up the change already, so it is enough to query\n 
# the actual disk device to see if its size is correct.\n #\n volume_id = connection_properties.get('volume', None)\n if volume_id is None:\n raise exception.BrickException(\n 'Invalid StorPool connection data, no volume ID specified.')\n\n # Get the expected (new) size from the StorPool API\n volume = self._attach.volumeName(volume_id)\n LOG.debug('Querying the StorPool API for the size of %(vol)s',\n {'vol': volume})\n vdata = self._attach.api().volumeList(volume)[0]\n LOG.debug('Got size %(size)d', {'size': vdata.size})\n\n # Wait for the StorPool client to update the size of the local device\n path = '/dev/storpool/' + volume\n for _ in range(10):\n size = utils.get_device_size(self, path)\n LOG.debug('Got local size %(size)d', {'size': size})\n if size == vdata.size:\n return size\n time.sleep(0.1)\n else:\n size = utils.get_device_size(self, path)\n LOG.debug('Last attempt: local size %(size)d', {'size': size})\n return size", "def _to_volume(self, element, name=None):\r\n volId = findtext(element=element, xpath='volumeId',\r\n namespace=NAMESPACE)\r\n size = findtext(element=element, xpath='size', namespace=NAMESPACE)\r\n\r\n # Get our tags\r\n tags = self._get_resource_tags(element)\r\n\r\n # If name was not passed into the method then\r\n # fall back then use the volume id\r\n name = name if name else tags.get('Name', volId)\r\n\r\n # Get our extra dictionary\r\n extra = self._get_extra_dict(\r\n element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])\r\n\r\n return StorageVolume(id=volId,\r\n name=name,\r\n size=int(size),\r\n driver=self,\r\n extra=extra)", "def manage_existing_get_size(self, volume, existing_ref):\n volume_info = self._validate_manage_existing_ref(existing_ref)\n size = self._round_bytes_to_gib(volume_info['size'])\n\n return size", "def volume(self):\n return self.intrinsicValue(\"measuredvolume\")", "def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"maximum_volume_size\")", "def validate_volume_size(size):\n if size is None:\n raise exception.VolumeSizeNotSpecified()\n max_size = CONF.max_accepted_volume_size\n if int(size) > max_size:\n msg = (\"Volume 'size' cannot exceed maximum \"\n \"of %d Gb, %s cannot be accepted.\"\n % (max_size, size))\n raise exception.VolumeQuotaExceeded(msg)", "def scale(self, factor: 'float') -> \"void\":\n return _coin.SbViewVolume_scale(self, factor)", "async def async_api_adjust_volume(\n hass: ha.HomeAssistant,\n config: AbstractConfig,\n directive: AlexaDirective,\n context: ha.Context,\n) -> AlexaResponse:\n volume_delta = int(directive.payload[\"volume\"])\n\n entity = directive.entity\n current_level = entity.attributes[media_player.const.ATTR_MEDIA_VOLUME_LEVEL]\n\n # read current state\n try:\n current = math.floor(int(current_level * 100))\n except ZeroDivisionError:\n current = 0\n\n volume = float(max(0, volume_delta + current) / 100)\n\n data: dict[str, Any] = {\n ATTR_ENTITY_ID: entity.entity_id,\n media_player.const.ATTR_MEDIA_VOLUME_LEVEL: volume,\n }\n\n await hass.services.async_call(\n entity.domain, SERVICE_VOLUME_SET, data, blocking=False, context=context\n )\n\n return directive.response()", "def get_max_volume(self) -> float:", "def _volume(self, value: object = None):\n if value is None:\n return int(self._player_info().get(\"vol\"))\n try:\n if isinstance(value, str) and (value.startswith('+') or value.startswith('-')):\n self._logger.debug(\"Adjusting volume by \" + str(value) + \". 
Getting old volume...\")\n new_volume = max(0, min(100, self._volume()+int(math.floor(float(value)))))\n self._logger.debug(\"Adjusting volume \"+str(value)+\" to \"+str(new_volume)+\"...\")\n else:\n new_volume = max(0, min(100, int(math.floor(float(value)))))\n self._logger.debug(\"Setting volume to \" + str(int(new_volume)))\n except ValueError:\n raise AttributeError(\"Volume must be between 0 and 100 or -100 to +100, inclusive, not '\"+str(value)+\"'\")\n response = self._send(\"setPlayerCmd:vol:\" + str(new_volume))\n if response.status_code != 200:\n raise linkplayctl.APIException(\"Failed to set volume to '\"+str(new_volume)+\"'\")\n return response.content.decode(\"utf-8\")", "def root_volume_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"root_volume_size\")", "def getVolume(self):\n if (self.stockinformation.retrieved > datetime.datetime.now()-datetime.timedelta(seconds=10)):\n return self.stockinformation.volume\n else:\n stock_info = self.stockinformation\n stock_info.setData()\n stock_info.save()\n return self.stockinformation.volume" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display an estimate of additional storage required in the underlying aggregate to perform a volume clone split operation. This command fails if applied to a traditional volume. Cloning is a new capability that applies exclusively to flexible volumes.
def volume_clone_split_estimate(self, volume): return self.request( "volume-clone-split-estimate", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'clone-split-estimate': [ CloneSplitEstimateInfo, True ], } )
[ "def test_copy_vm_disks_after_cloned_as_thin(self, storage):\n self.copy_with_template(storage=storage, clone=False)", "def test_copy_vm_disks_after_cloned_as_clone(self, storage):\n self.copy_with_template(storage=storage)", "def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)", "def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n if kwargs.get(\"group_name\"):\n clone_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"target_group_name\"):\n clone_cmd += f\" --target_group_name {kwargs.get('target_group_name')}\"\n if kwargs.get(\"pool_layout\"):\n clone_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=clone_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"target_group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('target_group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if target_subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of clone : {target_subvol_name} failed\")\n return cmd_out, cmd_rc", "def test_clone_image_status_available(self):\n self._clone_volume_from_image('available', True)", "def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def volume_clone_split_status(self, volume=None):\n return self.request( \"volume-clone-split-status\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-details': [ CloneSplitDetailInfo, True ],\n } )", "def test_clone_image_status_error(self):\n self._clone_volume_from_image('error', False)", "def loss(self):\n # with perfect declustering (which requires more OSDs\n # than placement groups/osd) we should expect to lose\n # about 1/2 of a placement group as a result of a second\n # drive failure.\n l = self.disk.size\n if self.copies > 1:\n l /= (2 * self.pgs)\n\n return l", "def test_copy_vm_disks_with_snapshot(self, storage):\n testflow.step(\"Taking snapshot of VM %s\", self.vm_name)\n assert ll_vms.addSnapshot(\n True, self.vm_name, self.snapshot_description\n ), (\"Failed to create snapshot for vm %s\" % self.vm_name)\n ll_jobs.wait_for_jobs([config.JOB_CREATE_SNAPSHOT])\n\n self.basic_copy(self.vm_name)\n helpers.attach_new_disks_to_vm(self.test_vm_name, self.new_disks)\n assert helpers.check_file_existence(\n self.test_vm_name, storage_type=storage\n )", "def VolumeExtend(new_size,\n gib,\n#pylint: disable=unused-argument\n volume_names,\n volume_ids,\n volume_prefix,\n volume_regex,\n volume_count,\n 
source_account,\n source_account_id,\n test,\n mvip,\n username,\n password):\n#pylint: enable=unused-argument\n options = copy.deepcopy(locals())\n options.pop(\"new_size\", None)\n options.pop(\"gib\", None)\n\n if gib:\n multiplier = 1024 * 1024 * 1024\n else:\n multiplier = 1000 * 1000 * 1000\n\n new_size = new_size * multiplier\n post_value = new_size\n if new_size % 4096 != 0:\n post_value = int((new_size // 4096 + 1) * 4096)\n\n return VolumeModify(property_name=\"totalSize\",\n property_value=new_size,\n post_value=post_value,\n **options)", "def _split_lot(\n num_shares,\n lot,\n lots,\n logger,\n type_of_lot,\n existing_loss_lot=None,\n existing_replacement_lot=None,\n):\n existing_lot_portion = float(num_shares) / float(lot.num_shares)\n new_lot_portion = float(lot.num_shares - num_shares) / float(lot.num_shares)\n\n new_lot = copy.deepcopy(lot)\n new_lot.num_shares -= num_shares\n new_lot.basis = int(round(new_lot.basis * new_lot_portion))\n new_lot.adjusted_basis = int(\n round(new_lot.adjusted_basis * new_lot_portion)\n )\n new_lot.proceeds = int(round(new_lot.proceeds * new_lot_portion))\n new_lot.adjustment = int(round(new_lot.adjustment * new_lot_portion))\n lots.add(new_lot)\n\n lot.num_shares = num_shares\n lot.basis = int(round(lot.basis * existing_lot_portion))\n lot.adjusted_basis = int(round(lot.adjusted_basis * existing_lot_portion))\n lot.proceeds = int(round(lot.proceeds * existing_lot_portion))\n lot.adjustment = int(round(lot.adjustment * existing_lot_portion))\n\n loss_lots = [lot] if type_of_lot == \"loss\" else [existing_loss_lot]\n split_off_loss_lots = [new_lot] if type_of_lot == \"loss\" else []\n replacement_lots = (\n [lot] if type_of_lot == \"replacement\" else [existing_replacement_lot]\n )\n split_off_replacement_lots = (\n [new_lot] if type_of_lot == \"replacement\" else []\n )\n logger.print_lots(\n \"Split {} in two\".format(type_of_lot),\n lots,\n loss_lots=loss_lots,\n split_off_loss_lots=split_off_loss_lots,\n replacement_lots=replacement_lots,\n split_off_replacement_lots=split_off_replacement_lots,\n )", "def clone_size(self):\n return self.plot('mean_clone_size', log=True)", "def test_extend_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n\n new_size = self._driver._size_bytes(self.TEST_NEWSIZE)\n self._driver._create_file(self.TEST_VOLPATH, new_size)\n\n self.mox.ReplayAll()\n\n self._driver.extend_volume(self.TEST_VOLUME, self.TEST_NEWSIZE)", "def volume_split(self, new_volume_name, plex):\n return self.request( \"volume-split\", {\n 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],\n 'plex': [ plex, 'plex', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def extend_volume(self, connection_properties):\n # The StorPool client (storpool_block service) running on this host\n # should have picked up the change already, so it is enough to query\n # the actual disk device to see if its size is correct.\n #\n volume_id = connection_properties.get('volume', None)\n if volume_id is None:\n raise exception.BrickException(\n 'Invalid StorPool connection data, no volume ID specified.')\n\n # Get the expected (new) size from the StorPool API\n volume = self._attach.volumeName(volume_id)\n LOG.debug('Querying the StorPool API for the size of %(vol)s',\n {'vol': volume})\n vdata = 
self._attach.api().volumeList(volume)[0]\n LOG.debug('Got size %(size)d', {'size': vdata.size})\n\n # Wait for the StorPool client to update the size of the local device\n path = '/dev/storpool/' + volume\n for _ in range(10):\n size = utils.get_device_size(self, path)\n LOG.debug('Got local size %(size)d', {'size': size})\n if size == vdata.size:\n return size\n time.sleep(0.1)\n else:\n size = utils.get_device_size(self, path)\n LOG.debug('Last attempt: local size %(size)d', {'size': size})\n return size", "def create_cloned_volume(self, volume, src_vref):\n self._login()\n self._create_lun(volume)\n self.copy_volume_data(self.context, src_vref, volume)", "def _gather_clone_loss(clone, num_clones, regularization_losses):\n # The return value.\n sum_loss = None\n # Individual components of the loss that will need summaries.\n clone_loss = None\n regularization_loss = None\n # Compute and aggregate losses on the clone device.\n with tf.device(clone.device):\n all_losses = []\n clone_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOSSES,\n clone.scope)\n if clone_losses:\n clone_loss = tf.add_n(clone_losses, name='clone_loss')\n if num_clones > 1:\n clone_loss = tf.div(clone_loss, 1.0 * num_clones,\n name='scaled_clone_loss')\n all_losses.append(clone_loss)\n if regularization_losses:\n regularization_loss = tf.add_n(regularization_losses,\n name='regularization_loss')\n all_losses.append(regularization_loss)\n if all_losses:\n sum_loss = tf.add_n(all_losses)\n # Add the summaries out of the clone device block.\n if clone_loss is not None:\n tf.compat.v1.summary.scalar('/'.join(filter(None,\n ['Losses', clone.scope, 'clone_loss'])),\n clone_loss)\n if regularization_loss is not None:\n tf.compat.v1.summary.scalar('Losses/regularization_loss',\n regularization_loss)\n return sum_loss" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the given volume's language mapping.
def volume_get_language(self, volume): return self.request( "volume-get-language", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'language-code': [ basestring, False ], 'nfs-character-set': [ basestring, False ], 'oem-character-set': [ basestring, False ], 'language': [ basestring, False ], } )
[ "def get_language_pack(locale: str) -> dict:\n if check_locale(locale):\n for entry_point in entry_points(group=JUPYTERLAB_LANGUAGEPACK_ENTRY):\n if locale == entry_point.name:\n return entry_point.load()\n else:\n return {}\n else:\n print(\"Locale '{locale}' not valid!\".format(locale=locale))\n return {}", "def language_code(self):\n return self._book_dict[\"language_code\"]", "def findLanguageCodeForLocale(self, locale): #$NON-NLS-1$\r", "def getLanguage(cls, code):\n cls.initialize()\n return None if code is None else cls.languageIndex.get(code, None)", "def volume_charmap_get(self, volume):\n return self.request( \"volume-charmap-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'charmap': [ basestring, False ],\n } )", "def get_by_name(name):\n if name in _LANG:\n return _LANG[name]\n raise ValueError(\"Cannot determine language for %s\" % name)", "def volume_set_language(self, volume, language_code):\n return self.request( \"volume-set-language\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'language_code': [ language_code, 'language-code', [ basestring, 'None' ], False ],\n }, {\n } )", "def get_language(entry):\n index_url = entry.url.replace(\"robots.txt\", \"\")\n\n # hack around some issues here,\n if entry.domain in KNOWN_LANGUAGES:\n language = KNOWN_LANGUAGES.get(entry.domain)\n\n else:\n try:\n page = requests.get(index_url)\n try:\n languages = cld2.detect(page.content, isPlainText=False,\n hintTopLevelDomain=entry.domain.split('.')[-1])\n except:\n languages = cld2.detect(page.text.encode(\"utf8\"), isPlainText=False,\n hintTopLevelDomain=entry.domain.split('.')[-1])\n\n # ignoring 'is_reliable' flag here, set on baidu.com etc (even though detects\n # language appropiately\n language = languages.details[0].language_name if languages.details else 'Unknown'\n index_url = page.url\n\n except Exception as e:\n log.exception(\"Failed to analyze language for '%s'\", entry.domain)\n language = 'Failed'\n\n language = language.title()\n # traditional chinese -> chinese\n if language == 'Chineset':\n language = 'Chinese'\n return language, not urlparse(index_url).netloc.endswith(entry.domain)", "def get_journal_preferred_language(journal_name, ln):\n languages = get_journal_languages(journal_name)\n if ln in languages:\n return ln\n elif CFG_SITE_LANG in languages:\n return CFG_SITE_LANG\n elif languages:\n return languages\n else:\n return CFG_SITE_LANG", "def get_source_language(resources):\r\n return resources[0].source_language", "def get_language(self):\n return self._get_option('language')", "def get_published_languages(self):", "def get_locale():\n return request.accept_languages.best_match(app.config['LANGUAGES'].keys())", "def get_language(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'language', identifier, category_details)", "def _get_locale(self, key):\r\n return key[key.find('[') + 1:-1]", "def get(self, language: str) -> str:\n value = None\n\n try:\n # Get specified language\n value = self[language]\n\n # Default to english\n if value is None:\n value = self['en']\n except KeyError:\n # Default to the first property\n for language in self.keys():\n if language in self:\n value = self[language]\n break\n\n return value", "def language(self):\n pass", "def get_language():\n from django.conf import settings as st\n from django.utils import translation\n return request.session.get('language', st.LANGUAGE_CODE)\n return \"vi\"", "def get_language(uri):\n\n 
encoding = ENCODINGS['en']['encoding']\n try:\n language = uri.partition('?')[0].strip('/?').split('/')[-1]\n encoding = ENCODINGS[language]['encoding']\n except KeyError:\n language = 'en'\n return encoding, language" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check whether a desired volume transition can be performed from one volume type to another. Only 7-Mode flexible volume to Cluster-Mode flexible volume transitions (and vice versa), with limited options, are supported at this time.
def volume_transition_check(self, source_node, volumes, operation_type=None, override_warnings=None, destination_vserver_name=None, non_disruptive=None): return self.request( "volume-transition-check", { 'source_node': [ source_node, 'source-node', [ basestring, 'None' ], False ], 'operation_type': [ operation_type, 'operation-type', [ basestring, 'None' ], False ], 'override_warnings': [ override_warnings, 'override-warnings', [ bool, 'None' ], False ], 'destination_vserver_name': [ destination_vserver_name, 'destination-vserver-name', [ basestring, 'None' ], False ], 'volumes': [ volumes, 'volumes', [ VolumeTransitionVolinfo, 'None' ], True ], 'non_disruptive': [ non_disruptive, 'non-disruptive', [ bool, 'None' ], False ], }, { } )
[ "def _validate_manage_existing_vol_type(self, volume):\n replication_type = self._get_replication_type_from_vol_type(\n volume.volume_type)\n if replication_type == REPLICATION_TYPE_SYNC:\n raise exception.ManageExistingVolumeTypeMismatch(\n _(\"Unable to managed volume with type requiring sync\"\n \" replication enabled.\"))", "def changed_user_volume(self):\n return isinstance(self.original.action,\n types.ChannelAdminLogEventActionParticipantVolume)", "def volume_transition(self, source_node, volumes, affinity_node=None, operation_type=None, override_warnings=None, destination_vserver_name=None, non_disruptive=None):\n return self.request( \"volume-transition\", {\n 'affinity_node': [ affinity_node, 'affinity-node', [ basestring, 'None' ], False ],\n 'source_node': [ source_node, 'source-node', [ basestring, 'None' ], False ],\n 'operation_type': [ operation_type, 'operation-type', [ basestring, 'None' ], False ],\n 'override_warnings': [ override_warnings, 'override-warnings', [ bool, 'None' ], False ],\n 'destination_vserver_name': [ destination_vserver_name, 'destination-vserver-name', [ basestring, 'None' ], False ],\n 'volumes': [ volumes, 'volumes', [ VolumeTransitionVolinfo, 'None' ], True ],\n 'non_disruptive': [ non_disruptive, 'non-disruptive', [ bool, 'None' ], False ],\n }, {\n 'job-id': [ int, False ],\n } )", "def _can_use_driver_migration(self, diff):\n # We can if there's no retype or there are no difference in the types\n if not diff:\n return True\n\n extra_specs = diff.get('extra_specs')\n qos = diff.get('qos_specs')\n enc = diff.get('encryption')\n\n # We cant' if QoS or Encryption changes and we can if there are no\n # extra specs changes.\n if qos or enc or not extra_specs:\n return not (qos or enc)\n\n # We can use driver assisted migration if we only change the backend\n # name, and the AZ.\n extra_specs = extra_specs.copy()\n extra_specs.pop('volume_backend_name', None)\n extra_specs.pop('RESKEY:availability_zones', None)\n return not extra_specs", "def subvoxel(self):\n return (not self.valid()) or self.volume() < 1", "def _check_local_volumes_present(self, block_device_info):\n bmap = block_device_info.get('block_device_mapping')\n for entry in bmap:\n connection_info = entry['connection_info']\n driver_type = connection_info['driver_volume_type']\n if driver_type == 'local':\n reason = (_(\"Instances with attached '%s' volumes are not \"\n \"currently supported.\") % driver_type)\n raise exception.MigrationPreCheckError(reason=reason)", "def validate_volumes(k8s_conf):\n __validate_host_vols(k8s_conf)\n # TODO/FIXME - Add Ceph volume check after Ceph support has been fixed\n __validate_rook_vols(k8s_conf)", "def can_chain_with(self, other_vuln) -> bool:\n check_vector = False\n if self.__AV == \"network\":\n if other_vuln.__AV == \"local\" or other_vuln.__AV == \"network\":\n check_vector = True\n\n if self.__AV == \"adjacent_network\":\n if other_vuln.__AV == \"local\" or other_vuln.__AV == \"adjacent_network\":\n check_vector = True\n\n if self.__AV == \"local\":\n if other_vuln.__AV == \"local\":\n check_vector = True\n\n check_perm = False\n\n if self.__PR == \"none\":\n check_perm = other_vuln.__PR == \"none\" or other_vuln.__PR == \"low\"\n\n if self.__PR == \"low\":\n check_perm = other_vuln.__PR == \"low\" or other_vuln.__PR == \"high\"\n\n if self.__PR == \"high\":\n check_perm = other_vuln.__PR == \"high\"\n\n return check_vector and check_perm", "def should_run_widom(self):\n if 
self.ctx.zeopp['output_parameters'].get_dict()['POAV_Volume_fraction'] > 1e-5:\n self.report(\"Found accessible pore volume: continue\")\n return True\n else:\n self.report(\"NOT Found any accessible pore volume: stop\")\n return False", "def test_get_options_interval_movers_volume(self):\n pass", "def supports_volumes_api(client):\n if hasattr(client, 'volumes'):\n try:\n client.volumes()\n return True\n except InvalidVersion:\n # client supports it but server doesn't\n pass\n # client does not support\n return False", "def volume_get_supported_guarantees(self, volume):\n return self.request( \"volume-get-supported-guarantees\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'guarantee-types': [ Guarantee, True ],\n } )", "def allow_volume_expansion(self) -> bool:\n return typing.cast(\n bool,\n self._properties.get(\"allowVolumeExpansion\"),\n )", "def volume_verify_start(self, volume=None, fix_plex=None, log_only=None):\n return self.request( \"volume-verify-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'fix_plex': [ fix_plex, 'fix-plex', [ int, 'None' ], False ],\n 'log_only': [ log_only, 'log-only', [ bool, 'None' ], False ],\n }, {\n } )", "def check_expected_vol_status(self, vol, expected_state):\n vol.get()\n return vol.status == expected_state", "def test_manage_volume_volume_type_by_uuid(self):\n body = {'volume': {'host': 'host_ok',\n 'ref': 'fake_ref',\n 'volume_type': fake.VOLUME_TYPE_ID,\n 'bootable': True}}\n res = self._get_resp_post(body)\n self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)", "def test_volume_info(self):\n pass", "def test_manage_volume_volume_type_by_name(self):\n body = {'volume': {'host': 'host_ok',\n 'ref': 'fake_ref',\n 'volume_type': 'good_fakevt'}}\n res = self._get_resp_post(body)\n self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)", "def is_existing_flat_vlan_allowed():\n return bool(CONF.nuage_sut.nuage_sriov_allow_existing_flat_vlan)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Display the progress in separating clones from their underlying parent volumes and snapshots. If a clone name is specified, then the split status for that clone is provided. If no clone name is provided, then status is provided for all clones currently being split. This command fails if applied to a traditional volume, and EONTAPI_EVOLNOTFLEX is thrown. Cloning is a capability that applies exclusively to flexible volumes. This command fails if the volume specified is not a clone, and EVOLNOTCLONE is thrown. This command fails if the volume specified is not being split, and EVOLOPNOTUNDERWAY is thrown.
def volume_clone_split_status(self, volume=None): return self.request( "volume-clone-split-status", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'clone-split-details': [ CloneSplitDetailInfo, True ], } )
[ "def wait_for_clone(client, clone, timeout=600):\n clone_status_cmd = f\"ceph fs clone status {clone.get('vol_name')} {clone.get('target_subvol_name')} \"\n clone_status_cmd = (\n clone_status_cmd + f\"--group_name {clone.get('group_name')} --format json\"\n )\n cmd_out, cmd_rc = client.exec_command(\n sudo=True,\n cmd=clone_status_cmd,\n check_ec=clone.get(\"check_ec\", True),\n timeout=timeout,\n )\n status = json.loads(cmd_out)\n clone_state = status[\"status\"][\"state\"]\n if \"complete\" not in clone_state:\n raise AssertionError(f\"Clone state : {clone_state}\")", "def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def volume_clone_split_estimate(self, volume):\n return self.request( \"volume-clone-split-estimate\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-estimate': [ CloneSplitEstimateInfo, True ],\n } )", "def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def test_clone_image_status_error(self):\n self._clone_volume_from_image('error', False)", "def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n if kwargs.get(\"group_name\"):\n clone_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"target_group_name\"):\n clone_cmd += f\" --target_group_name {kwargs.get('target_group_name')}\"\n if kwargs.get(\"pool_layout\"):\n clone_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=clone_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"target_group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('target_group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if target_subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of clone : {target_subvol_name} failed\")\n return cmd_out, cmd_rc", "def validate_clone_state(\n self, client, clone, expected_state=\"complete\", timeout=300\n ):\n end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)\n clone_transistion_states = []\n cmd_out, cmd_rc = self.get_clone_status(\n client,\n clone[\"vol_name\"],\n clone[\"target_subvol_name\"],\n group_name=clone.get(\"target_group_name\", \"\"),\n )\n status = json.loads(cmd_out.read().decode())\n if status[\"status\"][\"state\"] not in clone_transistion_states:\n clone_transistion_states.append(status[\"status\"][\"state\"])\n while status[\"status\"][\"state\"] != expected_state:\n cmd_out, cmd_rc = self.get_clone_status(\n client,\n clone[\"vol_name\"],\n clone[\"target_subvol_name\"],\n group_name=clone.get(\"target_group_name\", \"\"),\n )\n status = json.loads(cmd_out.read().decode())\n log.info(\n f\"Clone Status of {clone['vol_name']} : {status['status']['state']}\"\n )\n if status[\"status\"][\"state\"] not 
in [\n \"in-progress\",\n \"complete\",\n \"pending\",\n \"canceled\",\n ]:\n raise CommandFailed(f'{status[\"status\"][\"state\"]} is not valid status')\n if end_time < datetime.datetime.now():\n raise CommandFailed(\n f\"Clone creation has not reached to Complete state even after {timeout} sec\"\n f'Current state of the clone is {status[\"status\"][\"state\"]}'\n )\n return clone_transistion_states", "def pvc_clone_ui(\n self,\n project_name,\n pvc_name,\n cloned_pvc_access_mode=constants.ACCESS_MODE_RWO,\n cloned_pvc_name=None,\n ):\n clone_name = cloned_pvc_name or f\"{pvc_name}-clone\"\n self.navigate_persistentvolumeclaims_page()\n\n logger.info(f\"Search and select the project {project_name}\")\n self.do_click(self.pvc_loc[\"pvc_project_selector\"])\n self.do_send_keys(self.pvc_loc[\"search-project\"], text=project_name)\n\n self.wait_for_namespace_selection(project_name=project_name)\n\n logger.info(f\"Search for PVC {pvc_name}\")\n self.do_send_keys(self.pvc_loc[\"search_pvc\"], text=pvc_name)\n\n logger.info(f\"Go to PVC {pvc_name} page\")\n self.do_click(get_element_type(pvc_name))\n\n logger.info(\"Click on Actions\")\n self.do_click(self.pvc_loc[\"pvc_actions\"])\n\n logger.info(\"Click on Clone PVC from dropdown options\")\n self.do_click(self.pvc_loc[\"clone_pvc\"], enable_screenshot=True)\n\n logger.info(\"Clear the default name of clone PVC\")\n ocs_version = version.get_semantic_ocs_version_from_config()\n if (\n self.ocp_version_full == version.VERSION_4_6\n and ocs_version == version.VERSION_4_6\n ):\n self.do_clear(format_locator(self.pvc_loc[\"clone_name_input\"], clone_name))\n else:\n self.do_clear(self.pvc_loc[\"clone_name_input\"])\n\n logger.info(\"Enter the name of clone PVC\")\n if (\n self.ocp_version_full == version.VERSION_4_6\n and ocs_version == version.VERSION_4_6\n ):\n self.do_send_keys(\n format_locator(self.pvc_loc[\"clone_name_input\"], clone_name),\n text=clone_name,\n )\n else:\n self.do_send_keys(self.pvc_loc[\"clone_name_input\"], text=clone_name)\n\n if (\n not self.ocp_version_full == version.VERSION_4_6\n and ocs_version == version.VERSION_4_6\n ):\n logger.info(\"Select Access Mode of clone PVC\")\n self.do_click(self.pvc_loc[cloned_pvc_access_mode])\n\n logger.info(\"Click on Clone button\")\n self.do_click(generic_locators[\"confirm_action\"], enable_screenshot=True)", "def test_split_not_mirror(utils_patch):\n ret = {}\n ret[\"stdout\"] = \"\"\n ret[\n \"stderr\"\n ] = \"Unable to split datapool: Source pool must be composed only of mirrors\"\n ret[\"retcode\"] = 1\n mock_cmd = MagicMock(return_value=ret)\n\n with patch.dict(zpool.__salt__, {\"cmd.run_all\": mock_cmd}), patch.dict(\n zpool.__utils__, utils_patch\n ):\n ret = zpool.split(\"datapool\", \"backuppool\")\n res = OrderedDict(\n [\n (\"split\", False),\n (\n \"error\",\n \"Unable to split datapool: Source pool must be composed only of\"\n \" mirrors\",\n ),\n ]\n )\n assert ret == res", "def clone_status():\n INI_FILE1 = os.path.join(\"/\", CLONE_ARCHIVE_DIR, CLONE_ISO_INI)\n INI_FILE2 = os.path.join(tsconfig.PLATFORM_CONF_PATH, CLONE_ISO_INI)\n name = \"unknown\"\n result = \"unknown\"\n installed_at = \"unknown time\"\n for ini_file in [INI_FILE1, INI_FILE2]:\n if os.path.exists(ini_file):\n with open(ini_file) as f:\n s = f.read()\n for line in s.split(\"\\n\"):\n if line.startswith(NAME):\n name = line.split(\"=\")[1].strip()\n elif line.startswith(RESULT):\n result = line.split(\"=\")[1].strip()\n elif line.startswith(INSTALLED):\n installed_at = 
line.split(\"=\")[1].strip()\n break # one file was found, skip the other file\n if result != \"unknown\":\n if result == OK:\n print(\"\\nInstallation of cloned image [{}] was successful at {}\\n\"\n .format(name, installed_at))\n elif result == FAIL:\n print(\"\\nInstallation of cloned image [{}] failed at {}\\n\"\n .format(name, installed_at))\n else:\n print(\"\\ninstall-clone is in progress.\\n\")\n else:\n print(\"\\nCloned image is not installed on this node.\\n\")", "def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)", "def test_clone_image_status_available(self):\n self._clone_volume_from_image('available', True)", "def test_git_clone(self):\n git_clone_return, git_clone_data = service_utils._git_clone(\n path=self.temp_dir,\n branch='fake-branch',\n username=self.username,\n service_name='fake-repo')\n self.assertEqual(git_clone_return, 1)\n repo_dir = os.path.join(self.temp_dir, 'services', 'fake-repo')\n self.assertFalse(os.path.isdir(repo_dir))\n\n git_clone_return, git_clone_data = service_utils._git_clone(\n path=self.temp_dir,\n branch='master',\n username=self.username,\n service_name='service-horizon')\n self.assertEqual(git_clone_return, 0)\n repo_dir = os.path.join(self.temp_dir, 'services', 'service-horizon')\n self.assertTrue(os.path.isdir(repo_dir))", "def prepareCommand(self, client):\n return 'git clone {0} .'.format( client.location )", "def test_ls_remote_with_local_clone(orchestra: OrchestraShim):\n # Clone the component sources\n orchestra(\"clone\", \"component_A\")\n\n component = orchestra.configuration.components[\"component_A\"]\n repo_path = component.clone.environment[\"SOURCE_DIR\"]\n\n new_branch_name = \"new-branch\"\n # Change branch\n git.run(repo_path, \"checkout\", \"-b\", new_branch_name)\n current_commit = git.rev_parse(repo_path)\n\n assert component.branch() == new_branch_name\n assert component.commit() == current_commit", "def _split_lot(\n num_shares,\n lot,\n lots,\n logger,\n type_of_lot,\n existing_loss_lot=None,\n existing_replacement_lot=None,\n):\n existing_lot_portion = float(num_shares) / float(lot.num_shares)\n new_lot_portion = float(lot.num_shares - num_shares) / float(lot.num_shares)\n\n new_lot = copy.deepcopy(lot)\n new_lot.num_shares -= num_shares\n new_lot.basis = int(round(new_lot.basis * new_lot_portion))\n new_lot.adjusted_basis = int(\n round(new_lot.adjusted_basis * new_lot_portion)\n )\n new_lot.proceeds = int(round(new_lot.proceeds * new_lot_portion))\n new_lot.adjustment = int(round(new_lot.adjustment * new_lot_portion))\n lots.add(new_lot)\n\n lot.num_shares = num_shares\n lot.basis = int(round(lot.basis * existing_lot_portion))\n lot.adjusted_basis = int(round(lot.adjusted_basis * existing_lot_portion))\n lot.proceeds = int(round(lot.proceeds * existing_lot_portion))\n lot.adjustment = int(round(lot.adjustment * existing_lot_portion))\n\n loss_lots = [lot] if type_of_lot == \"loss\" else [existing_loss_lot]\n split_off_loss_lots = [new_lot] if type_of_lot == \"loss\" else []\n replacement_lots = (\n [lot] if type_of_lot == \"replacement\" else [existing_replacement_lot]\n )\n split_off_replacement_lots = (\n [new_lot] if type_of_lot == 
\"replacement\" else []\n )\n logger.print_lots(\n \"Split {} in two\".format(type_of_lot),\n lots,\n loss_lots=loss_lots,\n split_off_loss_lots=split_off_loss_lots,\n replacement_lots=replacement_lots,\n split_off_replacement_lots=split_off_replacement_lots,\n )", "def isClone(self, vdisk):\n if vdisk.objecttype==\"clone\":\n return True\n else:\n return False", "def clone(self, source_name, snapshot_id, dest_name):\n wrap_popen('collie', 'vdi', 'clone', '-s', snapshot_id, source_name,\n dest_name)", "def test_split_success(utils_patch):\n ret = {}\n ret[\"stdout\"] = \"\"\n ret[\"stderr\"] = \"\"\n ret[\"retcode\"] = 0\n mock_cmd = MagicMock(return_value=ret)\n\n with patch.dict(zpool.__salt__, {\"cmd.run_all\": mock_cmd}), patch.dict(\n zpool.__utils__, utils_patch\n ):\n ret = zpool.split(\"datapool\", \"backuppool\")\n res = OrderedDict([(\"split\", True)])\n assert ret == res" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resume RAID parity scrubbing on the named traditional volume, plex, or RAID group. If no name is given, then resume scrubbing on all RAID groups for which it is suspended.
def volume_scrub_resume(self, name=None): return self.request( "volume-scrub-resume", { 'name': [ name, 'name', [ basestring, 'None' ], False ], }, { } )
[ "def volume_scrub_suspend(self, name=None):\n return self.request( \"volume-scrub-suspend\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def resume(shelf=None):\n\n _act_on_guests(shelf, \"resume\")", "def volume_scrub_start(self, name=None):\n return self.request( \"volume-scrub-start\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def resume(self, pid):\n pass", "def resume(self, scaling_group):\r\n # NOTE: This is not yet implemented. The code is based on the docs,\r\n # so it should either work or be pretty close.\r\n return self._manager.resume(scaling_group)", "def vm_resume(dut, vmName):\n run(['xec-vm', '-n', vmName, 'switch'], host=dut)", "def resume(self):\n self.r2api.frida_continue()", "def resume(self):\n self._call(\"resume\")", "def resume(self):\n assert self.running\n\n self._paused = False\n\n for process in self.processes:\n process.resume()", "async def resume(self, ctx):\n self.play_status[ctx.guild.id] = True\n player = self.players[ctx.guild.id]\n ctx.message.guild.voice_client.resume() # Resumes the audio stream\n await ctx.send(f\"Resumed {player.title}\")", "def pause_resume(self):\n if self.activeState in [LOOP,AGAIN,NEXT,UPDATE]:\n self.setActiveState(PAUSE)\n else:\n self.setActiveState(LOOP) \n self.status_update.emit(\"Status: Running...\")\n self.condition.wakeOne()", "def resume(self, scaling_group):\r\n uri = \"/%s/%s/resume\" % (self.uri_base, utils.get_id(scaling_group))\r\n resp, resp_body = self.api.method_post(uri)\r\n return None", "def restart_group(self, groupname):\n self._apply_group_func(groupname, self.restart_process)", "def resume_cleaning(self):\n if self.node.target_provision_state == states.MANAGEABLE:\n target_state = states.MANAGEABLE\n else:\n target_state = None\n self.process_event('resume', target_state=target_state)", "def volume_scrub_stop(self, name=None):\n return self.request( \"volume-scrub-stop\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def suspend_resume_vm_test(vm_name):\n assert ll_vms.suspendVm(True, vm_name), \"Failed to suspend vm\"\n logging.info(\"VM status: %s\", ll_vms.get_vm_state(vm_name=vm_name))\n assert ll_vms.startVm(\n positive=True, vm=vm_name,\n wait_for_status=config.VM_UP,\n timeout=2 * config_virt.VM_ACTION_TIMEOUT\n )\n return True", "def resume_game(request, responder):\n # TODO: this needs to take the user_id and get the last game_id\n\n responder.reply('resuming game...')\n\n frontend_update(request, responder, game_id='{OLD GAME_ID}', command='resume')", "async def on_resumed(self):\n\t print('[INFO] Resumed...')", "def resume_processes(self, scaling_processes=None):\n return self.connection.resume_processes(self.name, scaling_processes)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the options that have been set for the specified volume.
def volume_options_list_info(self, volume): return self.request( "volume-options-list-info", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'options': [ VolumeOptionInfo, True ], } )
[ "def get_volume_options(mnode, volname, option=None):\n if not option:\n _, get_vol_options, err = RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/options\" % volname, httplib.OK, None)\n else:\n _, get_vol_options, err = RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/options/%s\" % (volname, option),\n httplib.OK, None)\n if not err:\n get_vol_options = json.loads(get_vol_options)\n return get_vol_options\n return None", "def get_options(self) -> OptionValueContainer:\n return self._scoped_options", "def options(self):\n return self.data['options']", "def gluster_volume_options(sdv, sdvkey):\n # type: (dict, str) -> str\n try:\n vo = sdv[sdvkey]['volume_options']\n if util.is_none_or_empty(vo):\n raise KeyError()\n except KeyError:\n vo = None\n return vo", "def get_options():\n cursor = db.get_cursor()\n cursor.execute(SELECT_OPTIONS)\n options = cursor.fetchall()\n options = list(options)\n return options", "def options(self):\n return list(self._options.values())", "def options(self) -> OptionValueContainer:\n return self._scoped_options", "def test_get_options_interval_movers_volume(self):\n pass", "def vendor_options(self) -> pulumi.Output[Optional['outputs.VolumeAttachVendorOptions']]:\n return pulumi.get(self, \"vendor_options\")", "def options(self) -> List:\n return self._options", "def options(self) -> List[OptionInfo]:\n return []", "def get_options(self):\n # Changes to the options dict will not propagate to the\n # tokens, arguments or string representation of the command.\n # Therefore, the options are intended to be read-only which this\n # API hopefully makes clear by making the attribute \"private\" and\n # the accessor return a copy of the dict.\n return self._options.copy()", "def vendor_options(self) -> Optional[pulumi.Input['VolumeAttachVendorOptionsArgs']]:\n return pulumi.get(self, \"vendor_options\")", "def options(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Option]:", "def options(self):\n if self._options is None:\n self._options = productoptions.VariationOptions(self, self.product_range)\n return self._options", "def _GetOptions(option_set, config):\n options = list(option_set[_OptionsParser.ALL_CONFIGS])\n if config != _OptionsParser.ALL_CONFIGS:\n options.extend(option_set[config])\n return options", "def options(self):\n pclass_options = self.get_product_class().options.all()\n return set(pclass_options) or set(self.product_options.all())", "def query_options(self):\n return self.port.query(\"*OPT?\")", "def get_all_guild_options(self):\n return self.get_items(GuildOptions)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of volumes and a breakdown of their space usage. This information is only available for online volumes. If no volume is specified, status is displayed for all online volumes on the filer. Note that if space status information for more than 20 volumes is desired, the volume-space-list-info-iter ZAPIs will be more efficient and should be used instead.
def volume_space_list_info(self, volume=None): return self.request( "volume-space-list-info", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'vol-space-infos': [ VolSpaceInfo, True ], } )
[ "def _display_oci_volume_list(volumes, output_mode, details, truncate):\n\n def _get_displayable_size(_, volume):\n return volume.get_size(format_str=OCI_VOLUME_SIZE_FMT.HUMAN.name)\n\n def _get_attached_instance_name(_, volume):\n global _this_instance_ocid\n if not volume.is_attached():\n return '-'\n _vol_instance_attach_to = volume.get_instance()\n if _vol_instance_attach_to.get_ocid() == _this_instance_ocid:\n return \"this instance\"\n pip = _vol_instance_attach_to.get_public_ip()\n if pip:\n return \"%s (%s)\" % (_vol_instance_attach_to.get_display_name(), _vol_instance_attach_to.get_public_ip())\n return _vol_instance_attach_to.get_display_name()\n\n def _get_comp_name(_, volume):\n \"\"\" keep track of compartment per ID as it may be expensive info to fetch \"\"\"\n _map = getattr(_get_comp_name, 'c_id_to_name', {})\n if volume.get_compartment_id() not in _map:\n _map[volume.get_compartment_id()] = volume.get_compartment().get_display_name()\n setattr(_get_comp_name, 'c_id_to_name', _map)\n return _map[volume.get_compartment_id()]\n\n if len(volumes) == 0:\n print('No other volumes found.')\n else:\n _title = 'Block volumes information'\n _columns = [['Name', 32, 'get_display_name'],\n ['Size', 6, _get_displayable_size],\n ['Attached to', 32, _get_attached_instance_name],\n ['OCID', 32, 'get_ocid']]\n if details:\n _columns.extend((['IQN', 14, 'get_iqn'],\n ['Compartment', 14, _get_comp_name],\n ['Availability domain', 19, 'get_availability_domain_name']))\n if output_mode == 'compat':\n printerKlass = get_row_printer_impl('text')\n else:\n printerKlass = get_row_printer_impl(output_mode)\n\n printer = printerKlass(title=_title, columns=_columns, text_truncate=truncate)\n printer.printHeader()\n for vol in volumes:\n printer.printRow(vol)\n printer.rowBreak()\n printer.printFooter()\n printer.finish()", "def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)", "def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )", "def describe_volume_status(DryRun=None, VolumeIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass", "def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 
20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "def volume_list(mnode):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes\", httplib.OK, None)", "def list(connection):\n volumes = get_watched_volumes(connection)\n\n if not volumes:\n logger.info('No watched volumes found')\n return\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n logger.info(\n '| {volume:<21} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume='Volume ID',\n volume_name='Volume name',\n interval='Interval',\n retention='Retention'))\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n\n for volume in volumes:\n if 'AutomatedEBSSnapshots' not in volume.tags:\n interval = 'Interval tag not found'\n elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS:\n interval = 'Invalid interval'\n else:\n interval = volume.tags['AutomatedEBSSnapshots']\n\n if 'AutomatedEBSSnapshotsRetention' not in volume.tags:\n retention = 0\n else:\n retention = volume.tags['AutomatedEBSSnapshotsRetention']\n\n # Get the volume name\n try:\n volume_name = volume.tags['Name']\n except KeyError:\n volume_name = ''\n\n logger.info(\n '| {volume_id:<14} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume_id=volume.id,\n volume_name=volume_name,\n interval=interval,\n retention=retention))\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')", "def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))", "def volume_footprint_list_info(self, volume=None):\n return self.request( \"volume-footprint-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-footprint-infos': [ VolFootprintInfo, False ],\n } )", "def volume_get_filer_info(self):\n return self.request( \"volume-get-filer-info\", {\n }, {\n 'disk-types': [ basestring, 
False ],\n 'default-raidtype': [ basestring, False ],\n 'checksum-types': [ basestring, False ],\n 'root-volume': [ basestring, False ],\n 'raidgroup-size': [ RaidgroupSizeInfo, True ],\n 'allowed-raidtypes': [ RaidtypeInfo, True ],\n 'snapshots-max': [ int, False ],\n } )", "def volume_list_paged(request, search_opts=None, marker=None, paginate=False,\n sort_dir=\"desc\"):\n has_more_data = False\n has_prev_data = False\n volumes = []\n\n # To support filtering with group_id, we need to use the microversion.\n c_client = _cinderclient_with_generic_groups(request)\n if c_client is None:\n return volumes, has_more_data, has_prev_data\n\n # build a dictionary of volume_id -> transfer\n transfers = {t.volume_id: t\n for t in transfer_list(request, search_opts=search_opts)}\n\n if paginate:\n page_size = utils.get_page_size(request)\n # sort_key and sort_dir deprecated in kilo, use sort\n # if pagination is true, we use a single sort parameter\n # by default, it is \"created_at\"\n sort = 'created_at:' + sort_dir\n for v in c_client.volumes.list(search_opts=search_opts,\n limit=page_size + 1,\n marker=marker,\n sort=sort):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n volumes, has_more_data, has_prev_data = update_pagination(\n volumes, page_size, marker, sort_dir)\n else:\n for v in c_client.volumes.list(search_opts=search_opts):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n\n return volumes, has_more_data, has_prev_data", "def describe_volumes(self, xml_bytes):\n root = XML(xml_bytes)\n result = []\n for volume_data in root.find(\"volumeSet\"):\n volume_id = volume_data.findtext(\"volumeId\")\n size = int(volume_data.findtext(\"size\"))\n snapshot_id = volume_data.findtext(\"snapshotId\")\n availability_zone = volume_data.findtext(\"availabilityZone\")\n status = volume_data.findtext(\"status\")\n create_time = volume_data.findtext(\"createTime\")\n create_time = datetime.strptime(\n create_time[:19], \"%Y-%m-%dT%H:%M:%S\")\n volume = model.Volume(\n volume_id, size, status, create_time, availability_zone,\n snapshot_id)\n result.append(volume)\n for attachment_data in volume_data.find(\"attachmentSet\"):\n instance_id = attachment_data.findtext(\"instanceId\")\n device = attachment_data.findtext(\"device\")\n status = attachment_data.findtext(\"status\")\n attach_time = attachment_data.findtext(\"attachTime\")\n attach_time = datetime.strptime(\n attach_time[:19], \"%Y-%m-%dT%H:%M:%S\")\n attachment = model.Attachment(\n instance_id, device, status, attach_time)\n volume.attachments.append(attachment)\n return result", "def volume_info(mnode, volname):\n return RestClient(mnode).handle_request(\"GET\",\n \"/v1/volumes/%s\" % volname,\n httplib.OK, None)", "def test_01_list_volumes(self):\n list_volume_response = Volume.list(\n self.apiclient,\n ids=[self.vm1_root_volume.id, self.vm2_root_volume.id, self.vm3_root_volume.id],\n type='ROOT',\n listAll=True\n )\n self.assertEqual(\n isinstance(list_volume_response, list),\n True,\n \"List Volume response was not a valid list\"\n )\n self.assertEqual(\n len(list_volume_response),\n 3,\n \"ListVolumes response expected 3 Volumes, received %s\" % len(list_volume_response)\n )", "def lv_params(self):\n ALLLV = []\n if self.LVM_SNAP:\n vglist = self.__snap_stanza_read(self.LVM_SNAP, 'lsvg -o')\n if vglist:\n for vg in vglist:\n LVS = []\n tempfile = self.CWD + 'lvm/' + vg + '.snap'\n if access(tempfile, R_OK):\n lv_params = self.__snap_stanza_read(open(tempfile), 'lsvg -l ' + vg)\n if lv_params:\n for record in 
lv_params:\n LV = {}\n if not ('LV NAME' in record) and not (vg+':' in record):\n LV.update({'name' : record.split()[0]})\n LV.update({'type' : record.split()[1]})\n if record.split()[2] == record.split()[3]:\n LV.update({'copies' : '1'})\n else:\n if (int(record.split()[3])/int(record.split()[2])) == 2:\n LV.update({'copies' : '2'})\n else:\n LV.update({'copies' : 'N/A'})\n LV.update({'state' : record.split()[5]})\n LV.update({'mount' : record.split()[6]})\n LVS.append(LV)\n ALLLV.append({'volgroup' : vg , 'volumes' : LVS})\n else:\n print \"cannot access vg - \" + vg\n else:\n return None\n else:\n return None\n if self.FS_SNAP:\n df_params = self.__snap_stanza_read(self.FS_SNAP, 'df -k')\n if df_params:\n for vg in ALLLV:\n for lv in vg['volumes']:\n lv.update({'mounted' : 'No', 'used' : 'N/A', 'iused' : 'N/A'})\n for vg in ALLLV:\n for lv in vg['volumes']:\n for record in df_params:\n if '/dev/'+lv['name'] in record:\n lv.update({'mounted' : 'Yes', 'used' : record.split()[3], 'iused' : record.split()[5]})\n return ALLLV", "def get_storageinfo(ns):\n tf = TableFormatter(stdout, 0, True, {0: FIRST_COLUMN_MIN_SIZE})\n\n try:\n localfss = get_all_instances(ns, 'LMI_LocalFileSystem')\n except Exception:\n result = [(get_colored_string('error:', RED_COLOR),\n 'Missing class LMI_LocalFileSystem. Is openlmi-storage package installed on the server?')]\n tf.produce_output(result)\n return []\n\n total = 0\n free = 0\n for fs in localfss:\n if fs.FileSystemSize:\n total += fs.FileSystemSize\n if fs.AvailableSpace:\n free += fs.AvailableSpace\n\n result = [('Disk Space:', '%s total, %s free' % (format_memory_size(total),\n format_memory_size(free)))]\n tf.produce_output(result)\n return []", "def getOldUnusedVols(verbose,region):\n res = {}\n savings = 0\n dvolumes = getVolumePrices(region)\n ec2volumes = EC2C.describe_volumes(Filters=[\n {\n 'Name': 'status',\n 'Values': [\n 'available',\n ],\n }]).get('Volumes', [])\n\n today = datetime.datetime.now(datetime.timezone.utc)\n days30 = today-datetime.timedelta(days=30)\n for vol in ec2volumes:\n if not 'Tags' in vol:\n if vol['CreateTime'] < days30:\n if verbose:\n res[vol['VolumeId']] = str(vol['CreateTime'])+\";\"+str(vol['Size'])+\";\"+str(vol['VolumeType'])\n else:\n res[vol['VolumeId']] = str(vol['CreateTime'])\n savings += float(vol['Size'] * float(dvolumes[vol['VolumeType']]))\n return savings, res", "def list(self,\n **kwargs\n ):\n\n # dont filter_name=None,\n # dont filter_value=None,\n # dryrun=False):\n\n #:param filter_name (string)\n #:param filter_value (string)\n #:param volume_ids (list): The volume IDs\n\n # filter = \"[[\n # {\n # 'Name': 'xyz',\n # 'Values': [\n # 'abc',\n # ]\n # },\n # ]\"\n\n # filter = eval(filter)\n\n #banner('print kwargs')\n #print(kwargs)\n #print(kwargs['output'])\n\n client = boto3.client('ec2')\n dryrun = kwargs['--dryrun']\n #region = kwargs['--region']\n #vm = kwargs['--vm']# will need vm id from mongo records\n result = client.describe_volumes(\n DryRun=dryrun,\n # Filters=[\n # {\n # 'Name': {},\n # 'Values': [\n # filter_value,\n # ]\n # },\n # ],\n )\n #banner(\"raw results\")\n #print(result)\n #banner(\"raw results end\")\n result = self.update_dict(result)\n\n #print(self.Print(result, kind='volume', output=kwargs['output']))\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Restrict the specified Infinite Volume, making it unavailable for data access. This API is not supported for Flexible Volumes. This API is not supported on Infinite Volume constituents.
def volume_restrict_async(self, volume_name): return self.request( "volume-restrict-async", { 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ], }, { 'result-error-message': [ basestring, False ], 'result-jobid': [ int, False ], 'result-error-code': [ int, False ], 'result-status': [ basestring, False ], } )
[ "def volume_restrict(self, name, cifs_delay=None):\n return self.request( \"volume-restrict\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n 'cifs_delay': [ cifs_delay, 'cifs-delay', [ int, 'None' ], False ],\n }, {\n } )", "def narrow(self, *args):\n return _coin.SbViewVolume_narrow(self, *args)", "def narrow(self, *args):\n return _coin.SbDPViewVolume_narrow(self, *args)", "def narrow(self, *args) -> \"SbViewVolume\":\n return _coin.SbViewVolume_narrow(self, *args)", "def _chopped_volume_default(self):\n grid = self.grid\n grid.trait_set(x_max=self.slicePosition[1])\n\n volume = mlab.pipeline.volume(\n grid,\n figure=self.vscene3d.mayavi_scene,\n vmin=self.dataRange[0],\n vmax=self.dataRange[1]\n )\n\n volume._otf = self.otf\n volume._volume_property.set_scalar_opacity(self.otf)\n\n return volume", "def narrow(self, *args) -> \"SbDPViewVolume\":\n return _coin.SbDPViewVolume_narrow(self, *args)", "def subvoxel(self):\n return (not self.valid()) or self.volume() < 1", "def volume(ctx, vol):\n avr = ctx.obj['avr']\n if vol:\n try:\n avr.volume = vol\n click.echo(avr.volume)\n except ReponseException as e:\n if \"Volume\" in str(e):\n msg = \"Volume must be specified in -0.5 increments.\"\n err = click.style(msg, fg='red')\n click.echo(err, err=True)\n else:\n click.echo(avr.volume)", "def get_advertiser_search_volume(self) -> int:\n raise NotImplementedError", "def volume(ctx, *args, **kwargs):", "def test_negative_no_volume_type(self):\n self.flags(default_volume_type=None)\n ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,\n is_admin=True)\n body = {'volume': {'host': 'host_ok',\n 'ref': 'fake_ref'}}\n req = webob.Request.blank('/v3/%s/os-volume-manage' % fake.PROJECT_ID)\n req.method = 'POST'\n req.headers['Content-Type'] = 'application/json'\n req.body = jsonutils.dump_as_bytes(body)\n res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt))\n self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, res.status_int)", "def get_max_volume(self) -> float:", "def _full_volume_default(self):\n grid = self.full_grid\n\n y_min = 0\n if self.coreSym == 4:\n y_min = self.matrix.shape[1] + 2\n elif self.coreSym == 1:\n y_min = (self.matrix.shape[1] / 2) + 2\n grid.trait_set(y_min=y_min)\n\n volume = mlab.pipeline.volume(\n grid,\n figure=self.vscene3d.mayavi_scene,\n vmin=self.dataRange[0],\n vmax=self.dataRange[1]\n )\n\n volume._otf = self.otf\n volume._volume_property.set_scalar_opacity(self.otf)\n\n return volume", "def modify_volume_attribute(DryRun=None, VolumeId=None, AutoEnableIO=None):\n pass", "def test_azure_service_api_volume_get(self):\n pass", "def zNarrow(self, nearval: 'double', farval: 'double') -> \"SbDPViewVolume\":\n return _coin.SbDPViewVolume_zNarrow(self, nearval, farval)", "def build_mask(volume):\n\n data = np.array(volume, copy=False)\n mask = (data > 0).astype(np.float32)\n mask = pydeform.Volume(mask)\n mask.copy_meta_from(volume)\n\n return mask", "def zNarrow(self, nearval: 'float', farval: 'float') -> \"SbViewVolume\":\n return _coin.SbViewVolume_zNarrow(self, nearval, farval)", "def volume_get_supported_guarantees(self, volume):\n return self.request( \"volume-get-supported-guarantees\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'guarantee-types': [ Guarantee, True ],\n } )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get information on what possibilities and parameters exist for volumes on a given filer.
def volume_get_filer_info(self): return self.request( "volume-get-filer-info", { }, { 'disk-types': [ basestring, False ], 'default-raidtype': [ basestring, False ], 'checksum-types': [ basestring, False ], 'root-volume': [ basestring, False ], 'raidgroup-size': [ RaidgroupSizeInfo, True ], 'allowed-raidtypes': [ RaidtypeInfo, True ], 'snapshots-max': [ int, False ], } )
[ "def volume_wafl_info(self):\n return self.request( \"volume-wafl-info\", {\n }, {\n 'root-volume': [ basestring, False ],\n 'disk-types': [ basestring, False ],\n 'snapshots-max': [ int, False ],\n 'checksum-types': [ basestring, False ],\n } )", "def do_showVolumes(self, filer):\n\t\tcommand = 'ssh -qn admin@%s vol show' % self.filer\n\t\tproc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n\t\tp_stdout = proc.communicate()[0]\n\t\tprint p_stdout", "def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )", "def lv_params(self):\n ALLLV = []\n if self.LVM_SNAP:\n vglist = self.__snap_stanza_read(self.LVM_SNAP, 'lsvg -o')\n if vglist:\n for vg in vglist:\n LVS = []\n tempfile = self.CWD + 'lvm/' + vg + '.snap'\n if access(tempfile, R_OK):\n lv_params = self.__snap_stanza_read(open(tempfile), 'lsvg -l ' + vg)\n if lv_params:\n for record in lv_params:\n LV = {}\n if not ('LV NAME' in record) and not (vg+':' in record):\n LV.update({'name' : record.split()[0]})\n LV.update({'type' : record.split()[1]})\n if record.split()[2] == record.split()[3]:\n LV.update({'copies' : '1'})\n else:\n if (int(record.split()[3])/int(record.split()[2])) == 2:\n LV.update({'copies' : '2'})\n else:\n LV.update({'copies' : 'N/A'})\n LV.update({'state' : record.split()[5]})\n LV.update({'mount' : record.split()[6]})\n LVS.append(LV)\n ALLLV.append({'volgroup' : vg , 'volumes' : LVS})\n else:\n print \"cannot access vg - \" + vg\n else:\n return None\n else:\n return None\n if self.FS_SNAP:\n df_params = self.__snap_stanza_read(self.FS_SNAP, 'df -k')\n if df_params:\n for vg in ALLLV:\n for lv in vg['volumes']:\n lv.update({'mounted' : 'No', 'used' : 'N/A', 'iused' : 'N/A'})\n for vg in ALLLV:\n for lv in vg['volumes']:\n for record in df_params:\n if '/dev/'+lv['name'] in record:\n lv.update({'mounted' : 'Yes', 'used' : record.split()[3], 'iused' : record.split()[5]})\n return ALLLV", "def find_volumes():\n global email_message\n email_message += 'Finding volumes that match the requested filter: %(filter)s\\n\\n' % {\n 'filter': config.volumes['filter']\n }\n return conn.get_all_volumes(filters=config.volumes['filter'])", "def get_AVs(filter='F606W',verbose=True):\n\n fieldinfo = cft.get_fieldinfo()\n\n for ff in fieldinfo.keys():\n if verbose: print ' - Getting Av in filter '+filter+' for '+fieldinfo[ff]['name']\n Av,Ebv = kbs.getAv(fieldinfo[ff]['ra'],fieldinfo[ff]['dec'],filter)\n print Av, Ebv", "def describe_volumes(DryRun=None, VolumeIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass", "def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))", "def list(self,\n **kwargs\n ):\n\n # dont filter_name=None,\n # dont filter_value=None,\n # dryrun=False):\n\n #:param filter_name (string)\n #:param filter_value (string)\n #:param volume_ids (list): The volume IDs\n\n # filter = \"[[\n # {\n # 'Name': 'xyz',\n # 'Values': [\n # 'abc',\n # ]\n # },\n # ]\"\n\n # filter = eval(filter)\n\n #banner('print kwargs')\n #print(kwargs)\n #print(kwargs['output'])\n\n client = boto3.client('ec2')\n dryrun = 
kwargs['--dryrun']\n #region = kwargs['--region']\n #vm = kwargs['--vm']# will need vm id from mongo records\n result = client.describe_volumes(\n DryRun=dryrun,\n # Filters=[\n # {\n # 'Name': {},\n # 'Values': [\n # filter_value,\n # ]\n # },\n # ],\n )\n #banner(\"raw results\")\n #print(result)\n #banner(\"raw results end\")\n result = self.update_dict(result)\n\n #print(self.Print(result, kind='volume', output=kwargs['output']))\n\n return result", "def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device", "def test_volume_info(self):\n pass", "def _get_available_fields(self):\n return self.walkdir(PATH_DEBUGFS_KVM)[2]", "def volume_space_list_info(self, volume=None):\n return self.request( \"volume-space-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-space-infos': [ VolSpaceInfo, True ],\n } )", "def volumes_for(self, prod):\n if prod:\n return self.volumes['shared'] + self.volumes['prod']\n else:\n return self.volumes['shared'] + self.volumes['dev']", "def fpolicy_volume_list_info(self, policy_name):\n return self.request( \"fpolicy-volume-list-info\", {\n 'policy_name': [ policy_name, 'policy-name', [ basestring, 'None' ], False ],\n }, {\n 'include-volumes': [ FpolicyVolumesListInfo, True ],\n 'exclude-volumes': [ FpolicyVolumesListInfo, True ],\n } )", "def vg_params(self):\n LVM = []\n if self.LVM_SNAP:\n vglist = self.__snap_stanza_read(self.LVM_SNAP, 'lsvg -o')\n if vglist:\n for vg in vglist:\n VG = {}\n tempfile = self.CWD + 'lvm/' + vg + '.snap'\n if access(tempfile, R_OK):\n vg_params = self.__snap_stanza_read(open(tempfile), 'lsvg ' + vg)\n if vg_params:\n for record in vg_params:\n if 'VOLUME GROUP' in record:\n VG.update({'name' : record.split()[2]})\n if 'VG STATE' in record:\n VG.update({'state' : record.split()[2]})\n VG.update({'pp_size' : record.split()[5] + ' ' + record.split()[6]})\n if 'FREE PPs' in record:\n VG.update({'free_size' : record.split()[5]})\n if 'TOTAL PVs' in record:\n VG.update({'totalpv' : record.split()[2]})\n if 'AUTO ON' in record:\n VG.update({'activepv' : record.split()[2]})\n VG.update({'auto' : record.split()[5]})\n LVM.append(VG)\n else:\n print \"cannot access vg - \" + vg\n else:\n return None\n else:\n return None\n return LVM", "def get_bdev_info(self):\n targets = self.server_managers[-1].get_config_value('targets')\n bdev_tiers = 0\n bdev_info = []\n for engine in self.server_managers[-1].manager.job.yaml.engine_params:\n for index, tier in enumerate(engine.storage.storage_tiers):\n if tier.storage_class.value == 'nvme':\n bdev_tiers += 1\n for item, device in enumerate(tier.bdev_list.value):\n bdev_info.append(\n {'bdev': device,\n 'roles': ','.join(tier.bdev_roles.value or []),\n 'tier': index,\n 'tgt_ids': list(range(item, targets, len(tier.bdev_list.value)))})\n\n self.log.info('Detected NVMe devices in config')\n for bdev in bdev_info:\n self.log.info(' %s', dict_to_str(bdev, items_joiner=':'))\n return bdev_info", "def volumes(self):\r\n volumes = self.properties[self.VOLUMES]\r\n\r\n return ((vol[self.VOLUME_ID],\r\n vol[self.VOLUME_DEVICE]) for vol in volumes)", "def get_fru_info():\n\n status, ret_values = \\\n grk.run_key_u(\"Run IPMI Standard Command fru print -N 50\")\n\n # Manipulate the \"Device not present\" line to create a 
\"state\" key.\n ret_values = re.sub(\"Device not present\", \"state : Device not present\",\n ret_values)\n\n return [vf.key_value_outbuf_to_dict(x) for x in re.split(\"\\n\\n\",\n ret_values)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the specified plex from a mirrored traditional volume and create a new unmirrored traditional volume with the specified name that contains the split-off plex. The original mirrored traditional volume becomes unmirrored. The plex to be split from the original traditional volume must be functional (not partial), but it could be inactive, resyncing, or out-of-date. A 'volume-split' operation can therefore be used to gain access to a plex that is not up to date with respect to its partner plex if its partner plex is currently failed. If the plex is offline at the time of the split, the resulting traditional volume will also be offline. Otherwise, the resulting traditional volume will be in the same online/offline/restricted state as the original traditional volume. Note that a split mirror can be joined back together via the "victim-volume" option to "volume-mirror".
def volume_split(self, new_volume_name, plex): return self.request( "volume-split", { 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ], 'plex': [ plex, 'plex', [ basestring, 'None' ], False ], }, { } )
[ "def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def remove_segmented_mirror(self):\n self.sm = SegmentedMirror(indexed_aperture=self.aper_ind, seg_pos=self.seg_pos)", "def unmanage(self, volume):\n\n vol_name = self._get_vol_name(volume)\n if len(vol_name + UNMANAGED_SUFFIX) > MAX_VOL_LENGTH:\n unmanaged_vol_name = vol_name[:-len(UNMANAGED_SUFFIX)] + \\\n UNMANAGED_SUFFIX\n else:\n unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX\n LOG.info(\"Renaming existing volume %(ref_name)s to %(new_name)s\",\n {\"ref_name\": vol_name, \"new_name\": unmanaged_vol_name})\n self._rename_volume_object(vol_name, unmanaged_vol_name)", "def remove_split(self, split_name):\n\n if self._adapter.cache_exists(self._KEY_CURRENT_SPLITS, _SPLITIO_COMMON_CACHE_NAMESPACE):\n current_splits = decode(self._adapter.cache_get(self._KEY_CURRENT_SPLITS, _SPLITIO_COMMON_CACHE_NAMESPACE))\n current_splits.pop(split_name, None)\n self._adapter.cache_update(self._KEY_CURRENT_SPLITS, encode(current_splits), 0,\n _SPLITIO_COMMON_CACHE_NAMESPACE)\n\n return self._adapter.cache_del(self._KEY_TEMPLATE.format(suffix=split_name), _SPLITIO_COMMON_CACHE_NAMESPACE)", "def delLogicalVolume(self, lv):\n self.getElement().removeChild(lv.getElement())\n del self.lvs[lv.getAttribute(\"name\")]", "def test_split_not_mirror(utils_patch):\n ret = {}\n ret[\"stdout\"] = \"\"\n ret[\n \"stderr\"\n ] = \"Unable to split datapool: Source pool must be composed only of mirrors\"\n ret[\"retcode\"] = 1\n mock_cmd = MagicMock(return_value=ret)\n\n with patch.dict(zpool.__salt__, {\"cmd.run_all\": mock_cmd}), patch.dict(\n zpool.__utils__, utils_patch\n ):\n ret = zpool.split(\"datapool\", \"backuppool\")\n res = OrderedDict(\n [\n (\"split\", False),\n (\n \"error\",\n \"Unable to split datapool: Source pool must be composed only of\"\n \" mirrors\",\n ),\n ]\n )\n assert ret == res", "def promote_original_master(s, name):\n s.execute_command('SENTINEL', 'FAILOVER', name)", "def splitASLvols(imgFile, aslVolFile, pdVolFile):\n\ttrim.inputs.in_file = imgFile \n\ttrim.inputs.out_file = aslVolFile\n\ttrim.inputs.end_index = 1\n\ttrim.run()\t\n\ttrim.inputs.out_file = pdVolFile\n\ttrim.inputs.end_index = 2\n\ttrim.inputs.begin_index = 1\n\ttrim.run()", "def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def _disable_trisync_replication(self, array, volume):\n try:\n array.set_pgroup(self._trisync_name,\n remvollist=[self._get_vol_name(volume)])\n except purestorage.PureHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 400 and\n ERR_MSG_NOT_EXIST in err.text):\n ctxt.reraise = False\n LOG.warning(\"Removing Volume from sync-replicated \"\n \"Protection Group failed with message: %s\",\n err.text)", "def restore_ldev(self, pvol, svol):\n timeout = utils.MAX_PROCESS_WAITTIME\n\n params_s = {\"svolLdevId\": svol}\n result = self.client.get_snapshots(params_s)\n mun = result[0]['muNumber']\n body = {\"parameters\": {\"autoSplit\": True}}\n self.client.restore_snapshot(pvol, mun, body)\n\n self._wait_copy_pair_status(\n svol, PSUS, timeout=timeout, interval=10)", "def x_mirror(self, name: str, replace: 
Optional[str] = None) -> \"SimplePolygon\":\n # Grab some values from *simple_polygon* (i.e. *self*):\n simple_polygon: SimplePolygon = self\n simple_polygon_name: str = simple_polygon.name\n\n points: List[P2D] = simple_polygon.points\n\n # Compute *new_name* and *x_mirrored_points*:\n new_name: str = (name if replace is None\n else simple_polygon_name.replace(name, replace))\n point: P2D\n x_mirrored_points: List[P2D] = [P2D(point.x, -point.y) for point in points]\n\n # Construct the final *x_mirrored_simple_polygon* and return it.\n x_mirrored_simple_polygon: SimplePolygon = SimplePolygon(new_name,\n x_mirrored_points, lock=True)\n return x_mirrored_simple_polygon", "def delPhysicalVolume(self, pv):\n self.getElement().removeChild(pv.getElement())\n del self.pvs[pv.getAttribute(\"name\")]", "def splitLargeVolumes(vialvolumelist, splitvolume=100):\n outputlist = []\n\n for vial, volume in vialvolumelist:\n if float(volume) > splitvolume:\n steps = int(float(volume) // splitvolume) + 1\n for antal in xrange(steps):\n outputlist.append((vial, round((float(volume)/steps), 1)))\n else:\n outputlist.append((vial, volume))\n return outputlist", "def kill_split(self, split_name, default_treatment, change_number):\n self._split_synchronizers.split_sync.kill_split(split_name, default_treatment,\n change_number)", "def NETRBufVSplitOpen(self):\n self.NETROpen(Vim.Var('NETRSplitOrientation') + ' vsplit',\n use_rifle=False)", "def split_vessel(nodehandle, vesselname, desiredresourcedata):\n assert_str(vesselname)\n assert_str(desiredresourcedata)\n \n splitvesselretval = _do_signed_call(nodehandle[0], nodehandle[1],\n 'SplitVessel', vesselname, desiredresourcedata)\n \n # Get the new vessel names. The \"left\" vessel has the leftovers, the \n # \"right\" is of the size requested.\n leftovervesselname, exactvesselname = splitvesselretval.split()\n\n return (leftovervesselname, exactvesselname)", "def untrim(noChanges=bool, curveOnSurface=bool, object=bool, nodeState=int, replaceOriginal=bool, constructionHistory=bool, caching=bool, untrimAll=bool, name=\"string\"):\n pass", "def kill_split(self, split_name, default_treatment, change_number):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stop RAID parity scrubbing on the named volume, plex, or group; if no name is given, on all RAID groups currently undergoing parity scrubbing.
def volume_scrub_stop(self, name=None): return self.request( "volume-scrub-stop", { 'name': [ name, 'name', [ basestring, 'None' ], False ], }, { } )
[ "def stop():\n # report operation\n llecho('Stopping all LVM volume groups')\n\n # make 10 tries to stop all volume groups\n status = 0\n\n for i in range(0, 10):\n\n # run the command to stop volume groups\n status = run(CMD_STOP_LVM)\n\n # stopped successfully: quit\n if status == 0:\n break\n\n # wait 1 second before another try\n time.sleep(1)\n\n # cannot stop volume groups: fail\n if status != 0:\n llecho('Error: cannot deactivate LVM volume groups')\n raise ZKVMError('PARTITIONER', 'LVM', 'DEACTIVATE_VG')", "async def stop_balancing(self) -> None:\n print('[Balancing] Stopping balancing')\n\n # reset speaker volumes for all rooms\n for room in self.config.rooms:\n speaker_volumes = []\n for _ in room.volume_interpolation.speakers:\n speaker_volumes.append(room.user_volume)\n\n # reset volume\n command = SonosVolumeCommand(room.volume_interpolation.speakers, speaker_volumes)\n self.sonos.send_command(command)\n\n # cleanup balancing info\n if room.room_id in self.room_info:\n del self.room_info[room.room_id]\n\n self.previous_volumes = {}", "def volume_stop(mnode, volname, force=False):\n return RestClient(mnode).handle_request(\n \"POST\", \"/v1/volumes/%s/stop\" % volname,\n httplib.OK, None)", "def test_volume_stop(self):\n pass", "def disable(self):\n for volume in self.volumes:\n try:\n self._renderer.RemoveVolume(volume)\n except:\n pass # TBD: any error logging.", "def stop_rotation(self, rotavap_name):\n self.logger.info(\"Stopping rotation for rotavap {0}...\".format(rotavap_name))\n rotavap_obj = self.rotavap[rotavap_name]\n rotavap_obj.stop_rotation()\n self.logger.info(\"Done.\")", "def kill_split(self, split_name, default_treatment, change_number):\n pass", "def kill_split(self, split_name, default_treatment, change_number):\n self._split_synchronizers.split_sync.kill_split(split_name, default_treatment,\n change_number)", "def stop_job(self, name):\r\n\r\n sessionid, name = self._parse_name(name)\r\n pname = \"%s.%s\" % (sessionid, name)\r\n\r\n with self._lock:\r\n state = self._get_state(sessionid, name)\r\n\r\n # put the number to 0\r\n state.numprocesses = 0\r\n # flag the state to stop\r\n state.stopped = True\r\n\r\n # notify that we are stoppping the process\r\n self._publish(\"stop\", name=pname)\r\n self._publish(\"job.%s.stop\" % pname, name=pname)\r\n\r\n self._stopall(state)", "def _stop_processes(self, name):\n if name not in self.processes:\n return\n\n # get the template\n state = self.processes[name]\n if state.stopped:\n return\n\n state.stopped = True\n\n # notify others that all processes of the templates are beeing\n # stopped.\n self._publish(\"stop\", name=name)\n self._publish(\"proc.%s.stop\" % name, name=name)\n\n # stop the flapping detection.\n if state.flapping_timer is not None:\n state.flapping_timer.stop()\n\n # iterrate over queued processes.\n while True:\n try:\n p = state.dequeue()\n except IndexError:\n break\n\n # notify other that the process is beeing stopped\n self._publish(\"stop_pid\", name=p.name, pid=p.id, os_pid=p.pid)\n self._publish(\"proc.%s.stop_pid\" % p.name, name=p.name,\n pid=p.id, os_pid=p.pid)\n\n # remove the pid from the running processes\n if p.id in self.running:\n self.running.pop(p.id)\n\n # stop the process\n p.stop()\n\n # track this process to make sure it's killed after the\n # graceful time\n self._tracker.check(p, state.graceful_timeout)", "def stop( self, type=STOPPED ):\n\n if self.status == type:\n return\n\n self.logMsg( \"Pool stopped, stopping all children.\" )\n\n for c in self.children:\n 
self.endChild( c.num )\n\n self.status = type \n self.reporting.stop( )", "def volume_scrub_suspend(self, name=None):\n return self.request( \"volume-scrub-suspend\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def stopMotor(self) -> None:\n ...", "def stopfsnapclean(self, fpgname, pause=False):", "def _remove_from_consistencygroup(self, group, remove_volumes):\n LOG.debug(_(\"Removing %(vols)s from consistencygroup %(group)s\") %\n {'vols': remove_volumes, 'group': group})\n\n ans = self.vmem_mg.snapshot.remove_luns_from_snapgroup(\n group, remove_volumes)\n\n if not ans['success']:\n msg = (_(\"Failed to remove volumes %(vols)s from \" +\n \"consistencygroup %(group)s: %(msg)s\") %\n {'vols': remove_volumes, 'group': group, 'msg': ans['msg']})\n raise exception.ViolinBackendErr(message=msg)", "def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def _disable_trisync_replication(self, array, volume):\n try:\n array.set_pgroup(self._trisync_name,\n remvollist=[self._get_vol_name(volume)])\n except purestorage.PureHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 400 and\n ERR_MSG_NOT_EXIST in err.text):\n ctxt.reraise = False\n LOG.warning(\"Removing Volume from sync-replicated \"\n \"Protection Group failed with message: %s\",\n err.text)", "def stop(self):\n Multipass.stop(self.name)", "def ungroup(self):\n for speaker in reversed(self._speakers):\n speaker.ungroup()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Suspend RAID parity scrubbing on the named traditional volume, plex, or RAID group. If no name is given, suspend scrubbing on all RAID groups currently being scrubbed.
def volume_scrub_suspend(self, name=None): return self.request( "volume-scrub-suspend", { 'name': [ name, 'name', [ basestring, 'None' ], False ], }, { } )
[ "def volume_scrub_resume(self, name=None):\n return self.request( \"volume-scrub-resume\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_scrub_stop(self, name=None):\n return self.request( \"volume-scrub-stop\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_scrub_start(self, name=None):\n return self.request( \"volume-scrub-start\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def suspend(shelf=None):\n\n _act_on_guests(shelf, \"suspend\")", "def restart_group(self, groupname):\n self._apply_group_func(groupname, self.restart_process)", "def suspend_resume_vm_test(vm_name):\n assert ll_vms.suspendVm(True, vm_name), \"Failed to suspend vm\"\n logging.info(\"VM status: %s\", ll_vms.get_vm_state(vm_name=vm_name))\n assert ll_vms.startVm(\n positive=True, vm=vm_name,\n wait_for_status=config.VM_UP,\n timeout=2 * config_virt.VM_ACTION_TIMEOUT\n )\n return True", "def suspend(self, pid):\n pass", "def volume_verify_suspend(self, volume=None):\n return self.request( \"volume-verify-suspend\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def cmd_switch_groups(self, name):\r\n self.qtile.cmd_switch_groups(self.name, name)", "def vm_ejectiso(vmname: str):\n subprocess.run(\"virsh --connect qemu:///system change-media {0} sda --eject --config\".format(vmname), shell=True, check=False)", "def disable_heal(mnode, volname):\n cmd = \"gluster volume heal %s disable\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n return False\n\n return True", "def ex_suspend_node(self, node):\r\n return self._perform_power_operation(node, 'suspend')", "def test_scrub_pause(utils_patch):\n ret = {}\n ret[\"stdout\"] = \"\"\n ret[\"stderr\"] = \"\"\n ret[\"retcode\"] = 0\n mock_cmd = MagicMock(return_value=ret)\n mock_exists = MagicMock(return_value=True)\n\n with patch.dict(zpool.__salt__, {\"zpool.exists\": mock_exists}), patch.dict(\n zpool.__salt__, {\"cmd.run_all\": mock_cmd}\n ), patch.dict(zpool.__utils__, utils_patch):\n ret = zpool.scrub(\"mypool\", pause=True)\n res = OrderedDict(OrderedDict([(\"scrubbing\", False)]))\n assert ret == res", "def vm_sleep_self(dut, who='all'):\n domlist = find_guest_vms(dut)\n for domain in domlist:\n if who == 'all' or domain['name'] == who:\n vmip = domains.domain_address(dut, domain['name'])\n print \"turning off hibernate :\" + str(call_exec_daemon(command='run', args=[r'powercfg -h off'], host=vmip, timeout=60))\n sleep(4)\n call_exec_daemon('run', [r'C:\\Windows\\System32\\rundll32.exe powrprof.dll,SetSuspendState sleep'], vmip)", "def suspend_processes(self, scaling_processes=None):\n return self.connection.suspend_processes(self.name, scaling_processes)", "def suspend(self):\n self._exec_cmd(_vix.VixVM_Suspend,\n self._vm_handle,\n 0, # Must be 0\n None,\n None\n )", "def pause(self, scaling_group):\r\n # NOTE: This is not yet implemented. 
The code is based on the docs,\r\n # so it should either work or be pretty close.\r\n return self._manager.pause(scaling_group)", "def disable(self):\n for volume in self.volumes:\n try:\n self._renderer.RemoveVolume(volume)\n except:\n pass # TBD: any error logging.", "def volume_restrict_async(self, volume_name):\n return self.request( \"volume-restrict-async\", {\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the junction path of the volume.
def volume_get_volume_path(self, volume, is_style_cifs): return self.request( "volume-get-volume-path", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], 'is_style_cifs': [ is_style_cifs, 'is-style-cifs', [ bool, 'None' ], False ], }, { 'junction': [ basestring, False ], } )
[ "def _get_volume_path(self):\n return heconflib.get_volume_path(\n self._parent.environment[ohostedcons.StorageEnv.SP_UUID],\n self._parent.environment[ohostedcons.StorageEnv.SD_UUID],\n self._parent.environment[ohostedcons.StorageEnv.IMG_UUID],\n self._parent.environment[ohostedcons.StorageEnv.VOL_UUID]\n )", "def path(self, *bits):\n out_path = self.mountpoint\n for bit in bits:\n bit = bit.lstrip(path_sep)\n out_path = join(out_path, bit)\n return out_path", "def get_volume_paths(self, connection_properties):\n volume_id = connection_properties.get('volume', None)\n if volume_id is None:\n raise exception.BrickException(\n 'Invalid StorPool connection data, no volume ID specified.')\n volume = self._attach.volumeName(volume_id)\n path = '/dev/storpool/' + volume\n dpath = connection_properties.get('device_path', None)\n if dpath is not None and dpath != path:\n raise exception.BrickException(\n 'Internal error: StorPool volume path {path} does not '\n 'match device path {dpath}',\n {path: path, dpath: dpath})\n return [path]", "def getSymlinkPath(self):\r\n # TODO: but how?\r\n raise UnsupportedOperationError(\"Not yet supported\")", "def current_path(self):\n # print(self.position)\n return os.sep.join([i.replace(os.sep, \"\") for i in self.position])", "def get_path(self) -> str:\n return f'{self.parent.path}.{self.key}' if self.parent else self.key", "def getSymlinkPath(self):\n # TODO: as in isSymlink\n raise NotImplementedError", "def get_cloudpath(self, resource):\n return resource.cloudvolume.layer_cloudpath", "def path_to(self, device: Device) -> BeamPath:\n return sorted(self.paths_to(device), key=self.imped_z)[-1]", "def getPNJunction(self):\n return self.pn_junction", "def getPath(self):\n # print(\"I'm serious. You actually did it. Here is your path again so you can see how far you have come.\")\n return self.pathTraveled", "def shared_data_volume_container_path(sdv, sdvkey):\n # type: (dict, str) -> str\n return sdv[sdvkey]['container_path']", "def find_s3_path(self):\n # Distributions that were structured as delta OTAs\n if self.is_delta_ota():\n return '%s%s' % (self.find_s3_root(), self.DELTA_OTA_ZIP_FILE)\n else:\n return self.find_s3_path_fota()", "def getPath(self) -> \"SoPath *\":\n return _coin.SoSearchAction_getPath(self)", "def getPath(self) -> \"SoPath const *\":\n return _coin.SoEventCallback_getPath(self)", "def getAttachedPath(self) -> \"SoPath *\":\n return _coin.SoPathSensor_getAttachedPath(self)", "def fullpath(self):\n top = self\n path = []\n while top:\n path.insert(0, str(top))\n top = top.directory\n return dpath(os.path.join(*path), **self.connection)", "def remote_path(self) -> str:\n return self._remote_path", "def getPlugPath(*args, **kwargs):\n \n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Aborts the volume move operation of the specified source volume. This is a synchronous API.
def volume_move_abort(self, source_volume): return self.request( "volume-move-abort", { 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ], }, { } )
[ "def volume_move_pause(self, source_volume):\n return self.request( \"volume-move-pause\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_move_resume(self, source_volume, cutover_window=None, is_manual_cutover=None, is_override_warnings=None, cutover_attempts=None, is_keep_source=None):\n return self.request( \"volume-move-resume\", {\n 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_manual_cutover': [ is_manual_cutover, 'is-manual-cutover', [ bool, 'None' ], False ],\n 'is_override_warnings': [ is_override_warnings, 'is-override-warnings', [ bool, 'None' ], False ],\n 'cutover_attempts': [ cutover_attempts, 'cutover-attempts', [ int, 'None' ], False ],\n 'is_keep_source': [ is_keep_source, 'is-keep-source', [ bool, 'None' ], False ],\n }, {\n 'errors-warnings': [ ErrorsWarningsInfo, True ],\n } )", "async def abort(self, **kwargs: Any) -> None:\n\n # set abort event\n log.info(\"Aborting current image and sequence...\")\n self.expose_abort.set()\n\n # do camera-specific abort\n await self._abort_exposure()\n\n # wait until state is not EXPOSING anymore\n while await self.get_exposure_status() == ExposureStatus.EXPOSING:\n await asyncio.sleep(0.1)", "def volume_move_cutover(self, source_volume, cutover_window=None):\n return self.request( \"volume-move-cutover\", {\n 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def abort_upgrade(self, context, upgrade):\n return self.call(context, self.make_msg('abort_upgrade',\n upgrade=upgrade))", "def abort(self) -> None:\n\t\tlog.info('Stopping acquisition')\n\n\t\terr = self.dll.saAbort(self.deviceHandle)\n\t\textrainfo: Optional[str] = None\n\t\tif err == saStatus.saDeviceNotConfiguredErr:\n\t\t\textrainfo = 'Device was already idle! 
Did you call abort ' \\\n\t\t\t\t\t\t'without ever calling initiate()'\n\n\t\tself.check_for_error(err, 'saAbort', extrainfo)", "def volume_move_status(self, source_volume=None, is_verbose=None):\n return self.request( \"volume-move-status\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_verbose': [ is_verbose, 'is-verbose', [ bool, 'None' ], False ],\n }, {\n 'status': [ VolMoveStatusInfo, True ],\n } )", "def AbortUnitOperation(self):\n # Ask the core server to abort the current unit operation by calling AbortSequence\n bSuccess = self.pCoreServer.CLIAbortUnitOperation(\"CLI\")\n if not bSuccess:\n print \"Failed to abort unit operation\"", "def cancelMove(self) -> None:\n frames_already_done = self._totalFrameNeeded - self._frameNeeded\n for _ in range(frames_already_done):\n self.unit.moveTo(self.sourceTile.graphics.center)\n self.isPerformed = True", "def cancel(self):\n self.sa_session.rollback()", "def abort(self):\n result = self.lastTransaction.abort()\n self.lastTransaction = None\n return result", "def SystemAbort(self):\n debug('GCSCommands.SystemAbort()')\n self.__msgs.send(chr(27))", "def cancel(self):\n self.log.info(\"Starting the cancel of transfer_wrapper %s\" % self)\n if self.dest.startswith('file:///'):\n dest = self.dest[7:]\n else:\n dest = self.dest\n if os.path.exists(dest):\n self.log.info(\"Unlinking partially complete dest file %s.\" % dest)\n try:\n os.unlink(dest)\n except Exception as exc:\n print_exc(exc)\n else:\n self.log.info(\"Destination path %s doesn't exist; not deleting.\" % \\\n dest)\n self._killflag = True\n if self.pid:\n self.log.info(\"Killing transfer process at PID %s.\" % str(self.pid))\n try:\n os.killpg(self.pid, signal.SIGTERM)\n self.log.info(\"Process return status: %s.\" % \\\n str(os.waitpid(self.pid, os.P_WAIT)))\n except:\n pass\n self.pid = None\n else:\n self.log.warning(\"I don't know what PID to kill! Doing nothing.\")\n self.log.info(\"Setting the kill flag, which should cause the \" \\\n \"transfer_wrapper to exit soon.\")", "def destroy_volume(self, volume):\r\n url = REST_BASE + '/storage/%s' % (volume.id)\r\n status = int(self.connection.request(action=url,\r\n method='DELETE').status)\r\n return status == httplib.OK", "def abort(self):\n\n self.progtrack.download_add_progress(0, -self.dlcurrent)\n self.progtrack.upload_add_progress(-self.ulcurrent)\n self.completed = True", "def volume_down(self):\n self._player.volume -= self._volume_increment", "def revert(self, volume_id):\n return self._snap_operation(3, volume_id)", "def abort(self):\n qparam = {DSORT_UUID: [self._dsort_id]}\n self._client.request(\n HTTP_METHOD_DELETE, path=f\"{URL_PATH_DSORT}/{DSORT_ABORT}\", params=qparam\n )", "def _delete_lun(self, volume):\n v = self.vmem_vip\n\n LOG.info(_(\"Deleting lun %s\"), volume['id'])\n\n try:\n self._send_cmd(v.lun.bulk_delete_luns,\n 'LUN deletion started',\n self.container, volume['id'])\n\n except ViolinBackendErrNotFound:\n LOG.info(_(\"Lun %s already deleted, continuing\"), volume['id'])\n\n except ViolinBackendErrExists:\n LOG.warn(_(\"Lun %s has dependent snapshots, skipping\"),\n volume['id'])\n raise exception.VolumeIsBusy(volume_name=volume['id'])\n\n except Exception:\n LOG.exception(_(\"Lun delete failed!\"))\n raise\n\n self.lun_tracker.free_lun_id_for_volume(volume)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the name of a flexible volume, get the autosize settings. This API is not supported for Infinite Volumes.
def volume_autosize_get(self, volume): return self.request( "volume-autosize-get", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'increment-size': [ basestring, False ], 'minimum-size': [ basestring, False ], 'grow-threshold-percent': [ int, False ], 'maximum-size': [ basestring, False ], 'shrink-threshold-percent': [ int, False ], 'is-enabled': [ bool, False ], 'mode': [ basestring, False ], } )
[ "def volume_autosize_set(self, volume, reset=None, increment_size=None, minimum_size=None, grow_threshold_percent=None, maximum_size=None, shrink_threshold_percent=None, is_enabled=None, mode=None):\n return self.request( \"volume-autosize-set\", {\n 'reset': [ reset, 'reset', [ bool, 'None' ], False ],\n 'increment_size': [ increment_size, 'increment-size', [ basestring, 'None' ], False ],\n 'minimum_size': [ minimum_size, 'minimum-size', [ basestring, 'None' ], False ],\n 'grow_threshold_percent': [ grow_threshold_percent, 'grow-threshold-percent', [ int, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'maximum_size': [ maximum_size, 'maximum-size', [ basestring, 'None' ], False ],\n 'shrink_threshold_percent': [ shrink_threshold_percent, 'shrink-threshold-percent', [ int, 'None' ], False ],\n 'is_enabled': [ is_enabled, 'is-enabled', [ bool, 'None' ], False ],\n 'mode': [ mode, 'mode', [ basestring, 'None' ], False ],\n }, {\n } )", "def _vmware_auto_resize_config(self, args: parser_extensions.Namespace):\n kwargs = {\n 'enabled': self._auto_resize_enabled(args),\n }\n if flags.IsSet(kwargs):\n return messages.VmwareAutoResizeConfig(**kwargs)\n return None", "def auto_scale(self):\r\n return self._get_client(\"autoscale\")", "def volume_size(self, volume, new_size=None):\n return self.request( \"volume-size\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n }, {\n 'is-fixed-size-flex-volume': [ bool, False ],\n 'is-readonly-flex-volume': [ bool, False ],\n 'is-replica-flex-volume': [ bool, False ],\n 'volume-size': [ basestring, False ],\n } )", "def flex_volume(self) -> Optional[pulumi.Input['AlertmanagerSpecVolumesFlexVolumeArgs']]:\n return pulumi.get(self, \"flex_volume\")", "def flex_volume(self) -> Optional[pulumi.Input['ThanosRulerSpecVolumesFlexVolumeArgs']]:\n return pulumi.get(self, \"flex_volume\")", "def get_volume_options(mnode, volname, option=None):\n if not option:\n _, get_vol_options, err = RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/options\" % volname, httplib.OK, None)\n else:\n _, get_vol_options, err = RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/options/%s\" % (volname, option),\n httplib.OK, None)\n if not err:\n get_vol_options = json.loads(get_vol_options)\n return get_vol_options\n return None", "def autosize_cooling_storage(self, **kwargs):\n\n demand = self.energy_simulation.__getattr__(\n 'cooling_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n self.cooling_storage.autosize(demand, **kwargs)", "def get_sfx_volume() -> float:\n return AudioSettings.get_volumes()[0]", "def volume_size_async(self, volume_name, new_size=None):\n return self.request( \"volume-size-async\", {\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-status': [ basestring, False ],\n 'result-error-code': [ int, False ],\n 'volume-size': [ basestring, False ],\n } )", "def __get_pv_attrs(k8s_conf, pv_name):\n core_client = k8s_core_client(k8s_conf)\n pv_list = core_client.list_persistent_volume()\n logger.debug('pv_list - %s', pv_list)\n for pv in pv_list.items:\n logger.debug('pv - %s', pv)\n if pv.metadata.name == pv_name:\n 
return pv.spec.capacity.get('storage'), pv.spec.host_path.path\n return None, None", "def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"maximum_volume_size\")", "def MaxVolume (self):\n return self.m_Pump.MaxSinglePistonModeVolume()", "def read_volume( mdserver_name, fields ):\n global conf\n\n ctl_root = VOLUME_CTL_ROOT( conf, {'NAME': mdserver_name} )\n conf_path = VOLUME_CONF_PATH( ctl_root )\n\n try:\n vol_conf = read_config( conf_path, fields )\n except Exception, e:\n raise MDMethodFailed( \"read_volume\", \"could not read config, exception = '%s'\" % e )\n\n ret = {}\n for f in fields:\n ret[f] = vol_conf[f]\n \n return ret", "def get_music_volume() -> float:\n return AudioSettings.get_volumes()[1]", "def _auto_resize_enabled(self, args: parser_extensions.Namespace):\n if flags.Get(args, 'enable_auto_resize'):\n return True\n if flags.Get(args, 'disable_auto_resize'):\n return False\n return None", "def get_max_volume(self) -> float:", "def volume(self):\n return self.intrinsicValue(\"measuredvolume\")", "def resize_volume(self, size):\r\n curr_size = self.volume.size\r\n if size <= curr_size:\r\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\r\n \"than the current volume size of '%s'.\" % curr_size)\r\n body = {\"volume\": {\"size\": size}}\r\n self.manager.action(self, \"resize\", body=body)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Resumes a previously paused volume move operation of a specified source volume. This is an asynchronous API. It will run a series of checks to determine if the volume move can be resumed. If there are no errors or warnings, the API will return successfully. The move will be resumed. The status of the move can be obtained from the volume-move-status API. If any of the checks result in an error or warning, the API will return with an error. If the checks result in no errors but one or more warnings and is-override-warnings is set to true, the API will return successfully and the move will be resumed.
def volume_move_resume(self, source_volume, cutover_window=None, is_manual_cutover=None, is_override_warnings=None, cutover_attempts=None, is_keep_source=None): return self.request( "volume-move-resume", { 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ], 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ], 'is_manual_cutover': [ is_manual_cutover, 'is-manual-cutover', [ bool, 'None' ], False ], 'is_override_warnings': [ is_override_warnings, 'is-override-warnings', [ bool, 'None' ], False ], 'cutover_attempts': [ cutover_attempts, 'cutover-attempts', [ int, 'None' ], False ], 'is_keep_source': [ is_keep_source, 'is-keep-source', [ bool, 'None' ], False ], }, { 'errors-warnings': [ ErrorsWarningsInfo, True ], } )
[ "def volume_move_pause(self, source_volume):\n return self.request( \"volume-move-pause\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "async def resume_(self, ctx):\r\n vc = ctx.voice_client\r\n\r\n if not vc or not vc.is_connected():\r\n return await ctx.send('I am not playing anything.')\r\n elif not vc.is_paused():\r\n return\r\n\r\n vc.resume()\r\n await ctx.send(f'**{ctx.author}**: resumed the song!')", "def pause_resume(self):\n if self.activeState in [LOOP,AGAIN,NEXT,UPDATE]:\n self.setActiveState(PAUSE)\n else:\n self.setActiveState(LOOP) \n self.status_update.emit(\"Status: Running...\")\n self.condition.wakeOne()", "def volume_move_abort(self, source_volume):\n return self.request( \"volume-move-abort\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def resume_move_messages(self):\n self.sdk.SCC_ResumeMoveMessages(self._serial)", "def volume_move_status(self, source_volume=None, is_verbose=None):\n return self.request( \"volume-move-status\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_verbose': [ is_verbose, 'is-verbose', [ bool, 'None' ], False ],\n }, {\n 'status': [ VolMoveStatusInfo, True ],\n } )", "async def resume(self, ctx):\r\n\t\tstate = self.get_voice_state(ctx.message.server)\r\n\t\tif state.is_playing():\r\n\t\t\tplayer = state.player\r\n\t\t\tplayer.resume()", "async def resume(self, ctx):\n self.play_status[ctx.guild.id] = True\n player = self.players[ctx.guild.id]\n ctx.message.guild.voice_client.resume() # Resumes the audio stream\n await ctx.send(f\"Resumed {player.title}\")", "def volume_scrub_resume(self, name=None):\n return self.request( \"volume-scrub-resume\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def resume(self):\n self._call(\"resume\")", "def resume(shelf=None):\n\n _act_on_guests(shelf, \"resume\")", "def resume(self):\n self.r2api.frida_continue()", "def case_resume(args: List[str]) -> None:\n\tprint(\"Resuming video download.\")\n\tcase_default(args,resume=True)", "async def resume(self):\n await self._pytheos.api.player.set_play_state(self._player.player_id, models.player.PlayState.Playing)", "def resume(self):\n if not self.is_playing() and self.has_media():\n if self.player.get_length() - self.player.get_time() > 0.5:\n self.player.audio_set_volume(self.player.audio_get_volume())\n self.player.pause()\n block_until = time.time() + 1\n while not self.player.is_playing() and time.time() < block_until: pass\n return True\n return False", "def resume(self, pid):\n pass", "async def _resume(self):\n voice_client = self.client\n \n data = {\n 'op' : self.RESUME,\n 'd' : {\n 'token' : voice_client._token,\n 'server_id' : str(voice_client.channel.guild.id),\n 'session_id' : voice_client._session_id,\n },\n }\n await self.send_as_json(data)", "async def resume_request(self, paused: dict):\n if 'oauth' in paused:\n await Oauth2Request.resume_extract(paused['oauth'])\n req_base = await self._get_retrieve_request_base()\n request2 = req_base(ObjectType=paused['object_type'], ContinueRequest=paused['continue_request'])\n await self.submit_request(request2)", "def _resume_paused_producer(self) -> None:\n if self._paused_producer and self._producer:\n self._paused_producer = False\n self._producer.resumeProducing()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
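A minimal usage sketch for the volume-move-resume wrapper documented in the row above. It assumes `server` is an already-authenticated API object exposing these generated wrapper methods (as in the NetApp ONTAPI Python bindings); the volume name and option values are illustrative assumptions.

# Hypothetical caller: resume a paused volume move, accepting non-fatal warnings.
def resume_volume_move(server, source_volume="vol_data01"):
    # Checks that fail are reported back in the 'errors-warnings' element.
    return server.volume_move_resume(
        source_volume=source_volume,
        cutover_window=45,           # seconds allowed for cutover (illustrative)
        cutover_attempts=3,
        is_override_warnings=True,   # resume even if checks raise warnings
    )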
Start RAID parity scrubbing on the named traditional volume, plex, or RAID group. RAID parity scrubbing compares the data disks to the parity disk in a RAID group, correcting the parity disk's contents as necessary. If a plex name is given, then scrubbing is started on all RAID groups contained in the plex. If a RAID group name is given, then scrubbing is started only in that RAID group. If no name is given, then scrubbing is started on the RAID groups within all online traditional volumes and aggregates. Use 'volume-scrub-list-info' to check scrub status.
def volume_scrub_start(self, name=None): return self.request( "volume-scrub-start", { 'name': [ name, 'name', [ basestring, 'None' ], False ], }, { } )
[ "def volume_split(self, new_volume_name, plex):\n return self.request( \"volume-split\", {\n 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],\n 'plex': [ plex, 'plex', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_scrub_resume(self, name=None):\n return self.request( \"volume-scrub-resume\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_start(mnode, volname, force=False):\n data = {\n \"force-start-bricks\": force\n }\n return RestClient(mnode).handle_request(\n \"POST\", \"/v1/volumes/%s/start\" % volname,\n httplib.OK, data)", "def volume_scrub_suspend(self, name=None):\n return self.request( \"volume-scrub-suspend\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def start(tolerant):\n # report operation\n llecho('Activating all existing LVM volume groups')\n\n # tolerant mode on: LVM with --partial\n if tolerant == True:\n status = run('%s %s' % (CMD_START_LVM, '--partial'))\n\n # tolerant mode off: start LVM manually\n else:\n status = run(CMD_START_LVM)\n\n # cannot start volume groups: fail\n if status != 0:\n llecho('Error: cannot activate LVM volume groups')\n sys.exit(1)", "def mount(self, volname):\n check = self.mount_check(volname)\n if check:\n return check\n cmdline = self.vol_dict[volname][\"cmdline\"]\n mount_cmd = docker[cmdline]\n mount_cmd()\n self.vol_dict[volname][\"mounted\"] = True\n return self.vol_dict[volname][\"Local\"]", "def volume_verify_start(self, volume=None, fix_plex=None, log_only=None):\n return self.request( \"volume-verify-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'fix_plex': [ fix_plex, 'fix-plex', [ int, 'None' ], False ],\n 'log_only': [ log_only, 'log-only', [ bool, 'None' ], False ],\n }, {\n } )", "def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def rescan_vols(op_code):\n\n with open_scini_device() as fd:\n ioctl(fd, op_code, struct.pack('Q', 0))", "def volume_scrub_stop(self, name=None):\n return self.request( \"volume-scrub-stop\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def prepare_luna(luna_raw, luna_abbr, luna_data, luna_segment, force=False):\r\n\r\n print('start changing luna name')\r\n # A persistent file serves as a flag whether the preprocessing has been done before.\r\n finished_flag = '.flag_prepareluna'\r\n \r\n if not os.path.exists(finished_flag) or force:\r\n\r\n subsetdirs = [os.path.join(luna_raw,f) for f in os.listdir(luna_raw) if f.startswith('subset') and os.path.isdir(os.path.join(luna_raw,f))]\r\n if not os.path.exists(luna_data):\r\n os.mkdir(luna_data)\r\n\r\n luna_abbr_csv = os.path.join(os.path.dirname(os.path.realpath(__file__)), luna_abbr)\r\n abbrevs = np.array(pandas.read_csv(luna_abbr_csv,header=None))\r\n namelist = list(abbrevs[:,1])\r\n ids = abbrevs[:,0]\r\n \r\n # Move renamed .raw and .mhd files to 'luna_data'(a.k.a luna/allset).\r\n for d in subsetdirs: # For each luna subset folder.\r\n files = os.listdir(d)\r\n files.sort()\r\n for f in files:\r\n name = f[:-4] # remove the extension of file name.\r\n id = ids[namelist.index(name)]\r\n filename = '0'*(3-len(str(id)))+str(id)\r\n 
shutil.move(os.path.join(d,f),os.path.join(luna_data,filename+f[-4:]))\r\n # print(os.path.join(luna_data,str(id)+f[-4:]))\r\n\r\n # Update the 'ElementDataFile' key in .mhd file with new .raw file name in 'luna_data'(a.k.a luna/allset).\r\n files = [f for f in os.listdir(luna_data) if f.endswith('mhd')]\r\n for file in files:\r\n with open(os.path.join(luna_data,file),'r') as f:\r\n content = f.readlines()\r\n id = file.split('.mhd')[0]\r\n filename = '0'*(3-len(str(id)))+str(id)\r\n content[-1]='ElementDataFile = '+filename+'.raw\\n'\r\n # print(content[-1])\r\n with open(os.path.join(luna_data,file),'w') as f:\r\n f.writelines(content)\r\n\r\n # Renamed .mhd files in 'luna_segment'(a.k.a luna/seg-lungs-LUNA16) to abbreviated names.\r\n seglist = os.listdir(luna_segment)\r\n for f in seglist:\r\n if f.endswith('.mhd'):\r\n\r\n name = f[:-4]\r\n lastfix = f[-4:]\r\n else:\r\n name = f[:-5]\r\n lastfix = f[-5:]\r\n if name in namelist:\r\n id = ids[namelist.index(name)]\r\n filename = '0'*(3-len(str(id)))+str(id)\r\n\r\n shutil.move(os.path.join(luna_segment,f),os.path.join(luna_segment,filename+lastfix))\r\n # print(os.path.join(luna_segment,filename+lastfix))\r\n\r\n # Update the 'ElementDataFile' key in lung segment .mhd file in \r\n # 'luna_segment'(a.k.a luna/seg-lungs-LUNA16) with abbreviated .zraw file name.\r\n files = [f for f in os.listdir(luna_segment) if f.endswith('mhd')]\r\n for file in files:\r\n with open(os.path.join(luna_segment,file),'r') as f:\r\n content = f.readlines()\r\n id = file.split('.mhd')[0]\r\n filename = '0'*(3-len(str(id)))+str(id)\r\n content[-1]='ElementDataFile = '+filename+'.zraw\\n'\r\n # print(content[-1])\r\n with open(os.path.join(luna_segment,file),'w') as f:\r\n f.writelines(content)\r\n print('end changing luna name')\r\n f= open(finished_flag,\"w+\")", "def pipeline_subvolume(args, sample, individual=False, save_data=True, render=False, use_wide=False):\n # 1. Load sample\n # Unpack paths\n save_path = args.save_image_path\n print('Sample name: ' + sample)\n data, bounds = load_bbox(str(args.data_path / sample), args.n_jobs)\n print_orthogonal(data, savepath=str(save_path / \"Images\" / (sample + \"_input.png\")))\n if render:\n render_volume(data, str(save_path / \"Images\" / (sample + \"_input_render.png\")))\n\n # 2. Orient array\n data, angles = orient(data, bounds, args.rotation)\n print_orthogonal(data, savepath=str(save_path / \"Images\" / (sample + \"_orient.png\")))\n\n # 3. 
Crop and flip volume\n if use_wide:\n wide = args.size_wide\n else:\n wide = args.size['width']\n data, crop = crop_center(data, args.size['width'], wide, method=args.crop_method) # crop data\n print_orthogonal(data , savepath=str(save_path / \"Images\" / (sample + \"_orient_cropped.png\")))\n if render:\n render_volume(data, str(save_path / \"Images\" / (sample + \"_orient_cropped_render.png\")))\n\n # Different pipeline for large dataset\n if data.shape[0] > 799 and data.shape[1] > 799 and save_data:\n create_subvolumes(data, sample, args)\n return\n\n # Save crop data\n if data.shape[1] > args.size['width'] and save_data:\n save(save_path + '/' + sample + '_sub1', sample + '_sub1_', data[:, :args.size['width'], :])\n save(save_path + '/' + sample + '_sub2', sample + '_sub2_', data[:, -args.size['width']:, :])\n elif save_data:\n save(save_path + '/' + sample, sample, data)\n else:\n return data", "def set_segment_data_from_diversions(self, name: str, data):\n self._check_segment_data_name(name)\n if np.isscalar(data):\n self.set_segment_data_from_scalar(name, data, \"diversions\")\n return\n # Prepare mapping between divid <-> nseg\n nseg2divid = self.segment_data.loc[\n self.segment_data.iupseg != 0, \"divid\"]\n mapping = invert_series(nseg2divid)\n ignore = set(self.diversions.index[~self.diversions.in_model])\n dtype = self.segment_data[name].dtype\n data = transform_data_to_series_or_frame(\n data, dtype, self.time_index, mapping, ignore)\n self._set_segment_data(name, data)", "def setup_block_in_agentd_vsr_start(self):\n vs_obj = self.find_or_create_vol('AGENTD-BLOCK-VSR-ON-START', ['CREATE', 'BIND', 'PUBLISH', 'MOUNT', 'ATTACH_FS'],\\\n account_id=self.account_id(), vol_size_gib=1, service_plan_id=self.service_plan_id(),\\\n cluster_id=self.cluster.meta_id(), node_id=self.node.meta_id(), target_path='/mnt')\n print(\"Setting agentd REI\", REI_VREQ_VSC_BLOCK_ON_START)\n self.kubectl_helper.rei_set(REI_VREQ_VSC_BLOCK_ON_START, self.node.service_pod(), bool_value=True, do_not_delete=True)\n retry_after = self.indefinite_sequence([10, 10, 5])\n count = 0\n found = False\n while not found or count < 2: # got to see this at least twice to ensure that the VSR stays blocked\n for vsr_obj in vs_obj.vol_vsrs(self.nuvo_mgmt, is_terminated=False):\n if vsr_obj.requestedOperations == ['VOL_SNAPSHOT_CREATE']:\n if vsr_obj.volumeSeriesRequestState == 'PAUSING_IO':\n found = True\n count += 1\n print(\"Volume\", vs_obj.name, \"state:\", vs_obj.volumeSeriesState, \": found VOL_SNAPSHOT_CREATE VSR in state\", vsr_obj.volumeSeriesRequestState, \"count(max 2):\", count)\n else:\n print(\"Volume\", vs_obj.name, \"state:\", vs_obj.volumeSeriesState, \": found VOL_SNAPSHOT_CREATE VSR in state\", vsr_obj.volumeSeriesRequestState)\n break\n if not found:\n try:\n print(\"Volume\", vs_obj.name, \"state:\", vs_obj.volumeSeriesState, \": Issuing VOL_SNAPSHOT_CREATE VSR\")\n vsr_obj = obj_helper.VolumeSeriesRequestObject()\n vsr_obj.assemble(['VOL_SNAPSHOT_CREATE'], vol_id=vs_obj.meta_id())\n vsr_obj.create(self.nuvo_mgmt)\n vsr_obj.wait(self.nuvo_mgmt, non_terminal_states=['PAUSING_IO'])\n count = 1\n except RuntimeError as err:\n print(\"Ignoring error:\", err)\n if not found or count < 2:\n time.sleep(next(retry_after))\n print(\"Volume\", vs_obj.name, \"setup complete\")", "def main():\n env = os.environ.copy()\n db_root = util.get_db_root()\n assert db_root\n part = util.get_part()\n assert part\n\n information = util.get_part_information(db_root, part)\n\n valid_devices = []\n for name, device in 
util.get_devices(db_root).items():\n if device['fabric'] == information['device']:\n valid_devices.append(name)\n\n for part, data in util.get_parts(db_root).items():\n if data['device'] in valid_devices:\n command = \"make roi_only\"\n env['XRAY_PART'] = part\n cwd = os.getenv('XRAY_FUZZERS_DIR')\n subprocess.run(command.split(' '), check=True, env=env, cwd=cwd)", "def reload_volume( mdserver_name ):\n global conf\n \n ctl_root = VOLUME_CTL_ROOT( conf, {'NAME': mdserver_name} )\n pidfile_path = PIDFILE_PATH( ctl_root )\n\n # extract the pid \n pid = get_volume_pid( pidfile_path )\n if pid == None:\n raise MDMethodFailed( \"reload_volume\", \"Could not get volume PID\")\n\n # reload\n print \"reload_volume: about to reload %s (pid = %s)\" % (mdserver_name, pid)\n os.system(\"ps aux | grep mdserverd\")\n\n print \"command: %s -k %s\" % (conf['MD_BINARY'], str(pid))\n md_proc = subprocess.Popen( [conf['MD_BINARY'], '-k', str(pid)], close_fds = True )\n rc = md_proc.wait()\n\n time.sleep(1.0)\n print \"reload_volume: reloaded, rc = %s\" % rc\n os.system(\"ps aux | grep mdserverd\")\n \n\n return rc", "def test_named_volumes(self):\n volume_name = \"control_unittest_volume{}\".format(random.randint(1, 65535))\n self.container_volumes.append(volume_name)\n self.image = 'busybox'\n self.conf = {\n \"image\": self.image,\n \"container\": {\n \"name\": self.container_name,\n \"hostname\": \"busybox\",\n \"volumes\": [\"{}:/var\".format(volume_name)]\n }\n }\n serv = create_service(self.conf, './Controlfile')\n container = Container(serv).create(prod=False)\n container.start()\n self.assertEqual(len(container.inspect['Mounts']), 1)\n self.assertEqual(len(container.inspect['Mounts'][0]['Name']), len(volume_name))\n self.assertEqual(container.inspect['Mounts'][0]['Destination'], '/var')\n self.assertTrue(\n container.inspect['Mounts'][0]['Source'].startswith(\n '/var/lib/docker/volumes'),\n msg=\"Unexpected mount source: {}\".format(\n container.inspect['Mounts'][0]['Source']))", "def test_volume_start(self):\n pass", "def restart():\n cmd = f'supervisorctl restart pocs-power-server'\n print(f'Running: {cmd}')\n subprocess.run(cmd, shell=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
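A usage sketch for volume-scrub-start above, assuming the same kind of `server` wrapper object; the RAID group path is an illustrative example.

# Hypothetical caller: start a parity scrub on a single RAID group.
def start_raid_group_scrub(server, raid_group="/aggr0/plex0/rg0"):
    # Omitting 'name' would scrub all online traditional volumes and aggregates.
    server.volume_scrub_start(name=raid_group)
    # Progress is reported separately by the volume-scrub-list-info API.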
Turns an unmirrored traditional volume into a mirrored traditional volume by adding a plex to it. The plex is either newly formed from disks chosen from a spare pool or, if the "victim-volume" option is specified, is taken from another existing unmirrored volume. The volume must currently be unmirrored. Disks may be specified explicitly using the "mirror-disks" argument list option in the same way as with the 'volume-create' and 'volume-add' APIs. The number of disks specified must exactly match the number present in the existing traditional volume. If the disks to use are not explicitly specified, then the appropriate disks are automatically selected to match those already in the traditional volume's existing plex. It is not possible to directly mirror a flexible volume; if that is the goal, then consider using 'volume-container' to find the flexible volume's containing aggregate, then use 'aggr-mirror' to mirror that aggregate (which, of course, will cause all other volumes contained in the given aggregate to become mirrored as well).
def volume_mirror(self, volume, mirror_disks=None, force=None, victim_volume=None): return self.request( "volume-mirror", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ], 'force': [ force, 'force', [ bool, 'None' ], False ], 'victim_volume': [ victim_volume, 'victim-volume', [ basestring, 'None' ], False ], }, { 'bad-disks': [ DiskInfo, True ], } )
[ "def set_mirror_volume_mounts(self, mirror_volume_mounts=True):\n\n self.mirror_volume_mounts = mirror_volume_mounts\n return self", "def volume_split(self, new_volume_name, plex):\n return self.request( \"volume-split\", {\n 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],\n 'plex': [ plex, 'plex', [ basestring, 'None' ], False ],\n }, {\n } )", "def virtdisk_MirrorVirtualDisk(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"VirtualDiskHandle\", \"Flags\", \"Parameters\", \"Overlapped\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def test_cinder_volume_mirrored(self):\n volume = self.setup_test_cinder_volume()\n site_a_hash = zaza.openstack.utilities.ceph.get_rbd_hash(\n zaza.model.get_lead_unit_name('ceph-mon',\n model_name=self.site_a_model),\n 'cinder-ceph',\n 'volume-{}'.format(volume.id),\n model_name=self.site_a_model)\n self.wait_for_mirror_state(\n 'up+replaying',\n check_entries_behind_master=True,\n application_name=self.application_name + self.site_b_app_suffix,\n model_name=self.site_b_model)\n logging.info('Checking the Ceph RBD hashes of the primary and '\n 'the secondary Ceph images')\n site_b_hash = zaza.openstack.utilities.ceph.get_rbd_hash(\n zaza.model.get_lead_unit_name('ceph-mon' + self.site_b_app_suffix,\n model_name=self.site_b_model),\n 'cinder-ceph',\n 'volume-{}'.format(volume.id),\n model_name=self.site_b_model)\n logging.info(site_a_hash)\n logging.info(site_b_hash)\n self.assertEqual(site_a_hash, site_b_hash)", "def make_remix(remix_data, mvp_clips, output_type):\n concatenate = (\n concatenate_videoclips if output_type == 'video' else concatenate_audioclips\n )\n segments = []\n for _, segment_data in remix_data:\n clip = mvp_clips[segment_data['clip']]\n segment = clip.subclip(segment_data['begin'], segment_data['end'])\n segments.append(segment)\n\n return concatenate(segments)", "def create_cloned_volume(self, volume, src_vref):\n self._login()\n self._create_lun(volume)\n self.copy_volume_data(self.context, src_vref, volume)", "def rebase_all(volumes:typing.Sequence[\"StitchSrcVolume\"], z_too=False):\n x0 = volumes[0].x0\n y0 = volumes[0].y0\n z0 = volumes[0].z0\n for volume in volumes[1:]:\n x0 = min(x0, volume.x0)\n y0 = min(y0, volume.y0)\n z0 = min(z0, volume.z0)\n for volume in volumes:\n if z_too:\n volume.rebase(x0, y0, z0)\n else:\n volume.rebase(x0, y0)", "def volume_add(self, volume, disk_size_with_unit=None, mirror_disks=None, disk_size=None, force=None, disks=None, raid_group=None, disk_count=None):\n return self.request( \"volume-add\", {\n 'disk_size_with_unit': [ disk_size_with_unit, 'disk-size-with-unit', [ basestring, 'None' ], False ],\n 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],\n 'disk_size': [ disk_size, 'disk-size', [ int, 'None' ], False ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n 'disks': [ disks, 'disks', [ DiskInfo, 'None' ], True ],\n 'raid_group': [ raid_group, 'raid-group', [ basestring, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'disk_count': [ disk_count, 'disk-count', [ int, 'None' ], False ],\n }, {\n 'bad-disks': [ DiskInfo, True ],\n } )", "def _createVolumesMd(self, volumes):\n mdPath = self.protocol._getTmpPath('viewer_volumes.xmd')\n cleanPath(mdPath)\n md = xmipp.MetaData()\n \n for volFn in volumes:\n md.clear()\n md.setValue(xmipp.MDL_IMAGE, volFn, md.addObject())\n blockName = volFn.split(\"/\")[3]\n #print \"Volume: \", 
volFn, blockName\n md.write(\"%s@%s\"% (blockName, mdPath), xmipp.MD_APPEND)\n return [self.createDataView(mdPath)]", "def mirror_rpm(self, repo_name, alias=None):\n print 'Mirror creation start'\n if alias is None:\n alias = repo_name\n cmds = []\n # create clean repository\n for conf in self.DISRS['rpm']:\n name = '%s_%s%s' % (alias, conf['os'], conf['dist'])\n path = RPM_PATH % name\n src_path = SRPM_PATH % name\n cmds.append(['mkdir', '-p', '%sPackages/' % path])\n cmds.append(['mkdir', '-p', '%sPackages/' % src_path])\n # download packages and run repo index\n cmds.extend(self.manage_rpms(repo_name, alias, update=False))\n for cmd in cmds:\n self.run(cmd)\n print 'Mirror creation done'", "def add_mirror(self, v: UVec) -> None:\n n = Vec3(v).normalize()\n self.add(f\"mirror(v = [{n.x:g}, {n.y:g}, {n.z:g}])\")", "def mount(self, volume_id, client_name, mountpath, do_vssprotection=True):\n return self._snap_operation(0, volume_id, client_name, mountpath, do_vssprotection)", "def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)", "def manage_existing(self, volume, existing_ref):\n self._validate_manage_existing_vol_type(volume)\n self._validate_manage_existing_ref(existing_ref)\n\n ref_vol_name = existing_ref['name']\n current_array = self._get_current_array()\n connected_hosts = \\\n current_array.list_volume_private_connections(ref_vol_name)\n if len(connected_hosts) > 0:\n raise exception.ManageExistingInvalidReference(\n existing_ref=existing_ref,\n reason=_(\"%(driver)s manage_existing cannot manage a volume \"\n \"connected to hosts. 
Please disconnect this volume \"\n \"from existing hosts before importing\"\n ) % {'driver': self.__class__.__name__})\n new_vol_name = self._generate_purity_vol_name(volume)\n LOG.info(\"Renaming existing volume %(ref_name)s to %(new_name)s\",\n {\"ref_name\": ref_vol_name, \"new_name\": new_vol_name})\n self._rename_volume_object(ref_vol_name,\n new_vol_name,\n raise_not_exist=True)\n # Check if the volume_type has QoS settings and if so\n # apply them to the newly managed volume\n qos = None\n qos = self._get_qos_settings(volume.volume_type)\n if qos is not None:\n self.set_qos(current_array, new_vol_name, qos)\n else:\n current_array.set_volume(new_vol_name,\n iops_limit='',\n bandwidth_limit='')\n volume.provider_id = new_vol_name\n async_enabled = self._enable_async_replication_if_needed(current_array,\n volume)\n repl_status = fields.ReplicationStatus.DISABLED\n if async_enabled:\n repl_status = fields.ReplicationStatus.ENABLED\n return {\n 'provider_id': new_vol_name,\n 'replication_status': repl_status,\n 'metadata': {'array_volume_name': new_vol_name,\n 'array_name': current_array.array_name},\n }", "def _create_volume_from_snapshot(self, snapshot, volume):\n\n cinder_volume_id = volume['id']\n cinder_snapshot_id = snapshot['id']\n size_mb = volume['size'] * units.Ki\n result = None\n spec_dict = {}\n\n LOG.debug(\"Copying snapshot %(snap_id)s onto volume %(vol_id)s \"\n \"%(dpy_name)s\",\n {'snap_id': cinder_snapshot_id,\n 'vol_id': cinder_volume_id,\n 'dpy_name': snapshot['display_name']})\n\n source_lun_info = self.vmem_mg.lun.get_lun_info(snapshot['volume_id'])\n if source_lun_info['subType'] != 'THICK':\n msg = _('Lun copy currently only supported for thick luns')\n LOG.warn(msg)\n raise exception.ViolinBackendErr(message=msg)\n\n spec_dict = self._process_extra_specs(volume)\n selected_pool = self._get_storage_pool(\n volume, size_mb, spec_dict['pool_type'], \"create_lun\")\n\n try:\n result = self.vmem_mg.lun.copy_snapshot_to_new_lun(\n source_lun=snapshot['volume_id'],\n source_snapshot_comment=self._compress_snapshot_id(\n cinder_snapshot_id),\n destination=cinder_volume_id,\n storage_pool_id=selected_pool['storage_pool_id'])\n\n if not result['success']:\n self._check_error_code(result)\n\n except Exception:\n LOG.warn(\n _(\"Copy snapshot to volume for \"\n \"snapshot %(snap)s volume %(vol)s failed!\") %\n {'snap': cinder_snapshot_id,\n 'vol': cinder_volume_id})\n raise\n\n # get the destination lun info and extract virtualdeviceid\n info = self.vmem_mg.lun.get_lun_info(object_id=result['object_id'])\n\n self._wait_for_lun_or_snap_copy(\n snapshot['volume_id'], dest_vdev_id=info['virtualDeviceID'])\n\n if volume.get('consistencygroup_id'):\n LOG.debug('Adding volume %(v)s to consistency group %(g)s',\n {'v': cinder_volume_id,\n 'g': volume['consistencygroup_id']})\n self._ensure_snapshot_resource_area(cinder_volume_id)\n self._add_to_consistencygroup(\n volume['consistencygroup_id'], cinder_volume_id)", "def auto_mount(pvc_name='', volume_mount_path=''):\n if pvc_name and volume_mount_path:\n return mount_pvc(volume_name=pvc_name, volume_mount_path=volume_mount_path)\n if 'V3IO_ACCESS_KEY' in os.environ:\n return mount_v3io()\n if 'MLRUN_PVC_MOUNT' in os.environ:\n mount = os.environ.get('MLRUN_PVC_MOUNT')\n items = mount.split(':')\n if len(items) != 2:\n raise ValueError('MLRUN_PVC_MOUNT should include <pvc-name>:<mount-path>')\n return mount_pvc(volume_name=items[0], volume_mount_path=items[1])\n raise ValueError('failed to auto mount, need to set env vars')", "def 
snap(**kwargs):\n srt = kwargs.get(\"snap\", kwargs.get(\"sn\", \"rt\")).lower()\n obj = kwargs.get(\"object\", kwargs.get(\"obj\", None))\n tgt = kwargs.get(\"target\", kwargs.get(\"tgt\", None))\n pivot = kwargs.get(\"pivot\", kwargs.get(\"pv\", None))\n\n try:\n if not obj:\n obj = cmds.ls(selection=True)[0]\n if not tgt:\n tgt = cmds.ls(selection=True)[1]\n except IndexError:\n cmds.warning(\n \"Select 'source' then 'target' or use the command's flags\"\n )\n\n if \"t\" in srt:\n if pivot:\n pos = cmds.xform(\n tgt + \".rp\", query=True, translation=True, worldSpace=True\n )\n cmds.move(pos[0], pos[1], pos[2], rotatePivotRelative=True)\n else:\n pos = cmds.xform(\n tgt, query=True, translation=True, worldSpace=True\n )\n cmds.xform(obj, translation=pos, worldSpace=True)\n\n if \"r\" in srt:\n rot = cmds.xform(tgt, query=True, rotation=True, worldSpace=True)\n cmds.xform(obj, rotation=rot, worldSpace=True)\n\n if \"s\" in srt:\n scl = cmds.getAttr(tgt + \".scale\")[0]\n cmds.setAttr(obj + \".scale\", scl[0], scl[1], scl[2], type=\"double3\")", "def create_volume():\n with settings(warn_only=True):\n run(f'docker volume create {db_volume}')", "def _chopped_volume_default(self):\n grid = self.grid\n grid.trait_set(x_max=self.slicePosition[1])\n\n volume = mlab.pipeline.volume(\n grid,\n figure=self.vscene3d.mayavi_scene,\n vmin=self.dataRange[0],\n vmax=self.dataRange[1]\n )\n\n volume._otf = self.otf\n volume._volume_property.set_scalar_opacity(self.otf)\n\n return volume" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
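A usage sketch for volume-mirror above, again assuming a `server` object exposing the wrapper; the volume name is illustrative and disks are left for automatic selection.

# Hypothetical caller: mirror a traditional volume with auto-selected spares.
def mirror_traditional_volume(server, volume="trad_vol1"):
    # Disks that could not be used come back in the 'bad-disks' element.
    return server.volume_mirror(volume=volume, force=False)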
Returns the list of guarantee types that are supported on this volume. This performs only semantic checks, so enabling a supported guarantee can still fail because of space checks.
def volume_get_supported_guarantees(self, volume): return self.request( "volume-get-supported-guarantees", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'guarantee-types': [ Guarantee, True ], } )
[ "def _get_supportedProductTypes(self) -> \"std::vector< std::string,std::allocator< std::string > >\" :\n return _core.Application__get_supportedProductTypes(self)", "def supported_types(self) -> List[BundleType]:", "def _get_io_supported_types(opset_version: target) -> Set[type]:\n supported_types = {types.fp32, types.int32}\n if opset_version is not None and opset_version >= target.iOS16:\n supported_types.add(types.fp16)\n return supported_types", "def get_grade_system_record_types(self):\n return # osid.type.TypeList", "def _get_runtime_supported_types(opset_version: target) -> Set[type]:\n supported_types = {types.fp16, types.fp32, types.int32, types.str, types.bool}\n if opset_version >= target.iOS17:\n supported_types.update({types.int8, types.uint8, types.int16, types.uint16})\n return supported_types", "def get_quantizer_metatypes() -> List[OperatorMetatype]:", "def get_quantizable_metatypes() -> List[OperatorMetatype]:", "def get_qualifier_record_types(self):\n return # osid.type.TypeList", "def get_grade_system_search_record_types(self):\n return # osid.type.TypeList", "def supported_tags(self, force_manylinux=True):\n return _get_supported(\n platform=self.platform,\n impl=self.impl,\n version=self.version,\n abi=self.abi,\n force_manylinux=force_manylinux\n )", "def list_supported_algorithms(self):\r\n return list(self._ALGORITHM_TO_VALUE_MAP.keys())", "def list_available_expectation_types(self):\n keys = dir(self)\n return [\n expectation for expectation in keys if expectation.startswith(\"expect_\")\n ]", "def valid_content_types() -> List[str]:", "def GetSupportedEngines():\r\n pass", "def valid_types(self):\n types = re.sub(r'[ ]?,[ ]?', ',', self.node.content_types).split(',')\n return [t.lower() for t in types]", "def allocated_disk_types(self) -> Sequence[str]:\n return pulumi.get(self, \"allocated_disk_types\")", "def get_all_supports(str_names_only: bool = False) -> Union[list[Support], set[str]]:\n\n if not str_names_only:\n return list(Truss.Support)\n else:\n return {s.name for s in Truss.Support}", "def supported_measure_types(self):\n return self._supported_measure_types", "def get_valid_order_types():\n response = requests.get(PublicMethods.api_url + \"/Public/GetValidOrderTypes\")\n return response" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
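A usage sketch for volume-get-supported-guarantees above, with the usual assumed `server` wrapper object and an illustrative volume name.

# Hypothetical caller: list the space guarantee types a volume will accept.
def supported_guarantees(server, volume="vol_data01"):
    # The reply's 'guarantee-types' element carries the list of Guarantee entries.
    return server.volume_get_supported_guarantees(volume=volume)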
Associate a charmap description with a specified volume.
def volume_charmap_set(self, volume, charmap=None): return self.request( "volume-charmap-set", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], 'charmap': [ charmap, 'charmap', [ basestring, 'None' ], False ], }, { } )
[ "def volume_charmap_get(self, volume):\n return self.request( \"volume-charmap-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'charmap': [ basestring, False ],\n } )", "def set_description(self, room_description):\n self.description = room_description", "def link_spec_volume(spec_dict, volume_name, pvc_name):\n is_pvc_linked = False\n for vol in spec_dict[\"volumes\"]:\n if vol[\"name\"] == volume_name:\n vol[\"persistentVolumeClaim\"][\"claimName\"] = pvc_name\n is_pvc_linked = True\n break\n if not is_pvc_linked:\n raise NotFoundError(\"volume %s not found in given spec\")", "def createChar(self, location, charmap):\n \"\"\"location: char location 0-7, equals ascii value\"\"\"\n \"\"\"charmap: array[8] of uint, five lower bits representing a pixel\"\"\"\n location = location & 0x07 # we only have 8 locations 0-7\n self._command(LiquidCrystal_I2C._LCD_SETCGRAMADDR | (location << 3))\n for i in range(0,8):\n self._send(charmap[i], LiquidCrystal_I2C._Rs)", "def set_description(description):", "def add_volume(self, volume, offset=(0,0)):\r\n self.volumes.append(volume)\r\n self.offsets.append(offset)\r\n self._set_bbox()", "def add_location(self, name, description) -> None:\n self.game_map[name] = [ description, []]", "def volumeBind(influence=\"string\", name=\"string\"):\n pass", "def attach_volume(self, node, volume, device=None):\r\n raise NotImplementedError('attach not implemented for this driver')", "def _add_mapping(self, adapter, host_uuid, vm_uuid, vios_uuid,\n device_name):\n pv = pvm_stor.PV.bld(adapter, device_name)\n tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, vm_uuid, pv)", "def add_variation(spec, title):\n variations[spec] = title", "def AttachVolume(self,\n volume: 'ebs.AWSVolume',\n device_name: str) -> None:\n\n client = self.aws_account.ClientApi(common.EC2_SERVICE)\n try:\n client.attach_volume(Device=device_name,\n InstanceId=self.instance_id,\n VolumeId=volume.volume_id)\n except client.exceptions.ClientError as exception:\n raise RuntimeError('Could not attach volume {0:s}: {1:s}'.format(\n volume.volume_id, str(exception)))\n\n volume.device_name = device_name", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n instance_name = instance['name']\n if instance_name not in self.__mounts:\n self.__mounts[instance_name] = {}\n self.__mounts[instance_name][mountpoint] = connection_info", "def attach_volume(self, context, **kwargs):\n # TODO(lyarwood): Remove this encryptor and refactor the LUKS based\n # encryptors in the U release.\n versionutils.report_deprecated_feature(\n LOG,\n \"The plain CryptsetupEncryptor is deprecated and will be removed \"\n \"in a future release. 
Existing users are encouraged to retype \"\n \"any existing volumes using this encryptor to the 'luks' \"\n \"LuksEncryptor or 'luks2' Luks2Encryptor encryptors as soon as \"\n \"possible.\")\n key = self._get_key(context).get_encoded()\n passphrase = self._get_passphrase(key)\n\n self._open_volume(passphrase, **kwargs)\n\n # modify the original symbolic link to refer to the decrypted device\n self._execute('ln', '--symbolic', '--force',\n '/dev/mapper/%s' % self.dev_name, self.symlink_path,\n root_helper=self._root_helper,\n run_as_root=True, check_exit_code=True)", "def set_vol(self, vol_dict):\n\t\tsystem = self.system\n\t\tbucell_dict = system.bucell_dict\n\n\t\t# Need to loop over bucell_dict because there might be more cells than bucells\n\t\tfor i in bucell_dict:\n\t\t\tbucell = bucell_dict[i]\n\t\t\tif bucell.name in vol_dict:\n\t\t\t\tbucell.vol = vol_dict[bucell.name]\n\n\t\t# We treat total volume separately\n\t\tsystem.total_vol = vol_dict['total volume']\n\n\t\tself._volume_set = 'yes'", "def change_map(self, pos: int, char: str) -> None:\r\n map_list = list(self.map)\r\n map_list[pos] = char\r\n self.map = \"\".join(map_list)", "def create_apt_conf(self, entry, metadata):\n raise NotImplementedError", "def add_to_vol(self, volume):\n volume[self.segmentation, 0] = self.color[0]/255\n volume[self.segmentation, 1] = self.color[1]/255\n volume[self.segmentation, 2] = self.color[2]/255", "def _set_description(self, revision_range=None):\n if self.options.guess_description and not self.options.description:\n self.options.description = self.extract_description(revision_range)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
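A usage sketch for volume-charmap-set above; the `server` object, the volume name, and the charmap value are all illustrative assumptions (valid charmap strings depend on the Data ONTAP release).

# Hypothetical caller: associate a character map with a CIFS-serving volume.
def set_volume_charmap(server, volume="vol_cifs01", charmap="iso8859-1"):
    # Passing charmap=None would clear any existing association.
    server.volume_charmap_set(volume=volume, charmap=charmap)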
DEFINED HERE FOR BACKWARDS COMPATIBILITY ONLY. CHANGE OVER TO USING THE NEW 'volume-get-filer-info' AS SOON AS POSSIBLE. Get WAFL status information.
def volume_wafl_info(self): return self.request( "volume-wafl-info", { }, { 'root-volume': [ basestring, False ], 'disk-types': [ basestring, False ], 'snapshots-max': [ int, False ], 'checksum-types': [ basestring, False ], } )
[ "def volume_get_filer_info(self):\n return self.request( \"volume-get-filer-info\", {\n }, {\n 'disk-types': [ basestring, False ],\n 'default-raidtype': [ basestring, False ],\n 'checksum-types': [ basestring, False ],\n 'root-volume': [ basestring, False ],\n 'raidgroup-size': [ RaidgroupSizeInfo, True ],\n 'allowed-raidtypes': [ RaidtypeInfo, True ],\n 'snapshots-max': [ int, False ],\n } )", "def test_volume_info(self):\n pass", "def get_status(self):\n result = None\n try:\n r = requests.get(self.url_status)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result", "def describe_volume_status(DryRun=None, VolumeIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass", "def get_status(self):\n r = requests.get(self.url_status)\n try:\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result", "def volume_status(mnode, volname):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/status\" % volname,\n httplib.OK, None)", "def advapi32_FileEncryptionStatus(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpFileName\", \"lpStatus\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_AVs(filter='F606W',verbose=True):\n\n fieldinfo = cft.get_fieldinfo()\n\n for ff in fieldinfo.keys():\n if verbose: print ' - Getting Av in filter '+filter+' for '+fieldinfo[ff]['name']\n Av,Ebv = kbs.getAv(fieldinfo[ff]['ra'],fieldinfo[ff]['dec'],filter)\n print Av, Ebv", "def get_fru_info():\n\n status, ret_values = \\\n grk.run_key_u(\"Run IPMI Standard Command fru print -N 50\")\n\n # Manipulate the \"Device not present\" line to create a \"state\" key.\n ret_values = re.sub(\"Device not present\", \"state : Device not present\",\n ret_values)\n\n return [vf.key_value_outbuf_to_dict(x) for x in re.split(\"\\n\\n\",\n ret_values)]", "def test_azure_service_api_volume_get(self):\n pass", "def get_status(self): # {\n device = \"\"\n volume = \"\"\n artist = \"\"\n title = \"\"\n album = \"\"\n current_time = \"\"\n duration = \"\"\n paused = \"\"\n with self.lock: # {\n if self.cas: # {\n device = self.cas.get_name()\n if self.connected: # {\n muted, pre_muted_vol = self.cas.get_muted()\n # unicode speaker characters\n SPEAKER = \"\\U0001F508\"\n SPEAKER_1 = \"\\U0001F509\"\n SPEAKER_3 = \"\\U0001F50A\"\n SPEAKER_MUTE = \"\\U0001F507\"\n if muted:\n volume = SPEAKER_MUTE + \"%03d\" % int(100 * pre_muted_vol + 0.5)\n else:\n volume = SPEAKER_3 + \"%03d\" % int(100 * self.cas.get_vol() + 0.5)\n\n track_info = self.cas.get_track_info()\n if track_info is None:\n print(\"Disconnected from device:\")\n self.disconnect()\n self.cas = None\n self.connected = False\n else:\n if track_info != \"\":\n artist, title, album, current_time, duration = track_info\n # track_status = \"%s - %s (%s)\" % (artist, title, album)\n # playback_status = \"%s/%s \" % (current_time, duration)\n\n try:\n if self.cas.get_paused():\n paused = \"1\"\n else:\n paused = \"0\"\n except AttributeError:\n # think this can occur if self.cas happens to die in the midst\n pass\n # }\n # }\n\n connected = \"1\" if self.connected else \"0\"\n # }\n return connected, device, volume, artist, title, album, current_time, duration, paused", "def __parseStatInfoFromApiOutput( self, statInfo 
):\n metadataDict = {'File' : False, 'Directory' : False}\n metadataDict['ModTime'] = statInfo.modtime\n metadataDict['ModTimeStr'] = statInfo.modtimestr\n metadataDict['Id'] = statInfo.id\n metadataDict['Size'] = statInfo.size\n\n statFlags = statInfo.flags\n metadataDict['Executable'] = bool( statFlags & StatInfoFlags.X_BIT_SET )\n metadataDict['Directory'] = bool( statFlags & StatInfoFlags.IS_DIR )\n metadataDict['Other'] = bool( statFlags & StatInfoFlags.OTHER )\n metadataDict['File'] = ( not metadataDict['Other'] and not metadataDict['Directory'] )\n metadataDict['Offline'] = bool( statFlags & StatInfoFlags.OFFLINE )\n metadataDict['PoscPending'] = bool( statFlags & StatInfoFlags.POSC_PENDING )\n metadataDict['Readable'] = bool( statFlags & StatInfoFlags.IS_READABLE )\n metadataDict['Writable'] = bool( statFlags & StatInfoFlags.IS_WRITABLE )\n\n return metadataDict", "def revisionfiles_info(unrestricted=False):", "def svn_fs_info(*args) -> \"SWIGTYPE **\":\n return _fs.svn_fs_info(*args)", "def lv_params(self):\n ALLLV = []\n if self.LVM_SNAP:\n vglist = self.__snap_stanza_read(self.LVM_SNAP, 'lsvg -o')\n if vglist:\n for vg in vglist:\n LVS = []\n tempfile = self.CWD + 'lvm/' + vg + '.snap'\n if access(tempfile, R_OK):\n lv_params = self.__snap_stanza_read(open(tempfile), 'lsvg -l ' + vg)\n if lv_params:\n for record in lv_params:\n LV = {}\n if not ('LV NAME' in record) and not (vg+':' in record):\n LV.update({'name' : record.split()[0]})\n LV.update({'type' : record.split()[1]})\n if record.split()[2] == record.split()[3]:\n LV.update({'copies' : '1'})\n else:\n if (int(record.split()[3])/int(record.split()[2])) == 2:\n LV.update({'copies' : '2'})\n else:\n LV.update({'copies' : 'N/A'})\n LV.update({'state' : record.split()[5]})\n LV.update({'mount' : record.split()[6]})\n LVS.append(LV)\n ALLLV.append({'volgroup' : vg , 'volumes' : LVS})\n else:\n print \"cannot access vg - \" + vg\n else:\n return None\n else:\n return None\n if self.FS_SNAP:\n df_params = self.__snap_stanza_read(self.FS_SNAP, 'df -k')\n if df_params:\n for vg in ALLLV:\n for lv in vg['volumes']:\n lv.update({'mounted' : 'No', 'used' : 'N/A', 'iused' : 'N/A'})\n for vg in ALLLV:\n for lv in vg['volumes']:\n for record in df_params:\n if '/dev/'+lv['name'] in record:\n lv.update({'mounted' : 'Yes', 'used' : record.split()[3], 'iused' : record.split()[5]})\n return ALLLV", "def get_upload_status(uploadId=None):\n pass", "def test_azure_service_api_volume_types_get(self):\n pass", "def test_vrfs_read(self):\n pass", "def getInfo(self, data):\n\n id = data.get(\"id\", None)\n if not id:\n raise Exception(\"[ERR]: id required.\")\n\n out = self.getListQuery(id)\n mediabox = out.copy()\n result = self.mediaBoxStatus(mediabox)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
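A usage sketch for the deprecated volume-wafl-info call above, assuming the same `server` wrapper object; newer code should prefer volume-get-filer-info (visible among the negatives) where available.

# Hypothetical caller: fetch filer-wide WAFL details via the legacy API.
def filer_wafl_summary(server):
    # Reply fields include 'root-volume', 'disk-types', 'snapshots-max',
    # and 'checksum-types', mirroring the output signature above.
    return server.volume_wafl_info()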
Set the given volume's language mapping.
def volume_set_language(self, volume, language_code): return self.request( "volume-set-language", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], 'language_code': [ language_code, 'language-code', [ basestring, 'None' ], False ], }, { } )
[ "def _set_language(self, language):\n self.m_language = language", "def update(self,language):\n\n for key, value in language.items():\n self.language[key] = value", "def change_lang(self, new_lang: str):\r\n self.lang = new_lang", "def lang(self, language):\r\n doc.lang = language", "def set_language_properties(self,iSurveyID,aSurveyLocaleData,sLanguage=None):", "def set_language(lang):\n # from django.utils.translation import activate\n # activate(lang)\n request.session['language'] = lang\n from .. import language\n language.set_language(lang)", "def lang_change():\n global lang, current_lang\n if lang == 'english':\n lang = 'polish'\n elif lang == 'polish':\n lang = 'english'\n current_lang = language[lang]\n game.caption_change(current_lang['title'])", "def setRobotLanguage(session, language):\n\n tts = session.service(\"ALTextToSpeech\")\n\n try:\n assert language in tts.getSupportedLanguages()\n tts.setLanguage(language)\n\n except AssertionError:\n if language.lower() == \"indian\":\n print language + \" is not supported by the robot, \" \\\n \"language set to English\"\n\n tts.setLanguage(\"English\")", "def set_locale(self, locale):\n self.logger.debug(\"changing browser's locale to %s\" % locale)\n self.mapper.set_locale(locale)\n self.locale = locale", "def change_language():\n\tglobal language_dict,k\n\thello.config(text=language_dict[k])\n\tk = (1 + k) % 6", "def change_en_US(self):\n self.language = 'en_US'\n self.save_settings_to_file()\n self.load_settings()\n self.start_setting()", "def languageChanged(self, language = None):\r\n self.createTextDictionary()\r\n\r\n if language:\r\n self.phoneLanguage = language\r\n else:\r\n self.phoneLanguage = self.getLanguage()", "def updateLanguage(self, lang):\n # if an unsupported language is requested default to English\n if lang in appC.supLang:\n selLang = appC.supLang[lang]\n else:\n selLang = wx.LANGUAGE_DEFAULT\n\n if self.locale:\n assert sys.getrefcount(self.locale) <= 2\n del self.locale\n\n # create a locale object for this language\n self.locale = wx.Locale(selLang)\n if self.locale.IsOk():\n self.locale.AddCatalog(appC.langDomain)\n # self.act_log.AppendText(\"updated\")\n else:\n self.locale = None", "def load(self,language):\n\n self.language = language", "def set_languages(self, languages=list()):\n self._lang = languages", "def on_action_english_triggered(self):\n self.set_language('en_US')", "def change_ru_RU(self):\n self.language = 'ru_RU'\n self.save_settings_to_file()\n self.load_settings()\n self.start_setting()", "def set_app_locale(localeinfo):", "def volume_get_language(self, volume):\n return self.request( \"volume-get-language\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'language-code': [ basestring, False ],\n 'nfs-character-set': [ basestring, False ],\n 'oem-character-set': [ basestring, False ],\n 'language': [ basestring, False ],\n } )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
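A usage sketch for volume-set-language above; the `server` object, volume name, and language code are illustrative (accepted codes depend on the Data ONTAP release).

# Hypothetical caller: switch a volume's language mapping.
def set_volume_language(server, volume="vol_intl", language_code="en_US.UTF-8"):
    server.volume_set_language(volume=volume, language_code=language_code)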
Mount a volume on another volume (parent) with a junction-path. This API is not supported on Infinite Volume constituents.
def volume_mount(self, volume_name, junction_path, export_policy_override=None, activate_junction=None): return self.request( "volume-mount", { 'export_policy_override': [ export_policy_override, 'export-policy-override', [ bool, 'None' ], False ], 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ], 'activate_junction': [ activate_junction, 'activate-junction', [ bool, 'None' ], False ], 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ], }, { } )
[ "def mount(device, mountpoint, *args, readonly=False, mkfs=False):\n raise NotImplementedError(\"Contribute on github.com/alej0varas/pybolator\")", "def attach_to_instance(self, volume, instance, mountpoint):\r\n return volume.attach_to_instance(instance, mountpoint)", "def attach_volume(self, instance_name, device_path, mountpoint):\n\n # Find the actual instance ref so we can see if it has a Reddwarf\n # friendly volume. i.e. a formatted filesystem with UUID attribute\n # set.\n meta = self._find_by_name(instance_name)\n instance = db.instance_get(context.get_admin_context(), meta['id'])\n if instance['volumes']:\n for vol in instance['volumes']:\n if vol['mountpoint'] == mountpoint and vol.has_key('uuid'):\n # Volume has a UUID so do all the mount magic using the\n # UUID instead of the device name.\n self._container_script_modify(instance, None, vol['uuid'],\n mountpoint, 'add')\n else:\n self._container_script_modify(instance, device_path, None,\n mountpoint, 'add')\n else:\n LOG.error('No volume in the db for this instance')\n LOG.error('Instance: %s' % (instance_name,))\n LOG.error('Device: %s' % (device_path,))\n LOG.error('Mount: %s' % (mountpoint,))\n raise exception.Error('No volume in the db for this instance')", "def attach_volume(self, context, connection_info, instance, mountpoint,\n disk_bus=None, device_type=None, encryption=None):\n instance_name = instance['name']\n if instance_name not in self.__mounts:\n self.__mounts[instance_name] = {}\n self.__mounts[instance_name][mountpoint] = connection_info", "def attach_volume(self, node, volume, device=None):\r\n raise NotImplementedError('attach not implemented for this driver')", "def attach_volume(self, context, **kwargs):\n # TODO(lyarwood): Remove this encryptor and refactor the LUKS based\n # encryptors in the U release.\n versionutils.report_deprecated_feature(\n LOG,\n \"The plain CryptsetupEncryptor is deprecated and will be removed \"\n \"in a future release. 
Existing users are encouraged to retype \"\n \"any existing volumes using this encryptor to the 'luks' \"\n \"LuksEncryptor or 'luks2' Luks2Encryptor encryptors as soon as \"\n \"possible.\")\n key = self._get_key(context).get_encoded()\n passphrase = self._get_passphrase(key)\n\n self._open_volume(passphrase, **kwargs)\n\n # modify the original symbolic link to refer to the decrypted device\n self._execute('ln', '--symbolic', '--force',\n '/dev/mapper/%s' % self.dev_name, self.symlink_path,\n root_helper=self._root_helper,\n run_as_root=True, check_exit_code=True)", "def mount(self, path, mount):\n self._mountpoints[self._join_chunks(self._normalize_path(path))] = mount", "def mount(self, volume_id, client_name, mountpath, do_vssprotection=True):\n return self._snap_operation(0, volume_id, client_name, mountpath, do_vssprotection)", "def attach_to_instance(self, instance, mountpoint):\r\n instance_id = _resolve_id(instance)\r\n try:\r\n resp = self._nova_volumes.create_server_volume(instance_id,\r\n self.id, mountpoint)\r\n except Exception as e:\r\n raise exc.VolumeAttachmentFailed(\"%s\" % e)", "def auto_mount(pvc_name='', volume_mount_path=''):\n if pvc_name and volume_mount_path:\n return mount_pvc(volume_name=pvc_name, volume_mount_path=volume_mount_path)\n if 'V3IO_ACCESS_KEY' in os.environ:\n return mount_v3io()\n if 'MLRUN_PVC_MOUNT' in os.environ:\n mount = os.environ.get('MLRUN_PVC_MOUNT')\n items = mount.split(':')\n if len(items) != 2:\n raise ValueError('MLRUN_PVC_MOUNT should include <pvc-name>:<mount-path>')\n return mount_pvc(volume_name=items[0], volume_mount_path=items[1])\n raise ValueError('failed to auto mount, need to set env vars')", "def attach_volume(self, node, volume, device=None, ex_mode=None,\r\n ex_boot=False):\r\n volume_data = {}\r\n if volume is None:\r\n volume_data['type'] = 'SCRATCH'\r\n else:\r\n volume_data['type'] = 'PERSISTENT'\r\n volume_data['source'] = volume.extra['selfLink']\r\n volume_data['kind'] = 'compute#attachedDisk'\r\n volume_data['mode'] = ex_mode or 'READ_WRITE'\r\n\r\n if device:\r\n volume_data['deviceName'] = device\r\n else:\r\n volume_data['deviceName'] = volume.name\r\n\r\n volume_data['boot'] = ex_boot\r\n\r\n request = '/zones/%s/instances/%s/attachDisk' % (\r\n node.extra['zone'].name, node.name)\r\n self.connection.async_request(request, method='POST',\r\n data=volume_data)\r\n return True", "def _connect_boot_volume(self, volume, mountpoint, context, instance):\n LOG.debug('Connecting boot volume')\n instance_uuid = instance['uuid']\n volume_id = volume['id']\n\n connector = self.get_volume_connector(instance)\n connection_info = self._initialize_volume_connection(context,\n volume_id,\n connector)\n\n # Check connection_info to determine if the provided volume is\n # local to this compute node. 
If it is, then don't use it for\n # Solaris branded zones in order to avoid a known ZFS deadlock issue\n # when using a zpool within another zpool on the same system.\n extra_specs = self._get_flavor(instance)['extra_specs'].copy()\n brand = extra_specs.get('zonecfg:brand', ZONE_BRAND_SOLARIS)\n if brand == ZONE_BRAND_SOLARIS:\n driver_type = connection_info['driver_volume_type']\n if driver_type == 'local':\n msg = _(\"Detected 'local' zvol driver volume type \"\n \"from volume service, which should not be \"\n \"used as a boot device for 'solaris' \"\n \"branded zones.\")\n raise exception.InvalidVolume(reason=msg)\n elif driver_type == 'iscsi':\n # Check for a potential loopback iSCSI situation\n data = connection_info['data']\n target_portal = data['target_portal']\n # Strip off the port number (eg. 127.0.0.1:3260)\n host = target_portal.rsplit(':', 1)\n # Strip any enclosing '[' and ']' brackets for\n # IPv6 addresses.\n target_host = host[0].strip('[]')\n\n # Check if target_host is an IP or hostname matching the\n # connector host or IP, which would mean the provisioned\n # iSCSI LUN is on the same host as the instance.\n if target_host in [connector['ip'], connector['host']]:\n msg = _(\"iSCSI connection info from volume \"\n \"service indicates that the target is a \"\n \"local volume, which should not be used \"\n \"as a boot device for 'solaris' branded \"\n \"zones.\")\n raise exception.InvalidVolume(reason=msg)\n # Assuming that fibre_channel is non-local\n elif driver_type != 'fibre_channel':\n # Some other connection type that we don't understand\n # Let zone use some local fallback instead.\n msg = _(\"Unsupported volume driver type '%s' can not be used \"\n \"as a boot device for zones.\" % driver_type)\n raise exception.InvalidVolume(reason=msg)\n\n # Volume looks OK to use. Notify Cinder of the attachment.\n self._volume_api.attach(context, volume_id, instance_uuid, mountpoint)\n return connection_info", "def mount(path='/sd'):\n from machine import SD\n sd = SD()\n os.mount(sd, path)", "def _do_attach_oci_block_volume(sess, ocid, chap=False):\n _logger.debug('Attaching volume [%s]', ocid)\n vol = sess.get_volume(ocid)\n if vol is None:\n raise Exception('Volume [%s] not found' % ocid)\n\n if vol.is_attached():\n if vol.get_instance().get_ocid() == sess.this_instance().get_ocid():\n # attached to this instance already\n _msg = 'Volume [%s] already attached to this instance' % ocid\n else:\n _msg = 'Volume [%s] already attached to instance %s [%s]' % (ocid,\n vol.get_instance().get_ocid(),\n vol.get_instance().get_display_name())\n raise Exception(_msg)\n\n _logger.info('Attaching OCI Volume [%s] to this instance.' 
% ocid)\n # vol = vol.attach_to(instance_id=sess.this_instance().get_ocid(), wait=True)\n vol = vol.attach_to(instance_id=sess.this_instance().get_ocid(), use_chap=chap, wait=True)\n _logger.debug(\"Volume [%s] attached\", ocid)\n\n return vol", "def mount_device(device_name):\n device_config = settings.config['network_device'][device_name]\n print('device_mount(' + device_name, *device_config.values(), sep=', ', end=')\\n')\n topology.mount(\n device_name,\n device_config['address'],\n device_config['port'],\n device_config['username'],\n device_config['password'])", "def attach_to(child, parent, position=None):\n # this is essentially a shorthand function\n # NOTE notice the only difference in return value\n parent.add_child(child, position)\n return parent", "def getMountPoint(ob):\n container = aq_parent(aq_inner(ob))\n mps = getattr(container, '_mount_points', None)\n if mps:\n mp = mps.get(ob.getId())\n if mp is not None and (mp._p_jar is ob._p_jar or ob._p_jar is None):\n # Since the mount point and the mounted object are from\n # the same connection, the mount point must have been\n # replaced. The object is not mounted after all.\n return None\n # else the object is mounted.\n return mp\n return None", "def test_attach_elsewhere_attached_volume(self):\n api = gceblockdeviceapi_for_test(self)\n gce_fixture = self.useFixture(GCEComputeTestObjects(\n compute=api._compute,\n project=get_machine_project(),\n zone=get_machine_zone()\n ))\n\n instance_name = u\"functional-test-\" + unicode(uuid4())\n other_instance = gce_fixture.create_instance(instance_name)\n\n new_volume = api.create_volume(\n dataset_id=uuid4(),\n size=get_minimum_allocatable_size()\n )\n\n attached_volume = api.attach_volume(\n new_volume.blockdevice_id,\n attach_to=other_instance.name,\n )\n\n self.assertRaises(\n AlreadyAttachedVolume,\n api.attach_volume,\n blockdevice_id=attached_volume.blockdevice_id,\n attach_to=api.compute_instance_id(),\n )", "def mountShares(self, node, sourcedir, sourceip, mountpoint, interval):\r\n log.info(\"Mounting NFS shares on %s\", node.alias)\r\n cmd = \"mount -t nfs \" + sourceip + \":\" + sourcedir + \" \" + mountpoint\r\n log.info(cmd)\r\n\n if not node.ssh.isdir(mountpoint): node.ssh.makedirs(mountpoint)\r\n\n # TRY REPEATEDLY TO MOUNT\r\n file_list = []\r\n while not file_list:\r\n log.debug(\"automount.NfsShares.mountShares cmd: %s\" % cmd)\r\n node.ssh.execute(cmd)\r\n file_list = node.ssh.ls(mountpoint)\r\n if file_list: break\r\n log.debug(\"Sleeping %s seconds\" % interval)\r\n time.sleep(float(interval))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of volumes and a breakdown of their data and metadata footprints in their parent aggregates. The term footprint is used to refer to the portion of aggregate used space that will be freed when the relevant volume is destroyed. This can exceed the size of the volume due to metadata. If no volume is specified, footprints are displayed for all online volumes on the filer. Note that if space footprint information for more than 20 volumes is desired, the volume-footprint-list-info-iter ZAPIs will be more efficient and should be used instead.
def volume_footprint_list_info(self, volume=None):
    return self.request( "volume-footprint-list-info", {
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
    }, {
        'vol-footprint-infos': [ VolFootprintInfo, False ],
    } )
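A minimal usage sketch for the generated wrapper above; the connected client object `filer` and the shape of its reply are assumptions, only the wrapper call itself comes from the binding:

    # 'filer' is a hypothetical, already-connected client exposing the generated
    # ZAPI wrappers shown in this entry; its construction is not part of the source.
    reply = filer.volume_footprint_list_info(volume='vol0')
    # The output is documented to carry a 'vol-footprint-infos' list of
    # VolFootprintInfo records; omitting the volume argument requests
    # footprints for every online volume on the filer instead.
    all_footprints = filer.volume_footprint_list_info()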
[ "def _display_oci_volume_list(volumes, output_mode, details, truncate):\n\n def _get_displayable_size(_, volume):\n return volume.get_size(format_str=OCI_VOLUME_SIZE_FMT.HUMAN.name)\n\n def _get_attached_instance_name(_, volume):\n global _this_instance_ocid\n if not volume.is_attached():\n return '-'\n _vol_instance_attach_to = volume.get_instance()\n if _vol_instance_attach_to.get_ocid() == _this_instance_ocid:\n return \"this instance\"\n pip = _vol_instance_attach_to.get_public_ip()\n if pip:\n return \"%s (%s)\" % (_vol_instance_attach_to.get_display_name(), _vol_instance_attach_to.get_public_ip())\n return _vol_instance_attach_to.get_display_name()\n\n def _get_comp_name(_, volume):\n \"\"\" keep track of compartment per ID as it may be expensive info to fetch \"\"\"\n _map = getattr(_get_comp_name, 'c_id_to_name', {})\n if volume.get_compartment_id() not in _map:\n _map[volume.get_compartment_id()] = volume.get_compartment().get_display_name()\n setattr(_get_comp_name, 'c_id_to_name', _map)\n return _map[volume.get_compartment_id()]\n\n if len(volumes) == 0:\n print('No other volumes found.')\n else:\n _title = 'Block volumes information'\n _columns = [['Name', 32, 'get_display_name'],\n ['Size', 6, _get_displayable_size],\n ['Attached to', 32, _get_attached_instance_name],\n ['OCID', 32, 'get_ocid']]\n if details:\n _columns.extend((['IQN', 14, 'get_iqn'],\n ['Compartment', 14, _get_comp_name],\n ['Availability domain', 19, 'get_availability_domain_name']))\n if output_mode == 'compat':\n printerKlass = get_row_printer_impl('text')\n else:\n printerKlass = get_row_printer_impl(output_mode)\n\n printer = printerKlass(title=_title, columns=_columns, text_truncate=truncate)\n printer.printHeader()\n for vol in volumes:\n printer.printRow(vol)\n printer.rowBreak()\n printer.printFooter()\n printer.finish()", "def volume_space_list_info(self, volume=None):\n return self.request( \"volume-space-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-space-infos': [ VolSpaceInfo, True ],\n } )", "def volume_get_filer_info(self):\n return self.request( \"volume-get-filer-info\", {\n }, {\n 'disk-types': [ basestring, False ],\n 'default-raidtype': [ basestring, False ],\n 'checksum-types': [ basestring, False ],\n 'root-volume': [ basestring, False ],\n 'raidgroup-size': [ RaidgroupSizeInfo, True ],\n 'allowed-raidtypes': [ RaidtypeInfo, True ],\n 'snapshots-max': [ int, False ],\n } )", "def _createVolumesMd(self, volumes):\n mdPath = self.protocol._getTmpPath('viewer_volumes.xmd')\n cleanPath(mdPath)\n md = xmipp.MetaData()\n \n for volFn in volumes:\n md.clear()\n md.setValue(xmipp.MDL_IMAGE, volFn, md.addObject())\n blockName = volFn.split(\"/\")[3]\n #print \"Volume: \", volFn, blockName\n md.write(\"%s@%s\"% (blockName, mdPath), xmipp.MD_APPEND)\n return [self.createDataView(mdPath)]", "def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )", "def describe_volumes(DryRun=None, VolumeIds=None, Filters=None, NextToken=None, MaxResults=None):\n pass", "def volume_wafl_info(self):\n return self.request( \"volume-wafl-info\", {\n }, {\n 'root-volume': [ basestring, False ],\n 'disk-types': [ basestring, False ],\n 'snapshots-max': [ int, False ],\n 'checksum-types': [ basestring, False ],\n } )", "def cli(env, sortby):\n file_manager = SoftLayer.FileStorageManager(env.client)\n 
file_volumes = file_manager.list_file_volume_limit()\n\n table = formatting.KeyValueTable(DEFAULT_COLUMNS)\n table.sortby = sortby\n for volume in file_volumes:\n datacenter_name = volume['datacenterName']\n maximum_available_count = volume['maximumAvailableCount']\n provisioned_count = volume['provisionedCount']\n table.add_row([datacenter_name, maximum_available_count, provisioned_count])\n env.fout(table)", "def describe_volumes(self, xml_bytes):\n root = XML(xml_bytes)\n result = []\n for volume_data in root.find(\"volumeSet\"):\n volume_id = volume_data.findtext(\"volumeId\")\n size = int(volume_data.findtext(\"size\"))\n snapshot_id = volume_data.findtext(\"snapshotId\")\n availability_zone = volume_data.findtext(\"availabilityZone\")\n status = volume_data.findtext(\"status\")\n create_time = volume_data.findtext(\"createTime\")\n create_time = datetime.strptime(\n create_time[:19], \"%Y-%m-%dT%H:%M:%S\")\n volume = model.Volume(\n volume_id, size, status, create_time, availability_zone,\n snapshot_id)\n result.append(volume)\n for attachment_data in volume_data.find(\"attachmentSet\"):\n instance_id = attachment_data.findtext(\"instanceId\")\n device = attachment_data.findtext(\"device\")\n status = attachment_data.findtext(\"status\")\n attach_time = attachment_data.findtext(\"attachTime\")\n attach_time = datetime.strptime(\n attach_time[:19], \"%Y-%m-%dT%H:%M:%S\")\n attachment = model.Attachment(\n instance_id, device, status, attach_time)\n volume.attachments.append(attachment)\n return result", "def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)", "def volumes_from_entity_info_data(data):\n # Check if we have data and if it contains areas\n if data is None or 'areas' not in data:\n return {}\n\n # Loop over all areas\n volumes = {}\n for a in data['areas']:\n\n # Check if the volume has a name. 
Otherwise: skip\n if 'name' not in a:\n continue\n name = a['name']\n\n # Check if this is an 'OffsetVolume'\n if 'offset' in a:\n volumes[name] = OffsetVolume(a['offset'])\n continue\n\n # Check if we have a shape\n if 'shape' in a:\n\n shapes = a['shape']\n\n # Check if this is a single shape\n if len(shapes) > 1:\n print \"\\nError [volumes_from_entity_info_data]: Cannot handle compound shapes yet...\\n\"\n continue\n shape = shapes[0]\n\n # Check if this one shape is a box\n if 'box' in shape:\n box = shape['box']\n mic = box['min']\n min_corner = kdl.Vector(mic['x'], mic['y'], mic['z'])\n mac = box['max']\n max_corner = kdl.Vector(mac['x'], mac['y'], mac['z'])\n volumes[name] = BoxVolume(min_corner=min_corner, max_corner=max_corner)\n continue\n\n # If we end up here, we don't know what to do with the area\n print \"\\nError [volumes_from_entity_info_data]: don't know what to do with {}\\n\".format(a)\n\n return volumes", "def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "def block_stats_for_volume(block_shape, volume, physical_box):\n block_grid = Grid(block_shape)\n \n block_dfs = []\n block_boxes = boxes_from_grid(physical_box, block_grid)\n for box in block_boxes:\n clipped_box = box_intersection(box, physical_box) - physical_box[0]\n block_vol = volume[box_to_slicing(*clipped_box)]\n counts = pd.Series(block_vol.reshape(-1)).value_counts(sort=False)\n segment_ids = counts.index.values\n counts = counts.values.astype(np.uint32)\n\n box = box.astype(np.int32)\n\n block_df = pd.DataFrame( { 'segment_id': segment_ids,\n 'count': counts,\n 'z': box[0][0],\n 'y': box[0][1],\n 'x': box[0][2] } )\n\n # Exclude segment 0 from output\n block_df = block_df[block_df['segment_id'] != 0]\n\n block_dfs.append(block_df)\n\n brick_df = pd.concat(block_dfs, ignore_index=True)\n brick_df = brick_df[['segment_id', 'z', 'y', 'x', 'count']]\n assert list(brick_df.columns) == list(BLOCK_STATS_DTYPES.keys())\n return brick_df", "def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device", "def volumes(self) -> List:\n if self.node is None:\n return []\n # Removing boot volume from the list\n volume_attachments = []\n for i in self.node[\"volume_attachments\"]:\n volume_detail = self.service.get_volume(i[\"volume\"][\"id\"])\n for vol in volume_detail.get_result()[\"volume_attachments\"]:\n if vol[\"type\"] == \"data\":\n volume_attachments.append(vol)\n return volume_attachments", "def get_volumes(instance):\n volumes = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n volume_tag_reference = \"%s_data\" % tag['Value']\n\n for volume in instance['volumes']:\n if volume['volume_tags'] != None:\n for volume_tag in volume['volume_tags']:\n if 
volume_tag['Key'] == 'Name' and volume_tag['Value'] == volume_tag_reference:\n volumes.append(volume['VolumeId'])\n snapshot_volumes = []\n for volume in instance['volumes']:\n if volume['VolumeId'] in volumes:\n vol = {\"VolumeId\": volume['VolumeId'], \"volume_tags\": volume['volume_tags']}\n snapshot_volumes.append(vol)\n return snapshot_volumes", "def heal_info_summary(mnode, volname):\n cmd = (\"gluster volume heal %s info | grep 'entries\\|Brick\\|Status'\" %\n volname)\n return g.run(mnode, cmd)", "def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set a volume's 'filestotal' value to the given quantity. This specifies the maximum number of user-visible files that the given volume can hold.
def volume_set_total_files(self, volume, requested_total_files, force=None):
    return self.request( "volume-set-total-files", {
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
        'force': [ force, 'force', [ bool, 'None' ], False ],
        'requested_total_files': [ requested_total_files, 'requested-total-files', [ int, 'None' ], False ],
    }, {
        'resulting-total-files': [ int, False ],
    } )
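A hedged usage sketch for the wrapper above; `filer` is an assumed connected client and the argument values are illustrative only:

    # Hypothetical client exposing the generated wrapper; not defined in the source.
    reply = filer.volume_set_total_files(
        volume='vol_projects',          # illustrative volume name
        requested_total_files=2000000,  # new cap on user-visible files
        force=True,                     # optional boolean forwarded as the 'force' field
    )
    # The output is documented to include a 'resulting-total-files' integer.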
[ "def total_vol(self, total_vol):\n\t\tself._total_vol = total_vol", "def files_count(self, value):\n self.logger.warn(\n \"Setting values on files_count will NOT update the remote Canvas instance.\"\n )\n self._files_count = value", "def set_total_sent(buf, total):\n buf[TOTAL_SENT_COUNT_POINTER] = total", "def setfsquota(self, vfsname, fpg=None, username=None, groupname=None,\n fstore=None, scapacity=None, hcapacity=None, sfile=None,\n hfile=None, clear=False, archive=False, restore=None):", "def update_total(self):\n self.total = self.lineitems.aggregate(Sum('lineitem_total'))['lineitem_total__sum'] or 0\n self.save()", "def setNumberOfPackages(self, total):\n self.__numberOfPackages = total", "def set_total_records(self, total_recs):\n self._total_recs = total_recs\n self._calculate_total_changes()\n if not self._force:\n LOG.info('Total changes set to: %s', self.max_changes)\n else:\n LOG.info('Total changes ingored. Force flag given')", "def volume_size(self, volume, new_size=None):\n return self.request( \"volume-size\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n }, {\n 'is-fixed-size-flex-volume': [ bool, False ],\n 'is-readonly-flex-volume': [ bool, False ],\n 'is-replica-flex-volume': [ bool, False ],\n 'volume-size': [ basestring, False ],\n } )", "def add_to_total_size(path, total_size):\n\n if os.path.exists(path):\n # Get the file size\n fsize = get_local_file_size(path)\n if fsize:\n logger.info(\"size of file %s: %d B\", path, fsize)\n try:\n total_size += long(fsize) # Python 2 # noqa: F821\n except Exception:\n total_size += int(fsize) # Python 3 (note order in try statement)\n else:\n logger.warning(\"skipping file %s since it is not present\", path)\n\n return total_size", "def update_storage(self, amount):\n self.amount = amount", "def set_sfx_volume(cls, new_volume: float) -> None:\n new_volume = max(0.0, min(1.0, new_volume))\n cls.sfx_volume = new_volume", "def volume_total(self, volume_total):\n if volume_total is None:\n raise ValueError(\"Invalid value for `volume_total`, must not be `None`\") # noqa: E501\n\n self._volume_total = volume_total", "def set_n_files(self, n_files):\n self._n_files = n_files", "def cli(env, sortby):\n file_manager = SoftLayer.FileStorageManager(env.client)\n file_volumes = file_manager.list_file_volume_limit()\n\n table = formatting.KeyValueTable(DEFAULT_COLUMNS)\n table.sortby = sortby\n for volume in file_volumes:\n datacenter_name = volume['datacenterName']\n maximum_available_count = volume['maximumAvailableCount']\n provisioned_count = volume['provisionedCount']\n table.add_row([datacenter_name, maximum_available_count, provisioned_count])\n env.fout(table)", "def update_count(self, user_hash, total_items):\n conn = sqlite3.connect('{}/users.db'.format(DATA_PATH))\n c = conn.cursor()\n c.execute('UPDATE users SET total_items = ?', (total_items,))\n conn.commit()\n conn.close()", "def set_total_credits_used(self, total_credits_used):\n self.total_credits_used = total_credits_used", "def calc_number_servers_file_size(self):\n if self.total_size < 20:\n servers = self.total_size\n file_size = 1\n else:\n file_size = int(self.total_size / 20)\n servers = 21\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"filesize\"] = f\"{file_size}GiB\"\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\n \"storagesize\"\n ] = f\"{int(file_size + 2)}Gi\"\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"servers\"] = servers", "def 
VolumeExtend(new_size,\n gib,\n#pylint: disable=unused-argument\n volume_names,\n volume_ids,\n volume_prefix,\n volume_regex,\n volume_count,\n source_account,\n source_account_id,\n test,\n mvip,\n username,\n password):\n#pylint: enable=unused-argument\n options = copy.deepcopy(locals())\n options.pop(\"new_size\", None)\n options.pop(\"gib\", None)\n\n if gib:\n multiplier = 1024 * 1024 * 1024\n else:\n multiplier = 1000 * 1000 * 1000\n\n new_size = new_size * multiplier\n post_value = new_size\n if new_size % 4096 != 0:\n post_value = int((new_size // 4096 + 1) * 4096)\n\n return VolumeModify(property_name=\"totalSize\",\n property_value=new_size,\n post_value=post_value,\n **options)", "def number_of_files(self) -> int:\n return pulumi.get(self, \"number_of_files\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Starts an iteration through the list of volumes.
def volume_list_info_iter_start(self, volume=None, verbose=None):
    return self.request( "volume-list-info-iter-start", {
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
        'verbose': [ verbose, 'verbose', [ bool, 'None' ], False ],
    }, {
        'records': [ int, False ],
        'tag': [ basestring, False ],
    } )
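A sketch of the iteration pattern implied by the wrapper above, assuming `filer` is a connected client and that replies can be read as mappings keyed by the output names listed in the binding; the companion iter-next wrapper appears as the last negative for this entry:

    # Start the iteration; the reply is documented to carry a 'tag' and a
    # 'records' count (assumed here to be addressable like a mapping).
    start = filer.volume_list_info_iter_start(verbose=False)
    tag = start['tag']
    # Pull one batch of up to 20 volumes with volume-list-info-iter-next,
    # whose wrapper is shown among the negatives below.
    batch = filer.volume_list_info_iter_next(tag, 20)
    volumes = batch['volumes']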
[ "def _iter_volumes(self):\n if self.volumes:\n for volume_name, container_path in self.volumes.iteritems():\n if \"/\" in volume_name:\n # if a / is found in the name, assume it's a full path specified on the host\n host_path = volume_name\n else:\n host_path = \"%s/volumes/%s/%s\" % (self.project.home_path, self.name, volume_name)\n yield (host_path, container_path)", "def enable(self):\n for volume in self.volumes:\n try:\n self._renderer.AddVolume(volume)\n except:\n pass # TBD: any error logging.", "def iterate_volumes(self, node=None, ex_datacenter=None):\n if node is not None:\n if ex_datacenter:\n raise ValueError(\n \"Cannot list the volumes for the datacenter and the \"\n \"virtual machine at the same time\")\n virtual_machine = self.ex_get_vm(node)\n else:\n virtual_machine = None\n\n if ex_datacenter is not None:\n ex_datacenter = self._get_datacenter_by_id(ex_datacenter)\n\n # querying the creation timestamps of node(s) and volumes\n node_creation_times = self._query_node_creation_times(\n virtual_machine=virtual_machine)\n volume_creation_times = self._query_volume_creation_times(\n virtual_machine=virtual_machine)\n\n shared_files = collections.defaultdict(list)\n\n def result_to_volumes(files_info, allow_shared=False):\n \"\"\"\n :type disks_page: tp.Union[tp.List[_FileInfo], tp.List[_VMDiskInfo]]\n :rtype: tp.List[StorageVolume]\n \"\"\"\n if files_info and isinstance(files_info[0], _VMDiskInfo):\n files_info = (disk.file_info for disk in files_info)\n\n volumes = []\n for file_info in files_info:\n\n if not allow_shared and any(\n d.sharing\n for d in file_info.devices):\n shared_files[file_info.path].append(file_info)\n continue\n\n try:\n volume = self._to_volume(file_info)\n except LibcloudError as err:\n # one broken volume should not break the whole iteration\n LOG.warning(str(err))\n continue\n\n created_at = volume_creation_times.get(volume.id)\n for device in file_info.devices:\n if created_at:\n break\n if device.is_root:\n created_at = node_creation_times.get(device.owner_id)\n volume.extra['created_at'] = created_at\n\n volumes.append(volume)\n return volumes\n\n for item in self._query_vm_virtual_disks(\n virtual_machine=virtual_machine,\n datacenter=ex_datacenter,\n process_fn=result_to_volumes):\n yield item\n\n # collect and yield the shared volumes at the end of iteration\n merged_shared_files = []\n for files_info in shared_files.values():\n files_info[0].devices = list({\n device for file_info in files_info\n for device in file_info.devices})\n merged_shared_files.append(files_info[0])\n for item in result_to_volumes(merged_shared_files, allow_shared=True):\n yield item", "def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device", "def test_01_list_volumes(self):\n list_volume_response = Volume.list(\n self.apiclient,\n ids=[self.vm1_root_volume.id, self.vm2_root_volume.id, self.vm3_root_volume.id],\n type='ROOT',\n listAll=True\n )\n self.assertEqual(\n isinstance(list_volume_response, list),\n True,\n \"List Volume response was not a valid list\"\n )\n self.assertEqual(\n len(list_volume_response),\n 3,\n \"ListVolumes response expected 3 Volumes, received %s\" % len(list_volume_response)\n )", "def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = 
NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list", "def volume_list_paged(request, search_opts=None, marker=None, paginate=False,\n sort_dir=\"desc\"):\n has_more_data = False\n has_prev_data = False\n volumes = []\n\n # To support filtering with group_id, we need to use the microversion.\n c_client = _cinderclient_with_generic_groups(request)\n if c_client is None:\n return volumes, has_more_data, has_prev_data\n\n # build a dictionary of volume_id -> transfer\n transfers = {t.volume_id: t\n for t in transfer_list(request, search_opts=search_opts)}\n\n if paginate:\n page_size = utils.get_page_size(request)\n # sort_key and sort_dir deprecated in kilo, use sort\n # if pagination is true, we use a single sort parameter\n # by default, it is \"created_at\"\n sort = 'created_at:' + sort_dir\n for v in c_client.volumes.list(search_opts=search_opts,\n limit=page_size + 1,\n marker=marker,\n sort=sort):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n volumes, has_more_data, has_prev_data = update_pagination(\n volumes, page_size, marker, sort_dir)\n else:\n for v in c_client.volumes.list(search_opts=search_opts):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n\n return volumes, has_more_data, has_prev_data", "def iterate_containers(self):\r\n\r\n for container_name in os.listdir(self.base_path):\r\n full_path = os.path.join(self.base_path, container_name)\r\n if not os.path.isdir(full_path):\r\n continue\r\n yield self._make_container(container_name)", "def all_volumes(self):\n _logger.debug('%s', where_am_i())\n volumes = []\n for compartment in self.all_compartments():\n comp_volumes = compartment.all_volumes()\n if comp_volumes is not None:\n volumes += comp_volumes\n return volumes", "def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)", "def list_volumes( fields ):\n global conf\n\n volume_names = VOLUME_NAMES( conf )\n ret = []\n \n for name in volume_names:\n vol_conf = read_volume( name, fields )\n vol_conf['NAME'] = name\n ret.append( vol_conf )\n\n return ret", "def test_list_volumes_walks_pages(self):\n api = gceblockdeviceapi_for_test(self)\n self.patch(api, '_page_size', 1)\n\n volume_1 = api.create_volume(\n dataset_id=uuid4(),\n size=get_minimum_allocatable_size()\n )\n volume_2 = api.create_volume(\n dataset_id=uuid4(),\n 
size=get_minimum_allocatable_size()\n )\n\n blockdevice_ids = [v.blockdevice_id for v in api.list_volumes()]\n self.assertThat(\n blockdevice_ids,\n MatchesAll(Contains(volume_1.blockdevice_id),\n Contains(volume_2.blockdevice_id))\n )\n\n api.destroy_volume(volume_2.blockdevice_id)\n blockdevice_ids = [v.blockdevice_id for v in api.list_volumes()]\n self.assertThat(\n blockdevice_ids,\n MatchesAll(Contains(volume_1.blockdevice_id),\n Not(Contains(volume_2.blockdevice_id)))\n )", "def list(connection):\n volumes = get_watched_volumes(connection)\n\n if not volumes:\n logger.info('No watched volumes found')\n return\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n logger.info(\n '| {volume:<21} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume='Volume ID',\n volume_name='Volume name',\n interval='Interval',\n retention='Retention'))\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')\n\n for volume in volumes:\n if 'AutomatedEBSSnapshots' not in volume.tags:\n interval = 'Interval tag not found'\n elif volume.tags['AutomatedEBSSnapshots'] not in VALID_INTERVALS:\n interval = 'Invalid interval'\n else:\n interval = volume.tags['AutomatedEBSSnapshots']\n\n if 'AutomatedEBSSnapshotsRetention' not in volume.tags:\n retention = 0\n else:\n retention = volume.tags['AutomatedEBSSnapshotsRetention']\n\n # Get the volume name\n try:\n volume_name = volume.tags['Name']\n except KeyError:\n volume_name = ''\n\n logger.info(\n '| {volume_id:<14} '\n '| {volume_name:<20.20} '\n '| {interval:<12} '\n '| {retention:<10} |'.format(\n volume_id=volume.id,\n volume_name=volume_name,\n interval=interval,\n retention=retention))\n\n logger.info(\n '+-----------------------'\n '+----------------------'\n '+--------------'\n '+------------+')", "def volume_list(mnode):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes\", httplib.OK, None)", "def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))", "def volumes_prepare(volumes:list,instances:list,projects:dict) -> list:\n v2 = []\n \n for volume in volumes:\n \n volume_dict = volume.to_dict()\n \n volume_dict[\"project_name\"] = projects[volume_dict[\"os-vol-tenant-attr:tenant_id\"]]\n \n if volume_dict[\"name\"] == \"None\" or volume_dict[\"name\"] == None:\n volume_dict[\"name\"] = \"\"\n\n if volume_dict[\"name\"] != \"\": #replace space to _ so its usable in the volume name, if it has volume name\n volume_dict[\"name\"] = str(volume_dict[\"name\"]).replace(\" \",\"_\") \n\n #check if volume is attached to an instance and act accordingly\n if volume_dict[\"attachments\"] != [] :\n volume_dict[\"server_id\"] = volume_dict[\"attachments\"][0][\"server_id\"]\n volume_dict[\"server_name\"] = get_server_name(volume_dict[\"attachments\"][0][\"server_id\"],instances)\n volume_dict[\"mountpoint\"] = volume_dict[\"attachments\"][0][\"device\"].split('/')[-1]\n if volume_dict[\"mountpoint\"] == \"vda\":\n volume_dict[\"mountpoint\"] = \"root\"\n else:\n volume_dict[\"server_id\"] = \"not attached\"\n volume_dict[\"server_name\"] = \"\"\n 
volume_dict[\"mountpoint\"] = \"\"\n \n volume_dict[\"volume_migration_name\"] = volume_dict[\"id\"]+\"-\"+volume_dict[\"name\"]+\"-\"+volume_dict[\"server_name\"]+\"-\"+volume_dict[\"mountpoint\"]\n v2.append(volume_dict)\n \n v2 = filter_volumes(v2)\n return v2", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "def volumes(self) -> list[EyeVolume]:\n volumes = []\n for s in self.series:\n try:\n volumes.append(s.get_volume())\n except Exception as e:\n logger.debug(''.join(traceback.format_exception(e)))\n return volumes", "def volume_list_info_iter_next(self, tag, maximum):\n return self.request( \"volume-list-info-iter-next\", {\n 'tag': tag,\n 'maximum': [ maximum, 'maximum', [ int, 'None' ], False ],\n }, {\n 'records': [ int, False ],\n 'volumes': [ VolumeInfo, True ],\n } )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds disks to the given traditional volume. Specify the disks to add in the same way as for 'volume-create'. Disks cannot be added to a mirrored traditional volume if one of its plexes is offline. Addition of the specified disk(s) may not have completed by the time the API returns. Use 'volume-list-info' to query the traditional volume's status, and thus determine when the disk addition is complete. It is not possible to add disks directly to a flexible volume; if that is the goal, then consider using 'volume-container' to find the flexible volume's containing aggregate, then use 'aggr-add' to add the desired disks there (which, of course, will make their storage available to all flexible volumes contained in that same aggregate).
def volume_add(self, volume, disk_size_with_unit=None, mirror_disks=None, disk_size=None, force=None, disks=None, raid_group=None, disk_count=None):
    return self.request( "volume-add", {
        'disk_size_with_unit': [ disk_size_with_unit, 'disk-size-with-unit', [ basestring, 'None' ], False ],
        'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],
        'disk_size': [ disk_size, 'disk-size', [ int, 'None' ], False ],
        'force': [ force, 'force', [ bool, 'None' ], False ],
        'disks': [ disks, 'disks', [ DiskInfo, 'None' ], True ],
        'raid_group': [ raid_group, 'raid-group', [ basestring, 'None' ], False ],
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
        'disk_count': [ disk_count, 'disk-count', [ int, 'None' ], False ],
    }, {
        'bad-disks': [ DiskInfo, True ],
    } )
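A hedged usage sketch for the wrapper above; `filer` is an assumed connected client, and the volume name and disk count are illustrative:

    # Ask Data ONTAP to pick two spare disks and add them to a traditional volume.
    reply = filer.volume_add(
        volume='trad_vol1',   # illustrative traditional volume name
        disk_count=2,         # let the filer choose the spares
    )
    # Per the description, the call can return before the addition completes:
    # poll volume-list-info for status and inspect 'bad-disks' in the reply.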
[ "def add(self, *params):\n if not params or len(params)==0:\n raise TypeError(\"add takes at lease 1 argument 0 given.\")\n elif params and len(params)>2:\n raise TypeError(\"add takes at lease 1 argument %u given.\" %(len(params)))\n disk=params[0]\n return self._add(\"vdisk\", disk.getAttribute(\"name\"), disk.getProperties())", "def add_volumes(self, volumes_to_add):\n volumes_to_add = generate_volumes_owned(volumes_to_add)\n vol_arr_to_add = [int(x) for x in\n volumes_to_add.split(\",\")]\n self.vol_arr = [x | y for x, y in\n zip(vol_arr_to_add, self.vol_arr)]\n\n # update related fields\n self.next_volume = self.calculate_next_volume()\n self.volumes_owned_readable = \"\"\n self.volumes_owned = generate_volumes_owned(\n self.get_volumes_owned())", "def add_disk_to_vm_on_iscsi(request, storage):\n self = request.node.cls\n\n vm_disk_2 = storage_helpers.create_unique_object_name(\n self.__name__, config.OBJECT_TYPE_DISK\n )\n testflow.setup(\n \"Adding disk %s to VM %s on storage domain %s\", vm_disk_2,\n self.vm_name, self.storage_domains[config.ISCSI]\n )\n helpers.add_disk_to_sd(\n vm_disk_2, self.storage_domains[config.ISCSI],\n attach_to_vm=self.vm_name\n )", "def _add_to_consistencygroup(self, group, add_volumes):\n LOG.debug(_(\"Adding %(vols)s to consistencygroup %(group)s\") %\n {'vols': add_volumes, 'group': group})\n\n if not add_volumes:\n add_volumes = []\n elif hasattr(add_volumes, 'isdigit'):\n add_volumes = [add_volumes, ]\n\n for volume in add_volumes:\n self._ensure_snapshot_resource_area(volume)\n\n ans = self.vmem_mg.snapshot.add_luns_to_snapgroup(group, add_volumes)\n\n if not ans['success']:\n msg = (_(\"Failed to add volumes %(vols)s to \" +\n \"consistencygroup %(group)s: %(msg)s\") %\n {'vols': add_volumes, 'group': group, 'msg': ans['msg']})\n raise exception.ViolinBackendErr(message=msg)", "def addVolume(shellTags, tag=-1):\n api_shellTags_, api_shellTags_n_ = _ivectorint(shellTags)\n ierr = c_int()\n api__result__ = lib.gmshModelGeoAddVolume(\n api_shellTags_, api_shellTags_n_,\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelGeoAddVolume returned non-zero error code: \",\n ierr.value)\n return api__result__", "def add_volume(self, volume):\n self.volumes.append(volume)\n return self", "def addVolume(shellTags, tag=-1):\n api_shellTags_, api_shellTags_n_ = _ivectorint(shellTags)\n ierr = c_int()\n api__result__ = lib.gmshModelOccAddVolume(\n api_shellTags_, api_shellTags_n_,\n c_int(tag),\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelOccAddVolume returned non-zero error code: \",\n ierr.value)\n return api__result__", "def add_floating_disks(request, storage):\n\n self = request.node.cls\n\n testflow.setup(\n \"Adding 1 shareable disk and 1 non-shareable disk to VM %s\",\n self.vm_name\n )\n self.disk_aliases = []\n\n for disk_interface in self.interfaces:\n for shareable in (True, False):\n disk_params = config.disk_args.copy()\n disk_params['interface'] = disk_interface\n disk_params['shareable'] = shareable\n if shareable:\n disk_params['format'] = config.DISK_FORMAT_RAW\n disk_params['sparse'] = False\n disk_params['storagedomain'] = self.storage_domain\n disk_params['alias'] = (\n storage_helpers.create_unique_object_name(\n self.__class__.__name__, config.OBJECT_TYPE_DISK\n )\n )\n assert ll_disks.addDisk(True, **disk_params), (\n \"Can't create disk with params: %s\" % disk_params\n )\n logger.info(\n \"Waiting for disk %s to be OK\", disk_params['alias']\n )\n assert 
ll_disks.wait_for_disks_status(disk_params['alias']), (\n \"Disk '%s' has not reached state 'OK'\" % disk_params['alias']\n )\n self.disk_aliases.append(disk_params['alias'])\n # initialize for delete_disks fixture\n self.disks_to_remove = self.disk_aliases", "def attach_disks(vm_id, disk_query, show=None, headers='yes', ovirt=None):\n vm = ovirt.vms.get(id=vm_id)\n if vm is None:\n abort(\"VM with specified ID '{0}' not found\".format(vm_id))\n disks = ovirt.disks.list(query=disk_query)\n for disk in disks:\n vm.disks.add(disk)\n oVirtObjectType.all_types['disk'].print_table(\n disks, show=show, headers=headers\n )\n return disks", "def add_mdisks(self, userid, disk_list, start_vdev=None):\n\n # Firstly, check disk_pool in disk_list, if disk_pool not specified\n # and not configured(the default vaule is None), report error\n # report error\n for idx, disk in enumerate(disk_list):\n disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool\n disk['disk_pool'] = disk_pool\n if disk_pool is None:\n msg = ('disk_pool not configured for sdkserver.')\n LOG.error(msg)\n raise exception.SDKGuestOperationError(rs=2, msg=msg)\n\n for idx, disk in enumerate(disk_list):\n if 'vdev' in disk:\n # this means user want to create their own device number\n vdev = disk['vdev']\n else:\n vdev = self.generate_disk_vdev(start_vdev=start_vdev,\n offset=idx)\n self._add_mdisk(userid, disk, vdev)\n disk['vdev'] = vdev\n\n sizeUpper = disk.get('size').strip().upper()\n sizeUnit = sizeUpper[-1]\n if sizeUnit != 'G' and sizeUnit != 'M':\n sizeValue = sizeUpper\n disk_pool = disk.get('disk_pool')\n [diskpool_type, diskpool_name] = disk_pool.split(':')\n if (diskpool_type.upper() == 'ECKD'):\n # Convert the cylinders to bytes\n convert = 737280\n else:\n # Convert the blocks to bytes\n convert = 512\n byteSize = float(float(int(sizeValue) * convert / 1024) / 1024)\n unit = \"M\"\n if (byteSize > 1024):\n byteSize = float(byteSize / 1024)\n unit = \"G\"\n byteSize = \"%.1f\" % byteSize\n disk['size'] = byteSize + unit\n\n return disk_list", "def create_disk(image_info, disk_id, sd_name, disks_service):\n initial_size = image_info['actual-size']\n provisioned_size = image_info['virtual-size']\n image_id = os.path.basename(image_info['filename'])\n\n disk = disks_service.add(\n types.Disk(\n id=disk_id,\n image_id=image_id,\n name=disk_id,\n format=types.DiskFormat.RAW,\n provisioned_size=provisioned_size,\n initial_size=initial_size,\n storage_domains=[\n types.StorageDomain(\n name=sd_name\n )\n ]\n )\n )\n disk_service = disks_service.disk_service(disk.id)\n while True:\n time.sleep(5)\n disk = disk_service.get()\n if disk.status == types.DiskStatus.OK:\n break\n\n return disk", "def test_volumes_add(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.volumes), 8)\n svc.volumes.append(sm.Volume(\"foo\", \"bar\"))\n svc.volumes.append(sm.Volume(\"bar\", \"baz\"))\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n if not \"foo\" in [v.owner for v in svc.volumes]:\n raise ValueError(\"Failed to alter volumes.\")\n if not \"bar\" in [v.owner for v in svc.volumes]:\n raise ValueError(\"Failed to alter volumes.\")\n for v in svc.volumes:\n if v.owner == \"foo\":\n self.assertEqual(v.permission, \"bar\")\n if v.owner == \"bar\":\n self.assertEqual(v.permission, \"baz\")\n self.assertEqual(len(svc.volumes), 10)", "def _multi_create_disk(self, 
status, node_attrs):\r\n disk = None\r\n # Check for existing disk\r\n if node_attrs['use_existing_disk']:\r\n try:\r\n disk = self.ex_get_volume(status['name'],\r\n node_attrs['location'])\r\n except ResourceNotFoundError:\r\n pass\r\n\r\n if disk:\r\n status['disk'] = disk\r\n else:\r\n # Create disk and return response object back in the status dict.\r\n # Or, if there is an error, mark as failed.\r\n disk_req, disk_data, disk_params = self._create_vol_req(\r\n None, status['name'], location=node_attrs['location'],\r\n image=node_attrs['image'])\r\n try:\r\n disk_res = self.connection.request(\r\n disk_req, method='POST', data=disk_data,\r\n params=disk_params).object\r\n except GoogleBaseError:\r\n e = self._catch_error(\r\n ignore_errors=node_attrs['ignore_errors'])\r\n error = e.value\r\n code = e.code\r\n disk_res = None\r\n status['disk'] = GCEFailedDisk(status['name'],\r\n error, code)\r\n status['disk_response'] = disk_res", "def test_add_vdisk(self):\n vdisks = self.dwrap.virtual_disks\n\n self.assertEqual(1, len(vdisks))\n encryptor = stor._LUKSEncryptor.bld(None,\n cipher='aes-cbc-essiv:sha256',\n key_size=512, hash_spec='sha256')\n\n disk = stor.VDisk.bld(\n None, 'disk_name', 10.9876543, label='label', base_image='cache',\n file_format=stor.FileFormatType.RAW)\n disk._encryption_agent = encryptor\n disk._encryption_key = 'password'\n disk._encryption_state = 'Unlocked'\n self.assertIsNotNone(disk)\n\n vdisks.append(disk)\n self.dwrap.virtual_disks = vdisks\n\n self.assertEqual(2, len(self.dwrap.virtual_disks))\n\n # make sure the second virt disk matches what we put in\n vdisk = self.dwrap.virtual_disks[1]\n self.assertEqual('disk_name', vdisk.name)\n self.assertEqual(10.987654, vdisk.capacity)\n self.assertEqual('label', vdisk.label)\n self.assertEqual(None, vdisk.udid)\n self.assertEqual('cache', vdisk._get_val_str(stor._DISK_BASE))\n self.assertEqual(stor.FileFormatType.RAW, vdisk.file_format)\n self.assertEqual(stor.VDiskType.LV, vdisk.vdtype)\n self.assertEqual('password', vdisk._encryption_key)\n self.assertEqual('Unlocked', vdisk._encryption_state)\n self.assertIsInstance(vdisk._encryption_agent, stor._LUKSEncryptor)\n self.assertEqual('aes-cbc-essiv:sha256',\n vdisk._encryption_agent.cipher)\n self.assertEqual(512, vdisk._encryption_agent.key_size)\n self.assertEqual('sha256', vdisk._encryption_agent.hash_spec)\n\n # Try a remove\n self.dwrap.virtual_disks.remove(vdisk)\n self.assertEqual(1, len(self.dwrap.virtual_disks))", "def addPhysicalVolume(self, pv):\n self.pvs[pv.getAttribute(\"name\")] = pv\n self.getElement().appendChild(pv.getElement())\n pv.parentvg=self", "def spanDisks(self, files, disks=[]):\n\t\t\n\t\t\n\t\t# Set the roll size to 0 to bypass the disk spanning\n\t\t# logic. The updates Roll does this.\n\t\t\n\t\tavail = self.config.getISOMaxSize()\n\t\tif avail <= 0:\n\t\t\tinfinite = 1\n\t\telse:\n\t\t\tinfinite = 0\n\t\tconsumed = []\n\t\tremaining = []\n\t\t\n\t\t# Fill the CDs, note that we start with an order of RPMS before\n\t\t# SRPMS but this will not be preserved. 
A large RPM could\n\t\t# be bumped from the CD and SRMPS backfilled in its place.\n\t\t\n\t\tfor file in files:\n\t\t\tif file and infinite:\n\t\t\t\tconsumed.append(file)\n\t\t\telif file and (avail - file.getSize()) > 0:\n\t\t\t\tconsumed.append(file)\n\t\t\t\tavail -= file.getSize()\n\t\t\telse:\n\t\t\t\tremaining.append(file)\n\t\t\n\t\tid\t= len(disks) + 1\n\t\tname\t= 'disk%d' % id\n\t\tsize\t= self.config.getISOMaxSize() - avail\n\t\tdisks.append((name, id, size, consumed))\n\t\tif len(remaining):\n\t\t\tself.spanDisks(remaining, disks)\n\t\treturn disks", "def attach_hdd(self, name, size):\n if not size or size < VM_MIN_HDD or size > VM_MAX_HDD:\n raise VmCLIException('Hdd size must be between {}-{}'.format(VM_MIN_HDD, VM_MAX_HDD))\n\n vm = self.get_vm_obj(name, fail_missing=True)\n\n disks = []\n controller = None\n # iterate over existing devices and try to find disks and controllerKey\n self.logger.info('Searching for already existing disks and SCSI controllers...')\n for device in vm.config.hardware.device:\n # search for existing SCSI controller or create one if none found\n # TODO: provide flag when to create new controller\n if isinstance(device, vim.vm.device.VirtualSCSIController) and not controller:\n controller = device\n elif isinstance(device, vim.vm.device.VirtualDisk):\n disks.append(device)\n\n disk_unit_number = 0\n controller_unit_number = 7\n scsispec = None\n # if controller exists, calculate next unit number for disks otherwise create new controller and use defaults\n if controller:\n self.logger.info('Using existing SCSI controller(id:{}) to attach disk'.format(controller.key))\n controller_unit_number = int(controller.key)\n for disk in disks:\n if disk.controllerKey == controller.key and disk_unit_number <= int(device.unitNumber):\n disk_unit_number = int(device.unitNumber) + 1\n else:\n self.logger.info('No existing SCSI controller found. 
Creating new one...')\n scsispec = vim.vm.device.VirtualDeviceSpec()\n scsispec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n scsispec.device = vim.vm.device.ParaVirtualSCSIController(deviceInfo=vim.Description())\n scsispec.device.slotInfo = vim.vm.device.VirtualDevice.PciBusSlotInfo()\n # if there is no controller on the device present, assign it default values\n scsispec.device.controllerKey = 100\n scsispec.device.unitNumber = 3\n scsispec.device.busNumber = 0\n scsispec.device.hotAddRemove = True\n scsispec.device.sharedBus = 'noSharing'\n scsispec.device.scsiCtlrUnitNumber = controller_unit_number\n controller = scsispec.device\n controller.key = 100\n\n if disk_unit_number >= 16:\n raise VmCLIException('The SCSI controller does not support any more disks!')\n elif disk_unit_number == 7:\n disk_unit_number =+ 1 # 7 is reserved for SCSI controller itself\n\n self.logger.info('Creating new empty disk with size {}G'.format(size))\n diskspec = vim.vm.device.VirtualDeviceSpec()\n diskspec.fileOperation = \"create\"\n diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add\n diskspec.device = vim.vm.device.VirtualDisk()\n diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()\n diskspec.device.backing.diskMode = 'persistent'\n diskspec.device.backing.thinProvisioned = True\n diskspec.device.unitNumber = disk_unit_number\n diskspec.device.capacityInBytes = size * 1024 * 1024 * 1024\n diskspec.device.capacityInKB = size * 1024 * 1024\n diskspec.device.controllerKey = controller.key\n\n if scsispec:\n dev_change = [scsispec, diskspec]\n else:\n dev_change = [diskspec]\n\n config_spec = vim.vm.ConfigSpec(deviceChange=dev_change)\n self.logger.info('Attaching device to the virtual machine...')\n task = vm.ReconfigVM_Task(config_spec)\n self.wait_for_tasks([task])", "def _mount_volumes(volumes):\n\n user_data_script_section = ''\n\n for volume in volumes:\n device = volume.device\n vol_type = volume.vol_type\n directory = volume.mount\n\n user_data_script_section += f\"\"\"\nmkfs -t {vol_type} {device}\nls {directory} || mkdir {directory}\nmount {device} {directory}\n\"\"\"\n\n return user_data_script_section", "def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transition a volume between 7-Mode and Cluster-Mode. Currently the only supported operation type is transitioning a 7-Mode Flexible Volume that has been copied from a 7-Mode Filer for the purpose of transitioning it to a Cluster-Mode Flexible Volume. A job ID will be returned that can be used to query the progress of the transition job.
def volume_transition(self, source_node, volumes, affinity_node=None, operation_type=None, override_warnings=None, destination_vserver_name=None, non_disruptive=None):
    return self.request( "volume-transition", {
        'affinity_node': [ affinity_node, 'affinity-node', [ basestring, 'None' ], False ],
        'source_node': [ source_node, 'source-node', [ basestring, 'None' ], False ],
        'operation_type': [ operation_type, 'operation-type', [ basestring, 'None' ], False ],
        'override_warnings': [ override_warnings, 'override-warnings', [ bool, 'None' ], False ],
        'destination_vserver_name': [ destination_vserver_name, 'destination-vserver-name', [ basestring, 'None' ], False ],
        'volumes': [ volumes, 'volumes', [ VolumeTransitionVolinfo, 'None' ], True ],
        'non_disruptive': [ non_disruptive, 'non-disruptive', [ bool, 'None' ], False ],
    }, {
        'job-id': [ int, False ],
    } )
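A hedged usage sketch for the wrapper above; `filer` is an assumed connected client and `vol_infos` stands in for a prepared list of VolumeTransitionVolinfo objects, whose construction is not shown in the source:

    # Kick off a transition of copied 7-Mode volumes toward a Cluster-Mode Vserver.
    reply = filer.volume_transition(
        source_node='toaster-7m',        # illustrative 7-Mode source node name
        volumes=vol_infos,               # list of VolumeTransitionVolinfo (assumed prepared)
        destination_vserver_name='vs1',  # illustrative destination Vserver
        non_disruptive=False,
    )
    # The output is documented to carry an integer 'job-id' for polling progress.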
[ "def modify_cluster(ClusterId=None, StepConcurrencyLevel=None):\n pass", "def intTransition(self):\n \n state = self.state.get()\n\n if state == \"idle\":\n return PolicemanMode(\"working\")\n elif state == \"working\":\n return PolicemanMode(\"idle\")\n else:\n raise DEVSException(\\\n \"unknown state <%s> in Policeman internal transition function\"\\\n % state)", "def _SwitchStorageMode(self):\n operation = 'Switch ATFA device to Storage Mode'\n self._SendOperationStartEvent(operation)\n self.PauseRefresh()\n\n try:\n self.atft_manager.SwitchATFAStorage()\n except DeviceNotFoundException as e:\n e.SetMsg('No Available ATFA!')\n self._HandleException('W', e, operation)\n return\n except FastbootFailure as e:\n self._HandleException('E', e, operation, self.atft_manager.atfa_dev)\n return\n finally:\n self.ResumeRefresh()\n\n self._SendOperationSucceedEvent(operation)", "def current_operation(self):\n return EVO_STATE_TO_HA.get(self._status['systemModeStatus']['mode'])", "def UpdateClusterParamsFromFlags(self, cluster, job_name):\n cluster.mode = FLAGS.mode\n cluster.job = job_name\n cluster.task = FLAGS.task\n\n cluster.controller.name = FLAGS.controller_job\n cluster.controller.gpus_per_replica = FLAGS.controller_gpus\n\n cluster.worker.name = FLAGS.worker_job\n cluster.worker.replicas = FLAGS.worker_replicas\n cluster.worker.gpus_per_replica = FLAGS.worker_gpus\n cluster.worker.tpus_per_replica = FLAGS.worker_tpus\n cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts\n cluster.worker.devices_per_split = FLAGS.worker_split_size\n\n cluster.ps.name = FLAGS.ps_job\n cluster.ps.replicas = FLAGS.ps_replicas\n cluster.ps.gpus_per_replica = FLAGS.ps_gpus\n\n cluster.input.name = FLAGS.input_job\n cluster.input.replicas = FLAGS.input_replicas\n cluster.input.targets = FLAGS.input_targets\n\n cluster.evaler.name = FLAGS.evaler_job\n cluster.evaler.replicas = FLAGS.evaler_replicas\n cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus\n\n cluster.decoder.name = FLAGS.decoder_job\n cluster.decoder.replicas = FLAGS.decoder_replicas\n cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus", "async def test_shark_operation_modes(hass: HomeAssistant) -> None:\n ayla_api = MockAyla()\n shark_vac = _get_mock_shark_vac(ayla_api)\n coordinator = SharkIqUpdateCoordinator(hass, None, ayla_api, [shark_vac])\n shark = SharkVacuumEntity(shark_vac, coordinator)\n\n # These come from the setup\n assert isinstance(shark.is_docked, bool) and not shark.is_docked\n assert (\n isinstance(shark.recharging_to_resume, bool) and not shark.recharging_to_resume\n )\n # Go through the operation modes while it's \"off the dock\"\n await shark.async_start()\n assert shark.operating_mode == shark.state == STATE_CLEANING\n await shark.async_pause()\n assert shark.operating_mode == shark.state == STATE_PAUSED\n await shark.async_stop()\n assert shark.operating_mode == shark.state == STATE_IDLE\n await shark.async_return_to_base()\n assert shark.operating_mode == shark.state == STATE_RETURNING\n\n # Test the docked modes\n await shark.async_stop()\n shark.sharkiq.set_property_value(Properties.RECHARGING_TO_RESUME, 1)\n shark.sharkiq.set_property_value(Properties.DOCKED_STATUS, 1)\n assert isinstance(shark.is_docked, bool) and shark.is_docked\n assert isinstance(shark.recharging_to_resume, bool) and shark.recharging_to_resume\n assert shark.state == STATE_DOCKED\n\n shark.sharkiq.set_property_value(Properties.RECHARGING_TO_RESUME, 0)\n assert shark.state == STATE_DOCKED\n\n await shark.async_set_fan_speed(\"Eco\")\n assert 
shark.fan_speed == \"Eco\"\n await shark.async_set_fan_speed(\"Max\")\n assert shark.fan_speed == \"Max\"\n await shark.async_set_fan_speed(\"Normal\")\n assert shark.fan_speed == \"Normal\"\n\n assert set(shark.fan_speed_list) == {\"Normal\", \"Max\", \"Eco\"}", "def retype(self, context, volume, new_type, diff, host):\n\n qos = None\n # TODO: Can remove this once new_type is a VolumeType OVO\n new_type = volume_type.VolumeType.get_by_name_or_id(context,\n new_type['id'])\n previous_vol_replicated = volume.is_replicated()\n new_vol_replicated = (new_type and new_type.is_replicated())\n\n prev_repl_type = None\n new_repl_type = None\n\n # See if the type specifies the replication type. If we know it is\n # replicated but doesn't specify a type assume that it is async rep\n # for backwards compatibility. This applies to both old and new types\n\n if previous_vol_replicated:\n prev_repl_type = self._get_replication_type_from_vol_type(\n volume.volume_type)\n\n if new_vol_replicated:\n new_repl_type = self._get_replication_type_from_vol_type(new_type)\n if new_repl_type is None:\n new_repl_type = REPLICATION_TYPE_ASYNC\n\n # There are a few cases we care about, going from non-replicated to\n # replicated, from replicated to non-replicated, and switching\n # replication types.\n model_update = None\n if previous_vol_replicated and not new_vol_replicated:\n if prev_repl_type == REPLICATION_TYPE_ASYNC:\n # Remove from protection group.\n self._disable_async_replication(volume)\n model_update = {\n \"replication_status\": fields.ReplicationStatus.DISABLED\n }\n elif prev_repl_type in [REPLICATION_TYPE_SYNC,\n REPLICATION_TYPE_TRISYNC]:\n # We can't pull a volume out of a stretched pod, indicate\n # to the volume manager that we need to use a migration instead\n return False, None\n elif not previous_vol_replicated and new_vol_replicated:\n if new_repl_type == REPLICATION_TYPE_ASYNC:\n # Add to protection group.\n self._enable_async_replication(self._get_current_array(),\n volume)\n model_update = {\n \"replication_status\": fields.ReplicationStatus.ENABLED\n }\n elif new_repl_type in [REPLICATION_TYPE_SYNC,\n REPLICATION_TYPE_TRISYNC]:\n # We can't add a volume to a stretched pod, they must be\n # created in one, indicate to the volume manager that it\n # should do a migration.\n return False, None\n elif previous_vol_replicated and new_vol_replicated:\n if prev_repl_type == REPLICATION_TYPE_ASYNC:\n if new_repl_type in [REPLICATION_TYPE_SYNC,\n REPLICATION_TYPE_TRISYNC]:\n # We can't add a volume to a stretched pod, they must be\n # created in one, indicate to the volume manager that it\n # should do a migration.\n return False, None\n if prev_repl_type == REPLICATION_TYPE_SYNC:\n if new_repl_type == REPLICATION_TYPE_ASYNC:\n # We can't move a volume in or out of a pod, indicate to\n # the manager that it should do a migration for this retype\n return False, None\n elif new_repl_type == REPLICATION_TYPE_TRISYNC:\n # Add to trisync protection group\n self._enable_trisync_replication(self._get_current_array(),\n volume)\n if prev_repl_type == REPLICATION_TYPE_TRISYNC:\n if new_repl_type == REPLICATION_TYPE_ASYNC:\n # We can't move a volume in or out of a pod, indicate to\n # the manager that it should do a migration for this retype\n return False, None\n elif new_repl_type == REPLICATION_TYPE_SYNC:\n # Remove from trisync protection group\n self._disable_trisync_replication(\n self._get_current_array(), volume\n )\n\n # If we are moving to a volume type with QoS settings then\n # make sure the 
volume gets the correct new QoS settings.\n # This could mean removing existing QoS settings.\n current_array = self._get_current_array()\n qos = self._get_qos_settings(new_type)\n vol_name = self._generate_purity_vol_name(volume)\n if qos is not None:\n self.set_qos(current_array, vol_name, qos)\n else:\n current_array.set_volume(vol_name,\n iops_limit='',\n bandwidth_limit='')\n\n return True, model_update", "def switch_nodes(self):\n\n\t\t# Get current info\n\t\tnode_A=self.node_A\n\t\tport_A=self.port_A\n\t\tapp_id_A=self.app_id_A\n\t\tnode_B=self.node_B\n\t\tport_B=self.port_B\n\t\tapp_id_B=self.app_id_B\n\t\tDF=self.DF\n\n\t\t# Update\n\t\tself.node_A=node_B\n\t\tself.port_A=port_B\n\t\tself.app_id_A=app_id_B\n\t\tself.node_B=node_A\n\t\tself.port_B=port_A\n\t\tself.app_id_B=app_id_A\n\t\tif DF==0:\n\t\t\tself.DF=0\n\t\telif DF==1:\n\t\t\tself.DF=2\n\t\telif DF==2:\n\t\t\tself.DF=1\n\t\telse:\n\t\t\tlogging.warning(\"Unknown directionality flag\")\n\t\t\tself.DF=DF", "def read_operation_mode():\n\n debug(\"Reading operation mode...\")\n self.modbusclient.write_multiple_registers(0, str2duohex(\"v00101\"))\n operation_state = duohex2str(self.modbusclient.read_holdingregisters(0, 8))[7:]\n info(\"Operation mode was succesfully read!\")\n return int(operation_state)", "def solver_change(nHairShape,new_nucleus):\n new_nucleus=pymel.PyNode(nHairShape)\n nHairShape.currentState.disconnectAttr() \n nHairShape.startState.disconnectAttr()\n (source,index)=zip(maya_utils.next_available(new_nucleus.outputObjects)[0])\n source.connectAttr(nHairShape.nextState,force=True)\n pymel.PyNode(nHairShape.name()+'.currentState['+str(index)+']').connectAttr(nucleus.inputActive,nextAvailable=True,force=True)", "def test_02_upgrade_kubernetes_cluster(self):\n if self.setup_failed == True:\n self.fail(\"Setup incomplete\")\n global k8s_cluster\n k8s_cluster = self.getValidKubernetesCluster(version=self.kubernetes_version_v1)\n\n time.sleep(self.services[\"sleep\"])\n self.debug(\"Upgrading Kubernetes cluster with ID: %s\" % k8s_cluster.id)\n try:\n k8s_cluster = self.upgradeKubernetesCluster(k8s_cluster.id, self.kubernetes_version_v2.id)\n except Exception as e:\n self.deleteKubernetesClusterAndVerify(k8s_cluster.id, False, True)\n self.fail(\"Failed to upgrade Kubernetes cluster due to: %s\" % e)\n\n self.verifyKubernetesClusterUpgrade(k8s_cluster, self.kubernetes_version_v2.id)\n return", "def _parse_operation_mode(sdk_key, config):\n if sdk_key == 'localhost':\n _LOGGER.debug('Using Localhost operation mode')\n return 'localhost', 'localhost'\n\n if 'redisHost' in config or 'redisSentinels' in config:\n _LOGGER.debug('Using Redis storage operation mode')\n return 'consumer', 'redis'\n\n if config.get('storageType') is not None:\n if config.get('storageType').lower() == 'pluggable':\n _LOGGER.debug('Using Pluggable storage operation mode')\n return 'consumer', 'pluggable'\n\n _LOGGER.warning('You passed an invalid storageType, acceptable value is '\n '`pluggable`. 
Defaulting storage to In-Memory mode.')\n\n _LOGGER.debug('Using In-Memory operation mode')\n return 'standalone', 'memory'", "def get_switching_mode(self):\n self.board_socket.send(bytes.fromhex(\"10 00 01 00\"))\n temp = self.board_socket.recv(1024)\n return(temp[3])", "def make_transition(self):\n # next transition is a departure\n if self.state == 'COLD' or self.state == 'WARM':\n self.state = 'IDLE'\n self.is_busy = False\n self.is_cold = False\n\n # next transition is a termination\n elif self.state == 'IDLE':\n self.state = 'TERM'\n self.is_busy = False\n\n # if terminated\n else:\n raise Exception(\"Cannot make transition on terminated instance!\")\n\n return self.state", "def current_operation(self):\n if self.device.mode == 'cool':\n return STATE_COOL\n elif self.device.mode == 'heat':\n return STATE_HEAT\n elif self.device.mode == 'range':\n return STATE_AUTO\n elif self.device.mode == 'off':\n return STATE_OFF\n else:\n return STATE_UNKNOWN", "def create_cluster(module, switch, name, node1, node2, mod, CHANGED_FLAG, task, msg):\n cli = pn_cli(module)\n clicopy = cli\n\n if mod == 'l3-vrrp' or mod == 'l2-vrrp':\n spine_list = module.params['pn_spine_list']\n leaf_list = module.params['pn_leaf_list']\n\n cli += ' switch %s system-settings-show ' % node1\n cli += ' format auto-trunk '\n status = run_command(module, cli, task, msg).split()[1]\n if status != 'on':\n if (node1 in leaf_list and node2 in leaf_list) or \\\n (node1 in spine_list and node2 in spine_list):\n\n ports = get_ports(module, node1, node2, task, msg)\n trunk_name = node1 + '-' + node2 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node1, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n ports = get_ports(module, node2, node1, task, msg)\n trunk_name = node2 + '-' + node1 + '-trunk'\n ports_string = ','.join(ports)\n CHANGED_FLAG, output = create_trunk(module, node2, trunk_name, ports_string,\n CHANGED_FLAG, task, msg)\n cli = clicopy\n\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = list(set(run_command(module, cli, task, msg).split()))\n if name not in cluster_list:\n cli = clicopy\n cli += ' switch %s cluster-create name %s ' % (switch, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n if 'Success' in run_command(module, cli, task, msg):\n CHANGED_FLAG.append(True)\n return ' %s: Created %s \\n' % (switch, name), CHANGED_FLAG\n return '', CHANGED_FLAG", "def TransformPersistenceMode(self, *args):\n return _Graphic3d.Graphic3d_Structure_TransformPersistenceMode(self, *args)", "def switch_mode(self):\n if self.mode == SAVE_AS_MODE:\n self.set_mode(OPEN_MODE)\n elif self.mode == OPEN_MODE:\n self.set_mode(SAVE_AS_MODE)", "def upgrade_legacy(self):\n # Transform to version 1\n if not hasattr(self, 'version'):\n self._models = [\n self.conv1,\n self.conv2,\n self.conv3,\n self.conv4\n ]\n\n self.version = '1'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return charmap information for a specified volume.
def volume_charmap_get(self, volume): return self.request( "volume-charmap-get", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { 'charmap': [ basestring, False ], } )
[ "def volume_charmap_set(self, volume, charmap=None):\n return self.request( \"volume-charmap-set\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'charmap': [ charmap, 'charmap', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_options_list_info(self, volume):\n return self.request( \"volume-options-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'options': [ VolumeOptionInfo, True ],\n } )", "def getCharacterMapping(self):", "def volume_space_list_info(self, volume=None):\n return self.request( \"volume-space-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-space-infos': [ VolSpaceInfo, True ],\n } )", "def get_volume_by_name(self, volume_name):\n LOG.info(\"Getting volume details by name: '%s'\" % volume_name)\n resp = self.client.request(\n constants.GET,\n constants.GET_VOLUME_BY_NAME_URL.format(self.server_ip),\n payload=None, querystring=helpers.prepare_querystring(\n constants.SELECT_ALL_VOLUME,\n name=constants.EQUALS + volume_name\n )\n )\n\n if resp:\n LOG.info(\"Getting host volume mapping from vol ID: '%s'\"\n % resp[0]['id'])\n hlu_details = self.get_host_volume_mapping(volume_id=resp[0]['id'])\n resp[0]['hlu_details'] = hlu_details\n\n return resp", "def volume_info(mnode, volname):\n return RestClient(mnode).handle_request(\"GET\",\n \"/v1/volumes/%s\" % volname,\n httplib.OK, None)", "def create_volume_map():\n volume_map = {}\n for name, obj in inspect.getmembers(laserchicken.volume_specification):\n if inspect.isclass(obj) and issubclass(obj, Volume) and obj is not Volume:\n volume_map[obj.TYPE] = obj\n return volume_map", "def _get_host_map_info_by_lunid(self, lunid):\n\n cli_cmd = 'showhostmap -lun %(lunid)s' % {'lunid': lunid}\n out = self._execute_cli(cli_cmd)\n if re.search('Map Information', out):\n mapinfo = []\n try:\n for line in out.split('\\r\\n')[6:-2]:\n new_line = line.split()\n if len(new_line) < 5:\n continue\n mapinfo.append(new_line)\n except Exception:\n err_msg = (_('CLI out is not normal. CLI out: %s') % out)\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n\n return mapinfo\n else:\n return None", "def volume_footprint_list_info(self, volume=None):\n return self.request( \"volume-footprint-list-info\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'vol-footprint-infos': [ VolFootprintInfo, False ],\n } )", "def get_host_map_info(self, hostid):\n\n cli_cmd = 'showhostmap -host %(hostid)s' % {'hostid': hostid}\n out = self._execute_cli(cli_cmd)\n if re.search('Map Information', out):\n mapinfo = []\n try:\n for line in out.split('\\r\\n')[6:-2]:\n new_line = line.split()\n if len(new_line) < 5:\n continue\n mapinfo.append(new_line)\n except Exception:\n err_msg = (_('CLI out is not normal. 
CLI out: %s') % out)\n LOG.error(err_msg)\n raise exception.VolumeBackendAPIException(data=err_msg)\n # Sorted by host LUN ID.\n return sorted(mapinfo, key=lambda x: int(x[4]))\n else:\n return None", "def getChampMap():\n\tglobal champIdMap\n\tglobal keyURL\n\ttry:\n\t\tfile = requests.get(\"https://na.api.pvp.net/api/lol/static-data/na/v1.2/champion\" + keyURL + \"&dataById=true&champData=image\")\n\t\twrapperMap = file.json()\n\t\tif \"data\" in wrapperMap:\n\t\t\treturn wrapperMap\n\texcept:\n\t\treturn champIdMap", "def get_host_volume_mapping(self, volume_id):\n LOG.info(\"Getting host mapping with vol: '%s'\" % volume_id)\n return self.client.request(\n constants.GET,\n constants.HOST_VOLUME_MAPPING_URL.format(self.server_ip),\n payload=None, querystring=helpers.prepare_querystring(\n constants.SELECT_ALL_HOST_VOLUME_MAPPING,\n volume_id=constants.EQUALS +\n volume_id\n )\n )", "def __get_pv_attrs(k8s_conf, pv_name):\n core_client = k8s_core_client(k8s_conf)\n pv_list = core_client.list_persistent_volume()\n logger.debug('pv_list - %s', pv_list)\n for pv in pv_list.items:\n logger.debug('pv - %s', pv)\n if pv.metadata.name == pv_name:\n return pv.spec.capacity.get('storage'), pv.spec.host_path.path\n return None, None", "def getChampMapByKeys():\n\tglobal champIdMap\n\tglobal keyURL\n\ttry:\n\t\tfile = requests.get(\"https://na.api.pvp.net/api/lol/static-data/na/v1.2/champion\" + keyURL + \"&champData=image\")\n\t\twrapperMap = file.json()\n\t\tif \"data\" in wrapperMap:\n\t\t\treturn wrapperMap[\"data\"]\n\texcept:\n\t\treturn champIdMap", "def get_map(self):\n if not self._get_disk_map():\n rlog_warning('Could not get the disk mapping')\n self.__map = ''\n return self.__map.rstrip()", "def volume_wafl_info(self):\n return self.request( \"volume-wafl-info\", {\n }, {\n 'root-volume': [ basestring, False ],\n 'disk-types': [ basestring, False ],\n 'snapshots-max': [ int, False ],\n 'checksum-types': [ basestring, False ],\n } )", "def show_asm_volumes(self):\n sql = \"select NAME from v$asm_diskgroup_stat ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#ASMVOLUME}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps({'data': lst}) )", "def getTrueCryptMapperDevice(self):\n mapperDevice = \"\"\n virtDevPrefix = \"Virtual Device: \"\n args = self.truecrypt + [\"--non-interactive\", \"-l\", \"-v\"]\n args.append(self.volume)\n tc = self.callProcess(args)\n for line in tc.stdout.readlines():\n if line.startswith(virtDevPrefix):\n mapperDevice = line[len(virtDevPrefix):].rstrip()\n break\n self.waitProcess(tc)\n return mapperDevice", "def _get_cinder_meters_info(self):\n\n # TODO(lsmola) Unless the Ceilometer will provide the information\n # below, I need to define it as a static here. I will be joining this\n # to info that I am able to obtain from Ceilometer meters, hopefully\n # some day it will be supported all.\n return datastructures.SortedDict([\n ('volume', {\n 'type': _(\"Cinder\"),\n 'label': '',\n 'description': _(\"Existence of volume\"),\n }),\n ('volume.size', {\n 'type': _(\"Cinder\"),\n 'label': '',\n 'description': _(\"Size of volume\"),\n }),\n ])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Start RAID mirror verification on the named traditional volume. RAID mirror verification compares the data in both plexes of a mirrored aggregate (whether it's freestanding or embedded in a traditional volume). In the default case, any blocks that differ are logged and no changes are made. The fix-plex option is used to fix any mismatches. It specifies which plex to fix. If no name is given, then RAID mirror verification is started on all online aggregates (including those embedded in traditional volumes). Use either the "aggr-verify-list-info" or "volume-verify-list-info" API to check RAID mirror verification status. If the fix-plex option is used, then a name must be specified.
def volume_verify_start(self, volume=None, fix_plex=None, log_only=None): return self.request( "volume-verify-start", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], 'fix_plex': [ fix_plex, 'fix-plex', [ int, 'None' ], False ], 'log_only': [ log_only, 'log-only', [ bool, 'None' ], False ], }, { } )
[ "def execute(self):\n to_trigger = ValidateVerification(content = self.content).run()\n\n logger.info('Sending gp_primarymirror requests...')\n pool = WorkerPool(min(len(to_trigger), self.batch_default))\n\n for pseg in to_trigger:\n host, port = pseg.getSegmentHostName(), pseg.getSegmentPort()\n cmd = SendFilerepVerifyMessage(name = 'verify %s' % host, host = host, port = port,\n token = self.token,\n full = self.full,\n verify_file = self.verify_file,\n verify_dir = self.verify_dir,\n abort = self.abort,\n suspend = self.suspend,\n resume = self.resume,\n ignore_dir = self.ignore_dir,\n ignore_file = self.ignore_file,\n results = self.results,\n results_level = self.results_level)\n logger.debug(\"Sending request to %s:%d\" % (host, port))\n pool.addCommand(cmd)\n\n logger.info('Waiting for gp_primarymirror commands to complete...')\n pool.wait_and_printdots(len(to_trigger))\n\n for cmd in pool.getCompletedItems():\n res = cmd.get_results()\n if not res.wasSuccessful():\n logger.error('Failed to send gp_primarymirror message to %s:%s' % (cmd.host, cmd.port))\n logger.error('Error: %s' % res.stderr)\n raise TriggerGpPrimaryMirrorFailure()\n logger.info('gp_primarymirror messages have been triggered succesfully.')", "def start(memsize=25, journal=True, verify=False, restrict=False,\n server=False, metaname=None, transparent=False,\n unmapped=False, pngonly=False, quiet=False, linebuffer=False):\n # check memsize\n try:\n flt_memsize = float(memsize)\n if flt_memsize <= 0.0:\n raise ValueError\n except:\n raise ValueError(\"memsize must be a positive number\")\n # check metaname\n if metaname == None:\n str_metaname = \"\"\n elif not isinstance(metaname, str):\n raise ValueError(\"metaname must either be None or a string\")\n elif metaname.isspace():\n str_metaname = \"\"\n else:\n str_metaname = metaname\n\n # Get the known viewer bindings\n knownengines = graphbind.knownPyFerretEngines()\n # Add PViewerPQPyFerretBindings, as \"PipedViewerPQ\" to the known bindings\n if not (\"PipedViewerPQ\" in knownengines):\n graphbind.addPyFerretBindings(\"PipedViewerPQ\",\n pipedviewer.pyferretbindings.PViewerPQPyFerretBindings)\n # Add PImagerPQPyFerretBindings, as \"PipedImagerPQ\" to the known bindings\n if not (\"PipedImagerPQ\" in knownengines):\n graphbind.addPyFerretBindings(\"PipedImagerPQ\",\n pipedviewer.pyferretbindings.PImagerPQPyFerretBindings)\n\n # the actual call to ferret's start\n success = libpyferret._start(flt_memsize, bool(journal), bool(verify),\n bool(restrict), bool(server), str_metaname,\n bool(transparent), bool(unmapped),\n bool(pngonly), bool(quiet), bool(linebuffer))\n if success:\n # register the libpyferret._quit function with atexit to ensure\n # open viewer windows do not hang a Python shutdown\n atexit.register(libpyferret._quit)\n\n # Use tab completion for readline (for Ferret) by default\n readline.parse_and_bind('tab: complete');\n\n # Execute the $PYTHONSTARTUP file, if it exists and -secure not given\n if not restrict:\n try:\n startfilename = os.getenv('PYTHONSTARTUP', '')\n if startfilename:\n if sys.version_info[0] > 2:\n exec(compile(open(startfilename).read(), startfilename, 'exec'));\n else:\n execfile(startfilename)\n except Exception:\n pass\n\n return success", "def start(tolerant):\n # report operation\n llecho('Activating all existing LVM volume groups')\n\n # tolerant mode on: LVM with --partial\n if tolerant == True:\n status = run('%s %s' % (CMD_START_LVM, '--partial'))\n\n # tolerant mode off: start LVM manually\n else:\n status = 
run(CMD_START_LVM)\n\n # cannot start volume groups: fail\n if status != 0:\n llecho('Error: cannot activate LVM volume groups')\n sys.exit(1)", "def verify(self, verifier: verify_mod.SnapshotVerifier) -> None:\n with self.edenfs() as eden:\n eden.start()\n print(\"Verifing snapshot data:\")\n print(\"=\" * 60)\n self.verify_snapshot_data(verifier, eden)\n print(\"=\" * 60)", "def refine_candid(candid, indexprefix='new', ddm=50, npix_max=8192, npix_max_orig=None, mode='deployment', devicenum=None, cl=None):\n\n from rfpipe import reproduce\n from realfast import elastic\n\n doc = elastic.get_doc(indexprefix+'cands', Id=candid)\n if 'sdmname' not in doc['_source']:\n logger.warn(\"No SDM found for candId {0}\".format(candid))\n return\n sdmname = doc['_source']['sdmname']\n prefsname = doc['_source']['prefsname']\n prefsdoc = elastic.get_doc(indexprefix+'preferences', Id=prefsname)\n if npix_max_orig is None:\n npix_max_orig = prefsdoc['_source']['npix_max']\n\n workdir = '/lustre/evla/test/realfast/archive/refined'\n sdmloc0 = '/home/mctest/evla/mcaf/workspace/'\n sdmloc0b = '/home/mctest/evla/mcaf/workspace/realfast-archived/'\n sdmloc1 = '/lustre/evla/test/realfast/archive/sdm_archive'\n if os.path.exists(os.path.join(sdmloc0, sdmname)):\n sdmname_full = os.path.join(sdmloc0, sdmname)\n elif os.path.exists(os.path.join(sdmloc0b, sdmname)):\n sdmname_full = os.path.join(sdmloc0b, sdmname)\n else:\n sdmname_full = os.path.join(sdmloc1, sdmname)\n# sdmname_full = os.path.join(sdmloc0, sdmname) if os.path.exists(os.path.join(sdmloc0, sdmname)) else os.path.join(sdmloc1, sdmname)\n assert os.path.exists(sdmname_full)\n dm = doc['_source']['canddm']\n scanId = doc['_source']['scanId']\n refined_png = 'cands_{0}.1.1_refined.png'.format(sdmname)\n refined_loc = os.path.join(workdir, refined_png)\n refined_url = os.path.join(_candplot_url_prefix, 'refined', refined_png)\n\n def move_refined_plots(cc):\n if os.path.exists(refined_loc):\n logger.info(\"Refined candidate plot for candId {0} and sdm {1} found. Copying...\".format(candid, sdmname))\n moveplots('/lustre/evla/test/realfast/archive/refined/', sdmname, destination='claw@nmpost-master:/lustre/aoc/projects/fasttransients/realfast/plots/refined')\n else:\n logger.info(\"No refinement plot found for candId {0}.\".format(candid))\n\n# Ids = elastic.get_ids(indexprefix+'cands', sdmname)\n# if cc is not None:\n if os.path.exists(refined_loc):\n if len(cc):\n url = refined_url\n logger.info(\"Updating refinement plot for new new refined_url.\")\n else:\n url = 'No candidate found during refinement'\n logger.info(\"Updating refinement plot for no refined_url.\")\n else:\n url = 'No candidate found during refinement'\n logger.info(\"Updating refinement plot for no refined_url.\")\n\n# for Id in Ids:\n elastic.update_field(indexprefix+'cands', 'refined_url', url, Id=candid)\n for k,v in elastic.gettags(indexprefix, candid).items(): # remove notify tag\n if 'notify' in v: \n newtags = ','.join([tag for tag in v.split(',') if tag != 'notify'])\n elastic.update_field(indexprefix+'cands', k, newtags, Id=candid)\n\n # decide whether to submit or update index for known plots\n if os.path.exists(refined_loc):\n logger.info(\"Refined candidate plot for candId {0} and sdm {1} exists locally. 
Skipping.\".format(candid, sdmname))\n return\n\n if cl is not None:\n logger.info(\"Submitting refinement for candId {0} and sdm {1}\".format(candid, sdmname))\n workernames = [v['id'] for k, v in cl.scheduler_info()['workers'].items() if 'fetch' in v['id']]\n assert len(workernames)\n\n fut = cl.submit(reproduce.refine_sdm, sdmname_full, dm, preffile='/lustre/evla/test/realfast/realfast.yml',\n npix_max=npix_max, npix_max_orig=npix_max_orig,\n refine=True, classify=True, ddm=ddm, workdir=workdir,\n resources={\"GPU\": 1, \"MEMORY\": 10e9}, devicenum=devicenum, retries=1, workers=workernames)\n\n fut2 = cl.submit(move_refined_plots, fut)\n distributed.fire_and_forget(fut2)\n else:\n logger.info(\"Running refinement for candId {0} and sdm {1}\".format(candid, sdmname))\n cc = reproduce.refine_sdm(sdmname_full, dm, preffile='/lustre/evla/test/realfast/realfast.yml', npix_max_orig=npix_max_orig,\n npix_max=npix_max, refine=True, classify=True, ddm=ddm, workdir=workdir, devicenum=devicenum)\n move_refined_plots(cc)", "def PreverifyApks(self):\n logging.info('enabling preverify...')\n # no longer applicable in ART world.\n if self.GetApiVersion() <= 20:\n # v=a,o=v means -Xverify:all -Xdexopt:verified\n self.ExecOnDevice(['setprop', 'dalvik.vm.dexopt-flags', 'v=a,o=v'])\n self._RestartAndroid()\n self._PollEmulatorStatus()\n if not self._direct_boot:\n self._UnlockScreen()", "def start(self, logfile_name):\n\n self._verify_not_running()\n\n self.logfile = logfile_name\n\n process_args = [\n \"--port\", str(self.port),\n \"--dir\", \".\"\n ]\n\n if self.storage_engine == \"ForestDB\" or self.storage_engine == \"ForestDB+Encryption\":\n process_args.append(\"--storage\")\n process_args.append(\"ForestDB\")\n else:\n process_args.append(\"--storage\")\n process_args.append(\"SQLite\")\n\n if self.storage_engine == \"SQLCipher\" or self.storage_engine == \"ForestDB+Encryption\":\n log_info(\"Using Encryption ...\")\n db_flags = []\n for db_name in REGISTERED_CLIENT_DBS:\n db_flags.append(\"--dbpassword\")\n db_flags.append(\"{}=pass\".format(db_name))\n process_args.extend(db_flags)\n\n # The package structure for LiteServ is different pre 1.4. Handle for this case\n if has_dot_net4_dot_5(self.version_build):\n binary_path = \"couchbase-lite-net-msft-{}-liteserv/net45/LiteServ.exe\".format(self.version_build)\n else:\n binary_path = \"couchbase-lite-net-msft-{}-liteserv/LiteServ.exe\".format(self.version_build)\n\n joined_args = \" \".join(process_args)\n log_info(\"Starting LiteServ {} with: {}\".format(binary_path, joined_args))\n\n # Start LiteServ via Ansible on remote machine\n status = self.ansible_runner.run_ansible_playbook(\n \"start-liteserv-msft.yml\",\n extra_vars={\n \"binary_path\": binary_path,\n \"launch_args\": joined_args,\n }\n )\n if status != 0:\n raise LiteServError(\"Could not stop Liteserv\")\n\n self._verify_launched()\n\n return \"http://{}:{}\".format(self.host, self.port)", "def lint(strict=True):\n opts = '' if strict else '-l isort,pydocstyle,radon,pycodestyle,pyflakes'\n files = 'tests setup.py napps_server'\n print('Pylama is running. 
It may take a while...')\n cmd = 'pylama {} {}'.format(opts, files)\n try:\n check_call(cmd, shell=True)\n print('Low grades (<= C) for Maintainability Index (if any):')\n check_call('radon mi --min=C ' + files, shell=True)\n except CalledProcessError as e:\n print('Linter check failed: ' + e.cmd)\n sys.exit(e.returncode)", "def startMonitor(enable = True, dataFilename = None, remoteHost = None, simulator = \"atlas\"):\r\n \r\n if not enable:\r\n return\r\n # end if\r\n\r\n try:\r\n import slalomWindow\r\n\r\n # tkinter is not always installed by default (e.g. in RedHat Enterprise Linux 7+ or CentOS 6.x)...\r\n # ...if not installed, the optimizer monitor cannot be started...\r\n # ...(just (re)install it or install a more recent python/numpy/scipy/matplotlib/tk version...\r\n # ... and restart OptimizerMonitor).\r\n if not slalomWindow.TkFound:\r\n return\r\n # end if\r\n\r\n cmdT = [pythonInterpreter, \"slalomMonitor.py\"]\r\n if dataFilename is not None:\r\n cmdT.append(dataFilename)\r\n if remoteHost is not None:\r\n cmdT.append(remoteHost)\r\n #end if\r\n #end if\r\n cmdT.append(simulator)\r\n curDir = os.path.dirname(os.path.realpath(__file__))\r\n subprocess.Popen(cmdT, shell=False, cwd=curDir)\r\n except:\r\n pass\r\n # end try\r", "async def verify(self, ctx, member: discord.Member, tag, *, options=None):\n # Check options\n if options is None:\n options = ''\n options = options.split(' ')\n\n include_tourney = '--notourney' not in options\n\n # Check clan info\n sctag = SCTag(tag)\n if not sctag.valid:\n await self.bot.say(sctag.invalid_error_msg)\n return\n\n tag = sctag.tag\n player = await self.fetch_player_profile(tag)\n try:\n player_clan_tag = player[\"clan\"][\"tag\"]\n except KeyError:\n await self.bot.say(\"Cannot find clan tag in API. 
Aborting…\")\n return\n\n server = ctx.message.server\n clans = self.settings[server.id][\"clans\"]\n if player_clan_tag not in clans:\n await self.bot.say(\"User is not in one of our clans, or the clan has not be set by MODs.\")\n return\n\n clan_settings = self.settings[server.id][\"clans\"][player_clan_tag]\n\n # Assign roles\n roles = [\"Trusted\", clan_settings[\"role_name\"]]\n if include_tourney:\n roles.append('Tournaments')\n await self.changerole(ctx, member, *roles)\n\n # Rename member to IGN (role_nick)\n nick = \"{ign} ({role_nick})\".format(\n ign=player[\"name\"],\n role_nick=clan_settings[\"role_nick\"]\n )\n\n try:\n await self.bot.change_nickname(member, nick)\n await self.bot.say(\"Renamed {} to {}\".format(member, nick))\n except discord.errors.Forbidden:\n await self.bot.say(\"I do not have permission to change the nick of {}\".format(member))", "def do_restart_gridpack(self, line):\n \n \n args = self.split_arg(line)\n # Check argument's validity\n self.check_survey(args)\n \n # initialize / remove lhapdf mode\n #self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'))\n #self.configure_directory()\n \n gensym = gen_ximprove.gensym(self)\n \n min_precision = 1.0\n resubmit_zero=False\n if '--precision=' in line:\n s = line.index('--precision=') + len('--precision=')\n arg=line[s:].split(1)[0]\n min_precision = float(arg)\n \n if '--restart_zero' in line:\n resubmit_zero = True\n \n \n gensym.resubmit(min_precision, resubmit_zero)\n self.monitor(run_type='All jobs submitted for gridpack', html=True)\n\n #will be done during the refine (more precisely in gen_ximprove)\n cross, error = sum_html.make_all_html_results(self)\n self.results.add_detail('cross', cross)\n self.results.add_detail('error', error) \n self.exec_cmd(\"print_results %s\" % self.run_name,\n errorhandling=False, printcmd=False, precmd=False, postcmd=False) \n \n self.results.add_detail('run_statistics', dict(gensym.run_statistics))\n\n \n #self.exec_cmd('combine_events', postcmd=False)\n #self.exec_cmd('store_events', postcmd=False)\n self.exec_cmd('decay_events -from_cards', postcmd=False)\n self.exec_cmd('create_gridpack', postcmd=False)", "def volume_scrub_start(self, name=None):\n return self.request( \"volume-scrub-start\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def run_snpeff(alignment_group, vcf_source_tool):\n assert vcf_source_tool in MAP_VCF_SOURCE_TOOL_TO_ORIGINAL_VCF_DATASET_TYPE\n\n # Get the reference genome uid to get the config path and snpeff genome name\n ref_genome = alignment_group.reference_genome\n ref_genome_uid = alignment_group.reference_genome.uid\n\n source_vcf_dataset_type = (\n MAP_VCF_SOURCE_TOOL_TO_ORIGINAL_VCF_DATASET_TYPE[vcf_source_tool])\n source_vcf_dataset = get_dataset_with_type(alignment_group,\n type=source_vcf_dataset_type)\n assert source_vcf_dataset is not None\n vcf_input_filename = source_vcf_dataset.get_absolute_location()\n assert os.path.exists(vcf_input_filename)\n\n # Make sure vcf has at least one record. If not, return.\n with open(vcf_input_filename) as unannotated_fh:\n vcf_reader = vcf.Reader(unannotated_fh)\n try:\n vcf_reader.next()\n except StopIteration:\n # No variants called. 
No need to do SnpEff.\n return\n\n # Prepare a directory to put the output file.\n vcf_output_filename = get_snpeff_vcf_output_path(alignment_group,\n vcf_source_tool)\n\n snpeff_args = [\n 'java',\n '-jar', settings.SNPEFF_JAR_PATH,\n 'eff',\n '-v',\n '-i',\n 'vcf',\n '-o',\n 'vcf',\n '-c', os.path.join(get_snpeff_config_path(ref_genome),'snpeff.config'),\n '-ud', str(settings.SNPEFF_UD_INTERVAL_LENGTH),\n '-q',\n '-noLog',\n# '-t', str(settings.SNPEFF_THREADS),\n ref_genome_uid,\n vcf_input_filename\n ]\n\n print ' '.join(snpeff_args)\n\n with open(vcf_output_filename, 'w') as fh_out:\n snpeff_proc = subprocess.Popen(\n snpeff_args,\n stdout=subprocess.PIPE)\n convert_snpeff_info_fields(snpeff_proc.stdout, fh_out)\n\n return vcf_output_filename", "def call_dReal(verifier):\n path = verifier.path\n file = verifier.file_name\n client = docker.from_env()\n volume_dict = {path: {'bind': '/data', 'mode': 'ro'}}\n container = client.containers.run(\"dreal/dreal4\",\n \" dreal data/\" + file + \" --model\",\n volumes=volume_dict,\n detach=True)\n if verifier.t_max is not None:\n container.stop(timeout=verifier.t_max)\n outputdReal = container.logs().decode(\"utf-8\")\n container.remove()\n if outputdReal == '':\n print(\"dReal time out: {}\".format(verifier.t_max))\n outputdReal = 'time-out'\n\n return outputdReal", "def start(\n release,\n openstack_vip,\n sql_pass,\n sql_ip,\n rabbit_ips_list,\n rabbit_pass,\n ceph,\n kolla_ansible_dir,\n cloud_name,\n):\n click.echo(\"starting arcus manager\")\n enable_ceph = \"true\" if ceph else \"false\"\n arcus_mgr.start(\n release,\n openstack_vip,\n sql_pass,\n sql_ip,\n rabbit_ips_list,\n rabbit_pass,\n enable_ceph,\n kolla_ansible_dir,\n cloud_name,\n )", "def check_launch(self, args, options):\n # modify args in order to be DIR \n # mode being either standalone or madevent\n \n if options['force']:\n self.force = True\n \n \n if not args:\n args.append('auto')\n return\n \n if len(args) > 1:\n self.help_launch()\n raise self.InvalidCmd, 'Invalid Syntax: Too many argument'\n\n elif len(args) == 1:\n if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO','auto']:\n raise self.InvalidCmd, '%s is not a valid mode, please use \"LO\", \"NLO\", \"aMC@NLO\" or \"aMC@LO\"' % args[0]\n mode = args[0]\n \n # check for incompatible options/modes\n if options['multicore'] and options['cluster']:\n raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \\\n ' are not compatible. Please choose one.'\n if mode == 'NLO' and options['reweightonly']:\n raise self.InvalidCmd, 'option -r (--reweightonly) needs mode \"aMC@NLO\" or \"aMC@LO\"'", "def run(hostname):\n\n logging.info('Run existing vm')\n run_cmd(\"VBoxManage startvm \"+hostname)", "def startPuckDetector(self, i_reconfigure):\n builder = PuckDetectorBuilder(self.MODE, 30, i_reconfigure)\n self.stopCurrentPuckDetector()\n self.puckDetector = builder.build()\n self.puckDetector.findPuck()", "def loadName(self, name):\n if not isinstance(name, str):\n return False\n if len(name) > 15:\n return False\n\n conn = Connection(self.host)\n commandString = ':modelld0{0}#'.format(name)\n suc, response, numberOfChunks = conn.communicate(commandString)\n if not suc:\n return False\n\n if response[0] != '1':\n return False\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Suspend RAID mirror verification on the named traditional volume. If no name is given, suspend mirror verification on all aggregates (including those embedded in traditional volumes) currently being verified.
def volume_verify_suspend(self, volume=None): return self.request( "volume-verify-suspend", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], }, { } )
[ "def volume_scrub_suspend(self, name=None):\n return self.request( \"volume-scrub-suspend\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_scrub_resume(self, name=None):\n return self.request( \"volume-scrub-resume\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_restrict_async(self, volume_name):\n return self.request( \"volume-restrict-async\", {\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def notclustered(self):\n\n LinuxVolumeManager.has_lvm()\n LinuxVolumeManager.lvm('vgchange', '-cn', str(self.getAttribute(\"name\")))", "def volume_scrub_start(self, name=None):\n return self.request( \"volume-scrub-start\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )", "def suspend_resume_vm_test(vm_name):\n assert ll_vms.suspendVm(True, vm_name), \"Failed to suspend vm\"\n logging.info(\"VM status: %s\", ll_vms.get_vm_state(vm_name=vm_name))\n assert ll_vms.startVm(\n positive=True, vm=vm_name,\n wait_for_status=config.VM_UP,\n timeout=2 * config_virt.VM_ACTION_TIMEOUT\n )\n return True", "def unmanage(self, volume):\n\n vol_name = self._get_vol_name(volume)\n if len(vol_name + UNMANAGED_SUFFIX) > MAX_VOL_LENGTH:\n unmanaged_vol_name = vol_name[:-len(UNMANAGED_SUFFIX)] + \\\n UNMANAGED_SUFFIX\n else:\n unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX\n LOG.info(\"Renaming existing volume %(ref_name)s to %(new_name)s\",\n {\"ref_name\": vol_name, \"new_name\": unmanaged_vol_name})\n self._rename_volume_object(vol_name, unmanaged_vol_name)", "def volume_offline_async(self, volume_name):\n return self.request( \"volume-offline-async\", {\n 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def volume_mirror(self, volume, mirror_disks=None, force=None, victim_volume=None):\n return self.request( \"volume-mirror\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n 'victim_volume': [ victim_volume, 'victim-volume', [ basestring, 'None' ], False ],\n }, {\n 'bad-disks': [ DiskInfo, True ],\n } )", "def _disable_async_replication(self, volume):\n\n current_array = self._get_current_array()\n LOG.debug(\"Disabling replication for volume %(id)s residing on \"\n \"array %(backend_id)s.\",\n {\"id\": volume[\"id\"],\n \"backend_id\": current_array.backend_id})\n try:\n current_array.set_pgroup(self._replication_pg_name,\n remvollist=([self._get_vol_name(volume)]))\n except purestorage.PureHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 400 and\n ERR_MSG_COULD_NOT_BE_FOUND in err.text):\n ctxt.reraise = False\n LOG.warning(\"Disable replication on volume failed: \"\n \"already disabled: %s\", err.text)\n else:\n LOG.error(\"Disable replication on volume failed with \"\n \"message: %s\", err.text)", "def promote_original_master(s, name):\n s.execute_command('SENTINEL', 'FAILOVER', name)", "def vm_ejectiso(vmname: str):\n subprocess.run(\"virsh --connect qemu:///system change-media {0} sda 
--eject --config\".format(vmname), shell=True, check=False)", "def trigger_heal_full(mnode, volname):\n cmd = \"gluster volume heal %s full\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n return False\n\n return True", "def TakeSnapshot(self, name='default_boot'):\n self._SnapshotPresent().value = 'True'\n telnet = self._ConnectToEmulatorConsole()\n telnet.write('avd stop\\n')\n telnet.write('avd snapshot save %s\\n' % name)\n telnet.write('exit\\n')\n telnet.read_all()\n self._vm_running = False", "def suspend(shelf=None):\n\n _act_on_guests(shelf, \"suspend\")", "def clustered(self):\n\n LinuxVolumeManager.has_lvm()\n LinuxVolumeManager.lvm('vgchange', '-cy ', str(self.getAttribute(\"name\")))", "def volume_restrict(self, name, cifs_delay=None):\n return self.request( \"volume-restrict\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n 'cifs_delay': [ cifs_delay, 'cifs-delay', [ int, 'None' ], False ],\n }, {\n } )", "def disable_heal(mnode, volname):\n cmd = \"gluster volume heal %s disable\" % volname\n ret, _, _ = g.run(mnode, cmd)\n if ret != 0:\n return False\n\n return True", "def volume_scrub_stop(self, name=None):\n return self.request( \"volume-scrub-stop\", {\n 'name': [ name, 'name', [ basestring, 'None' ], False ],\n }, {\n } )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Volume Storage Service Rename
def volume_storage_service_rename(self, volume, storage_service, new_storage_service): return self.request( "volume-storage-service-rename", { 'volume': [ volume, 'volume', [ basestring, 'None' ], False ], 'storage_service': [ storage_service, 'storage-service', [ basestring, 'None' ], False ], 'new_storage_service': [ new_storage_service, 'new-storage-service', [ basestring, 'None' ], False ], }, { } )
[ "def rename(nitro, service):\n __service = NSService()\n __service.set_name(service.get_name())\n __service.set_newname(service.get_newname())\n return __service.perform_operation(nitro, \"rename\")", "def rename(cls, client, resource, new_servicename) :\n\t\ttry :\n\t\t\trenameresource = gslbservice()\n\t\t\tif type(resource) == cls :\n\t\t\t\trenameresource.servicename = resource.servicename\n\t\t\telse :\n\t\t\t\trenameresource.servicename = resource\n\t\t\treturn renameresource.rename_resource(client,new_servicename)\n\t\texcept Exception as e :\n\t\t\traise e", "def test_filesystem_rename(self):\n pool_name = make_test_pool(StratisCertify.DISKS[0:1])\n filesystem_name = make_test_filesystem(pool_name)\n fs_name_rename = fs_n()\n self.unittest_command(\n [\n _STRATIS_CLI,\n \"filesystem\",\n \"rename\",\n pool_name,\n filesystem_name,\n fs_name_rename,\n ],\n 0,\n True,\n True,\n )", "def rename(cls, client, resource, new_name) :\n try :\n renameresource = service()\n if type(resource) == cls :\n renameresource.name = resource.name\n else :\n renameresource.name = resource\n return renameresource.rename_resource(client,new_name)\n except Exception as e :\n raise e", "def volume_rename(self, volume, new_volume_name):\n return self.request( \"volume-rename\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],\n }, {\n } )", "def test_filesystem_rename_same_name(self):\n pool_name = make_test_pool(StratisCertify.DISKS[0:1])\n filesystem_name = make_test_filesystem(pool_name)\n self.unittest_command(\n [\n _STRATIS_CLI,\n \"filesystem\",\n \"rename\",\n pool_name,\n filesystem_name,\n filesystem_name,\n ],\n 1,\n False,\n True,\n )", "def _rename_volume_object(self, old_name, new_name, raise_not_exist=False):\n current_array = self._get_current_array()\n try:\n current_array.rename_volume(old_name, new_name)\n except purestorage.PureHTTPError as err:\n with excutils.save_and_reraise_exception() as ctxt:\n if (err.code == 400 and\n ERR_MSG_NOT_EXIST in err.text):\n ctxt.reraise = raise_not_exist\n LOG.warning(\"Unable to rename %(old_name)s, error \"\n \"message: %(error)s\",\n {\"old_name\": old_name, \"error\": err.text})\n return new_name", "def _rename_cache(self):\n try:\n self.state.check(\"migration\", \"fs_cache_renamed\", \"ok\")\n return\n except StateCheckError:\n pass\n\n poolname = '{}_fscache'.format(self.node_sal.name)\n try:\n sp = self.node_sal.storagepools.get(poolname)\n except ValueError:\n self.logger.info(\"storage pool %s doesn't exist on node %s\" % (poolname, self.node_sal.name))\n return\n\n if sp.mountpoint:\n self.logger.info(\"rename mounted volume %s...\" % poolname)\n cmd = 'btrfs filesystem label %s sp_zos-cache' % sp.mountpoint\n else:\n self.logger.info(\"rename unmounted volume %s...\" % poolname)\n cmd = 'btrfs filesystem label %s sp_zos-cache' % sp.devices[0]\n result = self.node_sal.client.system(cmd).get()\n if result.state == \"SUCCESS\":\n self.logger.info(\"Rebooting %s ...\" % self.node_sal.name)\n self.state.set(\"migration\", \"fs_cache_renamed\", \"ok\")\n self.reboot()\n raise RuntimeWarning(\"Aborting monitor because system is rebooting for a migration.\")\n self.logger.error('error: %s' % result.stderr)", "def do_rename(self, args):\n lb = self.findlb(args.loadbalancer)\n lb.name = args.name\n lb.update()", "def _getNewStorageControllerName(self, type):\n baseNames = {\n Constants.StorageBus_IDE : \"IDE Controller\",\n 
Constants.StorageBus_SATA : \"SATA Controller\",\n Constants.StorageBus_SCSI : \"SCSI Controller\",\n Constants.StorageBus_Floppy : \"Floppy Controller\"\n }\n if not baseNames.has_key(type):\n # Todo: Use correct argument type here\n raise Exception(\"Invalid type '%d'\" % type)\n count = 1\n name = baseNames[type]\n while self.doesStorageControllerExist(name):\n count += 1\n name = \"%s %d\" % (baseNames[type], count)\n return name", "def _rename_machine(self, machine, node, name):\n self.connection.ex_rename_container(node, name)", "def name_mac(new_name):\n name_types = ['HostName', 'LocalHostName', 'ComputerName']\n for name_type in name_types:\n subprocess.call(['scutil', '--set', name_type, new_name])", "def convert_name(name, to_version=False):\n if to_version:\n return name.split('-')[-1].replace('.ova', '')\n else:\n return 'ECS-Connection-Manager-{}.ova'.format(name)", "def rename(self, new_name):\n\n if not new_name:\n raise LvmVolumeError(_(\"No new name for logical volume given.\"))\n\n new_name = str(new_name).strip()\n if new_name == '':\n raise LvmVolumeError(_(\"Empty name for logical volume given.\"))\n\n if new_name == self.name:\n LOG.debug(_(\n \"New logical volume name is equal the current name %r.\"), new_name)\n return\n\n cur_cname = self.vgname + '/' + self.name\n new_cname = self.vgname + '/' + new_name\n\n cmd_params = [\n 'lvrename',\n self.vgname,\n self.name,\n new_name\n ]\n\n LOG.info(_(\"Renaming logical volume %(old)r to %(new)r.\") % {\n 'old': cur_cname, 'new': new_cname})\n\n (ret_code, std_out, std_err) = self.exec_lvm(\n cmd_params, quiet=True, force=False)\n\n self._name = new_name\n\n return", "def rename(self, new_name, flags=0):\n ret = libvirtmod.virDomainRename(self._o, new_name, flags)\n if ret == -1: raise libvirtError ('virDomainRename() failed', dom=self)\n return ret", "def rename(self, oldkey, newkey):\r\n return self.execute_command(\"RENAME\", oldkey, newkey)", "def rename(server, name):\r\n server.update(name)", "def GenerateServiceName(source_ref):\n base_name = os.path.basename(source_ref.source_path.rstrip(os.sep))\n base_name = base_name.split(':')[0] # Discard image tag if present.\n base_name = base_name.split('@')[0] # Disacard image hash if present.\n # Remove non-supported special characters.\n return re.sub(r'[^a-zA-Z0-9-]', '', base_name).strip('-').lower()", "def rename_resource(self, current_name: str, new_name: str) -> str:\n logger.info(f'Renaming resource \"{current_name}\" to \"{new_name}\"')\n while True:\n try:\n self._api.RenameResource(current_name, new_name)\n except CloudShellAPIError as e:\n if str(e.code) != \"114\":\n raise\n new_name = generate_new_resource_name(new_name)\n else:\n break\n logger.debug(f'Resource \"{current_name}\" renamed to \"{new_name}\"')\n return new_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over a list of volume-storage-service objects.
def volume_storage_service_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None): return self.request( "volume-storage-service-get-iter", { 'max_records': max_records, 'query': [ query, 'query', [ StorageServiceInfo, 'None' ], False ], 'tag': tag, 'desired_attributes': [ desired_attributes, 'desired-attributes', [ StorageServiceInfo, 'None' ], False ], }, { 'attributes-list': [ StorageServiceInfo, True ], } )
[ "def iter_storages(self) -> Iterator[Storage]:\n raise NotImplementedError", "def _iter_volumes(self):\n if self.volumes:\n for volume_name, container_path in self.volumes.iteritems():\n if \"/\" in volume_name:\n # if a / is found in the name, assume it's a full path specified on the host\n host_path = volume_name\n else:\n host_path = \"%s/volumes/%s/%s\" % (self.project.home_path, self.name, volume_name)\n yield (host_path, container_path)", "def iterate_container_objects(self, container):\r\n\r\n return self._get_objects(container)", "def services(self):\n for service_id in self.service_ids():\n yield self._get_service_from_graph(service_id)", "def iterate_volumes(self, node=None, ex_datacenter=None):\n if node is not None:\n if ex_datacenter:\n raise ValueError(\n \"Cannot list the volumes for the datacenter and the \"\n \"virtual machine at the same time\")\n virtual_machine = self.ex_get_vm(node)\n else:\n virtual_machine = None\n\n if ex_datacenter is not None:\n ex_datacenter = self._get_datacenter_by_id(ex_datacenter)\n\n # querying the creation timestamps of node(s) and volumes\n node_creation_times = self._query_node_creation_times(\n virtual_machine=virtual_machine)\n volume_creation_times = self._query_volume_creation_times(\n virtual_machine=virtual_machine)\n\n shared_files = collections.defaultdict(list)\n\n def result_to_volumes(files_info, allow_shared=False):\n \"\"\"\n :type disks_page: tp.Union[tp.List[_FileInfo], tp.List[_VMDiskInfo]]\n :rtype: tp.List[StorageVolume]\n \"\"\"\n if files_info and isinstance(files_info[0], _VMDiskInfo):\n files_info = (disk.file_info for disk in files_info)\n\n volumes = []\n for file_info in files_info:\n\n if not allow_shared and any(\n d.sharing\n for d in file_info.devices):\n shared_files[file_info.path].append(file_info)\n continue\n\n try:\n volume = self._to_volume(file_info)\n except LibcloudError as err:\n # one broken volume should not break the whole iteration\n LOG.warning(str(err))\n continue\n\n created_at = volume_creation_times.get(volume.id)\n for device in file_info.devices:\n if created_at:\n break\n if device.is_root:\n created_at = node_creation_times.get(device.owner_id)\n volume.extra['created_at'] = created_at\n\n volumes.append(volume)\n return volumes\n\n for item in self._query_vm_virtual_disks(\n virtual_machine=virtual_machine,\n datacenter=ex_datacenter,\n process_fn=result_to_volumes):\n yield item\n\n # collect and yield the shared volumes at the end of iteration\n merged_shared_files = []\n for files_info in shared_files.values():\n files_info[0].devices = list({\n device for file_info in files_info\n for device in file_info.devices})\n merged_shared_files.append(files_info[0])\n for item in result_to_volumes(merged_shared_files, allow_shared=True):\n yield item", "def list(service_template_name,\n sort_by,\n descending,\n model_storage,\n logger):\n if service_template_name:\n logger.info('Listing services for service template {0}...'.format(\n service_template_name))\n service_template = model_storage.service_template.get_by_name(service_template_name)\n filters = dict(service_template=service_template)\n else:\n logger.info('Listing all services...')\n filters = {}\n\n services_list = model_storage.service.list(\n sort=utils.storage_sort_param(sort_by=sort_by, descending=descending),\n filters=filters)\n table.print_data(SERVICE_COLUMNS, services_list, 'Services:')", "def init_service_list(self):\n images = self._cli.images()\n for image in images:\n self._services[image['Id']] = image\n\n # 
liang: dump the images, need to move to cache abstraction in the future.\n if image['Id'] == u'3d3b49d80014e2df3434f282586b3bb2cff0f7b5f58a3e63d9229c48085a53a8':\n continue\n if not os.path.exists(conf['image_dir']+image['Id']+'.tar'):\n logger.info('dumping %s' % image['Id'])\n raw = self._cli.get_image(image['Id'])\n tar = open(conf['image_dir']+image['Id']+'.tar', 'w')\n tar.write(raw.data)\n tar.close()\n pass", "def iter_procs(self):\n for row in self:\n if row.service_def:\n yield row", "def test_list_services(self):\n services = (self.admin_volume_services_client.list_services()\n ['services'])\n self.assertNotEmpty(services)", "def list_container_objects(self, container, ex_prefix=None):\r\n return list(self.iterate_container_objects(container,\r\n ex_prefix=ex_prefix))", "def get_host_all_storages(self):\n if self._hypervisor_handler is None:\n self._hypervisor_handler = self.get_handler()\n\n ret_storage_list = []\n try:\n all_storage = self._hypervisor_handler.xenapi.SR.get_all()\n ret_storage_list = [self._hypervisor_handler.xenapi.SR.get_name_label(sr_ref) for sr_ref in all_storage]\n except Exception as error:\n log.exception(\"Exception when get all storage info:%s\", error)\n\n return ret_storage_list", "def list_container_objects(self, container):\r\n return list(self.iterate_container_objects(container))", "def iterate_containers(self):\r\n\r\n for container_name in os.listdir(self.base_path):\r\n full_path = os.path.join(self.base_path, container_name)\r\n if not os.path.isdir(full_path):\r\n continue\r\n yield self._make_container(container_name)", "def test_storage_project_iso_service_list(self):\n pass", "def _enumerate_services_generator(self):\n size_needed = gdef.DWORD()\n nb_services = gdef.DWORD()\n counter = gdef.DWORD()\n try:\n windows.winproxy.EnumServicesStatusExW(self.handle, SC_ENUM_PROCESS_INFO, SERVICE_TYPE_ALL, SERVICE_STATE_ALL, None, 0, ctypes.byref(size_needed), ctypes.byref(nb_services), byref(counter), None)\n except WindowsError:\n pass\n\n while True:\n size = size_needed.value\n buffer = (BYTE * size)()\n try:\n windows.winproxy.EnumServicesStatusExW(self.handle, SC_ENUM_PROCESS_INFO, SERVICE_TYPE_ALL, SERVICE_STATE_ALL, buffer, size, ctypes.byref(size_needed), ctypes.byref(nb_services), byref(counter), None)\n except WindowsError as e:\n continue\n break\n services_array = (gdef.ENUM_SERVICE_STATUS_PROCESSW * nb_services.value).from_buffer(buffer)\n for service_info in services_array:\n shandle = self.open_service(service_info.lpServiceName)\n yield Service(handle=shandle, name=service_info.lpServiceName, description=service_info.lpDisplayName)\n return", "def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device", "def list(sort_by, descending, model_storage, logger):\n\n logger.info('Listing all service templates...')\n service_templates_list = model_storage.service_template.list(\n sort=utils.storage_sort_param(sort_by, descending))\n\n column_formatters = \\\n dict(description=table.trim_formatter_generator(DESCRIPTION_FIELD_LENGTH_LIMIT))\n table.print_data(SERVICE_TEMPLATE_COLUMNS, service_templates_list, 'Service templates:',\n column_formatters=column_formatters)", "def services_all(ctx):\n ctx.run(KUBERNETES_GET_SERVICES_ALL_CMD)", "def volume_list(search_opts=None):\r\n c_client = 
cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transition a volume from one type to another. Delete the logged information about a transition operation.
def volume_transition_log_delete(self, volume_name, operation_type=None, destination_vserver_name=None, source_node=None): return self.request( "volume-transition-log-delete", { 'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ], 'operation_type': [ operation_type, 'operation-type', [ basestring, 'None' ], False ], 'destination_vserver_name': [ destination_vserver_name, 'destination-vserver-name', [ basestring, 'None' ], False ], 'source_node': [ source_node, 'source-node', [ basestring, 'None' ], False ], }, { } )
[ "def volume_transition(self, source_node, volumes, affinity_node=None, operation_type=None, override_warnings=None, destination_vserver_name=None, non_disruptive=None):\n return self.request( \"volume-transition\", {\n 'affinity_node': [ affinity_node, 'affinity-node', [ basestring, 'None' ], False ],\n 'source_node': [ source_node, 'source-node', [ basestring, 'None' ], False ],\n 'operation_type': [ operation_type, 'operation-type', [ basestring, 'None' ], False ],\n 'override_warnings': [ override_warnings, 'override-warnings', [ bool, 'None' ], False ],\n 'destination_vserver_name': [ destination_vserver_name, 'destination-vserver-name', [ basestring, 'None' ], False ],\n 'volumes': [ volumes, 'volumes', [ VolumeTransitionVolinfo, 'None' ], True ],\n 'non_disruptive': [ non_disruptive, 'non-disruptive', [ bool, 'None' ], False ],\n }, {\n 'job-id': [ int, False ],\n } )", "def _stash_log(self) -> None:\n self.log.info(f\"Move source log for {self.__api.upload_id} to\"\n f\" '{self.__api.storage.deleted_logs_path}'.\")\n self.log.info(f\"Delete workspace '{self.__api.upload_id}'.\")\n try:\n self.__api.storage.stash_deleted_log(self,\n self.log.file)\n except Exception as e:\n self.log.info(f'Saving source.log failed: {e}')", "def test_volume_stop(self):\n pass", "def volume_move_abort(self, source_volume):\n return self.request( \"volume-move-abort\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def store_transition(self, transition: Transition) -> None:\n super().store_transition(transition)", "def revert(self, volume_id):\n return self._snap_operation(3, volume_id)", "def volume_transition_check(self, source_node, volumes, operation_type=None, override_warnings=None, destination_vserver_name=None, non_disruptive=None):\n return self.request( \"volume-transition-check\", {\n 'source_node': [ source_node, 'source-node', [ basestring, 'None' ], False ],\n 'operation_type': [ operation_type, 'operation-type', [ basestring, 'None' ], False ],\n 'override_warnings': [ override_warnings, 'override-warnings', [ bool, 'None' ], False ],\n 'destination_vserver_name': [ destination_vserver_name, 'destination-vserver-name', [ basestring, 'None' ], False ],\n 'volumes': [ volumes, 'volumes', [ VolumeTransitionVolinfo, 'None' ], True ],\n 'non_disruptive': [ non_disruptive, 'non-disruptive', [ bool, 'None' ], False ],\n }, {\n } )", "def storageLoss(self):\r\n self.model.totalGrain -= round(self.grain * 0.1) # Prevent grain going to a float because unrestricted types\r\n self.grain -= round(self.grain*0.1)", "def volume_down(self):\n if self.volume_level > 0:\n self.set_volume_level(max(0, self.volume_level - .2))", "def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLStateElt_removeTransition(self, transition)", "def transition(self):\n next_state = self.current_state.transition()\n # self.printStateChange(self.current_state, next_state)\n self.current_state = next_state", "def removeTransition(self, transition: 'ScXMLTransitionElt') -> \"void\":\n return _coin.ScXMLParallelElt_removeTransition(self, transition)", "def detach(self, volume):\r\n return volume.detach()", "def removeVolume(self,remove):\n if not self.ingredients:\n # No ingredients, but removing something -- happens during initial passes\n self.ingredients[self.name]=-remove\n else:\n for k in self.ingredients:\n self.ingredients[k] *= (self.volume-remove)/self.volume\n\n self.volume=self.volume-remove\n self.checkingredients()", 
"def test_create_volume_from_snapshot(self, snapshot, volumes_steps_ui):\n volumes_steps_ui.create_volume_from_snapshot(snapshot.name)\n volumes_steps_ui.delete_volume(snapshot.name)", "def decrease_volume(self):\n if self.is_playing:\n self.volume *= 0.8", "def change_volume(self, volume):\n self.signalPlayer.volume = volume", "def _revert_to_snapshot_generic(self,\n ctxt: context.RequestContext,\n volume,\n snapshot) -> None:\n temp_vol = None\n\n try:\n v_options = {'display_name': '[revert] temporary volume created '\n 'from snapshot %s' % snapshot.id}\n ctxt = context.get_internal_tenant_context() or ctxt\n temp_vol = self.driver._create_temp_volume_from_snapshot(\n ctxt, volume, snapshot, volume_options=v_options,\n status=fields.VolumeStatus.IN_USE)\n self._copy_volume_data(ctxt, temp_vol, volume)\n self.driver_delete_volume(temp_vol)\n temp_vol.destroy()\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.exception(\n \"Failed to use snapshot %(snapshot)s to create \"\n \"a temporary volume and copy data to volume \"\n \" %(volume)s.\",\n {'snapshot': snapshot.id,\n 'volume': volume.id})\n if temp_vol and temp_vol.status == fields.VolumeStatus.IN_USE:\n self.driver_delete_volume(temp_vol)\n temp_vol.destroy()", "def transition_model(self, state, action):\n ..." ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Obtains the status of the volume move operation. This is a synchronous API.
def volume_move_status(self, source_volume=None, is_verbose=None): return self.request( "volume-move-status", { 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ], 'is_verbose': [ is_verbose, 'is-verbose', [ bool, 'None' ], False ], }, { 'status': [ VolMoveStatusInfo, True ], } )
[ "def is_moving(self):\n mcl_is_moving = self.madlib['MCL_MicroDriveMoveStatus']\n mcl_is_moving.restype = c_int\n mcl_is_moving(self.isMoving_pointer, c_int(self.handler))\n return self.isMoving.value", "def volume_status(self, volume):\r\n volume = self._get_volume(volume)\r\n raid = self._get_raid(volume[\"devicefile\"])\r\n if volume is not None and raid is not None:\r\n return raid[\"state\"]", "def get_status(self):\n result = None\n try:\n r = requests.get(self.url_status)\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result", "def get_status(self):\n r = requests.get(self.url_status)\n try:\n result = json.loads(r.content)\n except Exception as e:\n LOGGER.error('Could not get status of this volume: %s. Exception is: %s' % (self.url_status, e))\n result = None\n return result", "def do_showVolumeMove(self, line):\n\t\tcommand = 'ssh -qn admin@%s vol move show -fields replication-throughput,state,bytes-remaining,\\\n\t\t\testimated-completion-time,actual-duration,estimated-remaining-duration,details,\\\n\t\t\tpercent-complete' % self.filer\n\t\tproc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n\t\tp_stdout = proc.communicate()[0]\n\t\tprint p_stdout", "def volume_status(mnode, volname):\n return RestClient(mnode).handle_request(\n \"GET\", \"/v1/volumes/%s/status\" % volname,\n httplib.OK, None)", "def position():\n\t\twhile (_stg.getStatus() == _stg.StageStatus.MOVING) and (not terminated):\n\t\t\ttime.sleep(0.01)\n\t\treturn _stg.getPosition()", "def volume_control(self):\n\n volume = self.volume_prompt.text()\n if self.PushBtn.isChecked():\n direction = \"D\"\n elif self.PullBtn.isChecked():\n direction = \"P\"\n else:\n raise Exception(\"Somethings wrong in the volume_control function\")\n\n (done, answer) = self.pump.volume_command(volume, direction)\n\n if not done:\n QMessageBox.warning(self, __appname__, answer)\n if done:\n print \"plunger own status position\" + pump.own_status[\"plung_pos_mine\"]\n print \"answer: \" + answer", "def get_status (self):\n return self.__status", "def getMoving(self):\n\n if not self.connected() or self.exiting:\n printf(\"Robot not avaliable, returning False\")\n return False\n\n with self.lock:\n return self.uArm.getIsMoving()", "def state(self):\n if self.device.vacuum_status is not None and self.device.is_available == True:\n return STATE_CODE_TO_STATE[self.device.vacuum_status]", "def get_status(self):\n status = lowlevel.SM_PATH_STATUS_PARMS()\n status.path = self.path\n\n rc = lowlevel.sm_path_status(status)\n if rc:\n raise AculabSpeechError(rc, 'sm_path_status')\n\n return status.status", "def ex_wait_storage_state(self, volume, state=VolumeState.DETACHED,\r\n wait_period=60, timeout=1200):\r\n start = time.time()\r\n end = start + timeout\r\n\r\n while time.time() < end:\r\n volumes = self.list_volumes()\r\n volumes = list([v for v in volumes if v.uuid == volume.uuid])\r\n\r\n if (len(volumes) == 1 and volumes[0].extra['state'] == state):\r\n return volumes[0]\r\n else:\r\n time.sleep(wait_period)\r\n continue\r\n\r\n raise LibcloudError(value='Timed out after %d seconds' % (timeout),\r\n driver=self)", "def state(self):\n if self.device.vacuum_status is not None:\n return STATE_CODE_TO_STATE[self.device.vacuum_status]", "def volume_move_pause(self, source_volume):\n return self.request( \"volume-move-pause\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 
'None' ], False ],\n }, {\n } )", "def move(self):\n old_position = self.__position\n self.__position = tuple(map(sum, zip(self.__position, self.__velocity)))\n print(f\"Asteroid {self.__id} Moved! Old Pos: {old_position} -> New Pos: {self.__position}\")\n return self.__position", "def vision_status(self, position: Tuple[float, float]) -> VisionStatus:\n return self._map.vision_status(position)", "def status(self):\n self.lastStatus = ord(self.hardware.transfer(chr(Cmd.NOP), 1)[0])\n return self.lastStatus", "def omniSnmpStatus(self):\n status = -1\n try:\n status = self.netcool.getSnmpStatus(system=self.getOrganizerName())\n status = self.convertStatus(status)\n except Exception: pass\n return status" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Continues an iteration through the list of volumes.
def volume_list_info_iter_next(self, tag, maximum): return self.request( "volume-list-info-iter-next", { 'tag': tag, 'maximum': [ maximum, 'maximum', [ int, 'None' ], False ], }, { 'records': [ int, False ], 'volumes': [ VolumeInfo, True ], } )
[ "def _iter_volumes(self):\n if self.volumes:\n for volume_name, container_path in self.volumes.iteritems():\n if \"/\" in volume_name:\n # if a / is found in the name, assume it's a full path specified on the host\n host_path = volume_name\n else:\n host_path = \"%s/volumes/%s/%s\" % (self.project.home_path, self.name, volume_name)\n yield (host_path, container_path)", "def volume_list_info_iter_start(self, volume=None, verbose=None):\n return self.request( \"volume-list-info-iter-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'verbose': [ verbose, 'verbose', [ bool, 'None' ], False ],\n }, {\n 'records': [ int, False ],\n 'tag': [ basestring, False ],\n } )", "def volume_list_paged(request, search_opts=None, marker=None, paginate=False,\n sort_dir=\"desc\"):\n has_more_data = False\n has_prev_data = False\n volumes = []\n\n # To support filtering with group_id, we need to use the microversion.\n c_client = _cinderclient_with_generic_groups(request)\n if c_client is None:\n return volumes, has_more_data, has_prev_data\n\n # build a dictionary of volume_id -> transfer\n transfers = {t.volume_id: t\n for t in transfer_list(request, search_opts=search_opts)}\n\n if paginate:\n page_size = utils.get_page_size(request)\n # sort_key and sort_dir deprecated in kilo, use sort\n # if pagination is true, we use a single sort parameter\n # by default, it is \"created_at\"\n sort = 'created_at:' + sort_dir\n for v in c_client.volumes.list(search_opts=search_opts,\n limit=page_size + 1,\n marker=marker,\n sort=sort):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n volumes, has_more_data, has_prev_data = update_pagination(\n volumes, page_size, marker, sort_dir)\n else:\n for v in c_client.volumes.list(search_opts=search_opts):\n v.transfer = transfers.get(v.id)\n volumes.append(Volume(v))\n\n return volumes, has_more_data, has_prev_data", "def iterate_volumes(self, node=None, ex_datacenter=None):\n if node is not None:\n if ex_datacenter:\n raise ValueError(\n \"Cannot list the volumes for the datacenter and the \"\n \"virtual machine at the same time\")\n virtual_machine = self.ex_get_vm(node)\n else:\n virtual_machine = None\n\n if ex_datacenter is not None:\n ex_datacenter = self._get_datacenter_by_id(ex_datacenter)\n\n # querying the creation timestamps of node(s) and volumes\n node_creation_times = self._query_node_creation_times(\n virtual_machine=virtual_machine)\n volume_creation_times = self._query_volume_creation_times(\n virtual_machine=virtual_machine)\n\n shared_files = collections.defaultdict(list)\n\n def result_to_volumes(files_info, allow_shared=False):\n \"\"\"\n :type disks_page: tp.Union[tp.List[_FileInfo], tp.List[_VMDiskInfo]]\n :rtype: tp.List[StorageVolume]\n \"\"\"\n if files_info and isinstance(files_info[0], _VMDiskInfo):\n files_info = (disk.file_info for disk in files_info)\n\n volumes = []\n for file_info in files_info:\n\n if not allow_shared and any(\n d.sharing\n for d in file_info.devices):\n shared_files[file_info.path].append(file_info)\n continue\n\n try:\n volume = self._to_volume(file_info)\n except LibcloudError as err:\n # one broken volume should not break the whole iteration\n LOG.warning(str(err))\n continue\n\n created_at = volume_creation_times.get(volume.id)\n for device in file_info.devices:\n if created_at:\n break\n if device.is_root:\n created_at = node_creation_times.get(device.owner_id)\n volume.extra['created_at'] = created_at\n\n volumes.append(volume)\n return volumes\n\n for item in 
self._query_vm_virtual_disks(\n virtual_machine=virtual_machine,\n datacenter=ex_datacenter,\n process_fn=result_to_volumes):\n yield item\n\n # collect and yield the shared volumes at the end of iteration\n merged_shared_files = []\n for files_info in shared_files.values():\n files_info[0].devices = list({\n device for file_info in files_info\n for device in file_info.devices})\n merged_shared_files.append(files_info[0])\n for item in result_to_volumes(merged_shared_files, allow_shared=True):\n yield item", "def enable(self):\n for volume in self.volumes:\n try:\n self._renderer.AddVolume(volume)\n except:\n pass # TBD: any error logging.", "def test_list_volumes_walks_pages(self):\n api = gceblockdeviceapi_for_test(self)\n self.patch(api, '_page_size', 1)\n\n volume_1 = api.create_volume(\n dataset_id=uuid4(),\n size=get_minimum_allocatable_size()\n )\n volume_2 = api.create_volume(\n dataset_id=uuid4(),\n size=get_minimum_allocatable_size()\n )\n\n blockdevice_ids = [v.blockdevice_id for v in api.list_volumes()]\n self.assertThat(\n blockdevice_ids,\n MatchesAll(Contains(volume_1.blockdevice_id),\n Contains(volume_2.blockdevice_id))\n )\n\n api.destroy_volume(volume_2.blockdevice_id)\n blockdevice_ids = [v.blockdevice_id for v in api.list_volumes()]\n self.assertThat(\n blockdevice_ids,\n MatchesAll(Contains(volume_1.blockdevice_id),\n Not(Contains(volume_2.blockdevice_id)))\n )", "def volumes_prepare(volumes:list,instances:list,projects:dict) -> list:\n v2 = []\n \n for volume in volumes:\n \n volume_dict = volume.to_dict()\n \n volume_dict[\"project_name\"] = projects[volume_dict[\"os-vol-tenant-attr:tenant_id\"]]\n \n if volume_dict[\"name\"] == \"None\" or volume_dict[\"name\"] == None:\n volume_dict[\"name\"] = \"\"\n\n if volume_dict[\"name\"] != \"\": #replace space to _ so its usable in the volume name, if it has volume name\n volume_dict[\"name\"] = str(volume_dict[\"name\"]).replace(\" \",\"_\") \n\n #check if volume is attached to an instance and act accordingly\n if volume_dict[\"attachments\"] != [] :\n volume_dict[\"server_id\"] = volume_dict[\"attachments\"][0][\"server_id\"]\n volume_dict[\"server_name\"] = get_server_name(volume_dict[\"attachments\"][0][\"server_id\"],instances)\n volume_dict[\"mountpoint\"] = volume_dict[\"attachments\"][0][\"device\"].split('/')[-1]\n if volume_dict[\"mountpoint\"] == \"vda\":\n volume_dict[\"mountpoint\"] = \"root\"\n else:\n volume_dict[\"server_id\"] = \"not attached\"\n volume_dict[\"server_name\"] = \"\"\n volume_dict[\"mountpoint\"] = \"\"\n \n volume_dict[\"volume_migration_name\"] = volume_dict[\"id\"]+\"-\"+volume_dict[\"name\"]+\"-\"+volume_dict[\"server_name\"]+\"-\"+volume_dict[\"mountpoint\"]\n v2.append(volume_dict)\n \n v2 = filter_volumes(v2)\n return v2", "def test_01_list_volumes(self):\n list_volume_response = Volume.list(\n self.apiclient,\n ids=[self.vm1_root_volume.id, self.vm2_root_volume.id, self.vm3_root_volume.id],\n type='ROOT',\n listAll=True\n )\n self.assertEqual(\n isinstance(list_volume_response, list),\n True,\n \"List Volume response was not a valid list\"\n )\n self.assertEqual(\n len(list_volume_response),\n 3,\n \"ListVolumes response expected 3 Volumes, received %s\" % len(list_volume_response)\n )", "def detach_and_delete_vols(self, volumes):\n for v in volumes:\n if v.status == \"in-use\":\n v.detach()\n v.get()\n sample = TimeoutSampler(\n 100,\n 5,\n self.check_expected_vol_status,\n vol=v,\n expected_state=\"available\",\n )\n if not sample.wait_for_func_status(True):\n logger.error(f\"Volume 
{v.name} failed to detach\")\n raise exceptions.PSIVolumeNotInExpectedState()\n\n v.delete()\n sample = TimeoutSampler(100, 5, self.check_vol_deleted, vol=v)\n if not sample.wait_for_func_status(True):\n logger.error(f\"Failed to delete Volume {v.name}\")\n raise exceptions.PSIVolumeDeletionFailed()", "def iterate_containers(self):\r\n\r\n for container_name in os.listdir(self.base_path):\r\n full_path = os.path.join(self.base_path, container_name)\r\n if not os.path.isdir(full_path):\r\n continue\r\n yield self._make_container(container_name)", "def get_each_volume(wildfrag):\n for (i_system,) in wildfrag.retrieve_system_ids():\n system = wildfrag.retrieve_system(i_system)\n\n for i_device, device in enumerate(system.devices):\n for i_volume, volume in enumerate(device.volumes):\n yield volume, system, device, i_volume, i_system, i_device", "def get_volumes(self):\n\tapi = NaElement(\"volume-get-iter\")\n\txi = NaElement(\"desired-attributes\")\n\tapi.child_add(xi)\n\t## This specifies max number of volume records to pull from sdk api\n\t## Default is 20. 20000 is enough for most clusters\n\tapi.child_add_string(\"max-records\",self.MAX_VOLUMES)\n\txi1 = NaElement(\"volume-attributes\")\n\txi.child_add(xi1)\n\txi41 = NaElement(\"volume-id-attributes\")\n\txi41.child_add_string(\"instance-uuid\",\"<instance-uuid>\")\n\txi41.child_add_string(\"name\",\"<name>\")\n\txi41.child_add_string(\"owning-vserver-name\",\"<owning-vserver-name>\")\n\txi41.child_add_string(\"uuid\",\"<uuid>\")\n\txi1.child_add(xi41)\n\txo = self.s.invoke_elem(api)\n\tself.sd.incr(\"api.invoke\")\n\tf = xmltodict.parse(xo.sprintf())\n\tvolumes = f['results']['attributes-list']['volume-attributes']\n\tvol_list = []\n\tfor volume in volumes:\n\t vol_list.append({'cluster-name':self.CLUSTER_NAME,\n\t\t\t 'owning-vserver-name':volume['volume-id-attributes']['owning-vserver-name'],\n\t\t\t 'name':volume['volume-id-attributes']['name'],\n\t\t\t 'instance-uuid':volume['volume-id-attributes']['instance-uuid']\n\t\t\t })\n\treturn vol_list", "def all_volumes(self):\n _logger.debug('%s', where_am_i())\n volumes = []\n for compartment in self.all_compartments():\n comp_volumes = compartment.all_volumes()\n if comp_volumes is not None:\n volumes += comp_volumes\n return volumes", "def delete_volumes(client, volumes):\n failed_volumes = []\n for volume in volumes:\n try:\n client.delete_volume(VolumeId=volume)\n except ClientError as error:\n code = error.response['Error']['Code']\n if code == 'VolumeInUse':\n client.detach_volume(\n VolumeId=volume,\n Force=True)\n waiter = client.get_waiter('volume_available')\n waiter.wait(VolumeIds=[volume])\n client.delete_volume(VolumeId=volume)\n continue\n failed_volumes.append(volume)\n return failed_volumes", "def volume_list(search_opts=None):\r\n c_client = cinderclient()\r\n if c_client is None:\r\n return []\r\n # print c_client.volumes.list(search_opts=search_opts)\r\n return c_client.volumes.list(search_opts=search_opts)", "def test_volumes_add(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.volumes), 8)\n svc.volumes.append(sm.Volume(\"foo\", \"bar\"))\n svc.volumes.append(sm.Volume(\"bar\", \"baz\"))\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n if not \"foo\" in [v.owner for v in svc.volumes]:\n raise ValueError(\"Failed to alter volumes.\")\n if not \"bar\" in [v.owner for v in 
svc.volumes]:\n raise ValueError(\"Failed to alter volumes.\")\n for v in svc.volumes:\n if v.owner == \"foo\":\n self.assertEqual(v.permission, \"bar\")\n if v.owner == \"bar\":\n self.assertEqual(v.permission, \"baz\")\n self.assertEqual(len(svc.volumes), 10)", "def list_vol(tag=None, device=None):\n conn = _ec2connect()\n vols = conn.get_all_volumes(filters=_get_filters(tag))\n if not vols:\n print('\\tNone.')\n return\n for v in vols:\n t = v.tags.get(TAG_NAME, 'root')\n s = v.attachment_state()\n z = v.size\n i = v.attach_data.instance_id\n d = v.attach_data.device\n print('\\t{0:25} {1:2}GB {2:15} {3:15} {4} {5}'.format(t, z, v.id, s, i, d ))", "def test_volumes_remove(self):\n ctx = sm.ServiceContext(INFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.volumes), 8)\n svc.volumes = filter(lambda r: r.resourcePath not in [\"zenjobs\", \"zenoss-export\"], svc.volumes)\n ctx.commit(OUTFILENAME)\n ctx = sm.ServiceContext(OUTFILENAME)\n svc = filter(lambda x: x.description == \"Zope server\", ctx.services)[0]\n self.assertEqual(len(svc.volumes), 6)\n for v in svc.volumes:\n if v.resourcePath in [\"zenjobs\", \"zenoss-export\"]:\n raise ValueError(\"Error removing volume.\")", "def _attach_volumes(self, instance):\n if instance['volumes']:\n for volume in instance['volumes']:\n if volume.uuid:\n self._container_script_modify(instance, None,\n volume.uuid,\n volume.mountpoint, 'add')\n LOG.debug('Added volume %s to %s' % (volume.uuid,\n instance['id']))\n else:\n self._container_script_modify(instance, volume.export_device,\n None, 'add')\n LOG.debug('Added volume %s to %s' % (volume.export_device,\n instance['id']))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Begin the process by which the given clone is split off from its underlying parent volume and snapshot. New storage is allocated for the clone that is distinct from its parent. This process may take some time and proceeds in the background. Use the 'volume-clone-split-status' command to view the operation's progress. Both clone and parent volumes remain available during the process of splitting them apart. Upon completion, the snapshot on which the clone was based will be unlocked in the parent volume. Any snapshots in the clone are removed at the end of processing. Use the 'volume-clone-split-stop' command to stop this process. This command fails if applied to a traditional volume. Cloning is a capability that applies exclusively to flexible volumes. In Data ONTAP Cluster-Mode, a job is created to perform the split operation. The job id is returned in the API response, and the progress of the job can be tracked using the job APIs.
def volume_clone_split_start(self, volume):
    return self.request( "volume-clone-split-start", {
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
    }, {
        'result-error-message': [ basestring, False ],
        'result-jobid': [ int, False ],
        'result-error-code': [ int, False ],
        'result-status': [ basestring, False ],
    } )
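For illustration, a hedged sketch of driving the wrapper above: it starts the split and hands back the Cluster-Mode job id documented in the output spec. The `filer` object and dict-style access to the parsed reply are assumptions about the surrounding SDK plumbing.

# Assumption-laden sketch: start a clone split and surface the background job id.
def start_clone_split(filer, clone_volume):
    reply = filer.volume_clone_split_start(volume=clone_volume)
    error = reply.get('result-error-message')
    if error:
        raise RuntimeError('clone split failed: %s' % error)
    # Progress can then be watched with volume-clone-split-status or, in
    # Cluster-Mode, through the job APIs using this id.
    return reply.get('result-jobid')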
[ "def volume_clone_split_status(self, volume=None):\n return self.request( \"volume-clone-split-status\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-details': [ CloneSplitDetailInfo, True ],\n } )", "def volume_clone_split_estimate(self, volume):\n return self.request( \"volume-clone-split-estimate\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'clone-split-estimate': [ CloneSplitEstimateInfo, True ],\n } )", "def create_clone(\n self,\n client,\n vol_name,\n subvol_name,\n snap_name,\n target_subvol_name,\n validate=True,\n **kwargs,\n ):\n clone_cmd = f\"ceph fs subvolume snapshot clone {vol_name} {subvol_name} {snap_name} {target_subvol_name}\"\n if kwargs.get(\"group_name\"):\n clone_cmd += f\" --group_name {kwargs.get('group_name')}\"\n if kwargs.get(\"target_group_name\"):\n clone_cmd += f\" --target_group_name {kwargs.get('target_group_name')}\"\n if kwargs.get(\"pool_layout\"):\n clone_cmd += f\" --pool_layout {kwargs.get('pool_layout')}\"\n cmd_out, cmd_rc = client.exec_command(\n sudo=True, cmd=clone_cmd, check_ec=kwargs.get(\"check_ec\", True)\n )\n if validate:\n listsubvolumes_cmd = f\"ceph fs subvolume ls {vol_name}\"\n if kwargs.get(\"target_group_name\"):\n listsubvolumes_cmd += f\" --group_name {kwargs.get('target_group_name')}\"\n out, rc = client.exec_command(\n sudo=True, cmd=f\"{listsubvolumes_cmd} --format json\"\n )\n subvolume_ls = json.loads(out.read().decode())\n if target_subvol_name not in [i[\"name\"] for i in subvolume_ls]:\n raise CommandFailed(f\"Creation of clone : {target_subvol_name} failed\")\n return cmd_out, cmd_rc", "def volume_clone_split_stop(self, volume):\n return self.request( \"volume-clone-split-stop\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def volume_clone_create_async(self, parent_volume, volume, use_snaprestore_license=None, junction_active=None, space_reserve=None, junction_path=None, parent_snapshot=None):\n return self.request( \"volume-clone-create-async\", {\n 'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],\n 'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'junction_active': [ junction_active, 'junction-active', [ bool, 'None' ], False ],\n 'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n 'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def volume_clone_create(self, parent_volume, volume, use_snaprestore_license=None, force_worm_clone=None, junction_active=None, qos_policy_group_name=None, space_reserve=None, junction_path=None, parent_snapshot=None, volume_type=None):\n return self.request( \"volume-clone-create\", {\n 'use_snaprestore_license': [ use_snaprestore_license, 'use-snaprestore-license', [ bool, 'None' ], False ],\n 'parent_volume': [ parent_volume, 'parent-volume', [ basestring, 'None' ], False ],\n 'force_worm_clone': [ force_worm_clone, 'force-worm-clone', [ bool, 'None' ], False ],\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'junction_active': [ junction_active, 
'junction-active', [ bool, 'None' ], False ],\n 'qos_policy_group_name': [ qos_policy_group_name, 'qos-policy-group-name', [ basestring, 'None' ], False ],\n 'space_reserve': [ space_reserve, 'space-reserve', [ basestring, 'None' ], False ],\n 'junction_path': [ junction_path, 'junction-path', [ basestring, 'None' ], False ],\n 'parent_snapshot': [ parent_snapshot, 'parent-snapshot', [ basestring, 'None' ], False ],\n 'volume_type': [ volume_type, 'volume-type', [ basestring, 'None' ], False ],\n }, {\n } )", "def test_create_cloned_volume(self):\n self.mox.StubOutWithMock(self._driver, '_create_file')\n self.mox.StubOutWithMock(self._driver, '_copy_file')\n\n vol_size = self._driver._size_bytes(self.TEST_VOLSIZE)\n self._driver._create_file(self.TEST_CLONEPATH, vol_size)\n self._driver._copy_file(self.TEST_VOLPATH, self.TEST_CLONEPATH)\n\n self.mox.ReplayAll()\n\n self._driver.create_cloned_volume(self.TEST_CLONE, self.TEST_VOLUME)", "def clone(self, source_name, snapshot_id, dest_name):\n wrap_popen('collie', 'vdi', 'clone', '-s', snapshot_id, source_name,\n dest_name)", "def wait_for_clone(client, clone, timeout=600):\n clone_status_cmd = f\"ceph fs clone status {clone.get('vol_name')} {clone.get('target_subvol_name')} \"\n clone_status_cmd = (\n clone_status_cmd + f\"--group_name {clone.get('group_name')} --format json\"\n )\n cmd_out, cmd_rc = client.exec_command(\n sudo=True,\n cmd=clone_status_cmd,\n check_ec=clone.get(\"check_ec\", True),\n timeout=timeout,\n )\n status = json.loads(cmd_out)\n clone_state = status[\"status\"][\"state\"]\n if \"complete\" not in clone_state:\n raise AssertionError(f\"Clone state : {clone_state}\")", "def _create_clone_pair(self, pvol, svol):\n snapshot_name = '%(prefix)s%(svol)s' % {\n 'prefix': CLONE_NAME,\n 'svol': svol % _SNAP_HASH_SIZE,\n }\n try:\n body = {\"snapshotGroupName\": snapshot_name,\n \"snapshotPoolId\": self.storage_info['snap_pool_id'],\n \"pvolLdevId\": pvol,\n \"svolLdevId\": svol,\n \"isClone\": True,\n \"clonesAutomation\": True,\n \"copySpeed\": 'medium',\n \"isDataReductionForceCopy\": True}\n self.client.add_snapshot(body)\n except utils.HBSDError as ex:\n if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==\n rest_api.INVALID_SNAPSHOT_POOL and\n not self.conf.hitachi_snap_pool):\n msg = utils.output_log(\n MSG.INVALID_PARAMETER, param='hitachi_snap_pool')\n raise utils.HBSDError(msg)\n else:\n raise\n try:\n self._wait_copy_pair_status(svol, set([PSUS, SMPP, SMPL]))\n except Exception:\n with excutils.save_and_reraise_exception():\n try:\n self._delete_pair_from_storage(pvol, svol)\n except utils.HBSDError:\n utils.output_log(\n MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)", "def NETRBufVSplitOpen(self):\n self.NETROpen(Vim.Var('NETRSplitOrientation') + ' vsplit',\n use_rifle=False)", "def test_start_and_capture_snapshot(sdc_builder, sdc_executor):\n builder = sdc_builder.get_pipeline_builder()\n\n # Super simple cluster pipeline that will fail if it will get to validation\n source = builder.add_stage('Hadoop FS', type='origin')\n source.data_format = 'TEXT'\n source.input_paths.append('/i/do/not/exists')\n\n trash = builder.add_stage('Trash')\n\n source >> trash\n\n pipeline = builder.build()\n pipeline.configuration['executionMode'] = 'CLUSTER_BATCH'\n\n sdc_executor.add_pipeline(pipeline)\n try:\n sdc_executor.capture_snapshot(pipeline, start_pipeline=True)\n except StartError:\n # We are expecting error that we will further inspect via pipeline status\n pass\n else:\n assert False\n\n 
sdc_executor.get_pipeline_status(pipeline).wait_for_status('START_ERROR', ignore_errors=True)\n status = sdc_executor.get_pipeline_status(pipeline).response.json()\n assert 'Cluster mode does not support snapshots.' in status['message']", "def split(\n self, split_func, num_splits, f_args=None, f_kwargs=None, extract_metadata=False\n ):\n f_args = tuple() if f_args is None else f_args\n f_kwargs = {} if f_kwargs is None else f_kwargs\n return self._wrap_partitions(\n self.deploy_splitting_func(\n self.axis,\n split_func,\n f_args,\n f_kwargs,\n num_splits,\n *self.list_of_blocks,\n extract_metadata=extract_metadata,\n ),\n extract_metadata=extract_metadata,\n )", "def volume_split(self, new_volume_name, plex):\n return self.request( \"volume-split\", {\n 'new_volume_name': [ new_volume_name, 'new-volume-name', [ basestring, 'None' ], False ],\n 'plex': [ plex, 'plex', [ basestring, 'None' ], False ],\n }, {\n } )", "def divide_parcel(self, ):\n\n pass\n\n '''\n // ParcelDivide\n // If the selection is a subsection of exactly one parcel,\n // chop out that section and make a new parcel of it.\n // viewer -> sim\n // reliable\n {\n \tParcelDivide Low 211 NotTrusted Unencoded\n \t{\n \t\tAgentData\t\t\tSingle\n \t\t{\tAgentID\t\t\tLLUUID\t}\n \t\t{\tSessionID\t\tLLUUID\t}\n \t}\n \t{\n \t\tParcelData\t\t\tSingle\n \t\t{\tWest\t\tF32\t\t}\n \t\t{\tSouth\t\tF32\t\t}\n \t\t{\tEast\t\tF32\t\t}\n \t\t{\tNorth\t\tF32\t\t}\n \t}\n }\n '''", "def label_mp(split):\n start = time()\n print('start processing {} split...'.format(split))\n data_dir = join(DATA_DIR, split)\n n_data = count_data(data_dir)\n with mp.Pool() as pool:\n list(pool.imap_unordered(process(split),\n list(range(n_data)), chunksize=1024))\n print('finished in {}'.format(timedelta(seconds=time()-start)))", "def _split_lot(\n num_shares,\n lot,\n lots,\n logger,\n type_of_lot,\n existing_loss_lot=None,\n existing_replacement_lot=None,\n):\n existing_lot_portion = float(num_shares) / float(lot.num_shares)\n new_lot_portion = float(lot.num_shares - num_shares) / float(lot.num_shares)\n\n new_lot = copy.deepcopy(lot)\n new_lot.num_shares -= num_shares\n new_lot.basis = int(round(new_lot.basis * new_lot_portion))\n new_lot.adjusted_basis = int(\n round(new_lot.adjusted_basis * new_lot_portion)\n )\n new_lot.proceeds = int(round(new_lot.proceeds * new_lot_portion))\n new_lot.adjustment = int(round(new_lot.adjustment * new_lot_portion))\n lots.add(new_lot)\n\n lot.num_shares = num_shares\n lot.basis = int(round(lot.basis * existing_lot_portion))\n lot.adjusted_basis = int(round(lot.adjusted_basis * existing_lot_portion))\n lot.proceeds = int(round(lot.proceeds * existing_lot_portion))\n lot.adjustment = int(round(lot.adjustment * existing_lot_portion))\n\n loss_lots = [lot] if type_of_lot == \"loss\" else [existing_loss_lot]\n split_off_loss_lots = [new_lot] if type_of_lot == \"loss\" else []\n replacement_lots = (\n [lot] if type_of_lot == \"replacement\" else [existing_replacement_lot]\n )\n split_off_replacement_lots = (\n [new_lot] if type_of_lot == \"replacement\" else []\n )\n logger.print_lots(\n \"Split {} in two\".format(type_of_lot),\n lots,\n loss_lots=loss_lots,\n split_off_loss_lots=split_off_loss_lots,\n replacement_lots=replacement_lots,\n split_off_replacement_lots=split_off_replacement_lots,\n )", "def set_split(self, mode, view):\n\n if isinstance(mode, int):\n mode = \"Split{}\".format(mode)\n group = view - 1\n\n # Get object id\n method = \"split.factory.instance\"\n params = {'channel': 0}\n r = 
self.request(method=method, params=params)\n object_id = r['result']\n\n # Set split mode\n method = \"split.setMode\"\n params = {'displayType': \"General\",\n 'workMode': \"Local\",\n 'mode': mode,\n 'group': group}\n r = self.request(method=method, params=params, object_id=object_id)\n\n if r['result'] is False:\n raise RequestError(str(r))", "def split(self, widget, orientation, index=None):\n if widget.original:\n base = widget.original\n else:\n base = widget\n clone = base.split()\n if not clone:\n return\n if orientation == int(QtCore.Qt.Horizontal):\n orientation = QtCore.Qt.Horizontal\n else:\n orientation = QtCore.Qt.Vertical\n self.setOrientation(orientation)\n splitter = self._make_splitter()\n splitter.show()\n if index is None:\n self.addWidget(splitter)\n self.child_splitters.append(splitter)\n else:\n self.insertWidget(index, splitter)\n self.child_splitters.insert(index, splitter)\n if clone not in base.clones:\n # code editors maintain the list of clones internally but some\n # other widgets (user widgets) might not.\n base.clones.append(clone)\n clone.original = base\n splitter._parent_splitter = self\n splitter.last_tab_closed.connect(self._on_last_child_tab_closed)\n splitter.tab_detached.connect(self.tab_detached.emit)\n if hasattr(base, '_icon'):\n icon = base._icon\n else:\n icon = None\n # same group of tab splitter (user might have a group for editors and\n # another group for consoles or whatever).\n splitter._uuid = self._uuid\n splitter.add_tab(clone, title=self.main_tab_widget.tabText(\n self.main_tab_widget.indexOf(widget)), icon=icon)\n self.setSizes([1 for i in range(self.count())])\n # In order for the focus to switch to the newly splitted editor, it\n # appears that there first needs to be a splitter with a widget in it,\n # and then first the splitter and then the widget need to explicitly\n # receive focus. There may be a more elegant way to achieve this.\n splitter.main_tab_widget.setFocus()\n clone.setFocus()\n return splitter" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the name of the "root" volume on the filer. If this request is executed in the context of a vfiler, the "root" volume of the vfiler will be returned. If this request is executed in the context of a Vserver, the "namespace root" volume of the Vserver will be returned. If the "namespace root" volume of the Admin Vserver is requested, EVSERVER_OP_NOT_ALLOWED will be returned.
def volume_get_root_name(self):
    return self.request( "volume-get-root-name", {
    }, {
        'volume': [ basestring, False ],
    } )
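For illustration, a one-call sketch showing how the wrapper above might be consumed; the `filer` object and dict-style access to the documented 'volume' output field are assumptions.

# Sketch: the reply carries a single documented field, the root volume name.
def get_root_volume_name(filer):
    reply = filer.volume_get_root_name()
    return reply.get('volume')   # vfiler root, or the Vserver namespace root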
[ "def get_gluster_default_volume_name():\n # type: (None) -> str\n return _GLUSTER_DEFAULT_VOLNAME", "def get_file_server_glusterfs_volume_name(sc):\n # type: (StorageClusterSettings) -> str\n try:\n volname = sc.file_server.server_options['glusterfs']['volume_name']\n except KeyError:\n volname = get_gluster_default_volume_name()\n return volname", "def docker_volume_name(self) -> str:\n return os.environ.get(\n 'DATA_VOLUME_NAME',\n f\"{self.docker_container_name}-vol\"\n )", "def _get_volume_name(self):\n pass", "def docker_vpn_volume_name(self) -> str:\n return os.environ.get(\n 'VPN_VOLUME_NAME',\n f\"{self.docker_container_name}-vpn-vol\"\n )", "def root_volume_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"root_volume_size\")", "def svn_fs_root_fs(root: \"svn_fs_root_t\") -> \"svn_fs_t *\":\n return _fs.svn_fs_root_fs(root)", "def root_volume_size(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"root_volume_size\")", "def IsVolumeSystemRoot(self):\n return self.path_spec.IsVolumeSystemRoot()", "def _get_volume_name(self):\n return self._heat_resource.properties[\"volume_id\"]", "def get_inner_fileserver_root():\n\n return seahub.settings.INNER_FILE_SERVER_ROOT", "def get_gluster_on_compute_volume():\n # type: (None) -> str\n return _GLUSTER_ON_COMPUTE_VOLUME", "def _get_volume_path(self):\n return heconflib.get_volume_path(\n self._parent.environment[ohostedcons.StorageEnv.SP_UUID],\n self._parent.environment[ohostedcons.StorageEnv.SD_UUID],\n self._parent.environment[ohostedcons.StorageEnv.IMG_UUID],\n self._parent.environment[ohostedcons.StorageEnv.VOL_UUID]\n )", "def volume(self):\n try:\n if self._api == 'iex':\n return self.get_api().quote().get('latestVolume')\n elif self._api == 'iexfinance':\n vol = self.get_api().get_volume()\n if vol:\n return float(vol)\n else:\n return 0.\n except iexfinance.utils.exceptions.IEXQueryError:\n return None", "def vsphere_volume(self) -> Optional[pulumi.Input['AlertmanagerSpecVolumesVsphereVolumeArgs']]:\n return pulumi.get(self, \"vsphere_volume\")", "def vsphere_volume(self) -> Optional[pulumi.Input['ThanosRulerSpecVolumesVsphereVolumeArgs']]:\n return pulumi.get(self, \"vsphere_volume\")", "def initialvol():\r\n inital_volume = mastervol().GetMasterVolumeLevel()\r\n return inital_volume", "def _get_volume(self, volume_devicefile):\r\n if self._data is not None:\r\n for volume in self._data[\"volumes\"]:\r\n if volume[\"devicefile\"] == volume_devicefile:\r\n return volume", "def docker_ssh_volume_name(self) -> str:\n return os.environ.get(\n 'SSH_TUNNEL_VOLUME_NAME',\n f\"{self.docker_container_name}-ssh-vol\"\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the option named 'option-name' to the value specified by 'option-value' in the specified volume. The change remains effective even after the filer is rebooted. Some options have values that are numbers or strings, and others have values that are 'on' (also expressible as 'yes', 'true', or '1') or 'off' (also expressible as 'no', 'false', or '0'). A mixture of uppercase and lowercase characters may be used for an option's value. Note that the 'root' option is special in that it does not have an associated value. Also note that some of these options cannot be set for a flexible volume, as they relate only to aggregates (either freestanding ones or those embedded in traditional volumes). Other options may apply only to flexible volumes.
def volume_set_option(self, volume, option_value, option_name):
    return self.request( "volume-set-option", {
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
        'option_value': [ option_value, 'option-value', [ basestring, 'None' ], False ],
        'option_name': [ option_name, 'option-name', [ basestring, 'None' ], False ],
    }, {
    } )
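For illustration, a sketch that applies a few boolean-style per-volume options through the wrapper above. The option names are common ONTAP volume options used here only as examples; whether each one is valid on a given flexible volume or aggregate is filer-dependent, and the `filer` object is an assumption.

# Sketch: persistently set a handful of on/off volume options.
def apply_volume_options(filer, volume_name, options=None):
    options = options or {
        'no_atime_update': 'on',   # boolean options accept on/off, yes/no, true/false, 1/0
        'nosnapdir': 'on',
        'minra': 'off',
    }
    for name, value in options.items():
        filer.volume_set_option(volume=volume_name,
                                option_name=name,
                                option_value=value)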
[ "def set_option(a_option_name, a_value):\n if a_value is None or str(a_value) == \"\":\n execute(\"let &\" + a_option_name + \" = \\\"\\\"\")\n else:\n execute(\"let &\" + a_option_name + \" = \\\"\" + str(a_value) + \"\\\"\")", "def setOption(name, value):\n \n if _fwk is not None:\n logging.warning(\"psana.setOption() called after DataSource(), has no effect\")\n \n _options[name] = str(value)", "def set_option(self, varname, value):\n option = self.get_option_by_varname(varname)\n option.set_value(value)", "def set_option(self, section, option, value, write=False):\n if not self.has_section(section):\n self.add_section(section)\n if isinstance(value, basestring):\n value = to_unicode(value)\n if value.startswith(' ') or value.endswith(' '):\n value = \"%(ws)s%(value)s%(ws)s\" % {\"value\" : value,\n \"ws\" : self.mrk_ws}\n RawConfigParser.set(self, section, str(option), value)\n if write:\n self.write()", "def set_option(self, key, value):\n self.options.set(key, value)", "def SetValue(self, section, optionName, value, infoLevel=0, ignoreSerializationError=False):\n\t\ttry:\n\t\t\teval(repr(value))\n\t\texcept:\n\t\t\tif not ignoreSerializationError:\n\t\t\t\traise Exception(\"Object cannot be serialized: %s\", repr(value))\n\n\t\tself.cfgObj.set(section, optionName, repr(value))\n\t\tself.__dict__[section].Set(optionName, value)\n\n\t\tself.Logger.debug(\"Changing %s.%s to %s\" % (section, optionName, value))", "def set_option(self, option, value):\n var_name = option['value'].replace('_values', '')\n setattr(self, var_name, value)\n # Logs.debug(\"set option \",self.get_option(option))", "def setoption(self, name, value):\n\n if name.upper() == \"STAGECERTIFICATEFILE\":\n if not(self._validate_certificatefile(\"STAGECERTIFICATEFILE\", value)):\n return True # value found, but not set\n else:\n # reset all key/cert data, it might change now\n self.privatekey = None\n self.fingerprint = None\n self.certificate = None\n self.publickeyxml = None\n\n if name.upper() == \"TIMEOUT\" and (not isint(value) or int(value) < 1 or int(value) > 100):\n print_error(\"TIMEOUT should be 1 <= TIMEOUT <= 100\")\n return True # value found, but not set\n if name.upper() == \"RETRIES\" and (not isint(value) or int(value) < 0 or int(value) > 100):\n print_error(\"RETRIES should be 0 <= RETRIES <= 100\")\n return True # value found, but not set\n\n return ModuleBase.setoption(self, name, value)", "def set(self, section, option, value):\r\n if value.__class__ != str:\r\n value = str(value) # this will prevent floating point inaccuracies from being saved to file\r\n else:\r\n value = repr(value)\r\n if not self.has_section(section):\r\n raise ConfigParser.NoSectionError(section)\r\n if not self.has_option(section, option):\r\n raise ConfigParser.NoOptionError(option, section)\r\n ConfigParser.RawConfigParser.set(self, section, option, value)", "def set_option(self, key: str, value: aiowamp.WAMPType) -> None:\n if self.options is None:\n self.options = {key: value}\n return\n\n self.options[key] = value", "def _set_option(msat_config, name, value):\n check = mathsat.msat_set_option(msat_config, name, value)\n if check != 0:\n raise PysmtValueError(\"Error setting the option '%s=%s'\" % (name,value))", "def set(self, option, value):\n # Add section if it does not exist\n runtime_element = self.root.find('.//RunTime')\n if runtime_element is None:\n runtime_element = ElementTree.Element('RunTime')\n self.root.append(runtime_element)\n element = runtime_element.find(option)\n if element is None:\n # Add 
option\n element = ElementTree.SubElement(runtime_element, option)\n element.text = str(value)", "def set(self, option, value):\n # add section if does not exist\n if 'RunTime' not in self.configObject.sections():\n self.configObject.add_section('RunTime')\n\n # add option\n self.configObject.set('RunTime', option, str(value))", "def setOption(self, key, value):\n if self.readyMoves:\n log.warning(\n \"Options set after 'readyok' are not sent to the engine\",\n extra={\"task\": self.defname},\n )\n if key == \"cores\":\n self.optionQueue.append(\"cores %s\" % value)\n elif key == \"memory\":\n self.optionQueue.append(\"memory %s\" % value)\n elif key.lower() == \"ponder\":\n self.__setPonder(value == 1)\n else:\n self.optionQueue.append(\"option %s=%s\" % (key, value))", "def set(self, section, option, value=None):\r\n self._validate_value_types(option=option, value=value)\r\n super(ConfigParser, self).set(section, option, value)", "def set_plugin_option(self, plugin, key, value):\n if plugin in self.plugins:\n plugin = self.plugins[plugin]\n plugin.set_option(key, value)", "def option(name=None, value=None):\r\n if name is None:\r\n return options\r\n if name not in options:\r\n error = f'Configuration option \"{name}\" does not exist.'\r\n log.error(error)\r\n raise LookupError(error)\r\n if value is None:\r\n return options[name]\r\n else:\r\n options[name] = value", "def set(self, section, option, value=None):\n self._dict[section.strip()][option.strip().lower()] = value", "def setsockopt(self, option, value):\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initiates a manual cutover operation on the specified source volume. This is a synchronous API. Cutover is the final phase of a volume move operation, after which the destination volume takes on the identity of the source volume. If cutover cannot be initiated or completed, the API returns an error, the move pauses, and an EMS message is printed. The volume-move-status API will then show the state of the move as move(paused), and the user can resume or abort the move.
def volume_move_cutover(self, source_volume, cutover_window=None):
    return self.request( "volume-move-cutover", {
        'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],
        'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],
    }, {
    } )
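For illustration, a sketch wrapping the manual cutover call; the `filer` object, the interpretation of cutover-window as seconds, and the generic exception handling are assumptions, since this entry does not name the SDK's error type.

# Sketch: request manual cutover for an in-progress volume move.
def cutover_volume_move(filer, source_volume, window=60):
    try:
        filer.volume_move_cutover(source_volume=source_volume,
                                  cutover_window=window)
    except Exception as err:
        # Per the description, a failed cutover leaves the move paused; the
        # operator can then resume (volume-move-resume) or abort it.
        raise RuntimeError('cutover failed, move is paused: %s' % err)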
[ "def volume_move_resume(self, source_volume, cutover_window=None, is_manual_cutover=None, is_override_warnings=None, cutover_attempts=None, is_keep_source=None):\n return self.request( \"volume-move-resume\", {\n 'cutover_window': [ cutover_window, 'cutover-window', [ int, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_manual_cutover': [ is_manual_cutover, 'is-manual-cutover', [ bool, 'None' ], False ],\n 'is_override_warnings': [ is_override_warnings, 'is-override-warnings', [ bool, 'None' ], False ],\n 'cutover_attempts': [ cutover_attempts, 'cutover-attempts', [ int, 'None' ], False ],\n 'is_keep_source': [ is_keep_source, 'is-keep-source', [ bool, 'None' ], False ],\n }, {\n 'errors-warnings': [ ErrorsWarningsInfo, True ],\n } )", "def volume_move_trigger_cutover(self, source_volume, vserver=None, force=None):\n return self.request( \"volume-move-trigger-cutover\", {\n 'vserver': [ vserver, 'vserver', [ basestring, 'None' ], False ],\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n }, {\n } )", "def volume_move_abort(self, source_volume):\n return self.request( \"volume-move-abort\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def distribute(self, volume, source, dest, *args, **kwargs):\n # Note: currently it varies whether the pipette should have a tip on\n # or not depending on the parameters for this call, so we cannot\n # create a very reliable assertion on tip status\n\n args = [volume, source, dest, *args]\n kwargs['mode'] = 'distribute'\n kwargs['mix_after'] = (0, 0)\n if 'disposal_vol' not in kwargs:\n kwargs['disposal_vol'] = self.min_volume\n return self.transfer(*args, **kwargs)", "def volume_move_pause(self, source_volume):\n return self.request( \"volume-move-pause\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n }, {\n } )", "def transfer(self, volume, source, dest, **kwargs):\n # Note: currently it varies whether the pipette should have a tip on\n # or not depending on the parameters for this call, so we cannot\n # create a very reliable assertion on tip status\n\n kwargs['mode'] = kwargs.get('mode', 'transfer')\n\n touch_tip = kwargs.get('touch_tip', False)\n if touch_tip is True:\n touch_tip = -1\n kwargs['touch_tip'] = touch_tip\n\n tip_options = {\n 'once': 1,\n 'never': 0,\n 'always': float('inf')\n }\n tip_option = kwargs.get('new_tip', 'once')\n tips = tip_options.get(tip_option)\n\n # if air gap exceeds these bounds it breaks preconditions the transfer\n # logic\n if 'air_gap' in kwargs:\n expected = self._expected_working_volume()\n if kwargs['air_gap'] < 0 or kwargs['air_gap'] >= expected:\n raise ValueError(\n \"air_gap must be between 0uL and the pipette's expected \"\n f\"working volume, {expected}uL\")\n\n if tips is None:\n raise ValueError('Unknown \"new_tip\" option: {}'.format(tip_option))\n\n plan = self._create_transfer_plan(volume, source, dest, **kwargs)\n self._run_transfer_plan(tips, plan, **kwargs)\n\n return self", "def consolidate(self, volume, source, dest, *args, **kwargs):\n\n kwargs['mode'] = 'consolidate'\n kwargs['mix_before'] = (0, 0)\n kwargs['air_gap'] = 0\n kwargs['disposal_vol'] = 0\n args = [volume, source, dest, *args]\n return self.transfer(*args, **kwargs)", "def volume_move_status(self, source_volume=None, is_verbose=None):\n return self.request( 
\"volume-move-status\", {\n 'source_volume': [ source_volume, 'source-volume', [ basestring, 'None' ], False ],\n 'is_verbose': [ is_verbose, 'is-verbose', [ bool, 'None' ], False ],\n }, {\n 'status': [ VolMoveStatusInfo, True ],\n } )", "def _copy_volume(self, src_vol_id, tgt_vol_id):\n luncopy_name = VOL_AND_SNAP_NAME_PREFIX + src_vol_id + '_' + tgt_vol_id\n self._create_luncopy(luncopy_name, src_vol_id, tgt_vol_id)\n self.luncopy_list.append(luncopy_name)\n luncopy_id = self._get_luncopy_info(luncopy_name)[1]\n try:\n self._start_luncopy(luncopy_id)\n self._wait_for_luncopy(luncopy_name)\n # Delete the target volume if LUNcopy failed.\n except Exception:\n with excutils.save_and_reraise_exception():\n # Need to remove the LUNcopy of the volume first.\n self._delete_luncopy(luncopy_id)\n self.luncopy_list.remove(luncopy_name)\n self._delete_volume(tgt_vol_id)\n # Need to delete LUNcopy finally.\n self._delete_luncopy(luncopy_id)\n self.luncopy_list.remove(luncopy_name)", "def open_cover_tilt(self, **kwargs: Any) -> None:\n self.gateway.send(self.device, {'motor': 6})", "def test_move_volumes_from_source_to_target_new_vol(\n self, mock_mv, mock_create_vol):\n self.utils.move_volumes_from_source_to_target(\n self.data.device_list, self.data.smis_sg_2,\n self.data.rest_sg_3, True)\n mock_create_vol.assert_called_once()\n mock_create_vol.reset_mock()", "def volume_control(self):\n\n volume = self.volume_prompt.text()\n if self.PushBtn.isChecked():\n direction = \"D\"\n elif self.PullBtn.isChecked():\n direction = \"P\"\n else:\n raise Exception(\"Somethings wrong in the volume_control function\")\n\n (done, answer) = self.pump.volume_command(volume, direction)\n\n if not done:\n QMessageBox.warning(self, __appname__, answer)\n if done:\n print \"plunger own status position\" + pump.own_status[\"plung_pos_mine\"]\n print \"answer: \" + answer", "def send_handover_start_msg_to_vnfs(self, handover):\n dp = self.switch.dp\n parser = dp.ofproto_parser\n\n with handover.state_lock:\n if handover.states[self.position][0] != Handover.STATE_WAITING_FOR_START_PKT:\n # start message sent already\n return\n\n tlvs = TlvBase.from_ofpmatch(handover.matches[self.position])\n tlvs.append(TlvBase(TlvBase.TYPE_VNF_FROM, handover.src_vnf.id))\n tlvs.append(TlvBase(TlvBase.TYPE_VNF_TO, handover.dst_vnf.id))\n\n src_pkt = self._build_handover_ctrl_message(handover,\n handover.src_vnf,\n HandoverMessage.CMD_HANDOVER_START_SRC_INST,\n tlvs)\n\n dst_pkt = self._build_handover_ctrl_message(handover,\n handover.dst_vnf,\n HandoverMessage.CMD_HANDOVER_START_DST_INST,\n tlvs)\n\n actions = [parser.OFPActionOutput(self.vnf_id_to_port[handover.new_rule.vnf_id])]\n\n self.mod_flow(dp,\n match=handover.matches[self.position],\n actions=actions,\n cookie=COOKIE_IN_HOLDOVER + handover.id)\n\n self._repeat_handover_msg_to_vnf(dp, handover, handover.src_vnf, src_pkt)\n self._repeat_handover_msg_to_vnf(dp, handover, handover.dst_vnf, dst_pkt)", "def test_copy_volume_to_image(self):\n self.mox.StubOutWithMock(image_utils, 'upload_volume')\n\n image_utils.upload_volume(context,\n self.TEST_IMAGE_SERVICE,\n self.TEST_IMAGE_META,\n self.TEST_VOLPATH)\n\n self.mox.ReplayAll()\n\n self._driver.copy_volume_to_image(context,\n self.TEST_VOLUME,\n self.TEST_IMAGE_SERVICE,\n self.TEST_IMAGE_META)", "def test_copy_image_to_volume(self):\n self.mox.StubOutWithMock(image_utils, 'fetch_to_raw')\n\n image_utils.fetch_to_raw(context,\n self.TEST_IMAGE_SERVICE,\n self.TEST_IMAGE_ID,\n self.TEST_VOLPATH,\n mox_lib.IgnoreArg(),\n 
size=self.TEST_VOLSIZE)\n\n self.mox.ReplayAll()\n\n self._driver.copy_image_to_volume(context,\n self.TEST_VOLUME,\n self.TEST_IMAGE_SERVICE,\n self.TEST_IMAGE_ID)", "def volume_clone_split_start(self, volume):\n return self.request( \"volume-clone-split-start\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'result-error-message': [ basestring, False ],\n 'result-jobid': [ int, False ],\n 'result-error-code': [ int, False ],\n 'result-status': [ basestring, False ],\n } )", "def create_cloned_volume(self, volume, src_vref):\n self._login()\n self._create_lun(volume)\n self.copy_volume_data(self.context, src_vref, volume)", "def VolumeToDisplace (self, volume):\n overAspirateVolume = volume + self.m_OverAspirationVolumeUl * self.m_OverAspirationMultiple\n return Displacement.VolumeToDisplace (self, overAspirateVolume)", "def test_confirm_snapshot_based_resize_at_source(self):\n self.flags(long_rpc_timeout=1234)\n self._test_compute_api(\n 'confirm_snapshot_based_resize_at_source', 'call',\n # compute method kwargs\n instance=self.fake_instance_obj,\n migration=migration_obj.Migration(source_compute='source'),\n # client.prepare kwargs\n version='6.0', prepare_server='source',\n call_monitor_timeout=60, timeout=1234)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return calculated volume limits, which are based on the current configuration of the Vserver. If an Infinite Volume already exists on the Vserver, the values returned are in relation to the existing volume. This API is not supported for Flexible Volumes.
def volume_get_limits(self, space_guarantee=None, data_aggr_list=None, enable_snapdiff=None, namespace_mirror_aggr_list=None, max_data_constituent_size=None, max_namespace_constituent_size=None, namespace_aggregate=None):
    return self.request( "volume-get-limits", {
        'space_guarantee': [ space_guarantee, 'space-guarantee', [ basestring, 'None' ], False ],
        'data_aggr_list': [ data_aggr_list, 'data-aggr-list', [ basestring, 'aggr-name' ], True ],
        'enable_snapdiff': [ enable_snapdiff, 'enable-snapdiff', [ bool, 'None' ], False ],
        'namespace_mirror_aggr_list': [ namespace_mirror_aggr_list, 'namespace-mirror-aggr-list', [ basestring, 'aggr-name' ], True ],
        'max_data_constituent_size': [ max_data_constituent_size, 'max-data-constituent-size', [ int, 'None' ], False ],
        'max_namespace_constituent_size': [ max_namespace_constituent_size, 'max-namespace-constituent-size', [ int, 'None' ], False ],
        'namespace_aggregate': [ namespace_aggregate, 'namespace-aggregate', [ basestring, 'None' ], False ],
    }, {
        'max-infinitevol-size': [ int, False ],
        'min-infinitevol-size': [ int, False ],
    } )
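For illustration, a sketch that queries the size bounds for a prospective Infinite Volume layout. The aggregate names are hypothetical placeholders, and dict-style access to the two documented output fields is an assumption about the SDK's return type.

# Sketch: ask the Vserver what Infinite Volume sizes a candidate layout allows.
def infinite_volume_size_bounds(filer):
    reply = filer.volume_get_limits(
        space_guarantee='none',
        data_aggr_list=['aggr_data_01', 'aggr_data_02'],   # hypothetical aggregates
        namespace_aggregate='aggr_ns_01',                  # hypothetical aggregate
        enable_snapdiff=False,
    )
    return reply.get('min-infinitevol-size'), reply.get('max-infinitevol-size')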
[ "def get_limits(self):\n self.LOG.info(\"Get Limits\")\n limits = {}\n limits.update(self.conn.get_compute_limits())\n limits.update(self.conn.get_volume_limits()[\"absolute\"])\n\n return {\n \"max_total_cores\": str(limits[\"max_total_cores\"]),\n \"max_total_instances\": str(limits[\"max_total_instances\"]),\n \"max_total_ram_size\": str(math.ceil(limits[\"max_total_ram_size\"] / 1024)),\n \"total_cores_used\": str(limits[\"total_cores_used\"]),\n \"total_instances_used\": str(limits[\"total_instances_used\"]),\n \"total_ram_used\": str(math.ceil(limits[\"total_ram_used\"] / 1024)),\n \"maxTotalVolumes\": str(limits[\"maxTotalVolumes\"]),\n \"maxTotalVolumeGigabytes\": str(limits[\"maxTotalVolumeGigabytes\"]),\n \"totalVolumesUsed\": str(limits[\"totalVolumesUsed\"]),\n \"totalGigabytesUsed\": str(limits[\"totalGigabytesUsed\"]),\n }", "def get_extents(self, resource):\n info_json = json.loads(resource.cloudvolume.info)\n min_point = info_json[\"voxel_offset\"]\n max_point = info_json[\"volume_size\"]\n extents = [\n [min_point[0], max_point[0]],\n [min_point[1], max_point[1]],\n [min_point[2], max_point[2]],\n ]\n return extents", "def get_max_volume(self) -> float:", "def limits(self):\n return self.discretization.limits", "def default_limits(self):\n self._reset_minmax()\n return self.vmin, self.vmax", "def boundingBoxVolume(self):\n return _cpp_methods.boundingBoxVolume(self)", "def MaxVolume (self):\n return self.m_Pump.MaxDualPistonModeVolume()", "def MaxVolume (self):\n return self.m_Pump.MaxSinglePistonModeVolume()", "def get_nvme_max_capacity(self):\n drive_info = {}\n nvme_lsblk, nvme_readlink = self.get_nvme_readlink()\n\n #Create the dictionary for NVMe size for all the servers and drives.\n for server in nvme_lsblk:\n tmp_dict = {}\n for daos_io_server in range(len(self.daos_io_servers)):\n tmp_disk_list = []\n for disk in (self.server_managers[0].manager.job.yaml.\n server_params[daos_io_server].bdev_list.value):\n if disk in nvme_readlink[server].keys():\n size = int(nvme_lsblk[server]\n [nvme_readlink[server][disk]])\n tmp_disk_list.append(size)\n else:\n self.fail(\"Disk {} can not found on server {}\"\n .format(disk, server))\n tmp_dict[daos_io_server] = tmp_disk_list\n drive_info[server] = tmp_dict\n\n return self.get_max_capacity(drive_info)", "def ex_limits(self):\r\n\r\n result = self._sync_request(command='listResourceLimits',\r\n method='GET')\r\n\r\n limits = {}\r\n resource_map = {\r\n 0: 'max_instances',\r\n 1: 'max_public_ips',\r\n 2: 'max_volumes',\r\n 3: 'max_snapshots',\r\n 4: 'max_images',\r\n 5: 'max_projects',\r\n 6: 'max_networks',\r\n 7: 'max_vpc',\r\n 8: 'max_cpu',\r\n 9: 'max_memory',\r\n 10: 'max_primary_storage',\r\n 11: 'max_secondary_storage'\r\n }\r\n\r\n for limit in result.get('resourcelimit', []):\r\n # We will ignore unknown types\r\n resource = resource_map.get(int(limit['resourcetype']), None)\r\n if not resource:\r\n continue\r\n limits[resource] = int(limit['max'])\r\n\r\n return limits", "async def get_limits(self):\r\n lo = await trio.to_thread.run_sync(self.handle.get_travel_range_min)\r\n hi = await trio.to_thread.run_sync(self.handle.get_travel_range_max)\r\n return lo, hi", "def view_limits(self, vmin, vmax):\n b = self._base\n\n vmin, vmax = self.nonsingular(vmin, vmax)\n\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = _decade_less_equal(vmin, self._base)\n vmax = _decade_greater_equal(vmax, self._base)\n\n return vmin, vmax", "def Box_limits(input_file):\n if input_file.split('.')[-1] == 'gz':\n 
out2,err2=cf.bash_command(\"\"\"zgrep -n -m1 \"BOUNDS\" %s\"\"\"%input_file)\n LineNumber=int(out2.decode(\"utf-8\").split(\":\")[0])\n f=gzip.open(input_file, 'rb')\n\n\n limits=[]\n for i in range(LineNumber+3):\n line=f.readline().decode(\"utf-8\")\n if i>=LineNumber:\n limits.append(line.strip('\\n').split())\n limits=np.array(limits,dtype='double')\n L=limits[:,1]-limits[:,0]\n\n else:\n out2,err2=cf.bash_command(\"\"\"grep -n -m1 \"BOUNDS\" %s\"\"\"%input_file)\n LineNumber=int(out2.split(\":\")[0])\n\n limits=[]\n for i in range(LineNumber,LineNumber+3):\n limits.append(linecache.getline(input_file, i+1).strip('\\n').split())\n linecache.clearcache()\n limits=np.array(limits,dtype='double')\n L=limits[:,1]-limits[:,0]\n\n return limits,L", "def get_absolute_limits(self):\r\n resp, body = self.method_get(\"/limits\")\r\n absolute_limits = body.get(\"limits\", {}).get(\"absolute\")\r\n return absolute_limits", "def get_motor_velocity_limits(self):\n max_velocity = c_double()\n max_acceleration = c_double()\n self.sdk.SCC_GetMotorVelocityLimits(self._serial, byref(max_velocity), byref(max_acceleration))\n return max_velocity.value, max_acceleration.value", "def rect_limits(self) -> ztyping.RectLimitsReturnType:\n if not self.has_limits:\n raise LimitsNotSpecifiedError(\n \"Limits are False or not set, cannot return the rectangular limits.\"\n )\n rect_limits = self._rect_limits\n return rect_limits", "def get_vlim(key):\n key = key.replace('CONV', '').replace('APEX', '')\n if '-' in key:\n # Get rid of the observatory tag\n key = key.split('-')[0]\n vlims = {}\n vlims_keys = ['vmin', 'vmax']\n if key in vlim_memo:\n val = vlim_memo[key]\n for k, v in zip(vlims_keys, val):\n if v is not None:\n vlims[k] = v\n # special rule (can remove later): if no vmin but velocity specified (e.g. moment map), vmin=0\n if 'vmin' not in vlims and '.' in key.replace('.generic', ''):\n # since the '.generic' tag has a '.' in it, filter that out\n vlims['vmin'] = 0\n return vlims", "def max_delta_volume(self):\n vols = [v.vol_charge for v in self.voltage_pairs]\n vols.extend([v.vol_discharge for v in self.voltage_pairs])\n return max(vols) / min(vols) - 1", "def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"maximum_volume_size\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given the name of a flexible volume, set the autosize settings. This API is not supported for Infinite Volumes.
def volume_autosize_set(self, volume, reset=None, increment_size=None, minimum_size=None, grow_threshold_percent=None, maximum_size=None, shrink_threshold_percent=None, is_enabled=None, mode=None):
    return self.request( "volume-autosize-set", {
        'reset': [ reset, 'reset', [ bool, 'None' ], False ],
        'increment_size': [ increment_size, 'increment-size', [ basestring, 'None' ], False ],
        'minimum_size': [ minimum_size, 'minimum-size', [ basestring, 'None' ], False ],
        'grow_threshold_percent': [ grow_threshold_percent, 'grow-threshold-percent', [ int, 'None' ], False ],
        'volume': [ volume, 'volume', [ basestring, 'None' ], False ],
        'maximum_size': [ maximum_size, 'maximum-size', [ basestring, 'None' ], False ],
        'shrink_threshold_percent': [ shrink_threshold_percent, 'shrink-threshold-percent', [ int, 'None' ], False ],
        'is_enabled': [ is_enabled, 'is-enabled', [ bool, 'None' ], False ],
        'mode': [ mode, 'mode', [ basestring, 'None' ], False ],
    }, {
    } )
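For illustration, a sketch enabling grow/shrink autosize on a flexible volume with explicit bounds. The size-string format and the 'grow_shrink' mode value follow common ONTAP usage but are assumptions as far as this entry is concerned, as is the `filer` object.

# Sketch: turn on autosize with bounds, step size, and grow/shrink thresholds.
def enable_autosize(filer, volume_name):
    filer.volume_autosize_set(
        volume=volume_name,
        is_enabled=True,
        mode='grow_shrink',            # assumed mode value; a grow-only mode also exists
        minimum_size='100g',
        maximum_size='500g',
        increment_size='10g',
        grow_threshold_percent=85,     # grow once usage exceeds 85%
        shrink_threshold_percent=50,   # shrink once usage falls below 50%
    )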
[ "def volume_autosize_get(self, volume):\n return self.request( \"volume-autosize-get\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n }, {\n 'increment-size': [ basestring, False ],\n 'minimum-size': [ basestring, False ],\n 'grow-threshold-percent': [ int, False ],\n 'maximum-size': [ basestring, False ],\n 'shrink-threshold-percent': [ int, False ],\n 'is-enabled': [ bool, False ],\n 'mode': [ basestring, False ],\n } )", "def resize(self, size):\r\n self.instance.resize_volume(size)\r\n self.size = size", "def resize_volume(self, size):\r\n curr_size = self.volume.size\r\n if size <= curr_size:\r\n raise exc.InvalidVolumeResize(\"The new volume size must be larger \"\r\n \"than the current volume size of '%s'.\" % curr_size)\r\n body = {\"volume\": {\"size\": size}}\r\n self.manager.action(self, \"resize\", body=body)", "def _extend_volume(self, name, new_size):\n LOG.debug('_extend__volume name: %s', name)\n params = {}\n params['volsize'] = ix_utils.get_bytes_from_gb(new_size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s/id/%s') % (\n FreeNASServer.REST_API_VOLUME,\n urllib.parse.quote_plus(\n self.configuration.ixsystems_dataset_path + '/' + name))\n ret = self.handle.invoke_command(FreeNASServer.UPDATE_COMMAND,\n request_urn, jparams)\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while extending volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def autosize_cooling_storage(self, **kwargs):\n\n demand = self.energy_simulation.__getattr__(\n 'cooling_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n self.cooling_storage.autosize(demand, **kwargs)", "def modify_volume_attribute(DryRun=None, VolumeId=None, AutoEnableIO=None):\n pass", "def resize_vdi(self, name, size):\n wrap_popen('collie', 'vdi', 'resize', name, size)", "def volume_set_option(self, volume, option_value, option_name):\n return self.request( \"volume-set-option\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'option_value': [ option_value, 'option-value', [ basestring, 'None' ], False ],\n 'option_name': [ option_name, 'option-name', [ basestring, 'None' ], False ],\n }, {\n } )", "def resize(self, capacity, flags=0):\n ret = libvirtmod.virStorageVolResize(self._o, capacity, flags)\n if ret == -1: raise libvirtError ('virStorageVolResize() failed', vol=self)\n return ret", "def autosize_pv(self, **kwargs):\n\n solar_generation = self.energy_simulation.__getattr__(\n 'solar_generation', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n self.pv.autosize(self.pv.get_generation(solar_generation), **kwargs)", "def setGrowSize(*args, **kwargs):\n \n pass", "def autosize_heating_storage(self, **kwargs):\n\n demand = self.energy_simulation.__getattr__(\n 'heating_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n self.heating_storage.autosize(demand, **kwargs)", "def allow_volume_expansion(self, value: bool):\n self._properties[\"allowVolumeExpansion\"] = value", "def autosize_dhw_storage(self, **kwargs):\n\n demand = self.energy_simulation.__getattr__(\n 'dhw_demand', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n 
self.dhw_storage.autosize(demand, **kwargs)", "def autosize_electrical_storage(self, **kwargs):\n\n solar_generation = self.energy_simulation.__getattr__(\n 'solar_generation', \n start_time_step=self.episode_tracker.simulation_start_time_step, \n end_time_step=self.episode_tracker.simulation_end_time_step\n )\n self.electrical_storage.autosize(self.pv.get_generation(solar_generation), **kwargs)", "def _create_volume(self, name, size):\n\n params = {}\n params['name'] = self.configuration.ixsystems_dataset_path + '/' + name\n params['type'] = 'VOLUME'\n params['volsize'] = ix_utils.get_bytes_from_gb(size)\n jparams = json.dumps(params)\n jparams = jparams.encode('utf8')\n request_urn = ('%s') % (FreeNASServer.REST_API_VOLUME)\n LOG.debug('_create_volume params : %s', params)\n LOG.debug('_create_volume urn : %s', request_urn)\n ret = self.handle.invoke_command(FreeNASServer.CREATE_COMMAND,\n request_urn, jparams)\n LOG.debug('_create_volume response : %s', json.dumps(ret))\n if ret['status'] != FreeNASServer.STATUS_OK:\n msg = ('Error while creating volume: %s' % ret['response'])\n raise FreeNASApiError('Unexpected error', msg)", "def volume_size(self, volume, new_size=None):\n return self.request( \"volume-size\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'new_size': [ new_size, 'new-size', [ basestring, 'None' ], False ],\n }, {\n 'is-fixed-size-flex-volume': [ bool, False ],\n 'is-readonly-flex-volume': [ bool, False ],\n 'is-replica-flex-volume': [ bool, False ],\n 'volume-size': [ basestring, False ],\n } )", "def _vmware_auto_resize_config(self, args: parser_extensions.Namespace):\n kwargs = {\n 'enabled': self._auto_resize_enabled(args),\n }\n if flags.IsSet(kwargs):\n return messages.VmwareAutoResizeConfig(**kwargs)\n return None", "def _adjust_volume(avr, points, operation):\n current_vol = avr.volume\n new_vol = operation(current_vol, (points * 0.5))\n\n try:\n avr.volume = new_vol\n click.echo(new_vol)\n except ReponseException:\n click.echo(\n click.style(\"New volume must be out of range.\",\n fg='red')\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take the specified Infinite Volume offline, thereby making it unavailable for data access. The Infinite Volume must be unmounted before it can be made offline. This API is not supported for Flexible Volumes. This API is not supported on Infinite Volume constituents.
def volume_offline_async(self, volume_name):
    return self.request( "volume-offline-async", {
        'volume_name': [ volume_name, 'volume-name', [ basestring, 'None' ], False ],
    }, {
        'result-error-message': [ basestring, False ],
        'result-jobid': [ int, False ],
        'result-error-code': [ int, False ],
        'result-status': [ basestring, False ],
    } )
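For illustration, a sketch that takes an already unmounted Infinite Volume offline and returns the asynchronous job id from the documented output fields; dict-style reply access and the error handling are assumptions about the surrounding SDK.

# Sketch: offline an Infinite Volume (must already be unmounted) asynchronously.
def offline_infinite_volume(filer, volume_name):
    reply = filer.volume_offline_async(volume_name=volume_name)
    error = reply.get('result-error-message')
    if error:
        raise RuntimeError('volume-offline-async failed: %s' % error)
    return reply.get('result-jobid')   # track completion via the job APIs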
[ "def async_turn_off_ac_volume(self):\n yield from self._try_command(\n \"Setting volume off of the miio AC failed.\",\n self._device.set_volume, \"off\")", "def detach(self, volume):\r\n return volume.detach()", "def _wait_for_unattachedvol(volume, sleep_time=5.0):\n state = volume.attachment_state()\n while state is not None:\n stdout.write('.')\n stdout.flush()\n sleep(sleep_time)\n volume.update()\n state = volume.attachment_state()", "def unmount(self, volume_id):\n return self._snap_operation(1, volume_id)", "def volume_down(self):\n if self.volume_level > 0:\n self.set_volume_level(max(0, self.volume_level - .2))", "def detach_volume(self, node, volume):\r\n url = REST_BASE + '/instances/%s' % (node.id)\r\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\r\n data = {'storageID': volume.id, 'type': 'detach'}\r\n resp = self.connection.request(action=url,\r\n method='PUT',\r\n headers=headers,\r\n data=data)\r\n return int(resp.status) == 200", "def detach_volume(DryRun=None, VolumeId=None, InstanceId=None, Device=None, Force=None):\n pass", "def volume(self):\n\n volume = self.cache.volume()\n self.close()\n return volume", "def offline_ivr(self, offline_ivr):\n\n self._offline_ivr = offline_ivr", "def detach(self, args):\n parser = OptionParser(usage=\"volume detach <options>\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\",\n help=\"The name of the volume to detach\")\n (options, args) = parser.parse_args(args)\n if not options.name:\n parser.print_help()\n return\n\n try:\n volume = helper.find_volume(self._context, options.name)\n if not volume:\n print \"No volume found with name: %s\" % options.name\n return\n\n vm = helper.get_attached_vm(self._context, volume)\n if not vm:\n print (\"Volume %s is not attached \"\n \"to any virtual machine\") % options.name\n return\n\n log.debug(\"Detaching volume %s from %s...\" % (options.name,\n vm.getInternalName()))\n if vm.getState().existsInHypervisor():\n print \"Detaching volume from a running virtual machine.\",\n print \"This may take some time...\"\n\n disks = [disk for disk in vm.listVirtualDisks()\n if disk.getId() != volume.getId()]\n vm.setVirtualDisks(disks)\n\n pprint_volumes([helper.refresh_volume(self._context, volume)])\n except (AbiquoException, AuthorizationException), ex:\n print \"Error: %s\" % ex.getMessage()", "def volume(ctx, *args, **kwargs):", "def volume(ctx, vol):\n avr = ctx.obj['avr']\n if vol:\n try:\n avr.volume = vol\n click.echo(avr.volume)\n except ReponseException as e:\n if \"Volume\" in str(e):\n msg = \"Volume must be specified in -0.5 increments.\"\n err = click.style(msg, fg='red')\n click.echo(err, err=True)\n else:\n click.echo(avr.volume)", "def unmanage(self, volume):\n\n vol_name = self._get_vol_name(volume)\n if len(vol_name + UNMANAGED_SUFFIX) > MAX_VOL_LENGTH:\n unmanaged_vol_name = vol_name[:-len(UNMANAGED_SUFFIX)] + \\\n UNMANAGED_SUFFIX\n else:\n unmanaged_vol_name = vol_name + UNMANAGED_SUFFIX\n LOG.info(\"Renaming existing volume %(ref_name)s to %(new_name)s\",\n {\"ref_name\": vol_name, \"new_name\": unmanaged_vol_name})\n self._rename_volume_object(vol_name, unmanaged_vol_name)", "def volume_mirror(self, volume, mirror_disks=None, force=None, victim_volume=None):\n return self.request( \"volume-mirror\", {\n 'volume': [ volume, 'volume', [ basestring, 'None' ], False ],\n 'mirror_disks': [ mirror_disks, 'mirror-disks', [ DiskInfo, 'None' ], True ],\n 'force': [ force, 'force', [ bool, 'None' ], False ],\n 'victim_volume': [ victim_volume, 'victim-volume', [ 
basestring, 'None' ], False ],\n }, {\n 'bad-disks': [ DiskInfo, True ],\n } )", "def test_azure_service_api_volume_get(self):\n pass", "def narrow(self, *args):\n return _coin.SbViewVolume_narrow(self, *args)", "def narrow(self, *args) -> \"SbViewVolume\":\n return _coin.SbViewVolume_narrow(self, *args)", "def on_offline(self):\n return self._on_offline", "def VolumeToDisplace (self, volume):\n overAspirateVolume = volume + self.m_OverAspirationVolumeUl * self.m_OverAspirationMultiple\n return Displacement.VolumeToDisplace (self, overAspirateVolume)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }